diff --git a/cli/cmd/cluster.go b/cli/cmd/cluster.go index 40d774be..9662601f 100644 --- a/cli/cmd/cluster.go +++ b/cli/cmd/cluster.go @@ -15,8 +15,19 @@ var ( func (r *runners) InitClusterCommand(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "cluster", - Short: "Manage test clusters", - Long: ``, + Short: "Manage test Kubernetes clusters.", + Long: `The 'cluster' command allows you to manage and interact with Kubernetes clusters used for testing purposes. With this command, you can create, list, remove, and manage node groups within clusters, as well as retrieve information about available clusters.`, + Example: ` # Create a single-node EKS cluster + replicated cluster create --distribution eks --version 1.31 + + # List all clusters + replicated cluster ls + + # Remove a specific cluster by ID + replicated cluster rm CLUSTER_ID + + # Create a node group within a specific cluster + replicated cluster nodegroup create --cluster-id CLUSTER_ID --instance-type m6.large --nodes 3`, } parent.AddCommand(cmd) diff --git a/cli/cmd/cluster_addon.go b/cli/cmd/cluster_addon.go index 374f0111..1fd48b09 100644 --- a/cli/cmd/cluster_addon.go +++ b/cli/cmd/cluster_addon.go @@ -12,7 +12,27 @@ import ( func (r *runners) InitClusterAddon(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "addon", - Short: "Manage cluster add-ons", + Short: "Manage cluster add-ons.", + Long: `The 'cluster addon' command allows you to manage add-ons installed on a test cluster. Add-ons are additional components or services that can be installed and configured to enhance or extend the functionality of the cluster. + +You can use various subcommands to create, list, remove, or check the status of add-ons on a cluster. 
This command is useful for adding databases, object storage, monitoring, security, or other specialized tools to your cluster environment.`, + Example: ` # List all add-ons installed on a cluster + replicated cluster addon ls CLUSTER_ID + + # Remove an add-on from a cluster + replicated cluster addon rm CLUSTER_ID --id ADDON_ID + + # Create a Postgres database add-on for a cluster + replicated cluster addon create postgres CLUSTER_ID --version 13 --disk 100 --instance-type db.t3.micro + + # Create an object store bucket add-on for a cluster + replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket + + # List add-ons with JSON output + replicated cluster addon ls CLUSTER_ID --output json + + # Create a Postgres add-on and wait for it to be ready + replicated cluster addon create postgres CLUSTER_ID --version 13 --wait 5m`, } parent.AddCommand(cmd) diff --git a/cli/cmd/cluster_addon_create.go b/cli/cmd/cluster_addon_create.go index 648df48f..5538b73b 100644 --- a/cli/cmd/cluster_addon_create.go +++ b/cli/cmd/cluster_addon_create.go @@ -7,7 +7,19 @@ import ( func (r *runners) InitClusterAddonCreate(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "create", - Short: "Create cluster add-ons", + Short: "Create cluster add-ons.", + Long: `Create new add-ons for a cluster. 
This command allows you to add functionality or services to a cluster by provisioning the required add-ons.`, + Example: ` # Create a Postgres database add-on for a cluster + replicated cluster addon create postgres CLUSTER_ID --version 13 --disk 100 --instance-type db.t3.micro + + # Create an object store bucket add-on for a cluster + replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket + + # Create a Postgres add-on and wait for it to be ready + replicated cluster addon create postgres CLUSTER_ID --version 13 --wait 5m + + # Perform a dry run for creating an object store add-on + replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket --dry-run`, } parent.AddCommand(cmd) diff --git a/cli/cmd/cluster_addon_create_objectstore.go b/cli/cmd/cluster_addon_create_objectstore.go index f0646394..7115174a 100644 --- a/cli/cmd/cluster_addon_create_objectstore.go +++ b/cli/cmd/cluster_addon_create_objectstore.go @@ -21,24 +21,32 @@ type clusterAddonCreateObjectStoreArgs struct { outputFormat string } -const ( - clusterAddonCreateObjectStoreShort = "Create an object store bucket for a cluster" - clusterAddonCreateObjectStoreLong = `Create an object store bucket for a cluster. 
- -Requires a bucket name prefix (using flag "--bucket-prefix") that will be used to create a unique bucket name with format "[BUCKET_PREFIX]-[ADDON_ID]-cmx".` - clusterAddonCreateObjectStoreExample = ` $ replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket - 05929b24 Object Store pending {"bucket_prefix":"mybucket"}` -) - func (r *runners) InitClusterAddonCreateObjectStore(parent *cobra.Command) *cobra.Command { args := clusterAddonCreateObjectStoreArgs{} cmd := &cobra.Command{ - Use: "object-store CLUSTER_ID --bucket-prefix BUCKET_PREFIX", - Short: clusterAddonCreateObjectStoreShort, - Long: clusterAddonCreateObjectStoreLong, - Example: clusterAddonCreateObjectStoreExample, - Args: cobra.ExactArgs(1), + Use: "object-store CLUSTER_ID --bucket-prefix BUCKET_PREFIX", + Short: "Create an object store bucket for a cluster.", + Long: `Creates an object store bucket for a cluster, requiring a bucket name prefix. The bucket name will be auto-generated using the format "[BUCKET_PREFIX]-[ADDON_ID]-cmx". This feature provisions an object storage bucket that can be used for storage in your cluster environment. 
+ +Examples: + # Create an object store bucket with a specified prefix + replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket + + # Create an object store bucket and wait for it to be ready (up to 5 minutes) + replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket --wait 5m + + # Perform a dry run to validate inputs without creating the bucket + replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket --dry-run + + # Create an object store bucket and output the result in JSON format + replicated cluster addon create object-store CLUSTER_ID --bucket-prefix mybucket --output json + + # Create an object store bucket with a custom prefix and wait for 10 minutes + replicated cluster addon create object-store CLUSTER_ID --bucket-prefix custom-prefix --wait 10m`, + Example: `$ replicated cluster addon create object-store 05929b24 --bucket-prefix mybucket + 05929b24 Object Store pending {"bucket_prefix":"mybucket"}`, + Args: cobra.ExactArgs(1), RunE: func(_ *cobra.Command, cmdArgs []string) error { args.clusterID = cmdArgs[0] return r.clusterAddonCreateObjectStoreCreateRun(args) diff --git a/cli/cmd/cluster_addon_create_postgres.go b/cli/cmd/cluster_addon_create_postgres.go index 552ce424..348cbab4 100644 --- a/cli/cmd/cluster_addon_create_postgres.go +++ b/cli/cmd/cluster_addon_create_postgres.go @@ -29,8 +29,25 @@ func (r *runners) InitClusterAddonCreatePostgres(parent *cobra.Command) *cobra.C cmd := &cobra.Command{ Use: "postgres CLUSTER_ID", - Short: "Create a Postgres database for a cluster", - Args: cobra.ExactArgs(1), + Short: "Create a Postgres database for a cluster.", + Long: `Creates a Postgres database instance for the specified cluster, provisioning it with a specified version, disk size, and instance type. This allows you to attach a managed Postgres instance to your cluster for database functionality. 
+ +Examples: + # Create a Postgres database with default settings + replicated cluster addon create postgres CLUSTER_ID + + # Create a Postgres 13 database with 500GB disk and a larger instance type + replicated cluster addon create postgres CLUSTER_ID --version 13 --disk 500 --instance-type db.t3.large + + # Perform a dry run to validate inputs without creating the database + replicated cluster addon create postgres CLUSTER_ID --dry-run + + # Create a Postgres database and wait for it to be ready (up to 10 minutes) + replicated cluster addon create postgres CLUSTER_ID --wait 10m + + # Create a Postgres database and output the result in JSON format + replicated cluster addon create postgres CLUSTER_ID --output json`, + Args: cobra.ExactArgs(1), RunE: func(_ *cobra.Command, cmdArgs []string) error { args.clusterID = cmdArgs[0] return r.clusterAddonCreatePostgresCreateRun(args) diff --git a/cli/cmd/cluster_addon_ls.go b/cli/cmd/cluster_addon_ls.go index d8ee0d74..b59c684d 100644 --- a/cli/cmd/cluster_addon_ls.go +++ b/cli/cmd/cluster_addon_ls.go @@ -15,8 +15,19 @@ func (r *runners) InitClusterAddonLs(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "ls CLUSTER_ID", - Short: "List cluster add-ons for a cluster", - Args: cobra.ExactArgs(1), + Short: "List cluster add-ons for a cluster.", + Long: `The 'cluster addon ls' command allows you to list all add-ons for a specific cluster. This command provides a detailed overview of the add-ons currently installed on the cluster, including their status and any relevant configuration details. 
+ +This can be useful for monitoring the health and configuration of add-ons or performing troubleshooting tasks.`, + Example: ` # List add-ons for a cluster with default table output + replicated cluster addon ls CLUSTER_ID + + # List add-ons for a cluster with JSON output + replicated cluster addon ls CLUSTER_ID --output json + + # List add-ons for a cluster with wide table output + replicated cluster addon ls CLUSTER_ID --output wide`, + Args: cobra.ExactArgs(1), RunE: func(_ *cobra.Command, cmdArgs []string) error { args.clusterID = cmdArgs[0] return r.addonClusterLsRun(args) diff --git a/cli/cmd/cluster_addon_rm.go b/cli/cmd/cluster_addon_rm.go index 56d30782..09fb45ae 100644 --- a/cli/cmd/cluster_addon_rm.go +++ b/cli/cmd/cluster_addon_rm.go @@ -16,8 +16,13 @@ func (r *runners) InitClusterAddonRm(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "rm CLUSTER_ID --id ADDON_ID", - Short: "Remove cluster add-on by ID", - Args: cobra.ExactArgs(1), + Short: "Remove cluster add-on by ID.", + Long: `The 'cluster addon rm' command allows you to remove a specific add-on from a cluster by specifying the cluster ID and the add-on ID. + +This command is useful when you want to deprovision an add-on that is no longer needed or when troubleshooting issues related to specific add-ons. 
The add-on will be removed immediately, and you will receive confirmation upon successful removal.`, + Example: ` # Remove an add-on with ID 'abc123' from cluster 'cluster456' + replicated cluster addon rm cluster456 --id abc123`, + Args: cobra.ExactArgs(1), RunE: func(_ *cobra.Command, cmdArgs []string) error { args.clusterID = cmdArgs[0] return r.clusterAddonRmRun(args) diff --git a/cli/cmd/cluster_create.go b/cli/cmd/cluster_create.go index 0b041042..a795626b 100644 --- a/cli/cmd/cluster_create.go +++ b/cli/cmd/cluster_create.go @@ -20,14 +20,37 @@ var ErrWaitDurationExceeded = errors.New("wait duration exceeded") func (r *runners) InitClusterCreate(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "create", - Short: "Create test clusters", - Long: `Create test clusters.`, + Use: "create", + Short: "Create test clusters.", + Long: `The 'cluster create' command provisions a new test cluster with the specified Kubernetes distribution and configuration. You can customize the cluster's size, version, node groups, disk space, IP family, and other parameters. + +This command supports creating clusters on multiple Kubernetes distributions, including setting up node groups with different instance types and counts. You can also specify a TTL (Time-To-Live) to automatically terminate the cluster after a set duration. 
+ +Use the '--dry-run' flag to simulate the creation process and get an estimated cost without actually provisioning the cluster.`, + Example: ` # Create a new cluster with basic configuration + replicated cluster create --distribution eks --version 1.21 --nodes 3 --instance-type t3.large --disk 100 --ttl 24h + + # Create a cluster with a custom node group + replicated cluster create --distribution eks --version 1.21 --nodegroup name=workers,instance-type=t3.large,nodes=5 --ttl 24h + + # Simulate cluster creation (dry-run) + replicated cluster create --distribution eks --version 1.21 --nodes 3 --disk 100 --ttl 24h --dry-run + + # Create a cluster with autoscaling configuration + replicated cluster create --distribution eks --version 1.21 --min-nodes 2 --max-nodes 5 --instance-type t3.large --ttl 24h + + # Create a cluster with multiple node groups + replicated cluster create --distribution eks --version 1.21 \ + --nodegroup name=workers,instance-type=t3.large,nodes=3 \ + --nodegroup name=cpu-intensive,instance-type=c5.2xlarge,nodes=2 \ + --ttl 24h + + # Create a cluster with custom tags + replicated cluster create --distribution eks --version 1.21 --nodes 3 --tag env=test --tag project=demo --ttl 24h`, SilenceUsage: true, RunE: r.createCluster, } parent.AddCommand(cmd) - cmd.Flags().StringVar(&r.args.createClusterName, "name", "", "Cluster name (defaults to random name)") cmd.Flags().StringVar(&r.args.createClusterKubernetesDistribution, "distribution", "", "Kubernetes distribution of the cluster to provision") cmd.Flags().StringVar(&r.args.createClusterKubernetesVersion, "version", "", "Kubernetes version to provision (format is distribution dependent)") diff --git a/cli/cmd/cluster_kubeconfig.go b/cli/cmd/cluster_kubeconfig.go index 008cdc4c..92f0d93b 100644 --- a/cli/cmd/cluster_kubeconfig.go +++ b/cli/cmd/cluster_kubeconfig.go @@ -22,9 +22,27 @@ const ( func (r *runners) InitClusterKubeconfig(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: 
"kubeconfig [ID]", - Short: "Download credentials for a test cluster", - Long: `Download credentials for a test cluster`, + Use: "kubeconfig [ID]", + Short: "Download credentials for a test cluster.", + Long: `The 'cluster kubeconfig' command downloads the credentials (kubeconfig) required to access a test cluster. You can either merge these credentials into your existing kubeconfig file or save them as a new file. + +This command ensures that the kubeconfig is correctly configured for use with your Kubernetes tools. You can specify the cluster by ID or by name. Additionally, the kubeconfig can be written to a specific file path or printed to stdout. + +You can also use this command to automatically update your current Kubernetes context with the downloaded credentials.`, + Example: ` # Download and merge kubeconfig into your existing configuration + replicated cluster kubeconfig CLUSTER_ID + + # Save the kubeconfig to a specific file + replicated cluster kubeconfig CLUSTER_ID --output-path ./kubeconfig + + # Print the kubeconfig to stdout + replicated cluster kubeconfig CLUSTER_ID --stdout + + # Download kubeconfig for a cluster by name + replicated cluster kubeconfig --name "My Cluster" + + # Download kubeconfig for a cluster by ID + replicated cluster kubeconfig --id CLUSTER_ID`, RunE: r.kubeconfigCluster, ValidArgsFunction: r.completeClusterIDs, } diff --git a/cli/cmd/cluster_ls.go b/cli/cmd/cluster_ls.go index 306fcd16..50304594 100644 --- a/cli/cmd/cluster_ls.go +++ b/cli/cmd/cluster_ls.go @@ -15,9 +15,30 @@ import ( func (r *runners) InitClusterList(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "ls", - Short: "List test clusters", - Long: `List test clusters`, - RunE: r.listClusters, + Short: "List test clusters.", + Long: `The 'cluster ls' command lists all test clusters. This command provides information about the clusters, such as their status, name, distribution, version, and creation time. 
The output can be formatted in different ways, depending on your needs. + +You can filter the list of clusters by time range and status (e.g., show only terminated clusters). You can also watch clusters in real-time, which updates the list every few seconds. + +Clusters that have been deleted will be shown with a 'deleted' status.`, + Example: ` # List all clusters with default table output + replicated cluster ls + + # Show clusters created after a specific date + replicated cluster ls --start-time 2023-01-01T00:00:00Z + + # Watch for real-time updates + replicated cluster ls --watch + + # List clusters with JSON output + replicated cluster ls --output json + + # List only terminated clusters + replicated cluster ls --show-terminated + + # List clusters with wide table output + replicated cluster ls --output wide`, + RunE: r.listClusters, } parent.AddCommand(cmd) diff --git a/cli/cmd/cluster_nodegroup.go b/cli/cmd/cluster_nodegroup.go index 5c596a1f..e2dd0158 100644 --- a/cli/cmd/cluster_nodegroup.go +++ b/cli/cmd/cluster_nodegroup.go @@ -6,7 +6,22 @@ import ( func (r *runners) InitClusterNodeGroup(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "nodegroup", + Use: "nodegroup", + Short: "Manage node groups for clusters.", + Long: `The 'cluster nodegroup' command provides functionality to manage node groups within a cluster. This command allows you to create, list, update, or remove node groups in a Kubernetes or VM-based cluster. + +Node groups define a set of nodes with specific configurations, such as instance types, node counts, or scaling rules. 
You can use subcommands to perform various actions on node groups.`, + Example: ` # List all node groups for a cluster + replicated cluster nodegroup ls CLUSTER_ID + + # Create a new node group in a cluster + replicated cluster nodegroup create CLUSTER_ID --nodes 3 --instance-type m6.large + + # Update an existing node group + replicated cluster nodegroup update CLUSTER_ID NODEGROUP_ID --nodes 5 + + # Remove a node group from a cluster + replicated cluster nodegroup rm CLUSTER_ID NODEGROUP_ID`, } parent.AddCommand(cmd) diff --git a/cli/cmd/cluster_nodegroup_ls.go b/cli/cmd/cluster_nodegroup_ls.go index 5a8dec40..e2624737 100644 --- a/cli/cmd/cluster_nodegroup_ls.go +++ b/cli/cmd/cluster_nodegroup_ls.go @@ -9,9 +9,21 @@ import ( func (r *runners) InitClusterNodeGroupList(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "ls [ID]", - Short: "List node groups for a cluster", - Long: `List node groups for a cluster`, + Use: "ls [ID]", + Short: "List node groups for a cluster.", + Long: `The 'cluster nodegroup ls' command lists all the node groups associated with a given cluster. Each node group defines a specific set of nodes with particular configurations, such as instance types and scaling options. + +You can view information about the node groups within the specified cluster, including their ID, name, node count, and other configuration details. 
+ +You must provide the cluster ID to list its node groups.`, + Example: ` # List all node groups in a cluster with default table output + replicated cluster nodegroup ls CLUSTER_ID + + # List node groups with JSON output + replicated cluster nodegroup ls CLUSTER_ID --output json + + # List node groups with wide table output + replicated cluster nodegroup ls CLUSTER_ID --output wide`, Args: cobra.ExactArgs(1), RunE: r.listNodeGroups, ValidArgsFunction: r.completeClusterIDs, diff --git a/cli/cmd/cluster_port.go b/cli/cmd/cluster_port.go index 90481d34..397a57f1 100644 --- a/cli/cmd/cluster_port.go +++ b/cli/cmd/cluster_port.go @@ -6,7 +6,19 @@ import ( func (r *runners) InitClusterPort(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "port", + Use: "port", + Short: "Manage cluster ports.", + Long: `The 'cluster port' command is a parent command for managing ports in a cluster. It allows users to list, remove, or expose specific ports used by the cluster. Use the subcommands (such as 'ls', 'rm', and 'expose') to manage port configurations effectively. + +This command provides flexibility for handling ports in various test clusters, ensuring efficient management of cluster networking settings.`, + Example: ` # List all exposed ports in a cluster + replicated cluster port ls [CLUSTER_ID] + + # Remove an exposed port from a cluster + replicated cluster port rm [CLUSTER_ID] [PORT] + + # Expose a new port in a cluster + replicated cluster port expose [CLUSTER_ID] [PORT]`, SilenceUsage: true, Hidden: false, } diff --git a/cli/cmd/cluster_port_expose.go b/cli/cmd/cluster_port_expose.go index 64db7394..a9581e89 100644 --- a/cli/cmd/cluster_port_expose.go +++ b/cli/cmd/cluster_port_expose.go @@ -6,26 +6,28 @@ import ( "github.com/spf13/cobra" ) -const ( - clusterPortExposeShort = "Expose a port on a cluster to the public internet" - clusterPortExposeLong = `Expose a port on a cluster to the public internet. 
+func (r *runners) InitClusterPortExpose(parent *cobra.Command) *cobra.Command { + cmd := &cobra.Command{ + Use: "expose CLUSTER_ID --port PORT", + Short: "Expose a port on a cluster to the public internet.", + Long: `The 'cluster port expose' command is used to expose a specified port on a cluster to the public internet. When exposing a port, the command automatically creates a DNS entry and, if using the "https" protocol, provisions a TLS certificate for secure communication. -This command will create a DNS entry and TLS certificate (if "https") for the specified port on the cluster. +You can also create a wildcard DNS entry and TLS certificate by specifying the "--wildcard" flag. Please note that creating a wildcard certificate may take additional time. -A wildcard DNS entry and TLS certificate can be created by specifying the "--wildcard" flag. This will take extra time to provision. +This command supports different protocols including "http", "https", "ws", and "wss" for web traffic and web socket communication. 
-NOTE: This feature currently only supports VM cluster distributions.` - clusterPortExposeExample = ` $ replicated cluster port expose 05929b24 --port 8080 --protocol https --wildcard - ID CLUSTER PORT PROTOCOL EXPOSED PORT WILDCARD STATUS - d079b2fc 8080 https https://happy-germain.ingress.replicatedcluster.com true pending` -) +NOTE: Currently, this feature only supports VM-based cluster distributions.`, + Example: ` # Expose port 8080 with HTTPS protocol and wildcard DNS + replicated cluster port expose CLUSTER_ID --port 8080 --protocol https --wildcard -func (r *runners) InitClusterPortExpose(parent *cobra.Command) *cobra.Command { - cmd := &cobra.Command{ - Use: "expose CLUSTER_ID --port PORT", - Short: clusterPortExposeShort, - Long: clusterPortExposeLong, - Example: clusterPortExposeExample, + # Expose port 3000 with HTTP protocol + replicated cluster port expose CLUSTER_ID --port 3000 --protocol http + + # Expose port 8080 with multiple protocols + replicated cluster port expose CLUSTER_ID --port 8080 --protocol http,https + + # Expose port 8080 and display the result in JSON format + replicated cluster port expose CLUSTER_ID --port 8080 --protocol https --output json`, RunE: r.clusterPortExpose, Args: cobra.ExactArgs(1), ValidArgsFunction: r.completeClusterIDs, diff --git a/cli/cmd/cluster_port_ls.go b/cli/cmd/cluster_port_ls.go index 9f32d238..31ec9783 100644 --- a/cli/cmd/cluster_port_ls.go +++ b/cli/cmd/cluster_port_ls.go @@ -7,8 +7,19 @@ import ( func (r *runners) InitClusterPortLs(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "ls CLUSTER_ID", - Short: "List cluster ports for a cluster", + Use: "ls CLUSTER_ID", + Short: "List cluster ports for a cluster.", + Long: `The 'cluster port ls' command lists all the ports configured for a specific cluster. You must provide the cluster ID to retrieve and display the ports. 
+ +This command is useful for viewing the current port configurations, protocols, and other related settings of your test cluster. The output format can be customized to suit your needs, and the available formats include table, JSON, and wide views.`, + Example: ` # List ports for a cluster in the default table format + replicated cluster port ls CLUSTER_ID + + # List ports for a cluster in JSON format + replicated cluster port ls CLUSTER_ID --output json + + # List ports for a cluster in wide format + replicated cluster port ls CLUSTER_ID --output wide`, RunE: r.clusterPortList, Args: cobra.ExactArgs(1), ValidArgsFunction: r.completeClusterIDs, diff --git a/cli/cmd/cluster_port_rm.go b/cli/cmd/cluster_port_rm.go index e6fd0d4c..5645f773 100644 --- a/cli/cmd/cluster_port_rm.go +++ b/cli/cmd/cluster_port_rm.go @@ -8,8 +8,21 @@ import ( func (r *runners) InitClusterPortRm(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "rm CLUSTER_ID --id PORT_ID", - Short: "Remove cluster port by ID", + Use: "rm CLUSTER_ID --id PORT_ID", + Short: "Remove cluster port by ID.", + Long: `The 'cluster port rm' command removes a specific port from a cluster. You must provide either the ID of the port or the port number and protocol(s) to remove. + +This command is useful for managing the network settings of your test clusters by allowing you to clean up unused or incorrect ports. After removing a port, the updated list of ports will be displayed. 
+ +Note that you can only use either the port ID or port number when removing a port, not both at the same time.`, + Example: ` # Remove a port using its ID + replicated cluster port rm CLUSTER_ID --id PORT_ID + + # Remove a port using its number (deprecated) + replicated cluster port rm CLUSTER_ID --port 8080 --protocol http,https + + # Remove a port and display the result in JSON format + replicated cluster port rm CLUSTER_ID --id PORT_ID --output json`, RunE: r.clusterPortRemove, Args: cobra.ExactArgs(1), ValidArgsFunction: r.completeClusterIDs, diff --git a/cli/cmd/cluster_prepare.go b/cli/cmd/cluster_prepare.go index ad1be867..9723faac 100644 --- a/cli/cmd/cluster_prepare.go +++ b/cli/cmd/cluster_prepare.go @@ -44,22 +44,20 @@ import ( func (r *runners) InitClusterPrepare(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "prepare", - Short: "prepare cluster for testing", - Long: `The cluster prepare command will provision a cluster and install -a local helm chart with a custom values.yaml and custom replicated sdk entitlements. + Short: "Prepare cluster for testing.", + Long: `The 'cluster prepare' command provisions a Kubernetes cluster and installs an application using a Helm chart or KOTS YAML configuration. -This is a higher level CLI command that is useful in CI when you have a Helm chart and -want it running in a variety of clusters. +This command is designed to be used in CI environments to prepare a cluster for testing by deploying a Helm chart or KOTS application with entitlements and custom values. You can specify the cluster configuration, such as the Kubernetes distribution, version, node count, and instance type, and then install your application automatically. -For more control over the workflow, consider using the cluster create command and -using kubectl and helm CLI tools to install your application. 
+Alternatively, if you prefer deploying KOTS applications, you can specify YAML manifests for the release and use the '--shared-password' flag for the KOTS admin console. -Example: +You can also pass entitlement values to configure the cluster's customer entitlements. -replicated cluster prepare --distribution eks --version 1.27 --instance-type c6.xlarge --node-count 3 \ - --entitlement seat_count=100 --entitlement license_type=enterprise \ - --chart ./your-chart.tgz --values ./values.yaml --set chart-key=value --set chart-key2=value2`, - RunE: r.prepareCluster, +Note: +- The '--chart' flag cannot be used with '--yaml', '--yaml-file', or '--yaml-dir'. +- If deploying a Helm chart, use the '--set' flags to pass chart values. When deploying a KOTS application, the '--shared-password' flag is required.`, + Example: ` replicated cluster prepare --distribution eks --version 1.27 --instance-type c6.xlarge --node-count 3 --chart ./your-chart.tgz --values ./values.yaml --set chart-key=value --set chart-key2=value2`, + RunE: r.prepareCluster, } cmd.PreRunE = func(cmd *cobra.Command, args []string) error { diff --git a/cli/cmd/cluster_rm.go b/cli/cmd/cluster_rm.go index 115c2e24..d71efded 100644 --- a/cli/cmd/cluster_rm.go +++ b/cli/cmd/cluster_rm.go @@ -11,10 +11,19 @@ import ( func (r *runners) InitClusterRemove(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "rm ID [ID …]", - Short: "Remove test clusters", - Long: `Removes a cluster immediately. + Short: "Remove test clusters.", + Long: `The 'rm' command removes test clusters immediately. -You can specify the --all flag to terminate all clusters.`, +You can remove clusters by specifying a cluster ID, or by using other criteria such as cluster names or tags. Alternatively, you can remove all clusters in your account at once. + +This command can also be used in a dry-run mode to simulate the removal without actually deleting anything. 
+ +You cannot mix the use of cluster IDs with other options like removing by name, tag, or removing all clusters at once.`, + Example: ` # Remove a specific cluster by ID + replicated cluster rm CLUSTER_ID + + # Remove all clusters + replicated cluster rm --all`, RunE: r.removeClusters, ValidArgsFunction: r.completeClusterIDs, } @@ -70,7 +79,7 @@ func (r *runners) removeClusters(_ *cobra.Command, args []string) error { } for _, cluster := range clusters { - if cluster.Tags != nil && len(cluster.Tags) > 0 { + if len(cluster.Tags) > 0 { for _, tag := range tags { for _, clusterTag := range cluster.Tags { if clusterTag.Key == tag.Key && clusterTag.Value == tag.Value { diff --git a/cli/cmd/cluster_shell.go b/cli/cmd/cluster_shell.go index b4c7c4c0..752cb100 100644 --- a/cli/cmd/cluster_shell.go +++ b/cli/cmd/cluster_shell.go @@ -19,9 +19,18 @@ import ( func (r *runners) InitClusterShell(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "shell [ID]", - Short: "Open a new shell with kubeconfig configured.", - Long: `Open a new shell with kubeconfig configured.`, + Use: "shell [ID]", + Short: "Open a new shell with kubeconfig configured.", + Long: `The 'shell' command opens a new shell session with the kubeconfig configured for the specified test cluster. This allows you to have immediate kubectl access to the cluster within the shell environment. + +You can either specify the cluster ID directly or provide the cluster name to resolve the corresponding cluster ID. The shell will inherit your existing environment and add the necessary kubeconfig context for interacting with the Kubernetes cluster. + +Once inside the shell, you can use 'kubectl' to interact with the cluster. To exit the shell, press Ctrl-D or type 'exit'. 
When the shell closes, the kubeconfig will be reset back to your default configuration.`, + Example: ` # Open a shell for a cluster by ID + replicated cluster shell CLUSTER_ID + + # Open a shell for a cluster by name + replicated cluster shell --name "My Cluster"`, RunE: r.shellCluster, ValidArgsFunction: r.completeClusterIDs, } diff --git a/cli/cmd/cluster_update.go b/cli/cmd/cluster_update.go index 426e1c82..90a67c28 100644 --- a/cli/cmd/cluster_update.go +++ b/cli/cmd/cluster_update.go @@ -9,8 +9,15 @@ import ( func (r *runners) InitClusterUpdateCommand(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "update", - Short: "Update cluster settings", - Long: `cluster update can be used to update cluster settings`, + Short: "Update cluster settings.", + Long: `The 'update' command allows you to update various settings of a test cluster, such as its name or ID. + +You can either specify the cluster ID directly or provide the cluster name, and the command will resolve the corresponding cluster ID. This allows you to modify the cluster's configuration based on the unique identifier or the name of the cluster.`, + Example: ` # Update a cluster using its ID + replicated cluster update --id [subcommand] + + # Update a cluster using its name + replicated cluster update --name [subcommand]`, } parent.AddCommand(cmd) diff --git a/cli/cmd/cluster_update_nodegroup.go b/cli/cmd/cluster_update_nodegroup.go index 392d7ed7..2c04907f 100644 --- a/cli/cmd/cluster_update_nodegroup.go +++ b/cli/cmd/cluster_update_nodegroup.go @@ -12,9 +12,16 @@ import ( func (r *runners) InitClusterUpdateNodegroup(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "nodegroup [ID]", - Short: "Update a nodegroup for a test cluster", - Long: `Update a nodegroup for a test cluster`, + Use: "nodegroup [ID]", + Short: "Update a nodegroup for a test cluster.", + Long: `The 'nodegroup' command allows you to update the configuration of a nodegroup within a test cluster. 
You can update attributes like the number of nodes, minimum and maximum node counts for autoscaling, and more. + +If you do not provide the nodegroup ID, the command will try to resolve it based on the nodegroup name provided.`, + Example: ` # Update the number of nodes in a nodegroup + replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --nodes 3 + + # Update the autoscaling limits for a nodegroup + replicated cluster update nodegroup CLUSTER_ID --nodegroup-id NODEGROUP_ID --min-nodes 2 --max-nodes 5`, RunE: r.updateClusterNodegroup, SilenceUsage: true, ValidArgsFunction: r.completeClusterIDs, diff --git a/cli/cmd/cluster_update_ttl.go b/cli/cmd/cluster_update_ttl.go index 0532ce28..03a65026 100644 --- a/cli/cmd/cluster_update_ttl.go +++ b/cli/cmd/cluster_update_ttl.go @@ -10,9 +10,11 @@ import ( func (r *runners) InitClusterUpdateTTL(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "ttl [ID]", - Short: "Update TTL for a test cluster", - Long: `Update TTL for a test cluster`, + Use: "ttl [ID]", + Short: "Update TTL for a test cluster.", + Long: `The 'ttl' command allows you to update the Time-To-Live (TTL) of a test cluster. The TTL represents the duration for which the cluster will remain active before it is automatically terminated. The duration starts from the moment the cluster becomes active. 
You must provide a valid duration, with a maximum limit of 48 hours.`, + Example: ` # Update the TTL for a specific cluster + replicated cluster update ttl CLUSTER_ID --ttl 24h`, RunE: r.updateClusterTTL, SilenceUsage: true, ValidArgsFunction: r.completeClusterIDs, diff --git a/cli/cmd/cluster_upgrade.go b/cli/cmd/cluster_upgrade.go index 0f5ce1bc..70a5e5eb 100644 --- a/cli/cmd/cluster_upgrade.go +++ b/cli/cmd/cluster_upgrade.go @@ -14,9 +14,17 @@ import ( func (r *runners) InitClusterUpgrade(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "upgrade [ID]", - Short: "Upgrade a test clusters", - Long: `Upgrade a test clusters`, + Use: "upgrade [ID]", + Short: "Upgrade a test cluster.", + Long: `The 'upgrade' command upgrades a Kubernetes test cluster to a specified version. You must provide a cluster ID and the version to upgrade to. The upgrade can be simulated with a dry-run option, or you can choose to wait for the cluster to be fully upgraded.`, + Example: ` # Upgrade a cluster to a new Kubernetes version + replicated cluster upgrade [CLUSTER_ID] --version 1.31 + + # Perform a dry run of a cluster upgrade without making any changes + replicated cluster upgrade [CLUSTER_ID] --version 1.31 --dry-run + + # Upgrade a cluster and wait for it to be ready + replicated cluster upgrade [CLUSTER_ID] --version 1.31 --wait 30m`, Args: cobra.ExactArgs(1), RunE: r.upgradeCluster, SilenceUsage: true, diff --git a/cli/cmd/cluster_versions.go b/cli/cmd/cluster_versions.go index 5dd41b61..8e9ef772 100644 --- a/cli/cmd/cluster_versions.go +++ b/cli/cmd/cluster_versions.go @@ -11,9 +11,17 @@ import ( func (r *runners) InitClusterVersions(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "versions", - Short: "List cluster versions", - Long: `List cluster versions`, - RunE: r.listClusterVersions, + Short: "List cluster versions.", + Long: `The 'versions' command lists available Kubernetes versions for supported distributions. 
You can filter the versions by specifying a distribution and choose between different output formats.`, + Example: ` # List all available Kubernetes cluster versions + replicated cluster versions + + # List available versions for a specific distribution (e.g., eks) + replicated cluster versions --distribution eks + + # Output the versions in JSON format + replicated cluster versions --output json`, + RunE: r.listClusterVersions, } parent.AddCommand(cmd) diff --git a/cli/cmd/network.go b/cli/cmd/network.go new file mode 100644 index 00000000..65d5733a --- /dev/null +++ b/cli/cmd/network.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +func (r *runners) InitNetworkCommand(parent *cobra.Command) *cobra.Command { + cmd := &cobra.Command{ + Use: "network", + Short: "Manage test networks for VMs and Clusters", + Long: ``, + Hidden: true, + } + parent.AddCommand(cmd) + + return cmd +} diff --git a/cli/cmd/network_create.go b/cli/cmd/network_create.go new file mode 100644 index 00000000..463fe522 --- /dev/null +++ b/cli/cmd/network_create.go @@ -0,0 +1,22 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +func (r *runners) InitNetworkCreate(parent *cobra.Command) *cobra.Command { + cmd := &cobra.Command{ + Use: "create", + Short: "Create networks for VMs and Clusters", + Long: ``, + SilenceUsage: true, + RunE: r.createNetwork, + } + parent.AddCommand(cmd) + + return cmd +} + +func (r *runners) createNetwork(_ *cobra.Command, args []string) error { + return nil +} diff --git a/cli/cmd/network_join.go b/cli/cmd/network_join.go new file mode 100644 index 00000000..9aaf0128 --- /dev/null +++ b/cli/cmd/network_join.go @@ -0,0 +1,21 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +func (r *runners) InitNetworkJoin(parent *cobra.Command) *cobra.Command { + cmd := &cobra.Command{ + Use: "join", + Short: "Join a test network", + Long: ``, + RunE: r.joinNetwork, + } + parent.AddCommand(cmd) + + return cmd +} + +func (r *runners) 
joinNetwork(_ *cobra.Command, args []string) error { + return nil +} diff --git a/cli/cmd/network_ls.go b/cli/cmd/network_ls.go new file mode 100644 index 00000000..c894984e --- /dev/null +++ b/cli/cmd/network_ls.go @@ -0,0 +1,21 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +func (r *runners) InitNetworkList(parent *cobra.Command) *cobra.Command { + cmd := &cobra.Command{ + Use: "ls", + Short: "List test networks", + Long: ``, + RunE: r.listNetworks, + } + parent.AddCommand(cmd) + + return cmd +} + +func (r *runners) listNetworks(_ *cobra.Command, args []string) error { + return nil +} diff --git a/cli/cmd/network_rm.go b/cli/cmd/network_rm.go new file mode 100644 index 00000000..cbd8685c --- /dev/null +++ b/cli/cmd/network_rm.go @@ -0,0 +1,21 @@ +package cmd + +import ( + "github.com/spf13/cobra" +) + +func (r *runners) InitNetworkRemove(parent *cobra.Command) *cobra.Command { + cmd := &cobra.Command{ + Use: "rm ID [ID …]", + Short: "Remove test network", + Long: ``, + RunE: r.removeNetworks, + } + parent.AddCommand(cmd) + + return cmd +} + +func (r *runners) removeNetworks(_ *cobra.Command, args []string) error { + return nil +} diff --git a/cli/cmd/root.go b/cli/cmd/root.go index f2d178bf..6d693b7e 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -262,12 +262,15 @@ func Execute(rootCmd *cobra.Command, stdin io.Reader, stdout io.Writer, stderr i runCmds.InitVMVersions(vmCmd) runCmds.InitVMRemove(vmCmd) - vmNodeGroupCmd := runCmds.InitVMGroup(vmCmd) - runCmds.InitVMGroupList(vmNodeGroupCmd) - vmUpdateCmd := runCmds.InitVMUpdateCommand(vmCmd) runCmds.InitVMUpdateTTL(vmUpdateCmd) + networkCmd := runCmds.InitNetworkCommand(runCmds.rootCmd) + runCmds.InitNetworkCreate(networkCmd) + runCmds.InitNetworkList(networkCmd) + runCmds.InitNetworkRemove(networkCmd) + runCmds.InitNetworkJoin(networkCmd) + runCmds.InitLoginCommand(runCmds.rootCmd) runCmds.InitLogoutCommand(runCmds.rootCmd) diff --git a/cli/cmd/runner.go b/cli/cmd/runner.go index 
9d03adce..fba82650 100644 --- a/cli/cmd/runner.go +++ b/cli/cmd/runner.go @@ -294,4 +294,31 @@ type runnerArgs struct { compatibilitySuccess bool compatibilityFailure bool compatibilityNotes string + + createVMName string + createVMDistribution string + createVMVersion string + createVMCount int + createVMDiskGiB int64 + createVMTTL string + createVMInstanceType string + createVMWaitDuration time.Duration + createVMTags []string + createVMNetwork string + createVMDryRun bool + + lsVMShowTerminated bool + lsVMStartTime string + lsVMEndTime string + lsVMWatch bool + + removeVMAll bool + removeVMTags []string + removeVMNames []string + removeVMDryRun bool + + updateVMTTL string + + updateVMName string + updateVMID string } diff --git a/cli/cmd/vm.go b/cli/cmd/vm.go index 84839338..5d7bcb27 100644 --- a/cli/cmd/vm.go +++ b/cli/cmd/vm.go @@ -10,8 +10,19 @@ import ( func (r *runners) InitVMCommand(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "vm", - Short: "Manage test vms", - Long: ``, + Short: "Manage test virtual machines.", + Long: `The 'vm' command allows you to manage and interact with virtual machines (VMs) used for testing purposes. 
With this command, you can create, list, remove, update, and manage VMs, as well as retrieve information about available VM versions.`, + Example: ` # Create a single Ubuntu VM + replicated vm create --distribution ubuntu --version 20.04 + + # List all VMs + replicated vm ls + + # Remove a specific VM by ID + replicated vm rm + + # Update TTL for a specific VM + replicated vm update ttl --ttl 24h`, } parent.AddCommand(cmd) diff --git a/cli/cmd/vm_create.go b/cli/cmd/vm_create.go index 3d378d69..641a48a0 100644 --- a/cli/cmd/vm_create.go +++ b/cli/cmd/vm_create.go @@ -3,8 +3,6 @@ package cmd import ( "fmt" "os" - "strconv" - "strings" "time" "github.com/moby/moby/pkg/namesgenerator" @@ -20,28 +18,41 @@ var ErrVMWaitDurationExceeded = errors.New("wait duration exceeded") func (r *runners) InitVMCreate(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "create", - Short: "Create test VMs", - Long: `Create test VMs.`, + Use: "create", + Short: "Create one or more test VMs with specified distribution, version, and configuration options.", + Long: `Create one or more test VMs with a specified distribution, version, and a variety of customizable configuration options. + +This command allows you to provision VMs with different distributions (e.g., Ubuntu, RHEL), versions, instance types, and more. You can set the number of VMs to create, disk size, and specify the network to use. If no network is provided, a new network will be created automatically. You can also assign tags to your VMs and use a TTL (Time-To-Live) to define how long the VMs should live. + +By default, the command provisions one VM, but you can customize the number of VMs to create by using the "--count" flag. Additionally, you can use the "--dry-run" flag to simulate the creation without actually provisioning the VMs. 
+ +The command also supports a "--wait" flag to wait for the VMs to be ready before returning control, with a customizable timeout duration.`, + Example: ` # Create a single Ubuntu 20.04 VM + replicated vm create --distribution ubuntu --version 20.04 + + # Create 3 Ubuntu 20.04 VMs + replicated vm create --distribution ubuntu --version 20.04 --count 3 + + # Create 5 Ubuntu VMs with a custom instance type and disk size + replicated vm create --distribution ubuntu --version 20.04 --count 5 --instance-type r1.medium --disk 100`, SilenceUsage: true, RunE: r.createVM, } parent.AddCommand(cmd) - cmd.Flags().StringVar(&r.args.createClusterName, "name", "", "VM name (defaults to random name)") - cmd.Flags().StringVar(&r.args.createClusterKubernetesDistribution, "distribution", "", "Distribution of the vm to provision") - cmd.Flags().StringVar(&r.args.createClusterKubernetesVersion, "version", "", "Vversion to provision (format is distribution dependent)") - cmd.Flags().StringVar(&r.args.createClusterIPFamily, "ip-family", "", "IP Family to use for the vm (ipv4|ipv6|dual).") - cmd.Flags().IntVar(&r.args.createClusterNodeCount, "nodes", int(1), "Node count") - cmd.Flags().Int64Var(&r.args.createClusterDiskGiB, "disk", int64(50), "Disk Size (GiB) to request per node") - cmd.Flags().StringVar(&r.args.createClusterTTL, "ttl", "", "VM TTL (duration, max 48h)") - cmd.Flags().DurationVar(&r.args.createClusterWaitDuration, "wait", time.Second*0, "Wait duration for VM to be ready (leave empty to not wait)") - cmd.Flags().StringVar(&r.args.createClusterInstanceType, "instance-type", "", "The type of instance to use (e.g. r1.medium)") - cmd.Flags().StringArrayVar(&r.args.createClusterNodeGroups, "group", []string{}, "Group to create (name=?,instance-type=?,nodes=?,disk=? format, can be specified multiple times). 
For each group, one of the following flags must be specified: name, instance-type, nodes or disk.") + cmd.Flags().StringVar(&r.args.createVMName, "name", "", "VM name (defaults to random name)") + cmd.Flags().StringVar(&r.args.createVMDistribution, "distribution", "", "Distribution of the vm to provision") + cmd.Flags().StringVar(&r.args.createVMVersion, "version", "", "Version to provision (format is distribution dependent)") + cmd.Flags().IntVar(&r.args.createVMCount, "count", int(1), "Number of matching VMs to create") + cmd.Flags().Int64Var(&r.args.createVMDiskGiB, "disk", int64(50), "Disk Size (GiB) to request per node") + cmd.Flags().StringVar(&r.args.createVMTTL, "ttl", "", "VM TTL (duration, max 48h)") + cmd.Flags().DurationVar(&r.args.createVMWaitDuration, "wait", time.Second*0, "Wait duration for VM(s) to be ready (leave empty to not wait)") + cmd.Flags().StringVar(&r.args.createVMInstanceType, "instance-type", "", "The type of instance to use (e.g. r1.medium)") + cmd.Flags().StringVar(&r.args.createVMNetwork, "network", "", "The network to use for the VM(s). If not supplied, create a new network") - cmd.Flags().StringArrayVar(&r.args.createClusterTags, "tag", []string{}, "Tag to apply to the VM (key=value format, can be specified multiple times)") + cmd.Flags().StringArrayVar(&r.args.createVMTags, "tag", []string{}, "Tag to apply to the VM (key=value format, can be specified multiple times)") - cmd.Flags().BoolVar(&r.args.createClusterDryRun, "dry-run", false, "Dry run") + cmd.Flags().BoolVar(&r.args.createVMDryRun, "dry-run", false, "Dry run") cmd.Flags().StringVar(&r.outputFormat, "output", "table", "The output format to use. 
One of: json|table|wide (default: table)") @@ -51,32 +62,25 @@ func (r *runners) InitVMCreate(parent *cobra.Command) *cobra.Command { } func (r *runners) createVM(_ *cobra.Command, args []string) error { - if r.args.createClusterName == "" { - r.args.createClusterName = namesgenerator.GetRandomName(0) + if r.args.createVMName == "" { + r.args.createVMName = namesgenerator.GetRandomName(0) } - tags, err := parseTags(r.args.createClusterTags) + tags, err := parseTags(r.args.createVMTags) if err != nil { return errors.Wrap(err, "parse tags") } - nodeGroups, err := parseVMNodeGroups(r.args.createClusterNodeGroups) - if err != nil { - return errors.Wrap(err, "parse node groups") - } - opts := kotsclient.CreateVMOpts{ - Name: r.args.createClusterName, - Distribution: r.args.createClusterKubernetesDistribution, - Version: r.args.createClusterKubernetesVersion, - IPFamily: r.args.createClusterIPFamily, - NodeCount: r.args.createClusterNodeCount, - DiskGiB: r.args.createClusterDiskGiB, - TTL: r.args.createClusterTTL, - InstanceType: r.args.createClusterInstanceType, - NodeGroups: nodeGroups, + Name: r.args.createVMName, + Distribution: r.args.createVMDistribution, + Version: r.args.createVMVersion, + Count: r.args.createVMCount, + DiskGiB: r.args.createVMDiskGiB, + TTL: r.args.createVMTTL, + InstanceType: r.args.createVMInstanceType, Tags: tags, - DryRun: r.args.createClusterDryRun, + DryRun: r.args.createVMDryRun, } vm, err := r.createAndWaitForVM(opts) @@ -114,7 +118,9 @@ func (r *runners) createAndWaitForVM(opts kotsclient.CreateVMOpts) (*types.VM, e if ve != nil && ve.Message != "" { if ve.ValidationError != nil && len(ve.ValidationError.Errors) > 0 { if len(ve.ValidationError.SupportedDistributions) > 0 { - _ = print.VMVersions("table", r.w, ve.ValidationError.SupportedDistributions) + if err := print.VMVersions("table", r.w, ve.ValidationError.SupportedDistributions); err != nil { + return nil, errors.Wrap(err, "print vm versions") + } } } return nil, 
errors.New(ve.Message) @@ -125,8 +131,8 @@ func (r *runners) createAndWaitForVM(opts kotsclient.CreateVMOpts) (*types.VM, e } // if the wait flag was provided, we poll the api until the vm is ready, or a timeout - if r.args.createClusterWaitDuration > 0 { - return waitForVM(r.kotsAPI, vm.ID, r.args.createClusterWaitDuration) + if r.args.createVMWaitDuration > 0 { + return waitForVM(r.kotsAPI, vm.ID, r.args.createVMWaitDuration) } return vm, nil @@ -140,9 +146,9 @@ func waitForVM(kotsRestClient *kotsclient.VendorV3Client, id string, duration ti return nil, errors.Wrap(err, "get vm") } - if vm.Status == types.ClusterStatusRunning { + if vm.Status == types.VMStatus(types.VMStatusRunning) { return vm, nil - } else if vm.Status == types.ClusterStatusError || vm.Status == types.ClusterStatusUpgradeError { + } else if vm.Status == types.VMStatus(types.VMStatusError) { return nil, errors.New("vm failed to provision") } else { if time.Now().After(start.Add(duration)) { @@ -154,42 +160,3 @@ func waitForVM(kotsRestClient *kotsclient.VendorV3Client, id string, duration ti time.Sleep(time.Second * 5) } } - -func parseVMNodeGroups(nodeGroups []string) ([]kotsclient.VMNodeGroup, error) { - parsedNodeGroups := []kotsclient.VMNodeGroup{} - for _, nodeGroup := range nodeGroups { - field := strings.Split(nodeGroup, ",") - ng := kotsclient.VMNodeGroup{} - for _, f := range field { - fieldParsed := strings.SplitN(f, "=", 2) - if len(fieldParsed) != 2 { - return nil, errors.Errorf("invalid node group format: %s", nodeGroup) - } - parsedFieldKey := fieldParsed[0] - parsedFieldValue := fieldParsed[1] - switch parsedFieldKey { - case "name": - ng.Name = parsedFieldValue - case "instance-type": - ng.InstanceType = parsedFieldValue - case "nodes": - nodes, err := strconv.Atoi(parsedFieldValue) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse nodes value: %s", parsedFieldValue) - } - ng.Nodes = nodes - case "disk": - diskSize, err := strconv.Atoi(parsedFieldValue) - if err 
!= nil { - return nil, errors.Wrapf(err, "failed to parse disk value: %s", parsedFieldValue) - } - ng.Disk = diskSize - default: - return nil, errors.Errorf("invalid node group field: %s", parsedFieldKey) - } - } - - parsedNodeGroups = append(parsedNodeGroups, ng) - } - return parsedNodeGroups, nil -} diff --git a/cli/cmd/vm_group.go b/cli/cmd/vm_group.go deleted file mode 100644 index cd2f302e..00000000 --- a/cli/cmd/vm_group.go +++ /dev/null @@ -1,14 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" -) - -func (r *runners) InitVMGroup(parent *cobra.Command) *cobra.Command { - cmd := &cobra.Command{ - Use: "group", - } - parent.AddCommand(cmd) - - return cmd -} diff --git a/cli/cmd/vm_group_ls.go b/cli/cmd/vm_group_ls.go deleted file mode 100644 index 6617d6f4..00000000 --- a/cli/cmd/vm_group_ls.go +++ /dev/null @@ -1,40 +0,0 @@ -package cmd - -import ( - "github.com/pkg/errors" - "github.com/replicatedhq/replicated/cli/print" - "github.com/replicatedhq/replicated/pkg/platformclient" - "github.com/spf13/cobra" -) - -func (r *runners) InitVMGroupList(parent *cobra.Command) *cobra.Command { - cmd := &cobra.Command{ - Use: "ls [ID]", - Short: "List groups for a vm", - Long: `List groups for a vm`, - Args: cobra.ExactArgs(1), - RunE: r.listVMGroups, - ValidArgsFunction: r.completeVMIDs, - } - parent.AddCommand(cmd) - - cmd.Flags().StringVar(&r.outputFormat, "output", "table", "The output format to use. 
One of: json|table|wide (default: table)") - - return cmd -} - -func (r *runners) listVMGroups(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("vm id is required") - } - vmID := args[0] - - vm, err := r.kotsAPI.GetVM(vmID) - if errors.Cause(err) == platformclient.ErrForbidden { - return ErrCompatibilityMatrixTermsNotAccepted - } else if err != nil { - return errors.Wrap(err, "get vm") - } - - return print.NodeGroups(r.outputFormat, r.w, vm.NodeGroups) -} diff --git a/cli/cmd/vm_ls.go b/cli/cmd/vm_ls.go index 512749b4..57a93ca2 100644 --- a/cli/cmd/vm_ls.go +++ b/cli/cmd/vm_ls.go @@ -15,17 +15,34 @@ import ( func (r *runners) InitVMList(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "ls", - Short: "List test vms", - Long: `List test vms`, - RunE: r.listVMs, + Short: "List test VMs and their status, with optional filters for start/end time and terminated VMs.", + Long: `List all test VMs in your account, including their current status, distribution, version, and more. You can use optional flags to filter the output based on VM termination status, start time, or end time. This command can also watch the VM status in real-time. + +By default, the command will return a table of all VMs, but you can switch to JSON or wide output formats for more detailed information. The command supports filtering to show only terminated VMs or to specify a time range for the query. + +You can use the '--watch' flag to monitor VMs continuously. This will refresh the list of VMs every 2 seconds, displaying any updates in real-time, such as new VMs being created or existing VMs being terminated. 
+ +The command also allows you to customize the output format, supporting 'json', 'table', and 'wide' views for flexibility based on your needs.`, + Example: ` # List all active VMs + replicated vm ls + + # List all VMs that were created after a specific start time + replicated vm ls --start-time 2024-10-01T00:00:00Z + + # Show only terminated VMs + replicated vm ls --show-terminated + + # Watch VM status changes in real-time + replicated vm ls --watch`, + RunE: r.listVMs, } parent.AddCommand(cmd) - cmd.Flags().BoolVar(&r.args.lsClusterShowTerminated, "show-terminated", false, "when set, only show terminated vms") - cmd.Flags().StringVar(&r.args.lsClusterStartTime, "start-time", "", "start time for the query (Format: 2006-01-02T15:04:05Z)") - cmd.Flags().StringVar(&r.args.lsClusterEndTime, "end-time", "", "end time for the query (Format: 2006-01-02T15:04:05Z)") + cmd.Flags().BoolVar(&r.args.lsVMShowTerminated, "show-terminated", false, "when set, only show terminated vms") + cmd.Flags().StringVar(&r.args.lsVMStartTime, "start-time", "", "start time for the query (Format: 2006-01-02T15:04:05Z)") + cmd.Flags().StringVar(&r.args.lsVMEndTime, "end-time", "", "end time for the query (Format: 2006-01-02T15:04:05Z)") cmd.Flags().StringVar(&r.outputFormat, "output", "table", "The output format to use. 
One of: json|table|wide (default: table)") - cmd.Flags().BoolVarP(&r.args.lsClusterWatch, "watch", "w", false, "watch vms") + cmd.Flags().BoolVarP(&r.args.lsVMWatch, "watch", "w", false, "watch vms") return cmd } @@ -33,22 +50,22 @@ func (r *runners) InitVMList(parent *cobra.Command) *cobra.Command { func (r *runners) listVMs(_ *cobra.Command, args []string) error { const longForm = "2006-01-02T15:04:05Z" var startTime, endTime *time.Time - if r.args.lsClusterStartTime != "" { - st, err := time.Parse(longForm, r.args.lsClusterStartTime) + if r.args.lsVMStartTime != "" { + st, err := time.Parse(longForm, r.args.lsVMStartTime) if err != nil { return errors.Wrap(err, "parse start time") } startTime = &st } - if r.args.lsClusterEndTime != "" { - et, err := time.Parse(longForm, r.args.lsClusterEndTime) + if r.args.lsVMEndTime != "" { + et, err := time.Parse(longForm, r.args.lsVMEndTime) if err != nil { return errors.Wrap(err, "parse end time") } endTime = &et } - vms, err := r.kotsAPI.ListVMs(r.args.lsClusterShowTerminated, startTime, endTime) + vms, err := r.kotsAPI.ListVMs(r.args.lsVMShowTerminated, startTime, endTime) if errors.Cause(err) == platformclient.ErrForbidden { return ErrCompatibilityMatrixTermsNotAccepted } else if err != nil { @@ -56,7 +73,7 @@ func (r *runners) listVMs(_ *cobra.Command, args []string) error { } header := true - if r.args.lsClusterWatch { + if r.args.lsVMWatch { // Checks to see if the outputFormat is table if r.outputFormat != "table" && r.outputFormat != "wide" { @@ -74,7 +91,7 @@ func (r *runners) listVMs(_ *cobra.Command, args []string) error { // Runs until ctrl C is recognized for range time.Tick(2 * time.Second) { - newVMs, err := r.kotsAPI.ListVMs(r.args.lsClusterShowTerminated, startTime, endTime) + newVMs, err := r.kotsAPI.ListVMs(r.args.lsVMShowTerminated, startTime, endTime) if err != nil { if err == promptui.ErrInterrupt { @@ -111,7 +128,7 @@ func (r *runners) listVMs(_ *cobra.Command, args []string) error { // Check for 
removed vms and print them, changing their status to be "deleted" for id, vm := range oldVMMap { if _, found := newVMMap[id]; !found { - vm.Status = types.ClusterStatusDeleted + vm.Status = types.VMStatusDeleted vmsToPrint = append(vmsToPrint, vm) } } diff --git a/cli/cmd/vm_rm.go b/cli/cmd/vm_rm.go index bdf382c3..e5a9add9 100644 --- a/cli/cmd/vm_rm.go +++ b/cli/cmd/vm_rm.go @@ -11,44 +11,63 @@ import ( func (r *runners) InitVMRemove(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "rm ID [ID …]", - Short: "Remove test VM", - Long: `Removes a VM immediately. + Short: "Remove test VM(s) immediately, with options to filter by name, tag, or remove all VMs.", + Long: `The 'rm' command allows you to remove test VMs from your account immediately. You can specify one or more VM IDs directly, or use flags to filter which VMs to remove based on their name, tags, or simply remove all VMs at once. -You can specify the --all flag to terminate all vms.`, +This command supports multiple filtering options, including removing VMs by their name, by specific tags, or by specifying the '--all' flag to remove all VMs in your account. 
+ +You can also use the '--dry-run' flag to simulate the removal without actually deleting the VMs.`, + Example: ` # Remove a VM by ID + replicated vm rm aaaaa11 + + # Remove multiple VMs by ID + replicated vm rm aaaaa11 bbbbb22 ccccc33 + + # Remove all VMs with a specific name + replicated vm rm --name test-vm + + # Remove all VMs with a specific tag + replicated vm rm --tag env=dev + + # Remove all VMs + replicated vm rm --all + + # Perform a dry run of removing all VMs + replicated vm rm --all --dry-run`, RunE: r.removeVMs, ValidArgsFunction: r.completeVMIDs, } parent.AddCommand(cmd) - cmd.Flags().StringArrayVar(&r.args.removeClusterNames, "name", []string{}, "Name of the vm to remove (can be specified multiple times)") + cmd.Flags().StringArrayVar(&r.args.removeVMNames, "name", []string{}, "Name of the vm to remove (can be specified multiple times)") cmd.RegisterFlagCompletionFunc("name", r.completeVMNames) - cmd.Flags().StringArrayVar(&r.args.removeClusterTags, "tag", []string{}, "Tag of the vm to remove (key=value format, can be specified multiple times)") + cmd.Flags().StringArrayVar(&r.args.removeVMTags, "tag", []string{}, "Tag of the vm to remove (key=value format, can be specified multiple times)") - cmd.Flags().BoolVar(&r.args.removeClusterAll, "all", false, "remove all vms") + cmd.Flags().BoolVar(&r.args.removeVMAll, "all", false, "remove all vms") - cmd.Flags().BoolVar(&r.args.removeClusterDryRun, "dry-run", false, "Dry run") + cmd.Flags().BoolVar(&r.args.removeVMDryRun, "dry-run", false, "Dry run") return cmd } func (r *runners) removeVMs(_ *cobra.Command, args []string) error { - if len(args) == 0 && !r.args.removeClusterAll && len(r.args.removeClusterNames) == 0 && len(r.args.removeClusterTags) == 0 { + if len(args) == 0 && !r.args.removeVMAll && len(r.args.removeVMNames) == 0 && len(r.args.removeVMTags) == 0 { return errors.New("One of ID, --all, --name or --tag flag required") - } else if len(args) > 0 && (r.args.removeClusterAll || 
len(r.args.removeClusterNames) > 0 || len(r.args.removeClusterTags) > 0) { + } else if len(args) > 0 && (r.args.removeVMAll || len(r.args.removeVMNames) > 0 || len(r.args.removeVMTags) > 0) { return errors.New("cannot specify ID and --all, --name or --tag flag") - } else if len(args) == 0 && r.args.removeClusterAll && (len(r.args.removeClusterNames) > 0 || len(r.args.removeClusterTags) > 0) { + } else if len(args) == 0 && r.args.removeVMAll && (len(r.args.removeVMNames) > 0 || len(r.args.removeVMTags) > 0) { return errors.New("cannot specify --all and --name or --tag flag") - } else if len(args) == 0 && !r.args.removeClusterAll && len(r.args.removeClusterNames) > 0 && len(r.args.removeClusterTags) > 0 { + } else if len(args) == 0 && !r.args.removeVMAll && len(r.args.removeVMNames) > 0 && len(r.args.removeVMTags) > 0 { return errors.New("cannot specify --name and --tag flag") } - if len(r.args.removeClusterNames) > 0 { + if len(r.args.removeVMNames) > 0 { vms, err := r.kotsAPI.ListVMs(false, nil, nil) if err != nil { return errors.Wrap(err, "list vms") } for _, vm := range vms { - for _, name := range r.args.removeClusterNames { + for _, name := range r.args.removeVMNames { if vm.Name == name { err := removeVM(r, vm.ID) if err != nil { @@ -59,21 +78,21 @@ func (r *runners) removeVMs(_ *cobra.Command, args []string) error { } } - if len(r.args.removeClusterTags) > 0 { + if len(r.args.removeVMTags) > 0 { vms, err := r.kotsAPI.ListVMs(false, nil, nil) if err != nil { return errors.Wrap(err, "list vms") } - tags, err := parseTags(r.args.removeClusterTags) + tags, err := parseTags(r.args.removeVMTags) if err != nil { return errors.Wrap(err, "parse tags") } for _, vm := range vms { - if vm.Tags != nil && len(vm.Tags) > 0 { + if len(vm.Tags) > 0 { for _, tag := range tags { - for _, clusterTag := range vm.Tags { - if clusterTag.Key == tag.Key && clusterTag.Value == tag.Value { + for _, vmTag := range vm.Tags { + if vmTag.Key == tag.Key && vmTag.Value == tag.Value { err := 
removeVM(r, vm.ID) if err != nil { return errors.Wrap(err, "remove vm") @@ -85,7 +104,7 @@ func (r *runners) removeVMs(_ *cobra.Command, args []string) error { } } - if r.args.removeClusterAll { + if r.args.removeVMAll { vms, err := r.kotsAPI.ListVMs(false, nil, nil) if err != nil { return errors.Wrap(err, "list vms") @@ -109,7 +128,7 @@ func (r *runners) removeVMs(_ *cobra.Command, args []string) error { } func removeVM(r *runners, vmID string) error { - if r.args.removeClusterDryRun { + if r.args.removeVMDryRun { fmt.Printf("would remove vm %s\n", vmID) return nil } diff --git a/cli/cmd/vm_update.go b/cli/cmd/vm_update.go index bc433c05..53b094d3 100644 --- a/cli/cmd/vm_update.go +++ b/cli/cmd/vm_update.go @@ -9,15 +9,25 @@ import ( func (r *runners) InitVMUpdateCommand(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "update", - Short: "Update vm settings", - Long: `vm update can be used to update vm settings`, + Short: "Update VM settings.", + Long: `The 'vm update' command allows you to modify the settings of a virtual machine. You can update a VM either by providing its ID or by specifying its name. This command supports updating various VM settings, which will be handled by specific subcommands. + +- To update the VM by its ID, use the '--id' flag. +- To update the VM by its name, use the '--name' flag. 
+ +Subcommands will allow for more specific updates like TTL`, + Example: ` # Update a VM by specifying its ID + replicated vm update --id aaaaa11 --ttl 12h + + # Update a VM by specifying its name + replicated vm update --name --ttl 12h`, } parent.AddCommand(cmd) - cmd.PersistentFlags().StringVar(&r.args.updateClusterName, "name", "", "Name of the vm to update.") + cmd.PersistentFlags().StringVar(&r.args.updateVMName, "name", "", "Name of the vm to update.") cmd.RegisterFlagCompletionFunc("name", r.completeVMNames) - cmd.PersistentFlags().StringVar(&r.args.updateClusterID, "id", "", "id of the vm to update (when name is not provided)") + cmd.PersistentFlags().StringVar(&r.args.updateVMID, "id", "", "id of the vm to update (when name is not provided)") cmd.RegisterFlagCompletionFunc("id", r.completeVMIDs) return cmd @@ -28,8 +38,8 @@ func (r *runners) ensureUpdateVMIDArg(args []string) error { // but if it's not provided, we look for a viper flag named "name" and use it // as the name of the vm, not the id if len(args) > 0 { - r.args.updateClusterID = args[0] - } else if r.args.updateClusterName != "" { + r.args.updateVMID = args[0] + } else if r.args.updateVMName != "" { vms, err := r.kotsAPI.ListVMs(false, nil, nil) if errors.Cause(err) == platformclient.ErrForbidden { return ErrCompatibilityMatrixTermsNotAccepted @@ -37,12 +47,12 @@ func (r *runners) ensureUpdateVMIDArg(args []string) error { return errors.Wrap(err, "list vms") } for _, vm := range vms { - if vm.Name == r.args.updateClusterName { - r.args.updateClusterID = vm.ID + if vm.Name == r.args.updateVMName { + r.args.updateVMID = vm.ID break } } - } else if r.args.updateClusterID != "" { + } else if r.args.updateVMID != "" { // do nothing // but this is here for readability } else { diff --git a/cli/cmd/vm_update_ttl.go b/cli/cmd/vm_update_ttl.go index 13ab68a5..265dba67 100644 --- a/cli/cmd/vm_update_ttl.go +++ b/cli/cmd/vm_update_ttl.go @@ -10,16 +10,27 @@ import ( func (r *runners) 
InitVMUpdateTTL(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ - Use: "ttl [ID]", - Short: "Update TTL for a test vm", - Long: `Update TTL for a test vm`, + Use: "ttl [ID]", + Short: "Update TTL for a test VM.", + Long: `The 'ttl' command allows you to update the Time to Live (TTL) for a test VM. This command modifies the lifespan of a running VM by updating its TTL, which is a duration starting from the moment the VM is provisioned. + +The TTL specifies how long the VM will run before it is automatically terminated. You can specify a duration up to a maximum of 48 hours. + +The command accepts a VM ID as an argument and requires the '--ttl' flag to specify the new TTL value. + +You can also specify the output format (json, table, wide) using the '--output' flag.`, + Example: ` # Update the TTL of a VM to 2 hours + replicated vm update ttl aaaaa11 --ttl 2h + + # Update the TTL of a VM to 30 minutes + replicated vm update ttl aaaaa11 --ttl 30m`, RunE: r.updateVMTTL, SilenceUsage: true, ValidArgsFunction: r.completeVMIDs, } parent.AddCommand(cmd) - cmd.Flags().StringVar(&r.args.updateClusterTTL, "ttl", "", "Update TTL which starts from the moment the vm is running (duration, max 48h).") + cmd.Flags().StringVar(&r.args.updateVMTTL, "ttl", "", "Update TTL which starts from the moment the vm is running (duration, max 48h).") cmd.Flags().StringVar(&r.outputFormat, "output", "table", "The output format to use. 
One of: json|table|wide (default: table)") cmd.MarkFlagRequired("ttl") @@ -33,9 +44,9 @@ func (r *runners) updateVMTTL(cmd *cobra.Command, args []string) error { } opts := kotsclient.UpdateVMTTLOpts{ - TTL: r.args.updateClusterTTL, + TTL: r.args.updateVMTTL, } - vm, err := r.kotsAPI.UpdateVMTTL(r.args.updateClusterID, opts) + vm, err := r.kotsAPI.UpdateVMTTL(r.args.updateVMID, opts) if errors.Cause(err) == platformclient.ErrForbidden { return ErrCompatibilityMatrixTermsNotAccepted } else if err != nil { diff --git a/cli/cmd/vm_versions.go b/cli/cmd/vm_versions.go index 4244778f..d3ae4541 100644 --- a/cli/cmd/vm_versions.go +++ b/cli/cmd/vm_versions.go @@ -11,9 +11,20 @@ import ( func (r *runners) InitVMVersions(parent *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "versions", - Short: "List vm versions", - Long: `List vm versions`, - RunE: r.listVMVersions, + Short: "List available VM versions.", + Long: `The 'vm versions' command lists all the available versions of virtual machines that can be provisioned. This includes the available distributions and their respective versions. + +- You can filter the list by a specific distribution using the '--distribution' flag. 
+- The output can be formatted as a table or in JSON format using the '--output' flag.`, + Example: ` # List all available VM versions + replicated vm versions + + # List VM versions for a specific distribution (e.g., Ubuntu) + replicated vm versions --distribution ubuntu + + # Display the output in JSON format + replicated vm versions --output json`, + RunE: r.listVMVersions, } parent.AddCommand(cmd) @@ -32,7 +43,7 @@ func (r *runners) listVMVersions(_ *cobra.Command, args []string) error { } if r.args.lsVersionsDistribution != "" { - var filteredCV []*types.ClusterVersion + var filteredCV []*types.VMVersion for _, vmVersion := range vmVersions { if vmVersion.Name == r.args.lsVersionsDistribution { filteredCV = append(filteredCV, vmVersion) diff --git a/cli/print/clusters.go b/cli/print/clusters.go index 2d10108f..d550bce0 100644 --- a/cli/print/clusters.go +++ b/cli/print/clusters.go @@ -17,7 +17,7 @@ var clusterFuncs = template.FuncMap{ // Table formatting var clustersTmplTableHeaderSrc = `ID NAME DISTRIBUTION VERSION STATUS CREATED EXPIRES COST` var clustersTmplTableRowSrc = `{{ range . 
-}} -{{ .ID }} {{ padding .Name 27 }} {{ padding .KubernetesDistribution 12 }} {{ padding .KubernetesVersion 10 }} {{ padding (printf "%s" .Status) 12 }} {{ padding (printf "%s" .CreatedAt) 30 }} {{if .ExpiresAt.IsZero}}{{ padding "-" 30 }}{{else}}{{ padding (printf "%s" .ExpiresAt) 30 }}{{end}} {{ padding (CreditsToDollarsDisplay .EstimatedCost) 11 }} +{{ .ID }} {{ padding .Name 27 }} {{ padding .KubernetesDistribution 12 }} {{ padding .KubernetesVersion 10 }} {{ padding (printf "%s" .Status) 12 }} {{ padding (printf "%s" (localeTime .CreatedAt)) 30 }} {{if .ExpiresAt.IsZero}}{{ padding "-" 30 }}{{else}}{{ padding (printf "%s" (localeTime .ExpiresAt)) 30 }}{{end}} {{ padding (CreditsToDollarsDisplay .EstimatedCost) 11 }} {{ end }}` var clustersTmplTableSrc = fmt.Sprintln(clustersTmplTableHeaderSrc) + clustersTmplTableRowSrc var clustersTmplTable = template.Must(template.New("clusters").Funcs(clusterFuncs).Funcs(funcs).Parse(clustersTmplTableSrc)) @@ -26,7 +26,7 @@ var clustersTmplTableNoHeader = template.Must(template.New("clusters").Funcs(clu // Wide table formatting var clustersTmplWideHeaderSrc = `ID NAME DISTRIBUTION VERSION STATUS CREATED EXPIRES COST TOTAL NODES NODEGROUPS TAGS` var clustersTmplWideRowSrc = `{{ range . 
-}} -{{ .ID }} {{ padding .Name 27 }} {{ padding .KubernetesDistribution 12 }} {{ padding .KubernetesVersion 10 }} {{ padding (printf "%s" .Status) 12 }} {{ padding (printf "%s" .CreatedAt) 30 }} {{if .ExpiresAt.IsZero}}{{ padding "-" 30 }}{{else}}{{ padding (printf "%s" .ExpiresAt) 30 }}{{end}} {{ padding (CreditsToDollarsDisplay .EstimatedCost) 11 }} {{$nodecount:=0}}{{ range $index, $ng := .NodeGroups}}{{$nodecount = add $nodecount $ng.NodeCount}}{{ end }}{{ padding (printf "%d" $nodecount) 11 }} {{ len .NodeGroups}} {{ range $index, $tag := .Tags }}{{if $index}}, {{end}}{{ $tag.Key }}={{ $tag.Value }}{{ end }} +{{ .ID }} {{ padding .Name 27 }} {{ padding .KubernetesDistribution 12 }} {{ padding .KubernetesVersion 10 }} {{ padding (printf "%s" .Status) 12 }} {{ padding (printf "%s" (localeTime .CreatedAt)) 30 }} {{if .ExpiresAt.IsZero}}{{ padding "-" 30 }}{{else}}{{ padding (printf "%s" (localeTime .ExpiresAt)) 30 }}{{end}} {{ padding (CreditsToDollarsDisplay .EstimatedCost) 11 }} {{$nodecount:=0}}{{ range $index, $ng := .NodeGroups}}{{$nodecount = add $nodecount $ng.NodeCount}}{{ end }}{{ padding (printf "%d" $nodecount) 11 }} {{ len .NodeGroups}} {{ range $index, $tag := .Tags }}{{if $index}}, {{end}}{{ $tag.Key }}={{ $tag.Value }}{{ end }} {{ end }}` var clustersTmplWideSrc = fmt.Sprintln(clustersTmplWideHeaderSrc) + clustersTmplWideRowSrc var clustersTmplWide = template.Must(template.New("clusters").Funcs(clusterFuncs).Funcs(funcs).Parse(clustersTmplWideSrc)) diff --git a/cli/print/util.go b/cli/print/util.go index f0022f59..8dc0a480 100644 --- a/cli/print/util.go +++ b/cli/print/util.go @@ -23,4 +23,10 @@ var funcs = template.FuncMap{ "formatURL": func(protocol, hostname string) string { return fmt.Sprintf("%s://%s", protocol, hostname) }, + "localeTime": func(t time.Time) string { + if t.IsZero() { + return "-" + } + return t.Local().Format("2006-01-02 15:04 MST") + }, } diff --git a/cli/print/vms.go b/cli/print/vms.go index 8e76ee8c..56f8834a 100644 --- 
a/cli/print/vms.go +++ b/cli/print/vms.go @@ -13,16 +13,16 @@ import ( // Table formatting var vmsTmplTableHeaderSrc = `ID NAME DISTRIBUTION VERSION STATUS CREATED EXPIRES` var vmsTmplTableRowSrc = `{{ range . -}} -{{ .ID }} {{ padding .Name 27 }} {{ padding .Distribution 12 }} {{ padding .Version 10 }} {{ padding (printf "%s" .Status) 12 }} {{ padding (printf "%s" .CreatedAt) 30 }} {{if .ExpiresAt.IsZero}}{{ padding "-" 30 }}{{else}}{{ padding (printf "%s" .ExpiresAt) 30 }}{{end}} +{{ .ID }} {{ padding .Name 27 }} {{ padding .Distribution 12 }} {{ padding .Version 10 }} {{ padding (printf "%s" .Status) 12 }} {{ padding (printf "%s" (localeTime .CreatedAt)) 30 }} {{if .ExpiresAt.IsZero}}{{ padding "-" 30 }}{{else}}{{ padding (printf "%s" (localeTime .ExpiresAt)) 30 }}{{end}} {{ end }}` var vmsTmplTableSrc = fmt.Sprintln(vmsTmplTableHeaderSrc) + vmsTmplTableRowSrc var vmsTmplTable = template.Must(template.New("vms").Funcs(funcs).Parse(vmsTmplTableSrc)) var vmsTmplTableNoHeader = template.Must(template.New("vms").Funcs(funcs).Parse(vmsTmplTableRowSrc)) // Wide table formatting -var vmsTmplWideHeaderSrc = `ID NAME DISTRIBUTION VERSION STATUS CREATED EXPIRES TOTAL NODES NODEGROUPS TAGS` +var vmsTmplWideHeaderSrc = `ID NAME DISTRIBUTION VERSION STATUS CREATED EXPIRES TAGS` var vmsTmplWideRowSrc = `{{ range . 
-}} -{{ .ID }} {{ padding .Name 27 }} {{ padding .Distribution 12 }} {{ padding .Version 10 }} {{ padding (printf "%s" .Status) 12 }} {{ padding (printf "%s" .CreatedAt) 30 }} {{if .ExpiresAt.IsZero}}{{ padding "-" 30 }}{{else}}{{ padding (printf "%s" .ExpiresAt) 30 }}{{end}} {{$nodecount:=0}}{{ range $index, $ng := .NodeGroups}}{{$nodecount = add $nodecount $ng.NodeCount}}{{ end }}{{ padding (printf "%d" $nodecount) 11 }} {{ len .NodeGroups}} {{ range $index, $tag := .Tags }}{{if $index}}, {{end}}{{ $tag.Key }}={{ $tag.Value }}{{ end }} +{{ .ID }} {{ padding .Name 27 }} {{ padding .Distribution 12 }} {{ padding .Version 10 }} {{ padding (printf "%s" .Status) 12 }} {{ padding (printf "%s" (localeTime .CreatedAt)) 30 }} {{if .ExpiresAt.IsZero}}{{ padding "-" 30 }}{{else}}{{ padding (printf "%s" (localeTime .ExpiresAt)) 30 }}{{end}} {{ range $index, $tag := .Tags }}{{if $index}}, {{end}}{{ $tag.Key }}={{ $tag.Value }}{{ end }} {{ end }}` var vmsTmplWideSrc = fmt.Sprintln(vmsTmplWideHeaderSrc) + vmsTmplWideRowSrc var vmsTmplWide = template.Must(template.New("vms").Funcs(funcs).Parse(vmsTmplWideSrc)) @@ -33,8 +33,7 @@ var vmVersionsTmplSrc = `Supported VM distributions and versions are: {{ range $d := . 
-}} DISTRIBUTION: {{ $d.Name }} • VERSIONS: {{ range $i, $v := $d.Versions -}}{{if $i}}, {{end}}{{ $v }}{{ end }} -• INSTANCE TYPES: {{ range $i, $it := $d.InstanceTypes -}}{{if $i}}, {{end}}{{ $it }}{{ end }} -• MAX NODES: {{ $d.NodesMax }}{{if $d.Status}} +• INSTANCE TYPES: {{ range $i, $it := $d.InstanceTypes -}}{{if $i}}, {{end}}{{ $it }}{{ end }}{{if $d.Status}} • ENABLED: {{ $d.Status.Enabled }} • STATUS: {{ $d.Status.Status }} • DETAILS: {{ $d.Status.StatusMessage }}{{end}} @@ -112,7 +111,7 @@ func NoVMVersions(outputFormat string, w *tabwriter.Writer) error { return w.Flush() } -func VMVersions(outputFormat string, w *tabwriter.Writer, versions []*types.ClusterVersion) error { +func VMVersions(outputFormat string, w *tabwriter.Writer, versions []*types.VMVersion) error { switch outputFormat { case "table": if err := vmVersionsTmpl.Execute(w, versions); err != nil { @@ -168,8 +167,5 @@ func updateEstimatedVMCost(vm *types.VM) { minutesRunning := int64(expireDuration.Minutes()) totalCredits := int64(minutesRunning) * vm.CreditsPerHourPerVM / 60.0 vm.EstimatedCost = vm.FlatFee + totalCredits - for _, ng := range vm.NodeGroups { - vm.EstimatedCost += int64(minutesRunning) * ng.CreditsPerHour / 60.0 * int64(ng.NodeCount) - } } } diff --git a/pkg/kotsclient/vm_create.go b/pkg/kotsclient/vm_create.go index 5ad4c977..eab2b316 100644 --- a/pkg/kotsclient/vm_create.go +++ b/pkg/kotsclient/vm_create.go @@ -11,17 +11,14 @@ import ( ) type CreateVMRequest struct { - Name string `json:"name"` - Distribution string `json:"distribution"` - Version string `json:"version"` - IPFamily string `json:"ip_family"` - LicenseID string `json:"license_id"` - NodeCount int `json:"node_count"` - DiskGiB int64 `json:"disk_gib"` - TTL string `json:"ttl"` - NodeGroups []VMNodeGroup `json:"groups"` - InstanceType string `json:"instance_type"` - Tags []types.Tag `json:"tags"` + Name string `json:"name"` + Distribution string `json:"distribution"` + Version string `json:"version"` + Count 
int `json:"count"` + DiskGiB int64 `json:"disk_gib"` + TTL string `json:"ttl"` + InstanceType string `json:"instance_type"` + Tags []types.Tag `json:"tags"` } type CreateVMResponse struct { @@ -40,39 +37,27 @@ type CreateVMOpts struct { Name string Distribution string Version string - IPFamily string - NodeCount int + Count int DiskGiB int64 TTL string InstanceType string - NodeGroups []VMNodeGroup Tags []types.Tag DryRun bool } -type VMNodeGroup struct { - Name string `json:"name"` - InstanceType string `json:"instance_type"` - Nodes int `json:"node_count"` - Disk int `json:"disk_gib"` -} - type CreateVMErrorResponse struct { Error CreateVMErrorError `json:"error"` } type CreateVMErrorError struct { - Message string `json:"message"` - MaxDiskGiB int64 `json:"maxDiskGiB,omitempty"` - MaxEKS int64 `json:"maxEKS,omitempty"` - MaxGKE int64 `json:"maxGKE,omitempty"` - MaxAKS int64 `json:"maxAKS,omitempty"` - ValidationError *ClusterValidationError `json:"validationError,omitempty"` + Message string `json:"message"` + MaxDiskGiB int64 `json:"maxDiskGiB,omitempty"` + ValidationError *VMValidationError `json:"validationError,omitempty"` } type VMValidationError struct { - Errors []string `json:"errors"` - SupportedDistributions []*types.ClusterVersion `json:"supported_distributions"` + Errors []string `json:"errors"` + SupportedDistributions []*types.VMVersion `json:"supported_distributions"` } func (c *VendorV3Client) CreateVM(opts CreateVMOpts) (*types.VM, *CreateVMErrorError, error) { @@ -80,12 +65,10 @@ func (c *VendorV3Client) CreateVM(opts CreateVMOpts) (*types.VM, *CreateVMErrorE Name: opts.Name, Distribution: opts.Distribution, Version: opts.Version, - IPFamily: opts.IPFamily, - NodeCount: opts.NodeCount, + Count: opts.Count, DiskGiB: opts.DiskGiB, TTL: opts.TTL, InstanceType: opts.InstanceType, - NodeGroups: opts.NodeGroups, Tags: opts.Tags, } diff --git a/pkg/kotsclient/vm_versions.go b/pkg/kotsclient/vm_versions.go index e0528a9e..298c02fc 100644 --- 
a/pkg/kotsclient/vm_versions.go +++ b/pkg/kotsclient/vm_versions.go @@ -7,10 +7,10 @@ import ( ) type ListVMVersionsResponse struct { - Versions []*types.ClusterVersion `json:"cluster-versions"` + Versions []*types.VMVersion `json:"cluster-versions"` } -func (c *VendorV3Client) ListVMVersions() ([]*types.ClusterVersion, error) { +func (c *VendorV3Client) ListVMVersions() ([]*types.VMVersion, error) { versions := ListVMVersionsResponse{} err := c.DoJSON("GET", "/v3/vm/versions", http.StatusOK, nil, &versions) if err != nil { diff --git a/pkg/types/vm.go b/pkg/types/vm.go index ef4e3eb1..a63b8f7f 100644 --- a/pkg/types/vm.go +++ b/pkg/types/vm.go @@ -3,15 +3,14 @@ package types import "time" type VM struct { - ID string `json:"id"` - Name string `json:"name"` - Distribution string `json:"distribution"` - Version string `json:"version"` - NodeGroups []*NodeGroup `json:"node_groups"` + ID string `json:"id"` + Name string `json:"name"` + Distribution string `json:"distribution"` + Version string `json:"version"` - Status ClusterStatus `json:"status"` - CreatedAt time.Time `json:"created_at"` - ExpiresAt time.Time `json:"expires_at"` + Status VMStatus `json:"status"` + CreatedAt time.Time `json:"created_at"` + ExpiresAt time.Time `json:"expires_at"` TTL string `json:"ttl"` @@ -26,3 +25,23 @@ type VM struct { Tags []Tag `json:"tags"` } + +type VMStatus string + +const ( + VMStatusQueued VMStatus = "queued" // Not assigned to a runner yet + VMStatusAssigned VMStatus = "assigned" // Assigned to a runner, but have not heard back from the runner + VMStatusPreparing VMStatus = "preparing" // The runner sets this when is receives the request + VMStatusProvisioning VMStatus = "provisioning" // The runner sets this when it starts provisioning + VMStatusRunning VMStatus = "running" // The runner sets this when it is done provisioning or upgrading and available + VMStatusTerminated VMStatus = "terminated" // This is set when the cluster expires or is deleted + VMStatusError 
VMStatus = "error" // Something unexpected + VMStatusDeleted VMStatus = "deleted" +) + +type VMVersion struct { + Name string `json:"short_name"` + Versions []string `json:"versions"` + InstanceTypes []string `json:"instance_types"` + Status *ClusterDistributionStatus `json:"status,omitempty"` +}