diff --git a/docusaurus.config.js b/docusaurus.config.js index 4f80a073..305126cd 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -40,7 +40,7 @@ const config = { 'https://github.com/kubeslice/docs/tree/master/', routeBasePath: '/', includeCurrentVersion: false, - lastVersion: '1.1.0', + lastVersion: '1.2.0', versions:{ /*** @@ -55,6 +55,11 @@ const config = { * } * */ + '1.2.0':{ + label: '1.2.0', + path: '1.2.0', + banner: 'none' + }, '1.1.0':{ label: '1.1.0', path: '1.1.0', diff --git a/images/version1.2.0/key-features/support-for-private-clusters.png b/images/version1.2.0/key-features/support-for-private-clusters.png new file mode 100644 index 00000000..1b18219a Binary files /dev/null and b/images/version1.2.0/key-features/support-for-private-clusters.png differ diff --git a/images/version1.2.0/monitor/add-to-slack.png b/images/version1.2.0/monitor/add-to-slack.png new file mode 100644 index 00000000..a3aaf5ec Binary files /dev/null and b/images/version1.2.0/monitor/add-to-slack.png differ diff --git a/images/version1.2.0/monitor/slack-add-incoming-webhook-integration.png b/images/version1.2.0/monitor/slack-add-incoming-webhook-integration.png new file mode 100644 index 00000000..7de4d825 Binary files /dev/null and b/images/version1.2.0/monitor/slack-add-incoming-webhook-integration.png differ diff --git a/images/version1.2.0/monitor/slack-channel-admin-option.png b/images/version1.2.0/monitor/slack-channel-admin-option.png new file mode 100644 index 00000000..f16f2dd2 Binary files /dev/null and b/images/version1.2.0/monitor/slack-channel-admin-option.png differ diff --git a/images/version1.2.0/monitor/slack-copy-webhook-url.png b/images/version1.2.0/monitor/slack-copy-webhook-url.png new file mode 100644 index 00000000..ac5767df Binary files /dev/null and b/images/version1.2.0/monitor/slack-copy-webhook-url.png differ diff --git a/images/version1.2.0/monitor/slack-installed-apps.png b/images/version1.2.0/monitor/slack-installed-apps.png new file mode 100644 index 00000000..699c203c Binary files /dev/null and b/images/version1.2.0/monitor/slack-installed-apps.png differ diff --git a/images/version1.2.0/monitor/slack-post-to-channel.png b/images/version1.2.0/monitor/slack-post-to-channel.png new file mode 100644 index 00000000..fa9abf1d Binary files /dev/null and b/images/version1.2.0/monitor/slack-post-to-channel.png differ diff --git a/images/version1.2.0/monitor/slack-webhook-save.png b/images/version1.2.0/monitor/slack-webhook-save.png new file mode 100644 index 00000000..8790e2ab Binary files /dev/null and b/images/version1.2.0/monitor/slack-webhook-save.png differ diff --git a/images/version1.2.0/monitoring-events/add-bot-to-channel.png b/images/version1.2.0/monitoring-events/add-bot-to-channel.png new file mode 100644 index 00000000..749d9fea Binary files /dev/null and b/images/version1.2.0/monitoring-events/add-bot-to-channel.png differ diff --git a/images/version1.2.0/monitoring-events/app-level-token.png b/images/version1.2.0/monitoring-events/app-level-token.png new file mode 100644 index 00000000..9b35b7c4 Binary files /dev/null and b/images/version1.2.0/monitoring-events/app-level-token.png differ diff --git a/images/version1.2.0/monitoring-events/bot-user-oauth-token.png b/images/version1.2.0/monitoring-events/bot-user-oauth-token.png new file mode 100644 index 00000000..6d1cecfc Binary files /dev/null and b/images/version1.2.0/monitoring-events/bot-user-oauth-token.png differ diff --git a/images/version1.2.0/monitoring-events/botkube-app-in-slack.png 
b/images/version1.2.0/monitoring-events/botkube-app-in-slack.png new file mode 100644 index 00000000..94be00d3 Binary files /dev/null and b/images/version1.2.0/monitoring-events/botkube-app-in-slack.png differ diff --git a/images/version1.2.0/monitoring-events/copy-app-level-token.png b/images/version1.2.0/monitoring-events/copy-app-level-token.png new file mode 100644 index 00000000..0709efad Binary files /dev/null and b/images/version1.2.0/monitoring-events/copy-app-level-token.png differ diff --git a/images/version1.2.0/monitoring-events/create-channel1.png b/images/version1.2.0/monitoring-events/create-channel1.png new file mode 100644 index 00000000..b7b7f4bb Binary files /dev/null and b/images/version1.2.0/monitoring-events/create-channel1.png differ diff --git a/images/version1.2.0/monitoring-events/generate-app-level-token.png b/images/version1.2.0/monitoring-events/generate-app-level-token.png new file mode 100644 index 00000000..a2fa60c4 Binary files /dev/null and b/images/version1.2.0/monitoring-events/generate-app-level-token.png differ diff --git a/images/version1.2.0/monitoring-events/ksm-events.png b/images/version1.2.0/monitoring-events/ksm-events.png new file mode 100644 index 00000000..fb638fb8 Binary files /dev/null and b/images/version1.2.0/monitoring-events/ksm-events.png differ diff --git a/images/version1.2.0/monitoring-events/kubernetes-events1.png b/images/version1.2.0/monitoring-events/kubernetes-events1.png new file mode 100644 index 00000000..a5d73333 Binary files /dev/null and b/images/version1.2.0/monitoring-events/kubernetes-events1.png differ diff --git a/images/version1.2.0/monitoring-events/kubernetes-events2.png b/images/version1.2.0/monitoring-events/kubernetes-events2.png new file mode 100644 index 00000000..ef7c9462 Binary files /dev/null and b/images/version1.2.0/monitoring-events/kubernetes-events2.png differ diff --git a/images/version1.2.0/monitoring-events/kubernetes-events3.png b/images/version1.2.0/monitoring-events/kubernetes-events3.png new file mode 100644 index 00000000..c392fcae Binary files /dev/null and b/images/version1.2.0/monitoring-events/kubernetes-events3.png differ diff --git a/images/version1.2.0/monitoring-events/kubernetes-events4.png b/images/version1.2.0/monitoring-events/kubernetes-events4.png new file mode 100644 index 00000000..29106d9a Binary files /dev/null and b/images/version1.2.0/monitoring-events/kubernetes-events4.png differ diff --git a/images/version1.2.0/monitoring-events/kubernetes-events5.png b/images/version1.2.0/monitoring-events/kubernetes-events5.png new file mode 100644 index 00000000..eebc3c6b Binary files /dev/null and b/images/version1.2.0/monitoring-events/kubernetes-events5.png differ diff --git a/images/version1.2.0/monitoring-events/kubeslice-controller-events.png b/images/version1.2.0/monitoring-events/kubeslice-controller-events.png new file mode 100644 index 00000000..bcab8446 Binary files /dev/null and b/images/version1.2.0/monitoring-events/kubeslice-controller-events.png differ diff --git a/images/version1.2.0/monitoring-events/kubeslice-worker-events.png b/images/version1.2.0/monitoring-events/kubeslice-worker-events.png new file mode 100644 index 00000000..1c03a9c3 Binary files /dev/null and b/images/version1.2.0/monitoring-events/kubeslice-worker-events.png differ diff --git a/versioned_docs/version-1.2.0/add-ons/add-ons-slack-events.mdx b/versioned_docs/version-1.2.0/add-ons/add-ons-slack-events.mdx new file mode 100644 index 00000000..2337719c --- /dev/null +++ 
b/versioned_docs/version-1.2.0/add-ons/add-ons-slack-events.mdx @@ -0,0 +1,210 @@
+# Slack Event Monitoring
+This topic describes how to create a Slack app and integrate kube-state-metrics (KSM)
+events and KubeSlice events into a Slack channel, so that you receive real-time
+notifications about your clusters directly in your Slack workspace.
+
+## Create an App
+
+Create an app using the Slack API in your workspace.
+
+To create an app:
+
+1. Open the [Slack API](https://api.slack.com/apps) console.
+
+2. Click the **Create an App** button.
+
+![events](/images/version1.2.0/monitoring-events/kubernetes-events1.png)
+
+3. Select **From an app manifest** to configure your app scopes and settings.
+
+![events](/images/version1.2.0/monitoring-events/kubernetes-events2.png)
+
+4. Select a workspace where you want to create an app and click **Next**.
+
+![events](/images/version1.2.0/monitoring-events/kubernetes-events3.png)
+
+5. In the **Enter app manifest below** section, click the **YAML** tab and enter the following manifest:
+
+   ```yaml
+   display_information:
+     name: Botkube
+     description: Botkube
+     background_color: "#a653a6"
+   features:
+     bot_user:
+       display_name: Botkube
+       always_online: false
+   oauth_config:
+     scopes:
+       bot:
+         - channels:read
+         - app_mentions:read
+         - chat:write
+         - files:write
+         - users:read # Remote configuration only: Used to get Real Name for audit reporting
+   settings:
+     event_subscriptions:
+       bot_events:
+         - app_mention
+     interactivity:
+       is_enabled: true
+     org_deploy_enabled: false
+     socket_mode_enabled: true
+     token_rotation_enabled: false
+   ```
+
+6. Click **Next**.
+
+7. In the **Review summary & create your app** section, review the details and click **Create**.
+
+![events](/images/version1.2.0/monitoring-events/kubernetes-events4.png)
+
+
+## Install an App
+
+After you create the app, you are directed to its details page, which provides the option to install the app in your workspace.
+
+To install the app in your workspace:
+
+1. Go to **Settings** > **Basic Information** on the left sidebar.
+
+2. In the **Building Apps for Slack** section, under **Install your app**, click the **Install to workspace** button.
+
+3. Click **Allow** to finish the installation.
+
+
+## Get an OAuth Token
+
+When you configure Botkube in Slack, you must provide an OAuth token for authentication. This token verifies the identity of the Botkube integration and enables communication between Slack and Botkube.
+
+To get an OAuth token for your workspace:
+
+1. Go to **Features** > **OAuth & Permissions** on the left sidebar.
+
+2. Copy the **Bot User OAuth Token** and save the token for later use while deploying the
+   **kubeslice-botkube** bundle on the controller and worker clusters.
+
+   ![events](/images/version1.2.0/monitoring-events/bot-user-oauth-token.png)
+
+
+## Get an App-Level Token
+
+To establish a WebSocket connection with the Slack app using Socket Mode, you need an
+app-level token. This token authorizes the Socket Mode connection between Slack and your app.
+
+To get an app-level token:
+
+1. Go to **Settings** > **Basic Information** on the left sidebar.
+
+2. 
Click the **Generate Token and Scopes** button. + +![events](/images/version1.2.0/monitoring-events/app-level-token.png) + +3. In the **Generate an app-level token** section, enter a token name. For example, `botkube` is a token name. + +![events](/images/version1.2.0/monitoring-events/generate-app-level-token.png) + +4. Click **Add Scope** and select **connections:write scope**. + +5. Click **Generate**. + +6. Review the details and click **Done**. Copy the app-level token for later use while deploying the + **kubeslice-botkube** bundle on the controller and worker clusters. + +## Create Slack Channel for Events + +Once you have successfully installed the Botkube application, you will notice a new bot user named `botkube` added to your workspace. + +Next, proceed to create **two channels** within your workspace to receive KSM and KubeSlice events. Save the channel names for later use. + +![events](/images/version1.2.0/monitoring-events/create-channel1.png) + +To receive notifications in a specific Slack channel, you need to add the Botkube bot. This can be done by inviting the **@Botkube** user to the desired channel. + +![events](/images/version1.2.0/monitoring-events/add-bot-to-channel.png) + + + +## Deploy the kubeslice-botkube Bundle + +To deploy the **kubeslice-botkube** bundle: + +1. Switch the context to controller cluster. + + ``` + kubectx + ``` + +2. Add the **kubeslice-botkube** repo using the following command: + + ``` + helm repo add kubeslice/botkube + ``` + +3. Use the following command to create the `botkube` namespace on the controller and worker clusters. + + ``` + kubectl create ns botkube + ``` + +4. Use the following command to deploy the **kubeslice-botkube** bundle on the **controller cluster**: + + :::note + Replace the `ksm_channel_name`, `kubeslice_event_channel_name`, `cluster_name`, `app_token`, and `bot_token` in the command below with your values. + ::: + + ``` + helm install kubeslice-botkube kubeslice/botkube --set "sources.k8s-controller-events.botkube/kubernetes.enabled=true" --namespace botkube \ + --set communications.default-group.socketSlack.enabled=true \ + --set communications.default-group.socketSlack.channels.default.name= \ + --set communications.default-group.socketSlack.channels.kubeslice.name= \ + --set communications.default-group.socketSlack.appToken= \ + --set communications.default-group.socketSlack.botToken= \ + --set settings.clusterName= \ + --set 'executors.k8s-default-tools.botkube/kubectl.enabled'=true \ + --set 'executors.k8s-default-tools.botkube/helm.enabled'=true + ``` + +5. Switch the context to worker cluster(s). + + ``` + kubectx + ``` + +6. Use the following command to deploy the **kubeslice-botkube** bundle on each of the **worker cluster(s)**: + + :::note + Replace the `ksm_channel_name`, `kubeslice_event_channel_name`, `cluster_name`, `app_token`, and `bot_token` in the command below with your values. 
+   :::
+
+   ```
+   helm install kubeslice-botkube kubeslice/botkube --set "sources.k8s-worker-events.botkube/kubernetes.enabled=true" --namespace botkube \
+   --set communications.default-group.socketSlack.enabled=true \
+   --set communications.default-group.socketSlack.channels.default.name= \
+   --set communications.default-group.socketSlack.channels.kubeslice.name= \
+   --set communications.default-group.socketSlack.appToken= \
+   --set communications.default-group.socketSlack.botToken= \
+   --set settings.clusterName= \
+   --set 'executors.k8s-default-tools.botkube/kubectl.enabled'=true \
+   --set 'executors.k8s-default-tools.botkube/helm.enabled'=true
+   ```
+
+### Slack KSM Events
+
+After the charts are deployed, you start receiving notifications in the Slack channels. The KSM events received in the Slack channel are depicted in the figure below:
+
+![events](/images/version1.2.0/monitoring-events/ksm-events.png)
+
+### Slack KubeSlice Events
+#### Controller Events
+The Slack channel displays the KubeSlice events for the controller cluster, as depicted in the following figure.
+
+![events](/images/version1.2.0/monitoring-events/kubeslice-controller-events.png)
+
+#### Worker Events
+The Slack channel displays the KubeSlice events for the worker cluster, as depicted in the following figure.
+
+![events](/images/version1.2.0/monitoring-events/kubeslice-worker-events.png)
\ No newline at end of file
diff --git a/versioned_docs/version-1.2.0/add-ons/add-ons-slack-metrics.mdx b/versioned_docs/version-1.2.0/add-ons/add-ons-slack-metrics.mdx
new file mode 100644
index 00000000..e223f457
--- /dev/null
+++ b/versioned_docs/version-1.2.0/add-ons/add-ons-slack-metrics.mdx
@@ -0,0 +1,361 @@
+# Slack Metric Alerting
+This topic describes how to integrate KubeSlice metric alerts with Slack.
+The integration involves configuring Prometheus rules and helps you
+receive alerts on issues such as a pod not running on your cluster.
+
+:::info
+For more information on the metrics, see [KubeSlice Metrics](/versioned_docs/version-1.2.0/install-kubeslice/yaml/metrics/yaml-metrics-controller-metrics.mdx/#controller-metrics).
+:::
+
+## Configure Alerts Integration
+
+To integrate KubeSlice alerts with Slack, follow these steps:
+
+1. If you are already using Prometheus in your environment, ensure that you have a namespace dedicated to Prometheus. You can use this existing namespace for the KubeSlice-Prometheus integration. If you don't have an existing Prometheus namespace, continue with the next step.
+
+2. Add the KubeSlice helm repository, which contains the Prometheus chart, using the following command:
+   ```
+   helm repo add kubeslice https://kubeslice.github.io/kubeslice/
+   ```
+
+3. On your Slack workspace, create a channel called `#kubeslice-events-alerts`.
+4. Get the Slack API URL (webhook URL) used for Slack-based alerts. To create it for the `#kubeslice-events-alerts` channel,
+   click the **Settings** icon and go to **Administration** > **Manage apps**.
+
+   ![alt](/images/version1.2.0/monitor/slack-channel-admin-option.png)
+
+5. On the **Slack App Directory** page, go to **Installed Apps** in the left sidebar.
+6. Look for `incoming-webhook` in the **Search** field, and click `incoming-webhook`
+   from the search results.
+
+   ![alt](/images/version1.2.0/monitor/slack-installed-apps.png)
+
+7. Click **Add to Slack** to add incoming webhooks.
+
+   ![alt](/images/version1.2.0/monitor/add-to-slack.png)
+
+8. Under **Post to Channel**, choose **kubeslice-events-alerts** to post the alerts. 
+ + ![alt](/images/version1.2.0/monitor/slack-post-to-channel.png) + +9. Click **Add Incoming Webhooks integration**. + + ![alt](/images/version1.2.0/monitor/slack-add-incoming-webhook-integration.png) + +10. You get a webhook URL that you should copy and note it down in a separate file for using it later. + + ![alt](/images/version1.2.0/monitor/slack-copy-webhook-url.png) + +11. Click **Save Settings** to save the webhook integration. + + ![alt](/images/version1.2.0/monitor/slack-webhook-save.png) + +12. This step ensures that the necessary `monitoring` namespace is available for Prometheus. Return to the KubeSlice Controller command terminal. If you already have an existing `monitoring` namespace for Prometheus, you can continue with the next step. If the `monitoring` namespace does not exist, you need to create it by running the following command: + + ``` + kubectl create ns monitoring + ``` + +13. Use the following command to complete the integration: + + (Replace `Slack API URL` with the webhook URL you had copied from the Slack + `#kubernetes-events-alerts` channel.) + + ``` + helm install prometheus kubeslice/prometheus --set alertmanager.enabled=true --set-string alertmanager.config.global.slack_api_url="" -n + ``` + +### Verify the KubeSlice Alerts Integration with Slack +Verify the running pods in the `monitoring` namespace using the following command: + +``` +kubectl get pods -n +``` + +Expected Output +``` +NAME READY STATUS RESTARTS AGE +prometheus-alertmanager-0 1/1 Running 0 26s +prometheus-kube-state-metrics-7f6bc6c4ff-29kts 1/1 Running 0 27s +prometheus-prometheus-node-exporter-7nmxc 1/1 Running 0 27s +prometheus-prometheus-node-exporter-9bvlk 1/1 Running 0 27s +prometheus-prometheus-node-exporter-rd6tk 1/1 Running 0 27s +prometheus-server-6bf6cb66cd-w6w4r 2/2 Running 0 27s +``` + +## Configure Prometheus Alert Rules + +Prometheus is a powerful real-time series database used for monitoring various components. To effectively monitor and receive alerts when components deviate from expected behavior, it is necessary to configure the corresponding rules on Prometheus. It's important to note that these rules are specifically required for brownfield Prometheus deployments. However, if you are using the Prometheus instance that comes bundled with KubeSlice, these rules are already pre-configured and you can proceed without the need for additional setup. + +### Slack Metric Alert Support +At present, our support for metric alerts is limited to Slack integration. To configure +Prometheus alert rules and receive alerts, it is essential to be a Slack user. +Ensure that you have access to a Slack workspace and the necessary permissions to set up +and manage alerts. Our alerting system leverages the power of Slack to deliver timely +notifications for monitoring and managing your components effectively. + +## KSM and Custom Resources Metrics +Kube State Metrics (KSM) is a vital service that establishes communication with the Kubernetes +API server to gather comprehensive information about various objects, including deployments, +pods, and namespaces. It generates metrics following the stable Prometheus format, aligning with the reliability of the Kubernetes API. + +In addition to KSM, Custom Resources Metrics play a crucial role in providing specific +insights into KubeSlice components such as slices, service exports/imports, Slice Gateway, +and more. 
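+
+Before you add alert rules, you can optionally confirm that these metrics are being scraped
+by querying the Prometheus HTTP API. The commands below are a quick sanity check, not a
+required step; they assume the Prometheus server service is named `prometheus-server` and
+runs in the `monitoring` namespace created earlier, so adjust the names to match your
+deployment.
+
+```
+# Forward the Prometheus server port to your local machine.
+kubectl port-forward svc/prometheus-server 9090:80 -n monitoring &
+
+# Query a KubeSlice custom resource metric; an empty result means it is not scraped yet.
+curl -s -G 'http://localhost:9090/api/v1/query' --data-urlencode 'query=kubeslice_cluster_up'
+
+# Query the KSM pod-phase metric that the alert rules below are built on.
+curl -s -G 'http://localhost:9090/api/v1/query' --data-urlencode 'query=kube_pod_status_phase{namespace=~"^kubeslice.*|spire|istio-system"}'
+```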
+ +To ensure effective monitoring, we have Prometheus alert rules in place that target specific +namespaces, including: + +- kubeslice-controller +- kubeslice-system +- istio-system +- spire + +It's important to note that KubeSlice triggers alerts only when pods are in the +** Failed, Pending, or Unknown** state. This selective approach helps streamline the +alerting process, focusing on critical pod states that require immediate attention. + +## Add the Alert Rules into Prometheus +To install the KSM Metrics and Custom Resource Metrics alert rules in Prometheus: + +1. Open the Prometheus configuration file. +2. Locate the section containing the existing alert rules. +3. Add the following alert rules for KSM Metrics under the existing rules section: + +#### Rules for KSM Metrics +The following code snippet contains the rules for KSM metrics. + +``` + - alert: kubeslice component pod status with labels_app + annotations: + description: ' Pod {{ $labels.pod }} is {{ $labels.phase }} , see the Details + for more info with labels' + summary: ' {{ $labels.pod }} Pod went down' + expr: (sum by (pod,namespace,kubernetes_node,phase,job,instance) (kube_pod_status_phase{namespace=~"^kubeslice.*|spire|istio-system",phase=~"Pending|Unknown|Failed"})>0)+on(instance,pod,kubernetes_node,namespace)group_left(label_app,label_cluster_name)(sum by(pod,namespace,kubernetes_node,label_app,instance,label_cluster_name)(kube_pod_labels{namespace=~"^kubeslice.*|spire|istio-system",label_app=~"app_net_op|nsmgr|admission-webhook-k8s|kubeslice-dns|forwarder-kernel|cert-manager|controller-manager|kubeslice-api-gw|kubeslice-ui-proxy|kubeslice-ui|registry|spire-agent|spire-server|istiod"})>0) + for: 1m + labels: + severity: slack + - alert: kubeslice operator pod status + annotations: + description: ' Pod {{ $labels.pod }} is {{ $labels.phase }} , see the Details + for more info with labels' + summary: ' {{ $labels.pod }} Pod went down' + expr: (sum by(pod,namespace,kubernetes_node,phase,job,instance)(kube_pod_status_phase{namespace=~"^kubeslice.*|spire|istio-system",phase=~"Pending|Unknown|Failed"})>0)+on(instance,pod,kubernetes_node,namespace)group_left(label_spoke_cluster,label_control_plane,label_cluster_name)(sum by(pod, namespace,label_cluster_name,kubernetes_node,label_spoke_cluster,label_control_plane,instance)(kube_pod_labels{namespace=~"^kubeslice.*|spire|istio-system",label_spoke_cluster=~"^w.*|^c.*"})>0) + for: 1m + labels: + severity: slack + - alert: vl3 pod status + annotations: + description: Pod {{ $labels.pod }} is {{ $labels.phase }} , see the Details + for more info with labels + summary: '{{ $labels.pod }} Pod went down' + expr: (sum by(pod,namespace,kubernetes_node,phase,job,instance)(kube_pod_status_phase{namespace=~"^kubeslice.*|spire|istio-system",phase=~"Pending|Unknown|Failed"})>0)+on(instance,pod,kubernetes_node,namespace)group_left(label_networkservicemesh_io_app,label_cluster_name,label_networkservicemesh_io_impl,label_kubeslice_io_slice,label_pod_template_hash)(sum by(pod,namespace,label_cluster_name,kubernetes_node,label_networkservicemesh_io_app,label_networkservicemesh_io_impl,label_kubeslice_io_slice,label_pod_template_hash,instance)(kube_pod_labels{namespace=~"^kubeslice.*|spire|istio-system",pod=~"^vl3-slice-router.*"})>0) + for: 1m + labels: + severity: slack + - alert: vpn gw pod status + annotations: + description: Pod {{ $labels.pod }} is {{ $labels.phase }} , see the Details + for more info with labels + expr: (sum 
by(pod,namespace,kubernetes_node,phase,job,instance)(kube_pod_status_phase{namespace=~"^kube.*|spire|istio-system",phase=~"Pending|Unknown|Failed"})>0)+on(instance,pod,kubernetes_node,namespace)group_left(label_kubeslice_io_slice,label_cluster_name,label_kubeslice_io_slice_gw,label_networkservicemesh_io_app,label_pod_template_hash,label_kubeslice_io_pod_type)(sum by(pod,namespace,kubernetes_node,label_cluster_name,label_kubeslice_io_slice,label_kubeslice_io_slice_gw,label_networkservicemesh_io_app,label_pod_template_hash,label_kubeslice_io_pod_type,instance)(kube_pod_labels{namespace=~"^kubeslice.*|spire|istio-system",pod=~"^b.*|^i.*"})>0) + for: 1m + labels: + severity: slack +``` + +#### Rules for Custom Resource Metrics +The following code snippet contains the rules for custom resources metrics. + +``` + - alert: kubeslice cluster health + annotations: + description: kubeslice cluster {{ $labels.slice_cluster }} is not up for project {{ $labels.slice_project }}, see the Details for more info with labels + expr: kubeslice_cluster_up < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice cluster component health + annotations: + description: cluster component {{ $labels.slice_cluster_component }} is unhealthy for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }}, see the Details for more info with labels + expr: kubeslice_cluster_component_up < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice slice health + annotations: + description: slice component {{ $labels.slice }} is unhealthy for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }}, see the Details for more info with labels + expr: kubeslice_slice_up < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice slice component health + annotations: + description: slice component {{ $labels.slice_component }} is unhealthy for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }}, see the Details for more info with labels + expr: kubeslice_slice_component_up < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice app pod details + annotations: + description: No app pod is active on slice {{ $labels.slice }} for project {{ $labels.slice_project }} at cluster {{ $labels.slice_cluster }}, see the Details for more info with labels + expr: kubeslice_app_pods < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice service export endpoints + annotations: + description: for project {{ $labels.slice_project }} no service export endpoints is active on slice {{ $labels.slice }} in namespace {{ $labels.slice_namespace }}, see the Details for more info with labels + expr: kubeslice_serviceexport_endpoints < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice service import endpoints + annotations: + description: for project {{ $labels.slice_project }} no service import endpoints is active on slice {{ $labels.slice }} in namespace {{ $labels.slice_namespace }}, see the Details for more info with labels + expr: kubeslice_serviceimport_endpoints < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice netpol validation + annotations: + description: netpol voilation is not active for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }} and the slice reporting controller is {{ $labels.slice_reporting_controller }}, see the Details for more info with labels + expr: kubeslice_netpol_violations_active < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice slice gateway tunnel validation + annotations: + 
description: no vpn tunnel is active for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }} and the slice reporting controller is {{ $labels.slice_reporting_controller }}, see the Details for more info with labels + expr: kubeslice_slicegateway_tunnel_up < 0 + for: 1m + labels: + severity: slack +``` + +4. Add the [KSM](#rules-for-custom-resource-metrics) and [custom metrics](#rules-for-custom-resource-metrics) + alert rules into the Prometheus configuration. The updated + configuration with alert rules should look as the file below. + + ``` + groups: + - name: kubeslice component status + rules: + - alert: kubeslice component pod status with labels_app + annotations: + description: ' Pod {{ $labels.pod }} is {{ $labels.phase }} , see the Details + for more info with labels' + summary: ' {{ $labels.pod }} Pod went down' + expr: (sum by (pod,namespace,kubernetes_node,phase,job,instance) (kube_pod_status_phase{namespace=~"^kubeslice.*|spire|istio-system",phase=~"Pending|Unknown|Failed"})>0)+on(instance,pod,kubernetes_node,namespace)group_left(label_app,label_cluster_name)(sum by(pod,namespace,kubernetes_node,label_app,instance,label_cluster_name)(kube_pod_labels{namespace=~"^kubeslice.*|spire|istio-system",label_app=~"app_net_op|nsmgr|admission-webhook-k8s|kubeslice-dns|forwarder-kernel|cert-manager|controller-manager|kubeslice-api-gw|kubeslice-ui-proxy|kubeslice-ui|registry|spire-agent|spire-server|istiod"})>0) + for: 1m + labels: + severity: slack + - alert: kubeslice operator pod status + annotations: + description: ' Pod {{ $labels.pod }} is {{ $labels.phase }} , see the Details + for more info with labels' + summary: ' {{ $labels.pod }} Pod went down' + expr: (sum by(pod,namespace,kubernetes_node,phase,job,instance)(kube_pod_status_phase{namespace=~"^kubeslice.*|spire|istio-system",phase=~"Pending|Unknown|Failed"})>0)+on(instance,pod,kubernetes_node,namespace)group_left(label_spoke_cluster,label_control_plane,label_cluster_name)(sum by(pod, namespace,label_cluster_name,kubernetes_node,label_spoke_cluster,label_control_plane,instance)(kube_pod_labels{namespace=~"^kubeslice.*|spire|istio-system",label_spoke_cluster=~"^w.*|^c.*"})>0) + for: 1m + labels: + severity: slack + - alert: vl3 pod status + annotations: + description: Pod {{ $labels.pod }} is {{ $labels.phase }} , see the Details + for more info with labels + summary: '{{ $labels.pod }} Pod went down' + expr: (sum by(pod,namespace,kubernetes_node,phase,job,instance)(kube_pod_status_phase{namespace=~"^kubeslice.*|spire|istio-system",phase=~"Pending|Unknown|Failed"})>0)+on(instance,pod,kubernetes_node,namespace)group_left(label_networkservicemesh_io_app,label_cluster_name,label_networkservicemesh_io_impl,label_kubeslice_io_slice,label_pod_template_hash)(sum by(pod,namespace,label_cluster_name,kubernetes_node,label_networkservicemesh_io_app,label_networkservicemesh_io_impl,label_kubeslice_io_slice,label_pod_template_hash,instance)(kube_pod_labels{namespace=~"^kubeslice.*|spire|istio-system",pod=~"^vl3-slice-router.*"})>0) + for: 1m + labels: + severity: slack + - alert: vpn gw pod status + annotations: + description: Pod {{ $labels.pod }} is {{ $labels.phase }} , see the Details + for more info with labels + expr: (sum 
by(pod,namespace,kubernetes_node,phase,job,instance)(kube_pod_status_phase{namespace=~"^kube.*|spire|istio-system",phase=~"Pending|Unknown|Failed"})>0)+on(instance,pod,kubernetes_node,namespace)group_left(label_kubeslice_io_slice,label_cluster_name,label_kubeslice_io_slice_gw,label_networkservicemesh_io_app,label_pod_template_hash,label_kubeslice_io_pod_type)(sum by(pod,namespace,kubernetes_node,label_cluster_name,label_kubeslice_io_slice,label_kubeslice_io_slice_gw,label_networkservicemesh_io_app,label_pod_template_hash,label_kubeslice_io_pod_type,instance)(kube_pod_labels{namespace=~"^kubeslice.*|spire|istio-system",pod=~"^b.*|^i.*"})>0) + for: 1m + labels: + severity: slack + - alert: kubeslice cluster health + annotations: + description: kubeslice cluster {{ $labels.slice_cluster }} is not up for project {{ $labels.slice_project }}, see the Details for more info with labels + expr: kubeslice_cluster_up < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice cluster component health + annotations: + description: cluster component {{ $labels.slice_cluster_component }} is unhealthy for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }}, see the Details for more info with labels + expr: kubeslice_cluster_component_up < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice slice health + annotations: + description: slice component {{ $labels.slice }} is unhealthy for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }}, see the Details for more info with labels + expr: kubeslice_slice_up < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice slice component health + annotations: + description: slice component {{ $labels.slice_component }} is unhealthy for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }}, see the Details for more info with labels + expr: kubeslice_slice_component_up < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice app pod details + annotations: + description: No app pod is active on slice {{ $labels.slice }} for project {{ $labels.slice_project }} at cluster {{ $labels.slice_cluster }}, see the Details for more info with labels + expr: kubeslice_app_pods < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice service export endpoints + annotations: + description: for project {{ $labels.slice_project }} no service export endpoints is active on slice {{ $labels.slice }} in namespace {{ $labels.slice_namespace }}, see the Details for more info with labels + expr: kubeslice_serviceexport_endpoints < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice service import endpoints + annotations: + description: for project {{ $labels.slice_project }} no service import endpoints is active on slice {{ $labels.slice }} in namespace {{ $labels.slice_namespace }}, see the Details for more info with labels + expr: kubeslice_serviceimport_endpoints < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice netpol validation + annotations: + description: netpol voilation is not active for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }} and the slice reporting controller is {{ $labels.slice_reporting_controller }}, see the Details for more info with labels + expr: kubeslice_netpol_violations_active < 0 + for: 1m + labels: + severity: slack + - alert: kubeslice slice gateway tunnel validation + annotations: + description: no vpn tunnel is active for project {{ $labels.slice_project }} on cluster {{ $labels.slice_cluster }} and the slice 
reporting controller is {{ $labels.slice_reporting_controller }}, see the Details for more info with labels + expr: kubeslice_slicegateway_tunnel_up < 0 + for: 1m + labels: + severity: slack + ``` + +2. To upgrade Prometheus, you can use the following command: + +```shell +helm upgrade prometheus kubeslice/prometheus --set alertmanager.enabled=true --set-string alertmanager.config.global.slack_api_url="" -n +``` + +Make sure to replace `` with the actual URL of your Slack API and `` with the desired namespace for Prometheus. + +:::info + For instructions on how to get the Slack API URL (also known as webhook URL), see [Configure Alerts integration](#configure-alerts-integration). +::: + +:::success +Congratulations! You have successfully configured the alert rules to monitor KubeSlice. Now, +whenever there is an event that requires attention, Prometheus will send alerts through Slack. +Stay informed and keep an eye on the monitoring alerts to ensure the smooth operation of KubeSlice. +::: \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-authentication.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-authentication.mdx new file mode 100644 index 00000000..7aba58ba --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-authentication.mdx @@ -0,0 +1,343 @@ +# Cluster Authentication +Before registering each worker cluster with the KubeSlice Controller, you must +authenticate with each cloud provider used in the installation. + +To register your worker clusters with the KubeSlice Controller, it is +necessary to authenticate with each cloud provider used in the installation. +To accomplish this, run the commands below to retrieve the relevant kubeconfig file +and add it to your default kubeconfig path. + +## Azure Kubernetes Service (AKS) +For information on prerequisites and authentication, see [Microsoft AKS Docs](https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az-aks-get-credentials). +The following information is required to retrieve your Microsoft Azure +Kubernetes Service (AKS) `kubeconfig`. + +| **Variable** | **Description** | +|-----|-----| +| | The name of the resource group the cluster belongs to. + | The name of the cluster you would like to get credentials for.| + +The following command retrieves your AKS cluster `kubeconfig` and add it to your default `kubeconfig` path. Complete this step for each +AKS cluster in the configuration. + +``` +az aks get-credentials --resource-group --name +``` + +## AWS Elastic Kubernetes Service (EKS) +For information on prerequisites and other required details, see the [Amazon EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html). + +The following information is required to retrieve your Elastic Kubernetes +Service (EKS) `kubeconfig`. + +| **Variable** | **Description** | +|-----|----| +| | The name of the cluster you want to get credentials for.| +| | The AWS region the cluster belongs to.| + + +The following command retrieves your EKS cluster `kubeconfig` and adds +it to your default `kubeconfig` path. Complete this step for each EKS +cluster in the configuration. + +``` +aws eks update-kubeconfig --name --region +``` + +## Google Kubernetes Engine (GKE) +For information on the prerequisites and other required details, see [Google Cloud CLI Docs](https://cloud.google.com/sdk/gcloud/reference/container/clusters/get-credentials). 
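+
+Depending on your `gcloud` and `kubectl` versions, kubectl may also need the GKE
+authentication plugin to use the generated credentials. This is an additional assumption
+about your environment; skip it if `kubectl` already works against your GKE clusters.
+
+```
+# Install the kubectl authentication plugin for GKE (required with newer kubectl versions).
+gcloud components install gke-gcloud-auth-plugin
+```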
+ +The following information is required to retrieve your Google Kubernetes +Engine (GKE) `kubeconfig`. + +| **Variable** | **Description** | +|-----|----| +| | The name of the cluster you want to get credentials for.| +| | The region the cluster belongs to.| +| | The project ID that the cluster belongs to.| + + +The following command retrieves your GKE cluster `kubeconfig` and adds +it to your default `kubeconfig` path. Complete this step for each GKE +cluster in the configuration. + +``` +gcloud container clusters get-credentials --region --project +``` + +Expected Output + +``` +Fetching cluster endpoint and auth data. +kubeconfig entry generated for +``` + +## Kind Clusters Authentication +Using the kubeslice cli's `minimal-demo` or `full-demo` option creates kind clusters for you. +To use a new topology file for kubeslice-cli or configure KubeSlice with YAML, you must +prepare the kind clusters as described below. + +### Prepare the Controller Cluster for Registration +Create a YAML file to prepare the controller cluster for registration by using the +following template: + +:::info +The `networking` property is required for the namespace isolation +feature. By default, the kind cluster has the kindnet CNI setting, but it needs to be +disabled for the namespace isolation feature to work. We install Calico instead for the +CNI network. +::: + +:::info +To understand more about the configuration parameters, see +[kind - Configuration](https://kind.sigs.k8s.io/docs/user/configuration/). +::: + +:::caution +If you face memory issues with a **two-nodes** kind cluster, then use a +**single-node** kind cluster. +::: + +``` +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +networking: + # WARNING: It is _strongly_ recommended that you keep this the default + # (127.0.0.1) for security reasons. However it is possible to change this. + apiServerAddress: "127.0.0.1" + # By default the API server listens on a random open port. + # You may choose a specific port but probably don't need to in most cases. + # Using a random port makes it easier to spin up multiple clusters. + apiServerPort: 6443 + # By default kind takes kindnet CNI but we are disabling this to use netpol feature + disableDefaultCNI: true # disable kindnet + podSubnet: 192.168.0.0/16 # set to Calico's default subnet +nodes: + - role: control-plane + image: kindest/node:v1.21.10@sha256:84709f09756ba4f863769bdcabe5edafc2ada72d3c8c44d6515fc581b66b029c + - role: worker + image: kindest/node:v1.21.10@sha256:84709f09756ba4f863769bdcabe5edafc2ada72d3c8c44d6515fc581b66b029c + kubeadmConfigPatches: + - | + kind: JoinConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "kubeslice.io/node-type=gateway" +``` + + Use the following template to create a single-node controller cluster. + + ``` + kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +networking: + # WARNING: It is _strongly_ recommended that you keep this the default + # (127.0.0.1) for security reasons. However it is possible to change this. + apiServerAddress: "127.0.0.1" + # By default the API server listens on a random open port. + # You may choose a specific port but probably don't need to in most cases. + # Using a random port makes it easier to spin up multiple clusters. 
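+  # Note: only one kind cluster on a host can bind a fixed apiServerPort; the worker
+  # templates below omit this field so that kind picks a free port for each worker cluster.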
+ apiServerPort: 6443 + # By default kind takes kindnet CNI but we are disabling this to use netpol feature + disableDefaultCNI: true # disable kindnet + podSubnet: 192.168.0.0/16 # set to Calico's default subnet +nodes: + - role: control-plane + image: kindest/node:v1.21.10@sha256:84709f09756ba4f863769bdcabe5edafc2ada72d3c8c44d6515fc581b66b029c + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "kubeslice.io/node-type=gateway" + ``` + +### Apply the YAML File to Create the Controller Cluster +Apply the YAML File to create the controller cluster by running this command: + +``` +kind create cluster --name --config kind-controller-cluster.yaml +``` + + +### Prepare the Worker Cluster +Create a YAML file to prepare the worker cluster for registration by using the +following template: + +:::info +The `networking` property is required for the namespace isolation +feature. By default, the kind cluster has the kindnet CNI setting, but it needs to be +disabled for the namespace isolation feature to work. We install Calico instead for the +CNI network. +::: + +:::info +To understand more about the configuration parameters, see +[kind – Configuration](https://kind.sigs.k8s.io/docs/user/configuration/). +::: + +:::caution +If you face memory issues with a **two-nodes** kind cluster, then use a +**single-node** kind cluster. +::: + +``` +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +networking: + # By default kind takes kindnet CNI but we are disabling this to use netpol feature + disableDefaultCNI: true # disable kindnet + podSubnet: 192.168.0.0/16 # set to Calico's default subnet +nodes: + - role: control-plane + image: kindest/node:v1.21.10@sha256:84709f09756ba4f863769bdcabe5edafc2ada72d3c8c44d6515fc581b66b029c + - role: worker + image: kindest/node:v1.21.10@sha256:84709f09756ba4f863769bdcabe5edafc2ada72d3c8c44d6515fc581b66b029c + kubeadmConfigPatches: + - | + kind: JoinConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "kubeslice.io/node-type=gateway" +``` + +Use the following template to create a single-node worker cluster. + +``` +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +networking: + # By default kind takes kindnet CNI but we are disabling this to use netpol feature + disableDefaultCNI: true # disable kindnet + podSubnet: 192.168.0.0/16 # set to Calico's default subnet +nodes: + - role: control-plane + image: kindest/node:v1.21.10@sha256:84709f09756ba4f863769bdcabe5edafc2ada72d3c8c44d6515fc581b66b029c + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "kubeslice.io/node-type=gateway" +``` + +### Apply the YAML File to Create the Worker Cluster +Apply the YAML File to create the worker cluster by running this +command: + +**For worker cluster 1** + +``` +kind create cluster --name --config kind-Worker-cluster.yaml +``` + +**For worker cluster 2** + +``` +kind create cluster --name --config kind-Worker-cluster.yaml +``` + +### Install Calico Networking and Network Security +Install +[Calico](https://projectcalico.docs.tigera.io/about/about-calico) to provide networking and +network security for kind clusters. + +:::info +Install Calico only after creating the clusters. +::: + +To install Calico on a kind cluster: +1. Install the operator on your cluster by using the following command: + +``` +kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/tigera-operator.yaml +``` + +2. 
Download the custom resources required to configure Calico by using +the following command: + +``` +curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/custom-resources.yaml -O +``` + +Running the above command downloads a file, which contains the following +content. + +``` +# This section includes base Calico installation configuration. +# For more information, see: https://projectcalico.docs.tigera.io/v3.23/reference/installation/api#operator.tigera.io/v1.Installation +apiVersion: operator.tigera.io/v1 +kind: Installation +metadata: + name: default +spec: + # Configures Calico networking. + calicoNetwork: + # Note: The ipPools section cannot be modified post-install. + ipPools: + - blockSize: 26 + cidr: 192.168.0.0/16 + encapsulation: VXLANCrossSubnet + natOutgoing: Enabled + nodeSelector: all() + +--- + +# This section configures the Calico API server. +# For more information, see: https://projectcalico.docs.tigera.io/v3.23/reference/installation/api#operator.tigera.io/v1.APIServer +apiVersion: operator.tigera.io/v1 +kind: APIServer +metadata: + name: default +spec: {} +``` + +3. Create the manifest to install Calico by using the following +command: + +``` +kubectl create -f custom-resources.yaml +``` + +4. Validate namespaces related to Calico by using the following +command: + +``` +kubectl get ns +``` + +Expected Output + +``` +NAME STATUS AGE +calico-apiserver Active 3d +calico-system Active 3d +default Active 3d +kube-node-lease Active 3d +kube-public Active 3d +kube-system Active 3d +local-path-storage Active 3d +tigera-operator Active 3d +``` + +5. Validate the Calico pods by using the following command: + +``` +kubectl get pods -n calico-system +``` + +Expected Output + +``` +NAME READY STATUS RESTARTS AGE +calico-kube-controllers-59f859b79d-vbmqh 1/1 Running 1 30s +calico-node-nq7sp 1/1 Running 0 30s +calico-node-rhw7h 1/1 Running 0 30s +calico-node-tfqzp 1/1 Running 0 30s +calico-typha-8b888f7d8-fx62t 1/1 Running 0 30s +calico-typha-8b888f7d8-vnb67 1/1 Running 0 30s +``` + +:::success +Calico networking is installed successfully. +::: diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-networking.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-networking.mdx new file mode 100644 index 00000000..a8ac733c --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-networking.mdx @@ -0,0 +1,9 @@ +# Cluster Networking +To ensure the proper functioning of the KubeSlice Gateway Nodes in both public and +private clusters, please open the required UDP ports. 
+ +| Kubernetes Service | CNI with Network Policy Support | Open UDP Ports | Reachability Options | +| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------- | ------------------------------------ | +| Azure Kubernetes Service | [Enable Calico for Azure AKS](https://projectcalico.docs.tigera.io/getting-started/kubernetes/managed-public-cloud/aks#install-aks-with-calico-for-network-policy) | 30000-33000 | Public Node IP with NodePort Service | +| AWS Elastic Kubernetes Service | [Enable Calico for Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/calico.html) | 30000-33000 | Public Node IP with NodePort Service | +| Google Kubernetes Engine | [Enable Calico for Google GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy#enabling_network_policy_enforcement) | 30000-33000 | Public Node IP with NodePort Service | \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-rbac-roles.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-rbac-roles.mdx new file mode 100644 index 00000000..6352948c --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-rbac-roles.mdx @@ -0,0 +1,5 @@ +# Required Cluster Roles (RBAC) +Installing the KubeSlice Controller and Slice Operator **requires admin privileges** on the +Kubernetes cluster. Therefore, only a user with the Kubernetes cluster admin role can +perform this task. The correct name of the RBAC role for admin privileges in Kubernetes is +`cluster-admin`. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-command-line-tools.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-command-line-tools.mdx new file mode 100644 index 00000000..20ec9884 --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-command-line-tools.mdx @@ -0,0 +1,19 @@ +# Command Line Tools +This section of the documentation aims to meet the requirements for product installations +by providing information about the necessary prerequisites for installing KubeSlice. +It is essential to have a well-configured environment that fulfills the installation +requirements for KubeSlice. This involves the installation of the KubeSlice Controller +and registering clusters with it. This topic will explain the prerequisites in detail to +help you successfully install KubeSlice. + +## Required Tools +To make the installation process of KubeSlice smoother and more accessible, we have listed the +required command line tools below. With these tools readily available, you can easily install +KubeSlice and start benefiting from its features. 
+ +| Package Required | Installation Instructions | +| ----------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | +| [Helm](https://helm.sh) - The Package Manager for Kubernetes | [Installing Helm](https://helm.sh/docs/intro/install/) | +| [kubectl](https://kubernetes.io/docs/reference/kubectl/overview) - Kubernetes CLI | [Installing kubectl](https://kubernetes.io/docs/tasks/tools/) | +| [kubectx and kubens](https://github.com/ahmetb/kubectx) - Cluster Context Switching & Namespace Assignments | [Installing kubectx & kubens](https://github.com/ahmetb/kubectx#installation) | +| [kubeslice-cli ](https://github.com/kubeslice/kubeslice-cli/releases)- KubeSlice Installer Binaries | [Installing kubeslice-cli](/get-started/prerequisites/prerequisites-kubeslice-cli-install.mdx) | diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-configure-helm-repository.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-configure-helm-repository.mdx new file mode 100644 index 00000000..5c8b97ca --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-configure-helm-repository.mdx @@ -0,0 +1,65 @@ +# Configure Helm Repository +The KubeSlice repository serves as a centralized hub for accessing and distributing +essential resources, including charts, templates, and configurations, specifically designed +for the KubeSlice monitoring and observability solution in Kubernetes. By offering a curated +collection of KubeSlice-specific charts, the repository simplifies the deployment and +management process, allowing users to seamlessly set up and monitor their Kubernetes clusters. +With comprehensive charts for monitoring, metrics, and alerting systems, the repository ensures +efficient monitoring and observability while promoting versioning, distribution, and +collaboration among users. Overall, the KubeSlice repository acts as a valuable resource, +enabling users to effortlessly deploy and manage KubeSlice components, facilitating effective +monitoring and observability within Kubernetes environments. + +## Add Repository + +Add the helm repository information to your local system. + + ``` + helm repo add kubeslice https://kubeslice.github.io/kubeslice/ + ``` + + **Expected Output** + + ``` + "kubeslice" has been added to your repositories + ``` + +## Update Repository +Update the repositories on your system with the following command: + + ``` + helm repo update + ``` + + **Expected Output** + + ``` + Hang tight while we grab the latest from your chart repositories... + ...Successfully got an update from the "kubeslice" chart repository + Update Complete. ⎈Happy Helming!⎈ + ``` +## Verify Repository +To verify if the repository was added successfully, view the KubeSlice +charts using the following command: + + ``` + helm search repo kubeslice + ``` + + **Expected Output** + + ``` + + NAME CHART VERSION APP VERSION DESCRIPTION + kubeslice/botkube v1.0.0 v1.0.0 Controller for the Kubeslice Botkube Slack app ... + kubeslice/cert-manager v1.7.0 v1.7.0 A Helm chart for cert-manager + kubeslice/istio-base 1.16.0 1.16.0 Helm chart for deploying Istio cluster resource... 
+ kubeslice/istio-discovery 1.16.0 1.16.0 Helm chart for istio control plane + kubeslice/kubeslice-controller 1.2.0 1.2.0 A Helm chart for kubeslice-controller + kubeslice/kubeslice-worker 1.2.0 1.2.0 A Helm chart for kubeslice-worker + kubeslice/prometheus 19.3.0 v2.41.0 Prometheus is a monitoring system and time seri... + ``` + +:::success +You have successfully prepared your clusters to install the KubeSlice. +::: \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-gateway-node-label.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-gateway-node-label.mdx new file mode 100644 index 00000000..eaedf67a --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-gateway-node-label.mdx @@ -0,0 +1,70 @@ +# Label KubeSlice Gateway Nodes +Labeling your gateway nodes on the **worker cluster** is required to ensure proper management +of scheduling rules for nodes and enabling node gateway to gateway network communication. + +We recommend using a **dedicated node pool** for your gateway components. + +However, if your worker cluster contains only one node pool, follow the instructions below to +label worker node pools. + +Additionally, we recommend using a dedicated node pool for your gateway components and +following the instructions for labeling accordingly. + +Perform these steps in each worker cluster that you want to register with +the KubeSlice Controller. + +### Azure Kubernetes Service +AKS nodepools can only be set during nodepool creation. The nodepool +must contain the `kubeslice.io/node-type=gateway` label. For instructions on creating +a labeled nodepool, see [AKS documentation](https://docs.microsoft.com/en-us/azure/aks/use-multiple-node-pools#setting-nodepool-labels). + +## AWS Elastic Kubernetes Service +Nodepools are called node groups in EKS clusters. You can add or remove the Kubernetes +labels by editing a node group configuration as described in +[updating managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/update-managed-node-group.html). +Add the `kubeslice.io/node-type=gateway` label to the EKS node groups. + +## Google Kubernetes Engine +The following information is required to label the GKE cluster nodepools. + +|**Variable** | **Description** | +|-----|----| +|| The name of the nodepool being labeled.| +| | The name of the cluster the nodepool being labeled belongs to.| +| | The Compute Engine region for the cluster the nodepool belongs to.| +| | The Compute Engine zone for the cluster the nodepool belongs to.| + +The following command labels the GKE cluster nodepool: + +``` +gcloud container node-pools update \ + --node-labels=kubeslice.io/node-type=gateway \ + --cluster= \ + [--region= | --zone=] +``` +## Other Kubernetes Cloud Clusters +Ensure that the nodepools on the Kubernetes clusters are labeled with `kubeslice.io/node-type=gateway`. + +## Verify Your Labels +To verify the labels of your cluster, perform the following steps on each worker cluster that you wish to register with the KubeSlice Controller. + +1. To verify the label, switch to the context of the cluster that you want to verify. + + ``` + kubectx + ``` + +2. Run the following command to get all nodes with the `kubeslice.io/node-type=gateway` label. + + ``` + kubectl get no -l kubeslice.io/node-type=gateway + ``` + +3. If you successfully set your labels, you get a list of the labeled nodes in the cluster. 
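+
+If the list comes back empty on a cluster where you cannot set labels at the node pool
+level, you can label nodes individually with `kubectl` as a fallback; `<node-name>` below
+is a placeholder for one of your gateway nodes. Note that labels applied directly to nodes
+in managed node pools may be lost when those nodes are replaced, so prefer pool-level
+labels where your provider supports them.
+
+```
+# Label a single node so that KubeSlice schedules gateway components on it.
+kubectl label node <node-name> kubeslice.io/node-type=gateway
+```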
+ \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-install-istio.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-install-istio.mdx new file mode 100644 index 00000000..670b6fc9 --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-install-istio.mdx @@ -0,0 +1,41 @@ +# Istio +[Istio](http://istio.io/) is an open source service mesh that is used frequently to connect and secure +microservices within a cluster. The below instructions will install istio from the +helm repository chart. + +:::caution +You can skip these steps if you have already installed the recommended Istio version on the cluster. +::: + +## Install Istio +Install Istio on all worker cluster(s) participating in the configuration: + +1. Switch the context to the worker cluster that will be registered with the KubeSlice Controller. + ``` + kubectx + ``` +2. Create the `istio-system` namespace using the following command: + ``` + kubectl create ns istio-system + ``` +3. Install the `istio-base` chart from the helm repository using the following command: + ``` + helm install istio-base kubeslice/istio-base -n istio-system + ``` +4. Install the `istio-discovery` chart from the helm repository using the following command: + ``` + helm install istiod kubeslice/istio-discovery -n istio-system + ``` +## Validate Istio Installation + +Validate the installation of Istio by checking the pod status. Use the following command +to check if the pods are running: +``` +kubectl get pods -n istio-system +``` + +**Example Output** +``` +NAME READY STATUS RESTARTS AGE +istiod-66f576dd98-jtshj 1/1 Running 0 60s +``` \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-kubeslice-cli-install.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-kubeslice-cli-install.mdx new file mode 100644 index 00000000..b403445a --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-kubeslice-cli-install.mdx @@ -0,0 +1,102 @@ +# Installing kubeslice-cli + +## Introduction + +kubeslice-cli is a command-line tool designed to simplify KubeSlice operations on Kubernetes +and cloud clusters. With this tool, you can easily install and uninstall the necessary +workloads to run the KubeSlice Controller and Slice Operator on specific clusters. Additionally, +it registers these clusters as part of a KubeSlice multi-cluster configuration and manages slices across them. + +To use the tool, you provide input in the form of YAML files. The topology YAML file describes +the cluster membership in the multi-cluster configuration, while the slice configuration YAML +file defines slices across these clusters. By using these YAML files, kubeslice-cli simplifies +the KubeSlice installation process by configuring each cluster appropriately. This way, you +can install KubeSlice on your on-premises or cloud clusters without any hassle. + +## Install the kubeslice-cli Tool +To install the `kubeslice-cli` tool on different operating systems, download an executable version from the +[Releases](https://github.com/kubeslice/kubeslice-cli/releases) page. + +### Install kubeslice-cli on Windows + +To install kubeslice-cli: + +1. Download the latest Windows version from the [Releases](https://github.com/kubeslice/kubeslice-cli/releases) page. + After downloading the executable file, rename it to **kubeslice-cli.exe** for ease of use. + +2. 
Navigate to the directory where you have downloaded the executable file or provide the absolute path to run the `kubeslice-cli` command.
+
+3. Ensure the downloaded version is the latest using the following command:
+   ```
+   .\kubeslice-cli.exe --version
+   ```
+### Install kubeslice-cli on Linux
+
+To install kubeslice-cli on Linux or Ubuntu:
+
+1. Download the latest Linux version from the [Releases](https://github.com/kubeslice/kubeslice-cli/releases) page
+   using the following command:
+
+   ```
+   sudo curl -fL https://github.com/kubeslice/kubeslice-cli/releases/download/<version>/kubeslice-cli-<version>-linux-<386 | amd | arm>64 -o /usr/local/bin/kubeslice-cli
+   ```
+2. Make the file executable using the following command:
+
+   ```
+   sudo chmod a+x /usr/local/bin/kubeslice-cli
+   ```
+3. Ensure the downloaded version is the latest using the following command:
+   ```
+   kubeslice-cli --version
+   ```
+
+### Install kubeslice-cli on macOS
+
+To install kubeslice-cli:
+
+1. Download the latest macOS version from the [Releases](https://github.com/kubeslice/kubeslice-cli/releases) page
+   using the following command:
+
+   ```
+   sudo curl -fL https://github.com/kubeslice/kubeslice-cli/releases/download/<version>/kubeslice-cli-<version>-darwin-<amd | arm>64 -o /usr/local/bin/kubeslice-cli
+   ```
+2. Make the file executable using the following command:
+
+   ```
+   sudo chmod a+x /usr/local/bin/kubeslice-cli
+   ```
+
+3. (Only if you see an error) When you try to install kubeslice-cli on macOS, you may get the **Unverified Developer Error Message**.
+   This error message appears when you try to install an application from a developer who is not registered with Apple.
+
+   ![mac](/images/kubeslice-cli/mac-download.png)
+
+   To fix the `Unverified developer error message`, follow the instructions in [enabling the application for macOS](https://www.alphr.com/cannot-be-opened-because-the-developer-cannot-be-verified/).
+
+4. Ensure the downloaded version is the latest using the following command:
+   ```
+   kubeslice-cli --version
+   ```
+
+### Download kubeslice-cli using cURL
+
+See the [Releases](https://github.com/kubeslice/kubeslice-cli/releases) page for the latest version. Use the following command to
+download the latest binary (~6 MB), replacing `<version>` with that version:
+
+```
+sudo curl -fL https://github.com/kubeslice/kubeslice-cli/releases/download/<version>/kubeslice-cli-<version>-linux-amd64 -o /usr/local/bin/kubeslice-cli
+```
+
+Example
+```
+sudo curl -fL https://github.com/kubeslice/kubeslice-cli/releases/download/0.5.0/kubeslice-cli-0.5.0-linux-amd64 -o /usr/local/bin/kubeslice-cli
+```
+Make the binary executable using the following command:
+```
+sudo chmod a+x /usr/local/bin/kubeslice-cli
+```
+Ensure the downloaded version is the latest using the following command:
+```
+kubeslice-cli -v
+```
\ No newline at end of file
diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-kubeslice-controller-requirements.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-kubeslice-controller-requirements.mdx new file mode 100644 index 00000000..54f1e027 --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-kubeslice-controller-requirements.mdx @@ -0,0 +1,46 @@
+# KubeSlice Controller
+Note that the information provided on this page represents the **minimum** requirements
+for installing the product. It is essential to understand that these requirements may vary
+depending on the specific workload of each customer. However, following these minimum
+requirements will ensure a successful installation of the product.
+ +| | | +| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| **Cluster Requirements** | 1 Kubernetes Cluster | +| **Node Requirements** | KubeSlice needs at least two nodes each with a minimum of 4 vCPUs and 16GB of RAM. We highly recommend creating a separate NodePool for KubeSlice components to improve performance and simplify management. | +| **Supported Kubernetes Versions** | [1.26](https://v1-26.docs.kubernetes.io/) and [1.27](https://kubernetes.io/docs/home/supported-doc-versions/) | +| **Supported Kubernetes Services** | Azure Kubernetes Service (AKS), AWS Elastic Kubernetes Service (EKS), Google Kubernetes Engine (GKE), and Rancher Kubernetes Engine (RKE) | +| **Required Helm Version** | 3.7.0 | + + +## Kind Clusters +The following are the infrastructure requirements to install KubeSlice +components. + +:::info +If you are on Ubuntu OS, then it is recommended to increase the `ulimit` +to 2048 or unlimited. + +If you still face issues, see [errors due to too many open files](https://kind.sigs.k8s.io/docs/user/known-issues/#pod-errors-due-to-too-many-open-files). +::: + +| | | +|-----------------------------------------|---------------------------| +| **Host Requirements** | Minimum of 8vCPUs and 8GB of RAM | + +:::info +Ensure to modify the memory and CPU usage allowed to docker as described in +https://docs.docker.com/desktop/windows/#resources. +::: + +:::info +KubeSlice has been tested on AKS, GKE, and EKS, but it should run in any Kubernetes +environment with the supported Kubernetes versions. +::: + +:::info +In an intra-slice scenario, a single cluster can serve as both a controller cluster and a +worker cluster. This allows KubeSlice to segment the cluster into application slices. This +enables you to better manage and isolate your applications, improving the overall efficiency +and performance of your cluster. +::: \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-worker-cluster-requirements.mdx b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-worker-cluster-requirements.mdx new file mode 100644 index 00000000..fcfa0fbe --- /dev/null +++ b/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-worker-cluster-requirements.mdx @@ -0,0 +1,15 @@ +# Worker Cluster +Note that the information provided on this page represents the **minimum** requirements +for installing the product. It is essential to understand that these requirements may vary +depending on the specific workload of each customer. However, following these minimum +requirements will ensure a successful installation of the product. + +| | | +| ----------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| **Cluster Requirements** | 1 Kubernetes Cluster | +| **NodePools Required** | 2 node pools with a minimum of 2 nodes each sized with a minimum of 4 vCPUs and 16GB of RAM. | +| **NodePools Reserved for KubeSlice Components** | 1 NodePool - We highly recommend creating a separate NodePool for KubeSlice components to improve performance and simplify management. 
|
+| **Supported Kubernetes Versions** | [1.26](https://v1-26.docs.kubernetes.io/) and [1.27](https://kubernetes.io/docs/home/supported-doc-versions/) |
+| **Supported Kubernetes Services** | Azure Kubernetes Service (AKS), AWS Elastic Kubernetes Service (EKS), Google Kubernetes Engine (GKE), and Rancher Kubernetes Engine (RKE) |
+| **Required Helm Version** | 3.7.0 |
+| **Required Istio Version** | 1.16.0 |
diff --git a/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/command-reference.mdx b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/command-reference.mdx new file mode 100644 index 00000000..ed0c7bcb --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/command-reference.mdx @@ -0,0 +1,736 @@
+# CLI Command Reference
+This topic contains information about kubeslice-cli commands, syntax, options, resource types, and a few examples of command usage.
+
+## Syntax
+
+Use the following syntax to run the kubeslice-cli tool:
+
+```
+kubeslice-cli [global-options] <command> [<resource>] [command-options]
+```
+
+## Commands
+
+The following table describes the syntax and descriptions for all the `kubeslice-cli` commands.
+
+| Operations | Syntax                              | Description                                                           |
+| ---------- | ----------------------------------- | --------------------------------------------------------------------- |
+| create     | `kubeslice-cli create [options]`    | Creates one or more resources from a file.                            |
+| delete     | `kubeslice-cli delete [options]`    | Deletes resources from a file.                                        |
+| describe   | `kubeslice-cli describe [options]`  | Describes the KubeSlice resources.                                    |
+| edit       | `kubeslice-cli edit [options]`      | Edits and updates the definition of one or more KubeSlice resources.  |
+| get        | `kubeslice-cli get [options]`       | Lists one or more KubeSlice resources.                                |
+| install    | `kubeslice-cli install [options]`   | Installs KubeSlice resources.                                         |
+| uninstall  | `kubeslice-cli uninstall [options]` | Uninstalls KubeSlice resources.                                       |
+| help       | `kubeslice-cli help`                | Gets more information on any command.                                 |
+
+## Options
+
+The following are the kubeslice-cli options.
+
+| Options   | Shorthand | Description                                                                   |
+| --------- | --------- | ----------------------------------------------------------------------------- |
+| --help    | -h        | It provides more information on the kubeslice-cli.                            |
+| --config  | -c        | It is a **global** option. The path to the topology configuration YAML file.  |
+| --version | -v        | It is the version of the `kubeslice-cli` tool.                                |
+
+:::note
+The kubeslice-cli tool must have access to the clusters you want it to manage. To grant it access, set the KUBECONFIG environment
+variable to the path of the kubeconfig file containing the necessary keys to access those clusters.
+To set the KUBECONFIG environment variable, use these commands:
+
+- On Linux/macOS: `export KUBECONFIG=<path-to-kubeconfig-file>`
+- On Windows: `$env:KUBECONFIG = "kubeslice/kubeconfig.yaml"`
+:::
+
+:::caution
+
+- If the global **--config** option is not specified, kubeslice-cli attempts to execute against the current cluster context. Run the
+  following command to determine which context you are currently in: `kubectx -c`.
+- The currently supported operations are all controller cluster specific. You must run the kubeslice-cli commands on the controller cluster.
+  Use this command to switch the cluster context: `kubectx <cluster-name>`.
+:::
+
+## create
+
+Use this command to create one or more KubeSlice resources. For example, use the create command to create a slice, project, or
+service export.
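+
+The slice examples later in this section reference a `slice-config.yaml` file. The following is an
+illustrative sketch of such a SliceConfig manifest; the field names match the SliceConfig spec shown
+in the describe example later on this page, but every value (slice name, subnet, cluster names, and
+QoS settings) is a sample that you must adapt to your own environment.
+
+```
+apiVersion: controller.kubeslice.io/v1alpha1
+kind: SliceConfig
+metadata:
+  name: slice-red                  # sample slice name
+spec:
+  sliceSubnet: 10.190.0.0/16       # sample slice subnet
+  sliceType: Application
+  sliceGatewayProvider:
+    sliceGatewayType: OpenVPN
+    sliceCaType: Local
+  sliceIpamType: Local
+  clusters:                        # sample registered worker clusters
+    - ks-w-1
+    - ks-w-2
+  qosProfileDetails:
+    queueType: HTB
+    priority: 0
+    tcType: BANDWIDTH_CONTROL
+    bandwidthCeilingKbps: 30000
+    bandwidthGuaranteedKbps: 20000
+    dscpClass: AF11
+  namespaceIsolationProfile:
+    isolationEnabled: false
+```
+
+In the examples that follow, such a file is applied with `kubeslice-cli create sliceConfig -n <project-namespace> -f slice-config.yaml`.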
+ +### Syntax + +``` +kubeslice-cli --config create project --namespace +kubeslice-cli create --namespace --filename +``` + +### Options + +The following are the `kubeslice-cli create` options. + +| Option | Shorthand | Description | +| ----------- | --------- | ------------------------------------------------------------------------------ | +| --filename | -f | It is the filename, directory, or URL of the file used to create the resource. | +| --namespace | -n | It is the name of the project namespace on the KubeSlice Controller. | +| --setWorker | -w | It is the list of worker clusters to be registered in the SliceConfig. | +| --config | -c | It is a **global** option. The path to the topology configuration YAML file. | +| --help | -h | It provides information on the create command. | + +### Resource Types + +The following are the resource types used with the create command: + +- **project**: It is used to create, edit, delete, describe, and get the project. +- **sliceConfig**: It is used to create, edit, delete, describe, and get the slice. +- **serviceExportConfig**: It is used to create, edit, delete, describe, and get the service export. + +### Examples + +The following are the example commands: + +1. To create a slice, use the following command: + + ``` + kubeslice-cli create sliceConfig -n -f + ``` + + Example + + ``` + kubeslice-cli create sliceConfig -n kubeslice-avesha -f slice-config.yaml + ``` + + Example output + + ``` + 🏃 Running command: /usr/local/bin/kubectl apply -f slice-config.yaml -n kubeslice-demo + sliceconfig.controller.kubeslice.io/slice-red created + + Successfully Applied Slice Configuration. + ``` + +2. To create a project, use the following command: + + ``` + Kubeslice-cli create project -n + ``` + + Example + + ``` + kubeslice-cli create project kubeslice-avesha -n kubeslice-controller + ``` + + Example Output + + ``` + Creating KubeSlice Project... + ✔ Generated project manifest project.yaml + 🏃 Running command: /usr/local/bin/kubectl apply -f kubeslice/project.yaml -n kubeslice-controller + ✔ Applied project.yaml + Created KubeSlice Project. + ``` + +3. To create service export, use the following command: + + ``` + kubeslice-cli create serviceExportConfig -f -n + ``` + +## delete + +Use this command to delete the KubeSlice resources. For example, use the delete command to delete a slice, project, or +service export. The delete command can also be used to unregister the registered worker cluster. +You must offboard the namespaces and delete the slice before unregistering the worker cluster. +For more information, see [Uninstalling KubeSlice](../../uninstall-kubeslice). + +### Syntax + +``` +kubeslice-cli project --namespace +kubeslice-cli --namespace +``` + +### Options + +The following are the `kubeslice-cli delete` command options. + +| Name | Shorthand | Usage | +| ----------- | --------- | ---------------------------------------------------------------------------- | +| --namespace | -n | It is the name of the project namespace on the KubeSlice Controller. | +| --config | -c | It is a **global** option. The path to the topology configuration YAML file. | +| --help | -h | It provides information on the delete command. | + +### Resource Types + +The following are the resource types used with the create command: + +- **project**: It is used to create, edit, delete, describe, and get the project. +- **sliceConfig**: It is used to create, edit, delete, describe, and get the slice. 
+- **serviceExportConfig**: It is used to create, edit, delete, describe, and get the service export. + +### Examples + +The following are the example commands: + +1. To delete the slice configuration, use the following command: + + ``` + kubeslice-cli delete sliceConfig -n + ``` + + Example + + ``` + kubeslice-cli delete sliceConfig blue -n kubeslice-demo + ``` + + Example Output + + ``` + Deleting KubeSlice SliceConfig... + 🏃 Running command: /usr/local/bin/kubectl delete sliceconfigs.controller.kubeslice.io blue -n kubeslice-demo + sliceconfig.controller.kubeslice.io "blue" deleted + ``` + +2. To delete a project, use the following command: + + ``` + kubeslice-cli delete project -n + ``` + + Example + + ``` + kubeslice-cli delete project rainbow -n kubeslice-controller + ``` + + Example Output + + ``` + Deleting KubeSlice Project... + 🏃 Running command: /usr/local/bin/kubectl delete projects.controller.kubeslice.io uma -n kubeslice-controller + project.controller.kubeslice.io "rainbow" deleted + ``` + +3. To delete a registered worker cluster, use the following command: + ``` + kubeslice-cli delete worker -n + ``` + Example: + ``` + kubeslice-cli delete worker kind-ks-w-4 -n kubeslice-demo + ``` + Example Output + ``` + Deleting KubeSlice Worker... + 🏃 Running command: /usr/local/bin/kubectl delete clusters.controller.kubeslice.io kind-ks-w-4 -n kubeslice-demo + cluster.controller.kubeslice.io "kind-ks-w-4" deleted + ``` + +## describe + +Use this command to describe KubeSlice resources. This shows the details of a specific KubeSlice resource. + +### Syntax + +``` +kubeslice-cli describe project --namespace +kubeslice-cli describe --namespace +``` + +### Options + +The following are the `kubeslice-cli describe` command options. + +| Option | Shorthand | Description | +| ----------- | --------- | ---------------------------------------------------------------------------- | +| --namespace | -n | It is the name of the project namespace on the KubeSlice Controller. | +| --config | -c | It is a **global** option. The path to the topology configuration YAML file. | +| --help | -h | It provides information on the describe command. | + +### Resource Types + +The following are the resource types used with the describe command: + +- **project**: It is used to create, edit, delete, describe, and get the project. +- **sliceConfig**: It is used to create, edit, delete, describe, and get the slice. +- **serviceExportConfig**: It is used to create, edit, delete, describe, and get the service export. + +### Examples + +The following are the example commands: + +1. To describe the slice configuration, use the following command: + + ``` + kubeslice-cli describe sliceConfig -n + ``` + + Example + + ``` + kubeslice-cli describe sliceConfig slice-red -n kubeslice-demo + ``` + + Example Output + + ``` + Describing KubeSlice SliceConfig... 
+ 🏃 Running command: /usr/local/bin/kubectl describe sliceconfigs.controller.kubeslice.io slice-red -n kubeslice-demo + Name: slice-red + Namespace: kubeslice-demo + Labels: + Annotations: + API Version: controller.kubeslice.io/v1alpha1 + Kind: SliceConfig + Metadata: + Creation Timestamp: 2022-10-04T12:35:54Z + Finalizers: + controller.kubeslice.io/slice-configuration-finalizer + Generation: 1 + Managed Fields: + API Version: controller.kubeslice.io/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:clusters: + f:qosProfileDetails: + .: + f:bandwidthCeilingKbps: + f:bandwidthGuaranteedKbps: + f:dscpClass: + f:priority: + f:queueType: + f:tcType: + f:sliceGatewayProvider: + .: + f:sliceCaType: + f:sliceGatewayType: + f:sliceIpamType: + f:sliceSubnet: + f:sliceType: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-10-04T12:35:54Z + API Version: controller.kubeslice.io/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:finalizers: + .: + v:"controller.kubeslice.io/slice-configuration-finalizer": + Manager: manager + Operation: Update + Time: 2022-10-04T12:35:54Z + Resource Version: 60976 + UID: d7fa839e-9b05-4264-adc6-a061850d54d5 + Spec: + Clusters: + ks-w-1 + ks-w-2 + Namespace Isolation Profile: + Isolation Enabled: false + Qos Profile Details: + Bandwidth Ceiling Kbps: 30000 + Bandwidth Guaranteed Kbps: 20000 + Dscp Class: AF11 + Priority: 0 + Queue Type: HTB + Tc Type: BANDWIDTH_CONTROL + Slice Gateway Provider: + Slice Ca Type: Local + Slice Gateway Type: OpenVPN + Slice Ipam Type: Local + Slice Subnet: 10.190.0.0/16 + Slice Type: Application + Events: + ``` + +## edit + +Use this command to directly edit any KubeSlice resource you can retrieve through the command line tools. It opens the +editor defined by your KUBE_EDITOR, or EDITOR environment variables, or falls back to `vi` for Linux or `notepad` for Windows. +You can edit multiple objects, although changes are applied one at a time. The command accepts filenames as well as command line arguments, +although the files you point to must be previously saved versions of resources. The default format is YAML. In the event of an error +while updating, a temporary file is created on disk that contains your unapplied changes. The most common error when updating a +resource is another editor changing the resource on the server. When this occurs, apply your changes to the newer version +of the resource, or update your temporary saved copy to include the latest resource version. + +### Syntax + +``` +kubeslice-cli project --namespace +kubeslice-cli --namespace +``` + +### Options + +The following are the `kubeslice-cli edit` command options. + +| Option | Shorthand | Description | +| ----------- | --------- | ------------------------------------------------------------------------------ | +| --namespace | -n | It is the name of the project namespace on the KubeSlice Controller. | +| --filename | -f | It is the filename, directory, or URL of the file used to create the resource. | +| --config | -c | It is a **global** option. The path to the topology configuration YAML file. | +| --help | -h | It provides information on the edit command. | + +### Resource Types + +The following are the resource types used with create command: + +- **project**: It is used to create, edit, delete, describe, and get the project. +- **sliceConfig**: It is used to create, edit, delete, describe, and get the slice. 
+- **serviceExportConfig**: It is used to create, edit, delete, describe, and get the service export. + +### Examples + +The following are the example commands: + +1. To edit the slice configuration, use the following command: + ``` + kubeslice-cli edit sliceConfig -n + ``` + Example + ``` + kubeslice-cli edit sliceConfig blue -n kubeslice-demo + ``` +2. To edit the project details, use the following command: + ``` + kubeslice-cli edit project -n + ``` + Examples + ``` + kubeslice-cli edit project demo -n kubeslice-controller + kubeslice-cli edit project kubeslice-avesha -n kubeslice-controller + ``` +3. To edit the service export, use the following command: + ``` + kubeslice-cli edit serviceExportConfig --namespace + ``` + The `` is obtained using this command: + ``` + kubeslice-cli get serviceExportConfig -n + ``` + Example + ``` + kubeslice-cli get serviceExportConfig -n kubeslice-demo + ``` + Example Output + ``` + Fetching KubeSlice serviceExportConfig... + 🏃 Running command: /home/eric/bin/kubectl get serviceexportconfigs.controller.kubeslice.io -n kubeslice-demo + NAME AGE + iperf-server-iperf-worker-2 4d19h + ``` + Example + ``` + kubeslice-cli edit serviceExportConfig iperf-server-iperf-worker-2 -n kubeslice-demo + ``` + +## get + +Use this command to get one or more KubeSlice resources. For example, use the get command to get a slice, project, or +service export. + +### Syntax + +``` +kubeslice-cli project --namespace +kubeslice-cli --namespace +``` + +### Options + +The following are the `kubeslice-cli get` command options. + +| Option | Shorthand | Description | +| ----------- | --------- | ---------------------------------------------------------------------------- | +| --namespace | -n | It is the name of the project namespace on the KubeSlice Controller. | +| --output | -o | It supports json and yaml values. | +| --worker | -w | It is the worker cluster name. | +| --config | -c | It is a **global** option. The path to the topology configuration YAML file. | +| --help | -h | It provides information on the get command. | + +### Resource Types + +The following are the resource types used with the get command: + +- **project**: It is used to create, edit, delete, describe, and get the project. +- **sliceConfig**: It is used to create, edit, delete, describe, and get the slice. +- **serviceExportConfig**: It is used to create, edit, delete, describe, and get the service export. +- **secrets**: It is used to get the secrets of the registered worker clusters from the controller cluster. +- **ui-endpoint**: It is used to get the KubeSlice Manager URL. + +### Examples + +The following are the example commands: + +1. To get the worker details, use the following command: + + ``` + kubeslice-cli get worker -n + ``` + + Example + + ``` + kubeslice-cli get worker -n kubeslice-demo + ``` + + Example Output + + ``` + Fetching KubeSlice Worker... + 🏃 Running command: /usr/local/bin/kubectl get clusters.controller.kubeslice.io -n kubeslice-demo + NAME AGE + ks-w-1 8m13s + ks-w-2 8m13s + ``` + +2. To get a slice, use the following command: + + ``` + kubeslice-cli get sliceConfig -n + ``` + + Example + + ``` + kubeslice-cli get sliceConfig -n kubeslice-demo + ``` + + Example Output: + + ``` + Fetching KubeSlice sliceConfig... + 🏃 Running command: /usr/local/bin/kubectl get sliceconfigs.controller.kubeslice.io -n kubeslice-demo + NAME AGE + blue 27s + ``` + +3. 
To get the project details, use the following command: + + ``` + kubeslice-cli get project -n + ``` + + Example + + ``` + kubeslice-cli get project -n kubeslice-controller + ``` + + Example Output + + ``` + Fetching KubeSlice Project... + 🏃 Running command: /usr/local/bin/kubectl get projects.controller.kubeslice.io -n kubeslice-controller + NAME AGE + demo 7m6s + ``` + +4. To get the list of secrets that belong to project namespace, use the following command: + + ``` + kubeslice-cli get secrets -n kubeslice-demo + ``` + + Example 1 + + ``` + kubeslice-cli get secrets -n kubeslice-demo + ``` + + Example Output + + ``` + Fetching KubeSlice secret... + 🏃 Running command: /usr/local/bin/kubectl get secrets -n kubeslice-demo + NAME TYPE DATA AGE + default-token-6qz9w kubernetes.io/service-account-token 3 73m + demo-ks-w-1-ks-w-2 Opaque 7 69m + demo-ks-w-2-ks-w-1 Opaque 1 69m + kubeslice-rbac-rw-john-token-smmfj kubernetes.io/service-account-token 3 73m + kubeslice-rbac-worker-ks-w-1-token-rn7dl kubernetes.io/service-account-token 5 73m + kubeslice-rbac-worker-ks-w-2-token-qld6t kubernetes.io/service-account-token 5 73m + ``` + + Example 2 + + Use the following command to get the details of secrets in the yaml file. + + ``` + kubeslice-cli get secrets -n kubeslice-demo -o yaml + ``` + +5. To get the service export, use the following command: + ``` + kubeslice-cli get serviceExportConfig -n + ``` + Example + ``` + kubeslice-cli get serviceExportConfig -n kubeslice-demo + ``` + Example Output + ``` + Fetching KubeSlice serviceExportConfig... + 🏃 Running command: /usr/local/bin/kubectl get serviceexportconfigs.controller.kubeslice.io -n kubeslice-demo + NAME AGE + iperf-server-iperf-worker-2 4d19h + ``` + +## install + +Use this command to install the required workloads to run KubeSlice Controller and worker clusters. + +### Syntax + +``` +kubeslice-cli --config +kubeslice-cli install +kubeslice-cli install --profile= +``` + +### Options + +The following are the `kubeslice-cli install` command options. + +| Option | Shorthand | Description | Supported Values | +| ------------------- | --------- | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| --profile | -p | It is the profile to install or uninstall KubeSlice. | **full-demo**: showcases the KubeSlice inter-cluster connectivity by spawning 3 kind clusters, including 1 KubeSlice Controller and 2 worker clusters, and installing iPerf application to generate network traffic. **minimal-demo**: sets up 3 kind clusters, including 1 KubeSlice Controller and 2 worker clusters. Generates the Kubernetes manifests for user to manually apply, and verify the functionality. **enterprise-demo**: showcases the KubeSlice Enterprise functionality by spawning 3 kind clusters, including 1 KubeSlice Controller and 2 KubeSlice Workers. Installs the Enterprise charts for the controller, the worker(s), the KubeSlice Manager (UI), and iPerf application to generate network traffic. 
The imagePullSecrets (username and password) listed must be set as environment variables: **KUBESLICE_IMAGE_PULL_USERNAME**: `optional`, the default value is `aveshaenterprise`. **KUBESLICE_IMAGE_PULL_PASSWORD**: `required`. | +| --skip | -s | It skips the installation steps (comma-separated) with `minimal-demo`, `full-demo`, and `enterprise-demo` options. | **kind**: skips the creation of kind clusters. **calico**: skips the installation of Calico. **controller**: skips the installation of KubeSlice Controller. **worker-registration**: skips the registration of worker clusters with the KubeSlice Controller. **worker**: skips the installation of worker clusters. **demo**: skips the installation of additional example applications. **prometheus**: skips the installation of the Prometheus. | +| --config | -c | It is a **global** option. The path to the topology configuration YAML file. | +| --with-cert-manager | | It installs the cert-manager charts. **This is required for the KubeSlice Controller v0.7.0 and below versions.** | +| --help | -h | It provides information on the install command. | + +:::info +The global option **--config** and install command option **--profile** are mutually exclusive and cannot be used together. +::: + +### Examples + +The following are the example commands: + +1. Use the following command to create the kind clusters, install the KubeSlice Controller and worker clusters. + + ``` + kubectl-cli install --profile=minimal-demo + ``` + +2. To install the KubeSlice using custom topology file, use the following command: + ``` + kubeslice-cli --config install + ``` + +## register + +Use this command to register a new worker cluster with the KubeSlice Controller in an existing multi-cluster setup. + +### Syntax + +``` +kubeslice-cli register worker --namespace +kubeslice-cli --config [options] +``` + +### Options + +The following are the `kubeslice-cli register` command options. + +| Option | Shorthand | Description | +| ----------- | --------- | ------------------------------------------------------------------------------ | +| --filename | -f | It is the filename, directory, or URL of the file used to create the resource. | +| --namespace | -n | It is the name of the project namespace on the KubeSlice Controller. | +| --config | -c | It is a **global** option. The path to the topology configuration YAML file. | +| --help | -h | It provides information on the register command. | + +### Resource Type + +The **worker** is the resource type used with the register command. + +### Examples + +The following are the example commands: +:::info +You must switch the context to the controller cluster to register the worker cluster. +::: + +1. To register a new worker cluster with the KubeSlice Controller in demo setup, use the following command: + + ``` + kubeslice-cli register worker -n kubeslice-demo + ``` + + Example + + ``` + kubeslice-cli register worker kind-ks-w-4 -n kubeslice-demo + ``` + + Example Output + + ``` + Registering Worker Clusters with Project... + ✔ Generated cluster registration manifest kubeslice/custom-cluster-registration.yaml + 🏃 Running command: /usr/local/bin/kubectl apply -f kubeslice/custom-cluster-registration.yaml -n kubeslice-demo + ✔ Applied kubeslice/custom-cluster-registration.yaml + Registered Worker Clusters with Project. + ``` + +2. 
To register a new worker cluster with the KubeSlice Controller in an existing multi-cluster setup, use the following command: + :::info + Add a new worker information in the same custom topology file that you used to install KubeSlice. The **-s controller** + option skips the installation of KubeSlice Controller. + ::: + + ``` + kubeslice-cli --config= install -s controller + ``` + +## uninstall + +Use this command to uninstall all the KubeSlice components on kind and cloud clusters. + +### Syntax + +``` +kubeslice-cli +``` + +### Options + +The following are the `kubeslice-cli uninstall` options + +| Option | Shorthand | Description | +| -------------- | --------- | ------------------------------------------------------------------------------------ | +| --config | -c | It is a **global** option. The path to the topology configuration YAML file. | +| --help | -h | It provides information on the delete command. | +| --all | -a | Uninstalls all the KubeSlice components (worker, controller, and Kubeslice Manager). | +| --ui | -u | Uninstalls the enterprise user interface component (Kubeslice Manager). | +| --cert-manager | | It uninstalls the cert-manager charts. | + +### Examples + +The following is an example command: + +1. To uninstall the KubeSlice components on kind clusters and delete the kind clusters created using `full-demo|minimal-demo` option, + use the following command: + + ``` + kubeslice-cli uninstall + ``` + +2. To uninstall the KubeSlice components that were installed using a custom topology file on cloud clusters, use the following command: + + :::info + KubeSlice must be uninstalled using the topology file that was used to install it on cloud clusters. + ::: + + ``` + kubeslice-cli uninstall --config= --all + ``` diff --git a/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/install-kubeslice-cli.mdx b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/install-kubeslice-cli.mdx new file mode 100644 index 00000000..2ba69921 --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/install-kubeslice-cli.mdx @@ -0,0 +1,97 @@ +# Install kubeslice-cli + +## Introduction +kubeslice-cli is a command-line tool that allows you to perform KubeSlice operations on Kubernetes and cloud clusters. +It simplifies the process to install and uninstall the workloads needed to run KubeSlice Controller and Slice Operator in the specified clusters. +This tool registers those clusters as part of a KubeSlice multi-cluster, and administer slices across those clusters. + +The tool takes input in the form of YAML files that describe the membership of clusters in the multi-cluster (the `topology` YAML file) +as well as the definition of slices across those clusters (the `slice configuration` YAML file). Given those YAML files, it simplifies +installation of KubeSlice by performing all of the necessary configuration steps to apply the appropriate config to each cluster. +You can install KubeSlice on your existing kind or cloud clusters. + +Alternatively, to get familiar with KubeSlice, you can create a multicluster using kind clusters. Use the +`kubeslice-cli install —profile` option to create a demo setup consisting of one controller and two worker clusters. + +## Install the kubeslice-cli Tool +To install the `kubeslice-cli` tool on different operating systems, download an executable version from the +[Releases](https://github.com/kubeslice/kubeslice-cli/releases) page. + +### Install kubeslice-cli on Windows + +To install kubeslice-cli: + +1. 
Download the latest Windows version from the [Releases](https://github.com/kubeslice/kubeslice-cli/releases) page.
+   After downloading the executable file, rename it to **kubeslice-cli.exe** for ease of use.
+
+2. Navigate to the directory where you have downloaded the executable file or provide the absolute path to run the `kubeslice-cli` command.
+
+3. Ensure the downloaded version is the latest using the following command:
+   ```
+   .\kubeslice-cli.exe --version
+   ```
+### Install kubeslice-cli on Linux
+
+To install kubeslice-cli on Linux or Ubuntu:
+
+1. Download the latest Linux version from the [Releases](https://github.com/kubeslice/kubeslice-cli/releases) page.
+
+2. Create a symlink to access the file:
+   :::info
+   A symlink is a symbolic Linux/UNIX link that points to another file or folder on your computer, or a connected file system.
+   :::
+   ```
+   ln -s <path-to-the-downloaded-binary> ~/bin/kubeslice-cli
+   ```
+3. Ensure the downloaded version is the latest using the following command:
+   ```
+   kubeslice-cli --version
+   ```
+
+### Install kubeslice-cli on macOS
+
+To install kubeslice-cli:
+
+1. Download the latest macOS version from the [Releases](https://github.com/kubeslice/kubeslice-cli/releases) page.
+
+2. (Only if you see an error) When you try to install kubeslice-cli on macOS, you may get the **Unverified Developer Error Message**.
+   This error message appears when you try to install an application from a developer who is not registered with Apple.
+
+   ![mac](/images/kubeslice-cli/mac-download.png)
+
+   To fix the `Unverified developer error message`, follow the instructions
+   in [enabling the application for macOS](https://www.alphr.com/cannot-be-opened-because-the-developer-cannot-be-verified/).
+
+3. Create a symlink to access the file:
+   :::info
+   A symlink is a symbolic Linux/UNIX link that points to another file or folder on your computer, or a connected file system.
+   :::
+   ```
+   ln -s <path-to-the-downloaded-binary> ~/bin/kubeslice-cli
+   ```
+4. Ensure the downloaded version is up-to-date using the following command:
+   ```
+   kubeslice-cli --version
+   ```
+
+### Download kubeslice-cli using cURL
+
+See the [Releases](https://github.com/kubeslice/kubeslice-cli/releases) page for the latest version. Use the following command to
+download the latest binary (~6 MB), replacing `<version>` with that version:
+
+```
+sudo curl -fL https://github.com/kubeslice/kubeslice-cli/releases/download/<version>/kubeslice-cli-<version>-linux-amd64 -o /usr/local/bin/kubeslice-cli
+```
+
+Example
+```
+sudo curl -fL https://github.com/kubeslice/kubeslice-cli/releases/download/0.5.4/kubeslice-cli-0.5.4-linux-amd64 -o /usr/local/bin/kubeslice-cli
+```
+Make the binary executable using the following command:
+```
+sudo chmod a+x /usr/local/bin/kubeslice-cli
+```
+Ensure the downloaded version is the latest using the following command:
+```
+kubeslice-cli -v
+```
\ No newline at end of file
diff --git a/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/install-kubeslice.mdx b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/install-kubeslice.mdx new file mode 100644 index 00000000..03e7fc9d --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/install-kubeslice.mdx @@ -0,0 +1,66 @@
+# Install KubeSlice
+kubeslice-cli is a command-line tool designed to simplify KubeSlice operations on Kubernetes
+and cloud clusters. With this tool, you can easily install the necessary workloads to run the
+KubeSlice Controller and Slice Operator on specific clusters. Additionally, it registers these
+clusters as part of a KubeSlice multi-cluster and manages slices across them.
+ +To use the tool, you provide input in the form of YAML files. The topology YAML file describes +the cluster membership in the multi-cluster. + +## Create Topology YAML file +To install KubeSlice, you need to create a topology configuration file that specifies the +controller cluster and the worker cluster names. The file should include other relevant details +as described in the sample configuration file provided below. This file is essential in setting +up KubeSlice. + +Custom chart values can be specified in the topology configuration. For example, you can add `values:metrics.insecure=true` under +**configuration.helm_chart_configuration.worker_chart** for worker charts. +``` +worker_chart: + chart_name: kubeslice-worker + values: + "metrics.insecure": "true" + "some.other.key": "value" +``` + +### Sample Topology Configuration File +The following a minimal configuration file to install KubeSlice Enterprise on cloud clusters: + +Create the topology configuration file using the following template: +```yaml +configuration: + cluster_configuration: + kube_config_path: + controller: + name: + context_name: + workers: + - name: + context_name: + - name: + context_name: + kubeslice_configuration: + project_name: + helm_chart_configuration: + repo_alias: kubeslice + repo_url: https://kubeslice.aveshalabs.io/repository/kubeslice-helm-ent-prod/ + cert_manager_chart: + chart_name: cert-manager + controller_chart: + chart_name: kubeslice-controller + worker_chart: + chart_name: kubeslice-worker +``` + +Custom chart values can be specified in the topology configuration. See [Topology Configuration Parameters](topology-configuration). + +## Apply the Topology Configuration YAML +To install KubeSlice using the topology YAML file, use the following command: + +``` +kubeslice-cli --config install +``` + +:::success +You have successfully installed KubeSlice. +::: \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/topology-configuration.mdx b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/topology-configuration.mdx new file mode 100644 index 00000000..fa12f392 --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/topology-configuration.mdx @@ -0,0 +1,199 @@ +# Topology Configuration Parameters + +In this topic, you will find a description of the parameters required to create the topology +YAML file for installing KubeSlice. + +## Cluster Configuration Parameters +The following table describes the cluster configuration parameters used to create the topology YAML. + +| Parameter | Parameter Type | Description | Required | +| ---------------------------------------------------------- | -------------- | ----------------------------------------------------------------------------------------------------------------- | --------- | +| profile | String | The KubeSlice profile to create demo clusters. The supported values are **minimal-demo** and **full-demo**. | Mandatory | +| kube_config_path | String | The absolute path to the `kubeconfig` file to use topology setup. It is used in the topology YAML file. | Optional | +| cluster_type | String | The type of cluster on which KubeSlice is deployed. The acceptable values are `cloud`, `kind`, and `data-center`. | Optional | +| [controller](#controller-cluster-configuration-parameters) | Object | The KubeSlice Controller cluster details. | Mandatory | +| [worker](#worker-cluster-configuration-parameters) | Object | The KubeSlice Worker clusters details. 
| Mandatory | + +### Controller Cluster Configuration Parameters +The following table describes the controller cluster configuration parameters used to create the topology YAML. + +| Parameter | Parameter Type | Description | Required | +| --------------------- | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| name | String | The user-defined name of the controller cluster. The maximum allowed number of characters is 30. Provide the name as defined in [RFC 1123](#https://datatracker.ietf.org/doc/html/rfc1123). To know more, see [Kubernetes Object Names and IDs](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/). | Mandatory | +| context_name | String | The name of the context to use from the kubeconfig file; It is used in the topology YAML file. | Optional | +| kube_config_path | String | The absolute path to the kubeconfig file to install controller cluster. This takes precedence over the kubeconfig path defined under cluster configuration. | Optional | +| control_plane_address | AlphaNumeric | The address of the control plane kube-apiserver. The kubeslice-cli determines the address from kubeconfig. Override this option if the address in the kubeconfig is not reachable by other clusters in the topology. | Mandatory | +| node_ip | Numeric | The IP address of one of the node in the cluster. The kubeslice-cli determines the address from this command:`kubectl get nodes`. Override this option to an address which is discoverable by other clusters in the topology. | Mandatory | + +### Worker Cluster Configuration Parameters + +The following table describes the worker cluster configuration parameters. + +| Parameter | Parameter Type | Description | Required | +| --------------------- | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| name | String | The user-defined name of the worker cluster. The maximum allowed number of characters is 30. Provide the name as defined in [RFC 1123](#https://datatracker.ietf.org/doc/html/rfc1123). To know more, see [Kubernetes Object Names and IDs](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/). | Mandatory | +| context_name | String | The name of the context to use from kubeconfig file; It is used in the topology YAML only. Use the short alias name. For example, you can use `worker-cluster` or `api-cluster` as the cluster contexts. | Optional | +| kube_config_path | String | The absolute path to the kubeconfig file to install a worker cluster. This takes precedence over the kubeconfig path defined under cluster configuration. | Optional | +| control_plane_address | AlphaNumeric | The address of the control plane `kube-apiserver`. The kubeslice-cli determines the address from kubeconfig. Override this option if the address in the kubeconfig is not reachable by other clusters in the topology. | Mandatory | +| node_ip | Numeric | The IP address of one of the node in the cluster. The kubeslice-cli determines the address from this command:`kubectl get nodes`. Override this option to an address which is discoverable by other clusters in the topology. 
| Mandatory | + +## KubeSlice Configuration Parameters +The following table describes the KubeSlice configuration parameters. + +| Parameter | Parameter Type | Description | Required | +| ------------- | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| project_name | String | the user-defined name of the Kubeslice project. The maximum allowed number of characters is 30. Provide the name as defined in [RFC 1123](#https://datatracker.ietf.org/doc/html/rfc1123). To know more, see [Kubernetes Object Names and IDs](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/). | Mandatory | +| project_users | String | This contains the list of KubeSlice project users to be created. The users will have read/write privileges. By default, admin user is created. | Optional | + +## Helm Chart Configuration Parameters +The following table describes the helm chart configuration parameters. + +| Parameter | Parameter Type | Description | Required | +| ------------------------------------------------------------------------- | -------------- | ---------------------------------------------------------------------------------------------- | --------- | +| repo_alias | String | The alias of the helm repo for KubeSlice charts. | Optional | +| repo_url | String | The URL of the helm charts for KubeSlice. | Mandatory | +| [cert_manager_chart](#certificate-manager-chart-configuration-parameters) | Object | The cert-manager chart details. | Mandatory | +| [controller_chart](#controller-chart-configuration-parameters) | Object | The Kubeslice Controller chart details. | Mandatory | +| [worker_chart](#worker-chart-configuration-parameters) | Object | The KubeSlice Worker chart details. | Mandatory | +| [ui_chart](#ui-chart-configuration-parameters) | Object | The KubeSlice Manager chart details. This object is only required for enterprise charts. | Optional | +| [prometheus_chart](#prometheus-chart-configuration-parameters) | Object | The Prometheus chart details. This object is only required for enterprise charts. | Optional | +| helm_username | AlphaNumeric | The helm username if the repo is private. | Optional | +| helm_password | AlphaNumeric | The helm password if the repo is private. | Optional | +| [image_pull_secret](#image-pull-configuration-parameters) | Object | The image pull secrets to pull the Kubernetes dashboard image. This object is only required for enterprise charts. | Mandatory | + +## Certificate Manager Chart Configuration Parameters +The following table describes the certificate manager chart configuration parameters used to create topology YAML. + +| Parameter | Parameter Type | Description | Required | +| ---------- | -------------- | ------------------------------------------------------------------------------------- | --------- | +| chart_name | String | The name of the cert-manager chart. | Mandatory | +| version | Numeric | The version of the cert-manager chart. Leave this value blank for the latest version. | Optional | + +## Controller Chart Configuration Parameters +The following table describes the controller chart configuration parameters used to create topology YAML. + +| Parameter | Parameter Type | Description | Required | +| ---------- | -------------- | -------------------------------------------------------------------------------- | --------- | +| chart_name | String | The name of the controller chart. 
| Mandatory | +| version | Numeric | The version of the chart. Leave this value blank for the latest version. | Optional | +| values | String | The additional value that can be passed as --set arguments for **helm install**. | Optional | + +## Worker Chart Configuration Parameters +The following table describes the worker chart configuration parameters used to create topology YAML. + +| Parameter | Parameter Type | Description | Required | +| ---------- | -------------- | -------------------------------------------------------------------------------- | --------- | +| chart_name | String | The name of the worker chart. | Mandatory | +| version | Numeric | The version of the chart. Leave this value blank for the latest version. | Optional | +| values | String | The additional value that can be passed as --set arguments for **helm install**. | Optional | + +## UI Chart Configuration Parameters +The following table describes the UI chart configuration parameters used to create topology YAML. +These parameters are required for the `enterprise-demo` option only. + +| Parameter | Parameter Type | Description | Required | +| ---------- | -------------- | -------------------------------------------------------------------------------- | --------- | +| chart_name | String | The name of the UI or enterprise chart. | Mandatory | +| version | Numeric | The version of the chart. Leave it blank for the latest version. | Optional | +| values | String | The additional value that can be passed as --set arguments for **helm install**. | Optional | + +## Prometheus Chart Configuration parameters +The following table describes the Prometheus chart configuration parameters used to create topology YAML. +These parameters are required for the `enterprise-demo` option only. + +| Parameter | Parameter Type | Description | Required | +| ---------- | -------------- | -------------------------------------------------------------------------------- | --------- | +| chart_name | String | The name of the Prometheus chart. | Mandatory | +| version | Numeric | The version of the chart. Leave it blank for the latest version. | Optional | +| values | String | The additional value that can be passed as --set arguments for **helm install**. | Optional | + +## Image Pull Secret Configuration Parameters +The following table describes the image pull secret configuration parameters used to create topology YAML. +These parameters are required for the `enterprise-demo` option only. + +| Parameter | Parameter Type | Description | Required | +| --------- | -------------- | ------------------------------------------------------ | --------- | +| registry | String | The endpoint of the OCI registry to use. | Mandatory | +| username | AlphaNumeric | The username to authenticate against the OCI registry. | Mandatory | +| password | AlphaNumeric | The password to authenticate against the OCI registry. | Mandatory | +| email | AlphaNumeric | The email to authenticate against the OCI registry. | Mandatory | + + + +:::note +The 'kube_config_path' parameter in the topology configuration requires the absolute path to the kubeconfig file. +The 'kube_config_path' parameter must be set at the top-level, that is under **cluster_configuration**,' or under +the **controller** and **workers** configuration. +::: + +### Complete Topology Template +The following template can be used to create a topology YAML file for installing KubeSlice. +This is a complete list of all the values that can be passed in a topology file. 
+ +```yaml +configuration: + cluster_configuration: + profile: #{the KubeSlice Profile for the demo. Possible values [full-demo, minimal-demo]} + kube_config_path: #{specify the kube config file to use for topology setup; for topology only} + cluster_type: #{optional: specify the type of cluster. Valid values are kind, cloud, data-center} + controller: + name: #{the user defined name of the controller cluster} + context_name: #{the name of the context to use from kubeconfig file; for topology only} + kube_config_path:#{the path to kube config file to use for controller installation; for topology only.} + #{This takes precedence over configuration.cluster_configuration.kube_config_path} + control_plane_address:#{the address of the control plane kube-apiserver. kubeslice-cli determines the address from kubeconfig} + #{Override this flag if the address in kubeconfig is not reachable by other clusters in topology} + node_ip:#{the IP address of one of the node in this cluster. kubeslice-cli determines this address from kubectl get nodes} + #{Override this flag to an address which is discoverable by other clusters in the topology} + workers: #{specify the list of worker clusters} + - name: #{the user defined name of the worker cluster} + context_name: #{the name of the context to use from the kubeconfig file; for topology only} + kube_config_path:#{the path to kube config file to use for worker installation; for topology only.} + #{This takes precedence over configuration.cluster_configuration.kube_config_path} + control_plane_address:#{the address of the control plane kube-apiserver. kubeslice-cli determines the address from kubeconfig} + #{Override this flag if the address in kubeconfig is not reachable by other clusters in topology} + node_ip:#{the IP address of one of the node in this cluster. kubeslice-cli determines this address from kubectl get nodes} + #{Override this flag to an address which is discoverable by other clusters in the topology} + - name: #{the user defined name of the worker cluster} + context_name: #{the name of the context to use from the kubeconfig file; for topology only} + kube_config_path:#{the path to kube config file to use for worker installation; for topology only.} + #{This takes precedence over configuration.cluster_configuration.kube_config_path} + control_plane_address:#{the address of the control plane kube-apiserver. kubeslice-cli determines the address from kubeconfig} + #{Override this flag if the address in kubeconfig is not reachable by other clusters in topology} + node_ip:#{the IP address of one of the node in this cluster. kubeslice-cli determines this address from kubectl get nodes} + #{Override this flag to an address which is discoverable by other clusters in the topology} + kubeslice_configuration: + project_name: #{the name of the KubeSlice Project} + project_users: #{optional: specify KubeSlice Project users with Read-Write access. Default is admin} + helm_chart_configuration: + repo_alias: #{The alias of the helm repo for KubeSlice Charts} + repo_url: #{The URL of the Helm Charts for KubeSlice} + cert_manager_chart: + chart_name: #{The name of the Cert Manager Chart} + version: #{The version of the chart to use. Leave blank for latest version} + controller_chart: + chart_name: #{The name of the Controller Chart} + version: #{The version of the chart to use. 
Leave blank for latest version} + values: #(Values to be passed as --set arguments to helm install) + worker_chart: + chart_name: #{The name of the Worker Chart} + version: #{The version of the chart to use. Leave blank for latest version} + values: #(Values to be passed as --set arguments to helm install) + + # The ui_chart, prometheus_chart, and image_pull_secret parameters are required for the enterprise-demo option (enterprise-chart) only. + ui_chart: + chart_name: #{The name of the UI/Enterprise Chart} + version: #{The version of the chart to use. Leave blank for latest version} + values: #(Values to be passed as --set arguments to helm install) + prometheus_chart: + chart_name: #{The name of the Prometheus Chart} + version: #{The version of the chart to use. Leave blank for latest version} + values: #(Values to be passed as --set arguments to helm install) + helm_username: #{Helm Username if the repo is private} + helm_password: #{Helm Password if the repo is private} + image_pull_secret: #{The image pull secrets. Optional for OpenSource, required for enterprise} + registry: #{The endpoint of the OCI registry to use. Default is `https://index.docker.io/v1/`} + username: #{The username to authenticate against the OCI registry} + password: #{The password to authenticate against the OCI registry} + email: #{The email to authenticate against the OCI registry} +``` \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/uninstall-kubeslice.mdx b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/uninstall-kubeslice.mdx new file mode 100644 index 00000000..102eb0fd --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/uninstall-kubeslice.mdx @@ -0,0 +1,62 @@ +# Uninstall KubeSlice + +This topic describes how to uninstall KubeSlice. Follow these steps to uninstall the KubeSlice Controller: + +- Offboard all namespaces from the slice +- Delete a slice +- Deregister the worker cluster +- Delete a project +- Uninstall the KubeSlice Controller + +:::info +The order of operations in uninstalling the KubeSlice Controller is important to ensure all resources are cleaned up properly. Follow the +order of operations provided under this topic. +::: + +## Offboard Namespaces +To offboard the namespaces from slice, you must first delete the serviceexport for each application. For more information, see +[offboard Namespaces](/versioned_docs/version-1.1.0/uninstall-kubeslice/uninstall-kubeslice.mdx#offboard-application-namespaces). + + +## Delete a Slice +Before deleting the slice, you must offboard all namespaces. Failure to do so may result in slices and resources not being properly cleaned up. + +Use the following command to delete the slice: +``` +kubeslice-cli delete sliceConfig -n +``` + +## Deregister the Worker Cluster +:::caution +Before deregistering the worker cluster, ensure you have offboarded all namespaces and deleted the slice. +::: + +You must first uninstall the Slice Operator from the worker cluster before deregistering it from the KubeSlice Controller. To uninstall +the Slice Operator, follow the instructions in the [uninstall the Slice Operator](/versioned_docs/version-1.1.0/uninstall-kubeslice/uninstall-kubeslice.mdx#uninstall-the-slice-operator) +section. + +Use the following command to delete a registered worker cluster: +``` +kubeslice-cli delete worker -n +``` + +## Delete a Project +:::caution +Before deleting a project, ensure all namespaces have been offboarded and the slice has been deleted. 
+:::
+
+Delete a project after deregistering the worker cluster. The service account, namespaces, clusters, secrets, certificates, and tokens
+are all deleted when the project is deleted from the KubeSlice Controller.
+
+Use the following command to delete the project:
+```
+kubeslice-cli delete project -n
+```
+
+## Uninstall the KubeSlice Controller
+:::caution
+Before uninstalling the KubeSlice Controller, ensure that all namespaces have been offboarded, the slice(s) have been deleted, the worker clusters have been deregistered, and all projects have been deleted.
+:::
+
+After deleting all the projects, uninstall the KubeSlice Controller. To uninstall the KubeSlice Controller, see
+[uninstall the KubeSlice Controller](/versioned_docs/version-1.1.0/uninstall-kubeslice/uninstall-kubeslice.mdx#uninstall-the-kubeslice-controller).
diff --git a/versioned_docs/version-1.2.0/install-kubeslice/yaml/events/yaml-events-controller-events.mdx b/versioned_docs/version-1.2.0/install-kubeslice/yaml/events/yaml-events-controller-events.mdx
new file mode 100644
index 00000000..e24ea6f8
--- /dev/null
+++ b/versioned_docs/version-1.2.0/install-kubeslice/yaml/events/yaml-events-controller-events.mdx
@@ -0,0 +1,375 @@
+# Controller Events
+The KubeSlice Controller generates events that represent operations on the controller
+cluster. All of these events are specific to KubeSlice operations.
+
+By default, the KubeSlice Controller logs the events in the following namespaces:
+
+- [kubeslice-controller](#kubeslice-controller-namespace-events)
+- [kubeslice-project](#kubeslice-project-namespace-events)
+
+## kubeslice-controller Namespace Events
+This section describes the events that are generated on the `kubeslice-controller` namespace.
+Use the following command to get the `kubeslice-controller` namespace events:
+
+```
+kubectl get events -n kubeslice-controller
+```
+
+### Project
+These events are related to the KubeSlice projects that you create on the controller cluster.
+
+| Sl. No | Event | Type | Description |
+| ------ | --------------------- | ------- | -------------------------------- |
+| 1 | ProjectDeleted | WARNING | The project has been deleted. |
+| 2 | ProjectDeletionFailed | WARNING | The project deletion has failed. |
+
+### Namespace
+These events are related to the project namespace in the controller cluster.
+
+| Sl. No | Event | Type | Description |
+| ------ | ----------------------- | ------- | ---------------------------------- |
+| 1 | NamespaceCreated | NORMAL | The namespace has been created. |
+| 2 | NamespaceCreationFailed | WARNING | The namespace creation has failed. |
+| 3 | NamespaceDeleted | WARNING | The namespace has been deleted. |
+| 4 | NamespaceDeletionFailed | WARNING | The namespace deletion has failed. |
+
+### Licensing
+
+These events are related to the KubeSlice license in the controller cluster.
+
+| Sl. No | Event | Type | Description |
+| ------ | -------------------------------- | ------- | --------------------------------------------------------------------------------------- |
+| 1 | ClusterMetadataCollectionFailed | WARNING | The automatic license creation has failed. The cluster details could not be collected. |
+| 2 | ClusterMetadataCollectionSuccess | NORMAL | The cluster details have been collected. |
+| 3 | ConfigMapErr | WARNING | The license is invalid. Please contact Avesha Support at support@avesha.io. |
+| 4 | GotConfigMap | NORMAL | The ConfigMap and KubeSlice-license public keys have been collected.
| +| 5 | GotMachineFingerPrint | NORMAL | The Machine Key has been collected. | +| 6 | LicenseDataFetchError | WARNING | The automatic license creation has failed. Please contact Avesha Support at support@avesha.io. | +| 7 | LicenseDataFetchSuccess | NORMAL | The license is received from Avesha. | +| 8 | LicenseExpired | WARNING | The license has expired. You cannot make any changes. Please contact Avesha Support at support@avesha.io. | +| 9 | LicenseExpiredGracePeriodOn | WARNING | The license has expired. Please contact Avesha Support at support@avesha.io. | +| 10 | LicenseKeyInvalid | WARNING | The license is invalid. Please contact Avesha Support at support@avesha.io. | +| 11 | LicenseProxyUnreachable | WARNING | The automatic license creation has failed. You are unable to reach the license server. Please contact Avesha Support at support@avesha.io. | +| 12 | LicenseSecretCreationFailed | WARNING | The automatic license creation has failed. Please contact Avesha Support at support@avesha.io. | +| 13 | LicenseSecretCreationSuccess | NORMAL | The license has been created. | +| 14 | LicenseSecretNotFound | WARNING | The license is not found. | +| 15 | MachineFingerPrintErr | WARNING | The Machine Key could not be collected. | +| 16 | MachineFileNotFound | WARNING | The license is invalid. Please contact Avesha Support at support@avesha.io. | +| 17 | MachineFileInvalid | WARNING | The license is invalid. Please contact Avesha Support at support@avesha.io. | + +## kubeslice-project Namespace Events +This section describes the events that are generated on the `kubeslice-` namespace. +Use the following command to get the `kubeslice-` namespace events: + +``` +kubectl get events -n kubeslice- +``` + +### Access Control +These events are associated with Kubernetes RBAC on the controller cluster. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------------ | ------- | ------------------------------------------------- | +| 1 | WorkerClusterRoleCreated | NORMAL | The worker cluster role has been created. | +| 2 | WorkerClusterRoleCreationFailed | WARNING | The worker cluster role creation has failed. | +| 3 | WorkerClusterRoleUpdated | NORMAL | The worker cluster role has been updated. | +| 4 | WorkerClusterRoleUpdateFailed | WARNING | The worker cluster role update has failed. | +| 5 | ReadOnlyRoleCreated | NORMAL | The read-only role has been created. | +| 6 | ReadOnlyRoleCreationFailed | WARNING | The read-only role creation has failed. | +| 7 | ReadOnlyRoleUpdated | NORMAL | The read-only role has been updated. | +| 8 | ReadOnlyRoleUpdateFailed | WARNING | The read-only role update has failed. | +| 9 | ReadWriteRoleCreated | NORMAL | The read-write role has been created. | +| 10 | ReadWriteRoleCreationFailed | WARNING | The read-write role creation has failed. | +| 11 | ReadWriteRoleUpdated | NORMAL | The read-write role has been updated. | +| 12 | ReadWriteRoleUpdateFailed | WARNING | The read-write role update has failed. | +| 13 | ServiceAccountCreated | NORMAL | The service account has been created. | +| 14 | ServiceAccountCreationFailed | WARNING | The service account creation has failed. | +| 15 | ServiceAccountSecretCreated | NORMAL | The service account secret has been updated. | +| 16 | ServiceAccountSecretCreationFailed | WARNING | The service account secret creation has failed. | +| 17 | DefaultRoleBindingCreated | NORMAL | The default role binding has been created. 
| 18 | DefaultRoleBindingCreationFailed | WARNING | The default role binding creation has failed. |
+| 19 | DefaultRoleBindingUpdated | NORMAL | The default role binding has been updated. |
+| 20 | DefaultRoleBindingDeleted | WARNING | The default role binding has been deleted. |
+| 21 | InactiveRoleBindingDeleted | WARNING | The inactive role binding has been deleted. |
+| 22 | InactiveRoleBindingDeletionFailed | WARNING | The inactive role binding deletion has failed. |
+| 23 | InactiveServiceAccountDeleted | WARNING | The inactive service account has been deleted. |
+| 24 | InactiveServiceAccountDeletionFailed | WARNING | The inactive service account deletion has failed. |
+| 25 | ServiceAccountDeleted | WARNING | The service account has been deleted. |
+| 26 | ServiceAccountDeletionFailed | WARNING | The service account deletion has failed. |
+| 27 | SecretDeleted | WARNING | The service account secret has been deleted. |
+| 28 | SecretDeletionFailed | WARNING | The service account secret deletion has failed. |
+
+### Cluster
+
+The following events are associated with worker cluster deletion.
+
+| Sl. No | Event | Type | Description |
+| ------ | --------------------- | ------- | ------------------------------------------------------ |
+| 1 | ClusterDeleted | WARNING | The worker cluster has been deleted from KubeSlice. |
+| 2 | ClusterDeletionFailed | WARNING | The worker cluster deletion from KubeSlice has failed. |
+
+### ServiceExportConfig
+These events are related to the [service export](../slice-operations/slice-operations-slice-creation#serviceexports-and-serviceimports)
+of a worker cluster.
+
+| Sl. No | Event | Type | Description |
+| ------ | --------------------------------- | ------- | ------------------------------------------------------ |
+| 1 | ServiceExportConfigDeleted | WARNING | The service export configuration has been deleted. |
+| 2 | ServiceExportConfigDeletionFailed | WARNING | The service export configuration deletion has failed. |
+
+### SliceConfig
+These events are related to slice deletion.
+
+| Sl. No | Event | Type | Description |
+| ------ | ------------------------- | ------- | ------------------------------ |
+| 1 | SliceConfigDeleted | WARNING | The slice has been deleted. |
+| 2 | SliceConfigDeletionFailed | WARNING | The slice deletion has failed. |
+
+### SliceNodeAffinity
+These events are related to the slice node affinity.
+
+| Sl. No | Event | Type | Description |
+| ------ | ------------------------------------------------- | ------- | ---------------------------------------------------------------------------------- |
+| 1 | SliceNodeAffinityDeletionFailed | WARNING | The slice node affinity deletion has failed. |
+| 2 | SliceNodeAffinityDeleted | WARNING | The slice node affinity has been deleted. |
+| 3 | SetSliceConfigAsOwnerOfSliceNodeAffinityFailed | WARNING | The slice configuration could not be set as the owner of the slice node affinity. |
+| 4 | SetSliceConfigAsOwnerOfSliceNodeAffinitySucceeded | NORMAL | The slice configuration is set as the owner of the slice node affinity. |
+
+### SliceQoSConfig
+These events are related to a slice quality-of-service (QoS) profile.
+
+| Sl. No | Event | Type | Description |
+| ------ | ---------------------------- | ------- | ------------------------------------------------ |
+| 1 | SliceQoSConfigDeleted | WARNING | The slice QoS configuration has been deleted. |
+| 2 | SliceQoSConfigDeletionFailed | WARNING | The slice QoS configuration deletion has failed.
| + +### SliceResourceQuotaConfig +These events are related to the slice resource quota configuration. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------------------------------------ | ------- | ---------------------------------------------------------------------------------- | +| 1 | SliceResourceQuotaCreationFailed | WARNING | The slice role quota creation has failed. | +| 2 | SliceResourceQuotaCreatedOnSliceConfigCreation | NORMAL | The slice role quota has been created on the slice configuration. | +| 3 | SliceResourceQuotaDeletionFailed | WARNING | The slice role quota deletion has failed. | +| 4 | SliceResourceQuotaDeleted | WARNING | The slice role quota has been deleted. | +| 5 | SliceResourceQuotaRecreationFailed | WARNING | The slice role quota recreation has failed. | +| 6 | SliceResourceQuotaRecreated | NORMAL | The slice role quota has been recreated. | +| 7 | SetSliceConfigAsOwnerOfSliceResourceQuotaFailed | WARNING | The slice configuration could not be set as the owner of the slice resource quota. | +| 8 | SetSliceConfigAsOwnerOfSliceResourceQuotaSucceeded | NORMAL | The slice configuration is set as the owner of the slice role quota. | +| 9 | SliceResourceQuotaCreated | NORMAL | The slice role quota is created. | +| 10 | SliceResourceQuotaUpdated | NORMAL | The slice role quota is updated. | +| 11 | AllRQSpecificationViolationMetricsResetSuccess | NORMAL | All the resource quota violation metrics have been reset. | +| 12 | SliceRQSpecificationViolationMetricsResetSuccess | NORMAL | The slice resource quota violation metrics have been reset. | +| 13 | ClusterRQSpecificationViolationMetricsResetSuccess | NORMAL | The slice resource quota violation metrics have been reset. | +| 14 | OutdatedNamespaceRQSpecificationViolationMetricsResetSuccess | NORMAL | The outdated namespace resource quota violation metrics have been reset. | +| 15 | OutdatedClusterRQSpecificationViolationMetricsResetSuccess | NORMAL | The outdated cluster resource quota violation metrics have been reset. | + +### SliceRoleBinding +These events are related to the slice role assignment. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------------------------ | ------- | ---------------------------------------------------------------------------------------------- | +| 1 | SliceRoleBindingDeletingFailed | WARNING | The slice role binding or the slice RBAC deletion has failed. | +| 2 | SliceRoleBindingDeleted | WARNING | The slice role binding or the slice RBAC has been deleted. | +| 3 | SetSliceConfigAsOwnerOfSliceRoleBindingFailed | WARNING | The slice configuration could not be set as owner of the slice role binding or the slice RBAC. | +| 4 | SetSliceConfigAsOwnerOfSliceRoleBindingSucceeded | NORMAL | The slice configuration is set as the owner of the slice role binding or the slice RBAC. | + +### SliceRoleTemplate +These events are related to the slice RBAC role. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------------------------ | ------- | ------------------------------------------------------ | +| 1 | DefaultDeploymentSliceRoleTemplateCreationFailed | WARNING | The slice role template creation has failed. | +| 2 | DefaultDeploymentSliceRoleTemplateCreated | NORMAL | The slice role template has been created. | +| 3 | DefaultReaderSliceRoleTemplateCreationFailed | WARNING | The read-only slice role template creation has failed. 
| +| 4 | DefaultReaderSliceRoleTemplateCreated | NORMAL | The read-only slice role template has been created. | +| 5 | SliceRoleTemplateDeletionFailed | WARNING | The slice role template deletion has failed. | +| 6 | SliceRoleTemplateDeleted | WARNING | The slice role template has been deleted. | + +### WorkerServiceImport +These events are related to the worker objects of [service import](../slice-operations/slice-operations-slice-creation#serviceimports) +of a worker cluster. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------------ | ------- | ------------------------------------------------------------------------ | +| 1 | WorkerServiceImportDeletedForcefully | WARNING | The worker service import has been deleted forcefully. | +| 2 | WorkerServiceImportRecreationFailed | WARNING | The worker service import recreation has failed after forceful deletion. | +| 3 | WorkerServiceImportRecreated | NORMAL | The worker service import has been recreated after forceful deletion. | +| 4 | WorkerServiceImportCreationFailed | WARNING | The worker service import creation has failed. | +| 5 | WorkerServiceImportCreated | NORMAL | The worker service import has been created. | +| 6 | WorkerServiceImportUpdateFailed | WARNING | The worker service import update has failed. | +| 7 | WorkerServiceImportUpdated | NORMAL | The worker service import has been updated. | +| 8 | WorkerServiceImportDeleted | WARNING | The worker service import has been deleted. | +| 9 | WorkerServiceImportDeletionFailed | WARNING | The worker service import deletion has failed. | + +### WorkerSliceConfig +These events are related to the worker objects of a slice. A worker object is automatically generated after slice creation. +A worker object is created for every worker cluster connected to a slice. + +| Sl. No | Event | Type | Description | +| ------ | ---------------------------------- | ------- | ----------------------------------------------------------------------------- | +| 1 | WorkerSliceConfigDeletedForcefully | WARNING | The worker slice configuration has been deleted forcefully. | +| 2 | WorkerSliceConfigRecreationFailed | WARNING | The worker slice configuration recreation has failed after forceful deletion. | +| 3 | WorkerSliceConfigRecreated | NORMAL | The worker slice configuration has been recreated after forceful deletion. | +| 4 | WorkerSliceConfigCreationFailed | WARNING | The worker slice configuration creation has failed. | +| 5 | WorkerSliceConfigCreated | NORMAL | The worker slice configuration has been created. | +| 6 | WorkerSliceConfigUpdateFailed | WARNING | The worker slice configuration update has failed. | +| 7 | WorkerSliceConfigUpdated | NORMAL | The worker slice configuration has been updated. | +| 8 | WorkerSliceConfigDeleted | WARNING | The worker slice configuration has been deleted. | +| 9 | WorkerSliceConfigDeletionFailed | WARNING | The worker slice configuration deletion has failed. | + +### WorkerSliceGateway +These events are related to the worker slice gateways for a slice with more than one worker cluster. The number +of worker slice gateways on a slice is equal to `n * (n-1)`, where `n` is the total number of worker +clusters on that slice. + +| Sl. No | Event | Type | Description | +| ------ | ----------------------------------- | ------- | ----------------------------------------------------------------------- | +| 1 | WorkerSliceGatewayDeletedForcefully | WARNING | The worker slice gateway has been deleted forcefully. 
| +| 2 | WorkerSliceGatewayRecreationFailed | WARNING | The worker slice gateway recreation has failed after forceful deletion. | +| 3 | WorkerSliceGatewayRecreated | NORMAL | The worker slice gateway has been recreated after forceful deletion. | +| 4 | WorkerSliceGatewayCreationFailed | WARNING | The worker slice gateway creation has failed. | +| 5 | WorkerSliceGatewayCreated | NORMAL | The worker slice gateway has been created. | +| 6 | SliceGatewayJobCreationFailed | WARNING | The worker slice gateway job creation has failed. | +| 7 | SliceGatewayJobCreated | NORMAL | The worker slice gateway job has been created. | +| 8 | WorkerSliceGatewayDeleted | WARNING | The worker slice gateway has been deleted. | +| 9 | WorkerSliceGatewayDeletionFailed | WARNING | The worker slice gateway deletion has failed. | + +### WorkerSliceNodeAffinity +These events are related to the worker objects of the slice node affinity (node labels). A worker +object is automatically generated after the slice node affinity is created. + +| Sl. No | Event | Type | Description | +| ------ | ---------------------------------------- | ------- | ----------------------------------------------------------------------------- | +| 1 | WorkerSliceNodeAffinityDeletedForcefully | WARNING | The worker slice node affinity has been deleted forcefully. | +| 2 | WorkerSliceNodeAffinityRecreationFailed | WARNING | The worker slice node affinity recreation has failed after forceful deletion. | +| 3 | WorkerSliceNodeAffinityRecreated | NORMAL | The worker slice node affinity has been recreated after forceful deletion. | +| 4 | NodeAffinityRilesExpansionFailed | WARNING | The worker slice node affinity rules expansion has failed. | +| 5 | SliceNodeAffinityConfigDeepCopyFailed | WARNING | The deep copy of the worker slice node affinity configuration has failed. | +| 6 | WorkerSliceNodeAffinityCreationFailed | WARNING | The worker slice node affinity creation has failed. | +| 7 | WorkerSliceNodeAffinityCreated | NORMAL | The worker slice node affinity has been created. | +| 8 | WorkerSliceNodeAffinityUpdateFailed | WARNING | The worker slice node affinity update has failed. | +| 9 | WorkerSliceNodeAffinityUpdated | NORMAL | The worker slice node affinity has been updated. | +| 10 | WorkerSliceNodeAffinityDeletionFailed | WARNING | The worker slice node affinity deletion failed. | +| 11 | WorkerSliceNodeAffinityDeleted | WARNING | The worker slice node affinity has been deleted. | + +### WorkerSliceResourceQuota +These events are related to the worker objects of the slice resource quota. A worker object is +automatically generated after the slice resource quota is created. + +These events also include the events generated when a resource is violated at namespace, cluster, +and slice levels. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------------------ | ------- | ------------------------------------------------------------------------------ | +| 1 | WorkerSliceResourceQuotaDeletedForcefully | WARNING | The worker slice resource quota has been deleted forcefully. | +| 2 | WorkerSliceResourceQuotaRecreationFailed | WARNING | The worker slice resource quota recreation has failed after forceful deletion. | +| 3 | WorkerSliceResourceQuotaRecreated | NORMAL | The worker slice resource quota has been recreated after forceful deletion. | +| 4 | OffBoardedNamespaceUtilizationMetricsReset | NORMAL | The utilization metrics of off-boarded namespaces have been reset. 
| +| 5 | ResourceQuotaMetricsPopulated | NORMAL | The resource quota metrics have been populated. | +| 6 | ClusterCPULimitViolated | WARNING | The CPU limit is violated at the cluster level. | +| 7 | ClusterMemoryLimitViolated | WARNING | The memory limit is violated at the cluster level. | +| 8 | ClusterPodCountViolated | WARNING | The pod count is violated at the cluster level. | +| 9 | ClusterEphemeralStorageLimitViolated | WARNING | The ephemeral storage limit is violated at the cluster level. | +| 10 | ClusterCPURequestViolated | WARNING | The CPU request is violated at the cluster level. | +| 11 | ClusterMemoryRequestViolated | WARNING | The memory request is violated at the cluster level. | +| 12 | ClusterEphemeralStorageRequestViolated | WARNING | The ephemeral storage request is violated at the cluster level. | +| 13 | NamespaceCPULimitViolated | WARNING | The CPU limit is violated at the namespace level. | +| 14 | NamespaceMemoryLimitViolated | WARNING | The memory limit is violated at the namespace level. | +| 15 | NamespacePodCountViolated | WARNING | The pod count is violated at the namespace level. | +| 16 | NamespaceEphemeralStorageLimitViolated | WARNING | The ephemeral storage limit is violated at the namespace level. | +| 17 | NamespaceCPURequestViolated | WARNING | The CPU request is violated at the namespace level. | +| 18 | NamespaceMemoryRequestViolated | WARNING | The memory request is violated at the namespace level. | +| 19 | NamespaceEphemeralStorageRequestViolated | WARNING | The ephemeral storage request is violated at the namespace level. | +| 20 | SliceCPULimitViolated | WARNING | The CPU limit is violated at the slice level. | +| 21 | SliceMemoryLimitViolated | WARNING | The memory limit is violated at the slice level. | +| 22 | SlicePodCountViolated | WARNING | The pod count is violated at the slice level. | +| 23 | SliceEphemeralStorageLimitViolated | WARNING | The ephemeral storage limit is violated at the slice level. | +| 24 | SliceCPURequestViolated | WARNING | The CPU request is violated at the slice level. | +| 25 | SliceMemoryRequestViolated | WARNING | The memory request is violated at the slice level. | +| 26 | SliceEphemeralStorageRequestViolated | WARNING | The ephemeral storage request is violated at the slice level. | +| 27 | WorkerSliceResourceQuotaCreationFailed | WARNING | The worker slice resource quota creation has failed. | +| 28 | WorkerSliceResourceQuotaCreated | NORMAL | The worker slice resource quota has been created. | +| 29 | WorkerSliceResourceQuotaUpdateFailed | WARNING | The worker slice resource quota update has failed. | +| 30 | WorkerSliceResourceQuotaUpdated | NORMAL | The worker slice resource quota has been updated. | +| 31 | WorkerSliceResourceQuotaDeletionFailed | WARNING | The worker slice resource quota deletion has failed. | +| 32 | WorkerSliceResourceQuotaDeleted | WARNING | The worker slice resource quota has been deleted. | + +### WorkerSliceRoleBinding +These events are related to the worker objects of the slice role assignment. A worker object is +automatically generated after the slice role assignment. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------------------- | ------- | -------------------------------------------------------------------------------------------------------------- | +| 1 | WorkerSliceRoleBindingReconciliationSuccess | NORMAL | The reconciliation of the worker slice role assignment is successful due to the change in slice role template. 
| +| 2 | WorkerSliceRoleBindingDeletedForcefully | WARNING | The worker slice role assignment has been deleted forcefully. | +| 3 | WorkerSliceRoleBindingRecreationFailed | WARNING | The worker slice role assignment recreation has failed. | +| 4 | WorkerSliceRoleBindingRecreated | NORMAL | The worker slice role assignment has been recreated after forceful deletion. | +| 5 | WorkerSliceRoleBindingCreationFailed | WARNING | The worker slice role assignment creation has failed. | +| 6 | WorkerSliceRoleBindingCreated | NORMAL | The worker slice role assignment has been created successfully. | +| 7 | WorkerSliceRoleBindingUpdateFailed | WARNING | The worker slice role assignment update has failed. | +| 8 | WorkerSliceRoleBindingUpdated | NORMAL | The worker slice role assignment has been updated successfully. | +| 9 | WorkerSliceRoleBindingDeletionFailed | WARNING | The worker slice role assignment deletion has failed. | +| 10 | WorkerSliceRoleBindingDeleted | WARNING | The worker slice role assignment has been deleted successfully. | + +## Disable the KubeSlice Controller Events +You can disable all the KubeSlice Controller events or a few of them. + +### Disable All the KubeSlice Controller Events +If you want to disable all the events, then edit the KubeSlice Controller configuration +file and set the following configuration for `events`. + +``` + events: + disabled: true +``` +After changing the configuration, use the following command to update the KubeSlice Controller: + +``` +helm upgrade kubeslice-controller kubeslice/kubeslice-controller -f -n kubeslice-controller +``` + +Where `values.yaml` is the KubeSlice Controller configuration file. + +### Disable a Few KubeSlice Controller Events +If you do not want to see a KubeSlice Controller event, then you can list it under `disabledEvents` in +the `kubeslice-controller-event-schema-conf` file that is located in the `kubeslice-controller` +namespace. + +For example, if you do not want to see the `WorkerSliceConfigCreated` event, then go to +the `kubeslice-controller-event-schema-conf` file using the following command: + +``` +kubectl edit cm kubeslice-controller-event-schema-conf -n kubeslice-controller +``` + +In the `kubeslice-controller-event-schema-conf` file, add the `WorkerSliceConfigCreated` event that you want +to hide under `disabledEvents` as illustrated below. + +``` +# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file will be +# reopened with the relevant failures. +# +apiVersion: v1 +data: + controller.yaml: |- + disabledEvents: + - WorkerSliceConfigCreated +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: kubeslice-controller + meta.helm.sh/release-namespace: kubeslice-controller + creationTimestamp: "2023-04-10T07:32:57Z" + labels: + app.kubernetes.io/managed-by: Helm + name: event-schema + name: kubeslice-controller-event-schema-conf + namespace: kubeslice-controller + resourceVersion: "201971" + uid: 6cbcaba7-08b8-4d2e-b880-7516f594950f +``` + +Save the file after adding the events that you want to disable. 
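+
+To verify which events are currently disabled without opening an editor, you can read the ConfigMap data directly. The following command is a minimal sketch; it assumes the default ConfigMap name, namespace, and `controller.yaml` data key shown above.
+
+```
+# Print the controller event schema configuration, including the disabledEvents list
+kubectl get cm kubeslice-controller-event-schema-conf -n kubeslice-controller -o jsonpath='{.data.controller\.yaml}'
+```
+
+Any event listed under `disabledEvents` in the output should no longer be reported by the KubeSlice Controller.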
+
diff --git a/versioned_docs/version-1.2.0/install-kubeslice/yaml/events/yaml-events-worker-events.mdx b/versioned_docs/version-1.2.0/install-kubeslice/yaml/events/yaml-events-worker-events.mdx
new file mode 100644
index 00000000..9f9c1b0f
--- /dev/null
+++ b/versioned_docs/version-1.2.0/install-kubeslice/yaml/events/yaml-events-worker-events.mdx
@@ -0,0 +1,225 @@
+# Slice Operator Events
+
+The Slice Operator generates events that are specific to KubeSlice operations, representing various operations on the worker cluster. These events are logged in the `kubeslice-system` namespace.
+
+To retrieve the Worker Operator events, use the following command:
+
+```
+kubectl get events -n kubeslice-system
+```
+
+## Slice
+
+These events are related to slice creation and management.
+
+| Sl. No | Event | Type | Description |
+| ------ | ----------------------------------- | ------- | ------------------------------------------------------------------------------------------------------ |
+| 1 | SliceCreated | NORMAL | The slice has been created. |
+| 2 | SliceUpdated | NORMAL | The slice has been updated. |
+| 3 | SliceDeleted | WARNING | The slice has been deleted. |
+| 4 | SliceCreationFailed | WARNING | The slice creation has failed. Please check the slice configuration. |
+| 5 | SliceUpdateFailed | WARNING | The slice update has failed. Please check the slice configuration. |
+| 6 | SliceDeletionFailed | WARNING | The slice deletion has failed. Please check the slice configuration. |
+| 7 | SliceQoSProfileWithNetOpsSyncFailed | WARNING | The slice QoS profile sync with NetOp has failed. Please ask the admin to check the Slice QoS Profile. |
+| 8 | SliceIngressInstallFailed | WARNING | The slice ingress installation has failed. |
+| 9 | SliceEgressInstallFailed | WARNING | The slice egress installation has failed. |
+| 10 | SliceAppPodsListUpdateFailed | WARNING | The slice application pods list is not updated. Please ask the admin to check the slice configuration. |
+| 11 | SliceRouterDeploymentFailed | WARNING | The slice router deployment has failed. |
+| 12 | SliceRouterServiceFailed | WARNING | The slice router service has failed. |
+
+## Cluster
+
+These events are related to the worker cluster health and node IP.
+
+| Sl. No | Event | Type | Description |
+| ------ | ----------------------------------- | ------- | --------------------------------------------------------------------------------------------------------------- |
+| 1 | ClusterHealthy | NORMAL | The cluster is in a healthy state. |
+| 2 | ClusterUnhealthy | WARNING | The cluster is in an unhealthy state. Please check if all worker components are running as expected. |
+| 3 | ClusterNodeIpAutoDetected | NORMAL | The auto-detection of the cluster node IP address was successful due to changes detected in the worker nodes. |
+| 4 | ClusterNodeIpAutoDetectionFailed | WARNING | The auto-detection of the cluster node IP address has failed. |
+| 5 | ClusterProviderUpdateInfoSuccesfull | NORMAL | The cluster cloud provider or physical location info has been updated. |
+| 6 | ClusterProviderUpdateInfoFailed | WARNING | The cluster cloud provider or physical location info update has failed. |
+| 7 | ClusterCNISubnetUpdateSuccessfull | NORMAL | The cluster CNI subnet has been updated. |
+| 8 | ClusterCNISubnetUpdateFailed | WARNING | The cluster CNI subnet update has failed. |
+| 9 | ClusterDashboardCredsUpdated | NORMAL | The cluster dashboard credentials have been updated.
| +| 10 | ClusterDashboardCredsUpdateFailed | WARNING | The cluster dashboard credentials update has failed. | +| 11 | ClusterHealthStatusUpdated | NORMAL | The cluster health status has been updated. | +| 12 | ClusterHealthStatusUpdateFailed | WARNING | The cluster health status update has failed. | + +## SliceGateway + +These events are related to the Slice Gateway that is used to communicate between worker clusters. + +| Sl. No | Event | Type | Description | +| ------ | ---------------------------------- | ------- | ------------------------------------------------ | +| 1 | SliceGWCreated | NORMAL | The Slice Gateway has been created. | +| 2 | SliceGWUpdated | NORMAL | The Slice Gateway has been updated. | +| 3 | SliceGWDeleted | WARNING | The Slice Gateway has been deleted. | +| 4 | SliceGWCreateFailed | WARNING | The Slice Gateway creation has failed. | +| 5 | SliceGWUpdateFailed | WARNING | The Slice Gateway update has failed. | +| 6 | SliceGWDeleteFailed | WARNING | The Slice Gateway deletion has failed. | +| 7 | SliceGWPodReconcileFailed | WARNING | The Slice Gateway pod reconciliation has failed. | +| 8 | SliceGWConnectionContextFailed | WARNING | The Slice Gateway connection context has failed. | +| 9 | SliceRouterConnectionContextFailed | WARNING | The Slice Gateway router context has failed. | +| 10 | SliceNetopQoSSyncFailed | WARNING | The Slice Netop and QoS profile sync has failed. | +| 11 | SliceGWRebalancingFailed | WARNING | The Slice Gateway rebalancing has failed. | +| 12 | SliceGWRemotePodSyncFailed | WARNING | The Slice Gateway remote pod sync has failed. | +| 13 | SliceGWRebalancingSuccess | NORMAL | The Slice Gateway rebalancing is successful. | +| 14 | SliceGWServiceCreationFailed | WARNING | The Slice Gateway service creation has failed. | +| 15 | SliceGWNodePortUpdateFailed | WARNING | The Slice Gateway node port update has failed. | + +## ServiceExport + +These events are related to [service export](../slice-operations/slice-operations-slice-creation#serviceexports-and-serviceimports) +of a worker cluster. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------------------- | ------- | --------------------------------------------------------------- | +| 1 | ServiceExportSliceFetchFailed | WARNING | The slice service export's slice fetch has failed. | +| 2 | ServiceExportStatusPending | WARNING | The slice service export status has been pending. | +| 3 | ServiceExportInitialStatusUpdated | NORMAL | The slice service export's initial status has been updated. | +| 4 | SliceServiceExportInitialStatusUpdateFailed | WARNING | The slice service export's initial status has failed. | +| 5 | ServiceExportDeleted | WARNING | The slice service export has been deleted. | +| 6 | ServiceExportDeleteFailed | WARNING | The slice service export deletion has failed. | +| 7 | ServiceExportUpdatePortsFailed | WARNING | The update of the slice service export's ports has failed. | +| 8 | IngressGWPodReconciledSuccessfully | NORMAL | The slice service export's gateway pod has been reconciled. | +| 9 | IngressGWPodReconcileFailed | WARNING | The slice service export gateway pod reconciliation has failed. | +| 10 | SyncServiceExportStatusFailed | WARNING | The slice service export status sync has failed. | +| 11 | SyncServiceExportStatusSuccessfully | NORMAL | The slice service export status has been synced. 
| + +## ServiceImport + +These events are related to [service import](../slice-operations/slice-operations-slice-creation#serviceimports) +of a worker cluster. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------------------------ | ------- | --------------------------------------------------------------- | +| 1 | SliceServiceImportCreated | NORMAL | The slice service import has been created. | +| 2 | SliceServiceImportCreateFailed | WARNING | The slice service import creation has failed. | +| 3 | SliceServiceImportUpdateAvailableEndpointsFailed | WARNING | The slice service import available endpoints update has failed. | +| 4 | SliceServiceImportDeleted | WARNING | The slice service import has been deleted. | +| 5 | SliceServiceImportUpdatePorts | NORMAL | The slice service import's ports have been updated. | + +## WorkerSliceConfig + +These events are related to the worker objects of a slice. A worker object is automatically generated after slice creation. +A worker object is created for every worker cluster connected to a slice. + +| Sl. No | Event | Type | Description | +| ------ | ----------------------------- | ------- | --------------------------------------------------- | +| 1 | WorkerSliceConfigCreated | NORMAL | The worker slice configuration has been created. | +| 2 | WorkerSliceConfigCreateFailed | WARNING | The worker slice configuration creation has failed. | +| 3 | WorkerSliceConfigUpdated | NORMAL | The worker slice configuration has been updated. | +| 4 | WorkerSliceHealthUpdated | NORMAL | The worker slice health has been updated. | +| 5 | WorkerSliceHealthUpdateFailed | WARNING | The worker slice health update has failed. | + +## WorkerServiceImport + +These events are related to the worker objects of [service import](../slice-operations/slice-operations-slice-creation#serviceimports) +of a worker cluster. + +| Sl. No | Event | Type | Description | +| ------ | ------------------------------- | ------- | ---------------------------------------------- | +| 1 | WorkerServiceImportCreated | NORMAL | The worker service import has been created. | +| 2 | WorkerServiceImportCreateFailed | WARNING | The worker service import creation has failed. | + +## WorkerSliceGWRecycler + +These events are related to the WorkerSliceGWRecycler, which is an object +used for recycling gateways using a finite-state machine. + +| Sl. No | Event | Type | Description | +| ------ | --------------------------- | ------- | ------------------------------------------------------------- | +| 1 | FSMNewGWSpawned | NORMAL | The Slice Gateway Recycler's new gateway has been spawned. | +| 2 | FSMRoutingTableUpdated | NORMAL | The Slice Gateway Recycler's routing table has been updated. | +| 3 | FSMDeleteOldGW | WARNING | The Slice Gateway Recycler's old gateway has been deleted. | +| 4 | FSMNewGWSpawnFailed | WARNING | The Slice Gateway Recycler's new gateway has failed to spawn. | +| 5 | FSMRoutingTableUpdateFailed | WARNING | The Slice Gateway Recycler's routing table update has failed. | +| 6 | FSMDeleteOldGWFailed | WARNING | The Slice Gateway Recycler's old gateway deletion has failed. | + +## Namespace + +These events are related to the namespace events of the worker cluster. + +| Sl. 
No | Event | Type | Description | +| ------ | ------------------------------------- | ------- | ----------------------------------------------------------------------------- | +| 1 | UpdatedNamespaceInfoToController | NORMAL | The namespace information has been updated to the KubeSlice Controller. | +| 2 | UpdateNamespaceInfoToControllerFailed | WARNING | The namespace information update to the KubeSlice Controller has failed. | +| 3 | DeleteNamespaceInfoToController | NORMAL | The namespace information has been deleted from the KubeSlice Controller. | +| 4 | DeleteNamespaceInfoToControllerFailed | WARNING | The namespace information could not be deleted from the KubeSlice Controller. | + +## NetworkPolicy + +These events are related to the slice network policy. + +| Sl. No | Event | Type | Description | +| ------ | --------------------------- | ------- | --------------------------------------------------------------------------------------------------------------------------------- | +| 1 | NetPolAdded | NORMAL | The slice network policy is added. | +| 2 | NetPolScopeWidenedNamespace | WARNING | The slice network policy scope has been widened due to namespace violation. | +| 3 | NetPolScopeWidenedIPBlock | WARNING | The slice network policy scope has been widened due to IP address block violation. | +| 4 | NetPolViolation | WARNING | The slice network policy has been violated. Please ask the admin to check the network policy configuration on the worker cluster. | + +## Disable the Worker Operator Events + +You can disable all the Worker Operator events or a few of them. + +### Disable All the Worker Operator Events + +If you want to disable all the events, then edit the Worker Operator configuration +file and set the following configuration for `events`. + +``` + events: + disabled: true +``` + +After changing the configuration, use the following command to update the Worker Operator: + +``` +helm upgrade kubeslice-worker kubeslice/kubeslice-worker -f -n kubeslice-system +``` + +Where `values.yaml` is the Worker Operator configuration file. + +### Disable a Few Worker Operator Events + +If you do not want to see a Worker Operator event, then you can list it under `disabledEvents` in +the `kubeslice-worker-event-schema-conf` file that is located in the `kubeslice-system` +namespace. + +For example, if you don't want to see the `SliceGWUpdated` event, then go to +the `kubeslice-worker-event-schema-conf` file using the following command: + +``` +kubectl edit cm kubeslice-worker-event-schema-conf -n kubeslice-system +``` + +In the `kubeslice-worker-event-schema-conf` file, add the `SliceGWUpdated` event that you want +to hide under `disabledEvents` as illustrated below. + +``` +# Please edit the object below. Lines beginning with a '#' will be ignored, +# and an empty file will abort the edit. If an error occurs while saving this file will be +# reopened with the relevant failures. +# +apiVersion: v1 +data: + worker.yaml: |- + disabledEvents: + - SliceGWUpdated +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: kubeslice-worker + meta.helm.sh/release-namespace: kubeslice-system + creationTimestamp: "2023-05-08T12:06:33Z" + labels: + app.kubernetes.io/managed-by: Helm + name: event-schema + name: kubeslice-worker-event-schema-conf + namespace: kubeslice-system + resourceVersion: "163809" + uid: 0d0a88c2-b856-460e-bda3-ec443917ca44 +``` + +Save the file after adding the events that you want to disable. 
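+
+To confirm that a suppressed event no longer shows up, you can filter the worker namespace events by reason. This is a simple verification sketch using the `SliceGWUpdated` example above.
+
+```
+# List only events whose reason matches the disabled event; no new entries should appear after the change
+kubectl get events -n kubeslice-system --field-selector reason=SliceGWUpdated
+```
+
+The same filter works for any other event name listed in the tables in this topic.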
diff --git a/versioned_docs/version-1.2.0/install-kubeslice/yaml/metrics/yaml-metrics-controller-metrics.mdx b/versioned_docs/version-1.2.0/install-kubeslice/yaml/metrics/yaml-metrics-controller-metrics.mdx new file mode 100644 index 00000000..9217f0ed --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/yaml/metrics/yaml-metrics-controller-metrics.mdx @@ -0,0 +1,31 @@ +# Controller Metrics +The KubeSlice Controller metrics provide insights into the operations and events performed +by the controller for various components. The `kubeslice_controller_events_counter` metric +is a counter that tracks the count of events raised for a specific action on an object. Each +component has its own set of labels, allowing you to distinguish and analyze metrics based on +the corresponding component. These labels include the slice name, project, cluster, namespace, +reporting controller, action, event, object name, and object kind. By monitoring these metrics, +you can gain visibility into the events and actions performed by the KubeSlice Controller, +enabling you to track resource quotas and effectively manage your KubeSlice deployments. + +The metric below is common for all the components, which is distinguished by the corresponding label for each component. + +| Sl. No | Name | Type | Labels | Description | +| ------ | ----------------------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------- | +| 1 | kubeslice_controller_events_counter | Counter | slice_name, slice_project, slice_cluster, slice_namespace, slice_reporting_controller, action, event, object_name, object_kind | This metric provides the count of all the events raised for a particular action in an object. | + + +## Labels +The following table describes the labels that are attached to the KubeSlice Controller metrics. + +| Label | Description | +| -------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | +| action | The action which triggered the metric record. | +| event | The event title to which the metric is attached to. | +| object_kind | The kind of the object on which the metric is recorded. | +| object_name | The name of the object on which the metric is recorded. | +| slice_cluster | The name of the cluster the metric is attached to. The value is controller for all the KubeSlice Controller related metrics. | +| slice_name | The name of the slice which the metric is attached to. The value is `NA` if the metric is unrelated to any slice. | +| slice_namespace | The namespace of the object for which the metric is recorded. | +| slice_project | The name of the project on which the slice is created. | +| slice_reporting_controller | The name of the reporting controller. The value is `controller` for all the KubeSlice Controller related metrics. 
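+
+Once the metric is being scraped, you can query it through the Prometheus HTTP API. The commands below are a sketch only: the service name `prometheus-service` and the `monitoring` namespace are assumptions, so substitute the Prometheus endpoint used in your deployment.
+
+```
+# Forward the assumed Prometheus service to localhost
+kubectl port-forward svc/prometheus-service -n monitoring 9090:9090 &
+
+# Fetch the raw event counter series
+curl -s http://localhost:9090/api/v1/query \
+  --data-urlencode 'query=kubeslice_controller_events_counter'
+```
+
+Each returned series carries the labels described above, so you can aggregate further, for example with `sum by (event, object_kind) (kubeslice_controller_events_counter)`.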
| \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/install-kubeslice/yaml/metrics/yaml-metrics-worker-metrics.mdx b/versioned_docs/version-1.2.0/install-kubeslice/yaml/metrics/yaml-metrics-worker-metrics.mdx new file mode 100644 index 00000000..5381d026 --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/yaml/metrics/yaml-metrics-worker-metrics.mdx @@ -0,0 +1,41 @@ +# Slice Operator Metrics + +The Slice Operator metrics provide valuable insights into the performance and health of various components within the KubeSlice environment. These metrics are categorized based on different labels associated with each component. The metrics include information such as the number of active endpoints in service imports and exports, the number of application pods in each slice per namespace, the health status of the worker cluster and its components, the health status of individual slices and their components, and counters for slice-related events. These metrics help you monitor the state of your KubeSlice deployment and make informed decisions for efficient management. The metrics are accompanied by labels that provide additional context, such as the action triggering the metric, the component or object involved, the project and cluster names, and more. By leveraging these metrics and labels, you can effectively monitor and optimize your KubeSlice infrastructure. + + +| Sl. No | Name | Type | Labels | Description | +| ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 1 | kubeslice_serviceimport_endpoints | Gauge | slice_project, slice_cluster, slice, namespace, service, slice_reporting_controller | Number of active endpoints in the service import. | +| 2 | kubeslice_serviceexport_endpoints | Gauge | slice_project, slice_cluster, slice, namespace, service, slice_reporting_controller | Number of active endpoints in the service export. | +| 3 | kubeslice_app_pods | Gauge | slice_project, slice_cluster, slice, slice_namespace | Number of application pods in each slice per namespace. | +| 4 | kubeslice_cluster_up | Gauge | slice_project, slice_cluster | Denotes 1 if the worker cluster is healthy and denotes 0 if it is unhealthy. | +| 5 | kubeslice_cluster_component_up | Gauge | slice_project, slice_cluster, component | Denotes 1 if the worker cluster is healthy and denotes 0 if it is unhealthy. | +| 6 | kubeslice_slice_up | Gauge | slice_project, slice_cluster, slice | Denotes 1 if the slice is healthy and denotes 0 if it is unhealthy. | +| 7 | kubeslice_slice_component_up | Gauge | slice_project, slice_cluster, slice, component | Denotes 1 if the slice component is healthy and denotes 0 if it is unhealthy. | +| 8 | kubeslice_slice_created_total, kubeslice_slice_updated_total, kubeslice_slice_deleted_total, kubeslice_slice_creation_failed_total, kubeslice_slice_updation_failed_total, kubeslice_slice_deletion_failed_total | Counter | slice_project, slice_cluster, slice | Counter for slice related events. 
| +| 9 | kubeslice_nodeaffinity_taints_active | Gauge | slice_project, slice_cluster, slice, namespace, slice_nodeaffinity | Number of nodes tainted for Node Affinity in the slice. | +| 10 | kubeslice_nodeaffinity_mutated_workloads_total | Counter | slice_project, slice_cluster, slice, namespace, slice_nodeaffinity, slice_workload_type | Count of workloads mutated by webhook for Node Affinity. | + +## Labels + +The following table describes the labels that are attached to the Slice Operator metrics. + +| Label | Description | +| ----------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | +| action | The action which triggered the metric record. | +| component | The KubeSlice component to which the metric is attached to. | +| event | The event title to which the metric is attached to. | +| object_kind | The kind of the object on which the metric is recorded. | +| object_name | The name of the object on which the metric is recorded. | +| namespace | The namespace of the object for which the metric is recorded. | +| service | The metric is related to the service discovery (service export or import). | +| slice | The name of the slice which the metric is attached to. The value is `NA` if the metric is unrelated to any slice. | +| slice_cluster | The name of the cluster the metric is attached to. The value is `worker-cluster` for all the Worker Operator related metrics. | +| slice_gateway | The Slice Gateway for which the metric is recorded | +| slice_gateway_pod | The Slice Gateway pod for which the metric is recorded. | +| slice_nodeaffinity | The Node Affinity of the slice for which the metric is recorded. | +| slice_networkpolicy | The namespace isolation policy of the slice for which the metric is recorded. | +| slice_networkpolicy_violation | The namespace isolation policy violation of the slice for which the metric is recorded. | +| slice_project | The name of the project on which the slice is created. | +| slice_reporting_controller | The name of the reporting controller. The value is `controller` for all the KubeSlice Controller related metrics. | +| slice_workload_type | The workload type of the slice for which the metric is recorded. | \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-cluster-health.mdx b/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-cluster-health.mdx new file mode 100644 index 00000000..93fa59b5 --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-cluster-health.mdx @@ -0,0 +1,125 @@ +# Monitor Slice and Cluster Health +You can check the health of a slice and the worker clusters connected to it using the +corresponding YAML commands. + +## Monitor Slice Health +To monitor a slice's health: + +1. Get the list of worker clusters connected to a slice using the following command: + ``` + kubectl get workersliceconfig -n kubeslice- + ``` + Example + ``` + kubectl get workersliceconfig -n kubeslice-avesha + ``` + + Example Output + ``` + NAME AGE + water-worker-1 54m + water-worker-2 54m + water-worker-3 54m + ``` + +2. 
From the output, note down the name of a worker cluster and use it in the following command to + get the slice description: + + ``` + kubectl describe workersliceconfig -n kubeslice- + ``` + Example + ``` + kubectl describe workersliceconfig water-worker-1 -n kubeslice-avesha + ``` + + Example Output + + :::info + The output below is an excerpt of the command output to only show the health status. + ::: + + ``` + Slice Health: + Component Statuses: + Component: dns + Component Health Status: Normal + Component: slicegateway + Component Health Status: Normal + Component: slicerouter + Component Health Status: Normal + Last Updated: 2023-04-10T08:54:08Z + Slice Health Status: Normal + +When all the components are in a healthy state, the corresponding status indicates that +the health is **Normal**. + +When a component's health goes bad, the corresponding status indicates the health +with a **Warning**. To know how to trace an issue in each component, see [slice and cluster health issues](../../../troubleshooting/troubleshooting-guide#slice-and-cluster-health-issues). + +## Monitor Cluster Health +To monitor a cluster's health: + +1. Get the list of worker clusters using the following command: + + ``` + kubectl get clusters -n kubeslice- + ``` + + Example + ``` + kubectl get clusters -n kubeslice-avesha + ``` + Example Output + ``` + NAME AGE + worker-1 76m + worker-2 75m + worker-3 76m + ``` + +2. From the output, note down a worker cluster that you want to monitor and use it in the following + command to get the cluster description: + + ``` + kubectl describe clusters -n kubeslice- + ``` + + Example + + ``` + kubectl describe clusters worker-1 -n kubeslice-avesha + ``` + + Example Output + + :::info + The output below is an excerpt of the command output to only show the health status. + ::: + + ``` + Status: + Cluster Health: + Cluster Health Status: Normal + Component Statuses: + Component: nsmgr + Component Health Status: Normal + Component: forwarder + Component Health Status: Normal + Component: admission-webhook + Component Health Status: Normal + Component: netop + Component Health Status: Normal + Component: spire-agent + Component Health Status: Normal + Component: spire-server + Component Health Status: Normal + Component: istiod + Component Health Status: Normal + ``` + +When all the components are in a healthy state, the corresponding status indicates that +the health is **Normal**. + +When a component's health is bad, the corresponding status indicates the health +with a **Warning**. To know how to trace an issue in each component, see [slice and cluster health issues](../../../troubleshooting/troubleshooting-guide#slice-and-cluster-health-issues). \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx b/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx new file mode 100644 index 00000000..288817be --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx @@ -0,0 +1,927 @@ +# Create Slices +After the worker clusters have been successfully registered with the KubeSlice Controller, +the next step is to create a slice that will onboard the application namespaces. It is possible +to create a slice across multiple clusters or intra-cluster. 
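+
+Before going through the individual parameters, it can help to see the overall shape of a slice configuration. The manifest below is an illustrative sketch only: the slice name, project namespace, cluster names, and QoS values are placeholders, and the parameter tables that follow in this topic are the authoritative reference.
+
+```yaml
+apiVersion: controller.kubeslice.io/v1alpha1
+kind: SliceConfig
+metadata:
+  name: water                    # unique slice name within the project namespace
+  namespace: kubeslice-avesha    # project namespace on the controller cluster
+spec:
+  sliceSubnet: 192.168.0.0/16    # RFC 1918 range used for the slice overlay network
+  sliceType: Application
+  sliceGatewayProvider:
+    sliceGatewayType: OpenVPN
+    sliceCaType: Local
+  sliceIpamType: Local
+  clusters:                      # registered worker clusters to connect to the slice
+    - worker-1
+    - worker-2
+  qosProfileDetails:             # inter-cluster traffic profile; see the QoS profile parameters
+    queueType: HTB
+    priority: 1
+    tcType: BANDWIDTH_CONTROL
+    bandwidthCeilingKbps: 5120
+    bandwidthGuaranteedKbps: 2560
+    dscpClass: AF11
+  namespaceIsolationProfile:     # namespaces to onboard; see the namespace isolation parameters
+    applicationNamespaces:
+      - namespace: iperf
+        clusters:
+          - '*'
+    isolationEnabled: false
+```
+
+A manifest like this is applied on the controller cluster in the project namespace.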
+ +## Slice Configuration Parameters +The following tables describe the configuration parameters used to create a slice +with registered worker cluster(s). + +| Parameter | Parameter Type | Description | Required | +| -------------------------------------- | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| apiVersion | String | The KubeSlice Controller API version. A set of resources that are exposed together, along with the version. The value must be `controller.kubeslice.io/v1alpha1`. | Mandatory | +| kind | String | The name of a particular object schema. The value must be `SliceConfig`. | Mandatory | +| [metadata](#slice-metadata-parameters) | Object | The metadata describes parameters (names and types) and attributes that have been applied. | Mandatory | +| [spec](#slice-spec-parameters) | Object | The specification of the desired state of an object. | Mandatory | + +#### Slice Metadata Parameters + +These parameters are related to the metadata configured in the +[slice configuration YAML file](#slice-creation). + +| Parameter | Parameter Type | Description | Required | +| --------- | -------------- | --------------------------------------------------------------------------------------------------------------------------------- | --------- | +| name | String | The name of the Slice. Each slice **must** have a unique name within a project namespace. | Mandatory | +| namespace | String | The project namespace on which you apply the slice configuration file. | Mandatory | + +#### Slice Spec Parameters + +These parameters are related to the spec configured in the +[slice configuration YAML file](#slice-creation). + +| Parameter | Parameter Type | Description | Required | +| -------------------------------------------------------------------- | ---------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| sliceSubnet | String (IP/16 Subnet) (**RFC 1918 addresses**) | This subnet is used to assign IP addresses to pods that connect to the slice overlay network. The CIDR range can be re-used for each slice or can be modified as required. Example: 192.168.0.0/16 | Mandatory | +| maxClusters | Integer | The maximum number of clusters that are allowed to connect a slice. **The value of maxClusters can only be set during the slice creation**. The maxClusters value is immutable after the slice creation. The minimum value is 2, and the maximum value is 32. The default value is 16. Example: 5. The maxClusters affect the subnetting across the clusters. For example, if the slice subnet is 10.1.0.0/16 and the maxClusters=16, then each cluster would get a subnet of 10.1.x.0/20, x=0,16,32. | Optional | +| sliceType | String | Denotes the type of the slice. The value must be set to `Application`. | Mandatory | +| [sliceGatewayProvider](#slice-gateway-provider-parameters) | Object | It is the type of slice gateway created for inter cluster communication. 
| Mandatory | +| sliceIpamType | String | It is the type of the IP address management for the slice subnet. The value must be always set to `Local`. | Mandatory | +| rotationInterval | Integer | The duration in which SliceGateway certificates are periodically renewed or rotated to ensure security and compliance. The default interval is 30 days and the supported range is 30 to 90 days.| Optional | +| renewBefore | Time | The time period before the expiration of SliceGateway certificates during which the renewal process is initiated. It represents the duration prior to certificate expiration when the renewal process is triggered to ensure seamless continuation of secure communication. By setting an appropriate value for renewBefore, certificates can be renewed ahead of time, allowing for any necessary updates or adjustments to be completed before the current certificates expire. This parameter helps avoid potential disruptions by ensuring the availability of valid certificates without waiting until the last moment. If you want to renew it now, provide the current Timestamp. warning: Do not set this parameter during slice creation.| Optional | +| [vpnConfig](#openvpn-configuration) | Object | The Slice VPN Gateway is a slice network service component that provides a secure VPN tunnel between multiple clusters that are a part of the slice configuration. | Optional | +| clusters | List of Strings | The names of the worker clusters that would be part of the slice. You can provide the list of worker clusters. | Mandatory | +| [qosProfileDetails](#qos-profile-parameters) | Object | QoS profile for the slice inter cluster traffic. | Mandatory | +| [namespaceIsolationProfile](#namespace-isolation-profile-parameters) | Object | It is the configuration to onboard namespaces and/or isolate namespaces with the network policy. | Mandatory | +| [externalGatewayConfig](#external-gateway-configuration-parameters) | Object | It is the slice ingress/egress gateway configuration. It is an optional configuration. | Optional | + +#### Slice Gateway Provider Parameters + +These parameters are related to the slice gateway created for the inter-cluster communication and they are configured in the +[slice configuration YAML file](#slice-creation). + +| Parameter | Parameter Type | Description | Required | +| ---------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| sliceGatewayType | String | The slice gateway type for inter cluster communication. The value must be `OpenVPN`. | Mandatory | +| sliceCaType | String | The slice gateway certificate authority type that provides certificates to secure inter-cluster traffic. The value must be always set to `Local`. | Mandatory | +| [SliceGatewayServiceType](#slice-gateway-service-type-parameters) | Object | The type of gateway connectivity to use on a cluster. The default value is NodePort. | Optional | + + +#### Slice Gateway Service Type Parameters + +These parameters are related to the slice gateway service type parameters that provides an option to use the external Load Balancer. The user +can add the slice gateway type and protocol for each cluster. + +| Parameter | Parameter | Description | Required | +| ------------- | --------- | ------------ | -------- | +| cluster | String | The name of the worker cluster for which the user configures slice gateway connectivity. This field supports wildcard entries. 
To configure the slice gateway connectivity for all clusters connected to the slice, specify *(asterisk) as the parameter's value. | Mandatory | +| type | String | It defines the type of inter-cluster connectivity in KubeSlice. It has two options: NodePort and LoadBalancer. The default value is NodePort. The type value set to LoadBalancer is immutable after the slice creation. | Optional | +| protocol | String |It defines the protocol for gateway configuration. It has two options: TCP and UDP. The default value is UDP. The protocol is immutable after the slice creation. | Optional | + + +#### OpenVPN Configuration +This parameter is related to the open VPN certificate generation and is configured in the [slice configuration](#slice-creation) YAML file. + +| Parameter | Parameter Type | Parameter Description | Required | +|-----------|-----------------|-----------------------|----------| +| cipher | Alphanumeric | This is the type of cipher used to generate the open VPN certificates. The value can be set to `AES_128_CBC`. The default value is `AES_256_CBC`.This parameter is provided as part of the slice configuration during creation. This configuration is immutable during the lifetime of the slice.| Optional | + + +#### QOS Profile Parameters + +These parameters are related to the QoS profile for the slice inter-cluster traffic configured in the +[slice configuration YAML file](#slice-creation). + +| Parameter | Parameter Type | Description | Required | +| ----------------------- | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| queueType | String | It is the slice traffic control queue type. The value must be Hierarchical Token Bucket (`HTB`). HTB facilitates guaranteed bandwidth for the slice traffic. | Mandatory | +| priority | Integer | QoS profiles allows traffic management within a slice as well as prioritization across slices. The value range is 0-3. Integer 0 represents the lowest priority and integer 3 represents the highest priority. | Mandatory | +| tcType | String | It is the traffic control type. The value must be `BANDWIDTH_CONTROL`. | Mandatory | +| bandwidthCeilingKbps | Integer | The maximum bandwidth in Kbps that is allowed for the slice traffic. | Mandatory | +| bandwidthGuaranteedKbps | Integer | The guaranteed bandwidth in Kbps for the slice traffic. | Mandatory | +| dscpClass | Alphanumeric | DSCP marking code for the slice inter-cluster traffic. | Mandatory | + +#### Namespace Isolation Profile Parameters + +These parameters are related to onboarding namespaces, isolating the slice, and allowing external namespaces to +communicate with the slice. They are configured in the +[slice configuration YAML file](#slice-configuration). + +| Parameter | Parameter Type | Description | Required | +| ----------------------------------------------------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| [applicationNamespaces](#application-namespaces-parameters) | Array object | Defines the [namespaces that will be onboarded](#manage-namespace) to the slice and their corresponding worker clusters. 
| Mandatory | +| [allowedNamespaces](#allowed-namespaces-parameters) | Array object | Contains the list of namespaces from which the traffic flow is allowed to the slice. By default, native kubernetes namespaces such as `kube-system` are allowed. If `isolationEnabled ` is set to `true`, then you must include namespaces that you want to allow traffic from. | Optional | +| isolationEnabled | Boolean | Defines if the namespace isolation is enabled. By default, it is set to `false`. The [isolation policy](#isolate-namespaces) only applies to the traffic from the application and allowed namespaces to the same slice. | Optional | + +#### Application Namespaces Parameters + +These parameters are related to onboarding namespaces onto a slice, which are configured in the +[slice configuration YAML file](#slice-configuration). + +| Parameter | Parameter Type | Description | Required | +| --------- | --------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| namespace | String | The namespace that you want to onboard to the slice. These namespaces can be isolated using the namespace isolation feature. | Mandatory | +| clusters | List of Strings | Corresponding cluster names for the namespaces listed above. To onboard the namespace on all clusters, specify the asterisk `*` as this parameter\'s value. | Mandatory | + +#### Allowed Namespaces Parameters + +These parameters are related to allowing external namespaces to communicated with the slice, which are configured in the +[slice configuration YAML file](#slice-creation). + +| Parameter | Parameter Type | Description | Required | +| --------- | --------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | +| namespace | Strings | List of external namespaces that are not a part of the slice from which traffic is allowed into the slice. | Optional | +| clusters | List of Strings | Corresponding cluster names for the namespaces listed above. To onboard the namespace on all clusters, specify the asterisk `*` as this parameter\'s value. | Optional | + +#### External Gateway Configuration Parameters + +These parameters are related to external gateways, which are configured in the +[slice configuration YAML file](#slice-configuration). + +| Parameter | Parameter Type | Description | Required | +| ----------- | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- | +| ingress | Boolean | To use the ingress gateway for East-West traffic on your slice, set the value to `true`. | Optional | +| egress | Boolean | To use the egress gateway for East-West traffic on your slice, set the value to `true`. | Optional | +| gatewayType | String | The type of ingress/egress gateways that need to be provisioned for the slice. It can either be `none` or `istio`.If set to `istio`, - The ingress gateway is created for a slice when `ingress` is enabled. - The egress gateway is created for a slice when `egress` is enabled. 
If set to `istio`, and ingress and egress are set to `false`, then Istio gateways are not created. | Mandatory |
+| clusters | List of Strings | Names of the clusters to which the `externalGateway` configuration should be applied. | Optional |
+
+
+### Standard QoS Profile Parameters
+These parameters are related to the QoS profile for the slice inter-cluster traffic configured in the standard QoS profile configuration
+YAML file.
+
+| Parameter | Parameter Type | Description | Required |
+|------------|----------------|-------------|----------|
+| apiVersion | String | The KubeSlice Controller API version. A set of resources that are exposed together, along with the version. The value must be `networking.kubeslice.io/v1beta1`. | Mandatory |
+| kind | String | The name of a particular object schema. The value must be `SliceQoSConfig`. | Mandatory |
+| [metadata](#standard-qos-profile-metadata-parameter) | Object | The metadata describes parameters (names and types) and attributes that have been applied. | Mandatory |
+| [spec](#standard-qos-profile-specification-parameters) | Object | The specification of the desired state of an object. | Mandatory |
+
+#### Standard QoS Profile Metadata Parameter
+
+| Parameter | Parameter Type | Description | Required |
+|-----------|----------------|--------------|----------|
+| name | String | It is the name of the QoS profile. | Mandatory |
+| namespace | String | The project namespace on which you apply the slice configuration file. | Mandatory |
+
+#### Standard QoS Profile Specification Parameters
+| Parameter | Parameter Type | Description | Required |
+|-----------|----------------|--------------|----------|
+| queueType | String | It is the slice traffic control queue type. The value must be Hierarchical Token Bucket (HTB). HTB facilitates guaranteed bandwidth for the slice traffic. | Mandatory |
+| priority | Integer | QoS profiles allow traffic management within a slice as well as prioritization across slices. The value range is 0-3. 0 represents the highest priority and 3 represents the lowest priority. | Mandatory |
+| tcType | String | It is the traffic control type. The value must be BANDWIDTH_CONTROL. | Mandatory |
+| bandwidthCeilingKbps | Integer | The maximum bandwidth in Kbps that is allowed for the slice traffic. | Mandatory |
+| bandwidthGuaranteedKbps | Integer | The guaranteed bandwidth in Kbps for the slice traffic. | Mandatory |
+| dscpClass | Alphanumeric | DSCP marking code for the slice inter-cluster traffic. | Mandatory |
+
+
+## Slice Creation
+
+### Slice VPN Key Rotation
+Any new slice created using the KubeSlice Manager or a YAML file renews its SliceGateway certificates every 30 days by default.
+When creating a slice, you can change the rotation interval by modifying the rotationInterval parameter in the slice configuration YAML file.
+This interval can be adjusted, with the minimum being 30 days and the maximum being 90 days. You can also update this parameter after
+the slice is created.
+
+Create the slice configuration YAML file using the following template.
+
+```
+apiVersion: controller.kubeslice.io/v1alpha1
+kind: SliceConfig
+metadata:
+  name: 
+  namespace: kubeslice-
+spec:
+  sliceSubnet: 
+  maxClusters: <2 - 32> #Ex: 5.
By default, the maxClusters value is set to 16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + rotationInterval: 60 # If not provided, by default key rotation interval is 30 days + vpnConfig: + cipher: AES-128-CBC # If not provided, by default cipher is AES-256-CBC + clusters: + - + - + qosProfileDetails: + queueType: HTB + priority: #keep integer values from 0 to 3 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - '*' + isolationEnabled: false #make this true in case you want to enable isolation + allowedNamespaces: + - namespace: kube-system + clusters: + - '*' +``` + +## Renew Slice Gateway Certificates + +You can use the renewBefore option to start the certificate renewal process right away. Using this option, you can start the certificate +renewal procedure before the rotation interval expires. The renewBefore parameter in the YAML file can be set to a suitable value to +ensure that the certificate renewal process begins right away, maintaining your system's security and compliance. + + +:::warning +Before you use the renewBefore parameter to renew the gateway certificates, ensure the slice gateways are operational. +::: + +Use the following template to renew your certificates. + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: + namespace: kubeslice- +spec: + sliceSubnet: + maxClusters: <2 - 32> #Ex: 5. By default, the maxClusters value is set to 16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + renewBefore: 2023-07-18T14:27:08Z #only required if you want to renew your certificates before hand + rotationInterval: 60 # If not provided, by default key rotation interval is 30 days + clusters: + - + - + qosProfileDetails: + queueType: HTB + priority: #keep integer values from 0 to 3 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - '*' + isolationEnabled: false #make this true in case you want to enable isolation + allowedNamespaces: + - namespace: kube-system + clusters: + - '*' + ``` + +## Inter-Cluster Connectivity in KubeSlice +KubeSlice supports cluster connectivity among public and private clusters. Kubeslice extends the support to private clusters +using a Load Balancer. In the slice configuration YAML file, the user can specify the type of gateway connectivity to use +on a cluster. The type can be either NodePort or LoadBalancer. If you do not use LoadBalancer as the gateway connectivity +type, then NodePort is used by default. Like other configuration options in KubeSlice, the * (asterisk) wildcard +character is allowed to indicate all clusters of a slice. + +Add the gateway service type and protocol and the corresponding clusters under the sliceGatewayServiceType in the slice +configuration YAML as illustrated below. + + ``` + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceGatewayServiceType: + - cluster: + type: LoadBalancer # By default, the gateway service type is NodePort. + protocol: TCP # By default. the gateway protocol is UDP. + - cluster: + type: LoadBalancer # By default, the gateway service type is NodePort. + protocol: TCP # By default, the gateway protocol is UDP. 
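+      # Note (illustrative): as described above, the cluster field also accepts the
+      # * (asterisk) wildcard to apply the same gateway service type and protocol to
+      # all clusters connected to the slice.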
+``` + +### Change the Gateway Provider for Inter-Cluster Connectivity + +After the LoadBalancer gateway service type and TCP/UDP gateway protocol are set, they become immutable. +However, if you want to change the gateway service type and protocol, then follow these steps: + +1. Detach the cluster from the slice by removing it from the attached slice configuration , that is under `clusters` and remove +the cluster, its protocol and type under `sliceGatewayServiceType` in the slice configuration YAML. +2. Apply the slice configuration to completely detach it. +3. Add the same cluster under `clusters` and set the `sliceGatewayServiceType` for this cluster. +4. Reapply the slice configuration to connect the cluster to the slice. + + +## Manage Namespaces +This section describes how to onboard namespaces to a slice. In Kubernetes, a namespace is a logical separation of resources within a +cluster, where resources like pods and services are associated with a namespace and are guaranteed to be uniquely +identifiable within it. Namespaces created for application deployments can be onboarded onto a slice to form a micro-network segment. +Once a namespace is bound to a slice, all pods scheduled in the namespace get connected to the slice. + +### Onboard Namespaces +To onboard namespaces, you must add them as part of `applicationNamespaces` in the +[slice configuration](#slice-creation) YAML file. + +In the slice configuration YAML file, add the namespaces using one of these methods: + +- Add namespaces for each worker cluster. +- Add a wildcard * (asterisk) to add all namespaces on the worker clusters. + +:::info +Ensure that the namespace that you want to onboard exists on the worker cluster. +::: + +Add the namespace and the corresponding clusters under the `applicationNamespaces` in the slice +configuration file as illustrated below. +``` +namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - 'worker-cluster-1' + - namespace: bookinfo + clusters: + - '*' +``` +:::info +Adding the asterisk (*) enables the namespace sameness, which means that the namespace +is onboarded on all the worker clusters of that slice. This configuration ensures that all the application +deployments from that namespace are onboarded automatically on to the slice. Enabling namespace sameness +creates that namespace on a worker cluster that does not contain it. Thus, all the worker clusters part +of that slice contains that namespace. +::: + +## Isolate Namespaces +The namespace isolation feature allows you to confine application namespaces to a slice. The +associated namespaces are connected to the slice and are isolated from other namespaces in the cluster. +This forms a secure inter-cluster network segment of pods that are isolated from the rest of the pods in +the clusters. The slice segmentation isolates and protects applications from each other, and reduces +blast radius of failure conditions. + +The following figure illustrates how the namespaces are isolated from different namespaces on a worker +cluster. Namespaces are isolated with respect to sending and receiving data traffic to other namespaces +in a cluster. + +![alt](/img/Namespace-Isolation-OS.png) + + +### Enable Namespace Isolation +To enable the Namespace Isolation on a slice, set the `isolationEnabled` value to **true** in the slice +configuration YAML file and to disable the Namespace Isolation set the value to **false**. +By default, the `isolationEnabled` value is set to **false**. 
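+
+For reference, a minimal `namespaceIsolationProfile` excerpt with isolation enabled might look like the following sketch; the `iperf` and `kube-system` namespaces are placeholders reused from the templates above.
+
+```
+namespaceIsolationProfile:
+  applicationNamespaces:
+  - namespace: iperf
+    clusters:
+    - '*'
+  isolationEnabled: true   # isolate the onboarded namespaces on the slice
+  allowedNamespaces:
+  - namespace: kube-system
+    clusters:
+    - '*'
+```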
+ + +## Slice Istio Gateway Configurations +A slice can be configured to use Istio ingress and egress gateways for East-West traffic(inter-cluster, +egress from one cluster, and ingress into another cluster). Gateways operate at the edges of the clusters. +Ingress gateway act as an entry point and Egress gateway acts as exit point for East-West traffic in a +slice. Ingress/Egress gateway is not a core component of KubeSlice, it is an add-on feature that users can +activate if needed. + +:::info +Currently, Istio gateways are the only type of external gateways supported. +::: + +There are different ways to configure a slice that enables you to route the application traffic. +Below are the scenarios to configure a slice with/without egress and ingress gateways. + +### Scenario 1: Slice Configuration only with Egress Gateways +Create the slice configuration file with Istio egress gateway using the +following template. + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: + namespace: kubeslice- +spec: + sliceSubnet: #Ex: 10.1.0.0/16 + maxClusters: <2 - 32> #Ex: 5. By default, the maxClusters value is set to 16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - + - + qosProfileDetails: + queueType: HTB + priority: 1 #keep integer values from 0 to 3 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - '*' + isolationEnabled: false #make this true in case you want to enable isolation + allowedNamespaces: + - namespace: kube-system + clusters: + - '*' + externalGatewayConfig: + - ingress: + enabled: false + egress: + enabled: true + nsIngress: + enabled: false + gatewayType: istio + clusters: + - + - ingress: + enabled: false + egress: + enabled: false + nsIngress: + enabled: false + gatewayType: istio + clusters: + - +``` + +### Scenario 2: Slice Configuration only with Ingress Gateways +Create the slice configuration file with Istio ingress gateways using +the following template. + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: + namespace: kubeslice- +spec: + sliceSubnet: #Ex: 10.1.0.0/16 + maxClusters: <2 - 32> #Ex: 5. By default, the maxClusters value is set to 16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - + - + qosProfileDetails: + queueType: HTB + priority: 1 #keep integer values from 0 to 3 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - '*' + isolationEnabled: false #make this true in case you want to enable isolation + allowedNamespaces: + - namespace: kube-system + clusters: + - '*' + externalGatewayConfig: + - ingress: + enabled: false + egress: + enabled: false + nsIngress: + enabled: false + gatewayType: istio + clusters: + - + - ingress: + enabled: true + egress: + enabled: false + nsIngress: + enabled: false + gatewayType: istio + clusters: + - +``` + +### Scenario 3: Slice Configuration with Egress and Ingress Gateways +Create the slice configuration file with Istio ingress and egress gateways using the following template. 
+ +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: + namespace: kubeslice- +spec: + sliceSubnet: + maxClusters: <2 - 32> #Ex: 5. By default, the maxClusters value is set to 16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - + - + qosProfileDetails: + queueType: HTB + priority: #keep integer values from 0 to 3 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - '*' + isolationEnabled: false #make this true in case you want to enable isolation + allowedNamespaces: + - namespace: kube-system + clusters: + - '*' + externalGatewayConfig: #enable which gateway we wanted to and on which cluster + - ingress: + enabled: false + egress: + enabled: true + gatewayType: istio + clusters: + - + - ingress: + enabled: true + egress: + enabled: false + gatewayType: istio + clusters: + - +``` + + +## Apply Slice Configuration + +The following information is required. + +|Variable|Description| +|----|----| +|``|The name of the cluster.| +|``|The name of the slice configuration file.| +|``|The project namespace on which you apply the slice configuration file.| + + +Perform these steps: + +1. Switch the context to the **KubeSlice Controller** using the following command: + ``` + kubectx + ``` + +2. Apply the YAML file on the **project namespace** using the following command: + + ``` + kubectl apply -f .yaml -n + ``` + +### Create a Standard QoS Profile +The slice configuration file contains a QoS profile object. To apply a QoS profile to multiple slices, you can create a separate QOS profile YAML file and call it out in other slice configuration. + +#### Create a Standard QoS Profile YAML File +Use the following template to create a standard sliceqosconfig file. + +:::info +To understand more about the configuration parameters, see Standard QoS Profile Parameters. +::: + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceQoSConfig +metadata: + name: profile1 +spec: + queueType: HTB + priority: 1 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2562 + dscpClass: AF11 +``` + +#### Apply the Standard QOS Profile YAML File +Apply the slice-qos-config file using the following command. + +``` +kubectl apply -f -n project-namespace +``` + +:::info +You can only add the filename if you are on the project namespace using the following command. +::: + +``` +kubectl apply slice-qos-config.yaml -n project-namespace +``` + +### Validate the Standard QoS Profile +To validate the standard QoS profile that you created, use the following command: + +``` +kubectl get sliceqosconfigs.controller.kubeslice.io -n project-namespace +``` + +Expected Output + +``` +NAME AGE +profile1 33s +``` + +After applying the slice-qos-config.yaml file, add the profile name in a slice configuration. You must add the name of the QoS +profile for the standardQosProfileName parameter in a slice configuration YAML file as illustrated in the following examples. + +:::info +In a slice configuration YAML file, the standardQosProfileName parameter and the qosProfileDetails object are mutually exclusive. +::: + + +#### Example of using the standard QoS Profile without Istio + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: red +spec: + sliceSubnet: 10.1.0.0/16 + maxClusters: <2 - 32> #Ex: 5. 
By default, the maxClusters value is set to 16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - cluster-1 + - cluster-2 + standardQosProfileName: profile1 +``` + +#### Example of using the standard QoS Profile with Istio + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: red +spec: + sliceSubnet: 10.1.0.0/16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - cluster-1 + - cluster-2 + standardQosProfileName: profile1 + externalGatewayConfig: + - ingress: + enabled: false + egress: + enabled: false + nsIngress: + enabled: false + gatewayType: none + clusters: + - "*" + - ingress: + enabled: true + egress: + enabled: true + nsIngress: + enabled: true + gatewayType: istio + clusters: + - cluster-2 + +``` + +## Validate the Installation + +Validate the slice configuration on the KubeSlice Controller and the worker clusters. + + +### Validate the Slice on the Controller Cluster + +To validate the slice configuration on the controller cluster, use the following command: +``` +kubectl get workersliceconfig -n kubeslice- +``` + +Example +``` +kubectl get workersliceconfig -n kubeslice-avesha +``` + +**Example Output** +``` +NAME AGE +red-dev-worker-cluster-1 45s +red-dev-worker-cluster-2 45s +``` + +To validate the slice gateway on the controller cluster, use the following command: +``` +kubectl get workerslicegateway -n kubeslice- +``` + +Example +``` +kubectl get workerslicegateway -n kubeslice-avesha +``` + +**Example Output** +``` +NAME AGE +red-dev-worker-cluster-1-dev-worker-cluster-2 45s +red-dev-worker-cluster-2-dev-worker-cluster-1 45s +``` + +### Validate the Slice on the Worker Clusters +To validate the slice creation on each of the worker clusters, use the following command: +``` +kubectl get slice -n kubeslice-system +``` + +**Example Output** +``` +NAME AGE +red 45s +``` + +To validate the slice gateway on each of the worker cluster, use the following command: +``` +kubectl get slicegw -n kubeslice-system +``` + +**Example Output** +``` +NAME SUBNET REMOTE SUBNET REMOTE CLUSTER GW STATUS +red-dev-worker-cluster-1-dev-worker-cluster-2 10.1.1.0/24 10.1.2.0/24 dev-worker-cluster-2 +``` + + + +To validate the gateway pods on the worker cluster, use the following command: +``` +k get pods +``` +**Example Output** +``` +NAME READY STATUS RESTARTS AGE +blue-cluster1-cluster2-0-d948856f9-sqztd 3/3 Running 0 43s +blue-cluster1-cluster2-1-65f64b67c8-t975h 3/3 Running 0 43s +forwarder-kernel-g6b67 1/1 Running 0 153m +forwarder-kernel-mv52h 1/1 Running 0 153m +kubeslice-dns-6976b58b5c-kzbgg 1/1 Running 0 153m +kubeslice-netop-bfb55 1/1 Running 0 153m +kubeslice-netop-c4795 1/1 Running 0 153m +kubeslice-operator-7cf497857f-scf4w 2/2 Running 0 79m +nsm-admission-webhook-k8s-747df4b696-j7zh9 1/1 Running 0 153m +nsm-install-crds--1-ncvkl 0/1 Completed 0 153m +nsmgr-tdx2t 2/2 Running 0 153m +nsmgr-xdwm5 2/2 Running 0 153m +registry-k8s-5b7f5986d5-g88wx 1/1 Running 0 153m +vl3-slice-router-blue-c9b5fcb64-9n4qp 2/2 Running 0 2m5s +``` + + +### Validate Namespace Isolation +When the namespace isolation feature is enabled, the namespace isolation policy is +applied to isolate the application namespaces. 
Verify the namespace isolation policy by +running the following command to confirm that the namespace isolation feature is enabled: +``` +kubectl get netpol -n +``` +**** Expected Output**** + +``` +NAME POD-SELECTOR AGE +peacock-bookinfo 15s +``` + +In the above output, `peacock` is the slice name and `bookinfo` is the onboarded +namespace to which the namespace isolation policy is applied. + +:::success +After creating a slice across the worker clusters, it should be noted that all the slice configuration is applied at the +KubeSlice Controller level and the creation process was successful. +::: + +## ServiceExports and ServiceImports +Service Discovery is implemented using the CRDs ServiceExport and ServiceImport. + +If you want the service discoverable across the KubeSlice DNS, you must create a **ServiceExport**. + +ServiceExport CRD is used to configure an existing service on the slice to be exposed and discovered across +the clusters on the slice. On creating a ServiceExport on a cluster, a corresponding ServiceImport is created +on all the clusters that includes the list of endpoints populated from ServiceExport. This CRD contains +endpoints aggregated from all the clusters that expose the same service. The reconciler populates the +DNS entries and ensures traffic to reach the correct clusters and endpoint. + +### Service Export Configuration Parameters + +The following tables describe the configuration parameters used to create Service Export. + +| Parameter | Parameter Type | Description | Required | +| ---------------------------------------------- | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| apiVersion | String | The KubeSlice Controller API version. A set of resources that are exposed together, along with the version. The value must be `networking.kubeslice.io/v1beta1`. | Mandatory | +| kind | String | The name of a particular object schema. The value must be `ServiceExport`. | Mandatory | +| [metadata](#serviceexport-metadata-parameters) | Object | The metadata describes parameters (names and types) and attributes that have been applied. | Mandatory | +| [spec](#serviceexport-spec-parameters) | Object | The specification of the desired state of an object. | Mandatory | + +#### ServiceExport Metadata Parameters + +These parameters are related to metadata for exporting a service, which are configured in the +[ServiceExport YAML file](#create-a-serviceexport-yaml-file). + +| Parameter | Parameter Type | Description | Required | +| --------- | -------------- | ------------------------------- | --------- | +| name | String | The name of the service export. | Mandatory | +| namespace | String | The application namespace. | Mandatory | + +#### ServiceExport Spec Parameters + +These parameters are related to the exporting service specification configured in the +[ServiceExport YAML file](#create-a-serviceexport-yaml-file). + +| Parameter | Parameter Type | Description | Required | +| ---------------------------------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| slice | String | The name of the slice on which the service should be exported. 
| Mandatory | +| aliases | String Array | One or more aliases can be provided for the service being exported from a worker cluster. This parameter is required when the exported services have arbitrary names instead of the `slice.local` name. | Optional | +| [selector](#service-selector-parameters) | Object | The labels used to select the endpoints. | Mandatory | +| [port](#service-ports-parameters) | Object | The details of the port for the service. | Mandatory | + +#### Service Selector Parameters + +These parameters are related to the labels for selecting the endpoints in a service export, which are configured in the +[ServiceExport YAML file](#create-a-serviceexport-yaml-file). + +| Parameter | Parameter Type | Description | Required | +| ----------- | -------------- | ---------------------------------------- | --------- | +| matchLabels | Map | The labels used to select the endpoints. | Mandatory | + +#### Service Ports Parameters + +These parameters contains the details of the port for the export service, which are configured in the +[ServiceExport YAML file](#create-a-serviceexport-yaml-file). + +| Parameter | Parameter Type | Description | Required | +| ------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------ | --------- | +| name | String | It is a unique identifier for the port. **It must be prefixed with `http` for HTTP services or `tcp` for TCP services**. | Mandatory | +| containerPort | Integer | The port number for the service. | Mandatory | +| Protocol | String | The protocol type for the service. For example: TCP. | Mandatory | + +### Create a ServiceExport YAML File +To export a service, you must create a service export `.yaml` file using the following template. + +``` +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: + namespace: +spec: + slice: + aliases: + - + - + selector: + matchLabels: + : + ports: + - name: + containerPort: + protocol: +``` + +### Apply the ServiceExport YAML File +To apply the serviceexport YAML file, use the following command: +``` +kubectl apply -f -n +``` +### Verify ServiceExport +Verify if the service is exported successfully using the following command: +``` +kubectl get serviceexport -n +``` + +### ServiceExport DNS +The service is exported and reachable through KubeSlice DNS at: +``` +...svc.slice.local +``` + +## ServiceImports +When a ServiceExport is deployed, the corresponding ServiceImport is automatically created on each of the +worker clusters that are part of the slice. This populates the necessary DNS entries and ensures your traffic +always reaches the correct cluster and endpoint. + +To verify that the service is imported on other worker clusters, use the following command: +``` +kubectl get serviceimport -n +``` + +:::success +You have successfully deployed and exported a service to your KubeSlice cluster. +::: + +### Limitations +:::warning +A slice configured with the Istio gateway for egress/ingress only supports HTTP services. 
+:::
diff --git a/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-deletion.mdx b/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-deletion.mdx
new file mode 100644
index 00000000..a215296b
--- /dev/null
+++ b/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-deletion.mdx
@@ -0,0 +1,116 @@
+# Delete Slices
+This topic describes the steps required to remove a slice from a cluster configuration. Before a slice can be deleted, the namespaces associated with it must be offboarded. This topic covers the offboarding steps first, followed by the slice deletion. If the application is installed on multiple clusters within a slice, the offboarding steps must be completed on each cluster.
+
+## Delete Namespaces Quotas
+You cannot offboard a namespace that has quotas enforced on it. You must delete the quotas enforced on the namespace to offboard it from the slice.
+
+To delete namespace quotas from your configuration:
+
+1. Open the `slice-resource-configuration.yaml` file.
+2. Locate the namespace quotas you wish to delete.
+3. Delete the relevant section of the YAML file related to the quotas.
+4. Save the changes to the file.
+5. Apply the updated YAML file to refresh the configuration.
+
+## Delete the ServiceExport for Each Application
+If a ServiceExport was created in the application namespace, it must be deleted first.
+
+Deleting the ServiceExport removes the corresponding ServiceImport automatically on all the clusters of the slice.
+
+The following variables are required to delete the service export.
+
+| Variables | Description |
+|-----------|-------------|
+| | The name of the cluster the application is deployed on. |
+| | The name of the service export that you want to delete. |
+| | The namespace the application is deployed on. |
+
+Switch the context to the cluster you deployed the application on:
+
+```
+kubectx 
+```
+
+Use the following command to delete the ServiceExport from the cluster:
+
+```
+kubectl delete serviceexport -n 
+```
+
+## Offboard Namespaces from the Slice Configuration
+
+To offboard the namespace from a slice, delete the `namespace` and the associated
+`clusters` under the `applicationNamespaces` in the slice configuration file as
+illustrated below.
+
+```
+namespaceIsolationProfile:
+  applicationNamespaces:
+  - namespace: iperf
+    clusters:
+    - '*'
+  - namespace: bookinfo
+    clusters:
+    - '*'
+```
+
+For example, in the above slice configuration YAML file, if you want to offboard the
+BookInfo namespace from all clusters, edit the slice configuration YAML file and remove it
+from `applicationNamespaces` as illustrated below.
+
+```
+namespaceIsolationProfile:
+  applicationNamespaces:
+  - namespace: iperf
+    clusters:
+    - '*'
+```
+
+To delete a slice, you must remove all the namespaces and the corresponding clusters. So,
+edit the slice configuration YAML file to remove them. After you remove the namespaces
+and the corresponding clusters, the application namespace configuration looks as
+illustrated below.
+
+```
+namespaceIsolationProfile:
+  applicationNamespaces:
+```
+
+Apply the slice configuration YAML to update the offboarded namespaces.
+
+```
+kubectl apply -f .yaml -n 
+```
+
+:::success
+You have successfully offboarded the namespaces from the slice.
+::: + +## Delete the Slice + +:::caution +This step **must** be completed before uninstalling the Slice Operator. Failing to do so +can result in slices and resources not being cleaned up properly. +::: + +Switch the context to the controller cluster using the following command: + +``` +kubectx +``` + +To delete an individual slice, use the following command: + +``` +kubectl delete sliceconfig -n kubeslice- +``` + +To delete all the slices, use the following command: + +``` +kubectl delete sliceconfig --all -n kubeslice- +``` + +:::success +You have successfully deleted the slice(s). +::: \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-controller-install.mdx b/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-controller-install.mdx new file mode 100644 index 00000000..1e2efa28 --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-controller-install.mdx @@ -0,0 +1,255 @@ +# Install the Kubeslice Controller +The KubeSlice Controller orchestrates the creation and management of +slices on the worker clusters. The KubeSlice Controller components and the +worker cluster components can coexist on a cluster. Hence, the cluster +running the KubeSlice Controller can also be used as a worker cluster. +**We recommend that you run the KubeSlice Controller on a separate +cluster**. + +## KubeSlice Controller Components + +KubeSlice Controller installs the following: + +- KubeSlice Controller specific ClusterResourceDefinitions(CRDs) +- ClusterRole, ServiceAccount and ClusterRoleBinding for KubeSlice Controller +- A Role and RoleBinding for KubeSlice Controller Leader Election +- KubeSlice Controller workload +- KubeSlice Controller API Gateway + +## Create KubeSlice Controller YAML + +To install the KubeSlice Controller on one of the clusters, you need to create a `controller.yaml` file that requires the endpoint of the controller cluster. The endpoint is the location on which you install the KubeSlice Controller. + +### Get the Cluster Endpoint + +Use the following command to get the cluster endpoint: + +``` +kubectl cluster-info +``` + +Example output + +``` +Kubernetes control plane is running at https://aks-controller-cluster-dns-06a5f5da.hcp.westus2.azmk8s.io:443 +addon-http-application-routing-default-http-backend is running at https://aks-controller-cluster-dns-06a5f5da.hcp.westus2.azmk8s.io:443/api/v1/namespaces/kube-system/services/addon-http-application-routing-default-http-backend/proxy +addon-http-application-routing-nginx-ingress is running at http://40.125.122.238:80 http://40.125.122.238:443 +healthmodel-replicaset-service is running at https://aks-controller-cluster-dns-06a5f5da.hcp.westus2.azmk8s.io:443/api/v1/namespaces/kube-system/services/healthmodel-replicaset-service/proxy +CoreDNS is running at https://aks-controller-cluster-dns-06a5f5da.hcp.westus2.azmk8s.io:443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy +Metrics-server is running at https://aks-controller-cluster-dns-06a5f5da.hcp.westus2.azmk8s.io:443/ap +``` + +From the above output, copy the URL for the **Kubernetes control plane** to +add it as the cluster endpoint in the `controller.yaml` file. + +For example, +`https://aks-controller-cluster-dns-06a5f5da.hcp.westus2.azmk8s.io:443`. + +### Controller Configuration Parameters +The following tables describe the configuration parameters used to install the KubeSlice Controller. 
+ +| Parameter | Parameter Type | Description | Required | +| ------------------------------------------------------------ | -------------- | ---------------------------------------------------------------------------- | --------- | +| Kubeslice | Object | The cluster where the KubeSlice Controller is installed. | Mandatory | + +#### KubeSlice Parameters + +This parameter contains the configuration object used in the +[KubeSlice Controller YAML file](#create-controller-yaml). + +| Parameter | Parameter Type | Description | Required | +| ------------------------------------ | -------------- | ---------------------------------------------------------------------- | --------- | +| [controller](#controller-parameters) | Object | The cluster where the KubeSlice Controller is installed. | Mandatory | + +#### Controller Parameters +This object contains the different parameters used in the [KubeSlice Controller YAML file](#create-controller-yaml). + +| Parameter | Parameter Type | Description | Required | +| ------------------ | -------------- | -------------------------------------------------------------------------------------------------- | --------- | +| loglevel | String | The log level of Controller. The default value is `INFO`. The other values are `DEBUG` or `ERROR`. | Optional | +| rbacResourcePrefix | String | The RBAC resource prefix. | Optional | +| projectnsPrefix | String | The project namespace prefix. | Optional | +| endpoint | AlphaNumeric | The URL of the Kubernetes control plane. | Mandatory | + + +### Create Controller YAML +Create the `controller.yaml` file using the following template. + +:::info +To understand more about the parameters, see [Controller Configuration Parameters](#controller-configuration-parameters). +::: + +``` +kubeslice: + controller: + loglevel: info + rbacResourcePrefix: kubeslice-rbac + projectnsPrefix: kubeslice + endpoint: +``` + +### Apply Controller YAML + +``` +helm install kubeslice-controller kubeslice/kubeslice-controller -f .yaml --namespace kubeslice-controller --create-namespace +``` + +**Expected Output** + +``` +NAME: kubeslice-controller +LAST DEPLOYED: Tue May 3 13:12:49 2022 +NAMESPACE: kubeslice-controller +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +kubeslice-controller installation successful! +``` + +### Validate Controller Installation + +Validate the installation of the KubeSlice Controller by checking the status of the pods +that belong to the `kubeslice-controller` namespace using the following command: + +``` +kubectl get pods -n kubeslice-controller +``` + +**Expected Output** + +``` +NAME READY STATUS RESTARTS AGE +kubeslice-controller-manager-74f4d9cb8b-8spsq 2/2 Running 0 31s +``` + +## Create Project Namespace + +A project may represent an individual customer or an organization or a +department within an organization. Each project would have a dedicated +auto-generated namespace, which will ensure that the resources of one +project do not clash with the resources of another project. + +For example, a slice with the same name can exist across multiple projects but with different +configurations. Changes to the slice in one project will not affect the slice in another +project. For more information, see the +[KubeSlice Architecture](/versioned_docs/version-1.1.0/overview/architecture.mdx). + +### Project Namespace Configuration Parameters + +The following tables describe the parameters in the configuration file used to create the project namespace. 
+ +| Parameter | Parameter Type | Description | Required | +| ---------------------------------------- | -------------- | ---------------------------------------------------------------------------------------------- | --------- | +| apiVersion | String | The KubeSlice Controller API version. The value must be `controller.kubeslice.io/v1alpha1`. | Mandatory | +| kind | String | The name of a Mandatory particular object schema. The value must be `Project`. | Mandatory | +| [metadata](#project-metadata-parameters) | Object | The metadata describes the parameters (names and types) and attributes that have been applied. | Mandatory | +| [spec](#project-spec-parameters) | Object | The specification of the desired state of an object. | Mandatory | + +#### Project Metadata Parameters + +These parameters are required for configuring the metadata in the +[project YAML file](#create-project-yaml). + +| Parameter | Parameter Type | Description | Required | +| --------- | -------------- | ---------------------------------------------------------------------------------------------------------- | --------- | +| name | String | The name of the project you are creating. Each project should have a `unique` name. | Mandatory | +| namespace | String | The namespace on which you apply the project configuration file. The value must be `kubeslice-controller`. | Mandatory | + +#### Project Spec Parameters + +| Parameter | Parameter Type | Description | Required | +| --------------------------------------------- | -------------- | ------------------------------------------------ | --------- | +| [serviceAccount](#service-account-parameters) | Object | To specify permissions on the Project namespace. | Mandatory | + +#### Service Account Parameters + +A service account provides an identity for running processes in application pods. It +contains the list of users configured in the +[project YAML file](#create-project-yaml). + +| Parameter | Parameter Type | Description | Required | +| --------- | --------------- | -------------------------------------------------- | -------- | +| readOnly | List of Strings | The user to be created with read-only permission. | Optional | +| readWrite | List of Strings | The user to be created with read-write permission. | Optional | + +### Create Project YAML + +Create a project namespace by creating a `.yaml` file using the following template: + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: Project +metadata: + name: + namespace: kubeslice-controller +spec: + serviceAccount: + readOnly: + - + - + - + readWrite: + - + - + - +``` + +### Apply Project YAML + +Use the `.yaml`file that you have created and apply it to create the project. + +Apply the YAML file: + +``` +kubectl apply -f .yaml -n kubeslice-controller +``` + +### Project Validation + +After applying the YAML file on the project namespace, you can +validate if the project and service accounts are created successfully. 
+ +#### Validate the Project + +Use the following command on the `kubeslice-controller` namespace to get +the list of the project: + +``` +kubectl get project -n kubeslice-controller +``` + +**Expected Output** + +``` +NAME AGE +avesha 30s +``` + +#### Validate the Service Accounts + +To validate the account creation, check the service accounts that belong +to the project namespace using the following command: + +``` +kubectl get sa -n kubeslice- +``` + +Example: + +``` +kubectl get sa -n kubeslice-avesha +``` + +Example Output + +``` +NAME SECRETS AGE +default 1 30s +kubeslice-rbac-ro-user1 1 30s +kubeslice-rbac-rw-user2 1 30s +``` + +:::success +You have successfully installed the KubeSlice Controller and created the project with a dedicated namespace. +::: diff --git a/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-register-worker-clusters.mdx b/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-register-worker-clusters.mdx new file mode 100644 index 00000000..99c72ce7 --- /dev/null +++ b/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-register-worker-clusters.mdx @@ -0,0 +1,631 @@ +# Register Worker Clusters + +To create a slice across your Kubernetes clusters, register worker clusters with the KubeSlice Controller. +This topic describes how to register a worker cluster with the KubeSlice Controller. + +## Cluster Registration Configuration Parameters + +The following tables describe the configuration parameters used to register a worker cluster with the controller cluster. + +| Parameter | Parameter Type | Required | Description | +| ----------------------------------------------------- | -------------- | ------------------------------------------------------------------------------------------- | ----------- | +| apiVersion | String | The KubeSlice Controller API version. The value must be `controller.kubeslice.io/v1alpha1`. | Mandatory | +| kind | String | The name of a particular object schema. The value must be `Cluster`. | Mandatory | +| [metadata](#cluster-registration-metadata-parameters) | Object | The metadata parameters (names and types) and attributes that have been applied. | Mandatory | +| [spec](#cluster-registration-spec-parameters) | Object | The specification of the desired state of an object. | Mandatory | + +#### Cluster Registration Metadata Parameters + +The parameters are related to metadata configured in the +[worker cluster registration YAML file](#create-cluster-registration-yaml). + +| Parameter | Parameter Type | Description | Required | +| --------- | -------------- | ------------------------------------------------------------------------ | --------- | +| name | String | The given name of the cluster. | Mandatory | +| namespace | String | The project namespace on which you apply the project configuration file. | Mandatory | + +#### Cluster Registration Spec Parameters +The parameters are related to the specification configured in the +[worker cluster registration YAML file](#create-cluster-registration-yaml). + +| Parameter | Parameter Type | Description | Required | +| ----------------------------------------------- | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| networkInterface | String | The name of the network interface for egress traffic on a gateway node. The QoS profile for a slice will be applied to this interface. 
Default Interface: **eth0** | Mandatory | +| [clusterProperty](#cluster-property-parameters) | Object | It contains the worker cluster details. | Mandatory | + +#### Cluster Property Parameters +The parameters are related to the cluster property configured in the +[worker cluster registration YAML file](#create-cluster-registration-yaml). + +| Parameter | Parameter Type | Description | Required | +| -------------------------------------- | -------------- | ------------------------------------------------------------------------- | -------- | +| [geoLocation](#geolocation-parameters) | Object | It contains information on the geological location of the worker cluster. | Optional | + +#### Geolocation Parameters + +| Parameter | Parameter Type | Description | Required | +| ------------- | -------------- | -------------------------------------------------------- | -------- | +| cloudProvider | String | The name of your cloud provider. | Optional | +| cloudRegion | String | The cloud provider region where your cluster is located. | Optional | + + +## Create Cluster Registration YAML +You can list multiple clusters in a single registration YAML file and register them with the KubeSlice Controller. + +Create the cluster `registration.yaml` file using the following template: + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: Cluster +metadata: + name: + namespace: kubeslice- +spec: + clusterProperty: + geoLocation: + cloudProvider: "" + cloudRegion: "" + nodeIPs: # Optional + - + - +--- +apiVersion: controller.kubeslice.io/v1alpha1 +kind: Cluster +metadata: + name: + namespace: +spec: + clusterProperty: + geoLocation: + cloudProvider: "" + cloudRegion: "" + nodeIPs: # Optional + - + - +``` + +:::info +The IP addresses are used for inter-cluster tunnel creation (supports IPv4 and IPv6 IP address). If a node IP is not +provided, Kubeslice will auto detect it from the gateway nodes. +::: + + +The following is an example YAML file: + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: Cluster +metadata: + name: worker-cluster-1 + namespace: avesha +spec: + clusterProperty: + geoLocation: + cloudProvider: "AZURE" + cloudRegion: "eastus" + nodeIPs: # Optional + - + - +--- +apiVersion: controller.kubeslice.io/v1alpha1 +kind: Cluster +metadata: + name: worker-cluster-2 + namespace: avesha +spec: + clusterProperty: + geoLocation: + cloudProvider: "AZURE" + cloudRegion: "westus2" + nodeIPs: # Optional + - + - +``` + +The following is an example YAML file only with the mandatory parameters: + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: Cluster +metadata: + name: worker-1 + namespace: kubeslice-avesha +spec: + networkInterface: eth0 + clusterProperty: + geoLocation: + cloudProvider: "GCP" + cloudRegion: "europe-west3" +--- +apiVersion: controller.kubeslice.io/v1alpha1 +kind: Cluster +metadata: + name: worker-2 + namespace: kubeslice-avesha +spec: + networkInterface: eth0 + clusterProperty: + geoLocation: + cloudProvider: "GCP" + cloudRegion: "europe-west3" + +``` + + +### Apply the Cluster Registration YAML File + +The following information is required. + +|Values|Description| +|----|----| +|``|The name of the cluster.| +|``|The namespace of your project.| + +1. Switch the context to the controller cluster. + ``` + kubectx + ``` + +2. Use the following command to apply the `registration.yaml` file. 
+ ``` + kubectl apply -f .yaml -n + ``` + +### Validate the Registered Clusters + +Validate the registered clusters by using the following command: + +``` +kubectl get clusters -n kubeslice- +``` + +**Example** +``` +kubectl get clusters -n kubeslice-avesha +``` + +**Expected Output** + +``` +NAME AGE +aks-worker-2 17s +gke-worker-1 17s +``` + +## Install the Slice Operator + +To install the Slice Operator on the **worker cluster**, you need to first +[register](#create-cluster-registration-yaml) the worker cluster with the KubeSlice Controller +as performed in the previous section. After the worker cluster is registered, you can obtain its secrets +manually from the KubeSlice Controller. These secrets will then need to be used in the `slice-operator.yaml` +file to install the Slice Operator. By completing these steps, the worker cluster can fully +integrate with the KubeSlice Controller and utilize its features. + +:::info +THIS STEP NEEDS TO BE PERFORMED ON ALL WORKER CLUSTERS IN THE CONFIGURATION. +::: + +## Automated Retrieval of Registered Cluster Secrets + +### Script Parameter Descriptions + +|Parameter | Description | +|----|----| +|``|The worker secret name that you get by running this command on the KubeSlice Controller: `kubectl get secrets -n kubeslice-`. For example, `kubeslice-rbac-worker-kind-worker-1-token-s9d96`.| +|``|The given name of the worker cluster.| +|`kubeslice-`|The given name of your project. For example, add `kubeslice-avesha` as the project namespace.| +|``| The worker network interface you get in the above output by doing route lookup on the `EXTERNAL` IP address. For example, add `eth0` as the parameter value.| +|` | awk '{ print $5 }' +``` + +**Example** +``` +ip route get 8.8.8.8 | awk '{ print $5 }' +``` + +**Example Output** + +``` +eth0 +``` + +### Create the secrets.sh File. + +Copy and save the below script as `secrets.sh`. + +``` +# The script returns a kubeconfig for the service account given +# you need to have kubectl on PATH with the context set to the cluster you want to create the config for + +# Cosmetics for the created config +firstWorkerSecretName=$1 + +# cluster name what you given in clusters registration +clusterName=$2 + +# the Namespace and ServiceAccount name that is used for the config +namespace=$3 + +# Need to give correct network interface value like ens160, eth0 etc +networkInterface=$4 + +# kubectl cluster-info of respective worker-cluster +worker_endpoint=$5 + + +###################### +# actual script starts +set -o errexit + +### Fetch Worker cluster Secrets ### +PROJECT_NAMESPACE=$(kubectl get secrets $firstWorkerSecretName -n $namespace -o jsonpath={.data.namespace}) +CONTROLLER_ENDPOINT=$(kubectl get secrets $firstWorkerSecretName -n $namespace -o jsonpath={.data.controllerEndpoint}) +CA_CRT=$(kubectl get secrets $firstWorkerSecretName -n $namespace -o jsonpath='{.data.ca\.crt}') +TOKEN=$(kubectl get secrets $firstWorkerSecretName -n $namespace -o jsonpath={.data.token}) + +echo " +--- +## Base64 encoded secret values from controller cluster +controllerSecret: + namespace: ${PROJECT_NAMESPACE} + endpoint: ${CONTROLLER_ENDPOINT} + ca.crt: ${CA_CRT} + token: ${TOKEN} +cluster: + name: ${clusterName} + endpoint: ${worker_endpoint} +netop: + networkInterface: ${networkInterface} +``` + +### Execute the secrets.sh File + +The output of the script is required for the slice YAML file creation: + +Use the following command to get the secrets of the worker cluster from the **controller cluster**. 
+ +``` +sh secrets.sh +``` + +**Example** +``` +sh secrets.sh kubeslice-rbac-worker-gke-worker-1-token-85tmc gke-worker-1 kubeslice-avesha eth0 https://34.105.95.217 user1 5585799-5d5a-48fa-b805-f4a1ffb110 +``` + +**Example Output** +``` +--- +## Base64 encoded secret values from controller cluster +controllerSecret: + namespace: a3ViZXNsaWNlLWF2ZXNoYQ== + endpoint: aHR0cHM6Ly9DNjgwNTQ5MUNBNTI2MzVFM0YzNEUwQTFDRTRDMkY3RS5ncjcudXMtZWFzdC0xLmVrcy5hbWF6b25hd3MuY29t + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1Ea3lNREEzTURZeE5Gb1hEVE15TURreE56QTNNRFl4TkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTXR2ClpXczQ1RVhicDdZMDJWdEJibUJITHBOTVN4VnpEQzBvWTh5N3R1YmJLWEJFRk9xbGVzU2trYWpmanJ5dGdTZzgKSmE2bXQ0TERjSlQrQ3FmYzMxYWN5RWpHdXlPeFM0Tmt4RGlBdkI0bHRyY2JLMmhnNkJmOWk1RDBTUzV5Rzh3WQpHZVV1bDkyOGlRcnVQeUxTY21wc0s3Y2sxL2FIQjAybDZpNmh5UWhyb1NMWSs5RHhobDdaVTBDMGpwTTQrZG1tClJ1cndVSEUvSGdFQ20yOUZuc3RiOTNZU2NsN3pERG4wdll1SVMrcWZoY2ZTSHR3VFRoZ1JkRlVtWStNUzM2dEYKSXBqNG8xT2xhdXRaMVZkUEJMbGRxUTdxTmlwVGtNbmZhTHA3U0h1QUUxSHV0N0xCOUxSMFAwQXlNU0NwbDV6QQpWWld1VVQ2cmw4TzZDMU5lUmY4Q0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZCNnJkejFRa0NoU2ZtcjBRVU5CeGt3M3pKeFZNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBVWlaYnl2T1N6bVU5YnlWdVFreE9xaVZVdnBVUFlhZDJoZmJsejd0Z1krNWljYnR3SApoeDg1Rmp3WkZvRktkQVJiN1cxYnJnTG5OcXhDZEllWTVKZEkzTlBZRjZrVlc3ZmNMckUwK1BEWXhkc3dDZXdlCk9DSklUSFp3YjNQM05qMUNzTmVVaHg0Um4wd2FiYjlzS0xkUG5Bc0NRNFplTWxaalBSUllIeHg5QVU2ZWNxbXMKZWJWUkRpVTUzekJGd2tpSEhyZHRDTDNQQmxCZENvY0s5dXFQSi9nSXJDYVBrWGl3SlNZb0NrdWt4dm9rZk9xVgpONWd5QXFKL2o4RmkwUEh3UkVIUXNHVnVvajhxQjJzYUd4ZExhcTVWditsQVJpWktEVGhrVXJEeUE0YjB5c0w2CnZDUGtJRUl1ZE5SSGQ3eEJ6ckxPcVhVdXFGREJvTS92TFhaOQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklqWkdZVEpwTlZWb1drcEhaaTF6VWpSa09WaENlRjlCUzNaSU1uQlhaMEZEY2xGTldrOXFOVUppWjJjaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUpyZFdKbGMyeHBZMlV0WVhabGMyaGhJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5elpXTnlaWFF1Ym1GdFpTSTZJbXQxWW1WemJHbGpaUzF5WW1GakxYZHZjbXRsY2kxbmEyVXRkMjl5YTJWeUxURXRkRzlyWlc0dE9EVjBiV01pTENKcmRXSmxjbTVsZEdWekxtbHZMM05sY25acFkyVmhZMk52ZFc1MEwzTmxjblpwWTJVdFlXTmpiM1Z1ZEM1dVlXMWxJam9pYTNWaVpYTnNhV05sTFhKaVlXTXRkMjl5YTJWeUxXZHJaUzEzYjNKclpYSXRNU0lzSW10MVltVnlibVYwWlhNdWFXOHZjMlZ5ZG1salpXRmpZMjkxYm5RdmMyVnlkbWxqWlMxaFkyTnZkVzUwTG5WcFpDSTZJalJpT0RWak5tWmpMV1ZrWXpFdE5HRTNZeTFpT0dWaUxUVmxNVEUyTURrd1lXRmhNaUlzSW5OMVlpSTZJbk41YzNSbGJUcHpaWEoyYVdObFlXTmpiM1Z1ZERwcmRXSmxjMnhwWTJVdFlYWmxjMmhoT210MVltVnpiR2xqWlMxeVltRmpMWGR2Y210bGNpMW5hMlV0ZDI5eWEyVnlMVEVpZlEuRVNkVm1vajA3OXBrSmkzbktGMFhuZE9sa2Q4aGJJdUNaLURpdG1UOGZkVmRkeGhPWEdfcVFSSXZqN05tb1JMUC1xdzJacHliZmgtYWJRVUtNSHFTTGM0aFNBMFhaTTI2UnprWUpRZU9NUE80dGdqdjVQaWNYRkJDbFo0Vk93d2V0WE5Ldi1TLVhiOWVYeHBGQjVDZUozVm0tZjlBV2xXZkMzLUg3aTBoZVlXaWdOSU85SEFFeU43b1RtYXV3WFRRRUg3YVlNOURpZmRreHNaTjZyeTlPZ09TbzJMcUQyc2F2bzNVSU5iX3d6bzdkc2t3T0NuZjdOQk1pMzJOYmZTZ2dBaFdNOUVFM0hyUzFXMWgzZEJLZURMZjEzNXVGZjB4N29NM2lfSUliTzNnZlhYaDVKN3UwS1RIYXNvVFRwVFJhY29NVWkzZ3lnaFN5R0Y0dmVXSzZB +cluster: + name: gke-worker-1 + endpoint: https://34.105.95.217 +netop: + networkInterface: eth0 +``` + +## Manually Retrieve Registered Cluster Secrets + +Create your secrets YAML file using the above output to install the Slice Operator on the worker cluster. 
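The values in this output, and in the secrets described below, are base64 encoded. If you want to sanity-check any of them before creating the YAML file, you can decode them locally. The following is a quick sketch; the secret name and project namespace are the illustrative ones used in the example above.

```
# Decode one field directly from the secret on the controller cluster
# (secret name and project namespace are illustrative)
kubectl get secret kubeslice-rbac-worker-gke-worker-1-token-85tmc \
  -n kubeslice-avesha -o jsonpath='{.data.namespace}' | base64 --decode

# Or decode a value copied from the script output
echo 'a3ViZXNsaWNlLWF2ZXNoYQ==' | base64 --decode    # prints: kubeslice-avesha
```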
+ +After registering the worker cluster with the KubeSlice Controller, you get a secret listed under the +project namespace. The secret contains access information for the Slice Operator on the worker cluster +to communicate with the KubeSlice Controller. + + +1. Switch the context to the controller cluster. + ``` + kubectx + ``` + +2. Get the list of secrets that belong to the project namespace using the following command: + ``` + kubectl get secrets -n kubeslice- + ``` + + **Example** + ``` + kubectl get secrets -n kubeslice-avesha + ``` + + **Example Output** + ``` + NAME TYPE DATA AGE + default-token-q2gp9 kubernetes.io/service-account-token 3 43s + kubeslice-rbac-ro-abc-token-kp9tq kubernetes.io/service-account-token 3 43s + kubeslice-rbac-ro-xyz-token-vcph6 kubernetes.io/service-account-token 3 43s + kubeslice-rbac-rw-abc-token-vkhfb kubernetes.io/service-account-token 3 43s + kubeslice-rbac-rw-xyz-token-rwqr9 kubernetes.io/service-account-token 3 43s + kubeslice-rbac-worker-aks-worker-1-token-hml58 kubernetes.io/service-account-token 5 43s + kubeslice-rbac-worker-aks-worker-2-token-lwzj2 kubernetes.io/service-account-token 5 43s + ``` + + The name of the secret is in this format:` kubeslice-rbac--token`. + For example, the `kubeslice-rbac-worker-aks-worker-1-token-hml58` secret is meant for a worker cluster + that is registered using the name `aks-worker-1`. + +3. Retrieve the details of the secret using the following command: + ``` + kubectl get secrets -o yaml -n kubeslice- + ``` + + **Example Output** + ``` + kubectl get secrets kubeslice-rbac-worker-aks-worker-1-token-hml58 -o yaml -n kubeslice-cisco + apiVersion: v1 + data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUU2VENDQXRHZ0F3SUJBZ0lSQUtkdGsrOTJWQlJaSlJ4K2w5SHFabWN3RFFZSktvWklodmNOQVFFTEJRQXcKRFRFTE1Ba0dBMVVFQXhNQ1kyRXdJQmNOTWpJd016RTFNRGN3TURNM1doZ1BNakExTWpBek1UVXdOekV3TXpkYQpNQTB4Q3pBSkJnTlZCQU1UQW1OaE1JSUNJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBZzhBTUlJQ0NnS0NBZ0VBCm1PZVNWQ3VVY0NNYlJtYkxsREFGSjljZk1ER0hHbWlVcy9PaU1zRm1QelZzcGkveEM0bFhqdStnSGtvNXMwcEEKeWZ6aURMU3cxeFA1RWk0S1NLMmhxZnBjYW04MFViTTV0RTIyaHowd21sOGlRblhES1Ztdm9JOFBqNm9SZHpiNApxcC9sMGFMZHUvOGtrVEhSWVU4MVJyWFJtWEVaUjJUcG9qaGZCYXd6UGxCNWFJall2YVc0djRERFpqRjFaTzNwCjdvNFg5RWZsZmZtd0wyNmlUSWZINjNwU3VBNjlob25RY0NLVjh3SmdDQVdxZHBDT0hJQlBUWjVzQThSWkdja2sKSDlzNXR3U00zbWVBcXEzaGhLVmNRL0YxNTlOLzdDRUZOZytjUTdtYkgxS21ISnEzYSsvYmRJM296L3R3cGRUZwppUUVEVS94UENxNTJHRnNFazNYTEcxSG5GUVpmZWVCNThQNVd6NS9Iak9KbHJwOExUN0RDdHFDK2FuLzNCRTh4ClJwMkRaOW9TT2UyblhyK3FreDRpampndVlKeCtiRHpGM2o0MVRrd1Q3am1teWlGMkZYN25nWGVpVk1nSU8xdisKZjFSdVRiTHpsYlFSNU12a09qUm9vVlBybWRXVVRFNVdaMFp4QnRkS1dtdUdHR2ZMOFljNndQc0NKUldianpORwppb2psZU9lVkg5UDB5S3VkREZPWkFINHp6Vk1CYTAvMHJXKzRnWnhtVzVpRkxaVE1BbEQ5QXhSclhPbFB4Uzg5CnFMY3NCMHNqbDNzeGlzb0lieEJGSUwzeGtRa0szK1RDYktIQmlnR1dBQmxlRGJHYWZHVjRDalpBL0E5MC93QlUKNjJRWUdEZ1FkVDhsN2U1anp0RjZWanBFbXo5T1IrUUphR3FXczFMQ04vRUNBd0VBQWFOQ01FQXdEZ1lEVlIwUApBUUgvQkFRREFnS2tNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHdIUVlEVlIwT0JCWUVGSlp0UHlYZ0wrcXIxRzIyCmtVWllpN0E4U1dPME1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQ0FRQWhXNG9QdVlEazNiamZSdlpYQmllTU5Sa3cKc1FCSjNoL3dCT3c5K0hMZ2lpYzJhNDdtWUJHcDlDV0ZvWTIvKzNDcjdZQThKYzZyY3QrcnlMeDUvQThlOVB3bApXV1VKbnVSalA5d3NoTkk0UUFKalRESEdMYVd4dXphOXFtVUxHUHV0VWpORkcySlJOTWxiV2pxakJzN1I1RFNMClVXazBEVi92dWU4YUhyRTJPRk5wVjFIK2V3VS9xdHFyRlVUWFI4d2NIRXdSNVU0cG9SSU9mOUl2OTdyOTdLY2gKTGFiQ1hJTWhpeVZMcDcvRXpGNVFyNFA0OUNhS0ZvMXhQQm1zcWUrV0lJZzFxbjk1ZlRHRjZmc3dwMHM0TE5pcQpJRnRsS3doR294VFNONXZWMU9EcTFWY3NOY2VRT0FNQVE0WE9zNGxBZURGTXFoaUtVcDJHZlZ2RWZKb2I4QzQ5CnAxcFB1ZWl5dksrc1ZUL2NWS
kpzeUMvcnZBQUZ4ZnFlUytZbFlXajMwdG1pTitSdjRlS3V2c1ZadWZQSGVuNDcKdHVZSUQrNDZEL0x2ZVBBdGcwVVg4U3Qwakx4ZWg5bTFwRzZqSWk4NVlYQ0kzVy9XUms0aXpXMC85NldwZ1BJSAplOWQrRlhOZWY3eXJNWWExbGdGV1V2ajNiNG11aGlHQngvNE9oTWt6R3BUYU1aOWhCMUJyVlE5N1BwM0xkVHhxCnFESEJyZThETXN3MXJ4Uk12azVKNWEvVlMvUlBMS21KK2k2czN1RzlnaFVCSXIyQmVaS0gzdGFKUFpEaEhYNlUKaW1yS3F5KzV2MG9vTTl3OTU0MVlyMFVyUTZPSkpqNzRhc044MjRlVVJueFRCZDFTTVFMSGtYeThMS1FFYUVweQpZWXNrYUpPSys0cFJRREZTeFE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + clusterName: YWtzLXNwb2tlLTE= + controllerEndpoint: aHR0cHM6Ly8xMjcuMC4wLjE6MzY1MTU= + namespace: a3ViZXNsaWNlLWNpc2Nv + token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklsbFBVbUpqTVVGaU5ucDRiSFUwWm1wdVowdHVUREJ5V1RsemFtdEdjR1p5TTNaSk5FSkhVbkpGY2pnaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUpyZFdKbGMyeHBZMlV0WTJselkyOGlMQ0pyZFdKbGNtNWxkR1Z6TG1sdkwzTmxjblpwWTJWaFkyTnZkVzUwTDNObFkzSmxkQzV1WVcxbElqb2lhM1ZpWlhOc2FXTmxMWEppWVdNdGMzQnZhMlV0WVd0ekxYTndiMnRsTFRFdGRHOXJaVzR0YUcxc05UZ2lMQ0pyZFdKbGNtNWxkR1Z6TG1sdkwzTmxjblpwWTJWaFkyTnZkVzUwTDNObGNuWnBZMlV0WVdOamIzVnVkQzV1WVcxbElqb2lhM1ZpWlhOc2FXTmxMWEppWVdNdGMzQnZhMlV0WVd0ekxYTndiMnRsTFRFaUxDSnJkV0psY201bGRHVnpMbWx2TDNObGNuWnBZMlZoWTJOdmRXNTBMM05sY25acFkyVXRZV05qYjNWdWRDNTFhV1FpT2lJd1l6Qm1ZalpoTWkwMlpUZG1MVFEwTkRVdE9UWTBaUzAwTURObVpqZzVPRGN6WldJaUxDSnpkV0lpT2lKemVYTjBaVzA2YzJWeWRtbGpaV0ZqWTI5MWJuUTZhM1ZpWlhOc2FXTmxMV05wYzJOdk9tdDFZbVZ6YkdsalpTMXlZbUZqTFhOd2IydGxMV0ZyY3kxemNHOXJaUzB4SW4wLnVYcnppc0U0ZkF6WklValV4Y2Q5d3dhVE41OGI0TVBlQjhOUUY0RHdWT1pwTzloQ293MU9BaE9Vc0k2cXdJeVNfcGN2T2tKeDBwN1hvTnVOZEZkdld5bThxUExNeThVNFhpZ2ZUeFhURUk4UG1RdGVzT2tRR3F3SFZlTExzME5LYUJ6ZUVaNFAwb2d4UWxXMVVxMzRTWFdJcTUzY3BNZFFJclZVdTBnYmdZMmZ6aUVrNnNlT3dVYkZ3ZGRuSElGUDN3Yi1qMDdTLUZpVG1ES042UmM3ZUFpNGNUZWtyXzNHZ0NOZllrbHdkdEd5czZETjg0ZlFQbVBqMmpUOS16QnRpcHJyS25SSzVPRHppWG4wT0FPQ0M3QlhpamJQeGswcHpNUG1jdDBBUzg0SGxFckd1WlVRUVNNQ2E5SEFwOG12UExYb3FaN1gxREI1bXBsTkxEM3gzaDgwcURZSExJUXZwNGhEUl8wdkpPSFZMaEl5akQ1NTNVUU5FMExhNThXTnhaTUhEZ1haRUtna3dlYXJBVWFXQ3U4VDRUNWdxS2dNMmFJMDU4RjhNWEVremdfWThCcjhJUnIzbmlJaEhnUXp2bHZFdG5ETl93ajNVXzZwUzJmRFZ4eFpDbURXSmlfUW9fWUpoN2JuVlh1bktDaVdqVWFZanQ1SjN4ZDhXcjkydVJBSDY3MzY4dmxjdWpVOTgyU2FjRTJBaks4NkhCR1FITTlfQ2FpZS1RUUgzc2hhUEVXVE5BT3FZWWMtbldUd29GcjJ0bUhFQnJsc0FVejVxaHdwcDVnMEV5dzFuMUdfS05MVWVwSUpCdF9VWjZpQ0NwX3NVbGZqSFdqb0R1OHJmd1ZIX3FudkZVNUViV0lpdnF4WkFVNTNqQmwtQkJELUlTbTJTMEoxWDJn + kind: Secret + metadata: + annotations: + kubernetes.io/service-account.name: kubeslice-rbac-worker-aks-worker-1 + kubernetes.io/service-account.uid: 0c0fb6a2-6e7f-4445-964e-403ff89873eb + creationTimestamp: "2022-03-15T08:48:04Z" + managedFields: + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:data: + .: {} + f:ca.crt: {} + f:namespace: {} + f:token: {} + f:metadata: + f:annotations: + .: {} + f:kubernetes.io/service-account.name: {} + f:kubernetes.io/service-account.uid: {} + f:type: {} + manager: kube-controller-manager + operation: Update + time: "2022-03-15T08:48:04Z" + - apiVersion: v1 + fieldsType: FieldsV1 + fieldsV1: + f:data: + f:clusterName: {} + f:controllerEndpoint: {} + manager: manager + operation: Update + time: "2022-03-15T08:48:34Z" + name: kubeslice-rbac-worker-aks-worker-1-token-hml58 + namespace: kubeslice-cisco + resourceVersion: "21121" + uid: 611af586-b11d-45d4-a6e0-cee3167e837c + type: kubernetes.io/service-account-token + ``` +### Get the Worker Network Interface +Get the name of the network interface on the gateway nodes that is the egress interface for external traffic. 
+ + +Use the following command on the gateway node: + +:::warning +The below command does not work for OpenShift clusters. +::: + +``` +ip route get | awk '{ print $5 }' +``` + +**Example** +``` +ip route get 8.8.8.8 | awk '{ print $5 }' +``` + +**Example Output** +``` +eth0 +``` + +## Slice Operator Configuration Parameters + +The following tables describe the configuration parameters used to install the Slice Operator on the worker cluster. + +| Parameter | Parameter Type | Description | Required | +| ------------------------------------------------------ | -------------- | -------------------------------------------------------------------------------------------------------------------- | --------- | +| [operator](#slice-operator-parameters) | Object | This contains the Slice Operator information. | Optional | +| [controllerSecret](#controller-secret-parameters) | Object | This contains the secrets to connect to controller cluster. | Optional | +| [cluster](#cluster-parameters) | Object | This contains the information about the worker cluster. | Optional | +| [netop](#network-operator) | Object | This contains the parameters related to network operations such as enforcing the QoS profile configured for a slice. | Mandatory | + + +#### Slice Operator Parameters + +These parameters are related to the installation of the Slice Operator and they are configured in the +[Slice Operator YAML file](#create-the-slice-operator-yaml). + +| Parameter | Parameter Type | Description | Required | +| --------- | -------------- | ---------------------------------------------------------------------------- | -------- | +| logLevel | String | The log level can be set to INFO or DEBUG. The default value is set to INFO. | Optional | + +#### Controller Secret Parameters + +These parameters are related to the KubeSlice Controller that are required to install the Slice Operator and they are configured in the +[Slice Operator YAML file](#create-the-slice-operator-yaml). + +| Parameter | Parameter Type | Description | Required | +| --------- | -------------- | ------------------------------------------------------- | --------- | +| namespace | String | The namespace that you get from the secret. | Mandatory | +| endpoint | String | The `controllerEndpoint` that you get from the secret. | Mandatory | +| ca.crt | String | The `ca.crt` that you get from the secret. | Mandatory | +| token | String | The `token` that you get from the secret. | Mandatory | + +#### Cluster Parameters + +These parameters are related to the worker cluster that are configured in the +[Slice Operator YAML file](#create-the-slice-operator-yaml). + +| Parameter | Parameter Type | Description | Required | +| --------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- | +| name | String | The name of the cluster that is registered with the KubeSlice controller. The cluster name must be `unique` in the project. | Mandatory | +| endpoint | String | The control plane's kube-apiserver endpoint of the worker cluster that you get in the output after running the `kubectl cluster-info` command. | Mandatory | + + +#### Network Operator Parameter + +This parameter is related to network operations such as enforcing the QoS profile configured for a slice and is configured in the +[Slice Operator YAML file](#create-the-slice-operator-yaml). 
+ +| Parameter | Parameter Type | Description | Required | +| ---------------- | -------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | +| networkInterface | AlphaNumeric | The name of the network interface on the gateway nodes that is the egress interface for external traffic. It may be obtained by doing a route lookup on a well-known public IP address like the Google public DNS IP address `8.8.8.8`. | Mandatory | + + +## Create the Slice Operator YAML + +Use base64 encoded values of the `namespace`, `endpoint`, `ca.crt`, and the `token` from the above +secrets retrieved by using the `secret.sh` script or the output following the manual +secret retrieval steps to create the `sliceoperator.yaml` file. + +### Create the YAML File + +Copy and paste the below template into a file called `sliceoperator.yaml`. + +``` +## Base64 encoded secret values for the namespace, endpoint, ca.crt and token from the controller cluster +controllerSecret: + namespace: + endpoint: + ca.crt: + token: + +cluster: + name: + endpoint: + +netop: + networkInterface: + +``` + + +### Get the Worker Cluster Control Plane Endpoint + +Get the control plane's `kube-apiserver` endpoint of the **worker cluster** by using the following command: +``` +kubectl cluster-info +``` + +**Expected Output** +``` +https://34.159.173.186 +``` + + +### Example of the Slice Operator YAML +``` +controllerSecret: + namespace: a3ViZXNsaWNlLWF2ZXNoYQ== + endpoint: aHR0cHM6Ly8xNzIuMTguMC4yOjY0NDM= + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXhNREEzTkRBd05sb1hEVE15TURVd056QTNOREF3Tmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTlNaCnlFcnhHQitHZTczTDdwS3dKTDNHR0k2VDdjazkrdmFHbGNkZ1ZuNnA3bWVSdHh1SFZMQmZQYWJlM0JkZjJTaE4KKzZpVEtscVJoN0VPNmltRVdJbkk0UitING9Xb2xkU09uOEQ5b1VVeDcydGkrK211ak5CRmlmbHB1TG85bk9TMQpKUjAxWWdwaC9IMi9mVE0yeVVlRmlJelBFZEdpOXUxM0JzOHZqQnRjUmdsZzdobEE0bm1HSDRMMGtERjkrZHNWCmJBN3N1S1dOZ2ZBeHp5SnRKYkN5SkFFSHdKY0V0aHhOREpwRUZ2UEZRY29FYzl6SHFkdFJMejF0Z05yRUZwU2oKOCtBbHRMdEZVSFd0ZmF0RnJ1M05qOHNWN2JITVM3UTlKeWRjWkFMcDJNM3RkNkFSeGxzSVg4WlJRei9EWm5jcgo5UjhYS0JwUmxnOWMzOTZERDVrQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZEZmVYZC9YT0pSZWpVc2hzMnNPZ0E2RHdYcE5NQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRHU0TVlNb2Rqa2JnQ0hndzZwbQpTWjhKWGMxNStWa0o0clNUbEtpTERlWVlYNit0dzdoek1FZFBieXkwSnprbTl6UWYrbnZlQlpYODhLeVhVTzIxCjI0NWZjanQxeEQ1ZEVMalR1ZlFFYjhqejByVmdMTnFKak5Gdm1OZXhtYzB0aTlXRVlMQUFtcVVoaUlFVVdCUjkKeEp3M2Z0eXI4OWRKZ1pRc2cyUkl5S3h0ajZacUtMRElZVlZKbzFLZjlUOUFFZUw4Qjc2RnJzU1RuQjIrek83OQpveUUrVGRvMFJOQUFOYlF2aVNRR3J2NHRTZlRja2t2c3lDNi9qL1ZCSGRGQ3Zhb3c5WXRtRnZJUkVJdWx4YjZ1Cmp4MjNJc0VuaEovSXlQblZhY3JJM29SNU9WVENIaGY5RGoxRHZTU0dDVkt1RTRjVGo0YjZ0clFrNm1qbUZMZlkKUlhVPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + token: 
ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklrRjVTV0pEVEdObU4yOU1RM3BXZUY5MVNXSnNaVzVzVkhOdllrTlljRXN0ZDFRNU9VbGxlbTE0V2xFaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUpyZFdKbGMyeHBZMlV0WVhabGMyaGhJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5elpXTnlaWFF1Ym1GdFpTSTZJbXQxWW1WemJHbGpaUzF5WW1GakxYZHZjbXRsY2kxcmFXNWtMWGR2Y210bGNpMHhMWFJ2YTJWdUxYZzVjVzV0SWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXpaWEoyYVdObExXRmpZMjkxYm5RdWJtRnRaU0k2SW10MVltVnpiR2xqWlMxeVltRmpMWGR2Y210bGNpMXJhVzVrTFhkdmNtdGxjaTB4SWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXpaWEoyYVdObExXRmpZMjkxYm5RdWRXbGtJam9pTlRsaE16RXpOVE10WVdRek9DMDBaRE5tTFRsaVpHRXRObUZrWlRGak4yTTJPVGszSWl3aWMzVmlJam9pYzNsemRHVnRPbk5sY25acFkyVmhZMk52ZFc1ME9tdDFZbVZ6YkdsalpTMWhkbVZ6YUdFNmEzVmlaWE5zYVdObExYSmlZV010ZDI5eWEyVnlMV3RwYm1RdGQyOXlhMlZ5TFRFaWZRLjBuQzVRR1B5NUxFb1lQV2FfYVpaY1hqM2tjWm9abUNYekE5UWw2U3FwMGRpQ0p2VHAtWmpDa1QzX3k5YVhxTVZKNWJIUnN2SVBELUZKYkZMdVhaV2FmY05INW44ZkNqT25maG5BQ1lJWTZHUEVQQTBDV3ZMMUtNeEpoMjh1aU5HN3dVVUsyTHNhT1BFWUd5OHFZSTN2UEpJR3VvRUlkS0JVYmh4ZUdwTnBFQkM1aDNtVTY2TlV3MUZkWkNSNHBwRWwtYThXbXEtMmNqQUpBSmQ4MDVyQjE1UGM2b1dnc2xqUm5aNVNfeS12clg2dTZ4bVc2UUpYdmQ0bzNMY2QxVnJ2Z2pRczdkSkkyY0I2dnJmVWVPSXFHWWpYM3dKQnBOakFjZlBXeTQ0aG9CY1gtdlFSQ2ZwSndtTDlZX0EyTTRpZG5taE5xZ2dNb1RtaURGZ1NsYy1pZw== + +cluster: + name: cluster-worker-1 + endpoint: https://10.1.75.49:6443 + +netop: + networkInterface: eth0 + +``` + +### Apply the Slice Operator YAML + +The following information is required to apply the YAML file. + +|Parameter|Description| +|----|----| +|``|The name of the cluster.| +|``|The file name with the values.| + + +Apply Slice Operator YAML file: + +1. Switch the context to the **worker cluster** for which you have created the `sliceoperator.yaml` file. + ``` + kubectx + ``` +2. Apply the `sliceoperator.yaml` file on the `kubeslice-system` namespace using the following command: + ``` + helm install kubeslice-worker kubeslice/kubeslice-worker -f .yaml -n kubeslice-system --create-namespace + ``` + +### Validate the Slice Operator Installation +To validate the Slice Operator installation on the worker cluster, check the pods status +that belong to the `kubeslice-system` namespace. 
+ +To check if the pods are running, use the following command: + +``` +kubectl get pods -n kubeslice-system +``` +**Example Output** + +``` +NAME READY STATUS RESTARTS AGE +forwarder-kernel-mlff5 1/1 Running 0 66s +forwarder-kernel-mwcpc 1/1 Running 0 66s +forwarder-kernel-wqj9p 1/1 Running 0 66s +kubeslice-dns-77bb868848-4kflc 1/1 Running 0 65s +kubeslice-install-crds-zs42b 0/1 Completed 0 2m4s +kubeslice-netop-jnsn7 1/1 Running 0 66s +kubeslice-netop-l7www 1/1 Running 0 66s +kubeslice-netop-z9c6c 1/1 Running 0 66s +kubeslice-operator-759f849954-jxcmd 2/2 Running 0 65s +nsm-admission-webhook-k8s-5b99dc99b8-plssn 1/1 Running 0 63s +nsm-install-crds-s9n6b 0/1 Completed 0 105s +nsmgr-jwwhj 2/2 Running 0 65s +nsmgr-mp2ks 2/2 Running 0 65s +nsmgr-qjngs 2/2 Running 0 66s +registry-k8s-b54b6484d-l8kvs 1/1 Running 0 65s +spire-install-clusterid-cr-g48jn 0/1 Completed 0 80s +spire-install-crds-rjnwq 0/1 Completed 0 99s +``` + +Validate the spire installation using the following command: +``` +k get pods -n spire +``` + +**Expected Output** + +``` +NAME READY STATUS RESTARTS AGE +spiffe-csi-driver-5nxw8 2/2 Running 0 2m30s +spire-agent-4nr5v 1/1 Running 0 2m30s +spire-server-0 2/2 Running 0 2m29s +``` + +:::success +You have successfully installed the Slice Operator on the worker cluster. Repeat the above steps to +install the Slice Operator on all the participating worker clusters. +::: + +:::success +You have successfully registered the worker clusters with the KubeSlice Controller. +::: \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/overview/architecture.mdx b/versioned_docs/version-1.2.0/overview/architecture.mdx new file mode 100644 index 00000000..54e77c7a --- /dev/null +++ b/versioned_docs/version-1.2.0/overview/architecture.mdx @@ -0,0 +1,35 @@ +# High-Level Architecture + +## Introduction +KubeSlice provides network services to applications that need secure and highly available connectivity between +multiple clusters. KubeSlice creates a flat overlay network to connect the clusters. The overlay network can be +described as an application slice that provides a slice of connectivity between the pods of an application +running in multiple clusters. It can also be described as an application-specific VPC that spans across +clusters. Pods can connect to the slice overlay network and communicate with each other seamlessly across +cluster boundaries. + +The connections between the clusters are secured by creating encrypted VPN tunnels that provide a safe passage +to inter-cluster traffic. + +KubeSlice can also be used to enable service discovery and reachability across clusters. A Kubernetes service +running in a cluster can be exported over the slice overlay network so that it is discovered and reached by +pods running in other clusters. + +The KubeSlice architecture consists of several components that interact with each other to manage the lifecycle +of the slice overlay network. The diagram below shows the primary components of KubeSlice and the connections +between them. +![alt](/img/Architecture-OS.png) + + +The controller cluster contains the KubeSlice Controller that manages user configuration and orchestrates the +creation of the slice overlay network between multiple worker clusters. It defines and owns a number of CRDs +that are used to store configuration and operational information in the cluster. The CRDs are also used in the +interaction between the controller cluster and the worker clusters. 
The worker clusters connect to the +Kubernetes API server of the controller cluster to fetch configuration that is stored in the custom resource +objects. + +The principal component of the worker clusters is the Slice Operator. It interacts with the controller cluster +and sets up the needed infra for the slice overlay network on the worker cluster. The worker clusters also +contain a DNS server called KubeSlice DNS that is used in inter-cluster service discovery. Users can also +create slices with ingress and egress gateways for East-West (E-W) traffic. The Slice Operator provisions the +gateways and setup routing rules to funnel traffic between the application pods and the gateway pods. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/overview/components.mdx b/versioned_docs/version-1.2.0/overview/components.mdx new file mode 100644 index 00000000..dabdca7f --- /dev/null +++ b/versioned_docs/version-1.2.0/overview/components.mdx @@ -0,0 +1,134 @@ +# KubeSlice Components +KubeSlice consists of the following main components deployed in one +or more gateway nodes that work in conjunction to securely connect +workloads across multiple Kubernetes clusters located in data centers, +public clouds, or edge locations: + +* [KubeSlice Controller](#kubeslice-controller) +* [Slice Operator](#slice-operator) +* [Slice VPN Gateways](#slice-vpn-gateways) +* [Slice Router](#slice-router) +* [Slice Istio Components](#slice-istio-components) +* [Slice Gateway Edge](#slice-gateway-edge) +* [KubeSlice DNS](#kubeslice-dns) +* [Network Service Mesh Control and Data Plane](#network-service-mesh-control-and-data-plane) +* [NSM Kernel Forwarder](#nsm-kernel-forwarder) +* [Spire Server and Agents](#spire-server-and-agents) +* [NetOps](#netops) + + +## KubeSlice Controller +The KubeSlice Controller is installed in one of the clusters and +provides a central configuration management system, for slices across multiple clusters. + +We recommend that you install the KubeSlice Controller on a separate cluster. + +The KubeSlice Controller provides: + +A communication interface through which Slice Operators on multiple clusters can connect to +it. The slice configuration that includes slice VPN gateway, service discovery with service +import/export, and ingress/egress gateway related parameters are relayed to the Slice +Operators on registered clusters. + +Creation and management of cryptographic certificates for secure slice VPN gateways. + +APIs through the API Gateway for the KubeSlice Manager to create and manage the application slices. + +## Slice Operator +The Slice Operator, also known as a Worker Operator is a Kubernetes Operator component +that manages the life-cycle of the KubeSlice related Custom Resource Definitions (CRDs). + +The Slice Operator performs the following functions: + +- Interacts with the KubeSlice controller to receive slice configuration updates. +- Reconciliation of slice resources in the cluster KubeSlice Controller. +- Creation of slice components required for Slice VPN Gateway connectivity and Service Discovery. +- Auto insertion and deletion of slice components to accommodate topology changes. +- Lifecycle management of slices, slice configurations, slice status, and slice telemetry. +- Lifecycle management of network policies and monitoring of configuration drift to generate slice events and alerts. 
+- Management of the association of slices with namespaces +- Interaction with the KubeSlice Controller to: + - Facilitate network policy and service discovery across the slice. + - Import/export Istio services to/from the other clusters attached to the slice. + - Implement Role-Based Access Control (RBAC) for managing the slice components. + +## Slice VPN Gateways +The Slice VPN Gateway is a slice network service component that provides a secure VPN +tunnel between multiple clusters that are a part of the slice configuration. + +The Slice Operator performs the following life-cycle functions for Slice VPN Gateways: + +- Interacts with the KubeSlice controller to receive configuration related to slice gateways. +- Maintains cryptographic keys and certificates needed for secure VPN tunnels. +- Deploys and reconciles slice VPN gateway pods. +- Periodically monitors the status of the gateway pods. +- Continuously interacts with Slice VPN Gateways for status, keys/certificates, and configuration changes. + +KubeSlice Controller manages the VPN gateway pairs for the attached clusters, and creates the +keys & configurations required for the operation. + +## Slice Router +A slice router is a virtual layer 3 device that sets up the routing and forwarding rules +in the slice overlay network. A minimum of one slice router pod is provisioned per slice +on a cluster. + +The slice operator manages the life cycle of the slice router deployment and monitors its +status periodically. + +## Slice Istio Components +KubeSlice provides the option of setting up ingress and egress gateways for a slice using +Istio Service Mesh resources. Ingress/Egress gateway is not a core component of KubeSlice, +it is an add-on feature that users can activate if needed. The Istio components must be +installed in the cluster before the KubeSlice components are installed or they can be +installed as a part of the KubeSlice installation itself. + +Slice Ingress/Egress Gateways are used for internal East-West traffic (inter-cluster, egress from one cluster, and ingress into another cluster) and Slice North-South Ingress Gateway for external traffic. + +## Slice Gateway Edge +The Slice Gateway Edge comes into play when the configured gateway connectivity type is LoadBalancer for a cluster of a slice. A network load +balancer connects the cluster to the other clusters in the slice. The Slice Gateway Edge is programmed by the Slice Operator to distribute the +external traffic coming in through the Load Balancer to the right slice gateway pods. Based on its communication with the Slice Operator, it +sets up NAT rules to filter the traffic, and forward to the appropriate slice gateway VPN pod. + + +## KubeSlice DNS +The KubeSlice DNS is a DNS server that is used to resolve service names exposed on application slices. + +The Slice Operator manages the DNS entries for all the services exposed on the slice overlay network(s). + +When a service is exported on the slice by installing a ServiceExport object, a DNS entry +is created in the KubeSlice DNS server in all the clusters that are part of the slice. + +## Network Service Mesh Control and Data Plane +The Network Service Mesh (NSM) component sets up the KubeSlice data plane and connects application pods to the +slice overlay network. 
It consists of: + +- NSM Control plane daemon set +- NSM Data plane daemon set +- NSM admission webhook controller pod +- NSM CRD objects that facilitate setting up the slice overlay network + +## NSM Kernel Forwarder +The NSM kernel forwarder is a `DaemonSet` that works with the NSM manager to set up +the data plane for the slice overlay network within a cluster. Its functions include: +* Inserting the NSM interfaces in the application pods and the vL3 slice router pod +* Configuring the interfaces +* Setting the operational state of the interfaces +* Setting up the routing table in the pods + +## Spire Server and Agents +The NSM components communicate with each other over gRPC (Google version of Remote Procedure Calls) +to set up and maintain the slice overlay network. Spire, a reference implementation of the SPIFFE +software identity management standard, is used to establish the NSM control plane and +the data plane workload identities. The Spire implementation on Kubernetes contains a +server as a `StatefulSet` and an agent that runs on every cluster node +as a `DaemonSet`. The NSM pods communicate with the node local Spire agent using the +Workload API to receive X.509 SVIDs (SPIFFE Verifiable Identity Document). The SVIDs +and the Spire trust bundle establish workload identities for authentication +and authorization. The SVIDs and the Spire trust bundle are also used in secure gRPC +with Transport Layer Security (TLS) between the NSM components to ensure confidentiality and integrity. + +## NetOps +Each slice in a cluster is associated with a QoS profile for bandwidth control across the +clusters. The QoS profile is applied to the external interface of the VPN gateway nodes. +NetOps pods configure and enforce the QoS profile for a slice on a cluster. diff --git a/versioned_docs/version-1.2.0/overview/features.mdx b/versioned_docs/version-1.2.0/overview/features.mdx new file mode 100644 index 00000000..e5917053 --- /dev/null +++ b/versioned_docs/version-1.2.0/overview/features.mdx @@ -0,0 +1,115 @@ +# Key Features + +KubeSlice makes Kubernetes simple at scale for multi cluster/multi-tenant/ +multi-region/multi-cloud application deployments. It is a platform that combines +network, application, Kubernetes, and deployment services to bring uniformity +across clusters for multi-cluster applications, thus dramatically increasing +development velocity for platform and product teams. + +KubeSlice bundles the following services into its architecture: + +[Comment]: + +| Services | Feature | Description | +|----|----|----| +| Application | Namespace sameness | Allows the freedom to deploy applications across clusters with namespace parity.| +| | Service exports and Service imports | Automatic service imports and exports allow service discovery across cluster boundaries. | +| | Isolation | Allows isolation by association of application namespaces with a slice.- | +| Network | East-West cluster communication | Enabled by automatically creating tunnels between clusters, on a per slice basis, establishing an overlay network enabling service-to-service communication as a flat Layer 3 network. KubeSlice can also be configured to utilize East-West ingress and egress gateways.| +| | Remove IP Addressing Complexity- | KubeSlice solves the complex problem of overlapping IP addressing between clusters across cloud providers, data centers, and edge locations. 
The overlay network is configured with a non-overlapping RFC1918 address space removing overlapping CNI CIDR concerns.| +| | QoS Profiling | Slices in a cluster have a QoS profile defined per slice, allowing granular traffic control between clusters. | +| Security | Cross cluster Layer 3 secure connectivity | KubeSlice gateway nodes establish encrypted VPN tunnels between all registered clusters. | +| | Network Policy Management | KubeSlice provides Network Policies that are normalized across all clusters. The clusters registered in the slice configuration can be tied to a slice forming network segmentation at Layer 3 that allow/deny traffic to applications external from the slice application and allowed namespaces. | +| | Multi-Tenancy | KubeSlice manages namespaces that are associated with a slice, creating application isolation and reducing the blast radius. | | + +## Multi-Cluster Support + +### Application Connectivity +Enables application connectivity across clusters/clouds +with zero touch provisioning. + +### Virtual Overlay +Constructs virtual clusters across physical clusters by establishing +an overlay network. + +### Traffic Prioritization +Guarantees the ability to dependably run high-priority applications +and traffic with QoS configuration for inter-cluster network connections. + + +## Multi-Tenancy Support + +### App Segmentation +Define QoS profiles on a per slice basis; thus providing the ability +to isolate microservices on one Slice from another. + +### Network Policies +Auto deploys network policies across clusters participating in the slice +configuration, marshaling configuration drift. + +## Namespace sameness + +### Multi-cluster namespace +Ensures namespace sameness on a Slice across +multi-cluster/cloud. + +### Multi-cluster +Enables the aggregation of a group of namespaces across clusters +thus allowing segmentation for multi-tenancy. + +### Access Controls +RBAC functionality is propagated across all clusters participating in +the Slice configuration. + +## Service Discovery + +### Auto discovery of services +Enables automatic service discovery across clusters +participating in the Slice configuration. + +### DNS Entry +When a service is exported on the Slice by installing a Service Export object, +the Slice Operator creates a DNS entry for the service in the Slice DNS and a similar entry +is created in the other clusters that are a part of the Slice. + +## IP Address Management + +IP Address Management (IPAM) is a method of planning, tracking, and managing the IP address space +used in a network. On the KubeSlice Manager, the **Maximum Clusters** parameter of the slice creation +page helps with IPAM. The corresponding YAML parameter is `maxClusters`. + +This parameter sets the maximum number of worker clusters that you can connect to a slice. The +maximum number of worker clusters affects the subnet calculation of a worker cluster. The subnet +in turn determines the number of host addresses a worker cluster gets for its application pods. + +For example, if the slice subnet is 10.1.0.0/16 and the maximum number of clusters is 16, then each +cluster gets a subnet of 10.1.x.0/20, where x = 0, 16, or 32. + +This is a significant parameter that can only be configured during slice creation. If this parameter is +not set, it defaults to 16. + +:::caution +The subnet of a worker cluster determines the number of host addresses that are available to that +cluster. Hence, you must be prudent and cautious when you set the maximum worker clusters. 
The value +of the maximum number of clusters set remains constant for the entire life of a slice, and it is +immutable after a slice is created. +::: + +The fewer the clusters, the more IP addresses are available for the application pods +of every worker cluster that is part of a slice. By default, the value of the **Maximum Clusters** +parameter is 16. The supported value range is 2 to 32 clusters. + + +## Connectivity to Clusters in Private VPCs + +In addition to connecting public clusters, KubeSlice can also be used to connect clusters that are enclosed within a private VPC. +Such clusters are accessed through network or application Load Balancer that are provisioned and managed by the cloud provider. +KubeSlice relies on network Load Balancers to setup the inter-cluster connectivity to private clusters. + +The following picture illustrates the inter-cluster connectivity set up by KubeSlice using a network Load Balancer (LB). + +![loadbalancer](/images/version1.2.0/key-features/support-for-private-clusters.png) + +Users can specify the type of connectivity for a cluster. If the cluster is in a private VPC, the user can utilize the `LoadBalancer` +connectivity type to connect it to other clusters. The default value is `NodePort`. The user can also configure the gateway protocol +while configuring the gateway type. The value can be TCP or UDP. The default value is UDP. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/overview/overview.mdx b/versioned_docs/version-1.2.0/overview/overview.mdx new file mode 100644 index 00000000..87abb508 --- /dev/null +++ b/versioned_docs/version-1.2.0/overview/overview.mdx @@ -0,0 +1,47 @@ +# What is KubeSlice? +Managing large-scale applications and infrastructure can be a daunting task in the +enterprise world. With Kubernetes, orchestrating applications became simpler, but +orchestrating and scaling your microservices based applications has become more difficult as +you grow from single cluster to multi-cluster deployments. + +As a leading innovator in Kubernetes-based applications, Avesha has developed breakthrough +patented products, **KubeSlice** and **Smart Scaler**, that empower organizations to manage and +scale their applications efficiently and cost-effectively. + +KubeSlice offers a simpler solution to the complex challenges of running multi-cluster +applications at scale by creating a Kubernetes operator called a `Slice`. This operator creates +a virtual cluster, across a fleet of clusters, that serves as a logical application boundary, +enabling pods and services to communicate with each other seamlessly. The Slice transcends +geographic borders, allowing applications to be deployed anywhere, whether on any cluster, +cloud, edge, or K8s distribution. + +Moreover, KubeSlice offers NIST-compliant VPN tunnels for secure and low-latency inter-cluster +networking via the east/west path. The Slice serves as a single security domain across +multi-cluster and multiple cloud providers. The KubeSlice Manager UI simplifies the management +of a fleet of clusters with its user-friendly features. With resource optimization, namespace +isolation, RBAC management, and node affinity features available for each Slice virtual cluster, +organizations can easily manage and scale their applications while prioritizing security. + +## Why KubeSlice? 
+As enterprises expand application architectures to span multiple clusters located in data centers or cloud +provider regions, or across cloud providers, Kubernetes clusters need the ability to fully integrate +connectivity and pod-to-pod communications with namespace propagation across clusters. + +KubeSlice enables creating multiple logical slices in a single cluster or group of clusters regardless of +their physical location. Existing intra-cluster communication remains local to the cluster utilizing each +pod's CNI interface. KubeSlice provides isolation of network traffic between clusters by creating an overlay +network for inter-cluster communication. + +KubeSlice accomplishes this by adding a second interface to the pod allowing local traffic to remain on the +CNI interface, and traffic bound for external clusters route over the overlay network to its destination pod +making KubeSlice CNI agnostic. + +KubeSlice solves the complex problem of overlapping IP addressing between cloud providers, data centers, and +edge locations. The overlay network is configured with a non-overlapping RFC1918 private network CIDR address +space. As KubeSlice creates network isolation, KubeSlice also takes the responsibility of allocating subnets +that are configurable based on the number of pods allocated to have inter-cluster reachability. In addition, +the same RFC1918 address can be configured across multiple slices created on the same cluster or cluster sets +further simplifying IP address management. + +KubeSlice offers services that dramatically increase application velocity for platform and product teams to +achieve uniformity for applications in multi-cluster environments. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/playground/sandbox.mdx b/versioned_docs/version-1.2.0/playground/sandbox.mdx new file mode 100644 index 00000000..8386384b --- /dev/null +++ b/versioned_docs/version-1.2.0/playground/sandbox.mdx @@ -0,0 +1,54 @@ + +# Sandbox + +Avesha provides a **four-hour free access** (for 240 minutes) to KubeSlice on +our sandbox environment. + +Get a hands-on experience of the product by registering at https://community.aveshalabs.io/. +The process to register is easy and seamless. + +## Register for Sandbox + +To register: + +1. Go to https://community.aveshalabs.io/. + + ![alt](/img/community-register-free.png) + +2. On the registration page, for **Company Name**, enter your organization name. +3. For **Name**, enter your name. +4. For **Email**, enter your email address. +5. Click **Terms and Conditions** to read them. +6. After you have read the terms and conditions, select the check box to confirm that + you agree with them. +7. Click **Register**. + +You will receive an email confirming your registration. + +## Create Clusters and Slices +The response email that you receive after you register contains the details about +how to access the KubeSlice on the sandbox environment. + +Using the shared credentials, try our KubeSlice seamlessly for four hours using the kubeslice-cli tool! + +:::info +You will receive an expiration-reminder email an hour (60 minutes) before your access expires. +::: + +Now with the access to your sandbox virtual machine (VM), a few tasks that you can do are: + +* Run `kubeslice-cli install --profile=full-demo` to get a full environment setup for you. This setup + provides you three kind clusters. You can use this setup to explore and learn about KubeSlice without installing anything. 
+ To know more, see [kubeslice-cli](/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/install-kubeslice-cli.mdx) +* If you want to practice installing Kubeslice, you can do the + [minimal-demo](/versioned_docs/version-1.2.0/tutorials/kubeslice-cli-tutorials/kubeslice-cli-demo.mdx). +* If you are a contributor, you can install a topology, clone your repo to the sandbox VM, and + try out your modifications. + + :::warning + Remember that the sandbox environment expires in **four hours**, which will erase all the local content if you don't save it. + Be sure to save your work on your local system. + ::: + +For any support/feedback, write to us at support@avesha.io or join the **KubeSlice Community** (#kubeslice) channel +on the **Kubernetes Slack** (kubernetes.slack.com) workspace. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/reference/reference-worker-cluster-secrets-script.mdx b/versioned_docs/version-1.2.0/reference/reference-worker-cluster-secrets-script.mdx new file mode 100644 index 00000000..d0b5b136 --- /dev/null +++ b/versioned_docs/version-1.2.0/reference/reference-worker-cluster-secrets-script.mdx @@ -0,0 +1,135 @@ +# Worker Cluster Secrets Retrieval Script + +## Introduction +To install the Slice Operator on the worker cluster, this topic outlines a step-by-step process +for creating a shell script that retrieves the worker cluster's secrets from the controller +cluster. Once the script has been executed, copy the output and use it to create the secrets +YAML file, which is then used to install the Slice Operator on the worker cluster. + +## Retrieve Worker Cluster Secrets + +To run the script to get the secrets of the worker cluster from the controller cluster: + +### Create Script +1. Copy and paste the given script into a file named `secrets.sh` + +``` +# The script returns a kubeconfig for the service account given +# you need to have kubectl on PATH with the context set to the cluster you want to create the config for + +# Cosmetics for the created config +firstWorkerSecretName=$1 + +# cluster name what you given in clusters registration +clusterName=$2 + +# the Namespace and ServiceAccount name that is used for the config +namespace=$3 + +# Need to give correct network interface value like ens160, eth0 etc +networkInterface=$4 + +# kubectl cluster-info of respective worker-cluster +worker_endpoint=$5 + +# Enter your username getting through mail +dusername=$6 +# Enter your password getting through mail +dpassword=$7 + +###################### +# actual script starts +set -o errexit + +### Fetch Worker cluster Secrets ### +PROJECT_NAMESPACE=$(kubectl get secrets $firstWorkerSecretName -n $namespace -o jsonpath={.data.namespace}) +CONTROLLER_ENDPOINT=$(kubectl get secrets $firstWorkerSecretName -n $namespace -o jsonpath={.data.controllerEndpoint}) +CA_CRT=$(kubectl get secrets $firstWorkerSecretName -n $namespace -o jsonpath='{.data.ca\.crt}') +TOKEN=$(kubectl get secrets $firstWorkerSecretName -n $namespace -o jsonpath={.data.token}) + +echo " +--- +## Base64 encoded secret values from controller cluster +controllerSecret: + namespace: ${PROJECT_NAMESPACE} + endpoint: ${CONTROLLER_ENDPOINT} + ca.crt: ${CA_CRT} + token: ${TOKEN} +cluster: + name: ${clusterName} + endpoint: ${worker_endpoint} +netop: + networkInterface: ${networkInterface} +imagePullSecrets: + repository: https://index.docker.io/v1/ + username: ${kubesliceRegistrationUsername} + password: ${kubesliceRegistrationPassword} + email: ${kubesliceRegistrationEmail} +``` +The 
following information is required to run the script.
+
+|Parameter|Description|
+|----|----|
+|``|The worker secret name that you get by running this command on the KubeSlice Controller: `kubectl get secrets -n kubeslice-`. For example, `kubeslice-rbac-worker-kind-worker-1-token-s9d96`.|
+|``|The given name of the worker cluster.|
+|`kubeslice-`|The given name of your project. For example, add `kubeslice-avesha` as the project namespace.|
+|``| The worker network interface you get in the above output by doing route lookup on the `8.8.8.8` IP address. For example, add `eth0` as the parameter value.|
+|``| The username you received in the KubeSlice Registration email.|
+|``| The password you received in the KubeSlice Registration email.|
+|``| The email address you used in the KubeSlice Registration process.|
+
+2. Open the terminal where you have `kubectl` on PATH and set the context to the cluster for which you want to create the config.
+
+### Retrieve Network Interface
+3. Get the name of the network interface on the gateway nodes that is the egress interface for external traffic.
+:::warning
+The below command does not work for OpenShift clusters.
+:::
+Use the following command on the gateway node:
+```
+ip route get 8.8.8.8 | awk '{ print $5 }'
+```
+
+Example
+```
+ip route get 8.8.8.8 | awk '{ print $5 }'
+```
+**Example Output**
+```
+eth0
+```
+### Execute Script
+4. Run the command `sh secrets.sh ` by replacing the placeholders with actual values.
+
+   For example:
+
+   ```
+   sh secrets.sh kubeslice-rbac-worker-kind-worker-1-token-s9d96 kind-worker-1 kubeslice-avesha ens160 https://10.0.0.101 user1 abcdefg-12345
+   ```
+   Note: The worker-secret-name is the name of the worker secret that can be obtained by running the command `kubectl get secrets -n kubeslice-` on the KubeSlice Controller.
+
+5. The script fetches the worker cluster secrets and prints the output in YAML format.
+ +**Example Output** +``` +--- +## Base64 encoded secret values from controller cluster +controllerSecret: + namespace: a3ViZXNsaWNlLWF2ZXNoYQ== + endpoint: aHR0cHM6Ly9DNjgwNTQ5MUNBNTI2MzVFM0YzNEUwQTFDRTRDMkY3RS5ncjcudXMtZWFzdC0xLmVrcy5hbWF6b25hd3MuY29t + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1Ea3lNREEzTURZeE5Gb1hEVE15TURreE56QTNNRFl4TkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTXR2ClpXczQ1RVhicDdZMDJWdEJibUJITHBOTVN4VnpEQzBvWTh5N3R1YmJLWEJFRk9xbGVzU2trYWpmanJ5dGdTZzgKSmE2bXQ0TERjSlQrQ3FmYzMxYWN5RWpHdXlPeFM0Tmt4RGlBdkI0bHRyY2JLMmhnNkJmOWk1RDBTUzV5Rzh3WQpHZVV1bDkyOGlRcnVQeUxTY21wc0s3Y2sxL2FIQjAybDZpNmh5UWhyb1NMWSs5RHhobDdaVTBDMGpwTTQrZG1tClJ1cndVSEUvSGdFQ20yOUZuc3RiOTNZU2NsN3pERG4wdll1SVMrcWZoY2ZTSHR3VFRoZ1JkRlVtWStNUzM2dEYKSXBqNG8xT2xhdXRaMVZkUEJMbGRxUTdxTmlwVGtNbmZhTHA3U0h1QUUxSHV0N0xCOUxSMFAwQXlNU0NwbDV6QQpWWld1VVQ2cmw4TzZDMU5lUmY4Q0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZCNnJkejFRa0NoU2ZtcjBRVU5CeGt3M3pKeFZNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFBVWlaYnl2T1N6bVU5YnlWdVFreE9xaVZVdnBVUFlhZDJoZmJsejd0Z1krNWljYnR3SApoeDg1Rmp3WkZvRktkQVJiN1cxYnJnTG5OcXhDZEllWTVKZEkzTlBZRjZrVlc3ZmNMckUwK1BEWXhkc3dDZXdlCk9DSklUSFp3YjNQM05qMUNzTmVVaHg0Um4wd2FiYjlzS0xkUG5Bc0NRNFplTWxaalBSUllIeHg5QVU2ZWNxbXMKZWJWUkRpVTUzekJGd2tpSEhyZHRDTDNQQmxCZENvY0s5dXFQSi9nSXJDYVBrWGl3SlNZb0NrdWt4dm9rZk9xVgpONWd5QXFKL2o4RmkwUEh3UkVIUXNHVnVvajhxQjJzYUd4ZExhcTVWditsQVJpWktEVGhrVXJEeUE0YjB5c0w2CnZDUGtJRUl1ZE5SSGQ3eEJ6ckxPcVhVdXFGREJvTS92TFhaOQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNklqWkdZVEpwTlZWb1drcEhaaTF6VWpSa09WaENlRjlCUzNaSU1uQlhaMEZEY2xGTldrOXFOVUppWjJjaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUpyZFdKbGMyeHBZMlV0WVhabGMyaGhJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5elpXTnlaWFF1Ym1GdFpTSTZJbXQxWW1WemJHbGpaUzF5WW1GakxYZHZjbXRsY2kxbmEyVXRkMjl5YTJWeUxURXRkRzlyWlc0dE9EVjBiV01pTENKcmRXSmxjbTVsZEdWekxtbHZMM05sY25acFkyVmhZMk52ZFc1MEwzTmxjblpwWTJVdFlXTmpiM1Z1ZEM1dVlXMWxJam9pYTNWaVpYTnNhV05sTFhKaVlXTXRkMjl5YTJWeUxXZHJaUzEzYjNKclpYSXRNU0lzSW10MVltVnlibVYwWlhNdWFXOHZjMlZ5ZG1salpXRmpZMjkxYm5RdmMyVnlkbWxqWlMxaFkyTnZkVzUwTG5WcFpDSTZJalJpT0RWak5tWmpMV1ZrWXpFdE5HRTNZeTFpT0dWaUxUVmxNVEUyTURrd1lXRmhNaUlzSW5OMVlpSTZJbk41YzNSbGJUcHpaWEoyYVdObFlXTmpiM1Z1ZERwcmRXSmxjMnhwWTJVdFlYWmxjMmhoT210MVltVnpiR2xqWlMxeVltRmpMWGR2Y210bGNpMW5hMlV0ZDI5eWEyVnlMVEVpZlEuRVNkVm1vajA3OXBrSmkzbktGMFhuZE9sa2Q4aGJJdUNaLURpdG1UOGZkVmRkeGhPWEdfcVFSSXZqN05tb1JMUC1xdzJacHliZmgtYWJRVUtNSHFTTGM0aFNBMFhaTTI2UnprWUpRZU9NUE80dGdqdjVQaWNYRkJDbFo0Vk93d2V0WE5Ldi1TLVhiOWVYeHBGQjVDZUozVm0tZjlBV2xXZkMzLUg3aTBoZVlXaWdOSU85SEFFeU43b1RtYXV3WFRRRUg3YVlNOURpZmRreHNaTjZyeTlPZ09TbzJMcUQyc2F2bzNVSU5iX3d6bzdkc2t3T0NuZjdOQk1pMzJOYmZTZ2dBaFdNOUVFM0hyUzFXMWgzZEJLZURMZjEzNXVGZjB4N29NM2lfSUliTzNnZlhYaDVKN3UwS1RIYXNvVFRwVFJhY29NVWkzZ3lnaFN5R0Y0dmVXSzZB +cluster: + name: gke-worker-1 + endpoint: https://34.105.95.217 +netop: + networkInterface: eth0 +imagePullSecrets: + repository: https://index.docker.io/v1/ + username: Kumar + password: 5585799-5d5a-48fa-b805-f4a1ffb110 + email: aveshaenterprise@gmail.com +``` +### Save Output +6. Save the above output to a file called `secrets.yaml` which will be used during the installation process of the Slice Operator on the worker clusters. 
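As a pointer for what comes next: the `secrets.yaml` file saved above is used as the Helm values file when installing the Slice Operator on the worker cluster, as described in the Register Worker Clusters topic. The following is a sketch of that step; the worker cluster context name is illustrative, and it assumes the `kubeslice` Helm repository has already been added as part of the installation prerequisites.

```
# Switch to the worker cluster that the secrets were generated for (context name is illustrative)
kubectx gke-worker-1

# Install the Slice Operator using the saved values file
helm install kubeslice-worker kubeslice/kubeslice-worker \
  -f secrets.yaml -n kubeslice-system --create-namespace
```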
diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.1.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.1.0.mdx new file mode 100644 index 00000000..16204b22 --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.1.0.mdx @@ -0,0 +1,38 @@ +# Release Notes for KubeSlice OSS 0.1.0 + +*Release date: 16th May 2022* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. KubeSlice achieves this by creating logical application ***slice*** boundaries which allow pods and services to communicate seamlessly across clusters, clouds, edges, and data centres. + +We continue to add new features and enhancements to KubeSlice. + + + +## What's New +These release notes describe the new changes and enhancements in this version. + +## KubeSlice Controller +The KubeSlice Controller orchestrates the creation and management of application slices on worker clusters. It is independent of any cloud provider and thus it: + +* Supports installation in any Kubernetes cluster (on-premise or cloud) +* Makes it easy to design hybrid cloud solutions +* The KubeSlice Controller components and the worker cluster components can coexist on the same cluster. + + +## Slice Operator +Slice Operator, also known as Worker Operator is a component of KubeSlice that must be installed on the worker cluster to interact with the KubeSlice Controller. It acts as an interface between the worker cluster and the KubeSlice Controller. It watches the configuration changes on the KubeSlice Controller and sets up the required infrastructure to create the slice overlay network. Slice Operators on worker clusters participate in inter-cluster discovery to enable service reachability across cluster boundaries. The Slice Operator can communicate directly with the KubeSlice Controller and get the configuration changes related to the registered cluster, also known as worker cluster. + + + +## Charts +Try out the KubeSlice community version using the given [helm charts](https://github.com/kubeslice/charts). + + + +## Supported Kubernetes Services +This version of KubeSlice has been tested on Azure Kubernetes Service and Google Kubernetes Engine, and KIND Kubernetes clusters. The supported Kubernetes versions are [1.20](https://v1-20.docs.kubernetes.io/), [1.21](https://v1-21.docs.kubernetes.io/), and [1.22](https://v1-22.docs.kubernetes.io/). + + + +## Known Issues +A slice can be deleted with onboarded applications, which would cause disruptions. Ensure to delete a slice only after all the applications are detached from the slice. diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.2.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.2.0.mdx new file mode 100644 index 00000000..80365606 --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.2.0.mdx @@ -0,0 +1,87 @@ +# Release Notes for KubeSlice OSS 0.2.0 +*Release Date 16th June 2022* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. 
KubeSlice achieves this by creating logical application ***slice*** boundaries which allow pods and services to communicate seamlessly across clusters, clouds, edges, and data centres. + +We continue to add new features and enhancements to KubeSlice. + +## What's New + +These release notes describe the new changes and enhancements in this version. + +## Onboarding Namespaces + +Namespaces that are created to run application deployments can be onboarded on a slice to form a micro +network segment. Once a namespace is bound to a slice, all the pods that get scheduled in +the namespace would get connected to the slice. The configuration is part of the slice YAML file. +This feature onboards namespaces and not individual applications. + +## Breaking Change for Onboarding Applications +With the onboarding namespaces feature, onboarding each application is no longer supported. +The onboarding namespaces feature can onboard complete namespaces and not individual +applications. + +:::caution +If you upgrade the worker operator to this 0.2.0 version, then the existing onboarded +applications do not work as expected. + +To avoid this breaking change, you must add the corresponding namespace of the existing +onboarded application in the slice configuration file. To know more, see +[namespace isolation profile parameters](/versioned_docs/version-1.1.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx#namespace-isolation-profile-parameters). +::: + +To onboard namespaces: + +1. Edit the [slice configuration](/versioned_docs/version-1.1.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx#slice-creation) YAML file to add namespaces as part of applicationNamespaces . You can add namespaces in the following ways in the slice configuration YAML file: + + * Add namespaces for each worker cluster. + * Add a wildcard ***** (asterisk) to add all namespaces in the worker clusters. + + :::info + Ensure that the namespace that you want to onboard exists on the worker cluster. + ::: + +2. Add the namespace and the corresponding clusters under the applicationNamespaces in the slice configuration file as illustrated below. + + ``` + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - 'worker-cluster-1' + - namespace: bookinfo + clusters: + - '*' + ``` + + :::info + Adding the asterisk (*) enables the namespace sameness, which means that the namespace is onboarded on all the worker clusters of that slice. + ::: + +3. Apply the slice configuration to complete the process of onboarding namespaces. + ``` + kubectl apply -f .yaml -n + ``` + +## Namespace Isolation +By default, all namespaces on a slice are not isolated and accept traffic from any source. +To secure the slice, you can selectively allow traffic to namespaces by isolating them. + +The namespace isolation feature enables you to confine application namespaces with a slice. +The application namespaces are isolated from other namespaces in a cluster and are +connected to the slice network. This leads to the formation of a secure inter-cluster +network segment of pods that are isolated from the rest of the pods in the clusters. + +## Intra-cluster Slice +A slice can now also be created within a single worker cluster. + +## Supported Kubernetes Services +This version of KubeSlice has been tested on Azure Kubernetes Service and +Google Kubernetes Engine, and KIND Kubernetes clusters. 
+ +The supported Kubernetes versions for cloud clusters are +[1.20](https://v1-20.docs.kubernetes.io/), [1.21](https://v1-21.docs.kubernetes.io/), +and [1.22](https://v1-22.docs.kubernetes.io/). + +The supported Kubernetes versions for KIND clusters are +[1.21](https://v1-21.docs.kubernetes.io/) and [1.22](https://v1-22.docs.kubernetes.io/). \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.3.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.3.0.mdx new file mode 100644 index 00000000..8fc87b5f --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.3.0.mdx @@ -0,0 +1,39 @@ +# Release Notes for KubeSlice OSS 0.3.0 +*Release Date 12th September 2022* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. KubeSlice achieves this by creating logical application ***slice*** boundaries which allow pods and services to communicate seamlessly across clusters, clouds, edges, and data centres. + +We continue to add new features and enhancements to KubeSlice. + +## What's New + +These release notes describe the new changes and enhancements in this version. + +### Standard QoS Profile +KubeSlice supports a standard QoS profile configuration in a separate YAML file. +This QoS profile must be applied and called out as a standard QoS profile in +a slice configuration. This is useful when you want to configure the same +QoS profile on multiple slices. For more information, see +[creating a standard QoS profile](/versioned_docs/version-1.1.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx#create-a-standard-qos-profile). + +## Bug Fixes +* The issue where the QoS traffic on a worker cluster did not work due to + an incorrect *node port* while passing conContext to the NetOp pod is fixed. The + issue has also occurred because the master node's *node IP address* was configured on + the worker cluster. You must always configure the *node IP address* of the worker cluster's gateway node + while installing the Slice Operator on the worker cluster. + +* The issue where an exponential increase of goroutine threads in the Gateway Sidecar + component caused abrupt CPU utilization is fixed. The fix reduces the CPU utilization + by around 500 percent. + +## Supported Kubernetes Services +This version of KubeSlice has been tested on Azure Kubernetes Service and +Google Kubernetes Engine, and KIND Kubernetes clusters. + +The supported Kubernetes versions for cloud clusters are +[1.20](https://v1-20.docs.kubernetes.io/), [1.21](https://v1-21.docs.kubernetes.io/), +and [1.22](https://v1-22.docs.kubernetes.io/). + +The supported Kubernetes versions for KIND clusters are +[1.21](https://v1-21.docs.kubernetes.io/) and [1.22](https://v1-22.docs.kubernetes.io/). 
\ No newline at end of file diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.4.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.4.0.mdx new file mode 100644 index 00000000..f5f91427 --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.4.0.mdx @@ -0,0 +1,17 @@ +# Release Notes for KubeSlice OSS 0.4.0 + +*Release date: 12th October 2022* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. KubeSlice achieves this by creating logical application ***slice*** boundaries which allow pods and services to communicate seamlessly across clusters, clouds, edges, and data centres. + +We continue to add new features and enhancements to KubeSlice. + +## What's New +These release notes describe the new changes and enhancements in this version. + +### IP Address Management +For managing and monitoring IP addresses on a slice, a new parameter has been introduced +to configure the maximum number of clusters that can be connected to a slice. This parameter +is configurable only during slice creation. The value is immutable after the slice creation. + +For more information, see the [slice configuration](/versioned_docs/version-1.1.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx#slice-creation). \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.5.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.5.0.mdx new file mode 100644 index 00000000..75b67922 --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.5.0.mdx @@ -0,0 +1,49 @@ +# Release Notes for KubeSlice OSS 0.5.0 + +*Release date: 29th December 2022* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services +in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. KubeSlice achieves this +by creating logical application ***slice*** boundaries that enable seamless communication between pods and services across +clusters, clouds, edges, and data centers. + +We continue to add new features and enhancements to KubeSlice. + +## What's New +These release notes describe the new changes and enhancements in this version. + +### Network Service Mesh Upgrade +The Network Service Mesh (NSM) component has been upgraded to the stable +[GA version 1.5.0](https://networkservicemesh.io/docs/releases/v1.5.0/) that provides upstream networking fixes. + +### Latest Supported Kubernetes Version +The latest Kubernetes version that we support from this release is version [1.24](https://v1-24.docs.kubernetes.io/). + +### Enhancements +* When the **namespace sameness** is applied to a namespace on a slice, then it applies to all worker + clusters that are part of the slice. If a worker cluster does not already have that namespace, it is now created. This + ensures that all the worker clusters that are part of a slice will have that + namespace for which the namespace sameness is applied. This created namespace remains on the worker + cluster even after the worker cluster is detached from that slice, and even when that slice is deleted. 
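+
+For instance, after namespace sameness is applied for a namespace such as `bookinfo`, a quick way to confirm this behavior is to list that namespace on every worker cluster in the slice. The cluster context names below are placeholders; substitute your own worker contexts.
+
+```
+# Verify that the namespace now exists on each worker cluster in the slice.
+for ctx in cloud-worker-1 cloud-worker-2; do
+  kubectl --context "$ctx" get namespace bookinfo
+done
+```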
+ +## Known Issues +The known issues are as follows: + +- If the SPIRE server takes time to start up, the **spire-server** pod continues to restart, thus delaying the completion +of cluster registration by 120 seconds. The worker cluster can only be added to a slice after this. + + Workaround: None + +- After a slice is created, the gateway connectivity takes approximately 120 seconds to establish a tunnel. + + Workaround: None + +- Istio version 1.13 is incompatible with Kubernetes version 1.24. It might cause issues with KubeSlice version 0.5.0, which + now supports Kubernetes version 1.24. However, KubeSlice version 0.5.0 can be installed without Istio too. + + Workaround: In the following topics, you must skip the steps related to Istio: + + * [Prerequisites](/versioned_docs/version-1.1.0/get-started/prerequisites/prerequisites-install-istio.mdx) + * [Register the worker cluster](versioned_docs/version-1.1.0/install-kubeslice/yaml/yaml-register-worker-clusters.mdx) + * [Create a slice](/versioned_docs/version-1.1.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx) + * [Deploy the BookInfo application](/versioned_docs/version-1.1.0/tutorials/yaml-tutorials/deploy-the-bookinfo-application.mdx) \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.5.1.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.5.1.mdx new file mode 100644 index 00000000..326a5eb2 --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.5.1.mdx @@ -0,0 +1,16 @@ +# Release Notes for KubeSlice OSS 0.5.0 v1 + +*Release date: 06 Jan 2023* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services +in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. KubeSlice achieves this +by creating logical application ***slice*** boundaries that enable seamless communication between pods and services across +clusters, clouds, edges, and data centers. + +We continue to add new features and enhancements to KubeSlice. + +## What's New +These release notes describe the new changes and enhancements in this version. + +### Enhancements +* The **kubeslice-cli** tool now allows uninstalling Kubeslice components using a custom topology file. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.6.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.6.0.mdx new file mode 100644 index 00000000..fb8e2ce0 --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.6.0.mdx @@ -0,0 +1,30 @@ +# Release Notes for KubeSlice OSS 0.6.0 + +*Release date: 16th Feb 2023* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services +in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. KubeSlice achieves this +by creating logical application ***slice*** boundaries that enable seamless communication between pods and services across +clusters, clouds, edges, and data centers. + +We continue to add new features and enhancements to KubeSlice. + +## What's New +These release notes describe the new changes and enhancements in this version. 
+ +### Latest Supported Kubernetes Version +The latest Kubernetes version that we support from this release is version [1.24](https://v1-24.docs.kubernetes.io/). + +### Enhancements +* With the current release, **Slice Gateway** spins up two pairs of gateway pods. With multiple replicas of the VPN pods, +multiple gateway nodes, and multiple network connections to the remote cluster, we now have mechanisms in place to handle +failure scenarios effectively. + +## Known Issues +The known issues are as follows: + +- The Slice Gateway Redundancy is incompatible with the existing slices on the cluster. So, you must create a new slice to + use the Slice Gateway Redundancy. +- Sometimes, gateway pods get stuck in the `Init` state due to this [upstream NSM](https://github.com/networkservicemesh/cmd-registry-k8s/issues/362) issue. +- If a node that contains the application pods and NSM webhook pod scheduled on it is restarted, the NSM containers are not injected into the application pod. + This occurs as the NSM webhook pod manages the lifecycle of the NSM `mutatingwebhookconfiguration`. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.7.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.7.0.mdx new file mode 100644 index 00000000..1326a673 --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-0.7.0.mdx @@ -0,0 +1,37 @@ +# Release Notes for KubeSlice OSS 0.7.0 + +*Release date: 14th April 2023* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services +in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. KubeSlice achieves this +by creating logical application ***slice*** boundaries that enable seamless communication between pods and services across +clusters, clouds, edges, and data centers. + +We continue to add new features and enhancements to KubeSlice. + +## What's New +These release notes describe the new changes and enhancements in this version. + +### Controller Events +KubeSlice creates controller events and stores them in the `kubeslice-controller` +and `kubeslice-project` namespaces. The events can be retrieved using the corresponding command. +For more information, see [controller events](versioned_docs/version-1.0.0/install-kubeslice/yaml/events/yaml-events-controller-events.mdx). + +### Slice and Cluster Health +You can now monitor slice and cluster health using the corresponding description commands. +For more information, see [slice and cluster health](/versioned_docs/version-1.0.0/install-kubeslice/yaml/slice-operations/slice-cluster-health.mdx). + +### Removal of the Cert Manager Dependency +The `cert-manager` is no longer required to install the KubeSlice Controller on the controller cluster. +With this version, the KubeSlice Controller supports upward compatibility. This means that the `cert-manager` +installed does not have any impact on the KubeSlice Controller. However, the `cert-manager` is still +required for the version 0.6.0 and older versions of the KubeSlice Controller to work. + +### Service Export Discovery +To identify the exported services from a worker cluster that have different names other than +the `slice.local` name, a new property called `aliases` has been introduced as part of +the service export configuration. 
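+
+A minimal sketch of how the new property might be used is shown below. The `aliases` list is the field introduced in this release; the remaining field layout follows the usual `iperf-server` service export example and should be verified against the service export documentation for the exact schema.
+
+```
+# Hypothetical ServiceExport that publishes the service under additional names.
+cat <<EOF | kubectl apply -f -
+apiVersion: networking.kubeslice.io/v1beta1
+kind: ServiceExport
+metadata:
+  name: iperf-server
+  namespace: iperf
+spec:
+  slice: slice-red
+  aliases:
+    - iperf-server.example.com
+  selector:
+    matchLabels:
+      app: iperf-server
+  ports:
+    - name: tcp
+      containerPort: 5201
+      protocol: TCP
+EOF
+```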
+ +### The Node IP is Editable +The node IP for the worker cluster that is registered with the controller can now be edited. If +KubeSlice could not detect a node IP during registration, you can enter it by editing a cluster. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-1.0.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-1.0.0.mdx new file mode 100644 index 00000000..25742096 --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-1.0.0.mdx @@ -0,0 +1,33 @@ +# Release Notes for KubeSlice OSS 1.0.0 + +*Release date: 31st May 2023* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services +in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. KubeSlice achieves this +by creating logical application ***slice*** boundaries that enable seamless communication between pods and services across +clusters, clouds, edges, and data centers. + +We continue to add new features and enhancements to KubeSlice. + +## What's New +These release notes describe the new changes and enhancements in this version. + +### Worker Operator-generated Events +The Worker Operator generates events in the `kubeslice-system` namespace. The events can +be retrieved using the corresponding command. +For more information, see [the Worker Operator events](/versioned_docs/version-1.0.0/install-kubeslice/yaml/events/yaml-events-controller-events.mdx). + +### KubeSlice Metrics +KubeSlice now records custom metrics that helps in monitoring the slice, and other KubeSlice +components. These Prometheus metrics can be configured as alerts using the alert manager. +For more information, see [KubeSlice metrics](/versioned_docs/version-1.0.0/install-kubeslice/yaml/metrics/yaml-metrics-controller-metrics.mdx). + +### Event Integration on Slack +KubeSlice-generated events can be integrated on slack to track them on a separate application workspace. + +For more information, see [integrate KubeSlice-generated events on Slack](/versioned_docs/version-1.1.0/add-ons/add-ons-slack-events.mdx). + +### Metric-based Alerts Integration with Slack +KubeSlice metric-based alerts can be integrated with Slack for brownfield and greenfield Prometheus deployments. +This integration facilitates tracking the metric-based alerts on a dedicated Slack channel. +For more information, see [integrate alerts with Slack](/versioned_docs/version-1.1.0/add-ons/add-ons-slack-metrics.mdx). \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-1.1.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-1.1.0.mdx new file mode 100644 index 00000000..d8d1d95f --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-1.1.0.mdx @@ -0,0 +1,51 @@ +# Release Notes for KubeSlice OSS 1.1.0 + +*Release date: 28th July 2023* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services +in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. KubeSlice achieves this +by creating logical application ***slice*** boundaries that enable seamless communication between pods and services across +clusters, clouds, edges, and data centers. + +We continue to add new features and enhancements to KubeSlice. 
+ +## What's New +These release notes describe the new changes and enhancements in this version. + + +## New features + +### Slice VPN Key rotation + +The slice VPN Gateway is an essential component of the slice network service, allowing the slice to connect to secure VPN networks. +By default, any new slice created using the KubeSlice Manager or the YAML file will have a duration of 30 days to renew the SliceGateway +certificates. You can customize the rotation interval when creating a slice by setting the rotationInterval parameter in the slice +configuration YAML file. This interval range is 30 to 90 days. + +Additionally, if you need to update the rotation interval at any time, you can modify the rotationInterval parameter in the slice +configuration file. This flexibility allows you to align the certificate renewal process with your specific security requirements. +Moreover, if you want to initiate the certificate renewal process immediately, you can use the renewBefore option. This option allows +you to trigger the certificate renewal process before the expiration of the rotation interval. By specifying an appropriate value for +renewBefore parameter in the YAML file, you can ensure that the certificate renewal process starts promptly, helping to maintain the +security and compliance of your system. + +The VPN cipher can be configured during slice creation using the cipher parameter in the slice configuration YAML file. The cipher +value can be set to AES_128_CBC. The default value is AES_256_CBC. This configuration is immutable during the lifetime of a slice. + + +## Issues Fixed + +Added a security context at the container level to the NSM init container to enable it to modify the `resolv.conf` file when there +is a security context at the pod level that is too restrictive. + +## Known Issues + +* Users can trigger VPN Key Rotation with the `RenewBefore` parameter even before the slice gateways are up and running. + This premature rotation attempt often results in failures during the rotation process. Please refrain from triggering VPN + Key rotation before the gateways are in a healthy condition to ensure a successful rotation process. +* After detaching a worker cluster from a slice, gateway pods are not deleted. +* In the current version, the rebalancing feature of gateway redundancy is disabled. +* We have identified a scenario where, during a Helm upgrade on Kubeslice Worker, the NSM (Network Service Mesh) admission + webhook pod might not be automatically cleaned up in case of a failure. As a result, there could be potential issues + with the old NSM webhook pod lingering from the previous release. So users are advised to manually delete the + old NSM webhook pod until the issue is resolved. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-1.2.0.mdx b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-1.2.0.mdx new file mode 100644 index 00000000..a41268e3 --- /dev/null +++ b/versioned_docs/version-1.2.0/release-notes/release-notes-for-kubeslice-oss-1.2.0.mdx @@ -0,0 +1,24 @@ +# Release Notes for KubeSlice OSS 1.2.0 + +*Release date: 16th Jan 2024* + +KubeSlice is a ***cloud-independent*** platform that combines network, application, Kubernetes, and deployment services +in a framework to accelerate application deployment in a multi-cluster and multi-tenant environment. 
KubeSlice achieves this +by creating logical application ***slice*** boundaries that enable seamless communication between pods and services across +clusters, clouds, edges, and data centers. + +We continue to add new features and enhancements to KubeSlice. + +## What's New +These release notes describe the new changes and enhancements in this version. + + +### KubeSlice Supports Cluster Connectivity between Public and Private Clusters +KubeSlice now supports cluster connectivity to private clusters. This feature is currently configurable only using YAML. + +With this release, we remove the restriction for the cluster to have at least one node with a public IP address to make +it part of the KubeSlice overlay network. Enabling a node with a public IP restricted the application of KubeSlice on +private cloud clusters where the virtual private clouds filter out external traffic. + +With the new feature, KubeSlice extends its usage to such private cloud clusters through a network load balancer which +can be provisioned and managed by all major cloud providers. \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/troubleshooting/troubleshooting-guide.mdx b/versioned_docs/version-1.2.0/troubleshooting/troubleshooting-guide.mdx new file mode 100644 index 00000000..1c2dc147 --- /dev/null +++ b/versioned_docs/version-1.2.0/troubleshooting/troubleshooting-guide.mdx @@ -0,0 +1,712 @@ +# Troubleshooting Guide + +## Introduction + +This guide aims to provide a comprehensive overview of potential troubleshooting scenarios that may arise when installing and configuring KubeSlice. + +## Installation Issues + +### Why do I face installation issues while installing KubeSlice on kind clusters on Ubuntu OS? + +On Ubuntu OS, if you have too many files open and try installing KubeSlice on kind clusters, +then you face issues. + +You must increase the `ulimit` to `2048` or `unlimited` and try installing KubeSlice on kind +clusters. If you still face issues, see [errors due to too many open files](https://kind.sigs.k8s.io/docs/user/known-issues/#pod-errors-due-to-too-many-open-files). + +### Why do I get an error during a helm upgrade of the KubeSlice Controller? + +:::caution +Currently, you can only upgrade to a software patch version that does not contain schema +changes. You cannot upgrade to a software patch/complete version that contains schema changes. +::: + +When you try to upgrade the controller using the `helm upgrade` command, +during which, if the worker operator pod is down, you get the following error related +to a mutating webhook. 
+ +``` +Patch Deployment "kubeslice-controller-manager" in namespace kubeslice-controller +error updating the resource "kubeslice-controller-manager": +cannot patch "kubeslice-controller-manager" with kind Deployment: Internal error occurred: failed calling webhook "mdeploy.avesha.io": failed to call webhook: Post "https://kubeslice-webhook-service.kubeslice-system.svc:443/mutate-appsv1-deploy?timeout=10s": no endpoints available for service "kubeslice-webhook-service" +Looks like there are no changes for Deployment "kubernetes-dashboard" +Looks like there are no changes for Deployment "dashboard-metrics-scraper" +Patch Certificate "kubeslice-controller-serving-cert" in namespace kubeslice-controller +Patch Issuer "kubeslice-controller-selfsigned-issuer" in namespace kubeslice-controller +Patch MutatingWebhookConfiguration "kubeslice-controller-mutating-webhook-configuration" in namespace +Patch ValidatingWebhookConfiguration "kubeslice-controller-validating-webhook-configuration" in namespace +Error: UPGRADE FAILED: cannot patch "kubeslice-controller-manager" with kind Deployment: Internal error occurred: failed calling webhook "mdeploy.avesha.io": failed to call webhook: Post "https://kubeslice-webhook-service.kubeslice-system.svc:443/mutate-appsv1-deploy?timeout=10s": no endpoints available for service "kubeslice-webhook-service" +``` + +To resolve this error, manually delete the mutating webhook configuration as described +below: + +1. Get the name of the `MutatingWebhookConfiguration` webhook using the following command: + + ``` + kubectl get mutatingwebhookconfiguration + ``` + + Expected Output + + ``` + NAME WEBHOOKS AGE + cdi-api-datavolume-mutate 1 16d + cert-manager-webhook 1 31d + istio-sidecar-injector 4 15d + kubeslice-controller-mutating-webhook-configuration 7 30d + kubeslice-mutating-webhook-configuration 1 29d + longhorn-webhook-mutator 1 17d + nsm-admission-webhook-cfg 1 29d + virt-api-mutator 4 18d + ``` + + Note down the name of the `MutatingWebhookConfiguration` webhook, which is `kubeslice-mutating-webhook-configuration` + in the above output. + +2. Delete the `MutatingWebhookConfiguration` using the following command: + + ``` + kubectl delete mutatingwebhookconfiguration kubeslice-mutating-webhook-configuration + ``` + + +## Connectivity Issues + +### Why is my registered cluster not connected to the KubeSlice Controller? + +There could be an issue during the installation of the Slice Operator on the registered +cluster. Try these steps: + +1. Switch context to the registered cluster on which you are facing + connectivity issues using the following command: + + ``` + kubectx + ``` + +2. Validate the installation of the Slice Operator by checking the pods belonging to + the namespace `kubeslice-controller-system`using the following command (from the output, check the status of the pods): + + ``` + kubectl get pods -n kubeslice-controller-system + ``` + +3. If the connection issue still persists, check if the KubeSlice Controller endpoint and token in the cluster are correct in the + Slice Operator YAML configuration file that is applied in that registered cluster. To know about the configuration, + see the [Slice Operator YAML file](/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx#slice-creation). + file. + +### Registering clusters with the same name does not throw an error. 
+ +Each instance of the cluster is registered separately as two different clusters and +Kubernetes ignores duplication of the cluster's name. + +It is best to avoid the duplication of the clusters names as Kubernetes inherently ignores +the duplication of cluster names. + +### The KubeSlice Controller was successfully installed with a controller endpoint that is not reachable by a slice. + +Check if the controller endpoint is correct during the installation of the Slice Operator +on the worker cluster. Check if the controller cluster's secret token and ca-cert installed on the +worker cluster is correct. To know more, see [Getting the Secrets of the Registered +Cluster](/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-register-worker-clusters.mdx#manually-retrieve-registered-cluster-secrets). + +### Node IP address on the registered cluster was changed but the KubeSlice components were not cleaned up. + +When the Node IP address is changed on a registered cluster, then a manual clean-up is +required for the worker cluster configuration to use the updated IP. So, we recommend not +to change the Node IP manually when it is already configured or add an invalid Node IP +address. + +While registering a cluster, the Node IP is configured by pulling the value from the cluster. + +### A cluster registration failed with a correct cluster YAML file. + +The registration fails when a `cluster.yaml` file is applied to register more than one +clusters. + +Ensure that a`cluster.yaml`file is applied to only one cluster and not multiple clusters. + +### Why do I experience router connectivity issues when one or more nodes are restarted in the worker clusters? + +There is a connection disruption in some routers when one or more nodes are restarted in +the worker clusters. You must restart the application pod to restore the router +connectivity. + +## Cluster Issues + +### The error/warning states that the CRD object is stuck. + +1. Patch an empty finalizer with the failing object CRD warning as shown in this example. + + (`serviceexportconfigs.hub.kubeslice.io` is a failing CRD object in this + example.) + + ``` + kubectl patch crd/serviceexportconfigs.hub.kubeslice.io -p '{"metadata":{"finalizers":[]}}' --type=merge + ``` + +2. Uninstall and reinstall the KubeSlice Controller. + +### The error states that the project namespace is stuck. + +1. Delete the stuck namespace by running the following command: + + ``` + kubectl patch ns/ -p '{"metadata":{"finalizers":[]}}' --type=merge + ``` + +2. Uninstall and reinstall the KubeSlice Controller. + +## Slice and Cluster Health Issues + +### What should I do when nsmgr is down? + +Identify the worker cluster on which `nsmgr` is down. + +`nsmgr` is a `DaemonSet` running in the `kubeslice-system` namespace. You can verify the status +of `nsmgr` on the corresponding worker cluster to further trace the issue. + +To verify the status of `nsmgr`: + +1. Get the details of `nsmgr` using the following command: + + ``` + kubectl get daemonset -n kubeslice-system --selector=app=nsmgr + ``` + + Expected Output + + ``` + NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE + nsmgr 3 3 3 3 3 17d + ``` + +2. 
Get the status of `nsmgr` from all the pods by using the following command: + + ``` + kubectl get pods -n kubeslice-system --selector=app=nsmgr + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + nsmgr-6gfxz 2/2 Running 3 (40h ago) 17d + nsmgr-jtxxr 2/2 Running 2 (12d ago) 17d + nsmgr-tdmd8 2/2 Running 0 11d + ``` + + If the status is not `Running`, then `nsmgr` on that pod is down. + +### What should I do when forwarder is down? + +Identify the worker cluster on which `forwarder` is down. + +`forwarder` is a `DaemonSet` running in the `kubeslice-system` namespace. You can verify the status +of `forwarder` on the corresponding worker cluster to further trace the issue. + +To verify the status of `forwarder`: + +1. Get the details of `forwarder` using the following command: + ``` + kubectl get daemonset -n kubeslice-system --selector=app=forwarder-kernel + ``` + Expected Output + ``` + NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE + forwarder-kernel 3 3 3 3 3 17d + ``` + + If a `forwarder` pod is down, then the number under `READY` will not be the same as other columns. + +2. Get the status of `forwarder` from all the pods using the following command: + ``` + kubectl get pods -n kubeslice-system --selector=app=forwarder-kernel + ``` + ``` + NAME READY STATUS RESTARTS AGE + forwarder-kernel-2zb9r 1/1 Running 0 17d + forwarder-kernel-jjzz7 1/1 Running 0 17d + forwarder-kernel-r5kcw 1/1 Running 0 17d + ``` + If the `STATUS` is not `Running`, then that pod is down. + +### What should I do when netop is down? + +Identify the worker cluster on which `netop` is down. + +`netop` is a `DaemonSet` running in the `kubeslice-system` namespace. You can verify the status +of `netop` on the corresponding worker cluster to further trace the issue. + +To verify the status of `netop`: + +1. Get the details of `netop` using the following command: + + ``` + kubectl get daemonsets -n kubeslice-system | grep kubeslice-netop + ``` + + Expected Output + + ``` + kubeslice-netop 2 2 2 2 2 17d + ``` + +2. The `netop` pod is restricted to run only on gateway nodes. Get the gateway nodes that run + the `netop` pod. + + ``` + kubectl get nodes --selector=kubeslice.io/node-type=gateway + ``` + + Expected Output + + ``` + NAME STATUS ROLES AGE VERSION + gke-demo-cluster-2-s-demo-cluster-2-s-3e484d4b-cbnl Ready 17d v1.23.16-gke.1400 + gke-demo-cluster-2-s-demo-cluster-2-s-3e484d4b-qnwp Ready 17d v1.23.16-gke.1400 + ``` + +3. Get the `netop` pods on the `kubeslice-system` namespace using the following command: + + ``` + kubectl get pods -n kubeslice-system --selector=app=app_net_op + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + kubeslice-netop-dqsg7 1/1 Running 0 17d + kubeslice-netop-jc4c2 1/1 Running 0 11d + ``` + + If the `STATUS` is not `Running`, then that pod is down. + +### What should I do when spire-agent is down? + +Identify the worker cluster on which `spire-agent` is down. + +`spire-agent` is a `DaemonSet` running in the `spire` namespace. You can verify the status +of `spire-agent` on the corresponding worker cluster to further trace the issue. + +To verify the status of `spire-agent`: + +1. Get the details of `spire-agent` using the following command: + ``` + kubectl get daemonset -n spire --selector=app=spire-agent + ``` + Expected Output + ``` + NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE + spire-agent 3 3 3 3 3 17d + ``` +2. 
Get the `spire-agent` pods from the `spire` namespace using the following command: + ``` + kubectl get pods -n spire --selector=app=spire-agent + ``` + Expected Output + ``` + NAME READY STATUS RESTARTS AGE + spire-agent-l692m 1/1 Running 0 11d + spire-agent-nrfnf 1/1 Running 0 17d + spire-agent-xp5m8 1/1 Running 0 17d + ``` + If the `STATUS` is not `Running`, then that pod is down. + +### What should I do when spire-server is down? + +Identify the worker cluster on which `spire-server` is down. + +`spire-server` is a `StatefulSet` running in the `spire` namespace. You can verify the status +of `spire-server` on the corresponding worker cluster to further trace the issue. + +To verify the status of `spire-server`: + +1. Get the details of `spire-server` using the following command: + + ``` + kubectl get statefulset -n spire --selector=app=spire-server + ``` + + Expected Output + + ``` + NAME READY AGE + spire-server 1/1 17d + ``` + +2. Get the `spire-agent` pods from the `spire` namespace using the following command: + ``` + kubectl get pods -n spire --selector=app=spire-server + ``` + Expected Output + ``` + NAME READY STATUS RESTARTS AGE + spire-server-0 2/2 Running 0 17d + ``` + If the `STATUS` is not `Running` and `2/2` pods are not `READY`, then that pod is down. + +### What should I do when admission-webhook is down? + +Identify the worker cluster on which `admission-webhook` is down. + +`admission-webhook` is a deployment running in the `kubeslice-system` namespace. You can verify the status +of `admission-webhook` on the corresponding worker cluster to further trace the issue. + +To verify the status of `admission-webhook`: + +1. Get the details of `admission-webhook` using the following command: + + ``` + kubectl get deployment -n kubeslice-system --selector=app=admission-webhook-k8s + ``` + + Expected Output + + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + nsm-admission-webhook-k8s 1/1 1 1 17d + ``` + +2. Get the `admission-webhook` pods from the `kubeslice-system` namespace using the following + command: + + ``` + kubectl get pod -n kubeslice-system --selector=app=admission-webhook-k8s + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + nsm-admission-webhook-k8s-698784967d-nmtrl 1/1 Running 0 17d + ``` + + If the `STATUS` is not `Running`, and `1/1` pod is not `READY`, then that pod is down. + +### What should I do when istiod is down? + +Identify the worker cluster on which `istiod` is down. + +`istiod` is a deployment running in the `istio-system` namespace. You can verify the status +of `istiod` on the corresponding worker cluster to further trace the issue. + +To verify the status of `istiod`: + +1. Get the details of `istiod` using the following command: + + ``` + kubectl get deployment -n istio-system --selector app=istiod + ``` + + Expected Output + + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + istiod 1/1 1 1 17d + + ``` + +2. Get the `istiod` pods from the `istio-system` namespace using the following + command: + + ``` + kubectl get pods -n istio-system --selector app=istiod + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + istiod-6b56cffbd9-8xx5t 1/1 Running 0 17d + ``` + If the `STATUS` is not `Running`, and `1/1` pod is not `READY`, then that pod is down. + +### What should I do when dns is down? + +Identify the worker cluster on which `dns` is down. + +`dns` is a deployment running in the `kubeslice-system` namespace. You can verify the status +of `dns` on the corresponding worker cluster to further trace the issue. 
+ +To verify the status of `dns`: + +1. Get the details of `dns` using the following command: + + ``` + kubectl get deployments -n kubeslice-system --selector=app=kubeslice-dns + ``` + + Expected Output + + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + kubeslice-dns 1/1 1 1 17d + ``` + +2. Get the `dns` pods from the `kubeslice-system` namespace using the following + command: + + ``` + kubectl get pods -n kubeslice-system --selector=app=kubeslice-dns + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + kubeslice-dns-79d4fc6477-frjw6 1/1 Running 0 17d + ``` + If the `STATUS` is not `Running`, and `1/1` pod is not `READY`, then that pod is down. + +### What should I do when slice-router is down? + +`slice-router` is a deployment running in the `kubeslice-system` namespace, which is only one per slice. +You can verify the status of `slice-router` on the corresponding worker cluster to further trace +the issue. + +To verify the status of `slice-router`: + +1. Get the details of `slice-router` using the following command: + + ``` + kubectl get deployment -n kubeslice-system | grep vl3-slice-router- + ``` + + Example + + ``` + kubectl get deployment -n kubeslice-system | grep vl3-slice-router-bookinfo-slice + ``` + + Expected Output + + ``` + vl3-slice-router-bookinfo-slice 1/1 1 1 17d + ``` + +2. Get the `slice-router` pods from the `kubeslice-system` namespace using the following + command: + + ``` + kubectl get pods -n kubeslice-system --selector=kubeslice.io/pod-type=router,kubeslice.io/slice=bookinfo-slice + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + vl3-slice-router-bookinfo-slice-795fb754cc-4dnl4 2/2 Running 0 11d + ``` + If the `STATUS` is not `Running`, and `2/2` pods are not `READY`, then that pod is down. + +### What should I do when egress is down? + +`egress`is a deployment running in the`kubeslice-system`namespace. You can verify the status +of`egress`` on the corresponding worker cluster to further trace the issue. + +To verify the status of `egress`: + +1. Get the details of `egress` using the following command: + + ``` + kubectl get deployment -n kubeslice-system --selector=istio=egressgateway,slice=bookinfo-slice + ``` + + Expected Output + + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + bookinfo-slice-istio-egressgateway 1/1 1 1 17d + + ``` + +2. Get the `egressgateway` pods from the `kubeslice-system` namespace using the following + command: + + ``` + kubectl get pod -n kubeslice-system --selector=istio=egressgateway,slice=bookinfo-slice + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + bookinfo-slice-istio-egressgateway-7548b49659-9z4c5 2/2 Running 0 17d + ``` + +### What should I do when ingress is down? + +`ingress`is a deployment running in the`kubeslice-system`namespace. You can verify the status +of`ingress`` on the corresponding worker cluster to further trace the issue. + +To verify the status of `ingress`: + +1. Get the details of `ingress` using the following command: + + ``` + kubectl get deployment -n kubeslice-system --selector=istio=ingressgateway,slice=bookinfo-slice + ``` + + Expected Output + + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + bookinfo-slice-istio-ingressgateway 1/1 1 1 17d + + ``` + +2. 
Get the `ingressgateway` pods from the `kubeslice-system` namespace using the following + command: + + ``` + kubectl get pod -n kubeslice-system --selector=istio=ingressgateway,slice=bookinfo-slice + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + bookinfo-slice-istio-ingressgateway-765fb4ddf-d52cs 2/2 Running 0 17d + ``` + + If the `STATUS` is not `Running`, and `2/2` pods are not `READY`, then that pod is down. + +### What should I do when slicegateway is down? + +`slicegateway` is a deployment running in the `kubeslice-system` namespace. You can verify the +status of `slicegateway` on the corresponding worker cluster to further trace the issue. +Slice gateways are always created in pairs. + +To verify the status of `slicegateway`: + +1. Get the details of `slicegateway` using the following command: + + ``` + kubectl get deployment -n kubeslice-system --selector=kubeslice.io/pod-type=slicegateway,kubeslice.io/slice=bookinfo-slice + ``` + + Expected Output + + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + bookinfo-slice-worker-1-worker-2-0 1/1 1 1 17d + bookinfo-slice-worker-1-worker-2-1 1/1 1 1 17d + ``` + If a pod is down, then it is shown under `READY` as `0/1`. + +2. Get the `ingressgateway` pods from the `kubeslice-system` namespace using the following + command: + + ``` + kubectl get pod -n kubeslice-system --selector=kubeslice.io/pod-type=slicegateway,kubeslice.io/slice=bookinfo-slice + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + bookinfo-slice-worker-1-worker-2-0-97748d58b-sqm7s 3/3 Running 0 17d + bookinfo-slice-worker-1-worker-2-1-8496454697-mw8cs 3/3 Running 0 17d + ``` + If the `STATUS` is not `Running`, and `3/3` pods are not `READY`, then that pod is down. + +## Onboarded Application Namespace Issues + +### NSM containers are not injected in pods during deployments in the application namespace. + +If NSM containers are not injected in pods during deployments in the application namespace, +then check if that application namespace contains the KubeSlice label. If the label is +not there, wait for the Slice Operator to label the namespace. + +For example, run the following command to check the label: + +``` +kubectl describe ns iperf +``` + +In the command output below, `kubeslice.io/slice=blue` is the KubeSlice label. + +``` +Name: iperf +Labels: hnc.x-k8s.io/included-namespace=true + iperf.tree.hnc.x-k8s.io/depth=0 + kubernetes.io/metadata.name=iperf + kubeslice.io/slice=blue +Annotations: +Status: Active + +No resource quota. + +No LimitRange resource. +``` + +In the command output, the iperf namespace contains the `kubeslice.io/slice=blue` label. +This means that the namespace is already onboarded to the blue slice. + +### I face connectivity issues with the NSM interfaces present in an application pod. + +When more than one NSM interfaces are present in an application pod, connectivity issues +occur. This is due to the router having an older NSM interface, causing the connectivity +disruption among the application pods. + +You must reboot the application pod that has more than one NSM interface to restore the +connectivity among the application pods. + + +## kubeslice-cli + +This guide describes troubleshooting scenarios that you could face during installing and while using the `kubeSlice-cli` tool. 
+ +### Unable to Install Kubeslice using the kubeslice-cli Tool on Ubuntu + +During the installation of KubeSlice using the `kubeslice-cli install -p=minimal-demo` command, if you get the following error message: + +``` +✓ Writing configuration 📜 + • Starting control-plane 🕹️ ... + ✗ Starting control-plane 🕹️ +ERROR: failed to create cluster: failed to init node with kubeadm: command "docker exec --privileged ks-w-2-control-plane kubeadm init --skip-phases=preflight --config=/kind/kubeadm.conf --skip-token-print --v=6" failed with error: exit status 137 + +Command Output: +2022/10/04 06:12:21 Process failed exit status 1 +``` + +There could be a memory/disk space issue. + +**To resolve**: + +- Remove unused clusters (other than the ones used in the demo). +- Increase disk space/memory resources. + +### Unable to run the kubeslice-cli commands + +After successfully installing KubeSlice using kubeslice-cli, if you are unable to use the commands: + +``` +kubeslice-cli get sliceConfig -n kubeslice-demo +``` + +``` +Fetching KubeSlice sliceConfig... +🏃 Running command: /usr/local/bin/kubectl get sliceconfigs.controller.kubeslice.io -n demo +error: the server doesn't have a resource type "sliceconfigs" +2022/10/04 08:26:40 Process failed exit status 1 +``` + +**To resolve**: + +- Ensure you are on the controller cluster to run the commands: `kubectx -c`. +- Export the configuration file using this command: `export KUBECONFIG=kubeslice/`. + +### Getting an Unverified Developer Error Message on macOS + +When you try to install kubeslice-cli on macOS, you get the `Unverified Developer Error Message`. +This error message appears when you try to install an application from a developer who is not registered with Apple. + +**To resolve**: + +Follow the instructions in [Enabling the Application for macOS](https://www.alphr.com/cannot-be-opened-because-the-developer-cannot-be-verified/). \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/tutorials/kubeslice-cli-tutorials/kubeslice-cli-demo-on-cloud-clusters.mdx b/versioned_docs/version-1.2.0/tutorials/kubeslice-cli-tutorials/kubeslice-cli-demo-on-cloud-clusters.mdx new file mode 100644 index 00000000..d551f40a --- /dev/null +++ b/versioned_docs/version-1.2.0/tutorials/kubeslice-cli-tutorials/kubeslice-cli-demo-on-cloud-clusters.mdx @@ -0,0 +1,261 @@ +# Demo using Cloud Clusters + +This topic describes the steps to install KubeSlice on cloud clusters. To install the KubeSlice Controller and its components on +your existing clusters, use the `kubeslice-cli install` command. + +## Prerequisites +Before you begin, ensure the following prerequisites are met: +- You have set up the environment to install the KubeSlice Controller on the cluster. For more information, +see [Prerequisites](/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-kubeslice-controller-requirements.mdx). +- You have set up the environment for the worker clusters. For more information, +see [Prerequisites](/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-worker-cluster-requirements.mdx). +- You have authenticated the worker clusters with the cloud providers. For more information, +see [Prepare Clusters](/versioned_docs/version-1.2.0/get-started/prerequisites/prerequisites-cluster-authentication.mdx). + +## Install KubeSlice + +In this demonstration, let us install KubeSlice on cloud clusters using the custom topology configuration YAML. 
+ +## Install the KubeSlice Controller and Worker Clusters + +You must create a topology configuration file that includes the names of the clusters and the cluster contexts that host the +KubeSlice Controller, the worker clusters, and a project name. For more information, see the [sample +configuration](/versioned_docs/version-1.2.0//install-kubeslice/kubeslice-cli/install-kubeslice.mdx#sample-topology-configuration-file) file. + +The following is an example custom topology file for installing KubeSlice in an existing setup. +``` +configuration: + cluster_configuration: + kube_config_path: + controller: + name: controller + context_name: cloud-controller + kube_config_path: + workers: + - name: worker-1 + context_name: cloud-worker-1 + - name: worker-2 + context_name: cloud-worker-2 + kube_config_path: + kubeslice_configuration: + project_name: kubeslice-avesha + helm_chart_configuration: + repo_alias: kubeslice + repo_url: https://kubeslice.github.io/kubeslice/ + cert_manager_chart: + chart_name: cert-manager + controller_chart: + chart_name: kubeslice-controller + worker_chart: + chart_name: kubeslice-worker +``` + +Use the following command to install the controller and the worker clusters: +``` +kubeslice-cli --config= install +``` +The above command installs the KubeSlice Controller, creates a project, and registers the worker cluster with the project by +installing the Slice Operator on the worker cluster. + +## Register a New Worker Cluster + +To register a new worker cluster with the existing KubeSlice configuration (or KubeSlice Controller): + +1. Add new worker cluster information under workers in the custom topology file that was used to install KubeSlice earlier. +2. Use the **install** command to apply the updated custom topology file. + +The following is an example custom topology file for registering a new worker cluster. Under **workers**, add a +new worker with the name `worker-3` and the cluster context `cloud-worker-3`. +``` +configuration: + cluster_configuration: + kube_config_path: + controller: + name: controller + context_name: kind-controller + kube_config_path: + workers: + - name: worker-1 + context_name: cloud-worker-1 + - name: worker-2 + context_name: cloud-worker-2 + - name: worker-3 + context_name: cloud-worker-3 + kube_config_path: + kubeslice_configuration: + project_name: kubeslice-avesha + helm_chart_configuration: + repo_alias: kubeslice + repo_url: https://kubeslice.github.io/kubeslice/ + cert_manager_chart: + chart_name: cert-manager + controller_chart: + chart_name: kubeslice-controller + worker_chart: + chart_name: kubeslice-worker +``` + +Use the following command to register a new worker cluster with the KubeSlice Controller: +``` +kubeslice-cli install --config= -s controller +``` + +## Create a Slice + +To onboard your existing namespaces (and their applications) onto a slice: + +1. Create a slice configuration YAML file (choose the namespaces, clusters, and so on to be part of the slice). +2. Use the `kubeslice-cli create` command to apply the slice configuration YAML file. + + +### Create a Slice Configuration YAML File + +Use the following template to create a slice configuration YAML file. +:::info +To understand more about the configuration parameters, see +[Slice Configuration Parameters](/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx). 
+::: + +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: #The name of the slice +spec: + sliceSubnet: #The slice subnet + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - #The name of your worker cluster1 + - #The name of your worker cluster2 + qosProfileDetails: + queueType: HTB + priority: 0 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 30000 + bandwidthGuaranteedKbps: 20000 + dscpClass: AF11 +``` + +### Apply the Slice Configuration YAML file + +:::caution +The `kubeslice-cli create sliceConfig -n -f ` command returns successfully after the slice +configuration is applied. However, in each cluster, the relevant pods for controlling and managing the slice may still be starting. +Ensure to wait for the slice to complete the initialization before deploying services to it. +::: + +To apply the slice configuration YAML, use the following command: +``` +kubeslice-cli create sliceConfig -n -f --config= +``` + +Example +``` +kubeslice-cli create sliceConfig -n kubeslice-avesha -f slice-config.yaml +``` + +Example output +``` +🏃 Running command: /usr/local/bin/kubectl apply -f slice-config.yaml -n kubeslice-avesha +sliceconfig.controller.kubeslice.io/slice-red created + +Successfully Applied Slice Configuration. +``` + +## Deploy the Application +:::info +If the application is already deployed on a namespace that is onboarded to a slice, then re-deploy the application. +::: + +## Create a Service Export +To create a service export, use the following command: +``` +kubeslice-cli create serviceExportConfig -f -n --config= +``` + +### Validate the Service Export +When an application service runs on one of the worker clusters that are onboarded to a slice, the worker generates a ServiceExport +for the application and propagates it to the KubeSlice Controller. + +To verify the service export on the controller cluster, use the following command: +``` +kubeslice-cli get serviceExportConfig -n +``` +Example +``` +kubeslice-cli get serviceExportConfig -n kubeslice-avesha +``` +Example Output +``` +Fetching KubeSlice serviceExportConfig... +🏃 Running command: /usr/local/bin/kubectl get serviceexportconfigs.controller.kubeslice.io -n kubeslice-avesha +NAME AGE +iperf-server-iperf-cloud-worker-1 43s +``` + +To view the details of the service export configuration, use the following command: +``` +kubeslice-cli describe serviceExportConfig -n +``` +Example +``` +kubeslice-cli describe serviceExportConfig iperf-server-iperf-cloud-worker-1 -n kubeslice-avesha +``` + +The following output shows the ServiceExportConfig for iperf-server application is present on the controller cluster. +``` +Describe KubeSlice serviceExportConfig... +🏃 Running command: /usr/local/bin/kubectl describe serviceexportconfigs.controller.kubeslice.io iperf-server-iperf-cloud-worker-1 -n kubeslice-avesha +Name: iperf-server-iperf-cloud-worker-1 +Namespace: kubeslice-avesha +Labels: original-slice-name=slice-red + service-name=iperf-server + service-namespace=iperf + worker-cluster=cloud-worker-1 +Annotations: +API Version: controller.kubeslice.io/v1alpha1 +Kind: ServiceExportConfig +Spec: + Service Discovery Ports: + Name: tcp + Port: 5201 + Protocol: TCP + Service Name: iperf-server + Service Namespace: iperf + Slice Name: slice-red + Source Cluster: cloud-worker-1 + +``` + +## Modify the Service Discovery Configuration +kubeslice-cli enables you to modify the service discovery parameters. 
For example, to modify the port on which the service is running, edit +the value and save. This updates the ServiceExportConfig. The ServiceExportConfig will again be propagated to all the worker clusters. + +To edit the service export configuration, use the following command: +``` +kubeslice-cli edit serviceExportConfig -n --config= +``` +Example +``` +kubeslice-cli edit serviceExportConfig iperf-server-iperf-cloud-worker-1 -n kubeslice-avesha +``` +Example Output +``` +Editing KubeSlice serviceExportConfig... +🏃 Running command: /usr/local/bin/kubectl edit serviceexportconfigs.controller.kubeslice.io iperf-server-iperf-cloud-worker-1 -n kubeslice-avesha +... +``` + +## Uninstall KubeSlice + +To uninstall the KubeSlice Controller and all its components, use the following command: +``` +kubeslice-cli uninstall --config= --all +``` + + +To uninstall KubeSlice from your cloud clusters step-by-step, follow the instructions in [Uninstall KubeSlice](/versioned_docs/version-1.2.0/install-kubeslice/kubeslice-cli/uninstall-kubeslice.mdx). \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/tutorials/kubeslice-cli-tutorials/kubeslice-cli-demo.mdx b/versioned_docs/version-1.2.0/tutorials/kubeslice-cli-tutorials/kubeslice-cli-demo.mdx new file mode 100644 index 00000000..2c30978d --- /dev/null +++ b/versioned_docs/version-1.2.0/tutorials/kubeslice-cli-tutorials/kubeslice-cli-demo.mdx @@ -0,0 +1,366 @@ +# Demo using Kind Clusters +This topic describes the steps to install the KubeSlice Controller and its components using kind clusters for non-production use. To +install KubeSlice on a locally configured kind cluster, use the `kubeslice-cli install` command. + +## Prerequisites +Before you begin, ensure the following prerequisites are met: +- You have installed kind to create the demo clusters. For more information, see [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). +- You have installed Helm to add the KubeSlice repo. For more information, see the helm [Releases](https://github.com/helm/helm/releases) page. +- You have installed kubectl to access clusters. For more information, see [Installing Tools](https://kubernetes.io/docs/tasks/tools/). +- You have installed kubectx, which is required to switch cluster contexts. For more information, see [Installing Kubectx](https://github.com/ahmetb/kubectx). +- You have installed Docker. For more information, see [Install Docker Engine](https://docs.docker.com/engine/install/). + +## Install KubeSlice + +The `kubeslice-cli install --profile=` command creates a demo topology consisting of one controller and +two worker clusters. The **full-demo** deploys the application on the `demo` slice, whereas the **minimal-demo** requires you to deploy an application +on the `demo` slice yourself. + +In this demonstration, install KubeSlice on kind clusters using kubeslice-cli with the `--profile=minimal-demo` option. + +The `kubeslice-cli install` command with the `--profile=minimal-demo` option does the following: + +1. Creates three kind clusters: one controller cluster with the name `ks-ctrl` and two worker clusters with the names `ks-w-1` and `ks-w-2`. +2. Installs Calico networking on the controller and worker clusters. +3. Downloads the open-source KubeSlice Helm charts. +4. Installs the KubeSlice Controller on the `ks-ctrl` cluster. +5. Creates a `kubeslice-demo` project namespace on the controller cluster. +6. Registers the `ks-w-1` and `ks-w-2` worker clusters with the project. +7. Installs the Slice Operator on the worker clusters. +8. Creates a slice called `demo`.
+9. Creates the `iperf` namespace for application deployment. + + +To set up the KubeSlice demo, use the following command: +``` +kubeslice-cli install --profile=minimal-demo +``` + +:::caution +- You must run the kubeslice-cli commands on the controller cluster. Run this command to ensure you are on the controller cluster: `kubectx -c`. +- Export the kubeconfig file before you run any kubeslice-cli command using this command: `export KUBECONFIG=kubeslice/kubeconfig.yaml`. +::: + +## Switch the Cluster Context +Use the following command to switch the context of the cluster: +``` +kubectx kind-ks-ctrl +``` +Expected Output +``` +✔ Switched to context "kind-ks-ctrl". +``` + +## Validate the Installation + +To validate a project, use the following command on the `kubeslice-controller` namespace to get the list of projects: +``` +kubeslice-cli get project -n kubeslice-controller +``` + +Example Output +``` +Fetching KubeSlice Project... +🏃 Running command: /usr/local/bin/kubectl get projects.controller.kubeslice.io -n kubeslice-controller +NAME AGE +kubeslice-demo 4h19m +``` + +To validate the registered worker clusters, use the following command: +``` +kubeslice-cli get worker -n kubeslice-demo +``` +Expected Output +``` +Fetching KubeSlice Worker... +🏃 Running command: /usr/local/bin/kubectl get clusters.controller.kubeslice.io -n kubeslice-demo +NAME AGE +ks-w-1 54m +ks-w-2 54m +``` + +:::success +You have successfully installed the KubeSlice Controller on the controller cluster and the Slice Operator on the worker clusters. +::: + + +The `kubeslice-cli install --profile=minimal-demo` command creates a slice called `demo` after successfully installing the KubeSlice Controller +and the Slice Operator on the worker clusters. To validate the demo slice, see [Validate the Slice](#validate-the-slice). + +You can now onboard the iperf application on the `demo` slice. To onboard the application on the slice, see +[Deploy the iPerf Application](#deploy-the-iperf-application). + +You can also use the kubeslice-cli command to create a new slice for application onboarding. To create a slice on your demo setup, +follow these steps. + + +## Create a Slice +:::info +Skip this step if you do not want to create a new slice and want to continue onboarding the application on the `demo` slice. +::: +Create a slice configuration YAML file using the following template and apply it to the project namespace. + +### Create the Slice Configuration YAML File +:::info +To understand more about the configuration parameters, see [Slice Configuration Parameters](/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx). +::: + +Use the following template to create a slice.
+ +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: #The name of the slice +spec: + sliceSubnet: #The slice subnet + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - #The name of your worker cluster1 + - #The name of your worker cluster2 + qosProfileDetails: + queueType: HTB + priority: 0 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 30000 + bandwidthGuaranteedKbps: 20000 + dscpClass: AF11 + +``` + +The following is the example slice configuration YAML file: +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: slice-red +spec: + sliceSubnet: 10.190.0.0/16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - ks-w-1 + - ks-w-2 + qosProfileDetails: + queueType: HTB + priority: 0 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 30000 + bandwidthGuaranteedKbps: 20000 + dscpClass: AF11 +``` + +### Apply the Slice Configuration YAML File +Use the following command to create a slice. +``` +kubeslice-cli create sliceConfig -n kubeslice-demo -f +``` +Example +``` +kubeslice-cli create sliceConfig -n kubeslice-demo -f slice-config.yaml +``` +Example output +``` +🏃 Running command: /usr/local/bin/kubectl apply -f slice-config.yaml -n kubeslice-demo +sliceconfig.controller.kubeslice.io/slice-red created + +Successfully Applied Slice Configuration. +``` +### Validate the Slice +Use the following command to get a slice: +``` +kubeslice-cli get sliceConfig -n kubeslice-demo +``` + +Example Output +``` +Fetching KubeSlice sliceConfig... +🏃 Running command: /usr/local/bin/kubectl get sliceconfigs.controller.kubeslice.io -n kubeslice-demo +NAME AGE +slice-red 110s +``` + +### Validate the Slice on the Controller Cluster +To validate the slice configuration on the controller cluster, use the following command: +``` +/usr/local/bin/kubectl --context=kind-ks-ctrl --kubeconfig=kubeslice/kubeconfig.yaml get workersliceconfig -n kubeslice-demo +``` +Expected Output +``` +NAME AGE +slice-red-ks-w-1 19h +slice-red-ks-w-2 19h +``` + +To validate the worker slice gateway, use the following command: +``` +/usr/local/bin/kubectl --context=kind-ks-ctrl --kubeconfig=kubeslice/kubeconfig.yaml get workerslicegateway -n kubeslice-demo +``` +Expected Output +``` +NAME AGE +slice-red-ks-w-1-ks-w-2 19h +slice-red-ks-w-2-ks-w-1 19h +``` + +### Validate the Slice on the Worker Cluster + +To validate the slice creation on each worker cluster, use the following command: +``` +/usr/local/bin/kubectl --context=kind-ks-w-1 --kubeconfig=kubeslice/kubeconfig.yaml get slice -n kubeslice-system +``` +Expected Output +``` +NAME AGE +slice-red 19h +``` + +To validate the slice gateway on each worker cluster, use the following command: +``` +/usr/local/bin/kubectl --context=kind-ks-w-1 --kubeconfig=kubeslice/kubeconfig.yaml get slicegw -n kubeslice-system +``` + +Expected Output +``` +NAME SUBNET REMOTE SUBNET REMOTE CLUSTER GW STATUS +slice-red-ks-w-1-ks-w-2 10.190.1.0/24 10.190.2.0/24 ks-w-2 +``` + +## Deploy the iPerf Application + +The `kubeslice-cli` tool sets up the iPerf demo application on the `iperf` namespace. The `iperf-server` is deployed on the `ks-w-1` worker +cluster and the `iperf-sleep` is deployed on the `ks-w-2` worker cluster. You need to restart the iPerf deployment to onboard the +applications on the slice. 
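Before restarting, you can optionally confirm that the `iperf` namespace has been picked up by the Slice Operator on each worker. KubeSlice typically labels onboarded namespaces with the slice name, but the exact label key can differ between releases, so treat this as a rough check rather than a definitive test.

```
# Show the labels on the iperf namespace in each worker cluster
kubectl --context=kind-ks-w-1 --kubeconfig=kubeslice/kubeconfig.yaml get ns iperf --show-labels
kubectl --context=kind-ks-w-2 --kubeconfig=kubeslice/kubeconfig.yaml get ns iperf --show-labels
```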
+ +To restart the deployment on the `ks-w-1` worker, use the following command: +``` +/usr/local/bin/kubectl rollout restart deployment/iperf-server -n iperf --context=kind-ks-w-1 --kubeconfig=kubeslice/kubeconfig.yaml +``` +Expected Output +``` +deployment.apps/iperf-server restarted +``` + +To restart the deployment on the `ks-w-2` worker, use the following command: +``` +/usr/local/bin/kubectl rollout restart deployment/iperf-sleep -n iperf --context=kind-ks-w-2 --kubeconfig=kubeslice/kubeconfig.yaml +``` +Expected Output +``` +deployment.apps/iperf-sleep restarted +``` + +## Validate the iPerf Installation + +To validate the iperf-server installation, use the following command: +``` +/usr/local/bin/kubectl --context=kind-ks-w-1 --kubeconfig=kubeslice/kubeconfig.yaml get pods -n iperf +``` +Expected Output +``` +NAME READY STATUS RESTARTS AGE +iperf-server-758dd55bf-dkkbw 2/2 Running 0 36m +``` + +## ServiceExports and ServiceImports + +The iPerf server needs to be exported for visibility. Use the following command to export the iPerf server: + +``` +/usr/local/bin/kubectl --context=kind-ks-w-1 --kubeconfig=kubeslice/kubeconfig.yaml apply -f kubeslice/iperf-server-service-export.yaml -n iperf +``` +Expected Output +``` +serviceexport.networking.kubeslice.io/iperf-server created +``` + +To validate the service export on the `ks-w-1` worker cluster where the iperf-server is installed, use the following command: +``` +/usr/local/bin/kubectl --context=kind-ks-w-1 --kubeconfig=kubeslice/kubeconfig.yaml get serviceexport -n iperf +``` +Expected Output +``` +NAME SLICE INGRESS PORT(S) ENDPOINTS STATUS +iperf-server slice-red 5201/TCP 1 READY +``` + +To validate the service imports on the workers clusters, use the following commands: + +``` +/usr/local/bin/kubectl --context=kind-ks-w-1 --kubeconfig=kubeslice/kubeconfig.yaml get serviceimport -n iperf +``` +Expected Output +``` +NAME SLICE PORT(S) ENDPOINTS STATUS +iperf-server slice-red 5201/TCP 1 READY +``` + +``` +/usr/local/bin/kubectl --context=kind-ks-w-2 --kubeconfig=kubeslice/kubeconfig.yaml get serviceimport -n iperf +``` +Expected Output +``` +NAME SLICE PORT(S) ENDPOINTS STATUS +iperf-server slice-red 5201/TCP 1 READY +``` + +### Verify the Inter-Cluster Communication + +Use the following command to describe the iperf-server service and retrieve the short and full DNS names for the service. +``` +/usr/local/bin/kubectl --context=kind-ks-w-2 --kubeconfig=kubeslice/kubeconfig.yaml describe serviceimport iperf-server -n iperf | grep +``` + +Expected Output +``` +"Dns Name:" + Dns Name: iperf-server.iperf.svc.slice.local + Dns Name: iperf-server-758dd55bf-dkkbw.ks-w-1.iperf-server.iperf.svc.slice.local +``` + +:::note +Use the short DNS name later to verify the inter-cluster communication. 
+::: + +To verify the iPerf connectivity, use the following command: +``` +/usr/local/bin/kubectl --context=kind-ks-w-2 --kubeconfig=kubeslice/kubeconfig.yaml exec -it deploy/iperf-sleep -c iperf -n iperf -- iperf -c iperf-server.iperf.svc.slice.local -p 5201 -i 1 -b 10Mb; +``` + +Expected Output +``` +------------------------------------------------------------ +Client connecting to iperf-server.iperf.svc.slice.local, TCP port 5201 +TCP window size: 45.0 KByte (default) +------------------------------------------------------------ +[ 1] local 10.1.2.5 port 49188 connected with 10.1.1.5 port 5201 +[ ID] Interval Transfer Bandwidth +[ 1] 0.00-1.00 sec 640 KBytes 5.24 Mbits/sec +[ 1] 1.00-2.00 sec 512 KBytes 4.19 Mbits/sec +[ 1] 2.00-3.00 sec 512 KBytes 4.19 Mbits/sec +[ 1] 3.00-4.00 sec 640 KBytes 5.24 Mbits/sec +[ 1] 4.00-5.00 sec 512 KBytes 4.19 Mbits/sec +[ 1] 5.00-6.00 sec 640 KBytes 5.24 Mbits/sec +[ 1] 6.00-7.00 sec 512 KBytes 4.19 Mbits/sec +[ 1] 7.00-8.00 sec 512 KBytes 4.19 Mbits/sec +[ 1] 8.00-9.00 sec 640 KBytes 5.24 Mbits/sec +[ 1] 9.00-10.00 sec 512 KBytes 4.19 Mbits/sec +[ 1] 0.00-10.12 sec 5.88 MBytes 4.87 Mbits/sec +``` + +## Uninstall KubeSlice + +:::info +The `kubeslic-cli uninstall` command deletes the kind clusters created for the demo, uninstalling the KubeSlice Controller and the registered worker clusters. +::: +To uninstall KubeSlice and delete the demo clusters, use the following command: +``` +kubeslice-cli uninstall +``` \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-bookinfo-application-on-an-intra-cluster-slice.mdx b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-bookinfo-application-on-an-intra-cluster-slice.mdx new file mode 100644 index 00000000..56eb2aad --- /dev/null +++ b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-bookinfo-application-on-an-intra-cluster-slice.mdx @@ -0,0 +1,585 @@ +# Deploy the BookInfo Application on an Intra-cluster Slice + +## Introduction +[BookInfo](https://istio.io/latest/docs/examples/bookinfo/) is a sample application from Istio that is composed +of four separate microservices: productpage, details, reviews, and ratings. In this topic, we will use the +BookInfo application to demonstrate inter-slice communication. + +## Prerequisites +Before you begin, ensure the following prerequisites are met: + +- You have the KubeSlice Controller components and worker cluster components on the same cluster. For more information, +see [Installing KubeSlice](/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-controller-install.mdx) +and [Registering the Worker Cluster](/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-register-worker-clusters.mdx). +- Before creating a slice, create the `bookinfo` namespace in all the participating worker clusters. + Use the following command to create the `bookinfo` namespace: + ``` + kubectl create ns bookinfo + ``` +## Creating the Slice +To install the BookInfo application on a single cluster, you must create a slice without Istio enabled. For more information, +see [Creating a Slice](/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx). + +## Creating the BookInfo Deployment YAML Files +Using the templates below, create the necessary .yaml files to deploy the BookInfo application. All fields in the +template will remain the same except for the `slice name` which must be replaced with the name of your slice. 
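If you prefer to script the substitution rather than edit each file by hand, a `sed` pass over the manifests works. This is a hypothetical example that assumes your slice is named `my-slice` and that you kept the `#Replace Slice Name` placeholder comment from the ServiceExports template later in this topic; adjust the pattern to match your files.

```
# Write a copy of the ServiceExports manifest with the slice name filled in
sed 's/#Replace Slice Name/my-slice/g' serviceexports.yaml > serviceexports-my-slice.yaml
```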
+ +:::info +These instructions will guide you through deploying the Productpage service to a cluster we will refer to as +the `productpage cluster`, and the remaining services as well as service exports will be deployed to a +cluster referred to here as the `services cluster`. +::: + +## ProductPage +Using the template below, create productpage.yaml. All fields in the template will remain the same except for +the `slice name` which must be replaced with the name of your slice. +``` +################################################################################################## +# Productpage service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + type: NodePort + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage + labels: + account: productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + labels: + app: productpage + version: v1 + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + image: docker.io/istio/examples-bookinfo-productpage-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + securityContext: + runAsUser: 1000 + env: + - name: REVIEWS_HOSTNAME + value: reviews.bookinfo.svc.slice.local + - name: DETAILS_HOSTNAME + value: details.bookinfo.svc.slice.local + - name: netshoot + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true + volumes: + - name: tmp + emptyDir: {} +``` + +## Details +Using the template below, create details.yaml. All fields in the template will remain the same except for +the `slice name` which must be replaced with the name of your slice. +``` +################################################################################################## +# Details service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details + service: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-details + labels: + account: details +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details-v1 + labels: + app: details + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: details + version: v1 + template: + metadata: + labels: + app: details + version: v1 + spec: + serviceAccountName: bookinfo-details + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + securityContext: + runAsUser: 1000 + - name: netshoot + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +``` + +## Ratings +Using the template below, create ratings.yaml. 
All fields in the template will remain the same except for +the `slice name` which must be replaced with the name of your slice. +``` +################################################################################################## +# Ratings service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: ratings + labels: + app: ratings + service: ratings +spec: + ports: + - port: 9080 + name: http + selector: + app: ratings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-ratings + labels: + account: ratings +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ratings-v1 + labels: + app: ratings + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: ratings + version: v1 + template: + metadata: + labels: + app: ratings + version: v1 + spec: + serviceAccountName: bookinfo-ratings + containers: + - name: ratings + image: docker.io/istio/examples-bookinfo-ratings-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + securityContext: + runAsUser: 1000 + - name: netshoot + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +``` + +## Reviews +Using the template below, create reviews.yaml. All fields in the template will remain the same except for +the `slice name` which must be replaced with the name of your slice. +``` +################################################################################################## +# Reviews service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews + service: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-reviews + labels: + account: reviews +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v3 + labels: + app: reviews + version: v3 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v3 + template: + metadata: + labels: + app: reviews + version: v3 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.16.2 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + securityContext: + runAsUser: 1000 + - name: netshoot + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +``` + +## ServiceExports +Using the template below, create serviceexports.yaml. All fields in the template will remain the same except +for **two** `slice name` instances which must be replaced with the name of your slice. 
+``` +################################################################################## +# Details ServiceExport +################################################################################## +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: details +spec: + slice: #Replace Slice Name + selector: + matchLabels: + app: details + ingressEnabled: false + ports: + - name: http + containerPort: 9080 + protocol: TCP +--- +################################################################################## +# Reviews ServiceExport +################################################################################## +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: reviews +spec: + slice: #Replace Slice Name + selector: + matchLabels: + app: reviews + ingressEnabled: false + ports: + - name: http + containerPort: 9080 + protocol: TCP +``` + +## Deploy the Application on a Single Cluster + +Perform these steps: + +1. Use the following command to ensure we are targeting the cluster we deploy the product page to: + ``` + kubectx + ``` +2. Use the following command to create the `bookinfo` namespace that we deploy these services on: + ``` + kubectl create ns bookinfo + ``` +3. Use the following command to apply the `productpage.yaml` file: + ``` + kubectl apply -f productpage.yaml -n bookinfo + ``` + Expected Output + ``` + service/productpage created + serviceaccount/bookinfo-productpage created + deployment.apps/productpage-v1 created + ``` +4. Use the following command to check if the deployed productpage pod is running on the cluster: + ``` + kubectl get pods -n bookinfo + ``` + Expected Output + ``` + NAME READY STATUS RESTARTS AGE + productpage-v1-5cc46fc6dc-drd8b 4/4 Running 0 26h + ``` +5. Using the following commands, apply the details.yaml, ratings.yaml, reviews.yaml, and serviceexports.yaml files. + Use the following command to apply the `details.yaml` file. + ``` + kubectl apply -f details.yaml -n bookinfo + ``` + Expected Output + ``` + service/details created + serviceaccount/bookinfo-details created + deployment.apps/details-v1 created + ``` +6. Use the following command to apply the `ratings.yaml` file: + ``` + kubectl apply -f ratings.yaml -n bookinfo + ``` + Expected Output + ``` + service/ratings created + serviceaccount/bookinfo-ratings created + deployment.apps/ratings-v1 created + ``` +7. Use the following command to apply the `reviews.yaml` file: + ``` + kubectl apply -f reviews.yaml -n bookinfo + ``` + Expected Output + ``` + service/reviews created + serviceaccount/bookinfo-reviews created + deployment.apps/reviews-v3 created + ``` +8. Use the following command to verify if the deployed pods are running on the cluster: + ``` + kubectl get pods -n bookinfo + ``` + Expected Output + ``` + NAME READY STATUS RESTARTS AGE + details-v1-557b474454-fbfhh 4/4 Running 0 26h + ratings-v1-5846f848bb-4dwtz 4/4 Running 0 26h + reviews-v3-64cf7654f4-cfqz8 4/4 Running 0 26h + ``` +9. Use the following command to apply `serviceexports.yaml` file: + ``` + kubectl apply -f serviceexports.yaml -n bookinfo + ``` + Expected Output + ``` + serviceexport.networking.kubeslice.io/details created + serviceexport.networking.kubeslice.io/reviews created + ``` +:::success +You have completed the deployment of BookInfo application on a slice.** +::: + +## Validate the BookInfo Deployment +### Validate the Services + +Perform these steps: + +1. Switch contexts to target the `services cluster`. + ``` + kubectx + ``` +2. 
Use the following command to verify the details and reviews services have been successfully exported to the +KubeSlice configuration: + ``` + kubectl get serviceexport -n bookinfo + ``` + Expected Output + ``` + kubectl get serviceexport -n bookinfo + NAME SLICE INGRESS PORT(S) ENDPOINTS STATUS ALIAS + details white 9080/TCP 1 READY + reviews white 9080/TCP 1 READY + ``` + +## Validate the Productpage on the Cloud Cluster + +Perform these steps: + +1. Switch contexts to target the `productpage cluster`. + ``` + kubectx + ``` +2. Using the following command, verify the details and reviews service imports are present in the cluster: + ``` + kubectl get serviceimport -n bookinfo + ``` + Expected Output + ``` + NAME SLICE PORT(S) ENDPOINTS STATUS ALIAS + details white 9080/TCP 1 READY + reviews white 9080/TCP 1 READY + ``` +3. Use the following command to check the exposed port for the product page service. Use this port to +visit the BookInfo webpage. + ``` + kubectl get services -n bookinfo + ``` + Expected Output + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + details ClusterIP 10.0.116.23 9080/TCP 2d2h + productpage NodePort 10.0.20.209 9080:31194/TCP 2d2h + reviews ClusterIP 10.0.146.220 9080/TCP 2d2h + ``` + + :::info + To view the deployed BookInfo product page, we need the external IP of an application node and the exposed port + we just retrieved. Take note of the external IP address of one of the application nodes to use it later. + ::: + +4. Use the following command to get your node details: + ``` + kubectl get nodes -o wide + ``` + Expected Output (your output will differ, here we are just focused on the external IP address). + ``` + kubectl get nodes -o wide + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE + KERNEL-VERSION CONTAINER-RUNTIME + gke-preprod-knative--preprod-knative--aba5a0cc-9jrq Ready 27h v1.20.15-gke.3600 10.6.0.4 35.231.181.81 Container-Optimized OS from Google 5.4.170+ containerd://1.4.8 + gke-preprod-knative--preprod-knative--aba5a0cc-xj3j Ready 27h v1.20.15-gke.3600 10.6.0.5 35.243.229.81 Container-Optimized OS from Google 5.4.170+ containerd://1.4.8 + gke-preprod-knative--preprod-knative--d19d3a9f-c32x Ready 28h v1.20.15-gke.3600 10.6.0.3 104.196.200.27 Container-Optimized OS from Google 5.4.170+ containerd://1.4.8 + ``` +5. Combine the external IP address the command returns with the port you retrieved in the last step in the format below, +and visit the page in a browser to view your multi-cluster BookInfo deployment. + ``` + http://:/productpage + ``` + Example + ![alt](/img/Bookinfo-productpage-OS.png) + +## Validate the Productpage on the Kind Cluster +### Access the Productpage from the Local Machine +If the kind clusters are on a local machine, perform these steps: + +1. Set up port-forwarding from a local machine using the following command: + ``` + kubectl port-forward svc/ -n : + ``` + Example + ``` + kubectl port-forward svc/productpage -n bookinfo 31986:9080 + ``` +2. Open the browser to access the product page using the following URL: + ``` + http://localhost:/productpage + ``` + Example + ``` + http://localhost:31986/productpage + ``` + +The following is an example of the product page: +![alt](/img/Bookinfo-productpage-OS.png) + +### Access the Productpage from the Cloud EC2 Machine +If the kind clusters are on Cloud EC2 Machine, perform these steps: + +1. 
Connect to your EC2 machine using SSH from your local machine using the following command: + ``` + ssh -i ubuntu@ -N -L :: + ``` + Example + ``` + ssh -i mykeypair.pem ubuntu@54.234.57.178 -N -L 8080:172.18.0.6:31986 + ``` +2. Open the browser to access the product page using the following URL: + ``` + http://localhost:/productpage + ``` + Example + ``` + http://localhost:8080/productpage + ``` + +The following is an example of the product page: +![alt](/img/Bookinfo-productpage-OS.png) + +:::success +You have successfully deployed the BookInfo application on a KubeSlice configuration containing at least two clusters. +::: + +## Uninstall Istio BookInfo +To uninstall Istio BookInfo from your KubeSlice configuration, follow the instructions +in [offboarding namespaces](/versioned_docs/version-1.2.0/uninstall-kubeslice/uninstall-kubeslice.mdx#offboard-application-namespaces). diff --git a/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-bookinfo-application.mdx b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-bookinfo-application.mdx new file mode 100644 index 00000000..e43eeaf4 --- /dev/null +++ b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-bookinfo-application.mdx @@ -0,0 +1,595 @@ +# Deploy the BookInfo Application + +## Introduction +[BookInfo](https://istio.io/latest/docs/examples/bookinfo/) is a sample application from Istio that is composed +of four separate microservices: productpage, details, reviews, and ratings. In this topic, we will use the +BookInfo application to demonstrate inter-slice communication. + +## Prerequisites +Before you begin, ensure the following prerequisites are met: + +- You have a KubeSlice configuration with two or more clusters registered. For more information, +see [Installing KubeSlice](/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-controller-install.mdx) +and [Registering the Worker Cluster](/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-register-worker-clusters.mdx). +- You have Istio installed in all registered worker clusters. +- Before creating a slice, create the `bookinfo` namespace in all the participating worker clusters. + Use the following command to create the `bookinfo` namespace: + ``` + kubectl create ns bookinfo + ``` +- Inject the istio label to the `bookinfo` namespace using the following command: + ``` + kubectl label namespace bookinfo istio-injection=enabled + ``` +- You have the slice created across the worker clusters. For more information, +see [Creating a Slice](/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx). + +## Create the BookInfo Deployment YAML Files +Using the templates below, create the necessary .yaml files to deploy the BookInfo application. +All fields in the template will remain the same except for the `slice name` which must be replaced with the +name of your slice. + +:::info +These instructions will guide you through deploying the Product Page service to a cluster we will refer to as +the `productpage cluster`, and the remaining services as well as service exports will be deployed to a cluster +referred to here as the `services cluster`. +::: + +## Productpage +Using the template below, create productpage.yaml. All fields in the template will remain the same except for +the `slice name` which must be replaced with the name of your slice. 
+``` +################################################################################################## +# Productpage service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + type: NodePort + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage + labels: + account: productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + labels: + app: productpage + version: v1 + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + image: docker.io/istio/examples-bookinfo-productpage-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + securityContext: + runAsUser: 1000 + env: + - name: REVIEWS_HOSTNAME + value: reviews.bookinfo.svc.slice.local + - name: DETAILS_HOSTNAME + value: details.bookinfo.svc.slice.local + - name: netshoot + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true + volumes: + - name: tmp + emptyDir: {} +``` + +## Details +Using the template below, create details.yaml. All fields in the template will remain the same except for +the `slice name` which must be replaced with the name of your slice. +``` +################################################################################################## +# Details service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details + service: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-details + labels: + account: details +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details-v1 + labels: + app: details + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: details + version: v1 + template: + metadata: + labels: + app: details + version: v1 + spec: + serviceAccountName: bookinfo-details + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + securityContext: + runAsUser: 1000 + - name: netshoot + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +``` +## Ratings +Using the template below, create ratings.yaml. All fields in the template will remain the same except for +the `slice name` which must be replaced with the name of your slice. 
+``` +################################################################################################## +# Ratings service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: ratings + labels: + app: ratings + service: ratings +spec: + ports: + - port: 9080 + name: http + selector: + app: ratings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-ratings + labels: + account: ratings +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ratings-v1 + labels: + app: ratings + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: ratings + version: v1 + template: + metadata: + labels: + app: ratings + version: v1 + spec: + serviceAccountName: bookinfo-ratings + containers: + - name: ratings + image: docker.io/istio/examples-bookinfo-ratings-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + securityContext: + runAsUser: 1000 + - name: netshoot + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +``` + +## Reviews +Using the template below, create reviews.yaml. All fields in the template will remain the same except for +the `slice name` which must be replaced with the name of your slice. +``` +################################################################################################## +# Reviews service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews + service: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-reviews + labels: + account: reviews +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v3 + labels: + app: reviews + version: v3 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v3 + template: + metadata: + labels: + app: reviews + version: v3 + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.16.2 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + securityContext: + runAsUser: 1000 + - name: netshoot + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +``` +## ServiceExports +Using the template below, create serviceexports.yaml. All fields in the template will remain the same except + for **two** `slice name` instances which must be replaced with the name of your slice. 
+``` +################################################################################## +# Details ServiceExport +################################################################################## +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: details +spec: + slice: #Replace Slice Name + selector: + matchLabels: + app: details + ingressEnabled: false + ports: + - name: http + containerPort: 9080 + protocol: TCP +--- +################################################################################## +# Reviews ServiceExport +################################################################################## +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: reviews +spec: + slice: #Replace Slice Name + selector: + matchLabels: + app: reviews + ingressEnabled: false + ports: + - name: http + containerPort: 9080 + protocol: TCP +``` +## Deploy to the Productpage Cluster + +Perform these steps: +1. Use the following command to ensure we are targeting the cluster we will be deploying the product page to: + ``` + kubectx + ``` +2. Use the following command to apply the `productpage.yaml` file: + ``` + kubectl apply -f productpage.yaml -n bookinfo + ``` + Expected Output: + ``` + service/productpage created + serviceaccount/bookinfo-productpage created + deployment.apps/productpage-v1 created + ``` +3. Use the following command to check if the deployed productpage pod is running on the cluster: + ``` + kubectl get pods -n bookinfo + ``` + Expected Output + ``` + NAME READY STATUS RESTARTS AGE + productpage-v1-5cc46fc6dc-drd8b 4/4 Running 0 26h + ``` + +## Deploy to the Service Cluster + +Perform these steps: + +1. Use the following command to ensure we are targeting the cluster we deploy reviews, details, and ratings to: + ``` + kubectx + ``` +2. Use the following command to create the `bookinfo` namespace that we deploy these services on: + ``` + kubectl create ns bookinfo + ``` +3. Use the following command to label the `bookinfo` namespace for istio-injection: + ``` + kubectl label namespace bookinfo istio-injection=enabled + ``` +4. Using the following commands, apply the details.yaml, ratings.yaml, reviews.yaml, and serviceexports.yaml files. +Use the following command to apply the `details.yaml` file. + ``` + kubectl apply -f details.yaml -n bookinfo + ``` + Expected Output + ``` + service/details created + serviceaccount/bookinfo-details created + deployment.apps/details-v1 created + ``` +5. Use the following command to apply the `ratings.yaml` file: + ``` + kubectl apply -f ratings.yaml -n bookinfo + ``` + Expected Output + ``` + service/ratings created + serviceaccount/bookinfo-ratings created + deployment.apps/ratings-v1 created + ``` +6. Use the following command to apply the `reviews.yaml` file: + ``` + kubectl apply -f reviews.yaml -n bookinfo + ``` + Expected Output + ``` + service/reviews created + serviceaccount/bookinfo-reviews created + deployment.apps/reviews-v3 created + ``` +7. Use the following command to verify if the deployed pods are running on the cluster: + ``` + kubectl get pods -n bookinfo + ``` + Expected Output + ``` + NAME READY STATUS RESTARTS AGE + details-v1-557b474454-fbfhh 4/4 Running 0 26h + ratings-v1-5846f848bb-4dwtz 4/4 Running 0 26h + reviews-v3-64cf7654f4-cfqz8 4/4 Running 0 26h + ``` +8. 
Use the following command to apply `serviceexports.yaml` file: + ``` + kubectl apply -f serviceexports.yaml -n bookinfo + ``` + Expected Output + ``` + serviceexport.networking.kubeslice.io/details created + serviceexport.networking.kubeslice.io/reviews created + ``` + +## Validate the BookInfo Deployment +### Validate the Services + +Perform these steps: + +1. Switch the contexts to target the `services cluster`. + ``` + kubectx + ``` +2. Use the following command to verify the details and reviews services have been successfully exported to the +KubeSlice configuration: + ``` + kubectl get serviceexport -n bookinfo + ``` + Expected Output + ``` + kubectl get serviceexport -n bookinfo + NAME SLICE INGRESS PORT(S) ENDPOINTS STATUS ALIAS + details white true 9080/TCP 1 READY + reviews white true 9080/TCP 1 READY + ``` +### Validate the Productpage on the Cloud Cluster + +Perform these steps: + +1. Switch contexts to target the `productpage cluster`. + ``` + kubectx + ``` +2. Using the following command, verify the details and reviews service imports are present in the cluster: + ``` + kubectl get serviceimport -n bookinfo + ``` + Expected Output + ``` + NAME SLICE PORT(S) ENDPOINTS STATUS ALIAS + details white 9080/TCP 1 READY + reviews white 9080/TCP 1 READY + ``` +3. Use the following command to check the exposed port for the product page service. In a moment, use this port to +visit the BookInfo webpage. + ``` + kubectl get services -n bookinfo + ``` + Expected Output + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + details ClusterIP 10.0.116.23 9080/TCP 2d2h + productpage NodePort 10.0.20.209 9080:31194/TCP 2d2h + reviews ClusterIP 10.0.146.220 9080/TCP 2d2h + ``` + +:::info +To view the deployed BookInfo product page, we need the external IP of an application node and the exposed +port we just retrieved.Take note of the external IP address of one of the application nodes to use it later. +::: + +4. Use the following command to get your node details: + ``` + kubectl get nodes -o wide + ``` + Expected Output (your output will differ, here we are just focused on the external IP address). + ``` + kubectl get nodes -o wide + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE + KERNEL-VERSION CONTAINER-RUNTIME + gke-preprod-knative--preprod-knative--aba5a0cc-9jrq Ready 27h v1.20.15-gke.3600 10.6.0.4 35.231.181.81 Container-Optimized OS from Google 5.4.170+ containerd://1.4.8 + gke-preprod-knative--preprod-knative--aba5a0cc-xj3j Ready 27h v1.20.15-gke.3600 10.6.0.5 35.243.229.81 Container-Optimized OS from Google 5.4.170+ containerd://1.4.8 + gke-preprod-knative--preprod-knative--d19d3a9f-c32x Ready 28h v1.20.15-gke.3600 10.6.0.3 104.196.200.27 Container-Optimized OS from Google 5.4.170+ containerd://1.4.8 + ``` +5. Combine the external IP address the command returns with the port you retrieved in the last step in the format below, +and visit the page in a browser to view your multi-cluster BookInfo deployment. + ``` + http://:/productpage + ``` + Example + ![alt](/img/Bookinfo-productpage-OS.png) + +## Validate the Productpage on the Kind Cluster +### Access the Productpage from the Local Machine +If the kind clusters are on a local machine, perform these steps: + +1. Set up port-forwarding from a local machine using the following command: + ``` + kubectl port-forward svc/ -n : + ``` + Example + ``` + kubectl port-forward svc/productpage -n bookinfo 31986:9080 + ``` +2. 
Open the browser to access the product page using the following URL: + ``` + http://localhost:/productpage + ``` + Example + ``` + http://localhost:31986/productpage + ``` + +The following is an example of the product page: +![alt](/img/Bookinfo-productpage-OS.png) + +### Access the Productpage from the Cloud EC2 Machine +If the kind clusters are on Cloud EC2 Machine, perform these steps: + +1. Connect to your EC2 machine using SSH from your local machine using the following command: + ``` + ssh -i ubuntu@ -N -L :: + ``` + Example + ``` + ssh -i mykeypair.pem ubuntu@54.234.57.178 -N -L 8080:172.18.0.6:31986 + ``` +2. Open the browser to access the product page using the following URL: + ``` + http://localhost:/productpage + ``` + Example + ``` + http://localhost:8080/productpage + ``` + +The following is an example of the product page: +![alt](/img/Bookinfo-productpage-OS.png) + +:::success +You have successfully deployed BookInfo application on a KubeSlice configuration containing at least two clusters. +::: + +## Uninstall Istio BookInfo +To uninstall BookInfo application from your KubeSlice configuration, follow the +instructions in [offboarding namespaces](/versioned_docs/version-1.2.0/uninstall-kubeslice/uninstall-kubeslice.mdx#offboard-application-namespaces). \ No newline at end of file diff --git a/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-iperf-application-on-an-intra-cluster-slice.mdx b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-iperf-application-on-an-intra-cluster-slice.mdx new file mode 100644 index 00000000..0cf2d587 --- /dev/null +++ b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-iperf-application-on-an-intra-cluster-slice.mdx @@ -0,0 +1,360 @@ +# Deploy the iPerf Application on an Intra-Cluster Slice + +## Introduction +iPerf is a tool commonly used to measure network performance, perform network tuning, and more. The iPerf +application consists of two main services, iperf-sleep (client) and iperf-server. + +This tutorial provides the steps to: +* Install the iperf-sleep and iperf-server services on a single worker cluster within a KubeSlice configuration. +* Verify intra-cluster communication over KubeSlice. + +## Prerequisites +Before you begin, ensure the following prerequisites are met: + +- You have the KubeSlice Controller components and worker cluster components on the same cluster. For more information, +see [Installing KubeSlice](/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-controller-install.mdx) +and [Registering the Worker Cluster](/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-register-worker-clusters.mdx). +- Before creating a slice, create the `iperf` namespace in all the participating worker clusters. +Use the following command to create the `iperf` namespace: + ``` + kubectl create ns iperf + ``` + +## Create a Slice +To install the iPerf application on a single cluster, you must create a slice without Istio enabled. 
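If the project already contains slices, you can list the existing SliceConfig objects first so the new slice name does not collide with one that is in use. A minimal check from the controller context, using `kubeslice-avesha` as a stand-in for your project namespace:

```
# List slices already defined in the project namespace (replace kubeslice-avesha with your project namespace)
kubectl get sliceconfigs.controller.kubeslice.io -n kubeslice-avesha
```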
+ +### Create the Slice Configuration YAML File +Use the following template to create a slice without Istio enabled: +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: +spec: + sliceSubnet: #For example: 10.32.0.0/16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - + qosProfileDetails: + queueType: HTB + priority: 1 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - + applicationNamespaces: + isolationEnabled: false #make this true in case you want to enable isolation + allowedNamespaces: + - namespace: kube-system + clusters: + - +``` + +### Apply the Slice Configuration +The following information is required. + + +| Variable | Description | +|------------|------------------------| +| `` | The name of the cluster. | +| ``| The name of the slice configuration file. | +| `` | The project name on which you apply the slice configuration file. | + +Perform these steps: + +1. Switch the context to the KubeSlice Controller using the following command: + + ``` + kubectx + ``` +2. Apply the YAML file on the project namespace using the following command: + + ``` + kubectl apply -f .yaml -n + ``` + +## Deploy iPerf + +In this tutorial, iperf-sleep and iperf-server will be deployed in the same worker clusters. The cluster is used +for iperf-sleep is referred to as `sleep cluster` as well as for iperf-server. Therefore it is also referred to +as `server cluster`. + +### Create the iPerf Sleep YAML File + +Use the following template to create a `iperf-sleep.yaml` deployment file. +``` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iperf-sleep + namespace: iperf + labels: + app: iperf-sleep +spec: + replicas: 1 + selector: + matchLabels: + app: iperf-sleep + template: + metadata: + labels: + app: iperf-sleep + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: Always + command: ["/bin/sleep", "3650d"] + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +``` + +### Apply the iPerf Sleep YAML File + +Perform these steps: + +1. Switch the context to the registered cluster you want to install iperf-sleep. + ``` + kubectx + ``` +2. Apply the `iperf-sleep.yaml` deployment file using the following command: + ``` + kubectl apply -f iperf-sleep.yaml -n iperf + ``` + +## Create the iPerf Server YAML File +Using the following template create a `iperf-server.yaml` deployment file. All fields in the template will remain +the same except for **one `slice name`** instance which must be replaced with the name of your slice. 
+``` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iperf-server + namespace: iperf + labels: + app: iperf-server +spec: + replicas: 1 + selector: + matchLabels: + app: iperf-server + template: + metadata: + labels: + app: iperf-server + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: Always + args: + - '-s' + - '-p' + - '5201' + ports: + - containerPort: 5201 + name: server + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +--- +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: iperf-server + namespace: iperf +spec: + slice: + selector: + matchLabels: + app: iperf-server + ports: + - name: tcp + containerPort: 5201 + protocol: TCP +``` + +## Apply the iPerf Server YAML File +Install the iperf server on the same worker cluster that has iperf sleep installed, you must remain on the same context of the worker cluster. + +Apply the iperf-server.yaml deployment. +``` +kubectl apply -f iperf-server.yaml -n iperf +``` + +## Validate your iPerf Installation +To verify our iPerf installation, switch contexts to the cluster with the iperf-sleep.yaml applied. + +## Validate the iPerf Sleep Installation +Perform these steps: + +1. Switch the context to the worker cluster. + ``` + kubectx + ``` +2. Validate the iperf-sleep pods belonging to the `iperf` namespace using the following command: + ``` + kubectl get pods -n iperf + ``` + Example Output + ``` + NAME READY STATUS RESTARTS AGE + iperf-sleep-5477bf94cb-vmmtd 3/3 Running 0 10s + ``` + +3. Validate the ServiceImport using the following command: + ``` + kubectl get serviceimport -n iperf + ``` + Example Output + ``` + NAME SLICE PORT(S) ENDPOINTS STATUS ALIAS + iperf-server lion 1 READY + ``` + +## Validate the iPerf Server Installation +Perform these steps: + +1. Switch the context to the worker cluster. + ``` + kubectx + ``` +2. Validate the iperf-server pods belonging to the `iperf` namespace using the following command: + ``` + kubectl get pods -n iperf + ``` + + Example Output + ``` + NAME READY STATUS RESTARTS AGE + iperf-server-5958958795-fld2p 3/3 Running 0 20s + ``` + +3. Validate the ServiceImport using the following command: + ``` + kubectl get serviceimport -n iperf + ``` + + Example Output + ``` + NAME SLICE PORT(S) ENDPOINTS STATUS ALIAS + iperf-server lion 1 READY + ``` +4. Validate the ServiceExport using the following command: + ``` + kubectl get serviceexport -n iperf + ``` + Example Output + ``` + NAME SLICE INGRESS PORT(S) ENDPOINTS STATUS ALIAS + iperf-server lion 5201/TCP 1 READY + ``` + +## Validate ServiceExportconfig and ServiceImportconfig +Perform these steps on the worker cluster where KubeSlice Controller is installed: + +1. Switch the context of the cluster. + ``` + kubectx + ``` +2. Validate serviceexportconfig using the following command: + ``` + kubectl get serviceexportconfigs -A + ``` + Example Output + ``` + NAMESPACE NAME AGE + kubeslice-devops iperf-server 5m12s + ``` +3. 
Validate the workerserviceimports using the following command: + ``` + kubectl get workerserviceimports -A + ``` + Example Output + ``` + NAMESPACE NAME AGE + kubeslice-devops iperf-server-iperf-lion-worker-cluster-1 5m59s + kubeslice-devops iperf-server-iperf-lion-worker-cluster-2 5m59s + ``` + +## Get the DNS Name + +Use the following command to describe the iperf-server service and retrieve the short and full DNS names for the +service. Use the short DNS name later to verify the inter-cluster communication. + +``` +kubectl describe serviceimport iperf-server -n iperf | grep "Dns Name:" +``` +Expected Output +``` +Dns Name: iperf-server.iperf.svc.slice.local #The DNS Name listed here will be used as the DNS Name below. +Dns Name: ..iperf-server.iperf.svc.slice.local #Full DNS Name +``` + +## Verify the Intra-Cluster Communication +Perform these steps: + +1. List the pods in the `iperf` namespace to get the full name of the iperf-sleep pod. + ``` + kubectl get pods -n iperf + ``` +2. Using the pod name you just retrieved, execute the command into the iperf-sleep pod with the following command: + ``` + kubectl exec -it -c iperf -n iperf -- sh + ``` +3. Once attached to the pod, use the short DNS Name retrieved above to connect to the server from the sleep pod. + ``` + iperf -c -p 5201 -i 1 -b 10Mb; + ``` + +Example Output + +If the iperf-sleep pod is able to reach the iperf-server pod, you should see similar output to that below. +``` +> kubectl exec -it iperf-sleep-5477bf94cb-vmmtd -c iperf -n iperf -- sh +/ $ iperf -c iperf-server.iperf.svc.slice.local -p 5201 -i 1 -b 10Mb; +------------------------------------------------------------ +Client connecting to iperf-server.iperf.svc.slice.local, TCP port 5201 +TCP window size: 45.0 KByte (default) +------------------------------------------------------------ +[ 1] local 10.1.1.89 port 38400 connected with 10.1.2.25 port 5201 +[ ID] Interval Transfer Bandwidth +[ 1] 0.00-1.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 1.00-2.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 2.00-3.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 3.00-4.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 4.00-5.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 5.00-6.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 6.00-7.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 7.00-8.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 8.00-9.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 9.00-10.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 0.00-10.00 sec 12.8 MBytes 10.7 Mbits/sec +/ $ +``` + +## Uninstall iPerf +To uninstall iPerf application from your KubeSlice configuration, follow the instructions +in [offboarding namespaces](/versioned_docs/version-1.2.0/uninstall-kubeslice/uninstall-kubeslice.mdx#offboard-application-namespaces). diff --git a/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-iperf-application.mdx b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-iperf-application.mdx new file mode 100644 index 00000000..ebcb4d81 --- /dev/null +++ b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-iperf-application.mdx @@ -0,0 +1,380 @@ +# Deploy the iPerf Application + +## Introduction +iPerf is a tool commonly used to measure network performance, perform network tuning, and more. The iPerf +application consists of two main services, iperf-sleep (client) and iperf-server. + +This tutorial provides the steps to: +* Install the iperf-sleep and iperf-server services on two clusters within a KubeSlice configuration. +* Verify inter-cluster communication over KubeSlice. 
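+
+As a preview of where this tutorial ends up, the final verification resolves the exported iperf-server service
+through the slice DNS name and runs the iPerf client against it. The command below is assembled from the
+validation steps later in this tutorial; the pod name is a placeholder that you retrieve at that point:
+```
+# From the sleep cluster, run the iPerf client against the slice-local DNS name of the exported service
+kubectl exec -it <iperf-sleep pod name> -c iperf -n iperf -- \
+  iperf -c iperf-server.iperf.svc.slice.local -p 5201 -i 1 -b 10Mb
+```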
+ +## Prerequisites +Before you begin, ensure the following prerequisites are met: + +- You have a KubeSlice configuration with two or more clusters registered. For more information, +see [Installing KubeSlice](/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-controller-install.mdx) +and [Registering the Worker Cluster](/versioned_docs/version-1.2.0/install-kubeslice/yaml/yaml-register-worker-clusters.mdx). +- Before creating the slice, create a `iperf` namespace in all participating worker clusters. + Use the following command to create the `iperf` namespace: + ``` + kubectl create ns iperf + ``` +- You have the slice created across the worker clusters. For more information, +see [Create a Slice](/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx). + + +## Create the Slice Configuration YAML File +Use the following template to create the Slice without Istio: +``` +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: +spec: + sliceSubnet: #For example: 10.32.0.0/16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - + qosProfileDetails: + queueType: HTB + priority: 1 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - + applicationNamespaces: + isolationEnabled: false #make this true in case you want to enable isolation + allowedNamespaces: + - namespace: kube-system + clusters: + - +``` + +## Apply the Slice Configuration +The following information is required. + +| Variable | Description | +|---------------|--------------------------------| +| `` | The given name of the cluster. | +| `` | The name of the slice configuration file | +| `` | The project namespace on which you apply the slice configuration file. | + +Perform these steps: + +1. Switch the context to the KubeSlice Controller using the following command: + ``` + kubectx + ``` + +2. Apply the YAML file on the project namespace using the following command: + ``` + kubectl apply -f .yaml -n + ``` + +## Deploy iPerf +In this tutorial, iperf-sleep and iperf-server will be deployed in the two different clusters. The cluster used for +iperf-sleep is referred to as `sleep cluster`, and the cluster used for iperf-server is referred to as `server cluster`. + +## Create the iPerf Sleep YAML File + +Use the following template to create a `iperf-sleep.yaml` deployment file. +``` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iperf-sleep + namespace: iperf + labels: + app: iperf-sleep +spec: + replicas: 1 + selector: + matchLabels: + app: iperf-sleep + template: + metadata: + labels: + app: iperf-sleep + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: Always + command: ["/bin/sleep", "3650d"] + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +``` + +## Apply the iPerf Sleep YAML File + + +Perform these steps: + +1. Switch the context to the registered cluster you want to install iperf-sleep. + ``` + kubectx + ``` +2. Create the `iperf` namespace using the following command: + ``` + kubectl create ns iperf + ``` +3. 
Apply the `iperf-sleep.yaml` deployment file using the following command: + ``` + kubectl apply -f iperf-sleep.yaml -n iperf + ``` + +## Create the iPerf Server YAML File +Using the following template create a `iperf-server.yaml` deployment file. All fields in the template will +remain the same except for **one `slice name`** instances which must be replaced with the name of your slice. +``` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iperf-server + namespace: iperf + labels: + app: iperf-server +spec: + replicas: 1 + selector: + matchLabels: + app: iperf-server + template: + metadata: + labels: + app: iperf-server + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: Always + args: + - '-s' + - '-p' + - '5201' + ports: + - containerPort: 5201 + name: server + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +--- +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: iperf-server + namespace: iperf +spec: + slice: + selector: + matchLabels: + app: iperf-server + ingressEnabled: false + ports: + - name: tcp + containerPort: 5201 + protocol: TCP +``` + +### Apply the iPerf Server YAML File +Perform these steps: + +1. Switch the context to the registered cluster you want to install the iperf server. + ``` + kubectx + ``` +2. Create the `iperf` namespace using the following command: + ``` + kubectl create ns iperf + ``` +3. Apply the `iperf-server.yaml` deployment file using the following command: + ``` + kubectl apply -f iperf-server.yaml -n iperf + ``` + +## Validate your iPerf Installation +To verify our iPerf installation, first switch the context to the cluster with the iperf-sleep.yaml applied. + +## Validate the iPerf Sleep Installation +Perform these steps: + +1. Switch the context to the cluster where you installed the iperf Sleep. + ``` + kubectx + ``` + +2. Validate the iperf-sleep pods belonging to the `iperf` namespace using the following command: + ``` + kubectl get pods -n iperf + ``` + Example Output + ``` + NAME READY STATUS RESTARTS AGE + iperf-sleep-5477bf94cb-vmmtd 3/3 Running 0 10s + ``` + +3. Validate the ServiceImport using the following command: + ``` + kubectl get serviceimport -n iperf + ``` + Example Output + ``` + NAME SLICE PORT(S) ENDPOINTS STATUS ALIAS + iperf-server lion 1 READY + ``` + +## Validate the iPerf Server Installation +Perform these steps: + +1. Switch the context to the cluster where you have installed the iperf Server. + ``` + kubectx + ``` +2. Validate the iperf-server pods belonging to the `iperf` namespace using the following command: + ``` + kubectl get pods -n iperf + ``` + Example Output + ``` + NAME READY STATUS RESTARTS AGE + iperf-server-5958958795-fld2p 3/3 Running 0 20s + ``` +3. Validate the ServiceImport using the following command: + ``` + kubectl get serviceimport -n iperf + ``` + Example Output + ``` + NAME SLICE PORT(S) ENDPOINTS STATUS ALIAS + iperf-server lion 1 READY + ``` +4. Validate the ServiceExport using the following command: + ``` + kubectl get serviceexport -n iperf + ``` + Example Output + ``` + NAME SLICE INGRESS PORT(S) ENDPOINTS STATUS ALIAS + iperf-server lion 5201/TCP 1 READY + ``` + +## Validate ServiceExport and ServiceImport +Perform these steps in the cluster where KubeSlice Controller is installed: + +1. Switch the context of the cluster. + ``` + kubectx + ``` + +2. 
Validate serviceexportconfig using the following command: + ``` + kubectl get serviceexportconfigs -A + ``` + + Example Output + ``` + NAMESPACE NAME AGE + kubeslice-devops iperf-server 5m12s + ``` + +3. Validate the workerserviceimports using the following command: + ``` + kubectl get workerserviceimports -A + ``` + + Example Output + ``` + NAMESPACE NAME AGE + kubeslice-devops iperf-server-iperf-lion-worker-cluster-1 5m59s + kubeslice-devops iperf-server-iperf-lion-worker-cluster-2 5m59s + ``` + +## Get the DNS Name + +Use the following command to describe the iperf-server service and retrieve the short and full DNS names for the service. We will use the short DNS name later to verify the inter-cluster communication. +``` +kubectl describe serviceimport iperf-server -n iperf | grep "Dns Name:" +``` + +Expected Output +``` +Dns Name: iperf-server.iperf.svc.slice.local #The DNS Name listed here will be used as the DNS Name below. +Dns Name: ..iperf-server.iperf.svc.slice.local #Full DNS Name +``` + +## Verify the Inter-Cluster Communication +Perform these steps: + +1. Switch the context of the cluster. + ``` + kubectx + ``` + +2. List the pods in the `iperf` namespace to get the full name of the iperf-sleep pod. + ``` + kubectl get pods -n iperf + ``` + +3. Using the pod name you just retrieved, execute the command into the iperf-sleep pod with the following command: + ``` + kubectl exec -it -c iperf -n iperf -- sh + ``` + +4. After attaching the pod, use the short DNS Name retrieved above to connect to the server from the sleep pod. + ``` + iperf -c -p 5201 -i 1 -b 10Mb; + ``` + +Expected Output + +If the iperf-sleep pod is able to reach the iperf-server pod across clusters, you should see similar output to +that below. +``` +> kubectl exec -it iperf-sleep-5477bf94cb-vmmtd -c iperf -n iperf -- sh +/ $ iperf -c iperf-server.iperf.svc.slice.local -p 5201 -i 1 -b 10Mb; +------------------------------------------------------------ +Client connecting to iperf-server.iperf.svc.slice.local, TCP port 5201 +TCP window size: 45.0 KByte (default) +------------------------------------------------------------ +[ 1] local 10.1.1.89 port 38400 connected with 10.1.2.25 port 5201 +[ ID] Interval Transfer Bandwidth +[ 1] 0.00-1.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 1.00-2.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 2.00-3.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 3.00-4.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 4.00-5.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 5.00-6.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 6.00-7.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 7.00-8.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 8.00-9.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 9.00-10.00 sec 1.25 MBytes 10.5 Mbits/sec +[ 1] 0.00-10.00 sec 12.8 MBytes 10.7 Mbits/sec +/ $ +``` + + +## Uninstall iPerf +To uninstall iPerf application from your KubeSlice configuration, follow the instructions +in [offboarding namespaces](/versioned_docs/version-1.2.0/uninstall-kubeslice/uninstall-kubeslice.mdx#offboard-application-namespaces). diff --git a/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/test-network-connectivity.mdx b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/test-network-connectivity.mdx new file mode 100644 index 00000000..d2f6e94d --- /dev/null +++ b/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/test-network-connectivity.mdx @@ -0,0 +1,275 @@ +# Test the Network Connectivity +iPerf is a tool commonly used to measure network performance, perform +network tuning, and more. 
The iPerf application consists of two main +services, iperf-sleep (client) and iperf-server. + +### Prerequisites for using the iPerf Tool +To deploy a application, you must create a namespace for that +application in both the client and server clusters before creating the +slice. + +Create the `iperf` namespace on the worker clusters identified as the +client and server using the following command: + +``` +kubectl create ns iperf +``` + +### Deploy the iPerf Application +Deploy the iPerf application and test the network connectivity between the worker clusters. + +:::info +You can also use an intra-cluster slice to test the intra cluster connectivity. To know +more, see [deploying the iPerf application on an intra-cluster slice](/versioned_docs/version-1.2.0/tutorials/yaml-tutorials/deploy-the-iperf-application-on-an-intra-cluster-slice.mdx). +::: + +Identify a worker cluster as a client and another worker cluster as a +server and configure them to test the network connectivity. + +To establish the connectivity between two worker clusters: + +1. Switch context to the worker cluster identified as the client using + the following command: + + ``` + kubectx + ``` + +2. Onboard the existing `iperf` namespace to the slice. To know more, see + [onboarding namespaces](/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx#manage-namespaces). + + :::caution + Ensure that you have onboarded the iperf namespace. If you create a namespace after + the slice creation, then you could face issues when you deploy the application as the + namespace creation takes some time. + ::: + +3. Create the `iperf-sleep.yaml` using the following template. + + ``` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: iperf-sleep + namespace: iperf + labels: + app: iperf-sleep + spec: + replicas: 1 + selector: + matchLabels: + app: iperf-sleep + template: + metadata: + labels: + app: iperf-sleep + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: Always + command: ["/bin/sleep", "3650d"] + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true + ``` + +4. Apply the`iperf-sleep.yaml` using the following command: + + ``` + kubectl apply -f iperf-sleep.yaml -n iperf + ``` + +5. Validate the iPerf client by checking if the pods are running on the +worker cluster using the following command: + + ``` + kubectl get pods -n iperf + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + iperf-sleep-676b945fbf-9l9h7 3/3 Running 0 60s + ``` + +6. Switch context to the worker cluster identified as the server using +the following command: + + ``` + kubectx + ``` + +7. Onboard the existing `iperf` namespace on the slice. To know more, see + [onboarding namespaces](/versioned_docs/version-1.2.0/install-kubeslice/yaml/slice-operations/slice-operations-slice-creation.mdx#manage-namespaces). + + :::caution + Ensure that you have onboarded the iperf namespace. If you create a + namespace after the slice creation, then you could face issues when you + deploy the application as the namespace creation takes some time. + ::: + +8. Create the `iperf-server.yaml` file using the following template. 
+ + ``` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: iperf-server + namespace: iperf + labels: + app: iperf-server + spec: + replicas: 1 + selector: + matchLabels: + app: iperf-server + template: + metadata: + labels: + app: iperf-server + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: Always + args: + - '-s' + - '-p' + - '5201' + ports: + - containerPort: 5201 + name: server + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true + --- + apiVersion: networking.kubeslice.io/v1beta1 + kind: ServiceExport + metadata: + name: iperf-server + namespace: iperf + spec: + slice: # water + selector: + matchLabels: + app: iperf-server + ingressEnabled: false + ports: + - name: tcp + containerPort: 5201 + protocol: TCP + ``` + +9. Apply the `iperf-server.yaml` configured in the worker cluster using the following +command: + + ``` + kubectl apply -f iperf-server.yaml -n iperf + ``` + +10. Validate the iPerf server by checking if the pods are running on the worker cluster +using the following command: + + + ``` + kubectl get pods -n iperf + ``` + + Expected Output + + ``` + NAME READY STATUS RESTARTS AGE + iperf-server-7889799774-s5zrs 3/3 Running 0 60s + ``` + +11. Validate the service export of the iPerf server on the worker clusters using +the following command: + + ``` + kubectl get serviceexport -n iperf + ``` + Expected Output + + ``` + NAME SLICE INGRESS PORT(S) ENDPOINTS STATUS ALIAS + iperf-server water 5201/TCP 1 READY + ``` + +12. Validate the service import of the iPerf server on the worker cluster using the +following command: + + ``` + kubectl get serviceimport -n iperf + ``` + Expected Output + + ``` + NAME SLICE PORT(S) ENDPOINTS STATUS ALIAS + iperf-server water 5201/TCP 1 READY + ``` + +13. Validate the service import of the iPerf client on the other worker cluster by running +the following command: + + ``` + kubectl get serviceimport -n iperf + ``` + + Expected Output + + ``` + NAME SLICE PORT(S) ENDPOINTS STATUS ALIAS + iperf-server water 5201/TCP 1 READY + ``` + +14. Switch context to the iperf client cluster using the following command: + + ``` + kubectx + ``` + +15. 
Check the connectivity from the iPerf client by using the following command:
+
+    ```
+    kubectl exec -it deploy/iperf-sleep -c iperf -n iperf -- iperf -c iperf-server.iperf.svc.slice.local -p 5201 -i 1 -b 10Mb;
+    ```
+    Expected Output
+
+    ```
+    ------------------------------------------------------------
+    Client connecting to iperf-server.iperf.svc.slice.local, TCP port 5201
+    TCP window size: 45.0 KByte (default)
+    ------------------------------------------------------------
+    [  1] local 10.1.1.5 port 58116 connected with 10.1.2.5 port 5201
+    [ ID] Interval        Transfer     Bandwidth
+    [  1] 0.00-1.00 sec    640 KBytes  5.24 Mbits/sec
+    [  1] 1.00-2.00 sec    640 KBytes  5.24 Mbits/sec
+    [  1] 2.00-3.00 sec    640 KBytes  5.24 Mbits/sec
+    [  1] 3.00-4.00 sec    512 KBytes  4.19 Mbits/sec
+    [  1] 4.00-5.00 sec    640 KBytes  5.24 Mbits/sec
+    [  1] 5.00-6.00 sec    768 KBytes  6.29 Mbits/sec
+    [  1] 6.00-7.00 sec    512 KBytes  4.19 Mbits/sec
+    [  1] 7.00-8.00 sec    512 KBytes  4.19 Mbits/sec
+    [  1] 8.00-9.00 sec    512 KBytes  4.19 Mbits/sec
+    [  1] 9.00-10.00 sec   768 KBytes  6.29 Mbits/sec
+    [  1] 10.00-10.45 sec  384 KBytes  7.04 Mbits/sec
+    [  1] 0.00-10.45 sec  6.38 MBytes  5.12 Mbits/sec
+    ```
+
+    :::success
+    The connectivity between the worker clusters on a slice is successful!
+    :::
\ No newline at end of file diff --git a/versioned_docs/version-1.2.0/uninstall-kubeslice/uninstall-kubeslice.mdx b/versioned_docs/version-1.2.0/uninstall-kubeslice/uninstall-kubeslice.mdx new file mode 100644 index 00000000..6a43945f --- /dev/null +++ b/versioned_docs/version-1.2.0/uninstall-kubeslice/uninstall-kubeslice.mdx @@ -0,0 +1,230 @@ +# Uninstall KubeSlice
+This topic describes how to uninstall KubeSlice. To uninstall KubeSlice completely, follow these steps in order:
+
+- Delete the ServiceExport for each application
+- Offboard all namespaces from the slice
+- Delete a slice
+- Uninstall the Slice Operator
+- Deregister the worker cluster
+- Delete a project
+- Uninstall the KubeSlice Controller
+
+## Delete ServiceExports
+If a ServiceExport was created in the application namespace, it must be deleted first.
+
+Deleting the ServiceExport automatically removes the corresponding ServiceImport on all the clusters of the slice.
+
+The following variables are required to delete the ServiceExport.
+
+| Variables | Description |
+|-------------------------|---------------------------------------------------------|
+| `<cluster name>` | The name of the cluster the application is deployed on. |
+| `<serviceexport name>` | The name of the service export that you want to delete. |
+| `<namespace>` | The namespace the application is deployed on. |
+
+Switch the context to the cluster the application is deployed on:
+
+```
+kubectx <cluster name>
+```
+
+Use the following command to delete the ServiceExport from the cluster:
+
+```
+kubectl delete serviceexport <serviceexport name> -n <namespace>
+```
+## Offboard Application Namespaces
+To offboard the namespace from a slice, delete the `namespace` and the associated
+`clusters` under the `applicationNamespaces` in the slice configuration file as
+illustrated below.
+
+```
+  namespaceIsolationProfile:
+    applicationNamespaces:
+    - namespace: iperf
+      clusters:
+      - '*'
+    - namespace: bookinfo
+      clusters:
+      - '*'
+```
+
+For example, in the above slice configuration YAML file, if you want to offboard the
+BookInfo namespace from all clusters, edit the slice configuration YAML file and remove it
+from `applicationNamespaces` as illustrated below.
+ +``` +namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - '*' +``` + +To delete a slice, you must remove all the namespaces and the corresponding clusters. So, +edit the slice configuration YAML file to remove them. After you remove the namespaces +and the corresponding clusters, the application namespace configuration looks as +illustrated below. + +``` +namespaceIsolationProfile: + applicationNamespaces: +``` + +Apply the slice configuration YAML to update the offboarded namespaces. + +``` +kubectl apply -f .yaml -n +``` + +:::success +You have successfully offboarded the namespaces from the slice. +::: + +## Delete Slices + +:::caution +This step **must** be completed before uninstalling the Slice Operator. Failing to do so +can result in slices and resources not being cleaned up properly. +::: + +Switch the context to the controller cluster using the following command: + +``` +kubectx +``` + +To delete an individual slice, use the following command: + +``` +kubectl delete sliceconfig -n kubeslice- +``` + +To delete all the slices, use the following command: + +``` +kubectl delete sliceconfig --all -n kubeslice- +``` + +:::success +You have successfully deleted the slice(s). +::: + +## Uninstall the Slice Operator +:::caution +You **must** remove the cluster from all the slices it is connected to before uninstalling the +Slice Operator in the worker cluster. Failing to do so can result in slices and +resources not being cleaned up properly. +::: + +After removing a cluster from all the slices, you can uninstall the Slice Operator +in the worker cluster. + +Switch the context to the worker cluster using the following command: + +``` +kubectx +``` + +Uninstall the Slice Operator using the following command: + +``` +helm uninstall [RELEASE_NAME] -n kubeslice-system +``` + +### Delete Slice Operator CRDs +Delete the CRDs of the Slice Operator using the following commands: + +``` +kubectl delete crd serviceexports.networking.kubeslice.io +kubectl delete crd serviceimports.networking.kubeslice.io +kubectl delete crd slices.networking.kubeslice.io +kubectl delete crd slicegateways.networking.kubeslice.io +``` +### Delete kubeslice-system Namespace +Delete the `kubeslice-system` namespace using the following command: + +``` +kubectl delete ns kubeslice-system +``` +:::success +You have now uninstalled the Slice Operator from your registered cluster. +::: + +## Deregister Worker Clusters +After uninstalling the Slice Operator successfully, deregister your worker cluster from +the KubeSlice Controller. + +Switch the context to the KubeSlice Controller cluster. + +``` +kubectx +``` + +Deregister a worker cluster using the following command: + +``` +kubectl delete clusters -n kubeslice- +``` + +To deregister all worker clusters, use the following command: + +``` +kubectl delete clusters --all -n kubeslice- +``` + +:::success +You have now successfully deregistered the cluster from the KubeSlice Controller. +::: + +## Delete a Project + +:::caution +Before deleting a project, ensure all namespaces have been offboarded and the slice has been deleted. +::: + +Delete a project after deregistering the worker cluster. The service account, namespaces, clusters, secrets, certificates, and tokens +are all deleted when the project is deleted from the KubeSlice Controller. 
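+
+Before deleting the project, you can optionally confirm that no worker clusters remain registered under it. This
+check is a suggestion rather than a documented step; it assumes the controller cluster context and the
+`kubeslice-<project name>` namespace convention used in the previous sections:
+
+```
+# Expect "No resources found" once every worker cluster has been deregistered
+kubectl get clusters -n kubeslice-<project name>
+```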
+ +Use the following command to delete the project: + +``` +kubeslice-cli delete project -n +``` + +## Uninstall the KubeSlice Controller + + +### Uninstall the KubeSlice Controller + +Uninstall the KubeSlice Controller using the following command: + +``` +helm uninstall kubeslice-controller -n kubeslice-controller +``` +### Delete the kubeslice-controller Namespace + +Delete the `kubeslice-controller` namespace using the following command: + +``` +kubectl delete ns kubeslice-controller +``` +### Delete the Certificate Manager +Delete the Certificate Manager using the following command: + +``` +helm uninstall cert-manager -n cert-manager +``` +### Delete the Certificate Manager Namespace + +Delete the namespace `cert-manager` using the following command: + +``` +kubectl delete ns cert-manager +``` + +:::success +All set! KubeSlice Controller has been uninstalled from your cluster. +::: \ No newline at end of file diff --git a/versioned_sidebars/version-1.2.0-sidebars.json b/versioned_sidebars/version-1.2.0-sidebars.json new file mode 100644 index 00000000..ab50cc23 --- /dev/null +++ b/versioned_sidebars/version-1.2.0-sidebars.json @@ -0,0 +1,311 @@ +{ + "version3": [ + { + "type": "category", + "label": "Overview", + "link": { + "type": "generated-index", + "title": "KubeSlice Overview", + "description": "The Kubeslice Overview section provides a high-level introduction and understanding of Kubeslice, a Kubernetes management platform. It covers the architecture, components, and key features of Kubeslice, giving users a comprehensive overview of its capabilities and functionalities. This section serves as a starting point for users to familiarize themselves with Kubeslice and gain insights into its purpose and benefits.", + "keywords": [ + "guides" + ], + "slug": "/" + }, + "items": [ + "overview/overview", + "overview/architecture", + "overview/components", + "overview/features" + ] + }, + { + "type": "category", + "label": "Playground", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Playground", + "description": "Welcome to the KubeSlice Playground, a limited-time sandbox environment that allows you to explore and experiment with KubeSlice Enterprise on kind clusters. This playground provides a preconfigured setup with all the necessary tool sets and is available for a duration of 4 hours. With this installation guide, you will be guided through the process of setting up KubeSlice on kind clusters, creating slices across the clusters, and testing connectivity using the iPerf tool.", + "keywords": [ + "playground", "kubeslice" + ] + }, + "items": [ + "playground/sandbox" + ] + }, + { + "type": "category", + "label": "Get Started", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Get Started", + "description": "The Get Started section serves as a comprehensive guide to help you quickly get started with using Kubeslice. Before diving into Kubeslice, it's important to ensure that you have completed the prerequisites outlined in the Prerequisites section, which include installing the necessary command line tools like Kubernetes CLI (kubectl) and KubeSlice-CLI (kubeslice-cl), as well as having a basic understanding of Kubernetes concepts. By following the instructions and best practices outlined in this section, you'll be able to efficiently leverage the power of Kubeslice and streamline your application deployment and management processes. 
Start your Kubeslice journey today and experience the benefits of simplified Kubernetes application deployment.", + "keywords": [ + "get-started", "kubeslice" + ] + }, + "items": [ + { + "type": "category", + "label": "Prerequisites", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Installation Prerequisites", + "description": "The prerequisites section of the documentation outlines the necessary requirements and conditions that need to be met before installing and using KubeSlice. It provides a comprehensive checklist of the software, tools, and resources that should be in place to ensure a smooth installation and optimal usage of KubeSlice. The prerequisites section helps users understand the dependencies and configurations needed, such as having a compatible Kubernetes cluster, sufficient system resources, and required access privileges. By following the guidelines in this section, users can ensure that their environment meets the necessary prerequisites for a successful deployment and utilization of KubeSlice.", + "keywords": [ + "prerequisites", "kubeslice" + ] + }, + "items": [ + "get-started/prerequisites/prerequisites-command-line-tools", + "get-started/prerequisites/prerequisites-kubeslice-controller-requirements", + "get-started/prerequisites/prerequisites-worker-cluster-requirements", + "get-started/prerequisites/prerequisites-cluster-authentication", + "get-started/prerequisites/prerequisites-cluster-networking", + "get-started/prerequisites/prerequisites-gateway-node-label", + "get-started/prerequisites/prerequisites-configure-helm-repository", + "get-started/prerequisites/prerequisites-install-istio" + + + ] + } + + ] + }, + { + "type": "category", + "label": "Install KubeSlice", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Install KubeSlice", + "description": "The Install Kubeslice section offers comprehensive, step-by-step instructions for installing and setting up Kubeslice in your environment. It covers various methods and approaches to install Kubeslice, providing flexibility based on your specific requirements. The section includes detailed procedures and commands for deploying Kubeslice components, configuring settings, and integrating it with your Kubernetes cluster. It provides guidance on downloading the Kubeslice package, installing necessary dependencies, and executing the installation process. By following the instructions in this section, users can successfully install Kubeslice, ensuring its proper integration and operation within their Kubernetes environment.", + "keywords": [ + "install", "kubeslice" + ] + }, + "items": [ + { + "type": "category", + "label": "Using kubeslice-cli", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "KubeSlice CLI", + "description": "Installing KubeSlice using the KubeSlice CLI method of installation is a straightforward, efficient and recommended approach. The KubeSlice CLI provides a convenient way to install and manage KubeSlice Enterprise deployments. To begin the installation process, the CLI can be easily configured by setting up the necessary installation template. With the CLI properly configured, users can initiate the installation by running a simple command, which will handle the installation process automatically. This method ensures a smooth and hassle-free installation, allowing users to quickly get KubeSlice up and running in their desired environment. 
By leveraging the power of the KubeSlice CLI, users can effortlessly deploy and manage KubeSlice Enterprise, enabling efficient slice creation, cluster management, and data visualization.", + "keywords": [ + "kubeslice-cli-install", "kubeslice" + ] + }, + "items": [ + "install-kubeslice/kubeslice-cli/install-kubeslice-cli", + "install-kubeslice/kubeslice-cli/command-reference", + "install-kubeslice/kubeslice-cli/topology-configuration", + "install-kubeslice/kubeslice-cli/install-kubeslice", + "install-kubeslice/kubeslice-cli/uninstall-kubeslice" + ] + }, + { + "type": "category", + "label": "Using YAML", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "YAML Installation", + "description": "Installing KubeSlice using the YAML method provides a flexible and customizable approach for deployment. The YAML method allows users to define their desired configuration in a YAML file, specifying various parameters and settings for their KubeSlice installation. To get started, users need to create a series of YAML files with the necessary specifications, including cluster details, slice configurations, and other required settings. Once the YAML files are prepared, users can apply it using Kubernetes tools such as kubectl, which will initiate the installation process. This method offers granular control over the installation process, allowing users to tailor their KubeSlice deployment according to their specific requirements. By leveraging the YAML method, users can seamlessly integrate KubeSlice into their existing infrastructure and take advantage of its powerful features for managing slices, cluster connectivity, and visualizing data within their Kubernetes clusters.", + "keywords": [ + "yaml-install", "kubeslice" + ] + }, + "items": [ + "install-kubeslice/yaml/yaml-controller-install", + "install-kubeslice/yaml/yaml-register-worker-clusters", + + + { + "type": "category", + "label": "Slice Operations", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Slice Operations", + "description": "The Slice Operations page serves as a comprehensive guide for performing various operations and tasks with Kubeslice. It covers a wide range of topics related to managing and manipulating Kubernetes slices using Kubeslice's powerful features and functionality. The page includes detailed instructions and examples for each submenu of Slice Operations, including Slice Creation, . By following the guidance provided in this section, users can effectively leverage Kubeslice to streamline their slice management processes. They will learn how to create slices. This knowledge will enable users to ensure efficient operations of their Kubernetes applications. Explore the Slice Operations page and its submenus to gain a deeper understanding of Kubeslice's capabilities and enhance your Kubernetes slice management skills.", + "keywords": [ + "slice-operations", "kubeslice" + ] + + }, + "items": [ + "install-kubeslice/yaml/slice-operations/slice-operations-slice-creation", + "install-kubeslice/yaml/slice-operations/slice-operations-slice-deletion", + "install-kubeslice/yaml/slice-operations/slice-cluster-health" + ] + }, + { + "type": "category", + "label": "Events", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Events", + "description": "Kubernetes Events are informative messages that provide real-time updates about the activities and state changes occurring within a Kubernetes cluster. 
They offer insights into various events, such as pod creations, deployments, service changes, and error notifications, enabling administrators to monitor and troubleshoot the cluster's behavior and health.", + "keywords": [ + "events", "kubeslice" + ] + + }, + "items": [ + "install-kubeslice/yaml/events/yaml-events-controller-events", + "install-kubeslice/yaml/events/yaml-events-worker-events" + ] + }, + { + "type": "category", + "label": "Metrics", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Metrics", + "description": "Kubernetes metrics provide valuable insights into the performance and health of your Kubernetes clusters and workloads. These metrics encompass various aspects such as resource utilization, network traffic, and application-specific metrics. By collecting and analyzing Kubernetes metrics, you can gain a deep understanding of your cluster's behavior, identify bottlenecks, optimize resource allocation, and make data-driven decisions for scaling and performance tuning. Monitoring Kubernetes metrics is crucial for maintaining the stability, efficiency, and reliability of your containerized environment.", + "keywords": [ + "metrics", "kubeslice" + ] + + }, + "items": [ + "install-kubeslice/yaml/metrics/yaml-metrics-controller-metrics", + "install-kubeslice/yaml/metrics/yaml-metrics-worker-metrics" + ] + } + + ] + + } + + + + ] + }, + "uninstall-kubeslice/uninstall-kubeslice", + { + "type": "category", + "label": "Add-ons", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Add-ons", + "description": "The add-on section of the documentation provides valuable insights and instructions for extending the functionality of KubeSlice through various add-ons. These add-ons enhance and customize KubeSlice's capabilities to meet specific use cases and requirements. The documentation outlines how to install, configure, and utilize these add-ons effectively. Whether it's integrating with monitoring tools, enabling advanced networking features, or implementing additional security measures, the add-on section offers comprehensive guidance on expanding KubeSlice's capabilities beyond its core features. By exploring the add-ons, users can unlock new possibilities and tailor KubeSlice to suit their unique needs, making it a versatile and adaptable tool for managing slices and optimizing cluster resources.", + "keywords": [ + "kubeslice" + ] + }, + "items": [ + "add-ons/add-ons-slack-events", + "add-ons/add-ons-slack-metrics" + ] + }, + + "troubleshooting/troubleshooting-guide", + { + "type": "category", + "label": "Reference", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Reference", + "description": "The reference section of the documentation provides a collection of tools and configuration information for KubeSlice. It serves as a valuable resource for developers, system administrators, and advanced users, offering essential details on configuration parameters. This section acts as a handy guide, providing the necessary tools and information to effectively utilize KubeSlice in various use cases.", + "keywords": [ + "reference", "kubeslice" + ] + }, + "items": [ + "reference/reference-worker-cluster-secrets-script" + ] + }, + { + "type": "category", + "label": "Tutorials", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Tutorials", + "description": "The tutorials section describes how to deploy applications such as BookInfo and iPerf. 
It also describes the demos using kubeslice-cli.", + "keywords": [ + "tutorials", "kubeslice" + ] + }, + "items": [ + { + "type": "category", + "label": "kubeslice-cli Tutorials", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "kubeslice-cli Tutorials", + "description": "The tutorials section of the documentation describes how to use the demo options on kind and cloud clusters.", + "keywords": [ + "tutorials", "kubeslice-cli" + ] + + }, + "items": [ + "tutorials/kubeslice-cli-tutorials/kubeslice-cli-demo-on-cloud-clusters", + "tutorials/kubeslice-cli-tutorials/kubeslice-cli-demo" + ] + }, + { + "type": "category", + "label": "YAML Tutorials", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "YAML Tutorials", + "description": "The tutorials section of the documentation describes how to test the network connectivity and deploy BookInfo and iPerf applications on a slice.", + "keywords": [ + "tutorials", "kubeslice" + ] + + }, + "items": [ + "tutorials/yaml-tutorials/test-network-connectivity", + "tutorials/yaml-tutorials/deploy-the-bookinfo-application-on-an-intra-cluster-slice", + "tutorials/yaml-tutorials/deploy-the-bookinfo-application", + "tutorials/yaml-tutorials/deploy-the-iperf-application-on-an-intra-cluster-slice", + "tutorials/yaml-tutorials/deploy-the-iperf-application" + ] + } + + ] + }, + { + "type": "category", + "label": "Release Notes", + "collapsed": true, + "link": { + "type": "generated-index", + "title": "Release Notes", + "description": "The release notes section of the documentation provides an overview of the latest updates, enhancements, bug fixes, and new features introduced in each version of KubeSlice. It serves as a valuable resource for users to stay informed about the changes and improvements made to the platform over time. By referring to the release notes, users can understand the evolution of KubeSlice, discover new functionalities, and ensure compatibility with their existing deployments.", + "keywords": [ + "release-notes", "kubeslice" + ] + }, + "items": [ + "release-notes/release-notes-for-kubeslice-oss-1.2.0", + "release-notes/release-notes-for-kubeslice-oss-1.1.0", + "release-notes/release-notes-for-kubeslice-oss-1.0.0", + "release-notes/release-notes-for-kubeslice-oss-0.7.0", + "release-notes/release-notes-for-kubeslice-oss-0.6.0", + "release-notes/release-notes-for-kubeslice-oss-0.5.1", + "release-notes/release-notes-for-kubeslice-oss-0.5.0", + "release-notes/release-notes-for-kubeslice-oss-0.4.0", + "release-notes/release-notes-for-kubeslice-oss-0.3.0", + "release-notes/release-notes-for-kubeslice-oss-0.2.0", + "release-notes/release-notes-for-kubeslice-oss-0.1.0" + ] + } + ] +} diff --git a/versions.json b/versions.json index 62b4d013..d7edcbda 100644 --- a/versions.json +++ b/versions.json @@ -1,4 +1,5 @@ [ + "1.2.0", "1.1.0", "1.0.0", "0.7.0",