diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index b8f9d6e..33a1431 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -48,7 +48,6 @@ rules:
   resources:
   - cacheservers
   - frontproxies
-  - shards
   verbs:
   - create
   - delete
@@ -84,6 +83,7 @@ rules:
   resources:
   - kubeconfigs
   - rootshards
+  - shards
   verbs:
   - get
   - list
diff --git a/internal/controller/shard_controller.go b/internal/controller/shard_controller.go
index 6b5627a..6e46bdb 100644
--- a/internal/controller/shard_controller.go
+++ b/internal/controller/shard_controller.go
@@ -18,12 +18,25 @@ package controller
 
 import (
 	"context"
+	"errors"
+	"fmt"
+
+	k8creconciling "k8c.io/reconciler/pkg/reconciling"
 
+	appsv1 "k8s.io/api/apps/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
+	"github.com/kcp-dev/kcp-operator/internal/reconciling"
+	"github.com/kcp-dev/kcp-operator/internal/resources"
+	"github.com/kcp-dev/kcp-operator/internal/resources/shard"
 	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
 )
 
@@ -33,30 +46,153 @@ type ShardReconciler struct {
 	Scheme *runtime.Scheme
 }
 
-// +kubebuilder:rbac:groups=operator.kcp.io,resources=shards,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=operator.kcp.io,resources=shards/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=operator.kcp.io,resources=shards/finalizers,verbs=update
-
-// Reconcile is part of the main kubernetes reconciliation loop which aims to
-// move the current state of the cluster closer to the desired state.
-// TODO(user): Modify the Reconcile function to compare the state specified by
-// the Shard object against the actual cluster state, and then
-// perform operations to make the cluster state reflect the state specified by
-// the user.
-//
-// For more details, check Reconcile and its Result here:
-// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.0/pkg/reconcile
-func (r *ShardReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-	_ = log.FromContext(ctx)
-
-	// TODO(user): your logic here
-
-	return ctrl.Result{}, nil
-}
-
 // SetupWithManager sets up the controller with the Manager.
 func (r *ShardReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&operatorv1alpha1.Shard{}).
+		Owns(&appsv1.Deployment{}).
 		Complete(r)
 }
+
+// +kubebuilder:rbac:groups=operator.kcp.io,resources=shards,verbs=get;list;watch;update;patch
+// +kubebuilder:rbac:groups=operator.kcp.io,resources=shards/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=operator.kcp.io,resources=shards/finalizers,verbs=update
+// +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
+
+func (r *ShardReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, recErr error) {
+	logger := log.FromContext(ctx)
+	logger.V(4).Info("Reconciling Shard object")
+
+	var s operatorv1alpha1.Shard
+	if err := r.Client.Get(ctx, req.NamespacedName, &s); err != nil {
+		if client.IgnoreNotFound(err) != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to get shard: %w", err)
+		}
+
+		return ctrl.Result{}, nil
+	}
+
+	defer func() {
+		if err := r.reconcileStatus(ctx, &s); err != nil {
+			recErr = kerrors.NewAggregate([]error{recErr, err})
+		}
+	}()
+
+	var rootShard operatorv1alpha1.RootShard
+	if ref := s.Spec.RootShard.Reference; ref != nil {
+		rootShardRef := types.NamespacedName{
+			Namespace: s.Namespace,
+			Name:      ref.Name,
+		}
+
+		if err := r.Client.Get(ctx, rootShardRef, &rootShard); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to get root shard: %w", err)
+		}
+	} else {
+		return ctrl.Result{}, errors.New("no RootShard reference specified in Shard spec")
+	}
+
+	return ctrl.Result{}, r.reconcile(ctx, &s, &rootShard)
+}
+
+func (r *ShardReconciler) reconcile(ctx context.Context, s *operatorv1alpha1.Shard, rootShard *operatorv1alpha1.RootShard) error {
+	var errs []error
+
+	ownerRefWrapper := k8creconciling.OwnerRefWrapper(*metav1.NewControllerRef(s, operatorv1alpha1.SchemeGroupVersion.WithKind("Shard")))
+
+	certReconcilers := []reconciling.NamedCertificateReconcilerFactory{
+		shard.ServerCertificateReconciler(s, rootShard),
+		shard.ServiceAccountCertificateReconciler(s, rootShard),
+		shard.VirtualWorkspacesCertificateReconciler(s, rootShard),
+		shard.RootShardClientCertificateReconciler(s, rootShard),
+	}
+
+	if err := reconciling.ReconcileCertificates(ctx, certReconcilers, s.Namespace, r.Client, ownerRefWrapper); err != nil {
+		errs = append(errs, err)
+	}
+
+	if err := k8creconciling.ReconcileSecrets(ctx, []k8creconciling.NamedSecretReconcilerFactory{
+		shard.RootShardClientKubeconfigReconciler(s, rootShard),
+	}, s.Namespace, r.Client, ownerRefWrapper); err != nil {
+		errs = append(errs, err)
+	}
+
+	if err := k8creconciling.ReconcileDeployments(ctx, []k8creconciling.NamedDeploymentReconcilerFactory{
+		shard.DeploymentReconciler(s, rootShard),
+	}, s.Namespace, r.Client, ownerRefWrapper); err != nil {
+		errs = append(errs, err)
+	}
+
+	if err := k8creconciling.ReconcileServices(ctx, []k8creconciling.NamedServiceReconcilerFactory{
+		shard.ServiceReconciler(s),
+	}, s.Namespace, r.Client, ownerRefWrapper); err != nil {
+		errs = append(errs, err)
+	}
+
+	return kerrors.NewAggregate(errs)
+}
+
+// reconcileStatus sets both phase and conditions on the reconciled Shard object.
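+// It derives the Available condition from the shard's Deployment, mirrors the result into
+// the phase, and only patches the status when it actually changed.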
+func (r *ShardReconciler) reconcileStatus(ctx context.Context, oldShard *operatorv1alpha1.Shard) error {
+	newShard := oldShard.DeepCopy()
+	var errs []error
+
+	if newShard.Status.Phase == "" {
+		newShard.Status.Phase = operatorv1alpha1.ShardPhaseProvisioning
+	}
+
+	if newShard.DeletionTimestamp != nil {
+		newShard.Status.Phase = operatorv1alpha1.ShardPhaseDeleting
+	}
+
+	if err := r.setAvailableCondition(ctx, newShard); err != nil {
+		errs = append(errs, err)
+	}
+
+	if cond := apimeta.FindStatusCondition(newShard.Status.Conditions, string(operatorv1alpha1.ShardConditionTypeAvailable)); cond != nil && cond.Status == metav1.ConditionTrue {
+		newShard.Status.Phase = operatorv1alpha1.ShardPhaseRunning
+	}
+
+	// only patch the status if there are actual changes.
+	if !equality.Semantic.DeepEqual(oldShard.Status, newShard.Status) {
+		if err := r.Client.Status().Patch(ctx, newShard, client.MergeFrom(oldShard)); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return kerrors.NewAggregate(errs)
+}
+
+func (r *ShardReconciler) setAvailableCondition(ctx context.Context, s *operatorv1alpha1.Shard) error {
+	var dep appsv1.Deployment
+	depKey := types.NamespacedName{Namespace: s.Namespace, Name: resources.GetShardDeploymentName(s)}
+	if err := r.Client.Get(ctx, depKey, &dep); client.IgnoreNotFound(err) != nil {
+		return err
+	}
+
+	available := metav1.ConditionFalse
+	reason := operatorv1alpha1.ShardConditionReasonDeploymentUnavailable
+	msg := deploymentStatusString(dep, depKey)
+
+	if dep.Name != "" {
+		if deploymentReady(dep) {
+			available = metav1.ConditionTrue
+			reason = operatorv1alpha1.ShardConditionReasonReplicasUp
+		} else {
+			available = metav1.ConditionFalse
+			reason = operatorv1alpha1.ShardConditionReasonReplicasUnavailable
+		}
+	}
+
+	s.Status.Conditions = updateCondition(s.Status.Conditions, metav1.Condition{
+		Type:               string(operatorv1alpha1.ShardConditionTypeAvailable),
+		Status:             available,
+		ObservedGeneration: s.Generation,
+		Reason:             string(reason),
+		Message:            msg,
+	})
+
+	return nil
+}
diff --git a/internal/controller/shard_controller_test.go b/internal/controller/shard_controller_test.go
index 91c5e87..1e4584d 100644
--- a/internal/controller/shard_controller_test.go
+++ b/internal/controller/shard_controller_test.go
@@ -22,7 +22,8 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 
-	"k8s.io/apimachinery/pkg/api/errors"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -43,9 +44,32 @@ var _ = Describe("Shard Controller", func() {
 		shard := &operatorv1alpha1.Shard{}
 
 		BeforeEach(func() {
+			By("creating the custom resource for the Kind RootShard")
+			err := k8sClient.Get(ctx, typeNamespacedName, &operatorv1alpha1.RootShard{})
+			if err != nil && apierrors.IsNotFound(err) {
+				resource := &operatorv1alpha1.RootShard{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "my-root-shard",
+						Namespace: "default",
+					},
+					Spec: operatorv1alpha1.RootShardSpec{
+						External: operatorv1alpha1.ExternalConfig{
+							Hostname: "example.kcp.io",
+							Port:     6443,
+						},
+						CommonShardSpec: operatorv1alpha1.CommonShardSpec{
+							Etcd: operatorv1alpha1.EtcdConfig{
+								Endpoints: []string{"https://localhost:2379"},
+							},
+						},
+					},
+				}
+				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
+			}
+
 			By("creating the custom resource for the Kind Shard")
-			err := k8sClient.Get(ctx, typeNamespacedName, shard)
-			if err != nil && errors.IsNotFound(err) {
+			err = k8sClient.Get(ctx, typeNamespacedName, shard)
+			if err != nil && apierrors.IsNotFound(err) {
 				resource := &operatorv1alpha1.Shard{
 					ObjectMeta: metav1.ObjectMeta{
 						Name:      resourceName,
@@ -57,6 +81,11 @@ var _ = Describe("Shard Controller", func() {
 							Endpoints: []string{"https://localhost:2379"},
 						},
 					},
+					RootShard: operatorv1alpha1.RootShardConfig{
+						Reference: &corev1.LocalObjectReference{
+							Name: "my-root-shard",
+						},
+					},
 				},
 			}
 			Expect(k8sClient.Create(ctx, resource)).To(Succeed())
diff --git a/internal/resources/resources.go b/internal/resources/resources.go
index a9fba4c..44511a7 100644
--- a/internal/resources/resources.go
+++ b/internal/resources/resources.go
@@ -64,19 +64,35 @@ func GetRootShardDeploymentName(r *operatorv1alpha1.RootShard) string {
 	return fmt.Sprintf("%s-kcp", r.Name)
 }
 
+func GetShardDeploymentName(s *operatorv1alpha1.Shard) string {
+	return fmt.Sprintf("%s-shard-kcp", s.Name)
+}
+
 func GetRootShardServiceName(r *operatorv1alpha1.RootShard) string {
 	return fmt.Sprintf("%s-kcp", r.Name)
 }
 
-func GetRootShardResourceLabels(r *operatorv1alpha1.RootShard) map[string]string {
+func GetShardServiceName(s *operatorv1alpha1.Shard) string {
+	return fmt.Sprintf("%s-shard-kcp", s.Name)
+}
+
+func getResourceLabels(instance, component string) map[string]string {
 	return map[string]string{
-		appNameLabel:      "kcp",
-		appInstanceLabel:  r.Name,
 		appManagedByLabel: "kcp-operator",
-		appComponentLabel: "rootshard",
+		appNameLabel:      "kcp",
+		appInstanceLabel:  instance,
+		appComponentLabel: component,
 	}
 }
 
+func GetRootShardResourceLabels(r *operatorv1alpha1.RootShard) map[string]string {
+	return getResourceLabels(r.Name, "rootshard")
+}
+
+func GetShardResourceLabels(s *operatorv1alpha1.Shard) map[string]string {
+	return getResourceLabels(s.Name, "shard")
+}
+
 func GetRootShardBaseHost(r *operatorv1alpha1.RootShard) string {
 	clusterDomain := r.Spec.ClusterDomain
 	if clusterDomain == "" {
@@ -90,10 +106,27 @@ func GetRootShardBaseURL(r *operatorv1alpha1.RootShard) string {
 	return fmt.Sprintf("https://%s:6443", GetRootShardBaseHost(r))
 }
 
+func GetShardBaseHost(s *operatorv1alpha1.Shard) string {
+	clusterDomain := s.Spec.ClusterDomain
+	if clusterDomain == "" {
+		clusterDomain = "cluster.local"
+	}
+
+	return fmt.Sprintf("%s-shard-kcp.%s.svc.%s", s.Name, s.Namespace, clusterDomain)
+}
+
+func GetShardBaseURL(s *operatorv1alpha1.Shard) string {
+	return fmt.Sprintf("https://%s:6443", GetShardBaseHost(s))
+}
+
 func GetRootShardCertificateName(r *operatorv1alpha1.RootShard, certName operatorv1alpha1.Certificate) string {
 	return fmt.Sprintf("%s-%s", r.Name, certName)
 }
 
+func GetShardCertificateName(s *operatorv1alpha1.Shard, certName operatorv1alpha1.Certificate) string {
+	return fmt.Sprintf("%s-%s", s.Name, certName)
+}
+
 func GetRootShardCAName(r *operatorv1alpha1.RootShard, caName operatorv1alpha1.CA) string {
 	if caName == operatorv1alpha1.RootCA {
 		return fmt.Sprintf("%s-ca", r.Name)
@@ -102,12 +135,7 @@ func GetRootShardCAName(r *operatorv1alpha1.RootShard, caName operatorv1alpha1.C
 }
 
 func GetFrontProxyResourceLabels(f *operatorv1alpha1.FrontProxy) map[string]string {
-	return map[string]string{
-		appNameLabel:      "kcp",
-		appInstanceLabel:  f.Name,
-		appManagedByLabel: "kcp-operator",
-		appComponentLabel: "front-proxy",
-	}
+	return getResourceLabels(f.Name, "front-proxy")
 }
 
 func GetFrontProxyDeploymentName(f *operatorv1alpha1.FrontProxy) string {
diff --git a/internal/resources/shard/certificates.go b/internal/resources/shard/certificates.go
new file mode 100644
index 0000000..c0709d5
--- /dev/null
+++ b/internal/resources/shard/certificates.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2025 The KCP Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package shard
+
+import (
+	"fmt"
+
+	certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
+	certmanagermetav1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1"
+
+	"github.com/kcp-dev/kcp-operator/internal/reconciling"
+	"github.com/kcp-dev/kcp-operator/internal/resources"
+	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
+)
+
+func ServerCertificateReconciler(shard *operatorv1alpha1.Shard, rootShard *operatorv1alpha1.RootShard) reconciling.NamedCertificateReconcilerFactory {
+	name := resources.GetShardCertificateName(shard, operatorv1alpha1.ServerCertificate)
+
+	return func() (string, reconciling.CertificateReconciler) {
+		return name, func(cert *certmanagerv1.Certificate) (*certmanagerv1.Certificate, error) {
+			cert.SetLabels(resources.GetShardResourceLabels(shard))
+			cert.Spec = certmanagerv1.CertificateSpec{
+				SecretName:  name,
+				Duration:    &operatorv1alpha1.DefaultCertificateDuration,
+				RenewBefore: &operatorv1alpha1.DefaultCertificateRenewal,
+
+				PrivateKey: &certmanagerv1.CertificatePrivateKey{
+					Algorithm: certmanagerv1.RSAKeyAlgorithm,
+					Size:      4096,
+				},
+
+				Usages: []certmanagerv1.KeyUsage{
+					certmanagerv1.UsageServerAuth,
+				},
+
+				DNSNames: []string{
+					"localhost",
+					resources.GetShardBaseHost(shard),
+				},
+
+				IssuerRef: certmanagermetav1.ObjectReference{
+					Name:  resources.GetRootShardCAName(rootShard, operatorv1alpha1.ServerCA),
+					Kind:  "Issuer",
+					Group: "cert-manager.io",
+				},
+			}
+
+			return cert, nil
+		}
+	}
+}
+
+func VirtualWorkspacesCertificateReconciler(shard *operatorv1alpha1.Shard, rootShard *operatorv1alpha1.RootShard) reconciling.NamedCertificateReconcilerFactory {
+	name := resources.GetShardCertificateName(shard, operatorv1alpha1.VirtualWorkspacesCertificate)
+
+	return func() (string, reconciling.CertificateReconciler) {
+		return name, func(cert *certmanagerv1.Certificate) (*certmanagerv1.Certificate, error) {
+			cert.SetLabels(resources.GetShardResourceLabels(shard))
+			cert.Spec = certmanagerv1.CertificateSpec{
+				SecretName:  name,
+				Duration:    &operatorv1alpha1.DefaultCertificateDuration,
+				RenewBefore: &operatorv1alpha1.DefaultCertificateRenewal,
+
+				PrivateKey: &certmanagerv1.CertificatePrivateKey{
+					Algorithm: certmanagerv1.RSAKeyAlgorithm,
+					Size:      4096,
+				},
+
+				Usages: []certmanagerv1.KeyUsage{
+					certmanagerv1.UsageServerAuth,
+				},
+
+				DNSNames: []string{
+					resources.GetShardBaseHost(shard),
+				},
+
+				IssuerRef: certmanagermetav1.ObjectReference{
+					Name:  resources.GetRootShardCAName(rootShard, operatorv1alpha1.ServerCA),
+					Kind:  "Issuer",
+					Group: "cert-manager.io",
+				},
+			}
+
+			return cert, nil
+		}
+	}
+}
+
+func ServiceAccountCertificateReconciler(shard *operatorv1alpha1.Shard, rootShard *operatorv1alpha1.RootShard) reconciling.NamedCertificateReconcilerFactory {
+	name := resources.GetShardCertificateName(shard, operatorv1alpha1.ServiceAccountCertificate)
+
+	return func() (string, reconciling.CertificateReconciler) {
+		return name, func(cert *certmanagerv1.Certificate) (*certmanagerv1.Certificate, error) {
+			cert.SetLabels(resources.GetShardResourceLabels(shard))
+			cert.Spec = certmanagerv1.CertificateSpec{
+				CommonName:  name,
+				SecretName:  name,
+				Duration:    &operatorv1alpha1.DefaultCertificateDuration,
+				RenewBefore: &operatorv1alpha1.DefaultCertificateRenewal,
+
+				PrivateKey: &certmanagerv1.CertificatePrivateKey{
+					Algorithm: certmanagerv1.RSAKeyAlgorithm,
+					Size:      4096,
+				},
+
+				IssuerRef: certmanagermetav1.ObjectReference{
+					Name:  resources.GetRootShardCAName(rootShard, operatorv1alpha1.ServiceAccountCA),
+					Kind:  "Issuer",
+					Group: "cert-manager.io",
+				},
+			}
+
+			return cert, nil
+		}
+	}
+}
+
+func RootShardClientCertificateReconciler(shard *operatorv1alpha1.Shard, rootShard *operatorv1alpha1.RootShard) reconciling.NamedCertificateReconcilerFactory {
+	name := resources.GetShardCertificateName(shard, operatorv1alpha1.ClientCertificate)
+
+	return func() (string, reconciling.CertificateReconciler) {
+		return name, func(cert *certmanagerv1.Certificate) (*certmanagerv1.Certificate, error) {
+			cert.SetLabels(resources.GetShardResourceLabels(shard))
+			cert.Spec = certmanagerv1.CertificateSpec{
+				CommonName:  fmt.Sprintf("shard-%s", shard.Name),
+				SecretName:  name,
+				Duration:    &operatorv1alpha1.DefaultCertificateDuration,
+				RenewBefore: &operatorv1alpha1.DefaultCertificateRenewal,
+
+				PrivateKey: &certmanagerv1.CertificatePrivateKey{
+					Algorithm: certmanagerv1.RSAKeyAlgorithm,
+					Size:      4096,
+				},
+
+				Subject: &certmanagerv1.X509Subject{
+					Organizations: []string{"system:masters"},
+				},
+
+				Usages: []certmanagerv1.KeyUsage{
+					certmanagerv1.UsageClientAuth,
+				},
+
+				IssuerRef: certmanagermetav1.ObjectReference{
+					Name:  resources.GetRootShardCAName(rootShard, operatorv1alpha1.ClientCA),
+					Kind:  "Issuer",
+					Group: "cert-manager.io",
+				},
+			}
+
+			return cert, nil
+		}
+	}
+}
diff --git a/internal/resources/shard/deployment.go b/internal/resources/shard/deployment.go
new file mode 100644
index 0000000..d737d5e
--- /dev/null
+++ b/internal/resources/shard/deployment.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2025 The KCP Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package shard
+
+import (
+	"fmt"
+	"strings"
+
+	"k8c.io/reconciler/pkg/reconciling"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"
+
+	"github.com/kcp-dev/kcp-operator/internal/resources"
+	"github.com/kcp-dev/kcp-operator/internal/resources/utils"
+	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
+)
+
+const (
+	ServerContainerName = "kcp"
+)
+
+var (
+	defaultResourceRequirements = corev1.ResourceRequirements{
+		Requests: corev1.ResourceList{
+			corev1.ResourceMemory: resource.MustParse("1Gi"),
+			corev1.ResourceCPU:    resource.MustParse("1"),
+		},
+		Limits: corev1.ResourceList{
+			corev1.ResourceMemory: resource.MustParse("2Gi"),
+			corev1.ResourceCPU:    resource.MustParse("2"),
+		},
+	}
+)
+
+func getCertificateMountPath(certName operatorv1alpha1.Certificate) string {
+	return fmt.Sprintf("/etc/kcp/tls/%s", certName)
+}
+
+func getCAMountPath(caName operatorv1alpha1.CA) string {
+	return fmt.Sprintf("/etc/kcp/tls/ca/%s", caName)
+}
+
+func getKubeconfigMountPath(certName operatorv1alpha1.Certificate) string {
+	return fmt.Sprintf("/etc/kcp/%s-kubeconfig", certName)
+}
+
+func DeploymentReconciler(shard *operatorv1alpha1.Shard, rootShard *operatorv1alpha1.RootShard) reconciling.NamedDeploymentReconcilerFactory {
+	return func() (string, reconciling.DeploymentReconciler) {
+		return resources.GetShardDeploymentName(shard), func(dep *appsv1.Deployment) (*appsv1.Deployment, error) {
+			labels := resources.GetShardResourceLabels(shard)
+			dep.SetLabels(labels)
+			dep.Spec.Selector = &metav1.LabelSelector{
+				MatchLabels: labels,
+			}
+			dep.Spec.Template.ObjectMeta.SetLabels(labels)
+
+			secretMounts := []utils.SecretMount{{
+				VolumeName: "kcp-ca",
+				SecretName: resources.GetRootShardCAName(rootShard, operatorv1alpha1.RootCA),
+				MountPath:  getCAMountPath(operatorv1alpha1.RootCA),
+			}}
+
+			image, _ := resources.GetImageSettings(shard.Spec.Image)
+			args := getArgs(shard, rootShard)
+
+			if shard.Spec.Etcd.TLSConfig != nil {
+				secretMounts = append(secretMounts, utils.SecretMount{
+					VolumeName: "etcd-client-cert",
+					SecretName: shard.Spec.Etcd.TLSConfig.SecretRef.Name,
+					MountPath:  "/etc/etcd/tls",
+				})
+
+				args = append(args,
+					"--etcd-certfile=/etc/etcd/tls/tls.crt",
+					"--etcd-keyfile=/etc/etcd/tls/tls.key",
+					"--etcd-cafile=/etc/etcd/tls/ca.crt",
+				)
+			}
+
+			for _, cert := range []operatorv1alpha1.Certificate{
+				// requires server CA and the shard client cert to be mounted
+				operatorv1alpha1.ClientCertificate,
+			} {
+				secretMounts = append(secretMounts, utils.SecretMount{
+					VolumeName: fmt.Sprintf("%s-kubeconfig", cert),
+					SecretName: kubeconfigSecret(shard, cert),
+					MountPath:  getKubeconfigMountPath(cert),
+				})
+			}
+
+			// All of these CAs are shared between rootshard and regular shards.
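+			// They are mounted from the root shard's CA secrets so that certificates
+			// issued by the root shard are trusted by this shard as well.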
+			for _, ca := range []operatorv1alpha1.CA{
+				operatorv1alpha1.ClientCA,
+				operatorv1alpha1.ServerCA,
+				operatorv1alpha1.ServiceAccountCA,
+				operatorv1alpha1.RequestHeaderClientCA,
+			} {
+				secretMounts = append(secretMounts, utils.SecretMount{
+					VolumeName: fmt.Sprintf("%s-ca", ca),
+					SecretName: resources.GetRootShardCAName(rootShard, ca),
+					MountPath:  getCAMountPath(ca),
+				})
+			}
+
+			for _, cert := range []operatorv1alpha1.Certificate{
+				operatorv1alpha1.ServerCertificate,
+				operatorv1alpha1.ServiceAccountCertificate,
+				operatorv1alpha1.ClientCertificate,
+			} {
+				secretMounts = append(secretMounts, utils.SecretMount{
+					VolumeName: fmt.Sprintf("%s-cert", cert),
+					SecretName: resources.GetShardCertificateName(shard, cert),
+					MountPath:  getCertificateMountPath(cert),
+				})
+			}
+
+			volumes := []corev1.Volume{}
+			volumeMounts := []corev1.VolumeMount{}
+
+			for _, sm := range secretMounts {
+				v, vm := sm.Build()
+				volumes = append(volumes, v)
+				volumeMounts = append(volumeMounts, vm)
+			}
+
+			dep.Spec.Template.Spec.Containers = []corev1.Container{{
+				Name:         ServerContainerName,
+				Image:        image,
+				Command:      []string{"/kcp", "start"},
+				Args:         args,
+				VolumeMounts: volumeMounts,
+				Resources:    defaultResourceRequirements,
+				SecurityContext: &corev1.SecurityContext{
+					ReadOnlyRootFilesystem:   ptr.To(true),
+					AllowPrivilegeEscalation: ptr.To(false),
+				},
+			}}
+			dep.Spec.Template.Spec.Volumes = volumes
+
+			// explicitly set the replicas if it is configured in the Shard
+			// object or if the existing Deployment object doesn't have replicas
+			// configured. This will allow a HPA to interact with the replica
+			// count.
+			if shard.Spec.Replicas != nil {
+				dep.Spec.Replicas = shard.Spec.Replicas
+			} else if dep.Spec.Replicas == nil {
+				dep.Spec.Replicas = ptr.To[int32](2)
+			}
+
+			return dep, nil
+		}
+	}
+}
+
+func getArgs(shard *operatorv1alpha1.Shard, rootShard *operatorv1alpha1.RootShard) []string {
+	args := []string{
+		// CA configuration.
+		fmt.Sprintf("--root-ca-file=%s/tls.crt", getCAMountPath(operatorv1alpha1.RootCA)),
+		fmt.Sprintf("--client-ca-file=%s/tls.crt", getCAMountPath(operatorv1alpha1.ClientCA)),
+
+		// Requestheader configuration.
+		fmt.Sprintf("--requestheader-client-ca-file=%s/tls.crt", getCAMountPath(operatorv1alpha1.RequestHeaderClientCA)),
+		"--requestheader-username-headers=X-Remote-User",
+		"--requestheader-group-headers=X-Remote-Group",
+		"--requestheader-extra-headers-prefix=X-Remote-Extra-",
+
+		// Certificate flags (server, service account signing).
+		fmt.Sprintf("--tls-private-key-file=%s/tls.key", getCertificateMountPath(operatorv1alpha1.ServerCertificate)),
+		fmt.Sprintf("--tls-cert-file=%s/tls.crt", getCertificateMountPath(operatorv1alpha1.ServerCertificate)),
+		fmt.Sprintf("--service-account-key-file=%s/tls.crt", getCertificateMountPath(operatorv1alpha1.ServiceAccountCertificate)),
+		fmt.Sprintf("--service-account-private-key-file=%s/tls.key", getCertificateMountPath(operatorv1alpha1.ServiceAccountCertificate)),
+		fmt.Sprintf("--shard-client-key-file=%s/tls.key", getCertificateMountPath(operatorv1alpha1.ClientCertificate)),
+		fmt.Sprintf("--shard-client-cert-file=%s/tls.crt", getCertificateMountPath(operatorv1alpha1.ClientCertificate)),
+
+		// Etcd client configuration.
+		fmt.Sprintf("--etcd-servers=%s", strings.Join(shard.Spec.Etcd.Endpoints, ",")),
+
+		// General shard configuration.
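+		// The root shard client kubeconfig mounted above is what the shard uses to
+		// register itself with the root shard; the same kubeconfig is reused for the
+		// cache server connection.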
+		fmt.Sprintf("--shard-name=%s", shard.Name),
+		fmt.Sprintf("--external-hostname=%s", resources.GetShardBaseHost(shard)),
+		fmt.Sprintf("--shard-external-url=https://%s:%d", rootShard.Spec.External.Hostname, rootShard.Spec.External.Port),
+		fmt.Sprintf("--root-shard-kubeconfig-file=%s/kubeconfig", getKubeconfigMountPath(operatorv1alpha1.ClientCertificate)),
+		fmt.Sprintf("--cache-kubeconfig=%s/kubeconfig", getKubeconfigMountPath(operatorv1alpha1.ClientCertificate)),
+		"--root-directory=''",
+		"--enable-leader-election=true",
+		"--logging-format=json",
+	}
+
+	return args
+}
diff --git a/internal/resources/shard/kubeconfigs.go b/internal/resources/shard/kubeconfigs.go
new file mode 100644
index 0000000..a4deccb
--- /dev/null
+++ b/internal/resources/shard/kubeconfigs.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2025 The KCP Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package shard
+
+import (
+	"fmt"
+
+	k8creconciling "k8c.io/reconciler/pkg/reconciling"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+
+	"github.com/kcp-dev/kcp-operator/internal/resources"
+	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
+)
+
+func kubeconfigSecret(shard *operatorv1alpha1.Shard, cert operatorv1alpha1.Certificate) string {
+	return fmt.Sprintf("%s-%s-kubeconfig", shard.Name, cert)
+}
+
+func RootShardClientKubeconfigReconciler(shard *operatorv1alpha1.Shard, rootShard *operatorv1alpha1.RootShard) k8creconciling.NamedSecretReconcilerFactory {
+	const (
+		serverName   = "root-shard"
+		contextName  = "shard-base" // hardcoded in kcp
+		authInfoName = "shard"
+	)
+
+	return func() (string, k8creconciling.SecretReconciler) {
+		return kubeconfigSecret(shard, operatorv1alpha1.ClientCertificate), func(secret *corev1.Secret) (*corev1.Secret, error) {
+			var config *clientcmdapi.Config
+
+			if secret.Data == nil {
+				secret.Data = make(map[string][]byte)
+			}
+
+			config = &clientcmdapi.Config{
+				Clusters: map[string]*clientcmdapi.Cluster{
+					serverName: {
+						Server:               resources.GetRootShardBaseURL(rootShard),
+						CertificateAuthority: getCAMountPath(operatorv1alpha1.ServerCA) + "/tls.crt",
+					},
+				},
+				Contexts: map[string]*clientcmdapi.Context{
+					contextName: {
+						Cluster:  serverName,
+						AuthInfo: authInfoName,
+					},
+				},
+				AuthInfos: map[string]*clientcmdapi.AuthInfo{
+					authInfoName: {
+						ClientCertificate: getCertificateMountPath(operatorv1alpha1.ClientCertificate) + "/tls.crt",
+						ClientKey:         getCertificateMountPath(operatorv1alpha1.ClientCertificate) + "/tls.key",
+					},
+				},
+				CurrentContext: contextName,
+			}
+
+			data, err := clientcmd.Write(*config)
+			if err != nil {
+				return nil, err
+			}
+
+			secret.Data["kubeconfig"] = data
+
+			return secret, nil
+		}
+	}
+}
diff --git a/internal/resources/shard/service.go b/internal/resources/shard/service.go
new file mode 100644
index 0000000..2a094ad
--- /dev/null
+++ b/internal/resources/shard/service.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2025 The KCP Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package shard
+
+import (
+	"k8c.io/reconciler/pkg/reconciling"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/ptr"
+
+	"github.com/kcp-dev/kcp-operator/internal/resources"
+	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
+)
+
+func ServiceReconciler(shard *operatorv1alpha1.Shard) reconciling.NamedServiceReconcilerFactory {
+	return func() (string, reconciling.ServiceReconciler) {
+		return resources.GetShardServiceName(shard), func(svc *corev1.Service) (*corev1.Service, error) {
+			labels := resources.GetShardResourceLabels(shard)
+			svc.SetLabels(labels)
+			svc.Spec.Type = corev1.ServiceTypeClusterIP
+			svc.Spec.Ports = []corev1.ServicePort{
+				{
+					Name:        "https",
+					Protocol:    corev1.ProtocolTCP,
+					Port:        6443,
+					TargetPort:  intstr.FromInt32(6443),
+					AppProtocol: ptr.To("https"),
+				},
+				{
+					Name:        "https-virtual-workspaces",
+					Protocol:    corev1.ProtocolTCP,
+					Port:        6444,
+					TargetPort:  intstr.FromInt32(6444),
+					AppProtocol: ptr.To("https"),
+				},
+			}
+			svc.Spec.Selector = labels
+
+			return svc, nil
+		}
+	}
+}
diff --git a/test/e2e/shards/shards_test.go b/test/e2e/shards/shards_test.go
new file mode 100644
index 0000000..7385bad
--- /dev/null
+++ b/test/e2e/shards/shards_test.go
@@ -0,0 +1,127 @@
+//go:build e2e
+
+/*
+Copyright 2025 The KCP Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rootshards
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/go-logr/logr"
+	kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ctrlruntime "sigs.k8s.io/controller-runtime"
+
+	operatorv1alpha1 "github.com/kcp-dev/kcp-operator/sdk/apis/operator/v1alpha1"
+	"github.com/kcp-dev/kcp-operator/test/utils"
+)
+
+func TestCreateShard(t *testing.T) {
+	ctrlruntime.SetLogger(logr.Discard())
+
+	client := utils.GetKubeClient(t)
+	ctx := context.Background()
+
+	// create namespace
+	namespace := "create-shard"
+	utils.CreateSelfDestructingNamespace(t, ctx, client, namespace)
+
+	// deploy a root shard incl. etcd
+	rootShard := utils.DeployRootShard(ctx, t, client, namespace)
+
+	// deploy a 2nd shard incl. etcd
+	shardName := "aardvark"
+	utils.DeployShard(ctx, t, client, namespace, shardName, rootShard.Name)
+
+	// create a kubeconfig to access the root shard
+	configSecretName := fmt.Sprintf("%s-shard-kubeconfig", rootShard.Name)
+
+	rsConfig := operatorv1alpha1.Kubeconfig{}
+	rsConfig.Name = configSecretName
+	rsConfig.Namespace = namespace
+
+	rsConfig.Spec = operatorv1alpha1.KubeconfigSpec{
+		Target: operatorv1alpha1.KubeconfigTarget{
+			RootShardRef: &corev1.LocalObjectReference{
+				Name: rootShard.Name,
+			},
+		},
+		Username: "e2e",
+		Validity: metav1.Duration{Duration: 2 * time.Hour},
+		SecretRef: corev1.LocalObjectReference{
+			Name: configSecretName,
+		},
+		Groups: []string{"system:masters"},
+	}
+
+	t.Log("Creating kubeconfig for RootShard…")
+	if err := client.Create(ctx, &rsConfig); err != nil {
+		t.Fatal(err)
+	}
+	utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: rsConfig.Namespace, Name: rsConfig.Spec.SecretRef.Name})
+
+	t.Log("Connecting to RootShard…")
+	rootShardClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace, rsConfig.Name)
+
+	// wait until the 2nd shard has registered itself successfully at the root shard
+	shardKey := types.NamespacedName{Name: shardName}
+	t.Log("Waiting for Shard to register itself on the RootShard…")
+	utils.WaitForObject(t, ctx, rootShardClient, &kcpcorev1alpha1.Shard{}, shardKey)
+
+	// create a kubeconfig to access the shard
+	configSecretName = fmt.Sprintf("%s-shard-kubeconfig", shardName)
+
+	shardConfig := operatorv1alpha1.Kubeconfig{}
+	shardConfig.Name = configSecretName
+	shardConfig.Namespace = namespace
+
+	shardConfig.Spec = operatorv1alpha1.KubeconfigSpec{
+		Target: operatorv1alpha1.KubeconfigTarget{
+			ShardRef: &corev1.LocalObjectReference{
+				Name: shardName,
+			},
+		},
+		Username: "e2e",
+		Validity: metav1.Duration{Duration: 2 * time.Hour},
+		SecretRef: corev1.LocalObjectReference{
+			Name: configSecretName,
+		},
+		Groups: []string{"system:masters"},
+	}
+
+	t.Log("Creating kubeconfig for Shard…")
+	if err := client.Create(ctx, &shardConfig); err != nil {
+		t.Fatal(err)
+	}
+	utils.WaitForObject(t, ctx, client, &corev1.Secret{}, types.NamespacedName{Namespace: shardConfig.Namespace, Name: shardConfig.Spec.SecretRef.Name})
+
+	t.Log("Connecting to Shard…")
+	kcpClient := utils.ConnectWithKubeconfig(t, ctx, client, namespace, shardConfig.Name)
+
+	// proof of life: list something every logicalcluster in kcp has
+	t.Log("Should be able to list Secrets.")
+	secrets := &corev1.SecretList{}
+	if err := kcpClient.List(ctx, secrets); err != nil {
+		t.Fatalf("Failed to list secrets in kcp: %v", err)
+	}
+}
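
Note: setAvailableCondition relies on deploymentReady, deploymentStatusString and updateCondition, shared helpers that already live in internal/controller and are not touched by this diff. As orientation only, a minimal sketch of what such helpers could look like, with shapes inferred from the call sites above rather than copied from the actual implementation:

	// deploymentReady reports whether all of the Deployment's replicas are updated and available.
	func deploymentReady(dep appsv1.Deployment) bool {
		return dep.Status.Replicas > 0 &&
			dep.Status.UpdatedReplicas == dep.Status.Replicas &&
			dep.Status.AvailableReplicas == dep.Status.Replicas
	}

	// deploymentStatusString renders a short summary used as the condition message.
	func deploymentStatusString(dep appsv1.Deployment, key types.NamespacedName) string {
		return fmt.Sprintf("Deployment %s has %d/%d replicas available", key, dep.Status.AvailableReplicas, dep.Status.Replicas)
	}

	// updateCondition upserts a condition by type; apimeta.SetStatusCondition maintains LastTransitionTime.
	func updateCondition(conds []metav1.Condition, cond metav1.Condition) []metav1.Condition {
		apimeta.SetStatusCondition(&conds, cond)
		return conds
	}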