Skip to content

Commit

Permalink
Add KafkaAutoscaler APIs (#1168)
Browse files Browse the repository at this point in the history
Signed-off-by: obaydullahmhs <obaydullah@appscode.com>
  • Loading branch information
obaydullahmhs authored Mar 8, 2024
1 parent 64fea9f commit 403b738
Show file tree
Hide file tree
Showing 18 changed files with 1,754 additions and 0 deletions.
67 changes: 67 additions & 0 deletions apis/autoscaling/v1alpha1/kafka_helpers.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
/*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
"fmt"

"kubedb.dev/apimachinery/apis"
"kubedb.dev/apimachinery/apis/autoscaling"
"kubedb.dev/apimachinery/crds"

"kmodules.xyz/client-go/apiextensions"
)

// CustomResourceDefinition returns the CustomResourceDefinition registered for
// the kafkaautoscalers resource (loaded via crds.MustCustomResourceDefinition,
// which panics if the CRD asset is missing).
func (*KafkaAutoscaler) CustomResourceDefinition() *apiextensions.CustomResourceDefinition {
	return crds.MustCustomResourceDefinition(SchemeGroupVersion.WithResource(ResourcePluralKafkaAutoscaler))
}

// Compile-time check that KafkaAutoscaler satisfies the apis.ResourceInfo interface.
var _ apis.ResourceInfo = &KafkaAutoscaler{}

// ResourceFQN returns the fully qualified resource name in the form
// "<plural>.<group>", i.e. the plural resource name qualified by the
// autoscaling API group.
func (k *KafkaAutoscaler) ResourceFQN() string {
	return fmt.Sprintf("%s.%s", ResourcePluralKafkaAutoscaler, autoscaling.GroupName)
}

// ResourceShortCode returns the short code ("kfscaler") used as the kubectl
// shortName for this resource.
func (k *KafkaAutoscaler) ResourceShortCode() string {
	return ResourceCodeKafkaAutoscaler
}

// ResourceKind returns the Kind ("KafkaAutoscaler") of this resource.
func (k *KafkaAutoscaler) ResourceKind() string {
	return ResourceKindKafkaAutoscaler
}

// ResourceSingular returns the singular resource name ("kafkaautoscaler").
func (k *KafkaAutoscaler) ResourceSingular() string {
	return ResourceSingularKafkaAutoscaler
}

// ResourcePlural returns the plural resource name ("kafkaautoscalers").
func (k *KafkaAutoscaler) ResourcePlural() string {
	return ResourcePluralKafkaAutoscaler
}

// ValidateSpecs is a no-op here; spec validation for KafkaAutoscaler is
// performed by the admission webhook's validate() method instead.
func (k *KafkaAutoscaler) ValidateSpecs() error {
	return nil
}

// Compile-time check that KafkaAutoscaler satisfies the StatusAccessor interface.
var _ StatusAccessor = &KafkaAutoscaler{}

// GetStatus returns the current autoscaler status.
func (k *KafkaAutoscaler) GetStatus() AutoscalerStatus {
	return k.Status
}

// SetStatus replaces the autoscaler status with s.
func (k *KafkaAutoscaler) SetStatus(s AutoscalerStatus) {
	k.Status = s
}
104 changes: 104 additions & 0 deletions apis/autoscaling/v1alpha1/kafka_types.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
/*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
opsapi "kubedb.dev/apimachinery/apis/ops/v1alpha1"

core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Identifiers for the KafkaAutoscaler API resource.
const (
	ResourceCodeKafkaAutoscaler     = "kfscaler"         // kubectl shortName
	ResourceKindKafkaAutoscaler     = "KafkaAutoscaler"  // object Kind
	ResourceSingularKafkaAutoscaler = "kafkaautoscaler"  // singular resource name
	ResourcePluralKafkaAutoscaler   = "kafkaautoscalers" // plural resource name
)

// KafkaAutoscaler is the configuration for a kafka
// autoscaler, which automatically manages pod resources based on historical and
// real time resource utilization.

// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// +kubebuilder:object:root=true
// +kubebuilder:resource:path=kafkaautoscalers,singular=kafkaautoscaler,shortName=kfscaler,categories={datastore,kubedb,appscode}
// +kubebuilder:subresource:status
type KafkaAutoscaler struct {
	// TypeMeta carries the object's kind and apiVersion.
	metav1.TypeMeta `json:",inline"`
	// Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Specification of the behavior of the autoscaler.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
	Spec KafkaAutoscalerSpec `json:"spec"`

	// Current information about the autoscaler.
	// +optional
	Status AutoscalerStatus `json:"status,omitempty"`
}

// KafkaAutoscalerSpec is the specification of the behavior of the autoscaler.
type KafkaAutoscalerSpec struct {
	// DatabaseRef references the Kafka object this autoscaler targets
	// (looked up in the autoscaler's own namespace by the webhook).
	DatabaseRef *core.LocalObjectReference `json:"databaseRef"`

	// This field will be used to control the behaviour of ops-manager
	OpsRequestOptions *KafkaOpsRequestOptions `json:"opsRequestOptions,omitempty"`

	// Compute holds the compute (vertical) autoscaling settings.
	Compute *KafkaComputeAutoscalerSpec `json:"compute,omitempty"`
	// Storage holds the storage autoscaling settings.
	Storage *KafkaStorageAutoscalerSpec `json:"storage,omitempty"`
}

// KafkaComputeAutoscalerSpec describes compute autoscaling options per kafka
// node role. Node applies only to combined-mode kafka, while Broker and
// Controller apply only to topology-mode kafka (enforced by the validating
// webhook).
type KafkaComputeAutoscalerSpec struct {
	// +optional
	NodeTopology *NodeTopology `json:"nodeTopology,omitempty"`

	Node *ComputeAutoscalerSpec `json:"node,omitempty"`
	Broker *ComputeAutoscalerSpec `json:"broker,omitempty"`
	Controller *ComputeAutoscalerSpec `json:"controller,omitempty"`
}

// KafkaStorageAutoscalerSpec describes storage autoscaling options per kafka
// node role. Node applies only to combined-mode kafka, while Broker and
// Controller apply only to topology-mode kafka (enforced by the validating
// webhook).
type KafkaStorageAutoscalerSpec struct {
	Node *StorageAutoscalerSpec `json:"node,omitempty"`
	Broker *StorageAutoscalerSpec `json:"broker,omitempty"`
	Controller *StorageAutoscalerSpec `json:"controller,omitempty"`
}

// KafkaOpsRequestOptions controls how the autoscaler-generated OpsRequests
// are executed by ops-manager.
type KafkaOpsRequestOptions struct {
	// Timeout for each step of the ops request in seconds. If a step doesn't finish within the specified timeout, the ops request will result in failure.
	Timeout *metav1.Duration `json:"timeout,omitempty"`

	// ApplyOption is to control the execution of OpsRequest depending on the database state.
	// +kubebuilder:default="IfReady"
	Apply opsapi.ApplyOption `json:"apply,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// KafkaAutoscalerList is a list of KafkaAutoscaler objects.
type KafkaAutoscalerList struct {
	metav1.TypeMeta `json:",inline"`
	// metadata is the standard list metadata.
	// +optional
	// NOTE: ",omitempty" added to match the +optional marker and the ObjectMeta
	// tag on KafkaAutoscaler, per Kubernetes API conventions for list types.
	metav1.ListMeta `json:"metadata,omitempty"`

	// items is the list of kafka autoscaler objects.
	Items []KafkaAutoscaler `json:"items"`
}
153 changes: 153 additions & 0 deletions apis/autoscaling/v1alpha1/kafka_webhook.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
/*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
"context"
"errors"
"fmt"

dbapi "kubedb.dev/apimachinery/apis/kubedb/v1alpha2"
opsapi "kubedb.dev/apimachinery/apis/ops/v1alpha1"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// kafkaLog is the logger for the kafka-autoscaler webhook in this package.
var kafkaLog = logf.Log.WithName("kafka-autoscaler")

var _ webhook.Defaulter = &KafkaAutoscaler{}

// Default implements webhook.Defaulter so a webhook will be registered for the
// type. It mutates the KafkaAutoscaler in place to fill defaulted fields.
// (The original comment claimed webhook.CustomDefaulter; the assertion above
// shows webhook.Defaulter is the implemented interface.)
func (k *KafkaAutoscaler) Default() {
	kafkaLog.Info("defaulting", "name", k.Name)
	k.setDefaults()
}

// setDefaults applies defaults to the ops-request options and to the
// compute/storage autoscaler specs. Which role fields are defaulted depends on
// whether the referenced Kafka runs in topology mode (Broker/Controller) or
// combined mode (Node). If the Kafka object cannot be fetched, defaulting is
// skipped — it is best-effort.
func (k *KafkaAutoscaler) setDefaults() {
	var db dbapi.Kafka
	err := DefaultClient.Get(context.TODO(), types.NamespacedName{
		Name:      k.Spec.DatabaseRef.Name,
		Namespace: k.Namespace,
	}, &db)
	if err != nil {
		// Log the lookup failure instead of discarding it. The original code
		// built an error with fmt.Errorf and threw the result away (`_ =`),
		// leaving no trace of why defaulting was skipped.
		kafkaLog.Error(err, "can't get Kafka", "namespace", k.Namespace, "name", k.Spec.DatabaseRef.Name)
		return
	}

	k.setOpsReqOptsDefaults()

	if k.Spec.Storage != nil {
		if db.Spec.Topology != nil {
			setDefaultStorageValues(k.Spec.Storage.Broker)
			setDefaultStorageValues(k.Spec.Storage.Controller)
		} else {
			setDefaultStorageValues(k.Spec.Storage.Node)
		}
	}

	if k.Spec.Compute != nil {
		if db.Spec.Topology != nil {
			setDefaultComputeValues(k.Spec.Compute.Broker)
			setDefaultComputeValues(k.Spec.Compute.Controller)
		} else {
			setDefaultComputeValues(k.Spec.Compute.Node)
		}
	}
}

// setOpsReqOptsDefaults ensures Spec.OpsRequestOptions is non-nil and that its
// Apply option has a value; all other option fields keep their zero values.
func (k *KafkaAutoscaler) setOpsReqOptsDefaults() {
	if k.Spec.OpsRequestOptions == nil {
		k.Spec.OpsRequestOptions = &KafkaOpsRequestOptions{}
	}
	// Timeout is deliberately left nil: it is defaulted to 600s in ops-manager
	// retries.go (120 retries with a 5s pause between each).
	if k.Spec.OpsRequestOptions.Apply == "" {
		k.Spec.OpsRequestOptions.Apply = opsapi.ApplyOptionIfReady
	}
}

// Compile-time check that KafkaAutoscaler satisfies the webhook.Validator interface.
var _ webhook.Validator = &KafkaAutoscaler{}

// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
func (k *KafkaAutoscaler) ValidateCreate() (admission.Warnings, error) {
	kafkaLog.Info("validate create", "name", k.Name)
	return nil, k.validate()
}

// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
func (k *KafkaAutoscaler) ValidateUpdate(oldObj runtime.Object) (admission.Warnings, error) {
	// Fixed copy-pasted log message: this is the update handler, not create.
	kafkaLog.Info("validate update", "name", k.Name)
	return nil, k.validate()
}

// ValidateDelete implements webhook.Validator so a webhook will be registered
// for the type. Deletion needs no validation, so it always admits the request.
func (*KafkaAutoscaler) ValidateDelete() (admission.Warnings, error) {
	return nil, nil
}

// validate checks that the autoscaler spec is consistent with the mode of the
// referenced Kafka:
//   - databaseRef must be set;
//   - for a topology Kafka, only Broker/Controller compute/storage specs are allowed;
//   - for a combined Kafka, only the Node compute/storage spec is allowed.
// It returns nil when the spec is acceptable.
func (k *KafkaAutoscaler) validate() error {
	if k.Spec.DatabaseRef == nil {
		return errors.New("databaseRef can't be empty")
	}
	var kf dbapi.Kafka
	err := DefaultClient.Get(context.TODO(), types.NamespacedName{
		Name:      k.Spec.DatabaseRef.Name,
		Namespace: k.Namespace,
	}, &kf)
	if err != nil {
		// Wrap the error with context instead of discarding the formatted
		// message. The original built an error with fmt.Errorf, assigned it
		// to `_`, and returned the bare err.
		return fmt.Errorf("can't get Kafka %s/%s: %w", k.Namespace, k.Spec.DatabaseRef.Name, err)
	}

	if k.Spec.Compute != nil {
		cm := k.Spec.Compute
		if kf.Spec.Topology != nil {
			if cm.Node != nil {
				return errors.New("Spec.Compute.Node is invalid for kafka with topology")
			}
		} else {
			if cm.Broker != nil {
				return errors.New("Spec.Compute.Broker is invalid for combined kafka")
			}
			if cm.Controller != nil {
				return errors.New("Spec.Compute.Controller is invalid for combined kafka")
			}
		}
	}

	if k.Spec.Storage != nil {
		st := k.Spec.Storage
		if kf.Spec.Topology != nil {
			if st.Node != nil {
				return errors.New("Spec.Storage.Node is invalid for kafka with topology")
			}
		} else {
			if st.Broker != nil {
				return errors.New("Spec.Storage.Broker is invalid for combined kafka")
			}
			if st.Controller != nil {
				return errors.New("Spec.Storage.Controller is invalid for combined kafka")
			}
		}
	}

	return nil
}
Loading

0 comments on commit 403b738

Please sign in to comment.