add a cleanup loop for failed vm using configmap #59

Merged · 4 commits · Aug 29, 2019

Changes from all commits
2 changes: 1 addition & 1 deletion .travis.yml
@@ -13,7 +13,7 @@ script:
 - if [[ -n "$(git status --porcelain)" ]] ; then echo "It seems like you need to run make. Please run it and commit the changes"; git status --porcelain; false; fi
 - make docker-test
 - make deploy-test-cluster
-- KUBECONFIG="`pwd`/cluster/dind-cluster/config" go test -v -race ./tests/...
+- KUBECONFIG="`pwd`/cluster/dind-cluster/config" go test -timeout 30m -v -race ./tests/...
 
 deploy:
 - provider: script
4 changes: 3 additions & 1 deletion cmd/manager/main.go
@@ -43,9 +43,11 @@ func loadMacAddressFromEnvVar(envName string) (net.HardwareAddr, error) {
 
 func main() {
 var logType, metricsAddr string
+var waitingTime int
 
 flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
 flag.StringVar(&logType, "v", "production", "Log type (debug/production).")
+flag.IntVar(&waitingTime, "wait-time", 600, "waiting time to release the mac if object was not created")
 flag.Parse()
 
 if logType == "debug" {
@@ -80,7 +82,7 @@ func main() {
 os.Exit(1)
 }
 
-kubemacpoolManager := manager.NewKubeMacPoolManager(podNamespace, podName, metricsAddr)
+kubemacpoolManager := manager.NewKubeMacPoolManager(podNamespace, podName, metricsAddr, waitingTime)
 
 err = kubemacpoolManager.Run(rangeStart, rangeEnd)
 if err != nil {
1 change: 1 addition & 0 deletions config/default/manager/manager.yaml
@@ -57,6 +57,7 @@ spec:
 - /manager
 args:
 - "--v=production"
+- "--wait-time=600"
 image: quay.io/kubevirt/kubemacpool:latest
 imagePullPolicy: Always
 name: manager
1 change: 1 addition & 0 deletions config/release/kubemacpool.yaml
@@ -172,6 +172,7 @@ spec:
 containers:
 - args:
 - --v=production
+- --wait-time=600
 command:
 - /manager
 env:
1 change: 1 addition & 0 deletions config/test/kubemacpool.yaml
@@ -172,6 +172,7 @@ spec:
 containers:
 - args:
 - --v=debug
+- --wait-time=10
 command:
 - /manager
 env:
1 change: 1 addition & 0 deletions config/test/manager_image_patch.yaml
@@ -10,3 +10,4 @@ spec:
 name: manager
 args:
 - "--v=debug"
+- "--wait-time=10"
2 changes: 1 addition & 1 deletion hack/functest.sh
@@ -1,4 +1,4 @@
 #!/usr/bin/env bash
 
 source hack/common.sh
-KUBECONFIG=${MACPOOL_DIR}/cluster/$MACPOOL_PROVIDER/.kubeconfig go test -v -race ./tests/...
+KUBECONFIG=${MACPOOL_DIR}/cluster/$MACPOOL_PROVIDER/.kubeconfig go test -timeout 20m -v -race ./tests/...
3 changes: 3 additions & 0 deletions pkg/controller/virtualmachine/virtualmachine_controller.go
@@ -113,6 +113,7 @@ func (r *ReconcilePolicy) addFinalizerAndUpdate(virtualMachine *kubevirt.Virtual
 if helper.ContainsString(virtualMachine.ObjectMeta.Finalizers, pool_manager.RuntimeObjectFinalizerName) {
 return nil
 }
+
 log.V(1).Info("The VM does not have a finalizer",
 "virtualMachineName", request.Name,
 "virtualMachineNamespace", request.Namespace)
@@ -130,6 +131,8 @@ func (r *ReconcilePolicy) addFinalizerAndUpdate(virtualMachine *kubevirt.Virtual
 "virtualMachineName", request.Name,
 "virtualMachineNamespace", request.Namespace)
 
+r.poolManager.MarkVMAsReady(virtualMachine)
+
 return nil
 }
 
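The MarkVMAsReady call added above is the other half of the cleanup mechanism introduced by this PR: once the finalizer is persisted, the VM's MAC allocation leaves the waiting state, so the timed cleanup loop (started in pkg/pool-manager/pool.go below) will not release it back to the pool. A minimal sketch of what such a method might look like — the PoolManager fields, the key format, and the mutex are assumptions for illustration, not code from this diff:

```go
package sketch

import "sync"

// Illustrative stand-in for the real PoolManager; only MarkVMAsReady's
// name and intent come from this PR.
type PoolManager struct {
	poolMutex     sync.Mutex
	vmWaitingMacs map[string][]string // "namespace/name" -> MACs still waiting for the VM to persist
}

// MarkVMAsReady drops a VM's MACs from the waiting set so the periodic
// cleanup loop no longer treats them as release candidates.
func (p *PoolManager) MarkVMAsReady(vmKey string) {
	p.poolMutex.Lock()
	defer p.poolMutex.Unlock()
	delete(p.vmWaitingMacs, vmKey)
}
```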
23 changes: 13 additions & 10 deletions pkg/manager/manager.go
@@ -33,6 +33,7 @@ import (
 logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
 
 "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/controller"
+"github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/names"
 poolmanager "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/pool-manager"
 "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/webhook"
 )
@@ -44,23 +45,25 @@ type KubeMacPoolManager struct {
 config *rest.Config
 metricsAddr string
 continueToRunManager bool
-restartChannel chan struct{}
-kubevirtInstalledChannel chan struct{}
-stopSignalChannel chan os.Signal
-podNamespace string
-podName string
-resourceLock resourcelock.Interface
+restartChannel chan struct{} // Close the channel if we need to regenerate certs
+kubevirtInstalledChannel chan struct{} // This channel is closed once kubevirt is found, to reload the manager
+stopSignalChannel chan os.Signal // channel for the stop signal
+podNamespace string // manager pod namespace
+podName string // manager pod name
+waitingTime int // Duration in seconds to lock a mac address before it is saved to etcd
+resourceLock resourcelock.Interface // Used for the leader election
 }
 
-func NewKubeMacPoolManager(podNamespace, podName, metricsAddr string) *KubeMacPoolManager {
+func NewKubeMacPoolManager(podNamespace, podName, metricsAddr string, waitingTime int) *KubeMacPoolManager {
 kubemacpoolManager := &KubeMacPoolManager{
 continueToRunManager: true,
 restartChannel: make(chan struct{}),
 kubevirtInstalledChannel: make(chan struct{}),
 stopSignalChannel: make(chan os.Signal, 1),
 podNamespace: podNamespace,
 podName: podName,
-metricsAddr: metricsAddr}
+metricsAddr: metricsAddr,
+waitingTime: waitingTime}
 
 signal.Notify(kubemacpoolManager.stopSignalChannel, os.Interrupt, os.Kill)
 
@@ -119,7 +122,7 @@ func (k *KubeMacPoolManager) Run(rangeStart, rangeEnd net.HardwareAddr) error {
 }
 
 isKubevirtInstalled := checkForKubevirt(k.clientset)
-poolManager, err := poolmanager.NewPoolManager(k.clientset, rangeStart, rangeEnd, isKubevirtInstalled)
+poolManager, err := poolmanager.NewPoolManager(k.clientset, rangeStart, rangeEnd, isKubevirtInstalled, k.waitingTime)
 if err != nil {
 return fmt.Errorf("unable to create pool manager error %v", err)
 }
@@ -196,7 +199,7 @@ func (k *KubeMacPoolManager) markPodAsLeader() error {
 return err
 }
 
-pod.Labels[webhook.LeaderLabel] = "true"
+pod.Labels[names.LEADER_LABEL] = "true"
 _, err = k.clientset.CoreV1().Pods(k.podNamespace).Update(pod)
 if err != nil {
 return err
15 changes: 15 additions & 0 deletions pkg/names/names.go
@@ -0,0 +1,15 @@
+package names

Member: could you introduce this change in a different commit please?

Collaborator (Author): done

+
+const MANAGER_NAMESPACE = "kubemacpool-system"
+
+const MANAGER_DEPLOYMENT = "kubemacpool-mac-controller-manager"
+
+const WEBHOOK_SERVICE = "kubemacpool-service"
+
+const MUTATE_WEBHOOK = "kubemacpool-webhook"
+
+const MUTATE_WEBHOOK_CONFIG = "kubemacpool"
+
+const LEADER_LABEL = "kubemacpool-leader"
+
+const ADMISSION_IGNORE_LABEL = "kubemacpool/ignoreAdmission"
5 changes: 4 additions & 1 deletion pkg/pool-manager/pool.go
@@ -31,6 +31,7 @@ const (
 RuntimeObjectFinalizerName = "k8s.v1.cni.cncf.io/kubeMacPool"
 networksAnnotation = "k8s.v1.cni.cncf.io/networks"
 networksStatusAnnotation = "k8s.v1.cni.cncf.io/networks-status"
+vmWaitConfigMapName = "kubemacpool-vm-configmap"

Member: what is this CM good for?

Collaborator (Author): This CM saves the allocated MAC addresses that are in a waiting state. We need it in case of a race condition where our manager goes down while it still holds these allocations, so that the new leader can continue from the same state. (A sketch of the resulting cleanup loop follows this file's diff.)

Member: Sounds dangerous synchronisation-wise, but that's no different from the rest of kubemacpool... :D

 )
 
 var log = logf.Log.WithName("PoolManager")
@@ -54,7 +55,7 @@ const (
 AllocationStatusWaitingForPod AllocationStatus = "WaitingForPod"
 )
 
-func NewPoolManager(kubeClient kubernetes.Interface, rangeStart, rangeEnd net.HardwareAddr, kubevirtExist bool) (*PoolManager, error) {
+func NewPoolManager(kubeClient kubernetes.Interface, rangeStart, rangeEnd net.HardwareAddr, kubevirtExist bool, waitTime int) (*PoolManager, error) {
 err := checkRange(rangeStart, rangeEnd)
 if err != nil {
 return nil, err
@@ -86,6 +87,8 @@ func NewPoolManager(kubeClient kubernetes.Interface, rangeStart, rangeEnd net.Ha
 return nil, err
 }
 
+go poolManger.vmWaitingCleanupLook(waitTime)
+
 return poolManger, nil
 }
 
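As the review thread above explains, MACs handed out for a VM are first recorded in a waiting state (mirrored into the kubemacpool-vm-configmap so a newly elected leader can resume from it), and the goroutine started here periodically releases entries whose VM was never persisted within the configured wait-time. A self-contained sketch of such a loop, with an in-memory map standing in for the ConfigMap; the entry layout, field names, and the releaseMac helper are invented for illustration:

```go
package poolsketch

import (
	"sync"
	"time"
)

// waitingEntry records when a MAC was allocated but not yet confirmed,
// i.e. the owning VirtualMachine object has not reached etcd.
type waitingEntry struct {
	mac       string
	allocated time.Time
}

// PoolManager is an illustrative stand-in; the real manager mirrors this
// state into the kubemacpool-vm-configmap via the Kubernetes API.
type PoolManager struct {
	poolMutex sync.Mutex
	waiting   map[string]waitingEntry // keyed by "namespace/name" of the VM
}

// vmWaitingCleanupLoop releases MACs whose VM never showed up within
// waitTime seconds, so a crashed leader does not leak addresses.
func (p *PoolManager) vmWaitingCleanupLoop(waitTime int) {
	ticker := time.NewTicker(time.Duration(waitTime) * time.Second)
	defer ticker.Stop()

	for range ticker.C {
		p.poolMutex.Lock()
		now := time.Now()
		for vmKey, entry := range p.waiting {
			if now.Sub(entry.allocated) > time.Duration(waitTime)*time.Second {
				// The VM was never persisted: drop the waiting record and
				// return the MAC to the pool.
				delete(p.waiting, vmKey)
				p.releaseMac(entry.mac)
			}
		}
		p.poolMutex.Unlock()
	}
}

// releaseMac is a placeholder for returning a MAC to the managed range.
func (p *PoolManager) releaseMac(mac string) {}
```

The real loop is started with `go poolManger.vmWaitingCleanupLook(waitTime)` as in the diff above; the test deployment sets --wait-time=10 so the functional tests exercise the release path quickly, while the default is 600 seconds.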