diff --git a/cloud/scheduler/framework/cycle_state.go b/cloud/scheduler/framework/cycle_state.go
index 859e6d9..2feb459 100644
--- a/cloud/scheduler/framework/cycle_state.go
+++ b/cloud/scheduler/framework/cycle_state.go
@@ -8,8 +8,9 @@ type CycleState struct {
 }
 
 type SchedulerResult struct {
-    vmid int
-    node string
+    vmid    int
+    node    string
+    storage string
 }
 
 func NewCycleState() CycleState {
@@ -46,8 +47,8 @@ func (c *CycleState) UpdateState(completed bool, err error, result SchedulerResu
     c.result = result
 }
 
-func NewSchedulerResult(vmid int, node string) SchedulerResult {
-    return SchedulerResult{vmid: vmid, node: node}
+func NewSchedulerResult(vmid int, node string, storage string) SchedulerResult {
+    return SchedulerResult{vmid: vmid, node: node, storage: storage}
 }
 
 func (c *CycleState) Result() SchedulerResult {
@@ -61,3 +62,7 @@ func (r *SchedulerResult) Node() string {
 func (r *SchedulerResult) VMID() int {
     return r.vmid
 }
+
+func (r *SchedulerResult) Storage() string {
+    return r.storage
+}
diff --git a/cloud/scheduler/framework/types.go b/cloud/scheduler/framework/types.go
index 1592aa4..580f715 100644
--- a/cloud/scheduler/framework/types.go
+++ b/cloud/scheduler/framework/types.go
@@ -58,7 +58,7 @@ type NodeInfo struct {
 }
 
 func GetNodeInfoList(ctx context.Context, client *proxmox.Service) ([]*NodeInfo, error) {
-    nodes, err := client.Nodes(ctx)
+    nodes, err := client.GetNodes(ctx)
     if err != nil {
         return nil, err
     }
diff --git a/cloud/scheduler/scheduler.go b/cloud/scheduler/scheduler.go
index 35d277b..a2eb13f 100644
--- a/cloud/scheduler/scheduler.go
+++ b/cloud/scheduler/scheduler.go
@@ -3,6 +3,7 @@ package scheduler
 import (
     "context"
     "fmt"
+    "strings"
     "time"
 
     "github.com/go-logr/logr"
@@ -221,7 +222,15 @@ func (s *Scheduler) ScheduleOne(ctx context.Context) {
         return
     }
 
-    result := framework.NewSchedulerResult(vmid, node)
+    // select vm storage to be used for vm image
+    // must be done after node selection as some storages may not be available on some nodes
+    storage, err := s.SelectStorage(qemuCtx, *config, node)
+    if err != nil {
+        state.UpdateState(true, err, framework.SchedulerResult{})
+        return
+    }
+
+    result := framework.NewSchedulerResult(vmid, node, storage)
     state.UpdateState(true, nil, result)
 }
 
@@ -271,7 +280,7 @@ func (s *Scheduler) CreateQEMU(ctx context.Context, config *api.VirtualMachineCr
 
 func (s *Scheduler) SelectNode(ctx context.Context, config api.VirtualMachineCreateOptions) (string, error) {
     s.logger.Info("finding proxmox node matching qemu")
-    nodes, err := s.client.Nodes(ctx)
+    nodes, err := s.client.GetNodes(ctx)
     if err != nil {
         return "", err
     }
@@ -316,6 +325,33 @@ func (s *Scheduler) SelectVMID(ctx context.Context, config api.VirtualMachineCre
     return s.RunVMIDPlugins(ctx, nil, config, nextid, *usedID)
 }
 
+func (s *Scheduler) SelectStorage(ctx context.Context, config api.VirtualMachineCreateOptions, nodeName string) (string, error) {
+    s.logger.Info("finding proxmox storage to be used for qemu")
+    if config.Storage != "" {
+        // TODO: raise an error if the storage is not available on the node
+        return config.Storage, nil
+    }
+
+    node, err := s.client.Node(ctx, nodeName)
+    if err != nil {
+        return "", err
+    }
+    storages, err := node.GetStorages(ctx)
+    if err != nil {
+        return "", err
+    }
+
+    // current logic is just selecting the first storage
+    // that is active and supports "images" type of content
+    for _, storage := range storages {
+        if strings.Contains(storage.Content, "images") && storage.Active == 1 {
+            return storage.Storage, nil
+        }
+    }
+
fmt.Errorf("no storage available for VM image on node %s", nodeName) +} + func (s *Scheduler) RunFilterPlugins(ctx context.Context, state *framework.CycleState, config api.VirtualMachineCreateOptions, nodes []*api.Node) ([]*api.Node, error) { s.logger.Info("filtering proxmox node") feasibleNodes := make([]*api.Node, 0, len(nodes)) diff --git a/cloud/services/compute/instance/cloudinit.go b/cloud/services/compute/instance/cloudinit.go index 9a268db..72f09f6 100644 --- a/cloud/services/compute/instance/cloudinit.go +++ b/cloud/services/compute/instance/cloudinit.go @@ -37,7 +37,7 @@ func (s *Service) deleteCloudConfig(ctx context.Context) error { path := userSnippetPath(s.scope.Name()) volumeID := fmt.Sprintf("%s:%s", storageName, path) - node, err := s.client.Node(ctx, s.scope.NodeName()) + node, err := s.client.GetNode(ctx, s.scope.NodeName()) if err != nil { return err } diff --git a/cloud/services/compute/instance/qemu.go b/cloud/services/compute/instance/qemu.go index dba3743..163e313 100644 --- a/cloud/services/compute/instance/qemu.go +++ b/cloud/services/compute/instance/qemu.go @@ -67,10 +67,6 @@ func (s *Service) createQEMU(ctx context.Context) (*proxmox.VirtualMachine, erro log := log.FromContext(ctx) log.Info("creating qemu") - if err := s.ensureStorageAvailable(ctx); err != nil { - return nil, err - } - // create qemu log.Info("making qemu spec") vmoption := s.generateVMOptions() @@ -81,10 +77,14 @@ func (s *Service) createQEMU(ctx context.Context) (*proxmox.VirtualMachine, erro log.Error(err, "failed to schedule qemu instance") return nil, err } - node, vmid := result.Node(), result.VMID() + node, vmid, storage := result.Node(), result.VMID(), result.Storage() s.scope.SetNodeName(node) s.scope.SetVMID(vmid) + // inject storage + s.injectVMOption(&vmoption, storage) + s.scope.SetStorage(storage) + // os image if err := s.setCloudImage(ctx); err != nil { return nil, err @@ -164,3 +164,14 @@ func boolToInt8(b bool) int8 { } return 0 } + +func (s *Service) injectVMOption(vmOption *api.VirtualMachineCreateOptions, storage string) *api.VirtualMachineCreateOptions { + // storage is finalized after node scheduling so we need to inject storage name here + ide2 := fmt.Sprintf("file=%s:cloudinit,media=cdrom", storage) + scsi0 := fmt.Sprintf("%s:0,import-from=%s", storage, rawImageFilePath(s.scope.GetImage())) + vmOption.Scsi.Scsi0 = scsi0 + vmOption.Ide.Ide2 = ide2 + vmOption.Storage = storage + + return vmOption +} diff --git a/cloud/services/compute/instance/storage.go b/cloud/services/compute/instance/storage.go deleted file mode 100644 index 97c671a..0000000 --- a/cloud/services/compute/instance/storage.go +++ /dev/null @@ -1,55 +0,0 @@ -package instance - -import ( - "context" - "fmt" - "strings" - - "github.com/k8s-proxmox/proxmox-go/api" - "sigs.k8s.io/controller-runtime/pkg/log" -) - -// make sure storage exists and supports "images" type of content -func (s *Service) ensureStorageAvailable(ctx context.Context) error { - log := log.FromContext(ctx) - log.Info("ensuring storage is available") - storageName := s.scope.GetStorage() - if storageName == "" { // no storage specified, find available storage - storage, err := s.findVMStorage(ctx) - if err != nil { - return err - } - storageName = storage.Storage - s.scope.SetStorage(storageName) - } else { // storage specified, check if it supports "images" type of content - log.Info("checking if specified storage supports image type of content") - storage, err := s.client.RESTClient().GetStorage(ctx, storageName) - if err != nil { - 
-            return err
-        }
-        if !supportsImage(storage) {
-            return fmt.Errorf("storage %s does not support \"images\" type of content", storageName)
-        }
-    }
-    return nil
-}
-
-// get one storage supporting "images" type of content
-func (s *Service) findVMStorage(ctx context.Context) (*api.Storage, error) {
-    log := log.FromContext(ctx)
-    log.Info("finding available storage")
-    storages, err := s.client.RESTClient().GetStorages(ctx)
-    if err != nil {
-        return nil, err
-    }
-    for _, storage := range storages {
-        if supportsImage(storage) {
-            return storage, nil
-        }
-    }
-    return nil, fmt.Errorf("no available storage")
-}
-
-func supportsImage(storage *api.Storage) bool {
-    return strings.Contains(storage.Content, "images")
-}
diff --git a/cloud/services/compute/storage/reconcile.go b/cloud/services/compute/storage/reconcile.go
index 7c0d767..baaac53 100644
--- a/cloud/services/compute/storage/reconcile.go
+++ b/cloud/services/compute/storage/reconcile.go
@@ -76,7 +76,7 @@ func (s *Service) deleteStorage(ctx context.Context) error {
         return err
     }
 
-    nodes, err := s.client.Nodes(ctx)
+    nodes, err := s.client.GetNodes(ctx)
     if err != nil {
         return err
     }
diff --git a/go.mod b/go.mod
index 0dd1637..4e8f4e7 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.19
 require (
     github.com/go-logr/logr v1.3.0
     github.com/imdario/mergo v0.3.13
-    github.com/k8s-proxmox/proxmox-go v0.0.0-alpha28
+    github.com/k8s-proxmox/proxmox-go v0.0.0-alpha30
     github.com/onsi/ginkgo/v2 v2.13.2
     github.com/onsi/gomega v1.30.0
     github.com/pkg/errors v0.9.1
diff --git a/go.sum b/go.sum
index 1d4c4bf..2d3300b 100644
--- a/go.sum
+++ b/go.sum
@@ -301,8 +301,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/k8s-proxmox/proxmox-go v0.0.0-alpha28 h1:h0PwVITcljicpXCmMcOyXeXWhkVeYBiK4F2A/Ch5dxg=
-github.com/k8s-proxmox/proxmox-go v0.0.0-alpha28/go.mod h1:ZSAdc9vVAEcIhbNkZxURWTY+k59cXUy9mswp5ofMM40=
+github.com/k8s-proxmox/proxmox-go v0.0.0-alpha30 h1:xwA4cEZVjaShetPErsN/z+CHUA4jE8HhIRQ9d345WsM=
+github.com/k8s-proxmox/proxmox-go v0.0.0-alpha30/go.mod h1:ZSAdc9vVAEcIhbNkZxURWTY+k59cXUy9mswp5ofMM40=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
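
Note: taken together, the SelectStorage change in scheduler.go and injectVMOption in qemu.go boil down to the logic sketched below. This is an illustrative, self-contained Go sketch, not code from the patch: the Storage struct is a simplified stand-in for proxmox-go's api.Storage, and the storage names, node name, and image path are invented for the example.

package main

import (
    "fmt"
    "strings"
)

// Storage is a simplified stand-in for proxmox-go's api.Storage: Content is a
// comma-separated list of content types (e.g. "rootdir,images") and Active is
// 1 when the storage is usable on the node.
type Storage struct {
    Storage string
    Content string
    Active  int
}

// selectStorage mirrors the scheduler's rule: return the first storage on the
// node that is active and supports the "images" content type.
func selectStorage(storages []Storage, nodeName string) (string, error) {
    for _, s := range storages {
        if strings.Contains(s.Content, "images") && s.Active == 1 {
            return s.Storage, nil
        }
    }
    return "", fmt.Errorf("no storage available for VM image on node %s", nodeName)
}

func main() {
    // hypothetical storages reported for a node named "pve01"
    storages := []Storage{
        {Storage: "nfs-backup", Content: "backup", Active: 1},
        {Storage: "local-lvm", Content: "rootdir,images", Active: 1},
    }
    storage, err := selectStorage(storages, "pve01")
    if err != nil {
        panic(err)
    }

    // The selected name is then injected into the QEMU create options using
    // the same string formats as injectVMOption (image path is made up).
    ide2 := fmt.Sprintf("file=%s:cloudinit,media=cdrom", storage)
    scsi0 := fmt.Sprintf("%s:0,import-from=%s", storage, "/var/lib/vz/images/image.raw")
    fmt.Println(storage) // local-lvm
    fmt.Println(ide2)    // file=local-lvm:cloudinit,media=cdrom
    fmt.Println(scsi0)   // local-lvm:0,import-from=/var/lib/vz/images/image.raw
}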