Commit 0900359
select storage for vm image after node selection so to not pick unavailable storage
sp-yduck committed Feb 1, 2025
1 parent 469028b commit 0900359
Showing 9 changed files with 69 additions and 72 deletions.
13 changes: 9 additions & 4 deletions cloud/scheduler/framework/cycle_state.go
@@ -8,8 +8,9 @@ type CycleState struct {
 }
 
 type SchedulerResult struct {
-	vmid int
-	node string
+	vmid    int
+	node    string
+	storage string
 }
 
 func NewCycleState() CycleState {
@@ -46,8 +47,8 @@ func (c *CycleState) UpdateState(completed bool, err error, result SchedulerResult) {
 	c.result = result
 }
 
-func NewSchedulerResult(vmid int, node string) SchedulerResult {
-	return SchedulerResult{vmid: vmid, node: node}
+func NewSchedulerResult(vmid int, node string, storage string) SchedulerResult {
+	return SchedulerResult{vmid: vmid, node: node, storage: storage}
 }
 
 func (c *CycleState) Result() SchedulerResult {
@@ -61,3 +62,7 @@ func (r *SchedulerResult) Node() string {
 func (r *SchedulerResult) VMID() int {
 	return r.vmid
 }
+
+func (r *SchedulerResult) Storage() string {
+	return r.storage
+}
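With this change a scheduling result carries the selected storage alongside the node and VMID. Below is a minimal, self-contained sketch of how a caller consumes the new accessor; the type is re-declared here for illustration (the real one lives in cloud/scheduler/framework) and the values are hypothetical:

```go
package main

import "fmt"

// schedulerResult mirrors the SchedulerResult shape from the diff above.
type schedulerResult struct {
	vmid    int
	node    string
	storage string
}

func (r *schedulerResult) VMID() int       { return r.vmid }
func (r *schedulerResult) Node() string    { return r.node }
func (r *schedulerResult) Storage() string { return r.storage }

func main() {
	// Hypothetical values: in practice the scheduler fills these in.
	result := schedulerResult{vmid: 100, node: "pve-node1", storage: "local-lvm"}
	fmt.Printf("vmid=%d node=%s storage=%s\n", result.VMID(), result.Node(), result.Storage())
}
```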
2 changes: 1 addition & 1 deletion cloud/scheduler/framework/types.go
@@ -58,7 +58,7 @@ type NodeInfo struct {
 }
 
 func GetNodeInfoList(ctx context.Context, client *proxmox.Service) ([]*NodeInfo, error) {
-	nodes, err := client.Nodes(ctx)
+	nodes, err := client.GetNodes(ctx)
 	if err != nil {
 		return nil, err
 	}
40 changes: 38 additions & 2 deletions cloud/scheduler/scheduler.go
@@ -3,6 +3,7 @@ package scheduler
 import (
 	"context"
 	"fmt"
+	"strings"
 	"time"
 
 	"github.com/go-logr/logr"
@@ -221,7 +222,15 @@ func (s *Scheduler) ScheduleOne(ctx context.Context) {
 		return
 	}
 
-	result := framework.NewSchedulerResult(vmid, node)
+	// select vm storage to be used for vm image
+	// must be done after node selection as some storages may not be available on some nodes
+	storage, err := s.SelectStorage(qemuCtx, *config, node)
+	if err != nil {
+		state.UpdateState(true, err, framework.SchedulerResult{})
+		return
+	}
+
+	result := framework.NewSchedulerResult(vmid, node, storage)
 	state.UpdateState(true, nil, result)
 }

@@ -271,7 +280,7 @@ func (s *Scheduler) CreateQEMU(ctx context.Context, config *api.VirtualMachineCreateOptions)
 
 func (s *Scheduler) SelectNode(ctx context.Context, config api.VirtualMachineCreateOptions) (string, error) {
 	s.logger.Info("finding proxmox node matching qemu")
-	nodes, err := s.client.Nodes(ctx)
+	nodes, err := s.client.GetNodes(ctx)
 	if err != nil {
 		return "", err
 	}
@@ -316,6 +325,33 @@ func (s *Scheduler) SelectVMID(ctx context.Context, config api.VirtualMachineCreateOptions)
 	return s.RunVMIDPlugins(ctx, nil, config, nextid, *usedID)
 }
 
+func (s *Scheduler) SelectStorage(ctx context.Context, config api.VirtualMachineCreateOptions, nodeName string) (string, error) {
+	s.logger.Info("finding proxmox storage to be used for qemu")
+	if config.Storage != "" {
+		// to do: raise error if storage is not available on the node
+		return config.Storage, nil
+	}
+
+	node, err := s.client.Node(ctx, nodeName)
+	if err != nil {
+		return "", err
+	}
+	storages, err := node.GetStorages(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	// current logic is just selecting the first storage
+	// that is active and supports "images" type of content
+	for _, storage := range storages {
+		if strings.Contains(storage.Content, "images") && storage.Active == 1 {
+			return storage.Storage, nil
+		}
+	}
+
+	return "", fmt.Errorf("no storage available for VM image on node %s", nodeName)
+}
+
 func (s *Scheduler) RunFilterPlugins(ctx context.Context, state *framework.CycleState, config api.VirtualMachineCreateOptions, nodes []*api.Node) ([]*api.Node, error) {
 	s.logger.Info("filtering proxmox node")
 	feasibleNodes := make([]*api.Node, 0, len(nodes))
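To make the new selection rule concrete, here is a self-contained sketch of the same first-match filter over a node's storage list. The nodeStorage struct and the sample inventory are hypothetical stand-ins; in the real code the entries come from proxmox-go via node.GetStorages(ctx):

```go
package main

import (
	"fmt"
	"strings"
)

// nodeStorage is an illustrative stand-in for the storage entries the
// Proxmox API reports per node; field names follow the diff above.
type nodeStorage struct {
	Storage string // storage identifier
	Content string // comma-separated content types, e.g. "images,rootdir"
	Active  int    // 1 if the storage is active on this node
}

// firstImageStorage reproduces the SelectStorage loop: return the first
// storage that is active and advertises "images" content.
func firstImageStorage(storages []nodeStorage, nodeName string) (string, error) {
	for _, s := range storages {
		if strings.Contains(s.Content, "images") && s.Active == 1 {
			return s.Storage, nil
		}
	}
	return "", fmt.Errorf("no storage available for VM image on node %s", nodeName)
}

func main() {
	// Hypothetical inventory: a backup-only store, an inactive image
	// store, and an active image store; only the last one qualifies.
	storages := []nodeStorage{
		{Storage: "backup-nfs", Content: "backup", Active: 1},
		{Storage: "slow-ceph", Content: "images,rootdir", Active: 0},
		{Storage: "local-lvm", Content: "images,rootdir", Active: 1},
	}
	storage, err := firstImageStorage(storages, "pve-node1")
	if err != nil {
		panic(err)
	}
	fmt.Println(storage) // local-lvm
}
```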
2 changes: 1 addition & 1 deletion cloud/services/compute/instance/cloudinit.go
@@ -37,7 +37,7 @@ func (s *Service) deleteCloudConfig(ctx context.Context) error {
 	path := userSnippetPath(s.scope.Name())
 	volumeID := fmt.Sprintf("%s:%s", storageName, path)
 
-	node, err := s.client.Node(ctx, s.scope.NodeName())
+	node, err := s.client.GetNode(ctx, s.scope.NodeName())
 	if err != nil {
 		return err
 	}
21 changes: 16 additions & 5 deletions cloud/services/compute/instance/qemu.go
@@ -67,10 +67,6 @@ func (s *Service) createQEMU(ctx context.Context) (*proxmox.VirtualMachine, error) {
 	log := log.FromContext(ctx)
 	log.Info("creating qemu")
 
-	if err := s.ensureStorageAvailable(ctx); err != nil {
-		return nil, err
-	}
-
 	// create qemu
 	log.Info("making qemu spec")
 	vmoption := s.generateVMOptions()
@@ -81,10 +77,14 @@ func (s *Service) createQEMU(ctx context.Context) (*proxmox.VirtualMachine, error) {
 		log.Error(err, "failed to schedule qemu instance")
 		return nil, err
 	}
-	node, vmid := result.Node(), result.VMID()
+	node, vmid, storage := result.Node(), result.VMID(), result.Storage()
 	s.scope.SetNodeName(node)
 	s.scope.SetVMID(vmid)
 
+	// inject storage
+	s.injectVMOption(&vmoption, storage)
+	s.scope.SetStorage(storage)
+
 	// os image
 	if err := s.setCloudImage(ctx); err != nil {
 		return nil, err
@@ -164,3 +164,14 @@ func boolToInt8(b bool) int8 {
 	}
 	return 0
 }
+
+func (s *Service) injectVMOption(vmOption *api.VirtualMachineCreateOptions, storage string) *api.VirtualMachineCreateOptions {
+	// storage is finalized after node scheduling so we need to inject storage name here
+	ide2 := fmt.Sprintf("file=%s:cloudinit,media=cdrom", storage)
+	scsi0 := fmt.Sprintf("%s:0,import-from=%s", storage, rawImageFilePath(s.scope.GetImage()))
+	vmOption.Scsi.Scsi0 = scsi0
+	vmOption.Ide.Ide2 = ide2
+	vmOption.Storage = storage
+
+	return vmOption
+}
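For illustration, the volume strings injectVMOption builds once the scheduler has fixed the storage look like the following. The storage name and raw image path are made-up placeholders standing in for the scheduled storage and for rawImageFilePath(s.scope.GetImage()):

```go
package main

import "fmt"

func main() {
	// Hypothetical inputs for illustration only.
	storage := "local-lvm"
	rawImagePath := "/var/lib/vz/images/ubuntu-22.04.raw" // stand-in for rawImageFilePath(...)

	// Same formats as injectVMOption in the diff above: a cloud-init
	// CD-ROM on ide2 and a boot disk imported from the raw image on scsi0.
	ide2 := fmt.Sprintf("file=%s:cloudinit,media=cdrom", storage)
	scsi0 := fmt.Sprintf("%s:0,import-from=%s", storage, rawImagePath)

	fmt.Println(ide2)  // file=local-lvm:cloudinit,media=cdrom
	fmt.Println(scsi0) // local-lvm:0,import-from=/var/lib/vz/images/ubuntu-22.04.raw
}
```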
55 changes: 0 additions & 55 deletions cloud/services/compute/instance/storage.go

This file was deleted.

2 changes: 1 addition & 1 deletion cloud/services/compute/storage/reconcile.go
@@ -76,7 +76,7 @@ func (s *Service) deleteStorage(ctx context.Context) error {
 		return err
 	}
 
-	nodes, err := s.client.Nodes(ctx)
+	nodes, err := s.client.GetNodes(ctx)
 	if err != nil {
 		return err
 	}
2 changes: 1 addition & 1 deletion go.mod
@@ -5,7 +5,7 @@ go 1.19
 require (
 	github.com/go-logr/logr v1.3.0
 	github.com/imdario/mergo v0.3.13
-	github.com/k8s-proxmox/proxmox-go v0.0.0-alpha28
+	github.com/k8s-proxmox/proxmox-go v0.0.0-alpha30
 	github.com/onsi/ginkgo/v2 v2.13.2
 	github.com/onsi/gomega v1.30.0
 	github.com/pkg/errors v0.9.1
4 changes: 2 additions & 2 deletions go.sum
@@ -301,8 +301,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/k8s-proxmox/proxmox-go v0.0.0-alpha28 h1:h0PwVITcljicpXCmMcOyXeXWhkVeYBiK4F2A/Ch5dxg=
-github.com/k8s-proxmox/proxmox-go v0.0.0-alpha28/go.mod h1:ZSAdc9vVAEcIhbNkZxURWTY+k59cXUy9mswp5ofMM40=
+github.com/k8s-proxmox/proxmox-go v0.0.0-alpha30 h1:xwA4cEZVjaShetPErsN/z+CHUA4jE8HhIRQ9d345WsM=
+github.com/k8s-proxmox/proxmox-go v0.0.0-alpha30/go.mod h1:ZSAdc9vVAEcIhbNkZxURWTY+k59cXUy9mswp5ofMM40=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
