Merge remote-tracking branch 'origin/main' into k0s-1-29
laverya committed Oct 23, 2024
2 parents 2eb8fb7 + 13367d8 commit 3788026
Showing 17 changed files with 704 additions and 35 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/automated-prs-manager.yaml
@@ -74,7 +74,7 @@ jobs:
# If all tests and required checks passed, approve and merge.
if gh run view "$run_id" --json jobs -q '.jobs[] | select(.name == "validate-success") | .conclusion' | grep -q "success"; then
if gh run view "$run_id" --json jobs -q '.jobs[] | select(.name == "Validate success") | .conclusion' | grep -q "success"; then
if gh pr checks "${{ matrix.pr.url }}" --required; then
echo "All tests and required checks passed. Approving and merging."
echo -e "LGTM :thumbsup: \n\nThis PR was automatically approved and merged by the [automated-prs-manager](https://github.com/replicatedhq/embedded-cluster/blob/main/.github/workflows/automated-prs-manager.yaml) GitHub action" > body.txt
@@ -90,8 +90,8 @@ jobs:
# If more than half of the e2e jobs are successful, re-run the failed jobs.
num_of_jobs=$(gh run view "$run_id" --json jobs -q '.jobs[] | select(.name | startswith("e2e")) | .name' | wc -l)
num_of_successful_jobs=$(gh run view "$run_id" --json jobs -q '.jobs[] | select((.name | startswith("e2e")) and (.conclusion == "success")) | .name' | wc -l)
num_of_jobs=$(gh run view "$run_id" --json jobs -q '.jobs[] | select(.name | startswith("E2E")) | .name' | wc -l)
num_of_successful_jobs=$(gh run view "$run_id" --json jobs -q '.jobs[] | select((.name | startswith("E2E")) and (.conclusion == "success")) | .name' | wc -l)
if [ "$num_of_successful_jobs" -gt $((num_of_jobs / 2)) ]; then
echo "More than half of the e2e jobs are successful. Re-running failed jobs."
6 changes: 3 additions & 3 deletions .github/workflows/ci.yaml
@@ -481,7 +481,7 @@ jobs:

# e2e-docker runs the e2e tests inside a docker container rather than a full VM
e2e-docker:
name: E2E docker
name: E2E docker # this name is used by .github/workflows/automated-prs-manager.yaml
runs-on: ubuntu-latest
needs:
- git-sha
@@ -570,7 +570,7 @@ jobs:
test-name: '${{ matrix.test }}'

e2e:
name: E2E
name: E2E # this name is used by .github/workflows/automated-prs-manager.yaml
runs-on: ${{ matrix.runner || 'ubuntu-22.04' }}
needs:
- build-current
@@ -635,7 +635,7 @@ jobs:
# this job will validate that all the tests passed
# it is used for the github branch protection rule
validate-success:
name: Validate success
name: Validate success # this name is used by .github/workflows/automated-prs-manager.yaml
runs-on: ubuntu-20.04
needs:
- e2e
6 changes: 5 additions & 1 deletion cmd/embedded-cluster/install.go
@@ -152,7 +152,7 @@ func RunHostPreflights(c *cli.Context, provider *defaults.Provider, applier *add
return fmt.Errorf("unable to read host preflights: %w", err)
}

data := preflights.TemplateData{
data, err := preflights.TemplateData{
ReplicatedAPIURL: replicatedAPIURL,
ProxyRegistryURL: proxyRegistryURL,
IsAirgap: isAirgap,
@@ -162,6 +162,10 @@
K0sDataDir: provider.EmbeddedClusterK0sSubDir(),
OpenEBSDataDir: provider.EmbeddedClusterOpenEBSLocalSubDir(),
SystemArchitecture: runtime.GOARCH,
}.WithCIDRData(getCIDRs(c))

if err != nil {
return fmt.Errorf("unable to get host preflights data: %w", err)
}
chpfs, err := preflights.GetClusterHostPreflights(c.Context, data)
if err != nil {
10 changes: 9 additions & 1 deletion cmd/embedded-cluster/network.go
@@ -45,8 +45,16 @@ func withSubnetCIDRFlags(flags []cli.Flag) []cli.Flag {
// --pod-cidr and --service-cidr have been set, they are used. Otherwise,
// the cidr flag is split into pod and service CIDRs.
func DeterminePodAndServiceCIDRs(c *cli.Context) (string, string, error) {
if c.IsSet("pod-cidr") && c.IsSet("service-cidr") {
if c.IsSet("pod-cidr") || c.IsSet("service-cidr") {
return c.String("pod-cidr"), c.String("service-cidr"), nil
}
return netutils.SplitNetworkCIDR(c.String("cidr"))
}

// getCIDRs returns the CIDRs in use based on the provided cli flags.
func getCIDRs(c *cli.Context) (string, string, string) {
if c.IsSet("pod-cidr") || c.IsSet("service-cidr") {
return c.String("pod-cidr"), c.String("service-cidr"), ""
}
return "", "", c.String("cidr")
}
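
The install.go hunk above now calls TemplateData.WithCIDRData with the three values returned by getCIDRs and handles the returned error, but the method body itself is not part of this commit. A minimal sketch of what such a method could look like, assuming it only validates the CIDRs and records them on the template data (the PodCIDR, ServiceCIDR, and GlobalCIDR field names are illustrative, not taken from this diff):

package preflights

import (
	"fmt"
	"net"
)

// TemplateData is reduced to CIDR-related fields for this sketch; the real
// struct also carries the fields shown in the install.go hunk above
// (ReplicatedAPIURL, ProxyRegistryURL, and so on).
type TemplateData struct {
	PodCIDR     string // illustrative field name
	ServiceCIDR string // illustrative field name
	GlobalCIDR  string // illustrative field name
}

// WithCIDRData validates the CIDRs produced by getCIDRs and copies them into
// the template data. This is a hypothetical body; only the call site and the
// (TemplateData, error) return shape are visible in this commit.
func (t TemplateData) WithCIDRData(podCIDR, serviceCIDR, globalCIDR string) (TemplateData, error) {
	for _, cidr := range []string{podCIDR, serviceCIDR, globalCIDR} {
		if cidr == "" {
			continue
		}
		if _, _, err := net.ParseCIDR(cidr); err != nil {
			return t, fmt.Errorf("invalid CIDR %q: %w", cidr, err)
		}
	}
	t.PodCIDR, t.ServiceCIDR, t.GlobalCIDR = podCIDR, serviceCIDR, globalCIDR
	return t, nil
}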
172 changes: 172 additions & 0 deletions cmd/embedded-cluster/network_test.go
@@ -0,0 +1,172 @@
package main

import (
"flag"
"testing"

"github.com/k0sproject/k0s/pkg/apis/k0s/v1beta1"
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
)

func Test_getCIDRs(t *testing.T) {
tests := []struct {
name string
buildCliContext func(*flag.FlagSet) *cli.Context
expected []string
}{
{
name: "with pod and service flags",
expected: []string{
"10.0.0.0/24",
"10.1.0.0/24",
"",
},
buildCliContext: func(flagSet *flag.FlagSet) *cli.Context {
c := cli.NewContext(cli.NewApp(), flagSet, nil)
c.Set("pod-cidr", "10.0.0.0/24")
c.Set("service-cidr", "10.1.0.0/24")
return c
},
},
{
name: "with pod flag",
expected: []string{
"10.0.0.0/24",
v1beta1.DefaultNetwork().ServiceCIDR,
"",
},
buildCliContext: func(flagSet *flag.FlagSet) *cli.Context {
c := cli.NewContext(cli.NewApp(), flagSet, nil)
c.Set("pod-cidr", "10.0.0.0/24")
return c
},
},
{
name: "with pod, service and cidr flags",
expected: []string{
"10.0.0.0/24",
"10.1.0.0/24",
"",
},
buildCliContext: func(flagSet *flag.FlagSet) *cli.Context {
c := cli.NewContext(cli.NewApp(), flagSet, nil)
c.Set("pod-cidr", "10.0.0.0/24")
c.Set("service-cidr", "10.1.0.0/24")
c.Set("cidr", "10.2.0.0/24")
return c
},
},
{
name: "with pod and cidr flags",
expected: []string{
"10.0.0.0/24",
v1beta1.DefaultNetwork().ServiceCIDR,
"",
},
buildCliContext: func(flagSet *flag.FlagSet) *cli.Context {
c := cli.NewContext(cli.NewApp(), flagSet, nil)
c.Set("pod-cidr", "10.0.0.0/24")
c.Set("cidr", "10.2.0.0/24")
return c
},
},
{
name: "with cidr flag",
expected: []string{
"",
"",
"10.2.0.0/24",
},
buildCliContext: func(flagSet *flag.FlagSet) *cli.Context {
c := cli.NewContext(cli.NewApp(), flagSet, nil)
c.Set("cidr", "10.2.0.0/24")
return c
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req := require.New(t)

flagSet := flag.NewFlagSet(t.Name(), 0)
flags := withSubnetCIDRFlags([]cli.Flag{})
for _, f := range flags {
err := f.Apply(flagSet)
req.NoError(err)
}

cc := test.buildCliContext(flagSet)
podCIDR, serviceCIDR, CIDR := getCIDRs(cc)
req.Equal(test.expected[0], podCIDR)
req.Equal(test.expected[1], serviceCIDR)
req.Equal(test.expected[2], CIDR)
})
}
}

func Test_DeterminePodAndServiceCIDRs(t *testing.T) {

tests := []struct {
name string
buildCliContext func(*flag.FlagSet) *cli.Context
expected []string
}{
{
name: "with pod flag",
expected: []string{
"10.0.0.0/16",
v1beta1.DefaultNetwork().ServiceCIDR,
},
buildCliContext: func(flagSet *flag.FlagSet) *cli.Context {
c := cli.NewContext(cli.NewApp(), flagSet, nil)
c.Set("pod-cidr", "10.0.0.0/16")
return c
},
},
{
name: "with service flag",
expected: []string{
v1beta1.DefaultNetwork().PodCIDR,
"10.1.0.0/16",
},
buildCliContext: func(flagSet *flag.FlagSet) *cli.Context {
c := cli.NewContext(cli.NewApp(), flagSet, nil)
c.Set("service-cidr", "10.1.0.0/16")
return c
},
},
{
name: "with cidr flag",
expected: []string{
"10.0.0.0/16",
"10.1.0.0/16",
},
buildCliContext: func(flagSet *flag.FlagSet) *cli.Context {
c := cli.NewContext(cli.NewApp(), flagSet, nil)
c.Set("cidr", "10.0.0.0/15")
return c
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req := require.New(t)

flagSet := flag.NewFlagSet(t.Name(), 0)
flags := withSubnetCIDRFlags([]cli.Flag{})
for _, f := range flags {
err := f.Apply(flagSet)
req.NoError(err)
}

cc := test.buildCliContext(flagSet)
podCIDR, serviceCIDR, err := DeterminePodAndServiceCIDRs(cc)
req.NoError(err)
req.Equal(test.expected[0], podCIDR)
req.Equal(test.expected[1], serviceCIDR)
})
}
}
2 changes: 1 addition & 1 deletion e2e/playwright/tests/deploy-upgrade/test.spec.ts
@@ -7,7 +7,7 @@ test('deploy upgrade', async ({ page }) => {
await page.getByRole('link', { name: 'Version history', exact: true }).click();
await page.locator('.available-update-row', { hasText: process.env.APP_UPGRADE_VERSION }).getByRole('button', { name: 'Deploy', exact: true }).click();
const iframe = page.frameLocator('#upgrade-service-iframe');
await expect(iframe.locator('h3')).toContainText('The First Config Group', { timeout: 20 * 1000 });
await expect(iframe.locator('h3')).toContainText('The First Config Group', { timeout: 60 * 1000 }); // can take time to download the kots binary
await expect(iframe.locator('input[type="text"]')).toHaveValue('initial-hostname.com');
await iframe.locator('input[type="text"]').click();
await iframe.locator('input[type="text"]').fill('updated-hostname.com');
48 changes: 48 additions & 0 deletions (new file; path not shown in this view)
@@ -0,0 +1,48 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: embedded-cluster-lam-service-config
labels:
troubleshoot.sh/kind: support-bundle
{{- with (include "embedded-cluster-operator.labels" $ | fromYaml) }}
{{- toYaml . | nindent 4 }}
{{- end }}
data:
support-bundle-spec: |
apiVersion: troubleshoot.sh/v1beta2
kind: SupportBundle
metadata:
name: embedded-cluster-lam-service-config
labels:
troubleshoot.sh/kind: support-bundle
spec:
collectors:
- runDaemonSet:
name: "local-artifact-mirror-service-config"
namespace: embedded-cluster
podSpec:
containers:
- image: {{ .Values.utilsImage }}
imagePullPolicy: Always
args: ["chroot","/host","cat","/etc/systemd/system/local-artifact-mirror.service.d/embedded-cluster.conf"]
name: debugger
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /host
name: host-root
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostIPC: true
hostNetwork: true
hostPID: true
securityContext:
runAsUser: 0
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /
type: ""
name: host-root
5 changes: 0 additions & 5 deletions operator/deploy/melange.tmpl.yaml
@@ -24,11 +24,6 @@ pipeline:
- runs: |
set -exuo pipefail
# remove a 'v' prefix from the version if it exists
if [[ ${VERSION:0:1} == "v" ]]; then
export VERSION=${VERSION:1}
fi
make -C operator build
cp operator/bin/manager "${{targets.contextdir}}/manager"
- uses: strip
2 changes: 1 addition & 1 deletion operator/pkg/charts/charts.go
@@ -101,7 +101,7 @@ func generateHelmConfigs(ctx context.Context, in *clusterv1beta1.Installation, c
// those values depend on the sha256 of the compiled binary itself
embeddedclusteroperator.Metadata.Images = oi
embeddedclusteroperator.Metadata.Location = operatorLocation
embeddedclusteroperator.Metadata.Version = versions.Version
embeddedclusteroperator.Metadata.Version = strings.TrimPrefix(versions.Version, "v")
embeddedclusteroperator.Render()

migrationStatus := k8sutil.CheckConditionStatus(in.Status, registry.RegistryMigrationStatusConditionType)
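
Together with the melange.tmpl.yaml deletion above, this change appears to move the 'v' prefix handling from the package build script into the chart rendering code. strings.TrimPrefix only strips the prefix when it is present, so prefixed and unprefixed versions (as exercised by the updated charts_test.go values below) render the same chart version. A small standalone illustration, using example version strings:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// TrimPrefix removes a leading "v" only when present, mirroring the
	// `if [[ ${VERSION:0:1} == "v" ]]` guard dropped from the melange pipeline.
	for _, v := range []string{"v1.2.3-operator", "1.2.3-operator"} {
		fmt.Println(strings.TrimPrefix(v, "v")) // prints "1.2.3-operator" both times
	}
}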
14 changes: 7 additions & 7 deletions operator/pkg/charts/charts_test.go
@@ -62,7 +62,7 @@ zfs-localpv:
const test_operatorValues = `embeddedBinaryName: test-binary-name
embeddedClusterID: e79f0701-67f3-4abf-a672-42a1f3ed231b
embeddedClusterK0sVersion: 0.0.0
embeddedClusterVersion: 1.2.3-operator
embeddedClusterVersion: v1.2.3-operator
global:
labels:
replicated.com/disaster-recovery: infra
@@ -77,7 +77,7 @@ utilsImage: abc-repo/ec-utils:latest-amd64@sha256:92dec6e167ff57b35953da389c2f62
const test_proxyOperatorValues = `embeddedBinaryName: test-binary-name
embeddedClusterID: e79f0701-67f3-4abf-a672-42a1f3ed231b
embeddedClusterK0sVersion: 0.0.0
embeddedClusterVersion: 1.2.3-operator
embeddedClusterVersion: v1.2.3-operator
extraEnv:
- name: HTTP_PROXY
value: http://proxy
@@ -112,7 +112,7 @@ utilsImage: abc-repo/ec-utils:latest-amd64@sha256:92dec6e167ff57b35953da389c2f62
`

const test_onlineAdminConsoleValues = `embeddedClusterID: e79f0701-67f3-4abf-a672-42a1f3ed231b
embeddedClusterVersion: 1.2.3-operator
embeddedClusterVersion: v1.2.3-operator
images:
kotsadm: ':'
kurlProxy: ':'
@@ -166,7 +166,7 @@ service:
`

const test_airgapAdminConsoleValues = `embeddedClusterID: e79f0701-67f3-4abf-a672-42a1f3ed231b
embeddedClusterVersion: 1.2.3-operator
embeddedClusterVersion: v1.2.3-operator
images:
kotsadm: ':'
kurlProxy: ':'
@@ -193,7 +193,7 @@ service:
`

const test_airgapHAAdminConsoleValues = `embeddedClusterID: e79f0701-67f3-4abf-a672-42a1f3ed231b
embeddedClusterVersion: 1.2.3-operator
embeddedClusterVersion: v1.2.3-operator
images:
kotsadm: ':'
kurlProxy: ':'
@@ -220,7 +220,7 @@ service:
`

const test_proxyAdminConsoleValues = `embeddedClusterID: e79f0701-67f3-4abf-a672-42a1f3ed231b
embeddedClusterVersion: 1.2.3-operator
embeddedClusterVersion: v1.2.3-operator
extraEnv:
- name: HTTP_PROXY
value: http://proxy
@@ -457,7 +457,7 @@ func test_replaceAddonMeta() {
embeddedclusteroperator.Metadata = release.AddonMetadata{
Location: "oci://proxy.replicated.com/anonymous/registry.replicated.com/library/embedded-cluster-operator",
}
versions.Version = "1.2.3-operator" // This is not great, we use this to override the version of the operator chart
versions.Version = "v1.2.3-operator" // This is not great, we use this to override the version of the operator chart
// we can't use the version from the metadata because it won't be set in the operator binary
// TODO fix this
