Bump go 1.22.2->1.22.5 and add E2E tests #639
e2e/README.md (new file):

```markdown
# End-to-end tests for konnectivity-network-proxy running in a kind cluster

These e2e tests deploy the KNP agent and server to a local [kind](https://kind.sigs.k8s.io/)
cluster to verify their functionality.

These can be run automatically using `make e2e-test`.

## Setup in `main_test.go`

Before any of the actual tests are run, the `TestMain()` function
in `main_test.go` performs the following setup steps:

- Spin up a new kind cluster with the node image provided by the `-kind-image` flag.
- Sideload the KNP agent and server images provided with `-agent-image` and `-server-image` into the cluster.
- Deploy the necessary RBAC and service templates for both the KNP agent and server (see `renderAndApplyManifests`).

## The tests

### `static_count_test.go`

These tests deploy the KNP servers and agents to the previously created kind cluster.
After the deployments are up, the tests check that both the agent and server report
the correct number of connections on their metrics endpoints.
```
Review thread on this file:
- Reviewer: Could you add a short README.md to the e2e directory that goes over what main_test.go sets up (RBAC and service) and what each specific test is expected to set up?
- Author: On it!
- Author: Done, could you take a look?
- Reviewer: LGTM!
e2e/main_test.go (new file):

```go
package e2e

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"log"
	"os"
	"path"
	"testing"
	"text/template"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/e2e-framework/klient/wait"
	"sigs.k8s.io/e2e-framework/klient/wait/conditions"
	"sigs.k8s.io/e2e-framework/pkg/env"
	"sigs.k8s.io/e2e-framework/pkg/envconf"
	"sigs.k8s.io/e2e-framework/pkg/envfuncs"
	"sigs.k8s.io/e2e-framework/support/kind"
)

var (
	testenv        env.Environment
	agentImage     = flag.String("agent-image", "", "The proxy agent's docker image.")
	serverImage    = flag.String("server-image", "", "The proxy server's docker image.")
	kindImage      = flag.String("kind-image", "kindest/node", "Image to use for kind nodes.")
	connectionMode = flag.String("mode", "grpc", "Connection mode to use during e2e tests.")
)

func TestMain(m *testing.M) {
	flag.Parse()
	if *agentImage == "" {
		log.Fatalf("must provide agent image with -agent-image")
	}
	if *serverImage == "" {
		log.Fatalf("must provide server image with -server-image")
	}

	scheme.AddToScheme(scheme.Scheme)

	testenv = env.New()
	kindClusterName := "kind-test"
	kindCluster := kind.NewCluster(kindClusterName).WithOpts(kind.WithImage(*kindImage))

	testenv.Setup(
		envfuncs.CreateCluster(kindCluster, kindClusterName),
		envfuncs.LoadImageToCluster(kindClusterName, *agentImage),
		envfuncs.LoadImageToCluster(kindClusterName, *serverImage),
		renderAndApplyManifests,
	)

	testenv.Finish(envfuncs.DestroyCluster(kindClusterName))

	os.Exit(testenv.Run(m))
}

// renderTemplate renders a template from e2e/templates into a kubernetes object.
// Template paths are relative to e2e/templates.
func renderTemplate(file string, params any) (client.Object, *schema.GroupVersionKind, error) {
	b := &bytes.Buffer{}

	tmp, err := template.ParseFiles(path.Join("templates/", file))
	if err != nil {
		return nil, nil, fmt.Errorf("could not parse template %v: %w", file, err)
	}

	err = tmp.Execute(b, params)
	if err != nil {
		return nil, nil, fmt.Errorf("could not execute template %v: %w", file, err)
	}

	decoder := scheme.Codecs.UniversalDeserializer()

	obj, gvk, err := decoder.Decode(b.Bytes(), nil, nil)
	if err != nil {
		return nil, nil, fmt.Errorf("could not decode rendered yaml into kubernetes object: %w", err)
	}

	return obj.(client.Object), gvk, nil
}

type KeyValue struct {
	Key   string
	Value string
}

type DeploymentConfig struct {
	Replicas int
	Image    string
	Args     []KeyValue
}

func renderAndApplyManifests(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
	client := cfg.Client()

	// Render agent RBAC and Service templates.
	agentServiceAccount, _, err := renderTemplate("agent/serviceaccount.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}
	agentClusterRole, _, err := renderTemplate("agent/clusterrole.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}
	agentClusterRoleBinding, _, err := renderTemplate("agent/clusterrolebinding.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}
	agentService, _, err := renderTemplate("agent/service.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}

	// Submit agent RBAC templates to k8s.
	err = client.Resources().Create(ctx, agentServiceAccount)
	if err != nil {
		return ctx, err
	}
	err = client.Resources().Create(ctx, agentClusterRole)
	if err != nil {
		return ctx, err
	}
	err = client.Resources().Create(ctx, agentClusterRoleBinding)
	if err != nil {
		return ctx, err
	}
	err = client.Resources().Create(ctx, agentService)
	if err != nil {
		return ctx, err
	}

	// Render server RBAC and Service templates.
	serverClusterRoleBinding, _, err := renderTemplate("server/clusterrolebinding.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}
	serverService, _, err := renderTemplate("server/service.yaml", struct{}{})
	if err != nil {
		return ctx, err
	}

	// Submit server templates to k8s.
	err = client.Resources().Create(ctx, serverClusterRoleBinding)
	if err != nil {
		return ctx, err
	}
	err = client.Resources().Create(ctx, serverService)
	if err != nil {
		return ctx, err
	}

	return ctx, nil
}

func deployAndWaitForDeployment(deployment client.Object) func(context.Context, *testing.T, *envconf.Config) context.Context {
	return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
		client := cfg.Client()
		err := client.Resources().Create(ctx, deployment)
		if err != nil {
			t.Fatalf("could not create Deployment: %v", err)
		}

		err = wait.For(
			conditions.New(client.Resources()).DeploymentAvailable(deployment.GetName(), deployment.GetNamespace()),
			wait.WithTimeout(1*time.Minute),
			wait.WithInterval(10*time.Second),
		)
		if err != nil {
			t.Fatalf("waiting for Deployment failed: %v", err)
		}

		return ctx
	}
}
```
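To illustrate how these helpers fit together, here is a minimal sketch of a test that renders a Deployment template and waits for it to become available. This is not code from the PR: the template path `agent/deployment.yaml`, the argument values, and the test name are assumptions for illustration, and it additionally imports `sigs.k8s.io/e2e-framework/pkg/features`.

```go
// Hypothetical sketch, not part of this PR. Assumes a template at
// e2e/templates/agent/deployment.yaml that consumes the DeploymentConfig
// fields defined in main_test.go.
func TestAgentDeploymentSketch(t *testing.T) {
	deployment, _, err := renderTemplate("agent/deployment.yaml", DeploymentConfig{
		Replicas: 2,
		Image:    *agentImage,
		// The flag name below is illustrative.
		Args: []KeyValue{{Key: "--proxy-server-host", Value: "konnectivity-server"}},
	})
	if err != nil {
		t.Fatalf("could not render agent deployment template: %v", err)
	}

	// deployAndWaitForDeployment returns an assess step compatible with the
	// e2e-framework features API.
	feature := features.New("agent deployment").
		Assess("deployment becomes available", deployAndWaitForDeployment(deployment)).
		Feature()

	testenv.Test(t, feature)
}
```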
e2e/static_count_test.go (new file):

```go
package e2e

import (
	"context"
	"fmt"
	"net/http"
	"testing"

	corev1 "k8s.io/api/core/v1"

	"github.com/prometheus/common/expfmt"
	"sigs.k8s.io/e2e-framework/klient/k8s/resources"
	"sigs.k8s.io/e2e-framework/pkg/envconf"
)

func getMetricsGaugeValue(url string, name string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, fmt.Errorf("could not get metrics from url %v: %w", url, err)
	}
	defer resp.Body.Close()

	metricsParser := &expfmt.TextParser{}
	metricsFamilies, err := metricsParser.TextToMetricFamilies(resp.Body)
	if err != nil {
		return 0, fmt.Errorf("could not parse metrics: %w", err)
	}

	metricFamily, exists := metricsFamilies[name]
	if !exists {
		return 0, fmt.Errorf("metric %v does not exist", name)
	}
	metrics := metricFamily.GetMetric()
	if len(metrics) == 0 {
		return 0, fmt.Errorf("metric %v has no samples", name)
	}
	value := int(metrics[0].GetGauge().GetValue())
	return value, nil
}

func assertAgentsAreConnected(expectedConnections int, adminPort int) func(context.Context, *testing.T, *envconf.Config) context.Context {
	return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
		client := cfg.Client()

		agentPods := &corev1.PodList{}
		err := client.Resources().List(ctx, agentPods, resources.WithLabelSelector("k8s-app=konnectivity-agent"))
		if err != nil {
			t.Fatalf("couldn't get agent pods (label selector 'k8s-app=konnectivity-agent'): %v", err)
		}

		for _, agentPod := range agentPods.Items {
			numConnections, err := getMetricsGaugeValue(fmt.Sprintf("http://%v:%v/metrics", agentPod.Status.PodIP, adminPort), "konnectivity_network_proxy_agent_open_server_connections")
			if err != nil {
				t.Fatalf("couldn't get agent metric 'konnectivity_network_proxy_agent_open_server_connections' for pod %v: %v", agentPod.Name, err)
			}

			if numConnections != expectedConnections {
				t.Errorf("incorrect number of connected servers (want: %d, got: %d)", expectedConnections, numConnections)
			}
		}

		return ctx
	}
}

func assertServersAreConnected(expectedConnections int, adminPort int) func(context.Context, *testing.T, *envconf.Config) context.Context {
	return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
		client := cfg.Client()

		serverPods := &corev1.PodList{}
		err := client.Resources().List(ctx, serverPods, resources.WithLabelSelector("k8s-app=konnectivity-server"))
		if err != nil {
			t.Fatalf("couldn't get server pods (label selector 'k8s-app=konnectivity-server'): %v", err)
		}

		for _, serverPod := range serverPods.Items {
			numConnections, err := getMetricsGaugeValue(fmt.Sprintf("http://%v:%v/metrics", serverPod.Status.PodIP, adminPort), "konnectivity_network_proxy_server_ready_backend_connections")
			if err != nil {
				t.Fatalf("couldn't get server metric 'konnectivity_network_proxy_server_ready_backend_connections' for pod %v: %v", serverPod.Name, err)
			}

			if numConnections != expectedConnections {
				t.Errorf("incorrect number of connected agents (want: %d, got: %d)", expectedConnections, numConnections)
			}
		}

		return ctx
	}
}
```
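Following the same pattern, the static-count checks can be chained as assess steps once the deployments are up. A minimal sketch, again not from the PR: the connection counts and the admin ports (8093 for the agent, 8095 for the server) are assumptions.

```go
// Hypothetical composition of the assertions above; counts and ports are
// assumptions. Each step polls the pods' /metrics endpoints through
// getMetricsGaugeValue.
func TestStaticCountSketch(t *testing.T) {
	feature := features.New("static count: 1 server, 1 agent").
		Assess("agents report one connected server", assertAgentsAreConnected(1, 8093)).
		Assess("servers report one connected agent", assertServersAreConnected(1, 8095)).
		Feature()

	testenv.Test(t, feature)
}
```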
Review thread:
- Reviewer: Can you please tell me what this does? Does it not build a new konnectivity server/agent image per PR, which is then used by the e2e tests?
- Author: I was trying to make it so that the image gets built once per workflow run, instead of being rebuilt in each `kind-e2e` step. I'm not sure if that's working, though...
- Author: @aojea Could I get you to take a look at this?
- Author: I was able to fix this by making a new entry in the `Makefile` that doesn't have a dependency on `docker-build`.