@@ -31,46 +31,21 @@ import ../make-test-python.nix (
         ];
       };
     };
-    # A daemonset that responds 'hello' on port 8000
-    networkTestDaemonset = pkgs.writeText "test.yml" ''
-      apiVersion: apps/v1
-      kind: DaemonSet
-      metadata:
-        name: test
-        labels:
-          name: test
-      spec:
-        selector:
-          matchLabels:
-            name: test
-        template:
-          metadata:
-            labels:
-              name: test
-          spec:
-            containers:
-              - name: test
-                image: test.local/hello:local
-                imagePullPolicy: Never
-                resources:
-                  limits:
-                    memory: 20Mi
-                command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo hello"]
-    '';
     tokenFile = pkgs.writeText "token" "p@s$w0rd";
     agentTokenFile = pkgs.writeText "agent-token" "agentP@s$w0rd";
     # Let flannel use eth1 to enable inter-node communication in tests
-    canalConfig = pkgs.writeText "rke2-canal-config.yaml" ''
-      apiVersion: helm.cattle.io/v1
-      kind: HelmChartConfig
-      metadata:
-        name: rke2-canal
-        namespace: kube-system
-      spec:
-        valuesContent: |-
-          flannel:
-            iface: "eth1"
-    '';
+    canalConfig = {
+      apiVersion = "helm.cattle.io/v1";
+      kind = "HelmChartConfig";
+      metadata = {
+        name = "rke2-canal";
+        namespace = "kube-system";
+      };
+      # spec.valuesContent needs to be a string, either JSON or YAML
+      spec.valuesContent = builtins.toJSON {
+        flannel.iface = "eth1";
+      };
+    };
   in
   {
     name = "${rke2.name}-multi-node";
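
The daemonset above pulls the image test.local/hello:local with imagePullPolicy: Never, so the archive has to be preloaded on every node; the actual definition of that image sits above this excerpt. A minimal sketch of what such an image could look like, assuming pkgs.dockerTools.buildImage (the binding name and the buildEnv contents are illustrative, not the test's real code):

    # Hypothetical sketch: a local-only image that answers 'hello' on port 8000.
    helloImage = pkgs.dockerTools.buildImage {
      name = "test.local/hello"; # matches the image reference in the daemonset
      tag = "local";
      copyToRoot = pkgs.buildEnv {
        name = "hello-root"; # puts socat and echo (coreutils) on PATH
        paths = [
          pkgs.coreutils
          pkgs.socat
        ];
      };
      # Same command the daemonset runs: fork per connection, reply with "hello".
      config.Cmd = [ "socat" "TCP4-LISTEN:8000,fork" "EXEC:echo hello" ];
    };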
@@ -85,23 +60,6 @@ import ../make-test-python.nix (
           ...
         }:
         {
-          # Setup image archives to be imported by rke2
-          systemd.tmpfiles.settings."10-rke2" = {
-            "/var/lib/rancher/rke2/agent/images/rke2-images-core.tar.zst" = {
-              "L+".argument = "${coreImages}";
-            };
-            "/var/lib/rancher/rke2/agent/images/rke2-images-canal.tar.zst" = {
-              "L+".argument = "${canalImages}";
-            };
-            "/var/lib/rancher/rke2/agent/images/hello.tar.zst" = {
-              "L+".argument = "${helloImage}";
-            };
-            # Copy the canal config so that rke2 can write the remaining default values to it
-            "/var/lib/rancher/rke2/server/manifests/rke2-canal-config.yaml" = {
-              "C".argument = "${canalConfig}";
-            };
-          };
-
           # Canal CNI with VXLAN
           networking.firewall.allowedUDPPorts = [ 8472 ];
           networking.firewall.allowedTCPPorts = [
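
Two different tmpfiles entry types do the work in the removed block: "L+" force-creates a symlink to the store path, replacing whatever already sits at the destination, while "C" copies the source only if the destination does not exist yet, which is what lets rke2 write the remaining default values into its copy of the canal config. A minimal sketch of the same mechanism, with hypothetical paths and bindings (extraImages and extraManifest are not from the test):

    systemd.tmpfiles.settings."10-example" = {
      # "L+": symlink the image archive straight out of the Nix store.
      "/var/lib/rancher/rke2/agent/images/extra.tar.zst"."L+".argument = "${extraImages}";
      # "C": copy once, so the service may rewrite its copy in place later.
      "/var/lib/rancher/rke2/server/manifests/extra.yaml"."C".argument = "${extraManifest}";
    };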
@@ -134,6 +92,41 @@ import ../make-test-python.nix (
             "rke2-snapshot-controller-crd"
             "rke2-snapshot-validation-webhook"
           ];
+          images = [
+            coreImages
+            canalImages
+            helloImage
+          ];
+          manifests = {
+            canal-config.content = canalConfig;
+            # A daemonset that responds 'hello' on port 8000
+            network-test.content = {
+              apiVersion = "apps/v1";
+              kind = "DaemonSet";
+              metadata = {
+                name = "test";
+                labels.name = "test";
+              };
+              spec = {
+                selector.matchLabels.name = "test";
+                template = {
+                  metadata.labels.name = "test";
+                  spec.containers = [
+                    {
+                      name = "hello";
+                      image = "${helloImage.imageName}:${helloImage.imageTag}";
+                      imagePullPolicy = "Never";
+                      command = [
+                        "socat"
+                        "TCP4-LISTEN:8000,fork"
+                        "EXEC:echo hello"
+                      ];
+                    }
+                  ];
+                };
+              };
+            };
+          };
         };
       };

|
||||
...
|
||||
}:
|
||||
{
|
||||
# Setup image archives to be imported by rke2
|
||||
systemd.tmpfiles.settings."10-rke2" = {
|
||||
"/var/lib/rancher/rke2/agent/images/rke2-images-core.linux-amd64.tar.zst" = {
|
||||
"L+".argument = "${coreImages}";
|
||||
};
|
||||
"/var/lib/rancher/rke2/agent/images/rke2-images-canal.linux-amd64.tar.zst" = {
|
||||
"L+".argument = "${canalImages}";
|
||||
};
|
||||
"/var/lib/rancher/rke2/agent/images/hello.tar.zst" = {
|
||||
"L+".argument = "${helloImage}";
|
||||
};
|
||||
"/var/lib/rancher/rke2/server/manifests/rke2-canal-config.yaml" = {
|
||||
"C".argument = "${canalConfig}";
|
||||
};
|
||||
};
|
||||
|
||||
# Canal CNI health checks
|
||||
networking.firewall.allowedTCPPorts = [ 9099 ];
|
||||
# Canal CNI with VXLAN
|
||||
@@ -177,6 +154,12 @@ import ../make-test-python.nix (
             tokenFile = agentTokenFile;
             serverAddr = "https://${nodes.server.networking.primaryIPAddress}:9345";
             nodeIP = config.networking.primaryIPAddress;
+            manifests.canal-config.content = canalConfig;
+            images = [
+              coreImages
+              canalImages
+              helloImage
+            ];
           };
         };
       };
@@ -199,8 +182,7 @@ import ../make-test-python.nix (
       server.succeed("${kubectl} cluster-info")
       server.wait_until_succeeds("${kubectl} get serviceaccount default")
 
-      # Now create a pod on each node via a daemonset and verify they can talk to each other.
-      server.succeed("${kubectl} apply -f ${networkTestDaemonset}")
+      # Now verify that the daemonset pods can talk to each other.
       server.wait_until_succeeds(
          f'[ "$(${kubectl} get ds test -o json | ${jq} .status.numberReady)" -eq {len(machines)} ]'
       )
@@ -217,9 +199,9 @@ import ../make-test-python.nix (
           server.wait_until_succeeds(f"ping -c 1 {pod_ip}", timeout=5)
           agent.wait_until_succeeds(f"ping -c 1 {pod_ip}", timeout=5)
       # Verify the server can exec into the pod
-      # for pod in pods:
-      #     resp = server.succeed(f"${kubectl} exec {pod} -- socat TCP:{pod_ip}:8000 -")
-      #     assert resp.strip() == "hello", f"Unexpected response from hello daemonset: {resp.strip()}"
+      for pod in pods:
+          resp = server.succeed(f"${kubectl} exec {pod} -- socat TCP:{pod_ip}:8000 -").strip()
+          assert resp == "hello", f"Unexpected response from hello daemonset: {resp}"
     '';
  }
 )
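
Throughout the test script, ${kubectl} and ${jq} are Nix interpolations bound earlier in the file, outside this excerpt. A plausible shape for them, assuming the standard rke2 kubeconfig path (the exact definitions are not shown here):

    kubectl = "${pkgs.kubectl}/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml";
    jq = "${pkgs.jq}/bin/jq";

The hunks that follow belong to the companion single-node test (name = "${rke2.name}-single-node").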
@@ -26,19 +26,13 @@ import ../make-test-python.nix (
       copyToRoot = pkgs.hello;
       config.Entrypoint = [ "${pkgs.hello}/bin/hello" ];
     };
-    testJobYaml = pkgs.writeText "test.yaml" ''
-      apiVersion: batch/v1
-      kind: Job
-      metadata:
-        name: test
-      spec:
-        template:
-          spec:
-            containers:
-              - name: test
-                image: "test.local/hello:local"
-            restartPolicy: Never
-    '';
+    # A ConfigMap in regular yaml format
+    cmFile = (pkgs.formats.yaml { }).generate "rke2-manifest-from-file.yaml" {
+      apiVersion = "v1";
+      kind = "ConfigMap";
+      metadata.name = "from-file";
+      data.username = "foo-file";
+    };
   in
   {
     name = "${rke2.name}-single-node";
@@ -51,19 +45,6 @@ import ../make-test-python.nix (
       ...
     }:
     {
-      # Setup image archives to be imported by rke2
-      systemd.tmpfiles.settings."10-rke2" = {
-        "/var/lib/rancher/rke2/agent/images/rke2-images-core.tar.zst" = {
-          "L+".argument = "${coreImages}";
-        };
-        "/var/lib/rancher/rke2/agent/images/rke2-images-canal.tar.zst" = {
-          "L+".argument = "${canalImages}";
-        };
-        "/var/lib/rancher/rke2/agent/images/hello.tar.zst" = {
-          "L+".argument = "${helloImage}";
-        };
-      };
-
       # RKE2 needs more resources than the default
       virtualisation.cores = 4;
       virtualisation.memorySize = 4096;
@@ -84,6 +65,47 @@ import ../make-test-python.nix (
         "rke2-snapshot-controller-crd"
         "rke2-snapshot-validation-webhook"
       ];
+      images = [
+        coreImages
+        canalImages
+        helloImage
+      ];
+      manifests = {
+        test-job.content = {
+          apiVersion = "batch/v1";
+          kind = "Job";
+          metadata.name = "test";
+          spec.template.spec = {
+            containers = [
+              {
+                name = "hello";
+                image = "${helloImage.imageName}:${helloImage.imageTag}";
+              }
+            ];
+            restartPolicy = "Never";
+          };
+        };
+        disabled = {
+          enable = false;
+          content = {
+            apiVersion = "v1";
+            kind = "ConfigMap";
+            metadata.name = "disabled";
+            data.username = "foo";
+          };
+        };
+        from-file.source = "${cmFile}";
+        custom-target = {
+          enable = true;
+          target = "my-manifest.json";
+          content = {
+            apiVersion = "v1";
+            kind = "ConfigMap";
+            metadata.name = "custom-target";
+            data.username = "foo-custom";
+          };
+        };
+      };
     };
   };

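Taken together, the four entries above appear to cover the whole surface of a manifest entry: inline content, an on/off switch, a pre-rendered source file, and an overridable target file name. A compact sketch of one entry using all of these knobs at once; the entry name and values are illustrative, and the option shape (including services.rke2 as the option path) is inferred from the test, not taken from the module's documentation:

    services.rke2.manifests.example = {
      enable = true;                # false would keep the file from being installed
      target = "example.yaml";      # file name under the server manifests directory
      # Inline attrset, serialized to a manifest file by the module ...
      content = {
        apiVersion = "v1";
        kind = "ConfigMap";
        metadata.name = "example";
        data.hello = "world";
      };
      # ... or, presumably instead of content, a pre-rendered file:
      # source = ./example.yaml;
    };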
@@ -95,14 +117,28 @@ import ../make-test-python.nix (
     ''
       start_all()
 
-      machine.wait_for_unit("rke2-server")
-      machine.succeed("${kubectl} cluster-info")
+      with subtest("Start cluster"):
+          machine.wait_for_unit("rke2-server")
+          machine.succeed("${kubectl} cluster-info")
+          machine.wait_until_succeeds("${kubectl} get serviceaccount default")
 
-      machine.wait_until_succeeds("${kubectl} get serviceaccount default")
-      machine.succeed("${kubectl} apply -f ${testJobYaml}")
-      machine.wait_until_succeeds("${kubectl} wait --for 'condition=complete' job/test")
-      output = machine.succeed("${kubectl} logs -l batch.kubernetes.io/job-name=test")
-      assert output.rstrip() == "Hello, world!", f"unexpected output of test job: {output}"
+      with subtest("Test job completes successfully"):
+          machine.wait_until_succeeds("${kubectl} wait --for 'condition=complete' job/test")
+          output = machine.succeed("${kubectl} logs -l batch.kubernetes.io/job-name=test").rstrip()
+          assert output == "Hello, world!", f"unexpected output of test job: {output}"
+
+      with subtest("ConfigMap from-file exists"):
+          output = machine.succeed("${kubectl} get cm from-file -o=jsonpath='{.data.username}'").rstrip()
+          assert output == "foo-file", f"Unexpected data in Configmap from-file: {output}"
+
+      with subtest("ConfigMap custom-target exists"):
+          # Check that the file exists at the custom target path
+          machine.succeed("ls /var/lib/rancher/rke2/server/manifests/my-manifest.json")
+          output = machine.succeed("${kubectl} get cm custom-target -o=jsonpath='{.data.username}'").rstrip()
+          assert output == "foo-custom", f"Unexpected data in Configmap custom-target: {output}"
+
+      with subtest("Disabled ConfigMap doesn't exist"):
+          machine.fail("${kubectl} get cm disabled")
     '';
  }
 )