nixos/k3s: use improved error reporting and assertions
Additionally, mark each testScript as Python (so editors highlight the embedded code) and remove the explicit shutdowns at the end of the tests; the test driver shuts all machines down automatically when the script finishes.
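
The motivation, in short: a bare `assert` only prints whatever message is interpolated by hand, while `t` (a `unittest.TestCase`-style helper that the NixOS test driver exposes to test scripts) reports both the expected and the actual value on failure. A minimal standalone sketch of the difference; the `hello_output` value here is illustrative:

    import unittest

    # Stand-in for the `t` object provided by the NixOS test driver.
    t = unittest.TestCase()

    hello_output = "Hello, world!\n"

    # Bare assert: on failure, only the hand-written f-string is shown.
    assert hello_output.rstrip() == "Hello, world!", f"unexpected output of hello job: {hello_output}"

    # t.assertEqual: on failure, unittest reports expected vs. actual
    # automatically, so no manual interpolation is needed.
    t.assertEqual(hello_output.rstrip(), "Hello, world!", "unexpected output of hello job")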
@@ -135,7 +135,7 @@ import ../make-test-python.nix (
       machine.succeed("test -e /var/lib/rancher/k3s/server/manifests/advanced.yaml")
       # check that the timeout is set correctly, select only the first doc in advanced.yaml
       advancedManifest = json.loads(machine.succeed("yq -o json 'select(di == 0)' /var/lib/rancher/k3s/server/manifests/advanced.yaml"))
-      assert advancedManifest["spec"]["timeout"] == "69s", f"unexpected value for spec.timeout: {advancedManifest["spec"]["timeout"]}"
+      t.assertEqual(advancedManifest["spec"]["timeout"], "69s", "unexpected value for spec.timeout")
       # wait for test jobs to complete
       machine.wait_until_succeeds("kubectl wait --for=condition=complete job/hello", timeout=180)
       machine.wait_until_succeeds("kubectl wait --for=condition=complete job/values-file", timeout=180)
@@ -145,9 +145,9 @@ import ../make-test-python.nix (
       values_file_output = machine.succeed("kubectl logs -l batch.kubernetes.io/job-name=values-file")
       advanced_output = machine.succeed("kubectl -n test logs -l batch.kubernetes.io/job-name=advanced")
       # strip the output to remove trailing whitespaces
-      assert hello_output.rstrip() == "Hello, world!", f"unexpected output of hello job: {hello_output}"
-      assert values_file_output.rstrip() == "Hello, file!", f"unexpected output of values file job: {values_file_output}"
-      assert advanced_output.rstrip() == "advanced hello", f"unexpected output of advanced job: {advanced_output}"
+      t.assertEqual(hello_output.rstrip(), "Hello, world!", "unexpected output of hello job")
+      t.assertEqual(values_file_output.rstrip(), "Hello, file!", "unexpected output of values file job")
+      t.assertEqual(advanced_output.rstrip(), "advanced hello", "unexpected output of advanced job")
       # wait for bundled traefik deployment
       machine.wait_until_succeeds("kubectl -n kube-system rollout status deployment traefik", timeout=180)
     '';

@@ -99,26 +99,25 @@ import ../make-test-python.nix (
       };
     };

-    testScript = ''
-      start_all()
+    testScript = # python
+      ''
+        start_all()

-      machine.wait_for_unit("k3s")
-      # check existence of the manifest files
-      machine.fail("ls /var/lib/rancher/k3s/server/manifests/absent.yaml")
-      machine.succeed("ls /var/lib/rancher/k3s/server/manifests/foo-namespace.yaml")
-      machine.succeed("ls /var/lib/rancher/k3s/server/manifests/hello.yaml")
+        machine.wait_for_unit("k3s")
+        # check existence of the manifest files
+        machine.fail("ls /var/lib/rancher/k3s/server/manifests/absent.yaml")
+        machine.succeed("ls /var/lib/rancher/k3s/server/manifests/foo-namespace.yaml")
+        machine.succeed("ls /var/lib/rancher/k3s/server/manifests/hello.yaml")

-      # check if container images got imported
-      machine.wait_until_succeeds("crictl img | grep 'test\.local/pause'")
-      machine.wait_until_succeeds("crictl img | grep 'test\.local/hello'")
+        # check if container images got imported
+        machine.wait_until_succeeds("crictl img | grep 'test\.local/pause'")
+        machine.wait_until_succeeds("crictl img | grep 'test\.local/hello'")

-      # check if resources of manifests got created
-      machine.wait_until_succeeds("kubectl get ns foo")
-      machine.wait_until_succeeds("kubectl wait --for=condition=complete job/hello")
-      machine.fail("kubectl get ns absent")
-
-      machine.shutdown()
-    '';
+        # check if resources of manifests got created
+        machine.wait_until_succeeds("kubectl get ns foo")
+        machine.wait_until_succeeds("kubectl wait --for=condition=complete job/hello")
+        machine.fail("kubectl get ns absent")
+      '';

     meta.maintainers = lib.teams.k3s.members;
 }

@@ -40,18 +40,19 @@ import ../make-test-python.nix (
       };
     };

-    testScript = ''
-      start_all()
-      machine.wait_for_unit("k3s")
-      # wait until the node is ready
-      machine.wait_until_succeeds(r"""kubectl get node ${nodeName} -ojson | jq -e '.status.conditions[] | select(.type == "Ready") | .status == "True"'""")
-      # test whether the config template file contains the magic comment
-      out = machine.succeed("cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl")
-      assert "MAGIC COMMENT" in out, "the containerd config template does not contain the magic comment"
-      # test whether the config file contains the magic comment
-      out = machine.succeed("cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml")
-      assert "MAGIC COMMENT" in out, "the containerd config does not contain the magic comment"
-    '';
+    testScript = # python
+      ''
+        start_all()
+        machine.wait_for_unit("k3s")
+        # wait until the node is ready
+        machine.wait_until_succeeds(r"""kubectl get node ${nodeName} -ojson | jq -e '.status.conditions[] | select(.type == "Ready") | .status == "True"'""")
+        # test whether the config template file contains the magic comment
+        out = machine.succeed("cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl")
+        t.assertIn("MAGIC COMMENT", out, "the containerd config template does not contain the magic comment")
+        # test whether the config file contains the magic comment
+        out = machine.succeed("cat /var/lib/rancher/k3s/agent/etc/containerd/config.toml")
+        t.assertIn("MAGIC COMMENT", out, "the containerd config does not contain the magic comment")
+      '';

     meta.maintainers = lib.teams.k3s.members;
 }

@@ -82,46 +82,43 @@ import ../make-test-python.nix (
       };
     };

-    testScript = ''
-      with subtest("should start etcd"):
-        etcd.start()
-        etcd.wait_for_unit("etcd.service")
+    testScript = # python
+      ''
+        with subtest("should start etcd"):
+          etcd.start()
+          etcd.wait_for_unit("etcd.service")

-      with subtest("should wait for etcdctl endpoint status to succeed"):
-        etcd.wait_until_succeeds("etcdctl endpoint status")
+        with subtest("should wait for etcdctl endpoint status to succeed"):
+          etcd.wait_until_succeeds("etcdctl endpoint status")

-      with subtest("should wait for etcdctl endpoint health to succeed"):
-        etcd.wait_until_succeeds("etcdctl endpoint health")
+        with subtest("should wait for etcdctl endpoint health to succeed"):
+          etcd.wait_until_succeeds("etcdctl endpoint health")

-      with subtest("should start k3s"):
-        k3s.start()
-        k3s.wait_for_unit("k3s")
+        with subtest("should start k3s"):
+          k3s.start()
+          k3s.wait_for_unit("k3s")

-      with subtest("should test if kubectl works"):
-        k3s.wait_until_succeeds("k3s kubectl get node")
+        with subtest("should test if kubectl works"):
+          k3s.wait_until_succeeds("k3s kubectl get node")

-      with subtest("should wait for service account to show up; takes a sec"):
-        k3s.wait_until_succeeds("k3s kubectl get serviceaccount default")
+        with subtest("should wait for service account to show up; takes a sec"):
+          k3s.wait_until_succeeds("k3s kubectl get serviceaccount default")

-      with subtest("should create a sample secret object"):
-        k3s.succeed("k3s kubectl create secret generic nixossecret --from-literal thesecret=abacadabra")
+        with subtest("should create a sample secret object"):
+          k3s.succeed("k3s kubectl create secret generic nixossecret --from-literal thesecret=abacadabra")

-      with subtest("should check if secret is correct"):
-        k3s.wait_until_succeeds("[[ $(kubectl get secrets nixossecret -o json | jq -r .data.thesecret | base64 -d) == abacadabra ]]")
+        with subtest("should check if secret is correct"):
+          k3s.wait_until_succeeds("[[ $(kubectl get secrets nixossecret -o json | jq -r .data.thesecret | base64 -d) == abacadabra ]]")

-      with subtest("should have a secret in database"):
-        etcd.wait_until_succeeds("[[ $(etcdctl get /registry/secrets/default/nixossecret | head -c1 | wc -c) -ne 0 ]]")
+        with subtest("should have a secret in database"):
+          etcd.wait_until_succeeds("[[ $(etcdctl get /registry/secrets/default/nixossecret | head -c1 | wc -c) -ne 0 ]]")

-      with subtest("should delete the secret"):
-        k3s.succeed("k3s kubectl delete secret nixossecret")
+        with subtest("should delete the secret"):
+          k3s.succeed("k3s kubectl delete secret nixossecret")

-      with subtest("should not have a secret in database"):
-        etcd.wait_until_fails("[[ $(etcdctl get /registry/secrets/default/nixossecret | head -c1 | wc -c) -ne 0 ]]")
-
-      with subtest("should shutdown k3s and etcd"):
-        k3s.shutdown()
-        etcd.shutdown()
-    '';
+        with subtest("should not have a secret in database"):
+          etcd.wait_until_fails("[[ $(etcdctl get /registry/secrets/default/nixossecret | head -c1 | wc -c) -ne 0 ]]")
+      '';

     meta.maintainers = etcd.meta.maintainers ++ lib.teams.k3s.members;
 }

@@ -47,33 +47,29 @@ import ../make-test-python.nix (
       };
     };

-    testScript = ''
-      import json
+    testScript = # python
+      ''
+        import json

-      start_all()
-      machine.wait_for_unit("k3s")
-      # wait until the node is ready
-      machine.wait_until_succeeds(r"""kubectl get node ${nodeName} -ojson | jq -e '.status.conditions[] | select(.type == "Ready") | .status == "True"'""")
-      # test whether the kubelet registered an inhibitor lock
-      machine.succeed("systemd-inhibit --list --no-legend | grep \"kubelet.*k3s-server.*shutdown\"")
-      # run kubectl proxy in the background, close stdout through redirection to not wait for the command to finish
-      machine.execute("kubectl proxy --address 127.0.0.1 --port=8001 >&2 &")
-      machine.wait_until_succeeds("nc -z 127.0.0.1 8001")
-      # get the kubeletconfig
-      kubelet_config = json.loads(machine.succeed("curl http://127.0.0.1:8001/api/v1/nodes/${nodeName}/proxy/configz | jq '.kubeletconfig'"))
+        start_all()
+        machine.wait_for_unit("k3s")
+        # wait until the node is ready
+        machine.wait_until_succeeds(r"""kubectl get node ${nodeName} -ojson | jq -e '.status.conditions[] | select(.type == "Ready") | .status == "True"'""")
+        # test whether the kubelet registered an inhibitor lock
+        machine.succeed("systemd-inhibit --list --no-legend | grep \"kubelet.*k3s-server.*shutdown\"")
+        # run kubectl proxy in the background, close stdout through redirection to not wait for the command to finish
+        machine.execute("kubectl proxy --address 127.0.0.1 --port=8001 >&2 &")
+        machine.wait_until_succeeds("nc -z 127.0.0.1 8001")
+        # get the kubeletconfig
+        kubelet_config = json.loads(machine.succeed("curl http://127.0.0.1:8001/api/v1/nodes/${nodeName}/proxy/configz | jq '.kubeletconfig'"))

-      with subtest("Kubelet config values are set correctly"):
-        assert kubelet_config["shutdownGracePeriod"] == "${shutdownGracePeriod}", \
-          f"unexpected value for shutdownGracePeriod: {kubelet_config["shutdownGracePeriod"]}"
-        assert kubelet_config["shutdownGracePeriodCriticalPods"] == "${shutdownGracePeriodCriticalPods}", \
-          f"unexpected value for shutdownGracePeriodCriticalPods: {kubelet_config["shutdownGracePeriodCriticalPods"]}"
-        assert kubelet_config["podsPerCore"] == ${toString podsPerCore}, \
-          f"unexpected value for podsPerCore: {kubelet_config["podsPerCore"]}"
-        assert kubelet_config["memoryThrottlingFactor"] == ${toString memoryThrottlingFactor}, \
-          f"unexpected value for memoryThrottlingFactor: {kubelet_config["memoryThrottlingFactor"]}"
-        assert kubelet_config["containerLogMaxSize"] == "${containerLogMaxSize}", \
-          f"unexpected value for containerLogMaxSize: {kubelet_config["containerLogMaxSize"]}"
-    '';
+        with subtest("Kubelet config values are set correctly"):
+          t.assertEqual(kubelet_config["shutdownGracePeriod"], "${shutdownGracePeriod}")
+          t.assertEqual(kubelet_config["shutdownGracePeriodCriticalPods"], "${shutdownGracePeriodCriticalPods}")
+          t.assertEqual(kubelet_config["podsPerCore"], ${toString podsPerCore})
+          t.assertEqual(kubelet_config["memoryThrottlingFactor"], ${toString memoryThrottlingFactor})
+          t.assertEqual(kubelet_config["containerLogMaxSize"], "${containerLogMaxSize}")
+      '';

     meta.maintainers = lib.teams.k3s.members;
 }

@@ -192,7 +192,7 @@ import ../make-test-python.nix (
       # Verify the pods can talk to each other
       for pod in pods:
           resp = server.succeed(f"k3s kubectl exec {pod} -- socat TCP:{pod_ip}:8000 -")
-          assert resp.strip() == "server"
+          t.assertEqual(resp.strip(), "server")
     '';

     meta.maintainers = lib.teams.k3s.members;

@@ -76,40 +76,39 @@ import ../make-test-python.nix (
      };
    };

-    testScript = ''
-      start_all()
+    testScript = # python
+      ''
+        start_all()

-      machine.wait_for_unit("k3s")
-      machine.succeed("kubectl cluster-info")
-      machine.fail("sudo -u noprivs kubectl cluster-info")
-      machine.succeed("k3s check-config")
-      machine.succeed(
-        "${pauseImage} | ctr image import -"
-      )
+        machine.wait_for_unit("k3s")
+        machine.succeed("kubectl cluster-info")
+        machine.fail("sudo -u noprivs kubectl cluster-info")
+        machine.succeed("k3s check-config")
+        machine.succeed(
+          "${pauseImage} | ctr image import -"
+        )

-      # Also wait for our service account to show up; it takes a sec
-      machine.wait_until_succeeds("kubectl get serviceaccount default")
-      machine.succeed("kubectl apply -f ${testPodYaml}")
-      machine.succeed("kubectl wait --for 'condition=Ready' pod/test")
-      machine.succeed("kubectl delete -f ${testPodYaml}")
+        # Also wait for our service account to show up; it takes a sec
+        machine.wait_until_succeeds("kubectl get serviceaccount default")
+        machine.succeed("kubectl apply -f ${testPodYaml}")
+        machine.succeed("kubectl wait --for 'condition=Ready' pod/test")
+        machine.succeed("kubectl delete -f ${testPodYaml}")

-      # regression test for #176445
-      machine.fail("journalctl -o cat -u k3s.service | grep 'ipset utility not found'")
+        # regression test for #176445
+        machine.fail("journalctl -o cat -u k3s.service | grep 'ipset utility not found'")

-      with subtest("Run k3s-killall"):
-        # Call the killall script with a clean path to assert that
-        # all required commands are wrapped
-        output = machine.succeed("PATH= ${k3s}/bin/k3s-killall.sh 2>&1 | tee /dev/stderr")
-        assert "command not found" not in output, "killall script contains unknown command"
+        with subtest("Run k3s-killall"):
+          # Call the killall script with a clean path to assert that
+          # all required commands are wrapped
+          output = machine.succeed("PATH= ${k3s}/bin/k3s-killall.sh 2>&1 | tee /dev/stderr")
+          t.assertNotIn("command not found", output, "killall script contains unknown command")

-      # Check that killall cleaned up properly
-      machine.fail("systemctl is-active k3s.service")
-      machine.fail("systemctl list-units | grep containerd")
-      machine.fail("ip link show | awk -F': ' '{print $2}' | grep -e flannel -e cni0")
-      machine.fail("ip netns show | grep cni-")
-
-      machine.shutdown()
-    '';
+        # Check that killall cleaned up properly
+        machine.fail("systemctl is-active k3s.service")
+        machine.fail("systemctl list-units | grep containerd")
+        machine.fail("ip link show | awk -F': ' '{print $2}' | grep -e flannel -e cni0")
+        machine.fail("ip netns show | grep cni-")
+      '';

     meta.maintainers = lib.teams.k3s.members;
 }