nixos/rke2: merge code with nixos/k3s (#447847)
.github/labeler.yml (3 changed lines, vendored)
@@ -261,7 +261,8 @@
   - any:
       - changed-files:
           - any-glob-to-any-file:
-              - nixos/modules/services/cluster/k3s/**/*
+              - nixos/modules/services/cluster/rancher/default.nix
+              - nixos/modules/services/cluster/rancher/k3s.nix
               - nixos/tests/k3s/**/*
               - pkgs/applications/networking/cluster/k3s/**/*
 
@@ -476,6 +476,9 @@ and [release notes for v18](https://goteleport.com/docs/changelog/#1800-070325).
 
 - `services.matter-server` now hosts a debug dashboard on the configured port. Open the port on the firewall with `services.matter-server.openFirewall`.
 
+- `services.k3s` now shares most of its code with `services.rke2`. The merge resulted in both modules providing more options, with `services.rke2` receiving the most improvements.
+  Existing configurations for either module should not be affected.
+
 - The new option [networking.ipips](#opt-networking.ipips) has been added to create IP-within-IP tunnels (including 4in6, ip6ip6 and ipip).
   With the existing [networking.sits](#opt-networking.sits) option (6in4), it is now possible to create all combinations of IPv4 and IPv6 encapsulation.
 
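For orientation (this example is not part of the diff): a minimal sketch of how the merged module surface could be used, assuming the shared options — for example `manifests` — are now exposed under `services.rke2` as the note suggests; the token file path is a placeholder.

```nix
{
  services.rke2 = {
    enable = true;
    role = "server";
    # keep the cluster token out of the world-readable Nix store
    tokenFile = "/run/secrets/rke2-token";
    # an auto-deployed manifest, one of the options gained from the shared k3s code
    manifests.app-namespace.content = {
      apiVersion = "v1";
      kind = "Namespace";
      metadata.name = "my-app";
    };
  };
}
```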
@@ -474,7 +474,6 @@
   ./services/cluster/corosync/default.nix
   ./services/cluster/druid/default.nix
   ./services/cluster/hadoop/default.nix
-  ./services/cluster/k3s/default.nix
   ./services/cluster/kubernetes/addon-manager.nix
   ./services/cluster/kubernetes/addons/dns.nix
   ./services/cluster/kubernetes/apiserver.nix
@@ -487,7 +486,7 @@
   ./services/cluster/kubernetes/scheduler.nix
   ./services/cluster/pacemaker/default.nix
   ./services/cluster/patroni/default.nix
-  ./services/cluster/rke2/default.nix
+  ./services/cluster/rancher/default.nix
   ./services/cluster/spark/default.nix
   ./services/cluster/temporal/default.nix
   ./services/computing/boinc/client.nix
nixos/modules/services/cluster/k3s/default.nix (913 lines deleted)
@@ -1,913 +0,0 @@
|
|||||||
{
|
|
||||||
config,
|
|
||||||
lib,
|
|
||||||
pkgs,
|
|
||||||
...
|
|
||||||
}:
|
|
||||||
let
|
|
||||||
cfg = config.services.k3s;
|
|
||||||
removeOption =
|
|
||||||
config: instruction:
|
|
||||||
lib.mkRemovedOptionModule (
|
|
||||||
[
|
|
||||||
"services"
|
|
||||||
"k3s"
|
|
||||||
]
|
|
||||||
++ config
|
|
||||||
) instruction;
|
|
||||||
|
|
||||||
manifestDir = "/var/lib/rancher/k3s/server/manifests";
|
|
||||||
chartDir = "/var/lib/rancher/k3s/server/static/charts";
|
|
||||||
imageDir = "/var/lib/rancher/k3s/agent/images";
|
|
||||||
containerdConfigTemplateFile = "/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl";
|
|
||||||
yamlFormat = pkgs.formats.yaml { };
|
|
||||||
yamlDocSeparator = builtins.toFile "yaml-doc-separator" "\n---\n";
|
|
||||||
# Manifests need a valid YAML suffix to be respected by k3s
|
|
||||||
mkManifestTarget =
|
|
||||||
name: if (lib.hasSuffix ".yaml" name || lib.hasSuffix ".yml" name) then name else name + ".yaml";
|
|
||||||
# Produces a list containing all duplicate manifest names
|
|
||||||
duplicateManifests = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
|
|
||||||
builtins.attrNames cfg.manifests
|
|
||||||
);
|
|
||||||
# Produces a list containing all duplicate chart names
|
|
||||||
duplicateCharts = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
|
|
||||||
builtins.attrNames cfg.charts
|
|
||||||
);
|
|
||||||
|
|
||||||
# Converts YAML -> JSON -> Nix
|
|
||||||
fromYaml =
|
|
||||||
path:
|
|
||||||
builtins.fromJSON (
|
|
||||||
builtins.readFile (
|
|
||||||
pkgs.runCommand "${path}-converted.json" { nativeBuildInputs = [ pkgs.yq-go ]; } ''
|
|
||||||
yq --no-colors --output-format json ${path} > $out
|
|
||||||
''
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
# Replace prefixes and characters that are problematic in file names
|
|
||||||
cleanHelmChartName =
|
|
||||||
name:
|
|
||||||
let
|
|
||||||
woPrefix = lib.removePrefix "https://" (lib.removePrefix "oci://" name);
|
|
||||||
in
|
|
||||||
lib.replaceStrings
|
|
||||||
[
|
|
||||||
"/"
|
|
||||||
":"
|
|
||||||
]
|
|
||||||
[
|
|
||||||
"-"
|
|
||||||
"-"
|
|
||||||
]
|
|
||||||
woPrefix;
|
|
||||||
|
|
||||||
# Fetch a Helm chart from a public registry. This only supports a basic Helm pull.
|
|
||||||
fetchHelm =
|
|
||||||
{
|
|
||||||
name,
|
|
||||||
repo,
|
|
||||||
version,
|
|
||||||
hash ? lib.fakeHash,
|
|
||||||
}:
|
|
||||||
let
|
|
||||||
isOci = lib.hasPrefix "oci://" repo;
|
|
||||||
pullCmd = if isOci then repo else "--repo ${repo} ${name}";
|
|
||||||
name' = if isOci then "${repo}-${version}" else "${repo}-${name}-${version}";
|
|
||||||
in
|
|
||||||
pkgs.runCommand (cleanHelmChartName "${name'}.tgz")
|
|
||||||
{
|
|
||||||
inherit (lib.fetchers.normalizeHash { } { inherit hash; }) outputHash outputHashAlgo;
|
|
||||||
impureEnvVars = lib.fetchers.proxyImpureEnvVars;
|
|
||||||
nativeBuildInputs = with pkgs; [
|
|
||||||
kubernetes-helm
|
|
||||||
cacert
|
|
||||||
# Helm requires HOME to refer to a writable dir
|
|
||||||
writableTmpDirAsHomeHook
|
|
||||||
];
|
|
||||||
}
|
|
||||||
''
|
|
||||||
helm pull ${pullCmd} --version ${version}
|
|
||||||
mv ./*.tgz $out
|
|
||||||
'';
|
|
||||||
|
|
||||||
# Returns the path to a YAML manifest file
|
|
||||||
mkExtraDeployManifest =
|
|
||||||
x:
|
|
||||||
# x is a derivation that provides a YAML file
|
|
||||||
if lib.isDerivation x then
|
|
||||||
x.outPath
|
|
||||||
# x is an attribute set that needs to be converted to a YAML file
|
|
||||||
else if builtins.isAttrs x then
|
|
||||||
(yamlFormat.generate "extra-deploy-chart-manifest" x)
|
|
||||||
# assume x is a path to a YAML file
|
|
||||||
else
|
|
||||||
x;
|
|
||||||
|
|
||||||
# Generate a HelmChart custom resource.
|
|
||||||
mkHelmChartCR =
|
|
||||||
name: value:
|
|
||||||
let
|
|
||||||
chartValues = if (lib.isPath value.values) then fromYaml value.values else value.values;
|
|
||||||
# use JSON for values as it's a subset of YAML and understood by the k3s Helm controller
|
|
||||||
valuesContent = builtins.toJSON chartValues;
|
|
||||||
in
|
|
||||||
# merge with extraFieldDefinitions to allow setting advanced values and overwrite generated
|
|
||||||
# values
|
|
||||||
lib.recursiveUpdate {
|
|
||||||
apiVersion = "helm.cattle.io/v1";
|
|
||||||
kind = "HelmChart";
|
|
||||||
metadata = {
|
|
||||||
inherit name;
|
|
||||||
namespace = "kube-system";
|
|
||||||
};
|
|
||||||
spec = {
|
|
||||||
inherit valuesContent;
|
|
||||||
inherit (value) targetNamespace createNamespace;
|
|
||||||
chart = "https://%{KUBERNETES_API}%/static/charts/${name}.tgz";
|
|
||||||
};
|
|
||||||
} value.extraFieldDefinitions;
|
|
||||||
|
|
||||||
# Generate a HelmChart custom resource together with extraDeploy manifests. This
|
|
||||||
# generates possibly a multi document YAML file that the auto deploy mechanism of k3s
|
|
||||||
# deploys.
|
|
||||||
mkAutoDeployChartManifest = name: value: {
|
|
||||||
# target is the final name of the link created for the manifest file
|
|
||||||
target = mkManifestTarget name;
|
|
||||||
inherit (value) enable package;
|
|
||||||
# source is a store path containing the complete manifest file
|
|
||||||
source = pkgs.concatText "auto-deploy-chart-${name}.yaml" (
|
|
||||||
[
|
|
||||||
(yamlFormat.generate "helm-chart-manifest-${name}.yaml" (mkHelmChartCR name value))
|
|
||||||
]
|
|
||||||
# alternate the YAML doc separator (---) and extraDeploy manifests to create
|
|
||||||
# multi document YAMLs
|
|
||||||
++ (lib.concatMap (x: [
|
|
||||||
yamlDocSeparator
|
|
||||||
(mkExtraDeployManifest x)
|
|
||||||
]) value.extraDeploy)
|
|
||||||
);
|
|
||||||
};
|
|
||||||
|
|
||||||
autoDeployChartsModule = lib.types.submodule (
|
|
||||||
{ config, ... }:
|
|
||||||
{
|
|
||||||
options = {
|
|
||||||
enable = lib.mkOption {
|
|
||||||
type = lib.types.bool;
|
|
||||||
default = true;
|
|
||||||
example = false;
|
|
||||||
description = ''
|
|
||||||
Whether to enable the installation of this Helm chart. Note that setting
|
|
||||||
this option to `false` will not uninstall the chart from the cluster, if
|
|
||||||
it was previously installed. Please use the `--disable` flag or `.skip`
|
|
||||||
files to delete/disable Helm charts, as mentioned in the
|
|
||||||
[docs](https://docs.k3s.io/installation/packaged-components#disabling-manifests).
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
repo = lib.mkOption {
|
|
||||||
type = lib.types.nonEmptyStr;
|
|
||||||
example = "https://kubernetes.github.io/ingress-nginx";
|
|
||||||
description = ''
|
|
||||||
The repo of the Helm chart. Only has an effect if `package` is not set.
|
|
||||||
The Helm chart is fetched during build time and placed as a `.tgz` archive on the
|
|
||||||
filesystem.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
name = lib.mkOption {
|
|
||||||
type = lib.types.nonEmptyStr;
|
|
||||||
example = "ingress-nginx";
|
|
||||||
description = ''
|
|
||||||
The name of the Helm chart. Only has an effect if `package` is not set.
|
|
||||||
The Helm chart is fetched during build time and placed as a `.tgz` archive on the
|
|
||||||
filesystem.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
version = lib.mkOption {
|
|
||||||
type = lib.types.nonEmptyStr;
|
|
||||||
example = "4.7.0";
|
|
||||||
description = ''
|
|
||||||
The version of the Helm chart. Only has an effect if `package` is not set.
|
|
||||||
The Helm chart is fetched during build time and placed as a `.tgz` archive on the
|
|
||||||
filesystem.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
hash = lib.mkOption {
|
|
||||||
type = lib.types.str;
|
|
||||||
example = "sha256-ej+vpPNdiOoXsaj1jyRpWLisJgWo8EqX+Z5VbpSjsPA=";
|
|
||||||
default = "";
|
|
||||||
description = ''
|
|
||||||
The hash of the packaged Helm chart. Only has an effect if `package` is not set.
|
|
||||||
The Helm chart is fetched during build time and placed as a `.tgz` archive on the
|
|
||||||
filesystem.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
package = lib.mkOption {
|
|
||||||
type = with lib.types; either path package;
|
|
||||||
example = lib.literalExpression "../my-helm-chart.tgz";
|
|
||||||
description = ''
|
|
||||||
The packaged Helm chart. Overwrites the options `repo`, `name`, `version`
|
|
||||||
and `hash` in case of conflicts.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
targetNamespace = lib.mkOption {
|
|
||||||
type = lib.types.nonEmptyStr;
|
|
||||||
default = "default";
|
|
||||||
example = "kube-system";
|
|
||||||
description = "The namespace in which the Helm chart gets installed.";
|
|
||||||
};
|
|
||||||
|
|
||||||
createNamespace = lib.mkOption {
|
|
||||||
type = lib.types.bool;
|
|
||||||
default = false;
|
|
||||||
example = true;
|
|
||||||
description = "Whether to create the target namespace if not present.";
|
|
||||||
};
|
|
||||||
|
|
||||||
values = lib.mkOption {
|
|
||||||
type = with lib.types; either path attrs;
|
|
||||||
default = { };
|
|
||||||
example = {
|
|
||||||
replicaCount = 3;
|
|
||||||
hostName = "my-host";
|
|
||||||
server = {
|
|
||||||
name = "nginx";
|
|
||||||
port = 80;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
description = ''
|
|
||||||
Override default chart values via Nix expressions. This is equivalent to setting
|
|
||||||
values in a `values.yaml` file.
|
|
||||||
|
|
||||||
WARNING: The values (including secrets!) specified here are exposed unencrypted
|
|
||||||
in the world-readable nix store.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
extraDeploy = lib.mkOption {
|
|
||||||
type = with lib.types; listOf (either path attrs);
|
|
||||||
default = [ ];
|
|
||||||
example = lib.literalExpression ''
|
|
||||||
[
|
|
||||||
../manifests/my-extra-deployment.yaml
|
|
||||||
{
|
|
||||||
apiVersion = "v1";
|
|
||||||
kind = "Service";
|
|
||||||
metadata = {
|
|
||||||
name = "app-service";
|
|
||||||
};
|
|
||||||
spec = {
|
|
||||||
selector = {
|
|
||||||
"app.kubernetes.io/name" = "MyApp";
|
|
||||||
};
|
|
||||||
ports = [
|
|
||||||
{
|
|
||||||
name = "name-of-service-port";
|
|
||||||
protocol = "TCP";
|
|
||||||
port = 80;
|
|
||||||
targetPort = "http-web-svc";
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
|
||||||
}
|
|
||||||
];
|
|
||||||
'';
|
|
||||||
description = "List of extra Kubernetes manifests to deploy with this Helm chart.";
|
|
||||||
};
|
|
||||||
|
|
||||||
extraFieldDefinitions = lib.mkOption {
|
|
||||||
inherit (yamlFormat) type;
|
|
||||||
default = { };
|
|
||||||
example = {
|
|
||||||
spec = {
|
|
||||||
bootstrap = true;
|
|
||||||
helmVersion = "v2";
|
|
||||||
backOffLimit = 3;
|
|
||||||
jobImage = "custom-helm-controller:v0.0.1";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
description = ''
|
|
||||||
Extra HelmChart field definitions that are merged with the rest of the HelmChart
|
|
||||||
custom resource. This can be used to set advanced fields or to overwrite
|
|
||||||
generated fields. See <https://docs.k3s.io/helm#helmchart-field-definitions>
|
|
||||||
for possible fields.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
config.package = lib.mkDefault (fetchHelm {
|
|
||||||
inherit (config)
|
|
||||||
repo
|
|
||||||
name
|
|
||||||
version
|
|
||||||
hash
|
|
||||||
;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
manifestModule = lib.types.submodule (
|
|
||||||
{
|
|
||||||
name,
|
|
||||||
config,
|
|
||||||
options,
|
|
||||||
...
|
|
||||||
}:
|
|
||||||
{
|
|
||||||
options = {
|
|
||||||
enable = lib.mkOption {
|
|
||||||
type = lib.types.bool;
|
|
||||||
default = true;
|
|
||||||
description = "Whether this manifest file should be generated.";
|
|
||||||
};
|
|
||||||
|
|
||||||
target = lib.mkOption {
|
|
||||||
type = lib.types.nonEmptyStr;
|
|
||||||
example = "manifest.yaml";
|
|
||||||
description = ''
|
|
||||||
Name of the symlink (relative to {file}`${manifestDir}`).
|
|
||||||
Defaults to the attribute name.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
content = lib.mkOption {
|
|
||||||
type = with lib.types; nullOr (either attrs (listOf attrs));
|
|
||||||
default = null;
|
|
||||||
description = ''
|
|
||||||
Content of the manifest file. A single attribute set will
|
|
||||||
generate a single document YAML file. A list of attribute sets
|
|
||||||
will generate multiple documents separated by `---` in a single
|
|
||||||
YAML file.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
source = lib.mkOption {
|
|
||||||
type = lib.types.path;
|
|
||||||
example = lib.literalExpression "./manifests/app.yaml";
|
|
||||||
description = ''
|
|
||||||
Path of the source `.yaml` file.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
config = {
|
|
||||||
target = lib.mkDefault (mkManifestTarget name);
|
|
||||||
source = lib.mkIf (config.content != null) (
|
|
||||||
let
|
|
||||||
name' = "k3s-manifest-" + builtins.baseNameOf name;
|
|
||||||
docName = "k3s-manifest-doc-" + builtins.baseNameOf name;
|
|
||||||
mkSource =
|
|
||||||
value:
|
|
||||||
if builtins.isList value then
|
|
||||||
pkgs.concatText name' (
|
|
||||||
lib.concatMap (x: [
|
|
||||||
yamlDocSeparator
|
|
||||||
(yamlFormat.generate docName x)
|
|
||||||
]) value
|
|
||||||
)
|
|
||||||
else
|
|
||||||
yamlFormat.generate name' value;
|
|
||||||
in
|
|
||||||
lib.mkDerivedConfig options.content mkSource
|
|
||||||
);
|
|
||||||
};
|
|
||||||
}
|
|
||||||
);
|
|
||||||
in
|
|
||||||
{
|
|
||||||
imports = [ (removeOption [ "docker" ] "k3s docker option is no longer supported.") ];
|
|
||||||
|
|
||||||
# interface
|
|
||||||
options.services.k3s = {
|
|
||||||
enable = lib.mkEnableOption "k3s";
|
|
||||||
|
|
||||||
package = lib.mkPackageOption pkgs "k3s" { };
|
|
||||||
|
|
||||||
role = lib.mkOption {
|
|
||||||
description = ''
|
|
||||||
Whether k3s should run as a server or agent.
|
|
||||||
|
|
||||||
If it's a server:
|
|
||||||
|
|
||||||
- By default it also runs workloads as an agent.
|
|
||||||
- Starts by default as a standalone server using an embedded sqlite datastore.
|
|
||||||
- Configure `clusterInit = true` to switch over to embedded etcd datastore and enable HA mode.
|
|
||||||
- Configure `serverAddr` to join an already-initialized HA cluster.
|
|
||||||
|
|
||||||
If it's an agent:
|
|
||||||
|
|
||||||
- `serverAddr` is required.
|
|
||||||
'';
|
|
||||||
default = "server";
|
|
||||||
type = lib.types.enum [
|
|
||||||
"server"
|
|
||||||
"agent"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
serverAddr = lib.mkOption {
|
|
||||||
type = lib.types.str;
|
|
||||||
description = ''
|
|
||||||
The k3s server to connect to.
|
|
||||||
|
|
||||||
Servers and agents need to communicate with each other. Read
|
|
||||||
[the networking docs](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/#networking)
|
|
||||||
to know how to configure the firewall.
|
|
||||||
'';
|
|
||||||
example = "https://10.0.0.10:6443";
|
|
||||||
default = "";
|
|
||||||
};
|
|
||||||
|
|
||||||
clusterInit = lib.mkOption {
|
|
||||||
type = lib.types.bool;
|
|
||||||
default = false;
|
|
||||||
description = ''
|
|
||||||
Initialize HA cluster using an embedded etcd datastore.
|
|
||||||
|
|
||||||
If this option is `false` and `role` is `server`:
|
|
||||||
|
|
||||||
On a server that was using the default embedded sqlite backend,
|
|
||||||
enabling this option will migrate to an embedded etcd DB.
|
|
||||||
|
|
||||||
If an HA cluster using the embedded etcd datastore was already initialized,
|
|
||||||
this option has no effect.
|
|
||||||
|
|
||||||
This option only makes sense in a server that is not connecting to another server.
|
|
||||||
|
|
||||||
If you are configuring an HA cluster with an embedded etcd,
|
|
||||||
the 1st server must have `clusterInit = true`
|
|
||||||
and other servers must connect to it using `serverAddr`.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
token = lib.mkOption {
|
|
||||||
type = lib.types.str;
|
|
||||||
description = ''
|
|
||||||
The k3s token to use when connecting to a server.
|
|
||||||
|
|
||||||
WARNING: This option will store your token unencrypted and world-readable in the nix store.
|
|
||||||
If this is undesired use the tokenFile option instead.
|
|
||||||
'';
|
|
||||||
default = "";
|
|
||||||
};
|
|
||||||
|
|
||||||
tokenFile = lib.mkOption {
|
|
||||||
type = lib.types.nullOr lib.types.path;
|
|
||||||
description = "File path containing k3s token to use when connecting to the server.";
|
|
||||||
default = null;
|
|
||||||
};
|
|
||||||
|
|
||||||
extraFlags = lib.mkOption {
|
|
||||||
description = "Extra flags to pass to the k3s command.";
|
|
||||||
type = with lib.types; either str (listOf str);
|
|
||||||
default = [ ];
|
|
||||||
example = [
|
|
||||||
"--disable traefik"
|
|
||||||
"--cluster-cidr 10.24.0.0/16"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
|
|
||||||
disableAgent = lib.mkOption {
|
|
||||||
type = lib.types.bool;
|
|
||||||
default = false;
|
|
||||||
description = "Only run the server. This option only makes sense for a server.";
|
|
||||||
};
|
|
||||||
|
|
||||||
environmentFile = lib.mkOption {
|
|
||||||
type = lib.types.nullOr lib.types.path;
|
|
||||||
description = ''
|
|
||||||
File path containing environment variables for configuring the k3s service in the format of an EnvironmentFile. See {manpage}`systemd.exec(5)`.
|
|
||||||
'';
|
|
||||||
default = null;
|
|
||||||
};
|
|
||||||
|
|
||||||
configPath = lib.mkOption {
|
|
||||||
type = lib.types.nullOr lib.types.path;
|
|
||||||
default = null;
|
|
||||||
description = "File path containing the k3s YAML config. This is useful when the config is generated (for example on boot).";
|
|
||||||
};
|
|
||||||
|
|
||||||
manifests = lib.mkOption {
|
|
||||||
type = lib.types.attrsOf manifestModule;
|
|
||||||
default = { };
|
|
||||||
example = lib.literalExpression ''
|
|
||||||
{
|
|
||||||
deployment.source = ../manifests/deployment.yaml;
|
|
||||||
my-service = {
|
|
||||||
enable = false;
|
|
||||||
target = "app-service.yaml";
|
|
||||||
content = {
|
|
||||||
apiVersion = "v1";
|
|
||||||
kind = "Service";
|
|
||||||
metadata = {
|
|
||||||
name = "app-service";
|
|
||||||
};
|
|
||||||
spec = {
|
|
||||||
selector = {
|
|
||||||
"app.kubernetes.io/name" = "MyApp";
|
|
||||||
};
|
|
||||||
ports = [
|
|
||||||
{
|
|
||||||
name = "name-of-service-port";
|
|
||||||
protocol = "TCP";
|
|
||||||
port = 80;
|
|
||||||
targetPort = "http-web-svc";
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
nginx.content = [
|
|
||||||
{
|
|
||||||
apiVersion = "v1";
|
|
||||||
kind = "Pod";
|
|
||||||
metadata = {
|
|
||||||
name = "nginx";
|
|
||||||
labels = {
|
|
||||||
"app.kubernetes.io/name" = "MyApp";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
spec = {
|
|
||||||
containers = [
|
|
||||||
{
|
|
||||||
name = "nginx";
|
|
||||||
image = "nginx:1.14.2";
|
|
||||||
ports = [
|
|
||||||
{
|
|
||||||
containerPort = 80;
|
|
||||||
name = "http-web-svc";
|
|
||||||
}
|
|
||||||
];
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
|
||||||
}
|
|
||||||
{
|
|
||||||
apiVersion = "v1";
|
|
||||||
kind = "Service";
|
|
||||||
metadata = {
|
|
||||||
name = "nginx-service";
|
|
||||||
};
|
|
||||||
spec = {
|
|
||||||
selector = {
|
|
||||||
"app.kubernetes.io/name" = "MyApp";
|
|
||||||
};
|
|
||||||
ports = [
|
|
||||||
{
|
|
||||||
name = "name-of-service-port";
|
|
||||||
protocol = "TCP";
|
|
||||||
port = 80;
|
|
||||||
targetPort = "http-web-svc";
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
|
||||||
'';
|
|
||||||
description = ''
|
|
||||||
Auto-deploying manifests that are linked to {file}`${manifestDir}` before k3s starts.
|
|
||||||
Note that deleting manifest files will not remove or otherwise modify the resources
|
|
||||||
they created. Please use the `--disable` flag or `.skip` files to delete/disable AddOns,
|
|
||||||
as mentioned in the [docs](https://docs.k3s.io/installation/packaged-components#disabling-manifests).
|
|
||||||
This option only makes sense on server nodes (`role = server`).
|
|
||||||
Read the [auto-deploying manifests docs](https://docs.k3s.io/installation/packaged-components#auto-deploying-manifests-addons)
|
|
||||||
for further information.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
charts = lib.mkOption {
|
|
||||||
type = with lib.types; attrsOf (either path package);
|
|
||||||
default = { };
|
|
||||||
example = lib.literalExpression ''
|
|
||||||
nginx = ../charts/my-nginx-chart.tgz;
|
|
||||||
redis = ../charts/my-redis-chart.tgz;
|
|
||||||
'';
|
|
||||||
description = ''
|
|
||||||
Packaged Helm charts that are linked to {file}`${chartDir}` before k3s starts.
|
|
||||||
The attribute name will be used as the link target (relative to {file}`${chartDir}`).
|
|
||||||
The specified charts will only be placed on the file system and made available to the
|
|
||||||
Kubernetes APIServer from within the cluster. See the [](#opt-services.k3s.autoDeployCharts)
|
|
||||||
option and the [k3s Helm controller docs](https://docs.k3s.io/helm#using-the-helm-controller)
|
|
||||||
to deploy Helm charts. This option only makes sense on server nodes (`role = server`).
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
containerdConfigTemplate = lib.mkOption {
|
|
||||||
type = lib.types.nullOr lib.types.str;
|
|
||||||
default = null;
|
|
||||||
example = lib.literalExpression ''
|
|
||||||
# Base K3s config
|
|
||||||
{{ template "base" . }}
|
|
||||||
|
|
||||||
# Add a custom runtime
|
|
||||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes."custom"]
|
|
||||||
runtime_type = "io.containerd.runc.v2"
|
|
||||||
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes."custom".options]
|
|
||||||
BinaryName = "/path/to/custom-container-runtime"
|
|
||||||
'';
|
|
||||||
description = ''
|
|
||||||
Config template for containerd, to be placed at
|
|
||||||
`/var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl`.
|
|
||||||
See the K3s docs on [configuring containerd](https://docs.k3s.io/advanced#configuring-containerd).
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
images = lib.mkOption {
|
|
||||||
type = with lib.types; listOf package;
|
|
||||||
default = [ ];
|
|
||||||
example = lib.literalExpression ''
|
|
||||||
[
|
|
||||||
(pkgs.dockerTools.pullImage {
|
|
||||||
imageName = "docker.io/bitnami/keycloak";
|
|
||||||
imageDigest = "sha256:714dfadc66a8e3adea6609bda350345bd3711657b7ef3cf2e8015b526bac2d6b";
|
|
||||||
hash = "sha256-IM2BLZ0EdKIZcRWOtuFY9TogZJXCpKtPZnMnPsGlq0Y=";
|
|
||||||
finalImageTag = "21.1.2-debian-11-r0";
|
|
||||||
})
|
|
||||||
|
|
||||||
config.services.k3s.package.airgap-images
|
|
||||||
]
|
|
||||||
'';
|
|
||||||
description = ''
|
|
||||||
List of derivations that provide container images.
|
|
||||||
All images are linked to {file}`${imageDir}` before k3s starts and consequently imported
|
|
||||||
by the k3s agent. Consider importing the k3s airgap images archive of the k3s package in
|
|
||||||
use, if you want to pre-provision this node with all k3s container images. This option
|
|
||||||
only makes sense on nodes with an enabled agent.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
gracefulNodeShutdown = {
|
|
||||||
enable = lib.mkEnableOption ''
|
|
||||||
graceful node shutdowns where the kubelet attempts to detect
|
|
||||||
node system shutdown and terminates pods running on the node. See the
|
|
||||||
[documentation](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown)
|
|
||||||
for further information.
|
|
||||||
'';
|
|
||||||
|
|
||||||
shutdownGracePeriod = lib.mkOption {
|
|
||||||
type = lib.types.nonEmptyStr;
|
|
||||||
default = "30s";
|
|
||||||
example = "1m30s";
|
|
||||||
description = ''
|
|
||||||
Specifies the total duration that the node should delay the shutdown by. This is the total
|
|
||||||
grace period for pod termination for both regular and critical pods.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
shutdownGracePeriodCriticalPods = lib.mkOption {
|
|
||||||
type = lib.types.nonEmptyStr;
|
|
||||||
default = "10s";
|
|
||||||
example = "15s";
|
|
||||||
description = ''
|
|
||||||
Specifies the duration used to terminate critical pods during a node shutdown. This should be
|
|
||||||
less than `shutdownGracePeriod`.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
extraKubeletConfig = lib.mkOption {
|
|
||||||
type = with lib.types; attrsOf anything;
|
|
||||||
default = { };
|
|
||||||
example = {
|
|
||||||
podsPerCore = 3;
|
|
||||||
memoryThrottlingFactor = 0.69;
|
|
||||||
containerLogMaxSize = "5Mi";
|
|
||||||
};
|
|
||||||
description = ''
|
|
||||||
Extra configuration to add to the kubelet's configuration file. The subset of the kubelet's
|
|
||||||
configuration that can be configured via a file is defined by the
|
|
||||||
[KubeletConfiguration](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/)
|
|
||||||
struct. See the
|
|
||||||
[documentation](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/)
|
|
||||||
for further information.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
extraKubeProxyConfig = lib.mkOption {
|
|
||||||
type = with lib.types; attrsOf anything;
|
|
||||||
default = { };
|
|
||||||
example = {
|
|
||||||
mode = "nftables";
|
|
||||||
clientConnection.kubeconfig = "/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig";
|
|
||||||
};
|
|
||||||
description = ''
|
|
||||||
Extra configuration to add to the kube-proxy's configuration file. The subset of the kube-proxy's
|
|
||||||
configuration that can be configured via a file is defined by the
|
|
||||||
[KubeProxyConfiguration](https://kubernetes.io/docs/reference/config-api/kube-proxy-config.v1alpha1/)
|
|
||||||
struct. Note that the kubeconfig parameter will be overridden by `clientConnection.kubeconfig`, so you must
|
|
||||||
set `clientConnection.kubeconfig` if you want to use `extraKubeProxyConfig`.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
autoDeployCharts = lib.mkOption {
|
|
||||||
type = lib.types.attrsOf autoDeployChartsModule;
|
|
||||||
apply = lib.mapAttrs mkAutoDeployChartManifest;
|
|
||||||
default = { };
|
|
||||||
example = lib.literalExpression ''
|
|
||||||
{
|
|
||||||
harbor = {
|
|
||||||
name = "harbor";
|
|
||||||
repo = "https://helm.goharbor.io";
|
|
||||||
version = "1.14.0";
|
|
||||||
hash = "sha256-fMP7q1MIbvzPGS9My91vbQ1d3OJMjwc+o8YE/BXZaYU=";
|
|
||||||
values = {
|
|
||||||
existingSecretAdminPassword = "harbor-admin";
|
|
||||||
expose = {
|
|
||||||
tls = {
|
|
||||||
enabled = true;
|
|
||||||
certSource = "secret";
|
|
||||||
secret.secretName = "my-tls-secret";
|
|
||||||
};
|
|
||||||
ingress = {
|
|
||||||
hosts.core = "example.com";
|
|
||||||
className = "nginx";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
nginx = {
|
|
||||||
repo = "oci://registry-1.docker.io/bitnamicharts/nginx";
|
|
||||||
version = "20.0.0";
|
|
||||||
hash = "sha256-sy+tzB+i9jIl/tqOMzzuhVhTU4EZVsoSBtPznxF/36c=";
|
|
||||||
};
|
|
||||||
custom-chart = {
|
|
||||||
package = ../charts/my-chart.tgz;
|
|
||||||
values = ../values/my-values.yaml;
|
|
||||||
extraFieldDefinitions = {
|
|
||||||
spec.timeout = "60s";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
|
||||||
'';
|
|
||||||
description = ''
|
|
||||||
Auto-deploying Helm charts that are installed by the k3s Helm controller. Avoid using
|
|
||||||
attribute names that are also used in the [](#opt-services.k3s.manifests) and
|
|
||||||
[](#opt-services.k3s.charts) options. Manifests with the same name will override
|
|
||||||
auto deploying charts with the same name. Similarly, charts with the same name will
|
|
||||||
overwrite the Helm chart contained in auto deploying charts. This option only makes
|
|
||||||
sense on server nodes (`role = server`). See the
|
|
||||||
[k3s Helm documentation](https://docs.k3s.io/helm) for further information.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
# implementation
|
|
||||||
|
|
||||||
config = lib.mkIf cfg.enable {
|
|
||||||
warnings =
|
|
||||||
(lib.optional (cfg.role != "server" && cfg.manifests != { })
|
|
||||||
"k3s: Auto deploying manifests are only installed on server nodes (role == server), they will be ignored by this node."
|
|
||||||
)
|
|
||||||
++ (lib.optional (cfg.role != "server" && cfg.charts != { })
|
|
||||||
"k3s: Helm charts are only made available to the cluster on server nodes (role == server), they will be ignored by this node."
|
|
||||||
)
|
|
||||||
++ (lib.optional (cfg.role != "server" && cfg.autoDeployCharts != { })
|
|
||||||
"k3s: Auto deploying Helm charts are only installed on server nodes (role == server), they will be ignored by this node."
|
|
||||||
)
|
|
||||||
++ (lib.optional (duplicateManifests != [ ])
|
|
||||||
"k3s: The following auto deploying charts are overriden by manifests of the same name: ${toString duplicateManifests}."
|
|
||||||
)
|
|
||||||
++ (lib.optional (duplicateCharts != [ ])
|
|
||||||
"k3s: The following auto deploying charts are overriden by charts of the same name: ${toString duplicateCharts}."
|
|
||||||
)
|
|
||||||
++ (lib.optional (
|
|
||||||
cfg.disableAgent && cfg.images != [ ]
|
|
||||||
) "k3s: Images are only imported on nodes with an enabled agent, they will be ignored by this node")
|
|
||||||
++ (lib.optional (
|
|
||||||
cfg.role == "agent" && cfg.configPath == null && cfg.serverAddr == ""
|
|
||||||
) "k3s: serverAddr or configPath (with 'server' key) should be set if role is 'agent'")
|
|
||||||
++ (lib.optional
|
|
||||||
(cfg.role == "agent" && cfg.configPath == null && cfg.tokenFile == null && cfg.token == "")
|
|
||||||
"k3s: Token or tokenFile or configPath (with 'token' or 'token-file' keys) should be set if role is 'agent'"
|
|
||||||
);
|
|
||||||
|
|
||||||
assertions = [
|
|
||||||
{
|
|
||||||
assertion = cfg.role == "agent" -> !cfg.disableAgent;
|
|
||||||
message = "k3s: disableAgent must be false if role is 'agent'";
|
|
||||||
}
|
|
||||||
{
|
|
||||||
assertion = cfg.role == "agent" -> !cfg.clusterInit;
|
|
||||||
message = "k3s: clusterInit must be false if role is 'agent'";
|
|
||||||
}
|
|
||||||
];
|
|
||||||
|
|
||||||
environment.systemPackages = [ config.services.k3s.package ];
|
|
||||||
|
|
||||||
# Use systemd-tmpfiles to activate k3s content
|
|
||||||
systemd.tmpfiles.settings."10-k3s" =
|
|
||||||
let
|
|
||||||
# Merge manifest with manifests generated from auto deploying charts, keep only enabled manifests
|
|
||||||
enabledManifests = lib.filterAttrs (_: v: v.enable) (cfg.autoDeployCharts // cfg.manifests);
|
|
||||||
# Merge charts with charts contained in enabled auto deploying charts
|
|
||||||
helmCharts =
|
|
||||||
(lib.concatMapAttrs (n: v: { ${n} = v.package; }) (
|
|
||||||
lib.filterAttrs (_: v: v.enable) cfg.autoDeployCharts
|
|
||||||
))
|
|
||||||
// cfg.charts;
|
|
||||||
# Make a systemd-tmpfiles rule for a manifest
|
|
||||||
mkManifestRule = manifest: {
|
|
||||||
name = "${manifestDir}/${manifest.target}";
|
|
||||||
value = {
|
|
||||||
"L+".argument = "${manifest.source}";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
# Ensure that all chart targets have a .tgz suffix
|
|
||||||
mkChartTarget = name: if (lib.hasSuffix ".tgz" name) then name else name + ".tgz";
|
|
||||||
# Make a systemd-tmpfiles rule for a chart
|
|
||||||
mkChartRule = target: source: {
|
|
||||||
name = "${chartDir}/${mkChartTarget target}";
|
|
||||||
value = {
|
|
||||||
"L+".argument = "${source}";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
# Make a systemd-tmpfiles rule for a container image
|
|
||||||
mkImageRule = image: {
|
|
||||||
name = "${imageDir}/${image.name}";
|
|
||||||
value = {
|
|
||||||
"L+".argument = "${image}";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
in
|
|
||||||
(lib.mapAttrs' (_: v: mkManifestRule v) enabledManifests)
|
|
||||||
// (lib.mapAttrs' (n: v: mkChartRule n v) helmCharts)
|
|
||||||
// (builtins.listToAttrs (map mkImageRule cfg.images))
|
|
||||||
// (lib.optionalAttrs (cfg.containerdConfigTemplate != null) {
|
|
||||||
${containerdConfigTemplateFile} = {
|
|
||||||
"L+".argument = "${pkgs.writeText "config.toml.tmpl" cfg.containerdConfigTemplate}";
|
|
||||||
};
|
|
||||||
});
|
|
||||||
|
|
||||||
systemd.services.k3s =
|
|
||||||
let
|
|
||||||
kubeletParams =
|
|
||||||
(lib.optionalAttrs (cfg.gracefulNodeShutdown.enable) {
|
|
||||||
inherit (cfg.gracefulNodeShutdown) shutdownGracePeriod shutdownGracePeriodCriticalPods;
|
|
||||||
})
|
|
||||||
// cfg.extraKubeletConfig;
|
|
||||||
kubeletConfig = (pkgs.formats.yaml { }).generate "k3s-kubelet-config" (
|
|
||||||
{
|
|
||||||
apiVersion = "kubelet.config.k8s.io/v1beta1";
|
|
||||||
kind = "KubeletConfiguration";
|
|
||||||
}
|
|
||||||
// kubeletParams
|
|
||||||
);
|
|
||||||
|
|
||||||
kubeProxyConfig = (pkgs.formats.yaml { }).generate "k3s-kubeProxy-config" (
|
|
||||||
{
|
|
||||||
apiVersion = "kubeproxy.config.k8s.io/v1alpha1";
|
|
||||||
kind = "KubeProxyConfiguration";
|
|
||||||
}
|
|
||||||
// cfg.extraKubeProxyConfig
|
|
||||||
);
|
|
||||||
in
|
|
||||||
{
|
|
||||||
description = "k3s service";
|
|
||||||
after = [
|
|
||||||
"firewall.service"
|
|
||||||
"network-online.target"
|
|
||||||
];
|
|
||||||
wants = [
|
|
||||||
"firewall.service"
|
|
||||||
"network-online.target"
|
|
||||||
];
|
|
||||||
wantedBy = [ "multi-user.target" ];
|
|
||||||
path = lib.optional config.boot.zfs.enabled config.boot.zfs.package;
|
|
||||||
serviceConfig = {
|
|
||||||
# See: https://github.com/rancher/k3s/blob/dddbd16305284ae4bd14c0aade892412310d7edc/install.sh#L197
|
|
||||||
Type = if cfg.role == "agent" then "exec" else "notify";
|
|
||||||
KillMode = "process";
|
|
||||||
Delegate = "yes";
|
|
||||||
Restart = "always";
|
|
||||||
RestartSec = "5s";
|
|
||||||
LimitNOFILE = 1048576;
|
|
||||||
LimitNPROC = "infinity";
|
|
||||||
LimitCORE = "infinity";
|
|
||||||
TasksMax = "infinity";
|
|
||||||
EnvironmentFile = cfg.environmentFile;
|
|
||||||
ExecStart = lib.concatStringsSep " \\\n " (
|
|
||||||
[ "${cfg.package}/bin/k3s ${cfg.role}" ]
|
|
||||||
++ (lib.optional cfg.clusterInit "--cluster-init")
|
|
||||||
++ (lib.optional cfg.disableAgent "--disable-agent")
|
|
||||||
++ (lib.optional (cfg.serverAddr != "") "--server ${cfg.serverAddr}")
|
|
||||||
++ (lib.optional (cfg.token != "") "--token ${cfg.token}")
|
|
||||||
++ (lib.optional (cfg.tokenFile != null) "--token-file ${cfg.tokenFile}")
|
|
||||||
++ (lib.optional (cfg.configPath != null) "--config ${cfg.configPath}")
|
|
||||||
++ (lib.optional (kubeletParams != { }) "--kubelet-arg=config=${kubeletConfig}")
|
|
||||||
++ (lib.optional (cfg.extraKubeProxyConfig != { }) "--kube-proxy-arg=config=${kubeProxyConfig}")
|
|
||||||
++ (lib.flatten cfg.extraFlags)
|
|
||||||
);
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
meta.maintainers = lib.teams.k3s.members;
|
|
||||||
}
|
|
||||||
nixos/modules/services/cluster/rancher/default.nix (new file, 966 lines)
@@ -0,0 +1,966 @@
|
|||||||
|
{
|
||||||
|
config,
|
||||||
|
lib,
|
||||||
|
pkgs,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
let
|
||||||
|
mkRancherModule =
|
||||||
|
{
|
||||||
|
# name used in paths/bin names/etc, e.g. k3s
|
||||||
|
name,
|
||||||
|
# systemd service name
|
||||||
|
serviceName ? name,
|
||||||
|
# extra flags to pass to the binary before user-defined extraFlags
|
||||||
|
extraBinFlags ? [ ],
|
||||||
|
# generate manifests as JSON rather than YAML, see rke2.nix
|
||||||
|
jsonManifests ? false,
|
||||||
|
|
||||||
|
# which port on the local node hosts content placed in ${staticContentChartDir} on /static/
|
||||||
|
# if null, it's assumed the content can be accessed via https://%{KUBERNETES_API}%/static/
|
||||||
|
staticContentPort ? null,
|
||||||
|
}:
|
||||||
|
let
|
||||||
|
cfg = config.services.${name};
|
||||||
|
|
||||||
|
# Paths defined here are passed to the downstream modules as `paths`
|
||||||
|
manifestDir = "/var/lib/rancher/${name}/server/manifests";
|
||||||
|
imageDir = "/var/lib/rancher/${name}/agent/images";
|
||||||
|
containerdConfigTemplateFile = "/var/lib/rancher/${name}/agent/etc/containerd/config.toml.tmpl";
|
||||||
|
staticContentChartDir = "/var/lib/rancher/${name}/server/static/charts";
|
||||||
|
|
||||||
|
manifestFormat = if jsonManifests then pkgs.formats.json { } else pkgs.formats.yaml { };
|
||||||
|
# Manifests need a valid suffix to be respected
|
||||||
|
mkManifestTarget =
|
||||||
|
name:
|
||||||
|
if (lib.hasSuffix ".yaml" name || lib.hasSuffix ".yml" name || lib.hasSuffix ".json" name) then
|
||||||
|
name
|
||||||
|
else if jsonManifests then
|
||||||
|
name + ".json"
|
||||||
|
else
|
||||||
|
name + ".yaml";
|
||||||
|
# Returns a path to the final manifest file
|
||||||
|
mkManifestSource =
|
||||||
|
name: manifests:
|
||||||
|
manifestFormat.generate name (
|
||||||
|
if builtins.isList manifests then
|
||||||
|
{
|
||||||
|
apiVersion = "v1";
|
||||||
|
kind = "List";
|
||||||
|
items = manifests;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
manifests
|
||||||
|
);
|
||||||
|
|
||||||
|
# Produces a list containing all duplicate manifest names
|
||||||
|
duplicateManifests = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
|
||||||
|
builtins.attrNames cfg.manifests
|
||||||
|
);
|
||||||
|
# Produces a list containing all duplicate chart names
|
||||||
|
duplicateCharts = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
|
||||||
|
builtins.attrNames cfg.charts
|
||||||
|
);
|
||||||
|
|
||||||
|
# Converts YAML -> JSON -> Nix
|
||||||
|
fromYaml =
|
||||||
|
path:
|
||||||
|
builtins.fromJSON (
|
||||||
|
builtins.readFile (
|
||||||
|
pkgs.runCommand "${path}-converted.json" { nativeBuildInputs = [ pkgs.yq-go ]; } ''
|
||||||
|
yq --no-colors --output-format json ${path} > $out
|
||||||
|
''
|
||||||
|
)
|
||||||
|
);
|
||||||
|
|
||||||
|
# Replace prefixes and characters that are problematic in file names
|
||||||
|
cleanHelmChartName =
|
||||||
|
name:
|
||||||
|
let
|
||||||
|
woPrefix = lib.removePrefix "https://" (lib.removePrefix "oci://" name);
|
||||||
|
in
|
||||||
|
lib.replaceStrings
|
||||||
|
[
|
||||||
|
"/"
|
||||||
|
":"
|
||||||
|
]
|
||||||
|
[
|
||||||
|
"-"
|
||||||
|
"-"
|
||||||
|
]
|
||||||
|
woPrefix;
|
||||||
|
|
||||||
|
# Fetch a Helm chart from a public registry. This only supports a basic Helm pull.
|
||||||
|
fetchHelm =
|
||||||
|
{
|
||||||
|
name,
|
||||||
|
repo,
|
||||||
|
version,
|
||||||
|
hash ? lib.fakeHash,
|
||||||
|
}:
|
||||||
|
let
|
||||||
|
isOci = lib.hasPrefix "oci://" repo;
|
||||||
|
pullCmd = if isOci then repo else "--repo ${repo} ${name}";
|
||||||
|
name' = if isOci then "${repo}-${version}" else "${repo}-${name}-${version}";
|
||||||
|
in
|
||||||
|
pkgs.runCommand (cleanHelmChartName "${name'}.tgz")
|
||||||
|
{
|
||||||
|
inherit (lib.fetchers.normalizeHash { } { inherit hash; }) outputHash outputHashAlgo;
|
||||||
|
impureEnvVars = lib.fetchers.proxyImpureEnvVars;
|
||||||
|
nativeBuildInputs = with pkgs; [
|
||||||
|
kubernetes-helm
|
||||||
|
cacert
|
||||||
|
# Helm requires HOME to refer to a writable dir
|
||||||
|
writableTmpDirAsHomeHook
|
||||||
|
];
|
||||||
|
}
|
||||||
|
''
|
||||||
|
helm pull ${pullCmd} --version ${version}
|
||||||
|
mv ./*.tgz $out
|
||||||
|
'';
|
||||||
|
|
||||||
|
# Returns the path to a YAML manifest file
|
||||||
|
mkExtraDeployManifest =
|
||||||
|
x:
|
||||||
|
# x is a derivation that provides a YAML file
|
||||||
|
if lib.isDerivation x then
|
||||||
|
x.outPath
|
||||||
|
# x is an attribute set that needs to be converted to a YAML file
|
||||||
|
else if builtins.isAttrs x then
|
||||||
|
(manifestFormat.generate "extra-deploy-chart-manifest" x)
|
||||||
|
# assume x is a path to a YAML file
|
||||||
|
else
|
||||||
|
x;
|
||||||
|
|
||||||
|
# Generate a HelmChart custom resource.
|
||||||
|
mkHelmChartCR =
|
||||||
|
name: value:
|
||||||
|
let
|
||||||
|
chartValues = if (lib.isPath value.values) then fromYaml value.values else value.values;
|
||||||
|
# use JSON for values as it's a subset of YAML and understood by the rancher Helm controller
|
||||||
|
valuesContent = builtins.toJSON chartValues;
|
||||||
|
in
|
||||||
|
# merge with extraFieldDefinitions to allow setting advanced values and overwrite generated
|
||||||
|
# values
|
||||||
|
lib.recursiveUpdate {
|
||||||
|
apiVersion = "helm.cattle.io/v1";
|
||||||
|
kind = "HelmChart";
|
||||||
|
metadata = {
|
||||||
|
inherit name;
|
||||||
|
namespace = "kube-system";
|
||||||
|
};
|
||||||
|
spec = {
|
||||||
|
inherit valuesContent;
|
||||||
|
inherit (value) targetNamespace createNamespace;
|
||||||
|
chart =
|
||||||
|
if staticContentPort == null then
|
||||||
|
"https://%{KUBERNETES_API}%/static/charts/${name}.tgz"
|
||||||
|
else
|
||||||
|
"https://localhost:${toString staticContentPort}/static/charts/${name}.tgz";
|
||||||
|
bootstrap = staticContentPort != null; # needed for host network access
|
||||||
|
};
|
||||||
|
} value.extraFieldDefinitions;
|
||||||
|
|
||||||
|
# Generate a HelmChart custom resource together with extraDeploy manifests.
|
||||||
|
mkAutoDeployChartManifest = name: value: {
|
||||||
|
# target is the final name of the link created for the manifest file
|
||||||
|
target = mkManifestTarget name;
|
||||||
|
inherit (value) enable package;
|
||||||
|
# source is a store path containing the complete manifest file
|
||||||
|
source = mkManifestSource "auto-deploy-chart-${name}" (
|
||||||
|
lib.singleton (mkHelmChartCR name value)
|
||||||
|
++ builtins.map (x: fromYaml (mkExtraDeployManifest x)) value.extraDeploy
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
autoDeployChartsModule = lib.types.submodule (
|
||||||
|
{ config, ... }:
|
||||||
|
{
|
||||||
|
options = {
|
||||||
|
enable = lib.mkOption {
|
||||||
|
type = lib.types.bool;
|
||||||
|
default = true;
|
||||||
|
example = false;
|
||||||
|
description = ''
|
||||||
|
Whether to enable the installation of this Helm chart. Note that setting
|
||||||
|
this option to `false` will not uninstall the chart from the cluster, if
|
||||||
|
it was previously installed. Please use the `--disable` flag or `.skip`
|
||||||
|
files to delete/disable Helm charts, as mentioned in the
|
||||||
|
[docs](https://docs.k3s.io/installation/packaged-components#disabling-manifests).
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
repo = lib.mkOption {
|
||||||
|
type = lib.types.nonEmptyStr;
|
||||||
|
example = "https://kubernetes.github.io/ingress-nginx";
|
||||||
|
description = ''
|
||||||
|
The repo of the Helm chart. Only has an effect if `package` is not set.
|
||||||
|
The Helm chart is fetched during build time and placed as a `.tgz` archive on the
|
||||||
|
filesystem.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
name = lib.mkOption {
|
||||||
|
type = lib.types.nonEmptyStr;
|
||||||
|
example = "ingress-nginx";
|
||||||
|
description = ''
|
||||||
|
The name of the Helm chart. Only has an effect if `package` is not set.
|
||||||
|
The Helm chart is fetched during build time and placed as a `.tgz` archive on the
|
||||||
|
filesystem.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
version = lib.mkOption {
|
||||||
|
type = lib.types.nonEmptyStr;
|
||||||
|
example = "4.7.0";
|
||||||
|
description = ''
|
||||||
|
The version of the Helm chart. Only has an effect if `package` is not set.
|
||||||
|
The Helm chart is fetched during build time and placed as a `.tgz` archive on the
|
||||||
|
filesystem.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
hash = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
example = "sha256-ej+vpPNdiOoXsaj1jyRpWLisJgWo8EqX+Z5VbpSjsPA=";
|
||||||
|
default = "";
|
||||||
|
description = ''
|
||||||
|
The hash of the packaged Helm chart. Only has an effect if `package` is not set.
|
||||||
|
The Helm chart is fetched during build time and placed as a `.tgz` archive on the
|
||||||
|
filesystem.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
package = lib.mkOption {
|
||||||
|
type = with lib.types; either path package;
|
||||||
|
example = lib.literalExpression "../my-helm-chart.tgz";
|
||||||
|
description = ''
|
||||||
|
The packaged Helm chart. Overwrites the options `repo`, `name`, `version`
|
||||||
|
and `hash` in case of conflicts.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
targetNamespace = lib.mkOption {
|
||||||
|
type = lib.types.nonEmptyStr;
|
||||||
|
default = "default";
|
||||||
|
example = "kube-system";
|
||||||
|
description = "The namespace in which the Helm chart gets installed.";
|
||||||
|
};
|
||||||
|
|
||||||
|
createNamespace = lib.mkOption {
|
||||||
|
type = lib.types.bool;
|
||||||
|
default = false;
|
||||||
|
example = true;
|
||||||
|
description = "Whether to create the target namespace if not present.";
|
||||||
|
};
|
||||||
|
|
||||||
|
values = lib.mkOption {
|
||||||
|
type = with lib.types; either path attrs;
|
||||||
|
default = { };
|
||||||
|
example = {
|
||||||
|
replicaCount = 3;
|
||||||
|
hostName = "my-host";
|
||||||
|
server = {
|
||||||
|
name = "nginx";
|
||||||
|
port = 80;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
description = ''
|
||||||
|
Override default chart values via Nix expressions. This is equivalent to setting
|
||||||
|
values in a `values.yaml` file.
|
||||||
|
|
||||||
|
**WARNING**: The values (including secrets!) specified here are exposed unencrypted
|
||||||
|
in the world-readable nix store.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
extraDeploy = lib.mkOption {
|
||||||
|
type = with lib.types; listOf (either path attrs);
|
||||||
|
default = [ ];
|
||||||
|
example = lib.literalExpression ''
|
||||||
|
[
|
||||||
|
../manifests/my-extra-deployment.yaml
|
||||||
|
{
|
||||||
|
apiVersion = "v1";
|
||||||
|
kind = "Service";
|
||||||
|
metadata = {
|
||||||
|
name = "app-service";
|
||||||
|
};
|
||||||
|
spec = {
|
||||||
|
selector = {
|
||||||
|
"app.kubernetes.io/name" = "MyApp";
|
||||||
|
};
|
||||||
|
ports = [
|
||||||
|
{
|
||||||
|
name = "name-of-service-port";
|
||||||
|
protocol = "TCP";
|
||||||
|
port = 80;
|
||||||
|
targetPort = "http-web-svc";
|
||||||
|
}
|
||||||
|
];
|
||||||
|
};
|
||||||
|
}
|
||||||
|
];
|
||||||
|
'';
|
||||||
|
description = "List of extra Kubernetes manifests to deploy with this Helm chart.";
|
||||||
|
};
|
||||||
|
|
||||||
|
extraFieldDefinitions = lib.mkOption {
|
||||||
|
inherit (manifestFormat) type;
|
||||||
|
default = { };
|
||||||
|
example = {
|
||||||
|
spec = {
|
||||||
|
bootstrap = true;
|
||||||
|
helmVersion = "v2";
|
||||||
|
backOffLimit = 3;
|
||||||
|
jobImage = "custom-helm-controller:v0.0.1";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
description = ''
|
||||||
|
Extra HelmChart field definitions that are merged with the rest of the HelmChart
|
||||||
|
custom resource. This can be used to set advanced fields or to overwrite
|
||||||
|
generated fields. See <https://docs.${name}.io/helm#helmchart-field-definitions>
|
||||||
|
for possible fields.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config.package = lib.mkDefault (fetchHelm {
|
||||||
|
inherit (config)
|
||||||
|
repo
|
||||||
|
name
|
||||||
|
version
|
||||||
|
hash
|
||||||
|
;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
manifestModule = lib.types.submodule (
|
||||||
|
{
|
||||||
|
name,
|
||||||
|
config,
|
||||||
|
options,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
{
|
||||||
|
options = {
|
||||||
|
enable = lib.mkOption {
|
||||||
|
type = lib.types.bool;
|
||||||
|
default = true;
|
||||||
|
description = "Whether this manifest file should be generated.";
|
||||||
|
};
|
||||||
|
|
||||||
|
target = lib.mkOption {
|
||||||
|
type = lib.types.nonEmptyStr;
|
||||||
|
example = "manifest.yaml";
|
||||||
|
description = ''
|
||||||
|
Name of the symlink (relative to {file}`${manifestDir}`).
|
||||||
|
Defaults to the attribute name.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
content = lib.mkOption {
|
||||||
|
type = with lib.types; nullOr (either attrs (listOf attrs));
|
||||||
|
default = null;
|
||||||
|
description = ''
|
||||||
|
Content of the manifest file. A single attribute set will
|
||||||
|
generate a single document YAML file. A list of attribute sets
|
||||||
|
will generate multiple documents separated by `---` in a single
|
||||||
|
YAML file.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
source = lib.mkOption {
|
||||||
|
type = lib.types.path;
|
||||||
|
example = lib.literalExpression "./manifests/app.yaml";
|
||||||
|
description = ''
|
||||||
|
Path of the source `.yaml` file.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = {
|
||||||
|
target = lib.mkDefault (mkManifestTarget name);
|
||||||
|
source = lib.mkIf (config.content != null) (
|
||||||
|
let
|
||||||
|
name' = "${name}-manifest-" + builtins.baseNameOf name;
|
||||||
|
mkSource = mkManifestSource name';
|
||||||
|
in
|
||||||
|
lib.mkDerivedConfig options.content mkSource
|
||||||
|
);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
);
|
||||||
|
in
|
||||||
|
{
|
||||||
|
paths = {
|
||||||
|
inherit
|
||||||
|
manifestDir
|
||||||
|
imageDir
|
||||||
|
containerdConfigTemplateFile
|
||||||
|
staticContentChartDir
|
||||||
|
;
|
||||||
|
};
|
||||||
|
|
||||||
|
# interface
|
||||||
|
|
||||||
|
options = {
|
||||||
|
enable = lib.mkEnableOption name;
|
||||||
|
|
||||||
|
package = lib.mkPackageOption pkgs name { };
|
||||||
|
|
||||||
|
role = lib.mkOption {
|
||||||
|
description = "Whether ${name} should run as a server or agent.";
|
||||||
|
default = "server";
|
||||||
|
type = lib.types.enum [
|
||||||
|
"server"
|
||||||
|
"agent"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
serverAddr = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
description = "The ${name} server to connect to, used to join a cluster.";
|
||||||
|
example = "https://10.0.0.10:6443";
|
||||||
|
default = "";
|
||||||
|
};
|
||||||
|
|
||||||
|
token = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
description = ''
|
||||||
|
The ${name} token to use when connecting to a server.
|
||||||
|
|
||||||
|
**WARNING**: This option will expose your token unencrypted in the world-readable nix store.
|
||||||
|
If this is undesired use the tokenFile option instead.
|
||||||
|
'';
|
||||||
|
default = "";
|
||||||
|
};
|
||||||
|
|
||||||
|
tokenFile = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.path;
|
||||||
|
description = "File path containing the ${name} token to use when connecting to a server.";
|
||||||
|
default = null;
|
||||||
|
};
|
||||||
|
|
||||||
|
agentToken = lib.mkOption {
|
||||||
|
type = lib.types.str;
|
||||||
|
description = ''
|
||||||
|
The ${name} token agents can use to connect to the server.
|
||||||
|
This option only makes sense on server nodes (`role = server`).
|
||||||
|
|
||||||
|
**WARNING**: This option will expose your token unencrypted in the world-readable nix store.
|
||||||
|
If this is undesired use the tokenFile option instead.
|
||||||
|
'';
|
||||||
|
default = "";
|
||||||
|
};
|
||||||
|
|
||||||
|
agentTokenFile = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.path;
|
||||||
|
description = ''
|
||||||
|
File path containing the ${name} token agents can use to connect to the server.
|
||||||
|
This option only makes sense on server nodes (`role = server`).
|
||||||
|
'';
|
||||||
|
default = null;
|
||||||
|
};
|
||||||
|
|
||||||
|
extraFlags = lib.mkOption {
|
||||||
|
description = "Extra flags to pass to the ${name} command.";
|
||||||
|
type = with lib.types; either str (listOf str);
|
||||||
|
default = [ ];
|
||||||
|
example = [
|
||||||
|
"--etcd-expose-metrics"
|
||||||
|
"--cluster-cidr 10.24.0.0/16"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
environmentFile = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.path;
|
||||||
|
description = ''
|
||||||
|
File path containing environment variables for configuring the ${name} service in the format of an EnvironmentFile. See {manpage}`systemd.exec(5)`.
|
||||||
|
'';
|
||||||
|
default = null;
|
||||||
|
};
|
||||||
|
|
||||||
|
configPath = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.path;
|
||||||
|
default = null;
|
||||||
|
description = "File path containing the ${name} YAML config. This is useful when the config is generated (for example on boot).";
|
||||||
|
};
|
||||||
|
|
||||||
|
disable = lib.mkOption {
|
||||||
|
type = lib.types.listOf lib.types.str;
|
||||||
|
description = "Disable default components via the `--disable` flag.";
|
||||||
|
default = [ ];
|
||||||
|
};
|
||||||
|
|
||||||
|
nodeName = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.str;
|
||||||
|
description = "Node name.";
|
||||||
|
default = null;
|
||||||
|
};
|
||||||
|
|
||||||
|
nodeLabel = lib.mkOption {
|
||||||
|
type = lib.types.listOf lib.types.str;
|
||||||
|
description = "Registering and starting kubelet with set of labels.";
|
||||||
|
default = [ ];
|
||||||
|
};
|
||||||
|
|
||||||
|
nodeTaint = lib.mkOption {
|
||||||
|
type = lib.types.listOf lib.types.str;
|
||||||
|
description = "Registering kubelet with set of taints.";
|
||||||
|
default = [ ];
|
||||||
|
};
|
||||||
|
|
||||||
|
nodeIP = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.str;
|
||||||
|
description = "IPv4/IPv6 addresses to advertise for node.";
|
||||||
|
default = null;
|
||||||
|
};
|
||||||
|
|
||||||
|
selinux = lib.mkOption {
|
||||||
|
type = lib.types.bool;
|
||||||
|
description = "Enable SELinux in containerd.";
|
||||||
|
default = false;
|
||||||
|
};
|
||||||
|
|
||||||
|
      manifests = lib.mkOption {
        type = lib.types.attrsOf manifestModule;
        default = { };
        example = lib.literalExpression ''
          {
            deployment.source = ../manifests/deployment.yaml;
            my-service = {
              enable = false;
              target = "app-service.yaml";
              content = {
                apiVersion = "v1";
                kind = "Service";
                metadata = {
                  name = "app-service";
                };
                spec = {
                  selector = {
                    "app.kubernetes.io/name" = "MyApp";
                  };
                  ports = [
                    {
                      name = "name-of-service-port";
                      protocol = "TCP";
                      port = 80;
                      targetPort = "http-web-svc";
                    }
                  ];
                };
              };
            };

            nginx.content = [
              {
                apiVersion = "v1";
                kind = "Pod";
                metadata = {
                  name = "nginx";
                  labels = {
                    "app.kubernetes.io/name" = "MyApp";
                  };
                };
                spec = {
                  containers = [
                    {
                      name = "nginx";
                      image = "nginx:1.14.2";
                      ports = [
                        {
                          containerPort = 80;
                          name = "http-web-svc";
                        }
                      ];
                    }
                  ];
                };
              }
              {
                apiVersion = "v1";
                kind = "Service";
                metadata = {
                  name = "nginx-service";
                };
                spec = {
                  selector = {
                    "app.kubernetes.io/name" = "MyApp";
                  };
                  ports = [
                    {
                      name = "name-of-service-port";
                      protocol = "TCP";
                      port = 80;
                      targetPort = "http-web-svc";
                    }
                  ];
                };
              }
            ];
          }
        '';
        description = ''
          Auto-deploying manifests that are linked to {file}`${manifestDir}` before ${name} starts.
          Note that deleting manifest files will not remove or otherwise modify the resources
          they created. Please use the `--disable` flag or `.skip` files to delete/disable AddOns,
          as mentioned in the [docs](https://docs.k3s.io/installation/packaged-components#disabling-manifests).
          This option only makes sense on server nodes (`role = server`).
          Read the [auto-deploying manifests docs](https://docs.k3s.io/installation/packaged-components#auto-deploying-manifests-addons)
          for further information.

          **WARNING**: If you have multiple server nodes, and set this option on more than one server,
          it is your responsibility to ensure that files stay in sync across those nodes. AddOn content is
          not synced between nodes, and ${name} cannot guarantee correct behavior if different servers attempt
          to deploy conflicting manifests.
        '';
      };
      containerdConfigTemplate = lib.mkOption {
        type = lib.types.nullOr lib.types.str;
        default = null;
        example = lib.literalExpression ''
          # Base config
          {{ template "base" . }}

          # Add a custom runtime
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes."custom"]
          runtime_type = "io.containerd.runc.v2"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes."custom".options]
          BinaryName = "/path/to/custom-container-runtime"
        '';
        description = ''
          Config template for containerd, to be placed at
          `/var/lib/rancher/${name}/agent/etc/containerd/config.toml.tmpl`.
          See the docs on [configuring containerd](https://docs.${name}.io/advanced#configuring-containerd).
        '';
      };

      images = lib.mkOption {
        type = with lib.types; listOf package;
        default = [ ];
        example = lib.literalExpression ''
          [
            (pkgs.dockerTools.pullImage {
              imageName = "docker.io/bitnami/keycloak";
              imageDigest = "sha256:714dfadc66a8e3adea6609bda350345bd3711657b7ef3cf2e8015b526bac2d6b";
              hash = "sha256-IM2BLZ0EdKIZcRWOtuFY9TogZJXCpKtPZnMnPsGlq0Y=";
              finalImageTag = "21.1.2-debian-11-r0";
            })
          ]
        '';
        description = ''
          List of derivations that provide container images.
          All images are linked to {file}`${imageDir}` before ${name} starts and are consequently imported
          by the ${name} agent. This option only makes sense on nodes with an enabled agent.
        '';
      };

      gracefulNodeShutdown = {
        enable = lib.mkEnableOption ''
          graceful node shutdowns where the kubelet attempts to detect
          node system shutdown and terminates pods running on the node. See the
          [documentation](https://kubernetes.io/docs/concepts/cluster-administration/node-shutdown/#graceful-node-shutdown)
          for further information.
        '';

        shutdownGracePeriod = lib.mkOption {
          type = lib.types.nonEmptyStr;
          default = "30s";
          example = "1m30s";
          description = ''
            Specifies the total duration that the node should delay the shutdown by. This is the total
            grace period for pod termination for both regular and critical pods.
          '';
        };

        shutdownGracePeriodCriticalPods = lib.mkOption {
          type = lib.types.nonEmptyStr;
          default = "10s";
          example = "15s";
          description = ''
            Specifies the duration used to terminate critical pods during a node shutdown. This should be
            less than `shutdownGracePeriod`.
          '';
        };
      };

      extraKubeletConfig = lib.mkOption {
        type = with lib.types; attrsOf anything;
        default = { };
        example = {
          podsPerCore = 3;
          memoryThrottlingFactor = 0.69;
          containerLogMaxSize = "5Mi";
        };
        description = ''
          Extra configuration to add to the kubelet's configuration file. The subset of the kubelet's
          configuration that can be configured via a file is defined by the
          [KubeletConfiguration](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/)
          struct. See the
          [documentation](https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/)
          for further information.
        '';
      };

      extraKubeProxyConfig = lib.mkOption {
        type = with lib.types; attrsOf anything;
        default = { };
        example = {
          mode = "nftables";
          clientConnection.kubeconfig = "/var/lib/rancher/${name}/agent/kubeproxy.kubeconfig";
        };
        description = ''
          Extra configuration to add to the kube-proxy's configuration file. The subset of the kube-proxy's
          configuration that can be configured via a file is defined by the
          [KubeProxyConfiguration](https://kubernetes.io/docs/reference/config-api/kube-proxy-config.v1alpha1/)
          struct. Note that the kubeconfig param will be overridden by `clientConnection.kubeconfig`, so you must
          set the `clientConnection.kubeconfig` option if you want to use `extraKubeProxyConfig`.
        '';
      };
      autoDeployCharts = lib.mkOption {
        type = lib.types.attrsOf autoDeployChartsModule;
        apply = lib.mapAttrs mkAutoDeployChartManifest;
        default = { };
        example = lib.literalExpression ''
          {
            harbor = {
              name = "harbor";
              repo = "https://helm.goharbor.io";
              version = "1.14.0";
              hash = "sha256-fMP7q1MIbvzPGS9My91vbQ1d3OJMjwc+o8YE/BXZaYU=";
              values = {
                existingSecretAdminPassword = "harbor-admin";
                expose = {
                  tls = {
                    enabled = true;
                    certSource = "secret";
                    secret.secretName = "my-tls-secret";
                  };
                  ingress = {
                    hosts.core = "example.com";
                    className = "nginx";
                  };
                };
              };
            };
            nginx = {
              repo = "oci://registry-1.docker.io/bitnamicharts/nginx";
              version = "20.0.0";
              hash = "sha256-sy+tzB+i9jIl/tqOMzzuhVhTU4EZVsoSBtPznxF/36c=";
            };
            custom-chart = {
              package = ../charts/my-chart.tgz;
              values = ../values/my-values.yaml;
              extraFieldDefinitions = {
                spec.timeout = "60s";
              };
            };
          }
        '';
        description = ''
          Auto deploying Helm charts that are installed by the ${name} Helm controller. Avoid using
          attribute names that are also used in the [](#opt-services.${name}.manifests) and
          [](#opt-services.${name}.charts) options. Manifests with the same name will override
          auto deploying charts with the same name.
          This option only makes sense on server nodes (`role = server`). See the
          [${name} Helm documentation](https://docs.${name}.io/helm) for further information.

          **WARNING**: If you have multiple server nodes, and set this option on more than one server,
          it is your responsibility to ensure that files stay in sync across those nodes. AddOn content is
          not synced between nodes, and ${name} cannot guarantee correct behavior if different servers attempt
          to deploy conflicting manifests.
        '';
      };

      charts = lib.mkOption {
        type = with lib.types; attrsOf (either path package);
        default = { };
        example = lib.literalExpression ''
          nginx = ../charts/my-nginx-chart.tgz;
          redis = ../charts/my-redis-chart.tgz;
        '';
        description = ''
          Packaged Helm charts that are linked to {file}`${staticContentChartDir}` before ${name} starts.
          The attribute name will be used as the link target (relative to {file}`${staticContentChartDir}`).
          The specified charts will only be placed on the file system and made available via ${
            if staticContentPort == null then
              "the Kubernetes APIServer from within the cluster"
            else
              "port ${toString staticContentPort} on server nodes"
          }. See the [](#opt-services.${name}.autoDeployCharts) option and the
          [${name} Helm controller docs](https://docs.${name}.io/helm#using-the-helm-controller)
          to deploy Helm charts. This option only makes sense on server nodes (`role = server`).
        '';
      };
    };

    # implementation
    config = {
      warnings =
        (lib.optional (cfg.role != "server" && cfg.manifests != { })
          "${name}: Auto deploying manifests are only installed on server nodes (role == server), they will be ignored by this node."
        )
        ++ (lib.optional (cfg.role != "server" && cfg.autoDeployCharts != { })
          "${name}: Auto deploying Helm charts are only installed on server nodes (role == server), they will be ignored by this node."
        )
        ++ (lib.optional (duplicateManifests != [ ])
          "${name}: The following auto deploying charts are overridden by manifests of the same name: ${toString duplicateManifests}."
        )
        ++ (lib.optional (duplicateCharts != [ ])
          "${name}: The following auto deploying charts are overridden by charts of the same name: ${toString duplicateCharts}."
        )
        ++ (lib.optional (cfg.role != "server" && cfg.charts != { })
          "${name}: Helm charts are only made available to the cluster on server nodes (role == server), they will be ignored by this node."
        )
        ++ (lib.optional (
          cfg.role == "agent" && cfg.configPath == null && cfg.serverAddr == ""
        ) "${name}: serverAddr or configPath (with 'server' key) should be set if role is 'agent'")
        ++ (lib.optional
          (cfg.role == "agent" && cfg.configPath == null && cfg.tokenFile == null && cfg.token == "")
          "${name}: token, tokenFile or configPath (with 'token' or 'token-file' keys) should be set if role is 'agent'"
        )
        ++ (lib.optional (
          cfg.role == "agent" && (cfg.agentTokenFile != null || cfg.agentToken != "")
        ) "${name}: agentToken and agentTokenFile should not be set if role is 'agent'");
      environment.systemPackages = [ config.services.${name}.package ];

      # Use systemd-tmpfiles to activate content
      systemd.tmpfiles.settings."10-${name}" =
        let
          # Merge manifests with manifests generated from auto deploying charts, keep only enabled manifests
          enabledManifests = lib.filterAttrs (_: v: v.enable) (cfg.autoDeployCharts // cfg.manifests);
          # Make a systemd-tmpfiles rule for a manifest
          mkManifestRule = manifest: {
            name = "${manifestDir}/${manifest.target}";
            value = {
              "L+".argument = "${manifest.source}";
            };
          };
          # Make a systemd-tmpfiles rule for a container image
          mkImageRule = image: {
            name = "${imageDir}/${image.name}";
            value = {
              "L+".argument = "${image}";
            };
          };
          # Merge charts with charts contained in enabled auto deploying charts
          helmCharts =
            (lib.concatMapAttrs (n: v: { ${n} = v.package; }) (
              lib.filterAttrs (_: v: v.enable) cfg.autoDeployCharts
            ))
            // cfg.charts;
          # Ensure that all chart targets have a .tgz suffix
          mkChartTarget = name: if (lib.hasSuffix ".tgz" name) then name else name + ".tgz";
          # Make a systemd-tmpfiles rule for a chart
          mkChartRule = target: source: {
            name = "${staticContentChartDir}/${mkChartTarget target}";
            value = {
              "L+".argument = "${source}";
            };
          };
        in
        (lib.mapAttrs' (_: v: mkManifestRule v) enabledManifests)
        // (builtins.listToAttrs (map mkImageRule cfg.images))
        // (lib.optionalAttrs (cfg.containerdConfigTemplate != null) {
          ${containerdConfigTemplateFile} = {
            "L+".argument = "${pkgs.writeText "config.toml.tmpl" cfg.containerdConfigTemplate}";
          };
        })
        // (lib.mapAttrs' mkChartRule helmCharts);
      systemd.services.${serviceName} =
        let
          kubeletParams =
            (lib.optionalAttrs (cfg.gracefulNodeShutdown.enable) {
              inherit (cfg.gracefulNodeShutdown) shutdownGracePeriod shutdownGracePeriodCriticalPods;
            })
            // cfg.extraKubeletConfig;
          kubeletConfig = manifestFormat.generate "${name}-kubelet-config" (
            {
              apiVersion = "kubelet.config.k8s.io/v1beta1";
              kind = "KubeletConfiguration";
            }
            // kubeletParams
          );

          kubeProxyConfig = manifestFormat.generate "${name}-kubeProxy-config" (
            {
              apiVersion = "kubeproxy.config.k8s.io/v1alpha1";
              kind = "KubeProxyConfiguration";
            }
            // cfg.extraKubeProxyConfig
          );
        in
        {
          description = "${name} service";
          after = [
            "firewall.service"
            "network-online.target"
          ];
          wants = [
            "firewall.service"
            "network-online.target"
          ];
          wantedBy = [ "multi-user.target" ];
          path = lib.optional config.boot.zfs.enabled config.boot.zfs.package;
          serviceConfig = {
            # See: https://github.com/rancher/k3s/blob/dddbd16305284ae4bd14c0aade892412310d7edc/install.sh#L197
            Type = if cfg.role == "agent" then "exec" else "notify";
            KillMode = "process";
            Delegate = "yes";
            Restart = "always";
            RestartSec = "5s";
            LimitNOFILE = 1048576;
            LimitNPROC = "infinity";
            LimitCORE = "infinity";
            TasksMax = "infinity";
            TimeoutStartSec = 0;
            EnvironmentFile = cfg.environmentFile;
            ExecStart = lib.concatStringsSep " \\\n " (
              [ "${cfg.package}/bin/${name} ${cfg.role}" ]
              ++ (lib.optional (cfg.serverAddr != "") "--server ${cfg.serverAddr}")
              ++ (lib.optional (cfg.token != "") "--token ${cfg.token}")
              ++ (lib.optional (cfg.tokenFile != null) "--token-file ${cfg.tokenFile}")
              ++ (lib.optional (cfg.agentToken != "") "--agent-token ${cfg.agentToken}")
              ++ (lib.optional (cfg.agentTokenFile != null) "--agent-token-file ${cfg.agentTokenFile}")
              ++ (lib.optional (cfg.configPath != null) "--config ${cfg.configPath}")
              ++ (map (d: "--disable=${d}") cfg.disable)
              ++ (lib.optional (cfg.nodeName != null) "--node-name=${cfg.nodeName}")
              ++ (lib.optionals (cfg.nodeLabel != [ ]) (map (l: "--node-label=${l}") cfg.nodeLabel))
              ++ (lib.optionals (cfg.nodeTaint != [ ]) (map (t: "--node-taint=${t}") cfg.nodeTaint))
              ++ (lib.optional (cfg.nodeIP != null) "--node-ip=${cfg.nodeIP}")
              ++ (lib.optional cfg.selinux "--selinux")
              ++ (lib.optional (kubeletParams != { }) "--kubelet-arg=config=${kubeletConfig}")
              ++ (lib.optional (cfg.extraKubeProxyConfig != { }) "--kube-proxy-arg=config=${kubeProxyConfig}")
              ++ extraBinFlags
              ++ (lib.flatten cfg.extraFlags)
            );
          };
        };
    };
  };
in
{
  imports =
    # pass mkRancherModule explicitly instead of via
    # _modules.args to prevent infinite recursion
    let
      args = {
        inherit config lib;
        inherit mkRancherModule;
      };
    in
    [
      (import ./k3s.nix args)
      (import ./rke2.nix args)
    ];

  meta.maintainers = pkgs.rke2.meta.maintainers ++ lib.teams.k3s.members;
}
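Since every option above is generated once by `mkRancherModule` and reused by both distribution modules, a consumer configuration only ever talks to `services.k3s` or `services.rke2`. Below is a minimal sketch, not taken from the PR, showing how the shared kubelet-related options end up in the generated `KubeletConfiguration` that the unit passes via `--kubelet-arg=config=...`; the secret path and label value are placeholders.

```nix
{
  services.k3s = {
    enable = true;
    role = "server";
    tokenFile = "/run/secrets/k3s-token"; # hypothetical secret path
    nodeLabel = [ "example.com/storage=fast" ]; # placeholder label
    gracefulNodeShutdown = {
      enable = true;
      shutdownGracePeriod = "1m30s";
      shutdownGracePeriodCriticalPods = "15s";
    };
    # Merged with the gracefulNodeShutdown settings into one KubeletConfiguration file.
    extraKubeletConfig.containerLogMaxSize = "5Mi";
  };
}
```

The same attribute paths exist under `services.rke2`; only the defaults and the distribution-specific extras below differ.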
137 nixos/modules/services/cluster/rancher/k3s.nix (Normal file)
@@ -0,0 +1,137 @@
{
  config,
  lib,
  mkRancherModule,
  ...
}:
let
  cfg = config.services.k3s;
  baseModule = mkRancherModule {
    name = "k3s";
    extraBinFlags =
      (lib.optional cfg.clusterInit "--cluster-init")
      ++ (lib.optional cfg.disableAgent "--disable-agent");
  };

  removeOption =
    config: instruction:
    lib.mkRemovedOptionModule (
      [
        "services"
        "k3s"
      ]
      ++ config
    ) instruction;
in
{
  imports = [ (removeOption [ "docker" ] "k3s docker option is no longer supported.") ];

  # interface

  options.services.k3s = lib.recursiveUpdate baseModule.options {

    # option overrides

    role.description = ''
      Whether k3s should run as a server or agent.

      If it's a server:

      - By default it also runs workloads as an agent.
      - Starts by default as a standalone server using an embedded sqlite datastore.
      - Configure `clusterInit = true` to switch over to embedded etcd datastore and enable HA mode.
      - Configure `serverAddr` to join an already-initialized HA cluster.

      If it's an agent:

      - `serverAddr` is required.
    '';

    serverAddr.description = ''
      The k3s server to connect to.

      Servers and agents need to communicate with each other. Read
      [the networking docs](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/#networking)
      to know how to configure the firewall.
    '';

    disable.description = ''
      Disable default components, see the [K3s documentation](https://docs.k3s.io/installation/packaged-components#using-the---disable-flag).
    '';

    images = {
      example = lib.literalExpression ''
        [
          (pkgs.dockerTools.pullImage {
            imageName = "docker.io/bitnami/keycloak";
            imageDigest = "sha256:714dfadc66a8e3adea6609bda350345bd3711657b7ef3cf2e8015b526bac2d6b";
            hash = "sha256-IM2BLZ0EdKIZcRWOtuFY9TogZJXCpKtPZnMnPsGlq0Y=";
            finalImageTag = "21.1.2-debian-11-r0";
          })

          config.services.k3s.package.airgap-images
        ]
      '';
      description = ''
        List of derivations that provide container images.
        All images are linked to {file}`${baseModule.paths.imageDir}` before k3s starts and are consequently imported
        by the k3s agent. Consider importing the k3s airgap images archive of the k3s package in
        use, if you want to pre-provision this node with all k3s container images. This option
        only makes sense on nodes with an enabled agent.
      '';
    };

    # k3s-specific options

    clusterInit = lib.mkOption {
      type = lib.types.bool;
      default = false;
      description = ''
        Initialize HA cluster using an embedded etcd datastore.

        If this option is `false` and `role` is `server`, the server starts as a standalone
        server using the default embedded sqlite datastore.

        On a server that was using the default embedded sqlite backend,
        enabling this option will migrate to an embedded etcd DB.

        If an HA cluster using the embedded etcd datastore was already initialized,
        this option has no effect.

        This option only makes sense in a server that is not connecting to another server.

        If you are configuring an HA cluster with an embedded etcd,
        the 1st server must have `clusterInit = true`
        and other servers must connect to it using `serverAddr`.
      '';
    };

    disableAgent = lib.mkOption {
      type = lib.types.bool;
      default = false;
      description = "Only run the server. This option only makes sense for a server.";
    };
  };

  # implementation

  config = lib.mkIf cfg.enable (
    lib.recursiveUpdate baseModule.config {
      warnings = (
        lib.optional (
          cfg.disableAgent && cfg.images != [ ]
        ) "k3s: Images are only imported on nodes with an enabled agent, they will be ignored by this node."
      );

      assertions = [
        {
          assertion = cfg.role == "agent" -> !cfg.disableAgent;
          message = "k3s: disableAgent must be false if role is 'agent'";
        }
        {
          assertion = cfg.role == "agent" -> !cfg.clusterInit;
          message = "k3s: clusterInit must be false if role is 'agent'";
        }
      ];
    }
  );
}
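To illustrate the k3s-specific options above (`clusterInit`, `disableAgent`) together with the shared `serverAddr`/`tokenFile` options, here is a rough two-node HA sketch in the style of a NixOS test `nodes` set. It is not part of the change; the node names, address and secret path are invented for the example.

```nix
{
  # First server: initializes the embedded etcd datastore (clusterInit).
  server1 = {
    services.k3s = {
      enable = true;
      role = "server";
      clusterInit = true;
      tokenFile = "/run/secrets/k3s-token"; # hypothetical secret path
    };
  };

  # Second server: joins via serverAddr and runs the control plane only.
  server2 = {
    services.k3s = {
      enable = true;
      role = "server";
      serverAddr = "https://10.0.0.10:6443"; # example address of server1
      tokenFile = "/run/secrets/k3s-token";
      disableAgent = true;
    };
  };
}
```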
160 nixos/modules/services/cluster/rancher/rke2.nix (Normal file)
@@ -0,0 +1,160 @@
{
  config,
  lib,
  mkRancherModule,
  ...
}:
let
  cfg = config.services.rke2;
  baseModule = mkRancherModule {
    name = "rke2";
    serviceName = "rke2-${cfg.role}"; # upstream default, used by rke2-killall.sh

    extraBinFlags =
      (lib.optional (cfg.cni != null) "--cni=${cfg.cni}")
      ++ (lib.optional cfg.cisHardening "--profile=${
        if lib.versionAtLeast cfg.package.version "1.25" then
          "cis"
        else if lib.versionAtLeast cfg.package.version "1.23" then
          "cis-1.23"
        else
          "cis-1.6"
      }");

    # RKE2 sometimes tries opening YAML manifests on start with O_RDWR, which we can't support
    # without ugly workarounds since they're linked from the read-only /nix/store.
    # https://github.com/rancher/rke2/blob/fa7ed3a87055830924d05009a1071acfbbfbcc2c/pkg/bootstrap/bootstrap.go#L355
    jsonManifests = true;

    # see https://github.com/rancher/rke2/issues/224
    # not all charts can be base64-encoded into chartContent due to
    # https://github.com/k3s-io/helm-controller/issues/267
    staticContentPort = 9345;
  };
in
{
  # interface

  options.services.rke2 = lib.recursiveUpdate baseModule.options {
    # option overrides
    role.description = ''
      Whether rke2 should run as a server or agent.

      If it's a server:

      - By default it also runs workloads as an agent.
      - All options can be set.

      If it's an agent:

      - `serverAddr` is required.
      - `token` or `tokenFile` is required.
      - `agentToken`, `agentTokenFile`, `disable` and `cni` should not be set.
    '';

    disable.description = ''
      Disable default components, see the [RKE2 documentation](https://docs.rke2.io/install/packaged_components#using-the---disable-flag).
    '';

    images = {
      example = lib.literalExpression ''
        [
          (pkgs.dockerTools.pullImage {
            imageName = "docker.io/bitnami/keycloak";
            imageDigest = "sha256:714dfadc66a8e3adea6609bda350345bd3711657b7ef3cf2e8015b526bac2d6b";
            hash = "sha256-IM2BLZ0EdKIZcRWOtuFY9TogZJXCpKtPZnMnPsGlq0Y=";
            finalImageTag = "21.1.2-debian-11-r0";
          })

          config.services.rke2.package.images-core-linux-amd64-tar-zst
          config.services.rke2.package.images-canal-linux-amd64-tar-zst
        ]
      '';
      description = ''
        List of derivations that provide container images.
        All images are linked to {file}`${baseModule.paths.imageDir}` before rke2 starts and are consequently imported
        by the rke2 agent. Consider importing the rke2 core and CNI image archives of the rke2 package in
        use, if you want to pre-provision this node with all rke2 container images. For a full list of available airgap images, check the
        [source](https://github.com/NixOS/nixpkgs/blob/c8a1939887ee6e5f5aae29ce97321c0d83165f7d/pkgs/applications/networking/cluster/rke2/1_32/images-versions.json)
        of the rke2 package in use.
      '';
    };

    # rke2-specific options
    cni = lib.mkOption {
      type =
        with lib.types;
        nullOr (enum [
          "none"
          "canal"
          "cilium"
          "calico"
          "flannel"
        ]);
      description = ''
        CNI plugins to deploy, one of `none`, `calico`, `canal`, `cilium` or `flannel`.

        All CNI plugins get installed via a helm chart after the main components are up and running
        and can be [customized by modifying the helm chart options](https://docs.rke2.io/helm).

        [Learn more about RKE2 and CNI plugins](https://docs.rke2.io/networking/basic_network_options)

        > **WARNING**: Flannel support in RKE2 is currently experimental.
      '';
      default = null;
    };

    cisHardening = lib.mkOption {
      type = lib.types.bool;
      description = ''
        Enable CIS Hardening for RKE2.

        The OS-level configuration options required to pass the CIS benchmark are enabled by default.
        This option only creates the `etcd` user and group, and passes the `--profile=cis` flag to RKE2.

        Learn more about [CIS Hardening for RKE2](https://docs.rke2.io/security/hardening_guide).
      '';
      default = false;
    };
  };

  # implementation

  config = lib.mkIf cfg.enable (
    lib.recursiveUpdate baseModule.config {
      warnings = (
        lib.optional (
          cfg.role == "agent" && cfg.cni != null
        ) "rke2: cni should not be set if role is 'agent'"
      );

      # Configure NetworkManager to ignore CNI network interfaces.
      # See: https://docs.rke2.io/known_issues#networkmanager
      environment.etc."NetworkManager/conf.d/rke2-canal.conf" = {
        enable = config.networking.networkmanager.enable;
        text = ''
          [keyfile]
          unmanaged-devices=interface-name:flannel*;interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
        '';
      };

      # CIS hardening
      # https://docs.rke2.io/security/hardening_guide#kernel-parameters
      # https://github.com/rancher/rke2/blob/ef0fc7aa9d3bbaa95ce9b1895972488cbd92e302/bundle/share/rke2/rke2-cis-sysctl.conf
      boot.kernel.sysctl = {
        "vm.panic_on_oom" = 0;
        "vm.overcommit_memory" = 1;
        "kernel.panic" = 10;
        "kernel.panic_on_oops" = 1;
      };
      # https://docs.rke2.io/security/hardening_guide#etcd-is-configured-properly
      users = lib.mkIf cfg.cisHardening {
        users.etcd = {
          isSystemUser = true;
          group = "etcd";
        };
        groups.etcd = { };
      };
    }
  );
}
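For comparison, a hypothetical `services.rke2` server using the rke2-specific options and the airgap image outputs referenced in the option example above could look roughly like this; the token path is a placeholder, and the choice of Cilium and CIS hardening is only illustrative.

```nix
{ config, ... }:
{
  services.rke2 = {
    enable = true;
    role = "server";
    tokenFile = "/run/secrets/rke2-token"; # hypothetical secret path
    cni = "cilium"; # when left at null, no --cni flag is passed at all
    cisHardening = true; # adds --profile=... and creates the etcd user/group
    images = [
      # Pre-provision the core images; a Cilium deployment would likely also
      # want the matching CNI image archive from the package.
      config.services.rke2.package.images-core-linux-amd64-tar-zst
    ];
  };
}
```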
@@ -1,338 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.services.rke2;
in
{
  imports = [ ];

  options.services.rke2 = {
    enable = lib.mkEnableOption "rke2";

    package = lib.mkPackageOption pkgs "rke2" { };

    role = lib.mkOption {
      type = lib.types.enum [
        "server"
        "agent"
      ];
      description = ''
        Whether rke2 should run as a server or agent.

        If it's a server:

        - By default it also runs workloads as an agent.
        - any optionals is allowed.

        If it's an agent:

        - `serverAddr` is required.
        - `token` or `tokenFile` is required.
        - `agentToken` or `agentTokenFile` or `disable` or `cni` are not allowed.
      '';
      default = "server";
    };

    configPath = lib.mkOption {
      type = lib.types.path;
      description = "Load configuration from FILE.";
      default = "/etc/rancher/rke2/config.yaml";
    };

    debug = lib.mkOption {
      type = lib.types.bool;
      description = "Turn on debug logs.";
      default = false;
    };

    dataDir = lib.mkOption {
      type = lib.types.path;
      description = "The folder to hold state in.";
      default = "/var/lib/rancher/rke2";
    };

    token = lib.mkOption {
      type = lib.types.str;
      description = ''
        Shared secret used to join a server or agent to a cluster.

        > WARNING: This option will expose store your token unencrypted world-readable in the nix store.
        If this is undesired use the `tokenFile` option instead.
      '';
      default = "";
    };

    tokenFile = lib.mkOption {
      type = lib.types.nullOr lib.types.path;
      description = "File path containing rke2 token to use when connecting to the server.";
      default = null;
    };

    disable = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      description = "Do not deploy packaged components and delete any deployed components.";
      default = [ ];
    };

    nodeName = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
      description = "Node name.";
      default = null;
    };

    nodeLabel = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      description = "Registering and starting kubelet with set of labels.";
      default = [ ];
    };

    nodeTaint = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      description = "Registering kubelet with set of taints.";
      default = [ ];
    };

    nodeIP = lib.mkOption {
      type = lib.types.nullOr lib.types.str;
      description = "IPv4/IPv6 addresses to advertise for node.";
      default = null;
    };

    agentToken = lib.mkOption {
      type = lib.types.str;
      description = ''
        Shared secret used to join agents to the cluster, but not servers.

        > **WARNING**: This option will expose store your token unencrypted world-readable in the nix store.
        If this is undesired use the `agentTokenFile` option instead.
      '';
      default = "";
    };

    agentTokenFile = lib.mkOption {
      type = lib.types.nullOr lib.types.path;
      description = "File path containing rke2 agent token to use when connecting to the server.";
      default = null;
    };

    serverAddr = lib.mkOption {
      type = lib.types.str;
      description = "The rke2 server to connect to, used to join a cluster.";
      example = "https://10.0.0.10:6443";
      default = "";
    };

    selinux = lib.mkOption {
      type = lib.types.bool;
      description = "Enable SELinux in containerd.";
      default = false;
    };

    cni = lib.mkOption {
      type = lib.types.enum [
        "none"
        "canal"
        "cilium"
        "calico"
        "flannel"
      ];
      description = ''
        CNI Plugins to deploy, one of `none`, `calico`, `canal`, `cilium` or `flannel`.

        All CNI plugins get installed via a helm chart after the main components are up and running
        and can be [customized by modifying the helm chart options](https://docs.rke2.io/helm).

        [Learn more about RKE2 and CNI plugins](https://docs.rke2.io/networking/basic_network_options)

        > **WARNING**: Flannel support in RKE2 is currently experimental.
      '';
      default = "canal";
    };

    cisHardening = lib.mkOption {
      type = lib.types.bool;
      description = ''
        Enable CIS Hardening for RKE2.

        It will set the configurations and controls required to address Kubernetes benchmark controls
        from the Center for Internet Security (CIS).

        Learn more about [CIS Hardening for RKE2](https://docs.rke2.io/security/hardening_guide).

        > **NOTICE**:
        >
        > You may need restart the `systemd-sysctl` muaually by:
        >
        > ```shell
        > sudo systemctl restart systemd-sysctl
        > ```
      '';
      default = false;
    };

    extraFlags = lib.mkOption {
      type = lib.types.listOf lib.types.str;
      description = ''
        Extra flags to pass to the rke2 service/agent.

        Here you can find all the available flags:

        - [Server Configuration Reference](https://docs.rke2.io/reference/server_config)
        - [Agent Configuration Reference](https://docs.rke2.io/reference/linux_agent_config)
      '';
      example = [
        "--disable-kube-proxy"
        "--cluster-cidr=10.24.0.0/16"
      ];
      default = [ ];
    };

    environmentVars = lib.mkOption {
      type = lib.types.attrsOf lib.types.str;
      description = ''
        Environment variables for configuring the rke2 service/agent.

        Here you can find all the available environment variables:

        - [Server Configuration Reference](https://docs.rke2.io/reference/server_config)
        - [Agent Configuration Reference](https://docs.rke2.io/reference/linux_agent_config)

        Besides the options above, you can also active environment variables by edit/create those files:

        - `/etc/default/rke2`
        - `/etc/sysconfig/rke2`
        - `/usr/local/lib/systemd/system/rke2.env`
      '';
      # See: https://github.com/rancher/rke2/blob/master/bundle/lib/systemd/system/rke2-server.env#L1
      default = {
        HOME = "/root";
      };
    };
  };

  config = lib.mkIf cfg.enable {
    assertions = [
      {
        assertion = cfg.role == "agent" -> (builtins.pathExists cfg.configPath || cfg.serverAddr != "");
        message = "serverAddr or configPath (with 'server' key) should be set if role is 'agent'";
      }
      {
        assertion =
          cfg.role == "agent"
          -> (builtins.pathExists cfg.configPath || cfg.tokenFile != null || cfg.token != "");
        message = "token or tokenFile or configPath (with 'token' or 'token-file' keys) should be set if role is 'agent'";
      }
      {
        assertion = cfg.role == "agent" -> !(cfg.agentTokenFile != null || cfg.agentToken != "");
        message = "agentToken or agentTokenFile should NOT be set if role is 'agent'";
      }
      {
        assertion = cfg.role == "agent" -> !(cfg.disable != [ ]);
        message = "disable should not be set if role is 'agent'";
      }
      {
        assertion = cfg.role == "agent" -> !(cfg.cni != "canal");
        message = "cni should not be set if role is 'agent'";
      }
    ];

    environment.systemPackages = [ config.services.rke2.package ];
    # To configure NetworkManager to ignore calico/flannel related network interfaces.
    # See: https://docs.rke2.io/known_issues#networkmanager
    environment.etc."NetworkManager/conf.d/rke2-canal.conf" = {
      enable = config.networking.networkmanager.enable;
      text = ''
        [keyfile]
        unmanaged-devices=interface-name:cali*;interface-name:flannel*
      '';
    };
    # See: https://docs.rke2.io/security/hardening_guide#set-kernel-parameters
    boot.kernel.sysctl = lib.mkIf cfg.cisHardening {
      "vm.panic_on_oom" = 0;
      "vm.overcommit_memory" = 1;
      "kernel.panic" = 10;
      "kernel.panic_on_oops" = 1;
    };

    systemd.services."rke2-${cfg.role}" = {
      description = "Rancher Kubernetes Engine v2";
      documentation = [ "https://github.com/rancher/rke2#readme" ];
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        Type = if cfg.role == "agent" then "exec" else "notify";
        EnvironmentFile = [
          "-/etc/default/%N"
          "-/etc/sysconfig/%N"
          "-/usr/local/lib/systemd/system/%N.env"
        ];
        Environment = lib.mapAttrsToList (k: v: "${k}=${v}") cfg.environmentVars;
        KillMode = "process";
        Delegate = "yes";
        LimitNOFILE = 1048576;
        LimitNPROC = "infinity";
        LimitCORE = "infinity";
        TasksMax = "infinity";
        TimeoutStartSec = 0;
        Restart = "always";
        RestartSec = "5s";
        ExecStartPre = [
          # There is a conflict between RKE2 and `nm-cloud-setup.service`. This service add a routing table that
          # interfere with the CNI plugin's configuration. This script checks if the service is enabled and if so,
          # failed the RKE2 start.
          # See: https://github.com/rancher/rke2/issues/1053
          (pkgs.writeScript "check-nm-cloud-setup.sh" ''
            #! ${pkgs.runtimeShell}
            set -x
            ! /run/current-system/systemd/bin/systemctl is-enabled --quiet nm-cloud-setup.service
          '')
          "-${pkgs.kmod}/bin/modprobe br_netfilter"
          "-${pkgs.kmod}/bin/modprobe overlay"
        ];
        ExecStart = "${cfg.package}/bin/rke2 '${cfg.role}' ${
          lib.escapeShellArgs (
            (lib.optional (cfg.configPath != "/etc/rancher/rke2/config.yaml") "--config=${cfg.configPath}")
            ++ (lib.optional cfg.debug "--debug")
            ++ (lib.optional (cfg.dataDir != "/var/lib/rancher/rke2") "--data-dir=${cfg.dataDir}")
            ++ (lib.optional (cfg.token != "") "--token=${cfg.token}")
            ++ (lib.optional (cfg.tokenFile != null) "--token-file=${cfg.tokenFile}")
            ++ (lib.optionals (cfg.role == "server" && cfg.disable != [ ]) (
              map (d: "--disable=${d}") cfg.disable
            ))
            ++ (lib.optional (cfg.nodeName != null) "--node-name=${cfg.nodeName}")
            ++ (lib.optionals (cfg.nodeLabel != [ ]) (map (l: "--node-label=${l}") cfg.nodeLabel))
            ++ (lib.optionals (cfg.nodeTaint != [ ]) (map (t: "--node-taint=${t}") cfg.nodeTaint))
            ++ (lib.optional (cfg.nodeIP != null) "--node-ip=${cfg.nodeIP}")
            ++ (lib.optional (cfg.role == "server" && cfg.agentToken != "") "--agent-token=${cfg.agentToken}")
            ++ (lib.optional (
              cfg.role == "server" && cfg.agentTokenFile != null
            ) "--agent-token-file=${cfg.agentTokenFile}")
            ++ (lib.optional (cfg.serverAddr != "") "--server=${cfg.serverAddr}")
            ++ (lib.optional cfg.selinux "--selinux")
            ++ (lib.optional (cfg.role == "server" && cfg.cni != "canal") "--cni=${cfg.cni}")
            ++ (lib.optional cfg.cisHardening "--profile=${
              if cfg.package.version >= "1.25" then "cis-1.23" else "cis-1.6"
            }")
            ++ cfg.extraFlags
          )
        }";
        ExecStopPost =
          let
            killProcess = pkgs.writeScript "kill-process.sh" ''
              #! ${pkgs.runtimeShell}
              /run/current-system/systemd/bin/systemd-cgls /system.slice/$1 | \
              ${pkgs.gnugrep}/bin/grep -Eo '[0-9]+ (containerd|kubelet)' | \
              ${pkgs.gawk}/bin/awk '{print $1}' | \
              ${pkgs.findutils}/bin/xargs -r ${pkgs.util-linux}/bin/kill
            '';
          in
          "-${killProcess} %n";
      };
    };
  };
}
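The removed module above differs from the merged one in a few user-visible ways: `configPath` now defaults to `null` instead of `/etc/rancher/rke2/config.yaml`, and options such as `environmentVars`, `debug` and `dataDir` do not appear in the new `rke2.nix`. Assuming no compatibility aliases are defined elsewhere in the module set, an old-style environment setting could be expressed against the merged options roughly like this; the file name and variable are invented for the example.

```nix
{ pkgs, ... }:
{
  # Previously: services.rke2.environmentVars.HTTP_PROXY = "http://proxy.example:3128";
  services.rke2 = {
    enable = true;
    # The merged module reads systemd EnvironmentFile-style variables instead.
    environmentFile = pkgs.writeText "rke2.env" ''
      HTTP_PROXY=http://proxy.example:3128
    '';
  };
}
```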
@@ -135,7 +135,7 @@ import ../make-test-python.nix (
       machine.succeed("test -e /var/lib/rancher/k3s/server/manifests/values-file.yaml")
       machine.succeed("test -e /var/lib/rancher/k3s/server/manifests/advanced.yaml")
       # check that the timeout is set correctly, select only the first doc in advanced.yaml
-      advancedManifest = json.loads(machine.succeed("yq -o json 'select(di == 0)' /var/lib/rancher/k3s/server/manifests/advanced.yaml"))
+      advancedManifest = json.loads(machine.succeed("yq -o json '.items[0]' /var/lib/rancher/k3s/server/manifests/advanced.yaml"))
       t.assertEqual(advancedManifest["spec"]["timeout"], "69s", "unexpected value for spec.timeout")
       # wait for test jobs to complete
       machine.wait_until_succeeds("kubectl wait --for=condition=complete job/hello", timeout=180)
@@ -31,46 +31,21 @@ import ../make-test-python.nix (
       ];
     };
   };
-    # A daemonset that responds 'hello' on port 8000
-    networkTestDaemonset = pkgs.writeText "test.yml" ''
-      apiVersion: apps/v1
-      kind: DaemonSet
-      metadata:
-        name: test
-        labels:
-          name: test
-      spec:
-        selector:
-          matchLabels:
-            name: test
-        template:
-          metadata:
-            labels:
-              name: test
-          spec:
-            containers:
-            - name: test
-              image: test.local/hello:local
-              imagePullPolicy: Never
-              resources:
-                limits:
-                  memory: 20Mi
-              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo hello"]
-    '';
     tokenFile = pkgs.writeText "token" "p@s$w0rd";
     agentTokenFile = pkgs.writeText "agent-token" "agentP@s$w0rd";
     # Let flannel use eth1 to enable inter-node communication in tests
-    canalConfig = pkgs.writeText "rke2-canal-config.yaml" ''
-      apiVersion: helm.cattle.io/v1
-      kind: HelmChartConfig
-      metadata:
-        name: rke2-canal
-        namespace: kube-system
-      spec:
-        valuesContent: |-
-          flannel:
-            iface: "eth1"
-    '';
+    canalConfig = {
+      apiVersion = "helm.cattle.io/v1";
+      kind = "HelmChartConfig";
+      metadata = {
+        name = "rke2-canal";
+        namespace = "kube-system";
+      };
+      # spec.valuesContent needs to be a string, either json or yaml
+      spec.valuesContent = builtins.toJSON {
+        flannel.iface = "eth1";
+      };
+    };
   in
   {
     name = "${rke2.name}-multi-node";
@@ -85,23 +60,6 @@ import ../make-test-python.nix (
       ...
     }:
     {
-      # Setup image archives to be imported by rke2
-      systemd.tmpfiles.settings."10-rke2" = {
-        "/var/lib/rancher/rke2/agent/images/rke2-images-core.tar.zst" = {
-          "L+".argument = "${coreImages}";
-        };
-        "/var/lib/rancher/rke2/agent/images/rke2-images-canal.tar.zst" = {
-          "L+".argument = "${canalImages}";
-        };
-        "/var/lib/rancher/rke2/agent/images/hello.tar.zst" = {
-          "L+".argument = "${helloImage}";
-        };
-        # Copy the canal config so that rke2 can write the remaining default values to it
-        "/var/lib/rancher/rke2/server/manifests/rke2-canal-config.yaml" = {
-          "C".argument = "${canalConfig}";
-        };
-      };
-
       # Canal CNI with VXLAN
       networking.firewall.allowedUDPPorts = [ 8472 ];
       networking.firewall.allowedTCPPorts = [
@@ -134,6 +92,41 @@ import ../make-test-python.nix (
         "rke2-snapshot-controller-crd"
         "rke2-snapshot-validation-webhook"
       ];
+      images = [
+        coreImages
+        canalImages
+        helloImage
+      ];
+      manifests = {
+        canal-config.content = canalConfig;
+        # A daemonset that responds 'hello' on port 8000
+        network-test.content = {
+          apiVersion = "apps/v1";
+          kind = "DaemonSet";
+          metadata = {
+            name = "test";
+            labels.name = "test";
+          };
+          spec = {
+            selector.matchLabels.name = "test";
+            template = {
+              metadata.labels.name = "test";
+              spec.containers = [
+                {
+                  name = "hello";
+                  image = "${helloImage.imageName}:${helloImage.imageTag}";
+                  imagePullPolicy = "Never";
+                  command = [
+                    "socat"
+                    "TCP4-LISTEN:8000,fork"
+                    "EXEC:echo hello"
+                  ];
+                }
+              ];
+            };
+          };
+        };
+      };
     };
   };

@@ -145,22 +138,6 @@ import ../make-test-python.nix (
       ...
     }:
     {
-      # Setup image archives to be imported by rke2
-      systemd.tmpfiles.settings."10-rke2" = {
-        "/var/lib/rancher/rke2/agent/images/rke2-images-core.linux-amd64.tar.zst" = {
-          "L+".argument = "${coreImages}";
-        };
-        "/var/lib/rancher/rke2/agent/images/rke2-images-canal.linux-amd64.tar.zst" = {
-          "L+".argument = "${canalImages}";
-        };
-        "/var/lib/rancher/rke2/agent/images/hello.tar.zst" = {
-          "L+".argument = "${helloImage}";
-        };
-        "/var/lib/rancher/rke2/server/manifests/rke2-canal-config.yaml" = {
-          "C".argument = "${canalConfig}";
-        };
-      };
-
       # Canal CNI health checks
       networking.firewall.allowedTCPPorts = [ 9099 ];
       # Canal CNI with VXLAN
@@ -177,6 +154,12 @@ import ../make-test-python.nix (
         tokenFile = agentTokenFile;
         serverAddr = "https://${nodes.server.networking.primaryIPAddress}:9345";
         nodeIP = config.networking.primaryIPAddress;
+        manifests.canal-config.content = canalConfig;
+        images = [
+          coreImages
+          canalImages
+          helloImage
+        ];
       };
     };
   };
@@ -199,8 +182,7 @@ import ../make-test-python.nix (
       server.succeed("${kubectl} cluster-info")
       server.wait_until_succeeds("${kubectl} get serviceaccount default")

-      # Now create a pod on each node via a daemonset and verify they can talk to each other.
-      server.succeed("${kubectl} apply -f ${networkTestDaemonset}")
+      # Now verify that each daemonset pod can talk to each other.
       server.wait_until_succeeds(
         f'[ "$(${kubectl} get ds test -o json | ${jq} .status.numberReady)" -eq {len(machines)} ]'
       )
@@ -217,9 +199,9 @@ import ../make-test-python.nix (
       server.wait_until_succeeds(f"ping -c 1 {pod_ip}", timeout=5)
       agent.wait_until_succeeds(f"ping -c 1 {pod_ip}", timeout=5)
       # Verify the server can exec into the pod
-      # for pod in pods:
-      #   resp = server.succeed(f"${kubectl} exec {pod} -- socat TCP:{pod_ip}:8000 -")
-      #   assert resp.strip() == "hello", f"Unexpected response from hello daemonset: {resp.strip()}"
+      for pod in pods:
+        resp = server.succeed(f"${kubectl} exec {pod} -- socat TCP:{pod_ip}:8000 -").strip()
+        assert resp == "hello", f"Unexpected response from hello daemonset: {resp}"
     '';
   }
 )
@@ -26,19 +26,13 @@ import ../make-test-python.nix (
       copyToRoot = pkgs.hello;
       config.Entrypoint = [ "${pkgs.hello}/bin/hello" ];
     };
-    testJobYaml = pkgs.writeText "test.yaml" ''
-      apiVersion: batch/v1
-      kind: Job
-      metadata:
-        name: test
-      spec:
-        template:
-          spec:
-            containers:
-            - name: test
-              image: "test.local/hello:local"
-            restartPolicy: Never
-    '';
+    # A ConfigMap in regular yaml format
+    cmFile = (pkgs.formats.yaml { }).generate "rke2-manifest-from-file.yaml" {
+      apiVersion = "v1";
+      kind = "ConfigMap";
+      metadata.name = "from-file";
+      data.username = "foo-file";
+    };
   in
   {
     name = "${rke2.name}-single-node";
@@ -51,19 +45,6 @@ import ../make-test-python.nix (
       ...
     }:
     {
-      # Setup image archives to be imported by rke2
-      systemd.tmpfiles.settings."10-rke2" = {
-        "/var/lib/rancher/rke2/agent/images/rke2-images-core.tar.zst" = {
-          "L+".argument = "${coreImages}";
-        };
-        "/var/lib/rancher/rke2/agent/images/rke2-images-canal.tar.zst" = {
-          "L+".argument = "${canalImages}";
-        };
-        "/var/lib/rancher/rke2/agent/images/hello.tar.zst" = {
-          "L+".argument = "${helloImage}";
-        };
-      };
-
       # RKE2 needs more resources than the default
       virtualisation.cores = 4;
       virtualisation.memorySize = 4096;
@@ -84,6 +65,47 @@ import ../make-test-python.nix (
         "rke2-snapshot-controller-crd"
         "rke2-snapshot-validation-webhook"
       ];
+      images = [
+        coreImages
+        canalImages
+        helloImage
+      ];
+      manifests = {
+        test-job.content = {
+          apiVersion = "batch/v1";
+          kind = "Job";
+          metadata.name = "test";
+          spec.template.spec = {
+            containers = [
+              {
+                name = "hello";
+                image = "${helloImage.imageName}:${helloImage.imageTag}";
+              }
+            ];
+            restartPolicy = "Never";
+          };
+        };
+        disabled = {
+          enable = false;
+          content = {
+            apiVersion = "v1";
+            kind = "ConfigMap";
+            metadata.name = "disabled";
+            data.username = "foo";
+          };
+        };
+        from-file.source = "${cmFile}";
+        custom-target = {
+          enable = true;
+          target = "my-manifest.json";
+          content = {
+            apiVersion = "v1";
+            kind = "ConfigMap";
+            metadata.name = "custom-target";
+            data.username = "foo-custom";
+          };
+        };
+      };
     };
   };

@@ -95,14 +117,28 @@ import ../make-test-python.nix (
     ''
       start_all()

-      machine.wait_for_unit("rke2-server")
-      machine.succeed("${kubectl} cluster-info")
-
-      machine.wait_until_succeeds("${kubectl} get serviceaccount default")
-      machine.succeed("${kubectl} apply -f ${testJobYaml}")
-      machine.wait_until_succeeds("${kubectl} wait --for 'condition=complete' job/test")
-      output = machine.succeed("${kubectl} logs -l batch.kubernetes.io/job-name=test")
-      assert output.rstrip() == "Hello, world!", f"unexpected output of test job: {output}"
+      with subtest("Start cluster"):
+        machine.wait_for_unit("rke2-server")
+        machine.succeed("${kubectl} cluster-info")
+        machine.wait_until_succeeds("${kubectl} get serviceaccount default")
+
+      with subtest("Test job completes successfully"):
+        machine.wait_until_succeeds("${kubectl} wait --for 'condition=complete' job/test")
+        output = machine.succeed("${kubectl} logs -l batch.kubernetes.io/job-name=test").rstrip()
+        assert output == "Hello, world!", f"unexpected output of test job: {output}"
+
+      with subtest("ConfigMap from-file exists"):
+        output = machine.succeed("${kubectl} get cm from-file -o=jsonpath='{.data.username}'").rstrip()
+        assert output == "foo-file", f"Unexpected data in Configmap from-file: {output}"
+
+      with subtest("ConfigMap custom-target exists"):
+        # Check that the file exists at the custom target path
+        machine.succeed("ls /var/lib/rancher/rke2/server/manifests/my-manifest.json")
+        output = machine.succeed("${kubectl} get cm custom-target -o=jsonpath='{.data.username}'").rstrip()
+        assert output == "foo-custom", f"Unexpected data in Configmap custom-target: {output}"
+
+      with subtest("Disabled ConfigMap doesn't exist"):
+        machine.fail("${kubectl} get cm disabled")
     '';
   }
 )
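The subtests above pin down how the manifest sub-options behave on disk and in the cluster. A condensed sketch of the same idea for a real configuration follows; the relative source path is a placeholder.

```nix
{
  services.rke2.manifests = {
    # Rendered from Nix and linked to /var/lib/rancher/rke2/server/manifests/my-manifest.json.
    custom-target = {
      target = "my-manifest.json";
      content = {
        apiVersion = "v1";
        kind = "ConfigMap";
        metadata.name = "custom-target";
        data.username = "foo-custom";
      };
    };

    # Links an already existing YAML/JSON file instead of generating one.
    from-file.source = ./manifests/my-configmap.yaml; # hypothetical local path

    # Stays out of the cluster while enable = false.
    disabled = {
      enable = false;
      content = {
        apiVersion = "v1";
        kind = "ConfigMap";
        metadata.name = "disabled";
      };
    };
  };
}
```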
@@ -8,7 +8,7 @@ A K3s maintainer, maintains K3s's:
 - [issues](https://github.com/NixOS/nixpkgs/issues?q=is%3Aissue+is%3Aopen+k3s)
 - [pull requests](https://github.com/NixOS/nixpkgs/pulls?q=is%3Aopen+is%3Apr+label%3A%226.topic%3A+k3s%22)
 - [NixOS tests](https://github.com/NixOS/nixpkgs/tree/master/nixos/tests/k3s)
-- [NixOS service module](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/cluster/k3s/default.nix)
+- [NixOS service module](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/cluster/rancher)
 - [update script](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/cluster/k3s/update-script.sh) (the process of updating)
 - updates (the act of updating) and [r-ryantm bot logs](https://r.ryantm.com/log/k3s/)
 - deprecations
@@ -157,6 +157,7 @@ buildGoModule (finalAttrs: {
     changelog = "https://github.com/rancher/rke2/releases/tag/v${finalAttrs.version}";
     license = lib.licenses.asl20;
     maintainers = with lib.maintainers; [
+      azey7f
       rorosen
       zimbatm
       zygot