nixos/rke2: merge with nixos/k3s
The K3s module was moved from `nixos/.../k3s` to `nixos/.../rancher`. `rke2/default.nix` was moved to `rancher/rke2.nix`, and some options from RKE2 were migrated into the common `default.nix` for backwards compatibility. Manifest generation was also changed: instead of multi-document YAML files, the module now generates `kind: List` manifests.
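For illustration, a minimal sketch of the new behaviour (the resource definitions below are hypothetical; the `services.k3s.manifests.<name>.content` option path is the one defined by the module in this diff): a list-valued manifest is now rendered as a single `kind: List` object instead of a `---`-separated multi-document file.

```nix
{
  # Hypothetical example resources; only the shape of the generated file matters here.
  services.k3s.manifests.example.content = [
    {
      apiVersion = "v1";
      kind = "Namespace";
      metadata.name = "example";
    }
    {
      apiVersion = "v1";
      kind = "ConfigMap";
      metadata = {
        name = "example-config";
        namespace = "example";
      };
      data.hello = "world";
    }
  ];
  # The module wraps the list roughly as:
  #   { apiVersion = "v1"; kind = "List"; items = [ <Namespace> <ConfigMap> ]; }
  # and writes it out as one manifest file instead of two YAML documents.
}
```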
.github/labeler.yml
@@ -261,7 +261,8 @@
   - any:
       - changed-files:
           - any-glob-to-any-file:
-              - nixos/modules/services/cluster/k3s/**/*
+              - nixos/modules/services/cluster/rancher/default.nix
+              - nixos/modules/services/cluster/rancher/k3s.nix
               - nixos/tests/k3s/**/*
               - pkgs/applications/networking/cluster/k3s/**/*
 
@@ -450,6 +450,9 @@ and [release notes for v18](https://goteleport.com/docs/changelog/#1800-070325).
 
 - `services.matter-server` now hosts a debug dashboard on the configured port. Open the port on the firewall with `services.matter-server.openFirewall`.
 
+- `services.k3s` now shares most of its code with `services.rke2`. The merge resulted in both modules providing more options, with `services.rke2` receiving the most improvements.
+  Existing configurations for either module should not be affected.
+
 - The new option [networking.ipips](#opt-networking.ipips) has been added to create IP within IP kind of tunnels (including 4in6, ip6ip6 and ipip).
   With the existing [networking.sits](#opt-networking.sits) option (6in4), it is now possible to create all combinations of IPv4 and IPv6 encapsulation.
 
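As a hedged illustration of the release note above (values are hypothetical; the option names come from the shared module in this commit): after the merge, `services.rke2` accepts the same declarative options as `services.k3s`, for example:

```nix
{
  services.rke2 = {
    enable = true;
    role = "server";
    # Options that previously existed only on services.k3s are now shared:
    manifests.my-namespace.content = {
      apiVersion = "v1";
      kind = "Namespace";
      metadata.name = "my-app";
    };
    nodeLabel = [ "environment=dev" ];
  };
}
```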
@@ -475,7 +475,6 @@
   ./services/cluster/corosync/default.nix
   ./services/cluster/druid/default.nix
   ./services/cluster/hadoop/default.nix
-  ./services/cluster/k3s/default.nix
   ./services/cluster/kubernetes/addon-manager.nix
   ./services/cluster/kubernetes/addons/dns.nix
   ./services/cluster/kubernetes/apiserver.nix
@@ -488,7 +487,7 @@
   ./services/cluster/kubernetes/scheduler.nix
   ./services/cluster/pacemaker/default.nix
   ./services/cluster/patroni/default.nix
-  ./services/cluster/rke2/default.nix
+  ./services/cluster/rancher/default.nix
   ./services/cluster/spark/default.nix
   ./services/cluster/temporal/default.nix
   ./services/computing/boinc/client.nix
@@ -7,26 +7,60 @@
 let
   mkRancherModule =
     {
-      # name used in paths/names, e.g. k3s
-      name ? null,
+      # name used in paths/bin names/etc, e.g. k3s
+      name,
+      # systemd service name
+      serviceName ? name,
       # extra flags to pass to the binary before user-defined extraFlags
       extraBinFlags ? [ ],
+      # generate manifests as JSON rather than YAML, see rke2.nix
+      jsonManifests ? false,
+
+      # which port on the local node hosts content placed in ${staticContentChartDir} on /static/
+      # if null, it's assumed the content can be accessed via https://%{KUBERNETES_API}%/static/
+      staticContentPort ? null,
     }:
     let
       cfg = config.services.${name};
 
+      # Paths defined here are passed to the downstream modules as `paths`
       manifestDir = "/var/lib/rancher/${name}/server/manifests";
       imageDir = "/var/lib/rancher/${name}/agent/images";
       containerdConfigTemplateFile = "/var/lib/rancher/${name}/agent/etc/containerd/config.toml.tmpl";
+      staticContentChartDir = "/var/lib/rancher/${name}/server/static/charts";
 
-      yamlFormat = pkgs.formats.yaml { };
-      yamlDocSeparator = builtins.toFile "yaml-doc-separator" "\n---\n";
-      # Manifests need a valid YAML suffix to be respected
+      manifestFormat = if jsonManifests then pkgs.formats.json { } else pkgs.formats.yaml { };
+      # Manifests need a valid suffix to be respected
       mkManifestTarget =
-        name: if (lib.hasSuffix ".yaml" name || lib.hasSuffix ".yml" name) then name else name + ".yaml";
+        name:
+        if (lib.hasSuffix ".yaml" name || lib.hasSuffix ".yml" name || lib.hasSuffix ".json" name) then
+          name
+        else if jsonManifests then
+          name + ".json"
+        else
+          name + ".yaml";
+      # Returns a path to the final manifest file
+      mkManifestSource =
+        name: manifests:
+        manifestFormat.generate name (
+          if builtins.isList manifests then
+            {
+              apiVersion = "v1";
+              kind = "List";
+              items = manifests;
+            }
+          else
+            manifests
+        );
 
       # Produces a list containing all duplicate manifest names
       duplicateManifests = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
         builtins.attrNames cfg.manifests
       );
+      # Produces a list containing all duplicate chart names
+      duplicateCharts = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
+        builtins.attrNames cfg.charts
+      );
 
       # Converts YAML -> JSON -> Nix
       fromYaml =
@@ -93,7 +127,7 @@ let
           x.outPath
         # x is an attribute set that needs to be converted to a YAML file
         else if builtins.isAttrs x then
-          (yamlFormat.generate "extra-deploy-chart-manifest" x)
+          (manifestFormat.generate "extra-deploy-chart-manifest" x)
         # assume x is a path to a YAML file
         else
           x;
@@ -118,28 +152,24 @@ let
         spec = {
           inherit valuesContent;
           inherit (value) targetNamespace createNamespace;
-          chart = "https://%{KUBERNETES_API}%/static/charts/${name}.tgz";
+          chart =
+            if staticContentPort == null then
+              "https://%{KUBERNETES_API}%/static/charts/${name}.tgz"
+            else
+              "https://localhost:${toString staticContentPort}/static/charts/${name}.tgz";
+          bootstrap = staticContentPort != null; # needed for host network access
         };
       } value.extraFieldDefinitions;
 
-      # Generate a HelmChart custom resource together with extraDeploy manifests. This
-      # generates possibly a multi document YAML file that the auto deploy mechanism
-      # deploys.
+      # Generate a HelmChart custom resource together with extraDeploy manifests.
       mkAutoDeployChartManifest = name: value: {
         # target is the final name of the link created for the manifest file
         target = mkManifestTarget name;
         inherit (value) enable package;
         # source is a store path containing the complete manifest file
-        source = pkgs.concatText "auto-deploy-chart-${name}.yaml" (
-          [
-            (yamlFormat.generate "helm-chart-manifest-${name}.yaml" (mkHelmChartCR name value))
-          ]
-          # alternate the YAML doc separator (---) and extraDeploy manifests to create
-          # multi document YAMLs
-          ++ (lib.concatMap (x: [
-            yamlDocSeparator
-            (mkExtraDeployManifest x)
-          ]) value.extraDeploy)
+        source = mkManifestSource "auto-deploy-chart-${name}" (
+          lib.singleton (mkHelmChartCR name value)
+          ++ builtins.map (x: fromYaml (mkExtraDeployManifest x)) value.extraDeploy
         );
       };
 
@@ -239,7 +269,7 @@ let
           Override default chart values via Nix expressions. This is equivalent to setting
           values in a `values.yaml` file.
 
-          WARNING: The values (including secrets!) specified here are exposed unencrypted
+          **WARNING**: The values (including secrets!) specified here are exposed unencrypted
           in the world-readable nix store.
         '';
       };
@@ -276,7 +306,7 @@ let
       };
 
       extraFieldDefinitions = lib.mkOption {
-        inherit (yamlFormat) type;
+        inherit (manifestFormat) type;
         default = { };
         example = {
           spec = {
@@ -289,7 +319,7 @@ let
         description = ''
           Extra HelmChart field definitions that are merged with the rest of the HelmChart
           custom resource. This can be used to set advanced fields or to overwrite
-          generated fields. See <https://docs.k3s.io/helm#helmchart-field-definitions>
+          generated fields. See <https://docs.${name}.io/helm#helmchart-field-definitions>
           for possible fields.
         '';
       };
@@ -355,18 +385,7 @@ let
           source = lib.mkIf (config.content != null) (
             let
               name' = "${name}-manifest-" + builtins.baseNameOf name;
-              docName = "${name}-manifest-doc-" + builtins.baseNameOf name;
-              mkSource =
-                value:
-                if builtins.isList value then
-                  pkgs.concatText name' (
-                    lib.concatMap (x: [
-                      yamlDocSeparator
-                      (yamlFormat.generate docName x)
-                    ]) value
-                  )
-                else
-                  yamlFormat.generate name' value;
+              mkSource = mkManifestSource name';
             in
             lib.mkDerivedConfig options.content mkSource
           );
@@ -375,7 +394,14 @@ let
       );
     in
     {
-      paths = { inherit manifestDir imageDir containerdConfigTemplateFile; };
+      paths = {
+        inherit
+          manifestDir
+          imageDir
+          containerdConfigTemplateFile
+          staticContentChartDir
+          ;
+      };
 
       # interface
 
@@ -405,7 +431,7 @@ let
        description = ''
          The ${name} token to use when connecting to a server.
 
-          WARNING: This option will expose your token unencrypted in the world-readable nix store.
+          **WARNING**: This option will expose your token unencrypted in the world-readable nix store.
          If this is undesired use the tokenFile option instead.
        '';
        default = "";
@@ -413,7 +439,28 @@ let
 
      tokenFile = lib.mkOption {
        type = lib.types.nullOr lib.types.path;
-        description = "File path containing ${name} token to use when connecting to the server.";
+        description = "File path containing the ${name} token to use when connecting to a server.";
+        default = null;
+      };
+
+      agentToken = lib.mkOption {
+        type = lib.types.str;
+        description = ''
+          The ${name} token agents can use to connect to the server.
+          This option only makes sense on server nodes (`role = server`).
+
+          **WARNING**: This option will expose your token unencrypted in the world-readable nix store.
+          If this is undesired use the tokenFile option instead.
+        '';
+        default = "";
+      };
+
+      agentTokenFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        description = ''
+          File path containing the ${name} token agents can use to connect to the server.
+          This option only makes sense on server nodes (`role = server`).
+        '';
        default = null;
      };
 
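A short sketch of how the new `agentToken`/`agentTokenFile` options pair with the existing `token`/`tokenFile` options (file paths are hypothetical): a server can publish a dedicated agent-only token, which agent nodes then use to join.

```nix
{
  # On a server node: hand out a separate token for agents only.
  services.k3s = {
    enable = true;
    role = "server";
    agentTokenFile = "/run/secrets/k3s-agent-token";
  };
  # Agent nodes would then set `role = "agent"`, `serverAddr`, and
  # `tokenFile = "/run/secrets/k3s-agent-token"` (distributed out of band).
}
```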
@@ -441,6 +488,42 @@ let
        description = "File path containing the ${name} YAML config. This is useful when the config is generated (for example on boot).";
      };
 
+      disable = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        description = "Disable default components via the `--disable` flag.";
+        default = [ ];
+      };
+
+      nodeName = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        description = "Node name.";
+        default = null;
+      };
+
+      nodeLabel = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        description = "Registering and starting kubelet with set of labels.";
+        default = [ ];
+      };
+
+      nodeTaint = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        description = "Registering kubelet with set of taints.";
+        default = [ ];
+      };
+
+      nodeIP = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        description = "IPv4/IPv6 addresses to advertise for node.";
+        default = null;
+      };
+
+      selinux = lib.mkOption {
+        type = lib.types.bool;
+        description = "Enable SELinux in containerd.";
+        default = false;
+      };
+
      manifests = lib.mkOption {
        type = lib.types.attrsOf manifestModule;
        default = { };
@@ -528,6 +611,11 @@ let
          This option only makes sense on server nodes (`role = server`).
          Read the [auto-deploying manifests docs](https://docs.k3s.io/installation/packaged-components#auto-deploying-manifests-addons)
          for further information.
+
+          **WARNING**: If you have multiple server nodes, and set this option on more than one server,
+          it is your responsibility to ensure that files stay in sync across those nodes. AddOn content is
+          not synced between nodes, and ${name} cannot guarantee correct behavior if different servers attempt
+          to deploy conflicting manifests.
        '';
      };
 
@@ -676,10 +764,37 @@ let
        '';
        description = ''
          Auto deploying Helm charts that are installed by the ${name} Helm controller. Avoid using
-          attribute names that are also used in the [](#opt-services.${name}.manifests) option.
-          Manifests with the same name will override auto deploying charts with the same name.
+          attribute names that are also used in the [](#opt-services.${name}.manifests) and
+          [](#opt-services.${name}.charts) options. Manifests with the same name will override
+          auto deploying charts with the same name.
          This option only makes sense on server nodes (`role = server`). See the
          [${name} Helm documentation](https://docs.${name}.io/helm) for further information.
+
+          **WARNING**: If you have multiple server nodes, and set this option on more than one server,
+          it is your responsibility to ensure that files stay in sync across those nodes. AddOn content is
+          not synced between nodes, and ${name} cannot guarantee correct behavior if different servers attempt
+          to deploy conflicting manifests.
+        '';
+      };
+
+      charts = lib.mkOption {
+        type = with lib.types; attrsOf (either path package);
+        default = { };
+        example = lib.literalExpression ''
+          nginx = ../charts/my-nginx-chart.tgz;
+          redis = ../charts/my-redis-chart.tgz;
+        '';
+        description = ''
+          Packaged Helm charts that are linked to {file}`${staticContentChartDir}` before ${name} starts.
+          The attribute name will be used as the link target (relative to {file}`${staticContentChartDir}`).
+          The specified charts will only be placed on the file system and made available via ${
+            if staticContentPort == null then
+              "the Kubernetes APIServer from within the cluster"
+            else
+              "port ${toString staticContentPort} on server nodes"
+          }. See the [](#opt-services.${name}.autoDeployCharts) option and the
+          [${name} Helm controller docs](https://docs.${name}.io/helm#using-the-helm-controller)
+          to deploy Helm charts. This option only makes sense on server nodes (`role = server`).
        '';
      };
    };
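A hedged usage sketch of the two options added above (chart paths are hypothetical): `autoDeployCharts` generates a HelmChart custom resource for a packaged chart, while `charts` only links a chart into the static chart directory; using distinct attribute names avoids the duplicate-name warning defined in the module.

```nix
{
  # Deploy a packaged chart automatically via a generated HelmChart resource.
  services.k3s.autoDeployCharts.nginx = {
    enable = true;
    package = ../charts/my-nginx-chart.tgz;
    targetNamespace = "web";
    createNamespace = true;
  };

  # Only make a chart available under the static chart directory, without
  # generating a HelmChart resource for it.
  services.k3s.charts.redis = ../charts/my-redis-chart.tgz;
}
```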
@@ -697,13 +812,22 @@ let
        ++ (lib.optional (duplicateManifests != [ ])
          "${name}: The following auto deploying charts are overriden by manifests of the same name: ${toString duplicateManifests}."
        )
+        ++ (lib.optional (duplicateCharts != [ ])
+          "${name}: The following auto deploying charts are overriden by charts of the same name: ${toString duplicateCharts}."
+        )
+        ++ (lib.optional (cfg.role != "server" && cfg.charts != { })
+          "${name}: Helm charts are only made available to the cluster on server nodes (role == server), they will be ignored by this node."
+        )
        ++ (lib.optional (
          cfg.role == "agent" && cfg.configPath == null && cfg.serverAddr == ""
        ) "${name}: serverAddr or configPath (with 'server' key) should be set if role is 'agent'")
        ++ (lib.optional
          (cfg.role == "agent" && cfg.configPath == null && cfg.tokenFile == null && cfg.token == "")
-          "${name}: Token or tokenFile or configPath (with 'token' or 'token-file' keys) should be set if role is 'agent'"
-        );
+          "${name}: token, tokenFile or configPath (with 'token' or 'token-file' keys) should be set if role is 'agent'"
+        )
+        ++ (lib.optional (
+          cfg.role == "agent" && !(cfg.agentTokenFile != null || cfg.agentToken != "")
+        ) "${name}: agentToken and agentToken should not be set if role is 'agent'");
 
      environment.systemPackages = [ config.services.${name}.package ];
 
@@ -726,6 +850,21 @@ let
              "L+".argument = "${image}";
            };
          };
+          # Merge charts with charts contained in enabled auto deploying charts
+          helmCharts =
+            (lib.concatMapAttrs (n: v: { ${n} = v.package; }) (
+              lib.filterAttrs (_: v: v.enable) cfg.autoDeployCharts
+            ))
+            // cfg.charts;
+          # Ensure that all chart targets have a .tgz suffix
+          mkChartTarget = name: if (lib.hasSuffix ".tgz" name) then name else name + ".tgz";
+          # Make a systemd-tmpfiles rule for a chart
+          mkChartRule = target: source: {
+            name = "${staticContentChartDir}/${mkChartTarget target}";
+            value = {
+              "L+".argument = "${source}";
+            };
+          };
        in
        (lib.mapAttrs' (_: v: mkManifestRule v) enabledManifests)
        // (builtins.listToAttrs (map mkImageRule cfg.images))
@@ -733,16 +872,17 @@ let
          ${containerdConfigTemplateFile} = {
            "L+".argument = "${pkgs.writeText "config.toml.tmpl" cfg.containerdConfigTemplate}";
          };
-        });
+        })
+        // (lib.mapAttrs' mkChartRule helmCharts);
 
-      systemd.services.${name} =
+      systemd.services.${serviceName} =
        let
          kubeletParams =
            (lib.optionalAttrs (cfg.gracefulNodeShutdown.enable) {
              inherit (cfg.gracefulNodeShutdown) shutdownGracePeriod shutdownGracePeriodCriticalPods;
            })
            // cfg.extraKubeletConfig;
-          kubeletConfig = (pkgs.formats.yaml { }).generate "${name}-kubelet-config" (
+          kubeletConfig = manifestFormat.generate "${name}-kubelet-config" (
            {
              apiVersion = "kubelet.config.k8s.io/v1beta1";
              kind = "KubeletConfiguration";
@@ -750,7 +890,7 @@ let
            // kubeletParams
          );
 
-          kubeProxyConfig = (pkgs.formats.yaml { }).generate "${name}-kubeProxy-config" (
+          kubeProxyConfig = manifestFormat.generate "${name}-kubeProxy-config" (
            {
              apiVersion = "kubeproxy.config.k8s.io/v1alpha1";
              kind = "KubeProxyConfiguration";
@@ -781,13 +921,22 @@ let
          LimitNPROC = "infinity";
          LimitCORE = "infinity";
          TasksMax = "infinity";
+          TimeoutStartSec = 0;
          EnvironmentFile = cfg.environmentFile;
          ExecStart = lib.concatStringsSep " \\\n " (
            [ "${cfg.package}/bin/${name} ${cfg.role}" ]
            ++ (lib.optional (cfg.serverAddr != "") "--server ${cfg.serverAddr}")
            ++ (lib.optional (cfg.token != "") "--token ${cfg.token}")
            ++ (lib.optional (cfg.tokenFile != null) "--token-file ${cfg.tokenFile}")
+            ++ (lib.optional (cfg.agentToken != "") "--agent-token ${cfg.agentToken}")
+            ++ (lib.optional (cfg.agentTokenFile != null) "--agent-token-file ${cfg.agentTokenFile}")
            ++ (lib.optional (cfg.configPath != null) "--config ${cfg.configPath}")
+            ++ (map (d: "--disable=${d}") cfg.disable)
+            ++ (lib.optional (cfg.nodeName != null) "--node-name=${cfg.nodeName}")
+            ++ (lib.optionals (cfg.nodeLabel != [ ]) (map (l: "--node-label=${l}") cfg.nodeLabel))
+            ++ (lib.optionals (cfg.nodeTaint != [ ]) (map (t: "--node-taint=${t}") cfg.nodeTaint))
+            ++ (lib.optional (cfg.nodeIP != null) "--node-ip=${cfg.nodeIP}")
+            ++ (lib.optional cfg.selinux "--selinux")
            ++ (lib.optional (kubeletParams != { }) "--kubelet-arg=config=${kubeletConfig}")
            ++ (lib.optional (cfg.extraKubeProxyConfig != { }) "--kube-proxy-arg=config=${kubeProxyConfig}")
            ++ extraBinFlags
@@ -802,16 +951,16 @@ in
  imports =
    # pass mkRancherModule explicitly instead of via
    # _modules.args to prevent infinite recursion
-    builtins.map (
-      f:
-      import f {
+    let
+      args = {
        inherit config lib;
        inherit mkRancherModule;
-      }
-    ) [ ./k3s.nix ];
+      };
+    in
+    [
+      (import ./k3s.nix args)
+      (import ./rke2.nix args)
+    ];
 
-  meta.maintainers =
-    with lib.maintainers;
-    [ azey7f ] # modules only
-    ++ lib.teams.k3s.members;
+  meta.maintainers = pkgs.rke2.meta.maintainers ++ lib.teams.k3s.members;
 }
@@ -22,12 +22,6 @@ let
    ]
    ++ config
  ) instruction;
-
-  chartDir = "/var/lib/rancher/k3s/server/static/charts";
-  # Produces a list containing all duplicate chart names
-  duplicateCharts = lib.intersectLists (builtins.attrNames cfg.autoDeployCharts) (
-    builtins.attrNames cfg.charts
-  );
 in
 {
  imports = [ (removeOption [ "docker" ] "k3s docker option is no longer supported.") ];
@@ -61,6 +55,10 @@ in
      to know how to configure the firewall.
    '';
 
+    disable.description = ''
+      Disable default components, see the [K3s documentation](https://docs.k3s.io/installation/packaged-components#using-the---disable-flag).
+    '';
+
    images = {
      example = lib.literalExpression ''
        [
@@ -76,23 +74,13 @@ in
      '';
      description = ''
        List of derivations that provide container images.
-        All images are linked to {file}`${baseModule.imageDir}` before k3s starts and are consequently imported
+        All images are linked to {file}`${baseModule.paths.imageDir}` before k3s starts and are consequently imported
        by the k3s agent. Consider importing the k3s airgap images archive of the k3s package in
        use, if you want to pre-provision this node with all k3s container images. This option
        only makes sense on nodes with an enabled agent.
      '';
    };
 
-    autoDeployCharts.description = ''
-      Auto deploying Helm charts that are installed by the k3s Helm controller. Avoid using
-      attribute names that are also used in the [](#opt-services.k3s.manifests) and
-      [](#opt-services.k3s.charts) options. Manifests with the same name will override
-      auto deploying charts with the same name. Similiarly, charts with the same name will
-      overwrite the Helm chart contained in auto deploying charts. This option only makes
-      sense on server nodes (`role = server`). See the
-      [k3s Helm documentation](https://docs.k3s.io/helm) for further information.
-    '';
-
    # k3s-specific options
 
    clusterInit = lib.mkOption {
@@ -122,38 +110,16 @@ in
      default = false;
      description = "Only run the server. This option only makes sense for a server.";
    };
-
-    charts = lib.mkOption {
-      type = with lib.types; attrsOf (either path package);
-      default = { };
-      example = lib.literalExpression ''
-        nginx = ../charts/my-nginx-chart.tgz;
-        redis = ../charts/my-redis-chart.tgz;
-      '';
-      description = ''
-        Packaged Helm charts that are linked to {file}`${chartDir}` before k3s starts.
-        The attribute name will be used as the link target (relative to {file}`${chartDir}`).
-        The specified charts will only be placed on the file system and made available to the
-        Kubernetes APIServer from within the cluster. See the [](#opt-services.k3s.autoDeployCharts)
-        option and the [k3s Helm controller docs](https://docs.k3s.io/helm#using-the-helm-controller)
-        to deploy Helm charts. This option only makes sense on server nodes (`role = server`).
-      '';
-    };
  };
 
  # implementation
 
  config = lib.mkIf cfg.enable (
    lib.recursiveUpdate baseModule.config {
-      warnings =
-        (lib.optional (cfg.role != "server" && cfg.charts != { })
-          "k3s: Helm charts are only made available to the cluster on server nodes (role == server), they will be ignored by this node."
-        )
-        ++ (lib.optional (duplicateCharts != [ ])
-          "k3s: The following auto deploying charts are overriden by charts of the same name: ${toString duplicateCharts}."
-        )
-        ++ (lib.optional (cfg.disableAgent && cfg.images != [ ])
-          "k3s: Images are only imported on nodes with an enabled agent, they will be ignored by this node."
-        );
+      warnings = (
+        lib.optional (
+          cfg.disableAgent && cfg.images != [ ]
+        ) "k3s: Images are only imported on nodes with an enabled agent, they will be ignored by this node."
+      );
 
      assertions = [
@@ -166,26 +132,6 @@ in
          message = "k3s: clusterInit must be false if role is 'agent'";
        }
      ];
-
-      systemd.tmpfiles.settings."10-k3s" =
-        let
-          # Merge charts with charts contained in enabled auto deploying charts
-          helmCharts =
-            (lib.concatMapAttrs (n: v: { ${n} = v.package; }) (
-              lib.filterAttrs (_: v: v.enable) cfg.autoDeployCharts
-            ))
-            // cfg.charts;
-          # Ensure that all chart targets have a .tgz suffix
-          mkChartTarget = name: if (lib.hasSuffix ".tgz" name) then name else name + ".tgz";
-          # Make a systemd-tmpfiles rule for a chart
-          mkChartRule = target: source: {
-            name = "${chartDir}/${mkChartTarget target}";
-            value = {
-              "L+".argument = "${source}";
-            };
-          };
-        in
-        lib.mapAttrs' (n: v: mkChartRule n v) helmCharts;
    }
  );
 }
nixos/modules/services/cluster/rancher/rke2.nix (new file)
@@ -0,0 +1,160 @@
+{
+  config,
+  lib,
+  mkRancherModule,
+  ...
+}:
+let
+  cfg = config.services.rke2;
+  baseModule = mkRancherModule {
+    name = "rke2";
+    serviceName = "rke2-${cfg.role}"; # upstream default, used by rke2-killall.sh
+
+    extraBinFlags =
+      (lib.optional (cfg.cni != null) "--cni=${cfg.cni}")
+      ++ (lib.optional cfg.cisHardening "--profile=${
+        if lib.versionAtLeast cfg.package.version "1.25" then
+          "cis"
+        else if lib.versionAtLeast cfg.package.version "1.23" then
+          "cis-1.23"
+        else
+          "cis-1.6"
+      }");
+
+    # RKE2 sometimes tries opening YAML manifests on start with O_RDWR, which we can't support
+    # without ugly workarounds since they're linked from the read-only /nix/store.
+    # https://github.com/rancher/rke2/blob/fa7ed3a87055830924d05009a1071acfbbfbcc2c/pkg/bootstrap/bootstrap.go#L355
+    jsonManifests = true;
+
+    # see https://github.com/rancher/rke2/issues/224
+    # not all charts can be base64-encoded into chartContent due to
+    # https://github.com/k3s-io/helm-controller/issues/267
+    staticContentPort = 9345;
+  };
+in
+{
+  # interface
+
+  options.services.rke2 = lib.recursiveUpdate baseModule.options {
+    # option overrides
+    role.description = ''
+      Whether rke2 should run as a server or agent.
+
+      If it's a server:
+
+      - By default it also runs workloads as an agent.
+      - All options can be set.
+
+      If it's an agent:
+
+      - `serverAddr` is required.
+      - `token` or `tokenFile` is required.
+      - `agentToken`, `agentTokenFile`, `disable` and `cni` should not be set.
+    '';
+
+    disable.description = ''
+      Disable default components, see the [RKE2 documentation](https://docs.rke2.io/install/packaged_components#using-the---disable-flag).
+    '';
+
+    images = {
+      example = lib.literalExpression ''
+        [
+          (pkgs.dockerTools.pullImage {
+            imageName = "docker.io/bitnami/keycloak";
+            imageDigest = "sha256:714dfadc66a8e3adea6609bda350345bd3711657b7ef3cf2e8015b526bac2d6b";
+            hash = "sha256-IM2BLZ0EdKIZcRWOtuFY9TogZJXCpKtPZnMnPsGlq0Y=";
+            finalImageTag = "21.1.2-debian-11-r0";
+          })
+
+          config.services.rke2.package.images-core-linux-amd64-tar-zst
+          config.services.rke2.package.images-canal-linux-amd64-tar-zst
+        ]
+      '';
+      description = ''
+        List of derivations that provide container images.
+        All images are linked to {file}`${baseModule.paths.imageDir}` before rke2 starts and are consequently imported
+        by the rke2 agent. Consider importing the rke2 core and CNI image archives of the rke2 package in
+        use, if you want to pre-provision this node with all rke2 container images. For a full list of available airgap images, check the
+        [source](https://github.com/NixOS/nixpkgs/blob/c8a1939887ee6e5f5aae29ce97321c0d83165f7d/pkgs/applications/networking/cluster/rke2/1_32/images-versions.json).
+        of the rke2 package in use.
+      '';
+    };
+
+    # rke2-specific options
+    cni = lib.mkOption {
+      type =
+        with lib.types;
+        nullOr (enum [
+          "none"
+          "canal"
+          "cilium"
+          "calico"
+          "flannel"
+        ]);
+      description = ''
+        CNI plugins to deploy, one of `none`, `calico`, `canal`, `cilium` or `flannel`.
+
+        All CNI plugins get installed via a helm chart after the main components are up and running
+        and can be [customized by modifying the helm chart options](https://docs.rke2.io/helm).
+
+        [Learn more about RKE2 and CNI plugins](https://docs.rke2.io/networking/basic_network_options)
+
+        > **WARNING**: Flannel support in RKE2 is currently experimental.
+      '';
+      default = null;
+    };
+
+    cisHardening = lib.mkOption {
+      type = lib.types.bool;
+      description = ''
+        Enable CIS Hardening for RKE2.
+
+        The OS-level configuration options required to pass the CIS benchmark are enabled by default.
+        This option only creates the `etcd` user and group, and passes the `--profile=cis` flag to RKE2.
+
+        Learn more about [CIS Hardening for RKE2](https://docs.rke2.io/security/hardening_guide).
+      '';
+      default = false;
+    };
+  };
+
+  # implementation
+
+  config = lib.mkIf cfg.enable (
+    lib.recursiveUpdate baseModule.config {
+      warnings = (
+        lib.optional (
+          cfg.role == "agent" && cfg.cni != null
+        ) "rke2: cni should not be set if role is 'agent'"
+      );
+
+      # Configure NetworkManager to ignore CNI network interfaces.
+      # See: https://docs.rke2.io/known_issues#networkmanager
+      environment.etc."NetworkManager/conf.d/rke2-canal.conf" = {
+        enable = config.networking.networkmanager.enable;
+        text = ''
+          [keyfile]
+          unmanaged-devices=interface-name:flannel*;interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
+        '';
+      };
+
+      # CIS hardening
+      # https://docs.rke2.io/security/hardening_guide#kernel-parameters
+      # https://github.com/rancher/rke2/blob/ef0fc7aa9d3bbaa95ce9b1895972488cbd92e302/bundle/share/rke2/rke2-cis-sysctl.conf
+      boot.kernel.sysctl = {
+        "vm.panic_on_oom" = 0;
+        "vm.overcommit_memory" = 1;
+        "kernel.panic" = 10;
+        "kernel.panic_on_oops" = 1;
+      };
+      # https://docs.rke2.io/security/hardening_guide#etcd-is-configured-properly
+      users = lib.mkIf cfg.cisHardening {
+        users.etcd = {
+          isSystemUser = true;
+          group = "etcd";
+        };
+        groups.etcd = { };
+      };
+    }
+  );
+}
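A minimal sketch combining the shared options with the rke2-specific `cni` and `cisHardening` options defined in the new file above (the chosen values are only examples):

```nix
{
  services.rke2 = {
    enable = true;
    role = "server";
    cni = "cilium";      # one of none, canal, cilium, calico, flannel
    cisHardening = true; # creates the etcd user/group and passes --profile=cis
    nodeTaint = [ "CriticalAddonsOnly=true:NoExecute" ];
  };
}
```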
@@ -1,338 +0,0 @@
-{
-  config,
-  lib,
-  pkgs,
-  ...
-}:
-let
-  cfg = config.services.rke2;
-in
-{
-  imports = [ ];
-
-  options.services.rke2 = {
-    enable = lib.mkEnableOption "rke2";
-
-    package = lib.mkPackageOption pkgs "rke2" { };
-
-    role = lib.mkOption {
-      type = lib.types.enum [
-        "server"
-        "agent"
-      ];
-      description = ''
-        Whether rke2 should run as a server or agent.
-
-        If it's a server:
-
-        - By default it also runs workloads as an agent.
-        - any optionals is allowed.
-
-        If it's an agent:
-
-        - `serverAddr` is required.
-        - `token` or `tokenFile` is required.
-        - `agentToken` or `agentTokenFile` or `disable` or `cni` are not allowed.
-      '';
-      default = "server";
-    };
-
-    configPath = lib.mkOption {
-      type = lib.types.path;
-      description = "Load configuration from FILE.";
-      default = "/etc/rancher/rke2/config.yaml";
-    };
-
-    debug = lib.mkOption {
-      type = lib.types.bool;
-      description = "Turn on debug logs.";
-      default = false;
-    };
-
-    dataDir = lib.mkOption {
-      type = lib.types.path;
-      description = "The folder to hold state in.";
-      default = "/var/lib/rancher/rke2";
-    };
-
-    token = lib.mkOption {
-      type = lib.types.str;
-      description = ''
-        Shared secret used to join a server or agent to a cluster.
-
-        > WARNING: This option will expose store your token unencrypted world-readable in the nix store.
-        If this is undesired use the `tokenFile` option instead.
-      '';
-      default = "";
-    };
-
-    tokenFile = lib.mkOption {
-      type = lib.types.nullOr lib.types.path;
-      description = "File path containing rke2 token to use when connecting to the server.";
-      default = null;
-    };
-
-    disable = lib.mkOption {
-      type = lib.types.listOf lib.types.str;
-      description = "Do not deploy packaged components and delete any deployed components.";
-      default = [ ];
-    };
-
-    nodeName = lib.mkOption {
-      type = lib.types.nullOr lib.types.str;
-      description = "Node name.";
-      default = null;
-    };
-
-    nodeLabel = lib.mkOption {
-      type = lib.types.listOf lib.types.str;
-      description = "Registering and starting kubelet with set of labels.";
-      default = [ ];
-    };
-
-    nodeTaint = lib.mkOption {
-      type = lib.types.listOf lib.types.str;
-      description = "Registering kubelet with set of taints.";
-      default = [ ];
-    };
-
-    nodeIP = lib.mkOption {
-      type = lib.types.nullOr lib.types.str;
-      description = "IPv4/IPv6 addresses to advertise for node.";
-      default = null;
-    };
-
-    agentToken = lib.mkOption {
-      type = lib.types.str;
-      description = ''
-        Shared secret used to join agents to the cluster, but not servers.
-
-        > **WARNING**: This option will expose store your token unencrypted world-readable in the nix store.
-        If this is undesired use the `agentTokenFile` option instead.
-      '';
-      default = "";
-    };
-
-    agentTokenFile = lib.mkOption {
-      type = lib.types.nullOr lib.types.path;
-      description = "File path containing rke2 agent token to use when connecting to the server.";
-      default = null;
-    };
-
-    serverAddr = lib.mkOption {
-      type = lib.types.str;
-      description = "The rke2 server to connect to, used to join a cluster.";
-      example = "https://10.0.0.10:6443";
-      default = "";
-    };
-
-    selinux = lib.mkOption {
-      type = lib.types.bool;
-      description = "Enable SELinux in containerd.";
-      default = false;
-    };
-
-    cni = lib.mkOption {
-      type = lib.types.enum [
-        "none"
-        "canal"
-        "cilium"
-        "calico"
-        "flannel"
-      ];
-      description = ''
-        CNI Plugins to deploy, one of `none`, `calico`, `canal`, `cilium` or `flannel`.
-
-        All CNI plugins get installed via a helm chart after the main components are up and running
-        and can be [customized by modifying the helm chart options](https://docs.rke2.io/helm).
-
-        [Learn more about RKE2 and CNI plugins](https://docs.rke2.io/networking/basic_network_options)
-
-        > **WARNING**: Flannel support in RKE2 is currently experimental.
-      '';
-      default = "canal";
-    };
-
-    cisHardening = lib.mkOption {
-      type = lib.types.bool;
-      description = ''
-        Enable CIS Hardening for RKE2.
-
-        It will set the configurations and controls required to address Kubernetes benchmark controls
-        from the Center for Internet Security (CIS).
-
-        Learn more about [CIS Hardening for RKE2](https://docs.rke2.io/security/hardening_guide).
-
-        > **NOTICE**:
-        >
-        > You may need restart the `systemd-sysctl` muaually by:
-        >
-        > ```shell
-        > sudo systemctl restart systemd-sysctl
-        > ```
-      '';
-      default = false;
-    };
-
-    extraFlags = lib.mkOption {
-      type = lib.types.listOf lib.types.str;
-      description = ''
-        Extra flags to pass to the rke2 service/agent.
-
-        Here you can find all the available flags:
-
-        - [Server Configuration Reference](https://docs.rke2.io/reference/server_config)
-        - [Agent Configuration Reference](https://docs.rke2.io/reference/linux_agent_config)
-      '';
-      example = [
-        "--disable-kube-proxy"
-        "--cluster-cidr=10.24.0.0/16"
-      ];
-      default = [ ];
-    };
-
-    environmentVars = lib.mkOption {
-      type = lib.types.attrsOf lib.types.str;
-      description = ''
-        Environment variables for configuring the rke2 service/agent.
-
-        Here you can find all the available environment variables:
-
-        - [Server Configuration Reference](https://docs.rke2.io/reference/server_config)
-        - [Agent Configuration Reference](https://docs.rke2.io/reference/linux_agent_config)
-
-        Besides the options above, you can also active environment variables by edit/create those files:
-
-        - `/etc/default/rke2`
-        - `/etc/sysconfig/rke2`
-        - `/usr/local/lib/systemd/system/rke2.env`
-      '';
-      # See: https://github.com/rancher/rke2/blob/master/bundle/lib/systemd/system/rke2-server.env#L1
-      default = {
-        HOME = "/root";
-      };
-    };
-  };
-
-  config = lib.mkIf cfg.enable {
-    assertions = [
-      {
-        assertion = cfg.role == "agent" -> (builtins.pathExists cfg.configPath || cfg.serverAddr != "");
-        message = "serverAddr or configPath (with 'server' key) should be set if role is 'agent'";
-      }
-      {
-        assertion =
-          cfg.role == "agent"
-          -> (builtins.pathExists cfg.configPath || cfg.tokenFile != null || cfg.token != "");
-        message = "token or tokenFile or configPath (with 'token' or 'token-file' keys) should be set if role is 'agent'";
-      }
-      {
-        assertion = cfg.role == "agent" -> !(cfg.agentTokenFile != null || cfg.agentToken != "");
-        message = "agentToken or agentTokenFile should NOT be set if role is 'agent'";
-      }
-      {
-        assertion = cfg.role == "agent" -> !(cfg.disable != [ ]);
-        message = "disable should not be set if role is 'agent'";
-      }
-      {
-        assertion = cfg.role == "agent" -> !(cfg.cni != "canal");
-        message = "cni should not be set if role is 'agent'";
-      }
-    ];
-
-    environment.systemPackages = [ config.services.rke2.package ];
-    # To configure NetworkManager to ignore calico/flannel related network interfaces.
-    # See: https://docs.rke2.io/known_issues#networkmanager
-    environment.etc."NetworkManager/conf.d/rke2-canal.conf" = {
-      enable = config.networking.networkmanager.enable;
-      text = ''
-        [keyfile]
-        unmanaged-devices=interface-name:cali*;interface-name:flannel*
-      '';
-    };
-    # See: https://docs.rke2.io/security/hardening_guide#set-kernel-parameters
-    boot.kernel.sysctl = lib.mkIf cfg.cisHardening {
-      "vm.panic_on_oom" = 0;
-      "vm.overcommit_memory" = 1;
-      "kernel.panic" = 10;
-      "kernel.panic_on_oops" = 1;
-    };
-
-    systemd.services."rke2-${cfg.role}" = {
-      description = "Rancher Kubernetes Engine v2";
-      documentation = [ "https://github.com/rancher/rke2#readme" ];
-      after = [ "network-online.target" ];
-      wants = [ "network-online.target" ];
-      wantedBy = [ "multi-user.target" ];
-      serviceConfig = {
-        Type = if cfg.role == "agent" then "exec" else "notify";
-        EnvironmentFile = [
-          "-/etc/default/%N"
-          "-/etc/sysconfig/%N"
-          "-/usr/local/lib/systemd/system/%N.env"
-        ];
-        Environment = lib.mapAttrsToList (k: v: "${k}=${v}") cfg.environmentVars;
-        KillMode = "process";
-        Delegate = "yes";
-        LimitNOFILE = 1048576;
-        LimitNPROC = "infinity";
-        LimitCORE = "infinity";
-        TasksMax = "infinity";
-        TimeoutStartSec = 0;
-        Restart = "always";
-        RestartSec = "5s";
-        ExecStartPre = [
-          # There is a conflict between RKE2 and `nm-cloud-setup.service`. This service add a routing table that
-          # interfere with the CNI plugin's configuration. This script checks if the service is enabled and if so,
-          # failed the RKE2 start.
-          # See: https://github.com/rancher/rke2/issues/1053
-          (pkgs.writeScript "check-nm-cloud-setup.sh" ''
-            #! ${pkgs.runtimeShell}
-            set -x
-            ! /run/current-system/systemd/bin/systemctl is-enabled --quiet nm-cloud-setup.service
-          '')
-          "-${pkgs.kmod}/bin/modprobe br_netfilter"
-          "-${pkgs.kmod}/bin/modprobe overlay"
-        ];
-        ExecStart = "${cfg.package}/bin/rke2 '${cfg.role}' ${
-          lib.escapeShellArgs (
-            (lib.optional (cfg.configPath != "/etc/rancher/rke2/config.yaml") "--config=${cfg.configPath}")
-            ++ (lib.optional cfg.debug "--debug")
-            ++ (lib.optional (cfg.dataDir != "/var/lib/rancher/rke2") "--data-dir=${cfg.dataDir}")
-            ++ (lib.optional (cfg.token != "") "--token=${cfg.token}")
-            ++ (lib.optional (cfg.tokenFile != null) "--token-file=${cfg.tokenFile}")
-            ++ (lib.optionals (cfg.role == "server" && cfg.disable != [ ]) (
-              map (d: "--disable=${d}") cfg.disable
-            ))
-            ++ (lib.optional (cfg.nodeName != null) "--node-name=${cfg.nodeName}")
-            ++ (lib.optionals (cfg.nodeLabel != [ ]) (map (l: "--node-label=${l}") cfg.nodeLabel))
-            ++ (lib.optionals (cfg.nodeTaint != [ ]) (map (t: "--node-taint=${t}") cfg.nodeTaint))
-            ++ (lib.optional (cfg.nodeIP != null) "--node-ip=${cfg.nodeIP}")
-            ++ (lib.optional (cfg.role == "server" && cfg.agentToken != "") "--agent-token=${cfg.agentToken}")
-            ++ (lib.optional (
-              cfg.role == "server" && cfg.agentTokenFile != null
-            ) "--agent-token-file=${cfg.agentTokenFile}")
-            ++ (lib.optional (cfg.serverAddr != "") "--server=${cfg.serverAddr}")
-            ++ (lib.optional cfg.selinux "--selinux")
-            ++ (lib.optional (cfg.role == "server" && cfg.cni != "canal") "--cni=${cfg.cni}")
-            ++ (lib.optional cfg.cisHardening "--profile=${
-              if cfg.package.version >= "1.25" then "cis-1.23" else "cis-1.6"
-            }")
-            ++ cfg.extraFlags
-          )
-        }";
-        ExecStopPost =
-          let
-            killProcess = pkgs.writeScript "kill-process.sh" ''
-              #! ${pkgs.runtimeShell}
-              /run/current-system/systemd/bin/systemd-cgls /system.slice/$1 | \
-                ${pkgs.gnugrep}/bin/grep -Eo '[0-9]+ (containerd|kubelet)' | \
-                ${pkgs.gawk}/bin/awk '{print $1}' | \
-                ${pkgs.findutils}/bin/xargs -r ${pkgs.util-linux}/bin/kill
-            '';
-          in
-          "-${killProcess} %n";
-      };
-    };
-  };
-}
@@ -135,7 +135,7 @@ import ../make-test-python.nix (
       machine.succeed("test -e /var/lib/rancher/k3s/server/manifests/values-file.yaml")
       machine.succeed("test -e /var/lib/rancher/k3s/server/manifests/advanced.yaml")
       # check that the timeout is set correctly, select only the first doc in advanced.yaml
-      advancedManifest = json.loads(machine.succeed("yq -o json 'select(di == 0)' /var/lib/rancher/k3s/server/manifests/advanced.yaml"))
+      advancedManifest = json.loads(machine.succeed("yq -o json '.items[0]' /var/lib/rancher/k3s/server/manifests/advanced.yaml"))
       t.assertEqual(advancedManifest["spec"]["timeout"], "69s", "unexpected value for spec.timeout")
       # wait for test jobs to complete
       machine.wait_until_succeeds("kubectl wait --for=condition=complete job/hello", timeout=180)
@@ -8,7 +8,7 @@ A K3s maintainer, maintains K3s's:
 - [issues](https://github.com/NixOS/nixpkgs/issues?q=is%3Aissue+is%3Aopen+k3s)
 - [pull requests](https://github.com/NixOS/nixpkgs/pulls?q=is%3Aopen+is%3Apr+label%3A%226.topic%3A+k3s%22)
 - [NixOS tests](https://github.com/NixOS/nixpkgs/tree/master/nixos/tests/k3s)
-- [NixOS service module](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/cluster/k3s/default.nix)
+- [NixOS service module](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/cluster/rancher)
 - [update script](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/cluster/k3s/update-script.sh) (the process of updating)
 - updates (the act of updating) and [r-ryantm bot logs](https://r.ryantm.com/log/k3s/)
 - deprecations