nixos/acme: improve scalability - reduce superfluous unit activations
The previous setup triggered all renewal units on even the slightest
change in config. In larger setups (100+ certificates), adding a new
certificate caused high system load and/or excessive memory
consumption. The memory issue is already alleviated by the locking
mechanism, but the locking in turn causes long delays, upwards of
several minutes depending on the individual runs, and still results in
superfluous activations.
In this change we streamline the overall setup of units:
1. The unit that other services can depend upon is 'acme-{cert}.service'.
We call this the 'base unit'. As this unit has `RemainAfterExit` set,
the `acme-finished-{cert}` targets are no longer required.
2. We now always generate initial self-signed certificates to simplify
the dependency structure. This deprecates the `preliminarySelfsigned`
option.
3. The `acme-order-renew-{cert}` service is activated after the base
unit and the services using certificates have started, and performs all
ACME interactions. When it finishes, other services (like web servers)
are notified through the `reloadServices` option, or they can use
`wantedBy` and `after` dependencies if they implement their own reload
units (see the sketch after this list).
The renewal timer also triggers this unit.
4. The timer unit is explicitly blocked from being started by s-t-c
(switch-to-configuration).
5. Permission management has been cleaned up a bit: the files under
`.lego` were set to 600 while the exposed files were 640. This is
unified to 640 now.
6. Exempt the account target from being restarted by s-t-c. This will
happen automatically if something relevant to the account changes.
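
As an illustration of point 3, here is a minimal, hypothetical sketch of
how a dependent service could hook into the new units. The certificate
name "example.com", the nginx/mydaemon services, and the exact attribute
path `security.acme.certs.<name>` are assumptions for illustration only;
only the `reloadServices` option and the `acme-{cert}` /
`acme-order-renew-{cert}` unit names come from the description above.

  {
    # Sketch only: "example.com" and nginx.service are placeholder names.
    security.acme.certs."example.com" = {
      # Reload nginx once acme-order-renew-example.com.service finishes.
      reloadServices = [ "nginx.service" ];
    };

    # Alternatively, a service shipping its own reload unit can hook in
    # via wantedBy/after on the renew unit instead of reloadServices:
    systemd.services."mydaemon-reload-certs" = {
      wantedBy = [ "acme-order-renew-example.com.service" ];
      after = [ "acme-order-renew-example.com.service" ];
      serviceConfig.Type = "oneshot";
      script = "systemctl reload mydaemon.service";
    };
  }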
@@ -3,6 +3,36 @@ import time

 TOTAL_RETRIES = 20


+# BackoffTracker provides a robust system for handling test retries
+class BackoffTracker:
+    delay = 1
+    increment = 1
+
+    def handle_fail(self, retries, message) -> int:
+        assert retries < TOTAL_RETRIES, message
+
+        print(f"Retrying in {self.delay}s, {retries + 1}/{TOTAL_RETRIES}")
+        time.sleep(self.delay)
+
+        # Only increment after the first try
+        if retries == 0:
+            self.delay += self.increment
+            self.increment *= 2
+
+        return retries + 1
+
+    def protect(self, func):
+        def wrapper(*args, retries: int = 0, **kwargs):
+            try:
+                return func(*args, **kwargs)
+            except Exception as err:
+                retries = self.handle_fail(retries, err.args)
+                return wrapper(*args, retries=retries, **kwargs)
+
+        return wrapper
+
+
+backoff = BackoffTracker()
+
+
 def run(node, cmd, fail=False):
     if fail:
@@ -39,6 +69,7 @@ def switch_to(node, name, fail=False) -> None:
 # and matches the issuer we expect it to be.
 # It's a good validation to ensure the cert.pem and fullchain.pem
 # are not still selfsigned after verification
+@backoff.protect
 def check_issuer(node, cert_name, issuer) -> None:
     for fname in ("cert.pem", "fullchain.pem"):
         actual_issuer = node.succeed(
@@ -102,9 +133,10 @@ def check_permissions(node, cert_name, group):
         f"test $({stat} /var/lib/acme/{cert_name}/*.pem"
         f" | tee /dev/stderr | grep -v '640 acme {group}' | wc -l) -eq 0"
     )
+    node.execute(f"ls -lahR /var/lib/acme/.lego/{cert_name}/* > /dev/stderr")
     node.succeed(
         f"test $({stat} /var/lib/acme/.lego/{cert_name}/*/{cert_name}*"
-        f" | tee /dev/stderr | grep -v '600 acme {group}' | wc -l) -eq 0"
+        f" | tee /dev/stderr | grep -v '640 acme {group}' | wc -l) -eq 0"
     )
     node.succeed(
         f"test $({stat} /var/lib/acme/{cert_name}"
@@ -115,37 +147,6 @@ def check_permissions(node, cert_name, group):
         f" | tee /dev/stderr | grep -v '600 acme {group}' | wc -l) -eq 0"
     )

-# BackoffTracker provides a robust system for handling test retries
-class BackoffTracker:
-    delay = 1
-    increment = 1
-
-    def handle_fail(self, retries, message) -> int:
-        assert retries < TOTAL_RETRIES, message
-
-        print(f"Retrying in {self.delay}s, {retries + 1}/{TOTAL_RETRIES}")
-        time.sleep(self.delay)
-
-        # Only increment after the first try
-        if retries == 0:
-            self.delay += self.increment
-            self.increment *= 2
-
-        return retries + 1
-
-    def protect(self, func):
-        def wrapper(*args, retries: int = 0, **kwargs):
-            try:
-                return func(*args, **kwargs)
-            except Exception as err:
-                retries = self.handle_fail(retries, err.args)
-                return wrapper(*args, retries=retries, **kwargs)
-
-        return wrapper
-
-
-backoff = BackoffTracker()
-
-
 @backoff.protect
 def download_ca_certs(node, ca_domain):