Merge staging-next into staging

This commit is contained in:
nixpkgs-ci[bot]
2025-09-14 00:17:55 +00:00
committed by GitHub
262 changed files with 2000 additions and 3336 deletions

View File

@@ -15,13 +15,13 @@ Searching online for prior art can be helpful if you are running into solved pro
### Github {#javascript-finding-examples-github}
- Searching Nix files for `mkYarnPackage`: <https://github.com/search?q=mkYarnPackage+language%3ANix&type=code>
- Searching just `flake.nix` files for `mkYarnPackage`: <https://github.com/search?q=mkYarnPackage+path%3A**%2Fflake.nix&type=code>
- Searching Nix files for `yarnConfigHook`: <https://github.com/search?q=yarnConfigHook+language%3ANix&type=code>
- Searching just `flake.nix` files for `yarnConfigHook`: <https://github.com/search?q=yarnConfigHook+path%3A**%2Fflake.nix&type=code>
### Gitlab {#javascript-finding-examples-gitlab}
- Searching Nix files for `mkYarnPackage`: <https://gitlab.com/search?scope=blobs&search=mkYarnPackage+extension%3Anix>
- Searching just `flake.nix` files for `mkYarnPackage`: <https://gitlab.com/search?scope=blobs&search=mkYarnPackage+filename%3Aflake.nix>
- Searching Nix files for `yarnConfigHook`: <https://gitlab.com/search?scope=blobs&search=yarnConfigHook+extension%3Anix>
- Searching just `flake.nix` files for `yarnConfigHook`: <https://gitlab.com/search?scope=blobs&search=yarnConfigHook+filename%3Aflake.nix>
## Tools overview {#javascript-tools-overview}
@@ -668,7 +668,8 @@ To install the package `yarnInstallHook` uses both `npm` and `yarn` to cleanup p
#### yarn2nix {#javascript-yarn2nix}
WARNING: The `yarn2nix` functions have been deprecated in favor of `yarnConfigHook`, `yarnBuildHook` and `yarnInstallHook` (for Yarn v1) and `yarn-berry_*.*` tooling (Yarn v3 and v4). Documentation for `yarn2nix` functions still appears here for the sake of the packages that still use them. See also a tracking issue [#324246](https://github.com/NixOS/nixpkgs/issues/324246).
> [!WARNING]
> The `yarn2nix` functions have been deprecated in favor of `yarnConfigHook`, `yarnBuildHook` and `yarnInstallHook` (for Yarn v1) and `yarn-berry_*.*` tooling (Yarn v3 and v4). Documentation for `yarn2nix` functions still appears here for the sake of the packages that still use them. See also a tracking issue [#324246](https://github.com/NixOS/nixpkgs/issues/324246).
##### Preparation {#javascript-yarn2nix-preparation}
@@ -687,6 +688,9 @@ If the downloaded files contain the `package.json` and `yarn.lock` files they ca
##### mkYarnPackage {#javascript-yarn2nix-mkYarnPackage}
> [!WARNING]
> The `mkYarnPackage` functions have been deprecated in favor of `yarnConfigHook`, `yarnBuildHook` and `yarnInstallHook` (for Yarn v1) and `yarn-berry_*.*` tooling (Yarn v3 and v4). Documentation for `mkYarnPackage` functions still appears here for the sake of the packages that still use them. See also a tracking issue [#324246](https://github.com/NixOS/nixpkgs/issues/324246).
`mkYarnPackage` will by default try to generate a binary. For packages only generating static assets (Svelte, Vue, React, Webpack, ...), you will need to explicitly override the build step with your instructions.
It's important to use the `--offline` flag. For example, if your script is `"build": "something"` in `package.json`, use:

View File

@@ -80,6 +80,8 @@
- `stdenv.mkDerivation` and other derivation builders that use it no longer allow the value of `env` to be anything but an attribute set, for the purpose of setting environment variables that are available to the [builder](https://nix.dev/manual/nix/latest/store/derivation/#builder) process. An environment variable called `env` can still be provided by means of `mkDerivation { env.env = ...; }`, though we recommend to use a more specific name than "env".
- `purple-matrix` has been removed, since it has been unmaintained since April 2022 and upstream does not recommend using it anymore.
- The default Android NDK version has been raised to 27, and the default SDK version to 35.
NDK versions 21 through 26 have been removed, as they are end-of-life.

View File

@@ -157,6 +157,11 @@ lib.mapAttrs mkLicense (
fullName = "Artistic License 2.0";
};
asl11 = {
spdxId = "Apache-1.1";
fullName = "Apache License 1.1";
};
asl20 = {
spdxId = "Apache-2.0";
fullName = "Apache License 2.0";
@@ -279,11 +284,6 @@ lib.mapAttrs mkLicense (
redistributable = true;
};
caossl = {
fullName = "Computer Associates Open Source Licence Version 1.0";
url = "http://jxplorer.org/licence.html";
};
cal10 = {
spdxId = "CAL-1.0";
fullName = "Cryptographic Autonomy License version 1.0 (CAL-1.0)";
@@ -937,6 +937,11 @@ lib.mapAttrs mkLicense (
fullName = "Lucent Public License v1.02";
};
lsof = {
spdxId = "lsof";
fullName = "lsof License"; # also known as Purdue BSD-Style License
};
miros = {
spdxId = "MirOS";
fullName = "MirOS License";
@@ -1164,11 +1169,6 @@ lib.mapAttrs mkLicense (
fullName = "Public Domain";
};
purdueBsd = {
fullName = "Purdue BSD-Style License"; # also known as lsof license
url = "https://enterprise.dejacode.com/licenses/public/purdue-bsd";
};
prosperity30 = {
fullName = "Prosperity-3.0.0";
free = false;
@@ -1185,14 +1185,9 @@ lib.mapAttrs mkLicense (
fullName = "Q Public License 1.0";
};
qwt = {
fullName = "Qwt License, Version 1.0";
url = "https://qwt.sourceforge.io/qwtlicense.html";
};
radiance = {
fullName = "The Radiance Software License, Version 2.0";
url = "https://github.com/LBNL-ETA/Radiance/blob/master/License.txt";
qwtException = {
spdxId = "Qwt-exception-1.0";
fullName = "Qwt exception 1.0";
};
ruby = {

View File

@@ -271,6 +271,7 @@ with lib.maintainers;
cuda = {
members = [
connorbaker
GaetanLepage
prusnak
samuela
SomeoneSerge
@@ -818,7 +819,7 @@ with lib.maintainers;
megheaiulian
mkg20001
];
scope = "All things linuxcontainers. LXC, Incus, LXD and related packages.";
scope = "All things linuxcontainers. Incus, LXC, and related packages.";
shortName = "lxc";
};

View File

@@ -90,6 +90,8 @@
- [nix-store-veritysetup](https://github.com/nikstur/nix-store-veritysetup-generator), a systemd generator to unlock the Nix Store as a dm-verity protected block device. Available as [boot.initrd.nix-store-veritysetup](options.html#opt-boot.initrd.nix-store-veritysetup.enable).
- [ente](https://github.com/ente-io/ente), a service that provides a fully open source, end-to-end encrypted platform for photos and videos. Available as [services.ente.api](#opt-services.ente.api.enable) and [services.ente.web](#opt-services.ente.web.enable).
- [SuiteNumérique Docs](https://github.com/suitenumerique/docs), a collaborative note taking, wiki and documentation web platform and alternative to Notion or Outline. Available as [services.lasuite-docs](#opt-services.lasuite-docs.enable).
- [dwl](https://codeberg.org/dwl/dwl), a compact, hackable compositor for Wayland based on wlroots. Available as [programs.dwl](#opt-programs.dwl.enable).
@@ -134,6 +136,8 @@
- The `services.polipo` module has been removed as `polipo` is unmaintained and archived upstream.
- `virtualisation.lxd` has been removed due to lack of Nixpkgs maintenance. Users can migrate to `virtualisation.incus`, a fork of LXD, as a replacement. See [Incus migration documentation](https://linuxcontainers.org/incus/docs/main/howto/server_migrate_lxd/) for migration information.
- The non-LTS Forgejo package (`forgejo`) has been updated to 12.0.0. This release contains breaking changes, see the [release blog post](https://forgejo.org/2025-07-release-v12-0/)
for all the details and how to ensure smooth upgrades.

View File

@@ -1,34 +0,0 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page
# and in the NixOS manual (accessible by running nixos-help).
# NixOS configuration installed into LXD container images as the initial
# /etc/nixos/configuration.nix (the @stateVersion@ placeholder is substituted
# at image-build time via pkgs.replaceVars).
{ modulesPath, ... }:
{
imports = [
# Include the default lxd configuration.
"${modulesPath}/virtualisation/lxc-container.nix"
# Include the container-specific autogenerated configuration.
./lxd.nix
];
# Disable the legacy DHCP clients; networkd below handles addressing.
networking = {
dhcpcd.enable = false;
useDHCP = false;
useHostResolvConf = false;
};
# Configure the container's single NIC (eth0) with DHCPv4 + IPv6 RA
# via systemd-networkd.
systemd.network = {
enable = true;
networks."50-eth0" = {
matchConfig.Name = "eth0";
networkConfig = {
DHCP = "ipv4";
IPv6AcceptRA = true;
};
# Consider the system "online" once this interface is routable.
linkConfig.RequiredForOnline = "routable";
};
};
system.stateVersion = "@stateVersion@"; # Did you read the comment?
}

View File

@@ -1,48 +0,0 @@
{ lib, pkgs, ... }:
{
imports = [
../../../modules/virtualisation/lxc-container.nix
];
virtualisation.lxc.templates.nix = {
enable = true;
target = "/etc/nixos/lxd.nix";
template = ./nix.tpl;
when = [
"create"
"copy"
];
};
# copy the config for nixos-rebuild
system.activationScripts.config =
let
config = pkgs.replaceVars ./lxd-container-image-inner.nix {
stateVersion = lib.trivial.release;
};
in
''
if [ ! -e /etc/nixos/configuration.nix ]; then
install -m 0644 -D ${config} /etc/nixos/configuration.nix
fi
'';
networking = {
dhcpcd.enable = false;
useDHCP = false;
useHostResolvConf = false;
};
systemd.network = {
enable = true;
networks."50-eth0" = {
matchConfig.Name = "eth0";
networkConfig = {
DHCP = "ipv4";
IPv6AcceptRA = true;
};
linkConfig.RequiredForOnline = "routable";
};
};
}

View File

@@ -1,34 +0,0 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page
# and in the NixOS manual (accessible by running nixos-help).
{ modulesPath, ... }:
{
imports = [
# Include the default lxd configuration.
"${modulesPath}/virtualisation/lxd-virtual-machine.nix"
# Include the container-specific autogenerated configuration.
./lxd.nix
];
networking = {
dhcpcd.enable = false;
useDHCP = false;
useHostResolvConf = false;
};
systemd.network = {
enable = true;
networks."50-enp5s0" = {
matchConfig.Name = "enp5s0";
networkConfig = {
DHCP = "ipv4";
IPv6AcceptRA = true;
};
linkConfig.RequiredForOnline = "routable";
};
};
system.stateVersion = "@stateVersion@"; # Did you read the comment?
}

View File

@@ -1,49 +0,0 @@
{ lib, pkgs, ... }:
{
imports = [
../../../modules/virtualisation/lxd-virtual-machine.nix
];
virtualisation.lxc.templates.nix = {
enable = true;
target = "/etc/nixos/lxd.nix";
template = ./nix.tpl;
when = [
"create"
"copy"
];
};
# copy the config for nixos-rebuild
system.activationScripts.config =
let
config = pkgs.replaceVars ./lxd-virtual-machine-image-inner.nix {
stateVersion = lib.trivial.release;
};
in
''
if [ ! -e /etc/nixos/configuration.nix ]; then
install -m 0644 -D ${config} /etc/nixos/configuration.nix
fi
'';
# Network
networking = {
dhcpcd.enable = false;
useDHCP = false;
useHostResolvConf = false;
};
systemd.network = {
enable = true;
networks."50-enp5s0" = {
matchConfig.Name = "enp5s0";
networkConfig = {
DHCP = "ipv4";
IPv6AcceptRA = true;
};
linkConfig.RequiredForOnline = "routable";
};
};
}

View File

@@ -1,7 +0,0 @@
# LXD instance template: rendered by LXD on instance create/copy (see
# virtualisation.lxc.templates.nix). "{{ container.name }}" is LXD/pongo2
# template syntax expanded by the daemon, not Nix interpolation.
{ lib, config, pkgs, ... }:
# WARNING: THIS CONFIGURATION IS AUTOGENERATED AND WILL BE OVERWRITTEN AUTOMATICALLY
{
networking.hostName = "{{ container.name }}";
}

View File

@@ -584,7 +584,6 @@ in
#shout = 206; #unused
#gateone = 207; #removed 2025-08-21
namecoin = 208;
#lxd = 210; # unused
#kibana = 211;
xtreemfs = 212;
calibre-server = 213;

View File

@@ -1574,6 +1574,7 @@
./services/web-apps/echoip.nix
./services/web-apps/eintopf.nix
./services/web-apps/engelsystem.nix
./services/web-apps/ente.nix
./services/web-apps/ethercalc.nix
./services/web-apps/fediwall.nix
./services/web-apps/fider.nix
@@ -1921,8 +1922,6 @@
./virtualisation/libvirtd.nix
./virtualisation/lxc.nix
./virtualisation/lxcfs.nix
./virtualisation/lxd-agent.nix
./virtualisation/lxd.nix
./virtualisation/multipass.nix
./virtualisation/nixos-containers.nix
./virtualisation/oci-containers.nix

View File

@@ -369,6 +369,11 @@ in
(mkRemovedOptionModule [ "services" "gateone" ] ''
The gateone module was removed since the package was removed alongside much other obsolete python 2.
'')
(mkRemovedOptionModule [ "virtualisation" "lxd" ] ''
LXD has been removed from NixOS due to lack of Nixpkgs maintenance.
Consider migrating to Incus, or remove LXD from your configuration.
https://linuxcontainers.org/incus/docs/main/howto/server_migrate_lxd/
'')
# Do NOT add any option renames here, see top of the file
];
}

View File

@@ -19,6 +19,12 @@ in
meta.maintainers = with lib.maintainers; [ ];
imports = [
(lib.mkRemovedOptionModule [ "services" "chromadb" "logFile" ] ''
ChromaDB has removed the --log-path parameter that logFile relied on.
'')
];
options = {
services.chromadb = {
enable = mkEnableOption "ChromaDB, an open-source AI application database.";
@@ -47,14 +53,6 @@ in
'';
};
logFile = mkOption {
type = types.path;
default = "/var/log/chromadb/chromadb.log";
description = ''
Specifies the location of file for logging output.
'';
};
dbpath = mkOption {
type = types.str;
default = "/var/lib/chromadb";
@@ -81,7 +79,7 @@ in
StateDirectory = "chromadb";
WorkingDirectory = "/var/lib/chromadb";
LogsDirectory = "chromadb";
ExecStart = "${lib.getExe cfg.package} run --path ${cfg.dbpath} --host ${cfg.host} --port ${toString cfg.port} --log-path ${cfg.logFile}";
ExecStart = "${lib.getExe cfg.package} run --path ${cfg.dbpath} --host ${cfg.host} --port ${toString cfg.port}";
Restart = "on-failure";
ProtectHome = true;
ProtectSystem = "strict";

View File

@@ -123,7 +123,7 @@ in
libpurple_plugins = lib.mkOption {
type = lib.types.listOf lib.types.package;
default = [ ];
example = lib.literalExpression "[ pkgs.purple-matrix ]";
example = lib.literalExpression "[ pkgs.purple-discord ]";
description = ''
The list of libpurple plugins to install.
'';

View File

@@ -282,6 +282,7 @@ in
Type = "notify";
Restart = "always";
ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
ExecReload = "${pkgs.coreutils}/bin/kill -s HUP $MAINPID";
UMask = "0027";
CapabilityBoundingSet = capabilities;
AmbientCapabilities = capabilities;

View File

@@ -0,0 +1,178 @@
# Ente.io {#module-services-ente}
[Ente](https://ente.io/) is a service that provides a fully open source,
end-to-end encrypted platform for photos and videos.
## Quickstart {#module-services-ente-quickstart}
To host ente, you need the following things:
- S3 storage server (either external or self-hosted like [minio](https://github.com/minio/minio))
- Several subdomains pointing to your server:
- accounts.example.com
- albums.example.com
- api.example.com
- cast.example.com
- photos.example.com
- s3.example.com
The following example shows how to setup ente with a self-hosted S3 storage via minio.
You can host the minio s3 storage on the same server as ente, but as this isn't
a requirement the example shows the minio and ente setup separately.
We assume that the minio server will be reachable at `https://s3.example.com`.
```nix
{
services.minio = {
enable = true;
# ente's config must match this region!
region = "us-east-1";
# Please use a file, agenix or sops-nix to securely store your root user password!
# MINIO_ROOT_USER=your_root_user
# MINIO_ROOT_PASSWORD=a_randomly_generated_long_password
rootCredentialsFile = "/run/secrets/minio-credentials-full";
};
systemd.services.minio.environment.MINIO_SERVER_URL = "https://s3.example.com";
# Proxy for minio
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
recommendedProxySettings = true;
virtualHosts."s3.example.com" = {
forceSSL = true;
useACME = true;
locations."/".proxyPass = "http://localhost:9000";
# determine max file upload size
extraConfig = ''
client_max_body_size 16G;
proxy_buffering off;
proxy_request_buffering off;
'';
};
};
}
```
And the configuration for ente:
```nix
{
services.ente = {
web = {
enable = true;
domains = {
accounts = "accounts.example.com";
albums = "albums.example.com";
cast = "cast.example.com";
photos = "photos.example.com";
};
};
api = {
enable = true;
nginx.enable = true;
# Create a local postgres database and set the necessary config in ente
enableLocalDB = true;
domain = "api.example.com";
# You can hide secrets by setting xyz._secret = file instead of xyz = value.
# Make sure to not include any of the secrets used here directly
# in your config. They would be publicly readable in the nix store.
# Use agenix, sops-nix or an equivalent secret management solution.
settings = {
s3 = {
use_path_style_urls = true;
b2-eu-cen = {
endpoint = "https://s3.example.com";
region = "us-east-1";
bucket = "ente";
key._secret = pkgs.writeText "minio_user" "minio_user";
secret._secret = pkgs.writeText "minio_pw" "minio_pw";
};
};
key = {
# generate with: openssl rand -base64 32
encryption._secret = pkgs.writeText "encryption" "T0sn+zUVFOApdX4jJL4op6BtqqAfyQLH95fu8ASWfno=";
# generate with: openssl rand -base64 64
hash._secret = pkgs.writeText "hash" "g/dBZBs1zi9SXQ0EKr4RCt1TGr7ZCKkgrpjyjrQEKovWPu5/ce8dYM6YvMIPL23MMZToVuuG+Z6SGxxTbxg5NQ==";
};
# generate with: openssl rand -base64 32
jwt.secret._secret = pkgs.writeText "jwt" "i2DecQmfGreG6q1vBj5tCokhlN41gcfS2cjOs9Po-u8=";
};
};
};
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
recommendedProxySettings = true; # This is important!
virtualHosts."accounts.${domain}".enableACME = true;
virtualHosts."albums.${domain}".enableACME = true;
virtualHosts."api.${domain}".enableACME = true;
virtualHosts."cast.${domain}".enableACME = true;
virtualHosts."photos.${domain}".enableACME = true;
};
}
```
If you have a mail server or smtp relay, you can optionally configure
`services.ente.api.settings.smtp` so ente can send you emails (registration code and possibly
other events). This is optional.
After starting the minio server, make sure the bucket exists:
```
mc alias set minio https://s3.example.com root_user root_password --api s3v4
mc mb -p minio/ente
```
Now ente should be ready to go under `https://photos.example.com`.
## Registering users {#module-services-ente-registering-users}
Now you can open photos.example.com and register your user(s).
Beware that the first created account will be considered to be the admin account,
which among some other things allows you to use `ente-cli` to increase storage limits for any user.
If you have configured smtp, you will get a mail with a verification code,
otherwise you can find the code in the server logs.
```
journalctl -eu ente
[...]
ente # [ 157.145165] ente[982]: INFO[0141]email.go:130 sendViaTransmail Skipping sending email to a@a.a: Verification code: 134033
```
After you have registered your users, you can set
`settings.internal.disable-registration = true;` to prevent
further signups.
## Increasing storage limit {#module-services-ente-increasing-storage-limit}
By default, all users will be on the free plan which is the only plan
available. While adding new plans is possible in theory, it requires some
manual database operations which isn't worthwhile. Instead, use `ente-cli`
with your admin user to modify the storage limit.
## iOS background sync {#module-services-ente-ios-background-sync}
On iOS, background sync is achieved via a silent notification sent by the server
every 30 minutes that allows the phone to sync for about 30 seconds, enough for
all but the largest videos to be synced in the background (if the app is brought to
the foreground, sync will resume as normal). To achieve this, however, a
Firebase account is needed. In the settings option, configure credentials-dir
to point towards the directory where the JSON file containing the Firebase
credentials is stored.
```nix
{
# This directory should contain your fcm-service-account.json file
services.ente.api.settings = {
credentials-dir = "/path/to/credentials";
# [...]
};
}
```

View File

@@ -0,0 +1,363 @@
{
config,
lib,
pkgs,
utils,
...
}:
let
inherit (lib)
getExe
mkDefault
mkEnableOption
mkIf
mkMerge
mkOption
mkPackageOption
optional
types
;
cfgApi = config.services.ente.api;
cfgWeb = config.services.ente.web;
webPackage =
enteApp:
cfgWeb.package.override {
inherit enteApp;
enteMainUrl = "https://${cfgWeb.domains.photos}";
extraBuildEnv = {
NEXT_PUBLIC_ENTE_ENDPOINT = "https://${cfgWeb.domains.api}";
NEXT_PUBLIC_ENTE_ALBUMS_ENDPOINT = "https://${cfgWeb.domains.albums}";
NEXT_TELEMETRY_DISABLED = "1";
};
};
defaultUser = "ente";
defaultGroup = "ente";
dataDir = "/var/lib/ente";
yamlFormat = pkgs.formats.yaml { };
in
{
options.services.ente = {
web = {
enable = mkEnableOption "Ente web frontend (Photos, Albums)";
package = mkPackageOption pkgs "ente-web" { };
domains = {
api = mkOption {
type = types.str;
example = "api.ente.example.com";
description = ''
The domain under which the api is served. This will NOT serve the api itself,
but is a required setting to host the frontends! This will automatically be set
for you if you enable both the api server and web frontends.
'';
};
accounts = mkOption {
type = types.str;
example = "accounts.ente.example.com";
description = "The domain under which the accounts frontend will be served.";
};
cast = mkOption {
type = types.str;
example = "cast.ente.example.com";
description = "The domain under which the cast frontend will be served.";
};
albums = mkOption {
type = types.str;
example = "albums.ente.example.com";
description = "The domain under which the albums frontend will be served.";
};
photos = mkOption {
type = types.str;
example = "photos.ente.example.com";
description = "The domain under which the photos frontend will be served.";
};
};
};
api = {
enable = mkEnableOption "Museum (API server for ente.io)";
package = mkPackageOption pkgs "museum" { };
nginx.enable = mkEnableOption "nginx proxy for the API server";
user = mkOption {
type = types.str;
default = defaultUser;
description = "User under which museum runs. If you set this option you must make sure the user exists.";
};
group = mkOption {
type = types.str;
default = defaultGroup;
description = "Group under which museum runs. If you set this option you must make sure the group exists.";
};
domain = mkOption {
type = types.str;
example = "api.ente.example.com";
description = "The domain under which the api will be served.";
};
enableLocalDB = mkEnableOption "the automatic creation of a local postgres database for museum.";
settings = mkOption {
description = ''
Museum yaml configuration. Refer to upstream [local.yaml](https://github.com/ente-io/ente/blob/main/server/configurations/local.yaml) for more information.
You can specify secret values in this configuration by setting `somevalue._secret = "/path/to/file"` instead of setting `somevalue` directly.
'';
default = { };
type = types.submodule {
freeformType = yamlFormat.type;
options = {
apps = {
public-albums = mkOption {
type = types.str;
default = "https://albums.ente.io";
description = ''
If you're running a self hosted instance and wish to serve public links,
set this to the URL where your albums web app is running.
'';
};
cast = mkOption {
type = types.str;
default = "https://cast.ente.io";
description = ''
Set this to the URL where your cast page is running.
This is for browser and chromecast casting support.
'';
};
accounts = mkOption {
type = types.str;
default = "https://accounts.ente.io";
description = ''
Set this to the URL where your accounts page is running.
This is primarily for passkey support.
'';
};
};
db = {
host = mkOption {
type = types.str;
description = "The database host";
};
port = mkOption {
type = types.port;
default = 5432;
description = "The database port";
};
name = mkOption {
type = types.str;
description = "The database name";
};
user = mkOption {
type = types.str;
description = "The database user";
};
};
};
};
};
};
};
config = mkMerge [
(mkIf cfgApi.enable {
services.postgresql = mkIf cfgApi.enableLocalDB {
enable = true;
ensureUsers = [
{
name = "ente";
ensureDBOwnership = true;
}
];
ensureDatabases = [ "ente" ];
};
services.ente.web.domains.api = mkIf cfgWeb.enable cfgApi.domain;
services.ente.api.settings = {
# This will cause logs to be written to stdout/err, which then end up in the journal
log-file = mkDefault "";
db = mkIf cfgApi.enableLocalDB {
host = "/run/postgresql";
port = 5432;
name = "ente";
user = "ente";
};
};
systemd.services.ente = {
description = "Ente.io Museum API Server";
after = [ "network.target" ] ++ optional cfgApi.enableLocalDB "postgresql.service";
requires = optional cfgApi.enableLocalDB "postgresql.service";
wantedBy = [ "multi-user.target" ];
preStart = ''
# Generate config including secret values. YAML is a superset of JSON, so we can use this here.
${utils.genJqSecretsReplacementSnippet cfgApi.settings "/run/ente/local.yaml"}
# Setup paths
mkdir -p ${dataDir}/configurations
ln -sTf /run/ente/local.yaml ${dataDir}/configurations/local.yaml
'';
# Hardened systemd unit settings for museum (the ente API server).
serviceConfig = {
  ExecStart = getExe cfgApi.package;
  Type = "simple";
  Restart = "on-failure";
  # FIX: this key was misspelled "AmbientCapablities"; systemd treats unknown
  # assignments as warnings and ignores them, so the intended "no ambient
  # capabilities" setting was never applied. Correct spelling per systemd.exec(5).
  AmbientCapabilities = [ ];
  CapabilityBoundingSet = [ ];
  LockPersonality = true;
  MemoryDenyWriteExecute = true;
  NoNewPrivileges = true;
  PrivateMounts = true;
  PrivateTmp = true;
  # PrivateUsers must stay off: museum needs real UID/GID mapping for its
  # state directory and postgres socket access.
  PrivateUsers = false;
  ProcSubset = "pid";
  ProtectClock = true;
  ProtectControlGroups = true;
  ProtectHome = true;
  ProtectHostname = true;
  ProtectKernelLogs = true;
  ProtectKernelModules = true;
  ProtectKernelTunables = true;
  ProtectProc = "invisible";
  ProtectSystem = "strict";
  # AF_UNIX is required for the local postgres socket; NETLINK for sd_notify
  # style interfaces; INET/INET6 for serving the API and reaching S3/SMTP.
  RestrictAddressFamilies = [
    "AF_INET"
    "AF_INET6"
    "AF_NETLINK"
    "AF_UNIX"
  ];
  RestrictNamespaces = true;
  RestrictRealtime = true;
  RestrictSUIDSGID = true;
  SystemCallArchitectures = "native";
  SystemCallFilter = "@system-service";
  UMask = "077";
  # Expose the package's static assets inside the (ProtectSystem=strict)
  # sandbox at the paths museum expects under its working directory.
  BindReadOnlyPaths = [
    "${cfgApi.package}/share/museum/migrations:${dataDir}/migrations"
    "${cfgApi.package}/share/museum/mail-templates:${dataDir}/mail-templates"
    "${cfgApi.package}/share/museum/web-templates:${dataDir}/web-templates"
  ];
  User = cfgApi.user;
  Group = cfgApi.group;
  SyslogIdentifier = "ente";
  StateDirectory = "ente";
  WorkingDirectory = dataDir;
  RuntimeDirectory = "ente";
};
# Environment MUST be called local, otherwise we cannot log to stdout
environment = {
ENVIRONMENT = "local";
GIN_MODE = "release";
};
};
users = {
users = mkIf (cfgApi.user == defaultUser) {
${defaultUser} = {
description = "ente.io museum service user";
inherit (cfgApi) group;
isSystemUser = true;
home = dataDir;
};
};
groups = mkIf (cfgApi.group == defaultGroup) { ${defaultGroup} = { }; };
};
services.nginx = mkIf cfgApi.nginx.enable {
enable = true;
upstreams.museum = {
servers."localhost:8080" = { };
extraConfig = ''
zone museum 64k;
keepalive 20;
'';
};
virtualHosts.${cfgApi.domain} = {
forceSSL = mkDefault true;
locations."/".proxyPass = "http://museum";
extraConfig = ''
client_max_body_size 4M;
'';
};
};
})
(mkIf cfgWeb.enable {
services.ente.api.settings = mkIf cfgApi.enable {
apps = {
accounts = "https://${cfgWeb.domains.accounts}";
cast = "https://${cfgWeb.domains.cast}";
public-albums = "https://${cfgWeb.domains.albums}";
};
webauthn = {
rpid = cfgWeb.domains.accounts;
rporigins = [ "https://${cfgWeb.domains.accounts}" ];
};
};
services.nginx =
let
domainFor = app: cfgWeb.domains.${app};
in
{
enable = true;
virtualHosts.${domainFor "accounts"} = {
forceSSL = mkDefault true;
locations."/" = {
root = webPackage "accounts";
tryFiles = "$uri $uri.html /index.html";
extraConfig = ''
add_header Access-Control-Allow-Origin 'https://${cfgWeb.domains.api}';
'';
};
};
virtualHosts.${domainFor "cast"} = {
forceSSL = mkDefault true;
locations."/" = {
root = webPackage "cast";
tryFiles = "$uri $uri.html /index.html";
extraConfig = ''
add_header Access-Control-Allow-Origin 'https://${cfgWeb.domains.api}';
'';
};
};
virtualHosts.${domainFor "photos"} = {
serverAliases = [
(domainFor "albums") # the albums app is shared with the photos frontend
];
forceSSL = mkDefault true;
locations."/" = {
root = webPackage "photos";
tryFiles = "$uri $uri.html /index.html";
extraConfig = ''
add_header Access-Control-Allow-Origin 'https://${cfgWeb.domains.api}';
'';
};
};
};
})
];
meta.maintainers = with lib.maintainers; [ oddlama ];
}

View File

@@ -77,7 +77,7 @@ in
options = {
virtualisation.lxc = {
templates = lib.mkOption {
description = "Templates for LXD";
description = "Templates for LXC images";
type = lib.types.attrsOf (lib.types.submodule templateSubmodule);
default = { };
example = lib.literalExpression ''

View File

@@ -1,110 +0,0 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.virtualisation.lxd.agent;
# the lxd agent is provided by the lxd daemon through a virtiofs or 9p mount
# this is a port of the distrobuilder lxd-agent generator
# https://github.com/lxc/distrobuilder/blob/f77300bf7d7d5707b08eaf8a434d647d1ba81b5d/generators/lxd-agent.go#L18-L55
preStartScript = ''
PREFIX="/run/lxd_agent"
mount_virtiofs() {
mount -t virtiofs config "$PREFIX/.mnt" >/dev/null 2>&1
}
mount_9p() {
modprobe 9pnet_virtio >/dev/null 2>&1 || true
mount -t 9p config "$PREFIX/.mnt" -o access=0,trans=virtio,size=1048576 >/dev/null 2>&1
}
fail() {
umount -l "$PREFIX" >/dev/null 2>&1 || true
rmdir "$PREFIX" >/dev/null 2>&1 || true
echo "$1"
exit 1
}
# Setup the mount target.
umount -l "$PREFIX" >/dev/null 2>&1 || true
mkdir -p "$PREFIX"
mount -t tmpfs tmpfs "$PREFIX" -o mode=0700,size=50M
mkdir -p "$PREFIX/.mnt"
# Try virtiofs first.
mount_virtiofs || mount_9p || fail "Couldn't mount virtiofs or 9p, failing."
# Copy the data.
cp -Ra "$PREFIX/.mnt/"* "$PREFIX"
# Unmount the temporary mount.
umount "$PREFIX/.mnt"
rmdir "$PREFIX/.mnt"
# Fix up permissions.
chown -R root:root "$PREFIX"
'';
in
{
options = {
virtualisation.lxd.agent.enable = lib.mkEnableOption "LXD agent";
};
config = lib.mkIf cfg.enable {
# https://github.com/lxc/distrobuilder/blob/f77300bf7d7d5707b08eaf8a434d647d1ba81b5d/generators/lxd-agent.go#L108-L125
systemd.services.lxd-agent = {
enable = true;
wantedBy = [ "multi-user.target" ];
before = [
"shutdown.target"
]
++ lib.optionals config.services.cloud-init.enable [
"cloud-init.target"
"cloud-init.service"
"cloud-init-local.service"
];
conflicts = [ "shutdown.target" ];
path = [
pkgs.kmod
pkgs.util-linux
# allow `incus exec` to find system binaries
"/run/current-system/sw"
];
preStart = preStartScript;
# avoid killing nixos-rebuild switch when executed through lxc exec
restartIfChanged = false;
stopIfChanged = false;
unitConfig = {
Description = "LXD - agent";
Documentation = "https://documentation.ubuntu.com/lxd/en/latest";
ConditionPathExists = "/dev/virtio-ports/org.linuxcontainers.lxd";
DefaultDependencies = "no";
StartLimitInterval = "60";
StartLimitBurst = "10";
};
serviceConfig = {
Type = "notify";
WorkingDirectory = "-/run/lxd_agent";
ExecStart = "/run/lxd_agent/lxd-agent";
Restart = "on-failure";
RestartSec = "5s";
};
};
systemd.paths.lxd-agent = {
enable = true;
wantedBy = [ "multi-user.target" ];
pathConfig.PathExists = "/dev/virtio-ports/org.linuxcontainers.lxd";
};
};
}

View File

@@ -1,56 +0,0 @@
{
config,
lib,
pkgs,
...
}:
let
serialDevice = if pkgs.stdenv.hostPlatform.isx86 then "ttyS0" else "ttyAMA0"; # aarch64
in
{
imports = [
./lxc-instance-common.nix
../profiles/qemu-guest.nix
];
config = {
system.build.qemuImage = import ../../lib/make-disk-image.nix {
inherit pkgs lib config;
partitionTableType = "efi";
format = "qcow2-compressed";
copyChannel = true;
};
fileSystems = {
"/" = {
device = "/dev/disk/by-label/nixos";
autoResize = true;
fsType = "ext4";
};
"/boot" = {
device = "/dev/disk/by-label/ESP";
fsType = "vfat";
};
};
boot.growPartition = true;
boot.loader.systemd-boot.enable = true;
# image building needs to know what device to install bootloader on
boot.loader.grub.device = "/dev/vda";
boot.kernelParams = [
"console=tty1"
"console=${serialDevice}"
];
services.udev.extraRules = ''
SUBSYSTEM=="cpu", CONST{arch}=="x86-64", TEST=="online", ATTR{online}=="0", ATTR{online}="1"
'';
virtualisation.lxd.agent.enable = lib.mkDefault true;
};
}

View File

@@ -1,283 +0,0 @@
# Systemd services for lxd.
{
config,
lib,
pkgs,
...
}:
let
cfg = config.virtualisation.lxd;
preseedFormat = pkgs.formats.yaml { };
in
{
imports = [
(lib.mkRemovedOptionModule [
"virtualisation"
"lxd"
"zfsPackage"
] "Override zfs in an overlay instead to override it globally")
];
options = {
virtualisation.lxd = {
enable = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
This option enables lxd, a daemon that manages
containers. Users in the "lxd" group can interact with
the daemon (e.g. to start or stop containers) using the
{command}`lxc` command line tool, among others.
Most of the time, you'll also want to start lxcfs, so
that containers can "see" the limits:
```
virtualisation.lxc.lxcfs.enable = true;
```
'';
};
package = lib.mkPackageOption pkgs "lxd-lts" { };
lxcPackage = lib.mkOption {
type = lib.types.package;
default = config.virtualisation.lxc.package;
defaultText = lib.literalExpression "config.virtualisation.lxc.package";
description = "The lxc package to use.";
};
zfsSupport = lib.mkOption {
type = lib.types.bool;
default = config.boot.zfs.enabled;
defaultText = lib.literalExpression "config.boot.zfs.enabled";
description = ''
Enables lxd to use zfs as a storage for containers.
This option is enabled by default if a zfs pool is configured
with nixos.
'';
};
recommendedSysctlSettings = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Enables various settings to avoid common pitfalls when
running containers requiring many file operations.
Fixes errors like "Too many open files" or
"neighbour: ndisc_cache: neighbor table overflow!".
See <https://lxd.readthedocs.io/en/latest/production-setup/>
for details.
'';
};
preseed = lib.mkOption {
type = lib.types.nullOr (
lib.types.submodule {
freeformType = preseedFormat.type;
}
);
default = null;
description = ''
Configuration for LXD preseed, see
<https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#initialize-preseed>
for supported values.
Changes to this will be re-applied to LXD which will overwrite existing entities or create missing ones,
but entities will *not* be removed by preseed.
'';
example = lib.literalExpression ''
{
networks = [
{
name = "lxdbr0";
type = "bridge";
config = {
"ipv4.address" = "10.0.100.1/24";
"ipv4.nat" = "true";
};
}
];
profiles = [
{
name = "default";
devices = {
eth0 = {
name = "eth0";
network = "lxdbr0";
type = "nic";
};
root = {
path = "/";
pool = "default";
size = "35GiB";
type = "disk";
};
};
}
];
storage_pools = [
{
name = "default";
driver = "dir";
config = {
source = "/var/lib/lxd/storage-pools/default";
};
}
];
}
'';
};
startTimeout = lib.mkOption {
type = lib.types.int;
default = 600;
apply = toString;
description = ''
Time to wait (in seconds) for LXD to become ready to process requests.
If LXD does not reply within the configured time, lxd.service will be
considered failed and systemd will attempt to restart it.
'';
};
ui = {
enable = lib.mkEnableOption "(experimental) LXD UI";
package = lib.mkPackageOption pkgs [ "lxd-ui" ] { };
};
};
};
###### implementation
config = lib.mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
# Note: the following options are also declared in virtualisation.lxc, but
# the latter can't be simply enabled to reuse the formers, because it
# does a bunch of unrelated things.
systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
security.apparmor = {
packages = [ cfg.lxcPackage ];
policies = {
"bin.lxc-start".profile = ''
include ${cfg.lxcPackage}/etc/apparmor.d/usr.bin.lxc-start
'';
"lxc-containers".profile = ''
include ${cfg.lxcPackage}/etc/apparmor.d/lxc-containers
'';
};
};
systemd.sockets.lxd = {
description = "LXD UNIX socket";
wantedBy = [ "sockets.target" ];
socketConfig = {
ListenStream = "/var/lib/lxd/unix.socket";
SocketMode = "0660";
SocketGroup = "lxd";
Service = "lxd.service";
};
};
systemd.services.lxd = {
description = "LXD Container Management Daemon";
wantedBy = [ "multi-user.target" ];
after = [
"network-online.target"
(lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
];
requires = [
"network-online.target"
"lxd.socket"
(lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
];
documentation = [ "man:lxd(1)" ];
path = [ pkgs.util-linux ] ++ lib.optional cfg.zfsSupport config.boot.zfs.package;
environment = lib.mkIf (cfg.ui.enable) {
"LXD_UI" = cfg.ui.package;
};
serviceConfig = {
ExecStart = "@${cfg.package}/bin/lxd lxd --group lxd";
ExecStartPost = "${cfg.package}/bin/lxd waitready --timeout=${cfg.startTimeout}";
ExecStop = "${cfg.package}/bin/lxd shutdown";
KillMode = "process"; # when stopping, leave the containers alone
LimitMEMLOCK = "infinity";
LimitNOFILE = "1048576";
LimitNPROC = "infinity";
TasksMax = "infinity";
Delegate = true; # LXD needs to manage cgroups in its subtree
# By default, `lxd` loads configuration files from hard-coded
# `/usr/share/lxc/config` - since this is a no-go for us, we have to
# explicitly tell it where the actual configuration files are
Environment = lib.mkIf (config.virtualisation.lxc.lxcfs.enable) "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
};
unitConfig.ConditionPathExists = "!/var/lib/incus/.migrated-from-lxd";
};
systemd.services.lxd-preseed = lib.mkIf (cfg.preseed != null) {
description = "LXD initialization with preseed file";
wantedBy = [ "multi-user.target" ];
requires = [ "lxd.service" ];
after = [ "lxd.service" ];
script = ''
${pkgs.coreutils}/bin/cat ${preseedFormat.generate "lxd-preseed.yaml" cfg.preseed} | ${cfg.package}/bin/lxd init --preseed
'';
serviceConfig = {
Type = "oneshot";
};
};
users.groups.lxd = { };
users.users.root = {
subUidRanges = [
{
startUid = 1000000;
count = 65536;
}
];
subGidRanges = [
{
startGid = 1000000;
count = 65536;
}
];
};
boot.kernel.sysctl = lib.mkIf cfg.recommendedSysctlSettings {
"fs.inotify.max_queued_events" = 1048576;
"fs.inotify.max_user_instances" = 1048576;
"fs.inotify.max_user_watches" = 1048576;
"vm.max_map_count" = 262144; # TODO: Default vm.max_map_count has been increased system-wide
"kernel.dmesg_restrict" = 1;
"net.ipv4.neigh.default.gc_thresh3" = 8192;
"net.ipv6.neigh.default.gc_thresh3" = 8192;
"kernel.keys.maxkeys" = 2000;
};
boot.kernelModules = [
"veth"
"xt_comment"
"xt_CHECKSUM"
"xt_MASQUERADE"
"vhost_vsock"
]
++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
};
}

View File

@@ -434,100 +434,6 @@ rec {
)
);
# An image that can be imported into lxd and used for container creation
lxdContainerImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (
system:
with import ./.. { inherit system; };
hydraJob (
(import lib/eval-config.nix {
inherit system;
modules = [
configuration
versionModule
./maintainers/scripts/lxd/lxd-container-image.nix
];
}).config.system.build.tarball
)
);
lxdContainerImageSquashfs = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (
system:
with import ./.. { inherit system; };
hydraJob (
(import lib/eval-config.nix {
inherit system;
modules = [
configuration
versionModule
./maintainers/scripts/lxd/lxd-container-image.nix
];
}).config.system.build.squashfs
)
);
# Metadata for the lxd image
lxdContainerMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (
system:
with import ./.. { inherit system; };
hydraJob (
(import lib/eval-config.nix {
inherit system;
modules = [
configuration
versionModule
./maintainers/scripts/lxd/lxd-container-image.nix
];
}).config.system.build.metadata
)
);
# An image that can be imported into lxd and used for container creation
lxdVirtualMachineImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (
system:
with import ./.. { inherit system; };
hydraJob (
(import lib/eval-config.nix {
inherit system;
modules = [
configuration
versionModule
./maintainers/scripts/lxd/lxd-virtual-machine-image.nix
];
}).config.system.build.qemuImage
)
);
# Metadata for the lxd image
lxdVirtualMachineImageMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (
system:
with import ./.. { inherit system; };
hydraJob (
(import lib/eval-config.nix {
inherit system;
modules = [
configuration
versionModule
./maintainers/scripts/lxd/lxd-virtual-machine-image.nix
];
}).config.system.build.metadata
)
);
# Ensure that all packages used by the minimal NixOS config end up in the channel.
dummy = forAllSystems (
system:

View File

@@ -489,6 +489,7 @@ in
endlessh-go = runTest ./endlessh-go.nix;
engelsystem = runTest ./engelsystem.nix;
enlightenment = runTest ./enlightenment.nix;
ente = runTest ./ente;
env = runTest ./env.nix;
envfs = runTest ./envfs.nix;
envoy = runTest {
@@ -855,7 +856,6 @@ in
luks = runTest ./luks.nix;
lvm2 = handleTest ./lvm2 { };
lxc = handleTest ./lxc { };
lxd = pkgs.recurseIntoAttrs (handleTest ./lxd { inherit handleTestOn; });
lxd-image-server = runTest ./lxd-image-server.nix;
lxqt = runTest ./lxqt.nix;
ly = runTest ./ly.nix;

View File

@@ -0,0 +1,15 @@
-----BEGIN CERTIFICATE-----
MIICRDCCAcqgAwIBAgIIBx6YLUwhT34wCgYIKoZIzj0EAwMwIDEeMBwGA1UEAxMV
bWluaWNhIHJvb3QgY2EgNjRhYWY2MB4XDTI1MDUxMzA4NTMyMVoXDTQ1MDUxMzA4
NTMyMVowFDESMBAGA1UEAxMJYWNtZS50ZXN0MHYwEAYHKoZIzj0CAQYFK4EEACID
YgAEcuBBV1FZ9s6D3Iz3+K07BwtcSqDOmk5WGsuL/owdeIQkT5OhqdZ+0v4TA6V3
HLb9fyaEeZ6cG8vX4fMy6wIMi1E38o1cfiTYLjS9mU/GVN+eTsnYdUS8g7uz8p0e
C0X2o4HcMIHZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI
KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBTNdPze2U/U7/72ULml
V/K/73d2xTB5BgNVHREEcjBwgglhY21lLnRlc3SCEmFjY291bnRzLmFjbWUudGVz
dIIQYWxidW1zLmFjbWUudGVzdIINYXBpLmFjbWUudGVzdIIOY2FzdC5hY21lLnRl
c3SCEHBob3Rvcy5hY21lLnRlc3SCDHMzLmFjbWUudGVzdDAKBggqhkjOPQQDAwNo
ADBlAjB9Eao+y/Wzy+mMw4e4P2OidFxDFv8o1jDlCN5mvXBQrlAoSKVwgkpreKsd
R/3iaacCMQC7CS3XKJVRbOtI6CjVHs7SV9fwCqJ6EaLcUjeNcigxcSRKGfG1ntl+
bt0LubZZd+c=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,6 @@
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDB631W2iczyfu4h/4f/
721JKAsYRAnxLV7oYSUv9rFC+z8CPC7T74Lzmoccr0mR72WhZANiAARy4EFXUVn2
zoPcjPf4rTsHC1xKoM6aTlYay4v+jB14hCRPk6Gp1n7S/hMDpXcctv1/JoR5npwb
y9fh8zLrAgyLUTfyjVx+JNguNL2ZT8ZU355Oydh1RLyDu7PynR4LRfY=
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,13 @@
-----BEGIN CERTIFICATE-----
MIIB/DCCAYKgAwIBAgIIZKr2ScoFkWAwCgYIKoZIzj0EAwMwIDEeMBwGA1UEAxMV
bWluaWNhIHJvb3QgY2EgNjRhYWY2MCAXDTI1MDUxMzA4NTMyMVoYDzIxMjUwNTEz
MDg1MzIxWjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSA2NGFhZjYwdjAQBgcq
hkjOPQIBBgUrgQQAIgNiAAST7GqqY2N7XW9SDHXkNOhbLMaIBTtdCpmu4AAEjRzS
/KozwcGfWf98GyMJ+t8bFg9f0mCbWrl1TVhIb3eV7k7oadJYvBNljIBnnkKgmw1b
nzIE0qbzcRWmz0m5ReFNkGCjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQW
MBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1Ud
DgQWBBTNdPze2U/U7/72ULmlV/K/73d2xTAfBgNVHSMEGDAWgBTNdPze2U/U7/72
ULmlV/K/73d2xTAKBggqhkjOPQQDAwNoADBlAjBto95DikOxFmQEv/c5dCbz4eYW
dsB78N+m2nrMgx10pzOvXNkvrt/D3mUbbnZI1DMCMQDQKQ+qPUF+PdDdSc21v778
4Sokp/5SNBUVm7CT0I7OiPTtuLc//r6SK8d9VBQArx0=
-----END CERTIFICATE-----

View File

@@ -0,0 +1,6 @@
-----BEGIN PRIVATE KEY-----
MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDCIBDkk1pfjwxBpwex2
2izySRuBmJ4Za2aRtbnTbPevhHYs0WL8LTPID47dAt0erFihZANiAAST7GqqY2N7
XW9SDHXkNOhbLMaIBTtdCpmu4AAEjRzS/KozwcGfWf98GyMJ+t8bFg9f0mCbWrl1
TVhIb3eV7k7oadJYvBNljIBnnkKgmw1bnzIE0qbzcRWmz0m5ReFNkGA=
-----END PRIVATE KEY-----

View File

@@ -0,0 +1,139 @@
{ lib, pkgs, ... }:
let
accessKey = "BKIKJAA5BMMU2RHO6IBB";
secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
rootCredentialsFile = pkgs.writeText "minio-credentials-full" ''
MINIO_ROOT_USER=${accessKey}
MINIO_ROOT_PASSWORD=${secretKey}
'';
certs = import ./snakeoil-certs.nix;
domain = certs.domain;
in
{
name = "ente";
meta.maintainers = [ lib.maintainers.oddlama ];
nodes.minio =
{ ... }:
{
environment.systemPackages = [ pkgs.minio-client ];
services.minio = {
enable = true;
inherit rootCredentialsFile;
};
networking.firewall.allowedTCPPorts = [
9000
];
systemd.services.minio.environment = {
MINIO_SERVER_URL = "https://s3.${domain}";
};
};
nodes.ente =
{
config,
nodes,
lib,
...
}:
{
security.pki.certificateFiles = [ certs.ca.cert ];
networking.extraHosts = ''
${config.networking.primaryIPAddress} accounts.${domain} albums.${domain} api.${domain} cast.${domain} photos.${domain} s3.${domain}
'';
networking.firewall.allowedTCPPorts = [
80
443
];
services.nginx = {
recommendedProxySettings = true;
virtualHosts =
lib.genAttrs
[
"accounts.${domain}"
"albums.${domain}"
"api.${domain}"
"cast.${domain}"
"photos.${domain}"
]
(_: {
sslCertificate = certs.${domain}.cert;
sslCertificateKey = certs.${domain}.key;
})
// {
"s3.${domain}" = {
forceSSL = true;
sslCertificate = certs.${domain}.cert;
sslCertificateKey = certs.${domain}.key;
locations."/".proxyPass = "http://${nodes.minio.networking.primaryIPAddress}:9000";
extraConfig = ''
client_max_body_size 32M;
proxy_buffering off;
proxy_request_buffering off;
'';
};
};
};
services.ente = {
web = {
enable = true;
domains = {
accounts = "accounts.${domain}";
albums = "albums.${domain}";
cast = "cast.${domain}";
photos = "photos.${domain}";
};
};
api = {
enable = true;
nginx.enable = true;
enableLocalDB = true;
domain = "api.${domain}";
settings = {
s3 = {
use_path_style_urls = true;
b2-eu-cen = {
endpoint = "https://s3.${domain}";
region = "us-east-1";
bucket = "ente";
key._secret = pkgs.writeText "accesskey" accessKey;
secret._secret = pkgs.writeText "secretkey" secretKey;
};
};
key = {
encryption._secret = pkgs.writeText "encryption" "T0sn+zUVFOApdX4jJL4op6BtqqAfyQLH95fu8ASWfno=";
hash._secret = pkgs.writeText "hash" "g/dBZBs1zi9SXQ0EKr4RCt1TGr7ZCKkgrpjyjrQEKovWPu5/ce8dYM6YvMIPL23MMZToVuuG+Z6SGxxTbxg5NQ==";
};
jwt.secret._secret = pkgs.writeText "jwt" "i2DecQmfGreG6q1vBj5tCokhlN41gcfS2cjOs9Po-u8=";
};
};
};
};
testScript = ''
minio.start()
minio.wait_for_unit("minio.service")
minio.wait_for_open_port(9000)
# Create a test bucket on the server
minio.succeed("mc alias set minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4")
minio.succeed("mc mb -p minio/ente")
# Start ente
ente.start()
ente.wait_for_unit("ente.service")
ente.wait_for_unit("nginx.service")
# Wait until api is up
ente.wait_until_succeeds("journalctl --since -2m --unit ente.service --grep 'We have lift-off.'", timeout=30)
# Wait until photos app is up
ente.wait_until_succeeds("curl -Ls https://photos.${domain}/ | grep -q 'Ente Photos'", timeout=30)
'';
}

View File

@@ -0,0 +1,36 @@
# Minica can provide a CA key and cert, plus a key
# and cert for our fake CA server's Web Front End (WFE).
{
pkgs ? import <nixpkgs> { },
minica ? pkgs.minica,
mkDerivation ? pkgs.stdenv.mkDerivation,
}:
let
conf = import ./snakeoil-certs.nix;
domain = conf.domain;
in
mkDerivation {
name = "test-certs";
buildInputs = [
(minica.overrideAttrs (_old: {
prePatch = ''
sed -i 's_NotAfter: time.Now().AddDate(2, 0, 30),_NotAfter: time.Now().AddDate(20, 0, 0),_' main.go
'';
}))
];
dontUnpack = true;
buildPhase = ''
minica \
--ca-key ca.key.pem \
--ca-cert ca.cert.pem \
--domains ${domain},accounts.${domain},albums.${domain},api.${domain},cast.${domain},photos.${domain},s3.${domain}
'';
installPhase = ''
mkdir -p $out
mv ca.*.pem $out/
mv ${domain}/key.pem $out/${domain}.key.pem
mv ${domain}/cert.pem $out/${domain}.cert.pem
'';
}

View File

@@ -0,0 +1,14 @@
let
domain = "acme.test";
in
{
inherit domain;
ca = {
cert = ./ca.cert.pem;
key = ./ca.key.pem;
};
"${domain}" = {
cert = ./. + "/${domain}.cert.pem";
key = ./. + "/${domain}.key.pem";
};
}

View File

@@ -24,10 +24,6 @@ in
storageLvm = true;
};
lxd-to-incus = import ./lxd-to-incus.nix {
inherit lts pkgs system;
};
openvswitch = incusTest {
inherit lts pkgs system;
networkOvs = true;

View File

@@ -1,119 +0,0 @@
import ../make-test-python.nix (
{
pkgs,
lib,
lts ? true,
...
}:
let
releases = import ../../release.nix { configuration.documentation.enable = lib.mkForce false; };
container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
in
{
name = "lxd-to-incus";
meta = {
maintainers = lib.teams.lxc.members;
};
nodes.machine =
{ ... }:
{
virtualisation = {
diskSize = 6144;
cores = 2;
memorySize = 2048;
lxd.enable = true;
lxd.preseed = {
networks = [
{
name = "nixostestbr0";
type = "bridge";
config = {
"ipv4.address" = "10.0.100.1/24";
"ipv4.nat" = "true";
};
}
];
profiles = [
{
name = "default";
devices = {
eth0 = {
name = "eth0";
network = "nixostestbr0";
type = "nic";
};
root = {
path = "/";
pool = "nixostest_pool";
size = "35GiB";
type = "disk";
};
};
}
{
name = "nixos_notdefault";
devices = { };
}
];
storage_pools = [
{
name = "nixostest_pool";
driver = "dir";
}
];
};
incus = {
enable = true;
package = if lts then pkgs.incus-lts else pkgs.incus;
};
};
networking.nftables.enable = true;
};
testScript = ''
def lxd_wait_for_preseed(_) -> bool:
_, output = machine.systemctl("is-active lxd-preseed.service")
return ("inactive" in output)
def lxd_instance_is_up(_) -> bool:
status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
return status == 0
def incus_instance_is_up(_) -> bool:
status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/systemctl -- is-system-running")
return status == 0
with machine.nested("initialize lxd and resources"):
machine.wait_for_unit("sockets.target")
machine.wait_for_unit("lxd.service")
retry(lxd_wait_for_preseed)
machine.succeed("lxc image import ${container-image-metadata}/*/*.tar.xz ${container-image-rootfs}/*/*.tar.xz --alias nixos")
machine.succeed("lxc launch nixos container")
retry(lxd_instance_is_up)
machine.wait_for_unit("incus.service")
with machine.nested("run migration"):
machine.succeed("${pkgs.incus}/bin/lxd-to-incus --yes")
with machine.nested("verify resources migrated to incus"):
machine.succeed("incus config show container")
retry(incus_instance_is_up)
machine.succeed("incus exec container -- true")
machine.succeed("incus profile show default | grep nixostestbr0")
machine.succeed("incus profile show default | grep nixostest_pool")
machine.succeed("incus profile show nixos_notdefault")
machine.succeed("incus storage show nixostest_pool")
machine.succeed("incus network show nixostestbr0")
'';
}
)

View File

@@ -9,8 +9,8 @@ import ../make-test-python.nix (
};
};
lxc-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
lxc-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
lxc-image-metadata = releases.incusContainerMeta.${pkgs.stdenv.hostPlatform.system};
lxc-image-rootfs = releases.incusContainerImage.${pkgs.stdenv.hostPlatform.system};
in
{

View File

@@ -1,15 +1,20 @@
{ pkgs, lib, ... }:
let
lxd-image = import ../release.nix {
incus-image = import ../release.nix {
configuration = {
# Building documentation makes the test unnecessarily take a longer time:
documentation.enable = lib.mkForce false;
};
};
lxd-image-metadata = lxd-image.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
lxd-image-rootfs = lxd-image.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
incus-image-metadata =
incus-image.incusContainerMeta.${pkgs.stdenv.hostPlatform.system}
+ "/tarball/nixos-image-lxc-*-${pkgs.stdenv.hostPlatform.system}.tar.xz";
incus-image-rootfs =
incus-image.incusContainerImage.${pkgs.stdenv.hostPlatform.system}
+ "/nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}.squashfs";
in
{
@@ -31,10 +36,12 @@ in
memorySize = 2048;
diskSize = 4096;
lxc.lxcfs.enable = true;
lxd.enable = true;
incus.enable = true;
};
# incus requires
networking.nftables.enable = true;
security.pki.certificates = [
(builtins.readFile ./common/acme/server/ca.cert.pem)
];
@@ -64,37 +71,29 @@ in
testScript = ''
machine.wait_for_unit("sockets.target")
machine.wait_for_unit("lxd.service")
machine.wait_for_file("/var/lib/lxd/unix.socket")
machine.wait_for_unit("incus.service")
# Wait for lxd to settle
machine.succeed("lxd waitready")
# lxd expects the pool's directory to already exist
machine.succeed("mkdir /var/lxd-pool")
machine.succeed("incus admin waitready")
machine.succeed("incus admin init --minimal")
machine.succeed(
"lxd init --minimal"
)
machine.succeed(
"lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
"incus image import ${incus-image-metadata} ${incus-image-rootfs} --alias nixos"
)
loc = "/var/www/simplestreams/images/iats/nixos/amd64/default/v1"
with subtest("push image to server"):
machine.succeed("lxc launch nixos test")
machine.succeed("incus launch nixos test")
machine.sleep(5)
machine.succeed("lxc stop -f test")
machine.succeed("lxc publish --public test --alias=testimg")
machine.succeed("lxc image export testimg")
machine.succeed("incus stop -f test")
machine.succeed("incus publish --public test --alias=testimg")
machine.succeed("incus image export testimg")
machine.succeed("ls >&2")
machine.succeed("mkdir -p " + loc)
machine.succeed("mv *.tar.gz " + loc)
with subtest("pull image from server"):
machine.succeed("lxc remote add img https://acme.test --protocol=simplestreams")
machine.succeed("lxc image list img: >&2")
machine.succeed("incus remote add img https://acme.test --protocol=simplestreams")
machine.succeed("incus image list img: >&2")
'';
}

View File

@@ -1,133 +0,0 @@
import ../make-test-python.nix (
{ pkgs, lib, ... }:
let
releases = import ../../release.nix {
configuration = {
# Building documentation makes the test unnecessarily take a longer time:
documentation.enable = lib.mkForce false;
# Our tests require `grep` & friends:
environment.systemPackages = with pkgs; [ busybox ];
};
};
lxd-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
lxd-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
lxd-image-rootfs-squashfs = releases.lxdContainerImageSquashfs.${pkgs.stdenv.hostPlatform.system};
in
{
name = "lxd-container";
nodes.machine =
{ lib, ... }:
{
virtualisation = {
diskSize = 6144;
# Since we're testing `limits.cpu`, we've gotta have a known number of
# cores to lean on
cores = 2;
# Ditto, for `limits.memory`
memorySize = 512;
lxc.lxcfs.enable = true;
lxd.enable = true;
};
};
testScript = ''
def instance_is_up(_) -> bool:
status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
return status == 0
machine.wait_for_unit("sockets.target")
machine.wait_for_unit("lxd.service")
machine.wait_for_file("/var/lib/lxd/unix.socket")
# Wait for lxd to settle
machine.succeed("lxd waitready")
# no preseed should mean no service
machine.fail("systemctl status lxd-preseed.service")
machine.succeed("lxd init --minimal")
machine.succeed(
"lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
)
with subtest("Container can be managed"):
machine.succeed("lxc launch nixos container")
with machine.nested("Waiting for instance to start and be usable"):
retry(instance_is_up)
machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
machine.succeed("lxc delete -f container")
with subtest("Squashfs image is functional"):
machine.succeed(
"lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs-squashfs}/nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}.squashfs --alias nixos-squashfs"
)
machine.succeed("lxc launch nixos-squashfs container")
with machine.nested("Waiting for instance to start and be usable"):
retry(instance_is_up)
machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
machine.succeed("lxc delete -f container")
with subtest("Container is mounted with lxcfs inside"):
machine.succeed("lxc launch nixos container")
with machine.nested("Waiting for instance to start and be usable"):
retry(instance_is_up)
## ---------- ##
## limits.cpu ##
machine.succeed("lxc config set container limits.cpu 1")
machine.succeed("lxc restart container")
with machine.nested("Waiting for instance to start and be usable"):
retry(instance_is_up)
assert (
"1"
== machine.succeed("lxc exec container grep -- -c ^processor /proc/cpuinfo").strip()
)
machine.succeed("lxc config set container limits.cpu 2")
machine.succeed("lxc restart container")
with machine.nested("Waiting for instance to start and be usable"):
retry(instance_is_up)
assert (
"2"
== machine.succeed("lxc exec container grep -- -c ^processor /proc/cpuinfo").strip()
)
## ------------- ##
## limits.memory ##
machine.succeed("lxc config set container limits.memory 64MB")
machine.succeed("lxc restart container")
with machine.nested("Waiting for instance to start and be usable"):
retry(instance_is_up)
assert (
"MemTotal: 62500 kB"
== machine.succeed("lxc exec container grep -- MemTotal /proc/meminfo").strip()
)
machine.succeed("lxc config set container limits.memory 128MB")
machine.succeed("lxc restart container")
with machine.nested("Waiting for instance to start and be usable"):
retry(instance_is_up)
assert (
"MemTotal: 125000 kB"
== machine.succeed("lxc exec container grep -- MemTotal /proc/meminfo").strip()
)
machine.succeed("lxc delete -f container")
'';
}
)

View File

@@ -1,13 +0,0 @@
{
system ? builtins.currentSystem,
config ? { },
pkgs ? import ../../.. { inherit system config; },
handleTestOn,
}:
{
container = import ./container.nix { inherit system pkgs; };
nftables = import ./nftables.nix { inherit system pkgs; };
preseed = import ./preseed.nix { inherit system pkgs; };
ui = import ./ui.nix { inherit system pkgs; };
virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix { inherit system pkgs; };
}

View File

@@ -1,51 +0,0 @@
# This test makes sure that lxd stops implicitly depending on iptables when
# user enabled nftables.
#
# It has been extracted from `lxd.nix` for clarity, and because switching from
# iptables to nftables requires a full reboot, which is a bit hard inside NixOS
# tests.
import ../make-test-python.nix (
{ pkgs, lib, ... }:
{
name = "lxd-nftables";
nodes.machine =
{ lib, ... }:
{
virtualisation = {
lxd.enable = true;
};
networking = {
firewall.enable = false;
nftables.enable = true;
nftables.tables."filter".family = "inet";
nftables.tables."filter".content = ''
chain incoming {
type filter hook input priority 0;
policy accept;
}
chain forward {
type filter hook forward priority 0;
policy accept;
}
chain output {
type filter hook output priority 0;
policy accept;
}
'';
};
};
testScript = ''
machine.wait_for_unit("network.target")
with subtest("When nftables are enabled, lxd doesn't depend on iptables anymore"):
machine.succeed("lsmod | grep nf_tables")
machine.fail("lsmod | grep ip_tables")
'';
}
)

View File

@@ -1,71 +0,0 @@
import ../make-test-python.nix (
{ pkgs, lib, ... }:
{
name = "lxd-preseed";
nodes.machine =
{ lib, ... }:
{
virtualisation = {
diskSize = 4096;
lxc.lxcfs.enable = true;
lxd.enable = true;
lxd.preseed = {
networks = [
{
name = "nixostestbr0";
type = "bridge";
config = {
"ipv4.address" = "10.0.100.1/24";
"ipv4.nat" = "true";
};
}
];
profiles = [
{
name = "nixostest_default";
devices = {
eth0 = {
name = "eth0";
network = "nixostestbr0";
type = "nic";
};
root = {
path = "/";
pool = "default";
size = "35GiB";
type = "disk";
};
};
}
];
storage_pools = [
{
name = "nixostest_pool";
driver = "dir";
}
];
};
};
};
testScript = ''
def wait_for_preseed(_) -> bool:
_, output = machine.systemctl("is-active lxd-preseed.service")
return ("inactive" in output)
machine.wait_for_unit("sockets.target")
machine.wait_for_unit("lxd.service")
with machine.nested("Waiting for preseed to complete"):
retry(wait_for_preseed)
with subtest("Verify preseed resources created"):
machine.succeed("lxc profile show nixostest_default")
machine.succeed("lxc network info nixostestbr0")
machine.succeed("lxc storage show nixostest_pool")
'';
}
)

View File

@@ -1,74 +0,0 @@
import ../make-test-python.nix (
{ pkgs, ... }:
{
name = "lxd-ui";
nodes.machine =
{ lib, ... }:
{
virtualisation = {
lxd.enable = true;
lxd.ui.enable = true;
};
environment.systemPackages =
let
seleniumScript =
pkgs.writers.writePython3Bin "selenium-script"
{
libraries = with pkgs.python3Packages; [ selenium ];
}
''
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
options = Options()
options.add_argument("--headless")
service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}") # noqa: E501
driver = webdriver.Firefox(options=options, service=service)
driver.implicitly_wait(10)
driver.get("https://localhost:8443/ui")
wait = WebDriverWait(driver, 60)
assert len(driver.find_elements(By.CLASS_NAME, "l-application")) > 0
assert len(driver.find_elements(By.CLASS_NAME, "l-navigation__drawer")) > 0
driver.close()
'';
in
with pkgs;
[
curl
firefox-unwrapped
geckodriver
seleniumScript
];
};
testScript = ''
machine.wait_for_unit("sockets.target")
machine.wait_for_unit("lxd.service")
machine.wait_for_file("/var/lib/lxd/unix.socket")
# Wait for lxd to settle
machine.succeed("lxd waitready")
# Configure LXC listen address
machine.succeed("lxc config set core.https_address :8443")
machine.succeed("systemctl restart lxd")
# Check that the LXD_UI environment variable is populated in the systemd unit
machine.succeed("cat /etc/systemd/system/lxd.service | grep 'LXD_UI'")
# Ensure the endpoint returns an HTML page with 'LXD UI' in the title
machine.succeed("curl -kLs https://localhost:8443/ui | grep '<title>LXD UI</title>'")
# Ensure the application is actually rendered by the Javascript
machine.succeed("PYTHONUNBUFFERED=1 selenium-script")
'';
}
)

View File

@@ -1,65 +0,0 @@
import ../make-test-python.nix (
{ pkgs, lib, ... }:
let
releases = import ../../release.nix {
configuration = {
# Building documentation makes the test unnecessarily take a longer time:
documentation.enable = lib.mkForce false;
# Our tests require `grep` & friends:
environment.systemPackages = with pkgs; [ busybox ];
};
};
lxd-image-metadata = releases.lxdVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
lxd-image-disk = releases.lxdVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
instance-name = "instance1";
in
{
name = "lxd-virtual-machine";
nodes.machine =
{ lib, ... }:
{
virtualisation = {
diskSize = 4096;
cores = 2;
# Ensure we have enough memory for the nested virtual machine
memorySize = 1024;
lxc.lxcfs.enable = true;
lxd.enable = true;
};
};
testScript = ''
def instance_is_up(_) -> bool:
status, _ = machine.execute("lxc exec ${instance-name} --disable-stdin --force-interactive /run/current-system/sw/bin/true")
return status == 0
machine.wait_for_unit("sockets.target")
machine.wait_for_unit("lxd.service")
machine.wait_for_file("/var/lib/lxd/unix.socket")
# Wait for lxd to settle
machine.succeed("lxd waitready")
machine.succeed("lxd init --minimal")
with subtest("virtual-machine image can be imported"):
machine.succeed("lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-disk}/nixos.qcow2 --alias nixos")
with subtest("virtual-machine can be launched and become available"):
machine.succeed("lxc launch nixos ${instance-name} --vm --config limits.memory=512MB --config security.secureboot=false")
with machine.nested("Waiting for instance to start and be usable"):
retry(instance_is_up)
with subtest("lxd-agent is started"):
machine.succeed("lxc exec ${instance-name} systemctl is-active lxd-agent")
'';
}
)

View File

@@ -7,8 +7,8 @@ buildVscodeMarketplaceExtension {
mktplcRef = {
name = "vscode-wakatime";
publisher = "WakaTime";
version = "25.3.0";
hash = "sha256-cw3wcMr8QKG75VofIsAmlD2RqN/0fGdqhugen/vmJlo=";
version = "25.3.2";
hash = "sha256-xX1vejS8zoidcI6fnp7vvtSw4rMHIe2IF4JQJB5hvqs=";
};
meta = {

View File

@@ -7,8 +7,8 @@ vscode-utils.buildVscodeMarketplaceExtension (finalAttrs: {
mktplcRef = {
name = "amazon-q-vscode";
publisher = "AmazonWebServices";
version = "1.90.0";
hash = "sha256-9z8EB5jMtpmQadbX0usWUlbs/n87wX9dJcyrveKqyJ8=";
version = "1.93.0";
hash = "sha256-Jj8Sj9UrTZVwejbinwxuRt4sZ58vsYlBHhqHkTWjNPM=";
};
meta = {

View File

@@ -1,25 +1,15 @@
{
lib,
vscode-utils,
claude-code,
}:
vscode-utils.buildVscodeExtension (finalAttrs: {
pname = "claude-code";
inherit (claude-code) version;
vscodeExtPublisher = "anthropic";
vscodeExtName = "claude-code";
vscodeExtUniqueId = "${finalAttrs.vscodeExtPublisher}.${finalAttrs.vscodeExtName}";
src = "${claude-code}/lib/node_modules/@anthropic-ai/claude-code/vendor/claude-code.vsix";
unpackPhase = ''
runHook preUnpack
unzip $src
runHook postUnpack
'';
vscode-utils.buildVscodeMarketplaceExtension {
mktplcRef = {
name = "claude-code";
publisher = "anthropic";
version = "1.0.112";
hash = "sha256-xQvw68PW+JjCxmILZBA/XB/+Qy1hTfBRxX3PI3zmUb8=";
};
meta = {
description = "Harness the power of Claude Code without leaving your IDE";
@@ -29,4 +19,4 @@ vscode-utils.buildVscodeExtension (finalAttrs: {
sourceProvenance = with lib.sourceTypes; [ binaryBytecode ];
maintainers = with lib.maintainers; [ xiaoxiangmoe ];
};
})
}

View File

@@ -11,8 +11,8 @@ vscode-utils.buildVscodeMarketplaceExtension {
mktplcRef = {
name = "calva";
publisher = "betterthantomorrow";
version = "2.0.524";
hash = "sha256-gt6+juIwTKES0CDBxv4uVSPsp0v1RUKRQoWneDfyVJQ=";
version = "2.0.525";
hash = "sha256-pTRyDsgxd9o8Y9p2rsZTT+uG6+mSBBM/k4nczvlYGrM=";
};
nativeBuildInputs = [

View File

@@ -279,8 +279,8 @@ let
mktplcRef = {
name = "icons-carbon";
publisher = "antfu";
version = "0.2.6";
hash = "sha256-R8eHLuebfgHaKtHPKBaaYybotluuH9WrUBpgyuIVOxc=";
version = "0.2.7";
hash = "sha256-m9iBSHHkDsOtGQD50QDgmqzOu7EXVYZxpZCZQ/q5NWU=";
};
meta = {
license = lib.licenses.mit;
@@ -1193,8 +1193,8 @@ let
mktplcRef = {
name = "flutter";
publisher = "dart-code";
version = "3.116.0";
hash = "sha256-5T/C9o6fRLnWM7MF8ATYJMEdJgLA4ZdA4Lgjtjxh2ZE=";
version = "3.118.0";
hash = "sha256-mjGDQslTcWnT0gQ+1l1LXR/eWc335dJljlbvSHvCxNk=";
};
meta.license = lib.licenses.mit;
@@ -1973,8 +1973,8 @@ let
mktplcRef = {
publisher = "github";
name = "vscode-pull-request-github";
version = "0.116.1";
hash = "sha256-qJGCY1NBCv11xzeryELG0OVZy4wQZqdcYPFadZ1tlIU=";
version = "0.116.2";
hash = "sha256-PDX3Wpmy82MvBzr/fnPP5Y7HMKkfphJLe/Rc284Vxlc=";
};
meta = {
license = lib.licenses.mit;
@@ -3218,8 +3218,8 @@ let
mktplcRef = {
name = "vscode-kubernetes-tools";
publisher = "ms-kubernetes-tools";
version = "1.3.25";
hash = "sha256-aBo19JYfYa04XUFJT0Z0O4FFI74GoCPLbE06x9VmJS8=";
version = "1.3.26";
hash = "sha256-wiRV8FQw9TPNYvsgoVy8nAvCA9eosxXTaXs7YjdoBFs=";
};
meta = {
license = lib.licenses.mit;
@@ -3784,8 +3784,8 @@ let
mktplcRef = {
name = "prisma";
publisher = "Prisma";
version = "6.13.0";
hash = "sha256-qx+2lKRx/4fS2xz9lBIQsTD5tcjTzow7WmYsHYyrfOw=";
version = "6.15.0";
hash = "sha256-MFHOKQp1mjygjAo5moahAnQFRo01D7r78eFlM1+k998=";
};
meta = {
changelog = "https://marketplace.visualstudio.com/items/Prisma.prisma/changelog";
@@ -4528,8 +4528,8 @@ let
mktplcRef = {
name = "tabnine-vscode";
publisher = "tabnine";
version = "3.298.0";
hash = "sha256-zzsoVOiSgA5W88YuWVHILdHN/PuWaQAXjZ0eUL9B9ZI=";
version = "3.314.1";
hash = "sha256-KWXQY4HyK7s4mNGh6x1eDK6okC5rxWe916RjlsACQxA=";
};
meta = {
license = lib.licenses.mit;
@@ -4767,8 +4767,8 @@ let
mktplcRef = {
name = "emacs-mcx";
publisher = "tuttieee";
version = "0.89.0";
hash = "sha256-dlAgnN8Ku6hapJrWI8DPAFbbOFllr9pu8H6atWdkNYc=";
version = "0.90.8";
hash = "sha256-0I4jf9Ba8PlNM0eUYHaMyCvuHZ5U3+RvT8aIbHJu9KU=";
};
meta = {
changelog = "https://github.com/whitphx/vscode-emacs-mcx/blob/main/CHANGELOG.md";
@@ -5169,8 +5169,8 @@ let
mktplcRef = {
name = "volar";
publisher = "Vue";
version = "3.0.5";
hash = "sha256-Ja0zWCHHxd1XE2f2ZQvchqzCKv0pbcAU3uEh2f6+X3c=";
version = "3.0.6";
hash = "sha256-hQfS6JTq4hQM5JDCtYFfIaBoV5ORbnao/CL5bqgu+jk=";
};
meta = {
changelog = "https://github.com/vuejs/language-tools/blob/master/CHANGELOG.md";

View File

@@ -11,7 +11,7 @@ vscode-utils.buildVscodeMarketplaceExtension {
name = "tinymist";
publisher = "myriad-dreamin";
inherit (tinymist) version;
hash = "sha256-wFFzUwOyaMInaVskKK/KA1eDd71fZ2j+snZ2NvFB5nU=";
hash = "sha256-VSbrKzj+FBK+MtXkBJzHe8pEODQACCYgADKYyyKUHAY=";
};
nativeBuildInputs = [

View File

@@ -14,19 +14,19 @@ let
{
x86_64-linux = {
arch = "linux-x64";
hash = "sha256-2hmkSgS3r4ghAXA8E0blWhe7kLvtZoApSRWXf6Ff5AE=";
hash = "sha256-4vZn0n2oQ0Bu1k/mOo5QYON9FyUCnifQWi7rt8v64Qw=";
};
aarch64-linux = {
arch = "linux-arm64";
hash = "sha256-XVygGMHtEhk+Fttd/xdZr5Yau9P3yCSo43RrXhqh/PQ=";
hash = "sha256-L7FproPmOySe3SuLyvaD5hje9/QecRpVgSATSzddD9U=";
};
x86_64-darwin = {
arch = "darwin-x64";
hash = "sha256-8awJFJVSo6ru3ej4utkTF/5eK4dMw63Z3KHNHRRFSBs=";
hash = "sha256-fJt5UXPdUSQHD1t1ThArnD2n+1hVpzXJD+CNKzQoaI0=";
};
aarch64-darwin = {
arch = "darwin-arm64";
hash = "sha256-JNik8Q9/BDjjuLVNJFOazyH9/a4s2HmkuENLQlDdKP4=";
hash = "sha256-u3H2+nz6lJtMXn38dDnshaJcnoC7cKJg2q2n0nCa0Dc=";
};
}
.${system} or (throw "Unsupported system: ${system}");
@@ -38,7 +38,7 @@ vscode-utils.buildVscodeMarketplaceExtension {
# Please update the corresponding binary (typos-lsp)
# when updating this extension.
# See pkgs/by-name/ty/typos-lsp/package.nix
version = "0.1.41";
version = "0.1.43";
inherit (extInfo) hash arch;
};

View File

@@ -16,13 +16,13 @@
mkDerivation rec {
pname = "klayout";
version = "0.30.3";
version = "0.30.4";
src = fetchFromGitHub {
owner = "KLayout";
repo = "klayout";
rev = "v${version}";
hash = "sha256-YsyKCSSxg0THflzPVF9yRn1X2liVT5xNafeQej/pdyI=";
hash = "sha256-iG1f43gvKcu8jlVJ47IH5A6Ld8zusEbZL2Xou6ix5QU=";
};
postPatch = ''

View File

@@ -14,16 +14,16 @@ builtins.mapAttrs
}:
buildGoModule rec {
inherit pname;
version = "3.30.2";
version = "3.30.3";
src = fetchFromGitHub {
owner = "projectcalico";
repo = "calico";
rev = "v${version}";
hash = "sha256-UvHrCA/1n9dklcMY1AfNNW5/TtxVdmwmQb2DHEBFZhA=";
hash = "sha256-Z2kYUak/zzO0IsKQyQ6sb3UD4QUZ9+9vGGVfl4qdPF8=";
};
vendorHash = "sha256-Cp1Eo8Xa4c0o5l6/p+pyHa/t3jMUpgUDDXEAKwS6aCE=";
vendorHash = "sha256-C9sge+xNTsW30PF2wJhRUNI1YEmXInD+xcboCtcC9kc=";
inherit doCheck subPackages;

View File

@@ -126,13 +126,13 @@
"vendorHash": null
},
"aws": {
"hash": "sha256-5W12hwonBaEto5PTnvcBm9qeeIY37ALsyXs0r9pfVak=",
"hash": "sha256-McrDfdkmV6pvzMGSqLgPopQk2o3Gg3I/0Qfr5jVR4uw=",
"homepage": "https://registry.terraform.io/providers/hashicorp/aws",
"owner": "hashicorp",
"repo": "terraform-provider-aws",
"rev": "v6.11.0",
"rev": "v6.13.0",
"spdx": "MPL-2.0",
"vendorHash": "sha256-i3NwpixAXi9PzciiwTtHB8yrzLWv67gHiDY4HshAbBo="
"vendorHash": "sha256-eci9CC5Gf0Wgfci1Yof9X7pCPcJiITwYxEcCzBcYDU0="
},
"awscc": {
"hash": "sha256-1ZOxJyjDVLhbVEdiSIWF+eNVZ9g2coyz1Bq1TWYq768=",
@@ -660,13 +660,13 @@
"vendorHash": null
},
"ibm": {
"hash": "sha256-llsHIVqRN1yKQnMYA0MIinbUk2TL+NYeI0UlUcpCnN0=",
"hash": "sha256-sNhAq5htdk9cXVCHBfSdePy/On5JqMSoW/fbi2eiqVc=",
"homepage": "https://registry.terraform.io/providers/IBM-Cloud/ibm",
"owner": "IBM-Cloud",
"repo": "terraform-provider-ibm",
"rev": "v1.81.1",
"rev": "v1.82.1",
"spdx": "MPL-2.0",
"vendorHash": "sha256-cWfISNNeVPb6BU2V3sLbvlnFKzf3fVniV5Lu1Kpb9f0="
"vendorHash": "sha256-AF93N5v9pzDOoCF3IF4SBprvNZZTR+KQKSN31an7l1g="
},
"icinga2": {
"hash": "sha256-Y/Oq0aTzP+oSKPhHiHY9Leal4HJJm7TNDpcdqkUsCmk=",
@@ -912,13 +912,13 @@
"vendorHash": "sha256-U8eA/9og4LIedhPSEN9SyInLQuJSzvm0AeFhzC3oqyQ="
},
"ns1": {
"hash": "sha256-fRF2UsVpIWg0UGPAePEULxAjKi1TioYEeOeSxUuhvIc=",
"hash": "sha256-xIl0jUYmXe2tom8HwXLQRbdSWWyZkRtdDc0XqkQ0RcQ=",
"homepage": "https://registry.terraform.io/providers/ns1-terraform/ns1",
"owner": "ns1-terraform",
"repo": "terraform-provider-ns1",
"rev": "v2.6.5",
"rev": "v2.7.0",
"spdx": "MPL-2.0",
"vendorHash": "sha256-9J8RrnF9k503YLmg5rBA8u8SqldhB5AF4+PVtUy8wX8="
"vendorHash": "sha256-HC2MCVF3gS4nXKplC1pGrdjbhjRzr03ajqZ7pQIgSOc="
},
"null": {
"hash": "sha256-hPAcFWkeK1vjl1Cg/d7FaZpPhyU3pkU6VBIwxX2gEvA=",
@@ -1174,13 +1174,13 @@
"vendorHash": "sha256-Icua01a4ILF+oAO5nMeCGPZrWc3V/SVObWydO72CU3I="
},
"scaleway": {
"hash": "sha256-YUOfCTtlPn9UBnmmPNODUwEbGR4EkknkdIVdZpmDnQw=",
"hash": "sha256-FiC5FAag+ycf8Ti1iDXsJM5cb7xQUx8RLlv0gJ3+cNA=",
"homepage": "https://registry.terraform.io/providers/scaleway/scaleway",
"owner": "scaleway",
"repo": "terraform-provider-scaleway",
"rev": "v2.59.0",
"rev": "v2.60.0",
"spdx": "MPL-2.0",
"vendorHash": "sha256-VH20r9RBlygGXuriXCzs3xBar/l3blPR+UcgCobIdWU="
"vendorHash": "sha256-z8MzanM6u5CJSy7EFI583otoMzubkIrEuK1bldmV0u8="
},
"secret": {
"hash": "sha256-MmAnA/4SAPqLY/gYcJSTnEttQTsDd2kEdkQjQj6Bb+A=",

View File

@@ -52,8 +52,6 @@ lib.makeScope newScope (
purple-lurch = callPackage ./purple-lurch { };
purple-matrix = callPackage ./purple-matrix { };
purple-mm-sms = callPackage ./purple-mm-sms { };
purple-plugin-pack = callPackage ./purple-plugin-pack { };
@@ -69,5 +67,9 @@ lib.makeScope newScope (
pidgin-opensteamworks = callPackage ./pidgin-opensteamworks { };
purple-facebook = callPackage ./purple-facebook { };
}
// lib.optionalAttrs config.allowAliases {
purple-matrix = throw "'pidginPackages.purple-matrix' has been unmaintained since April 2022, so it was removed.";
}
)

View File

@@ -1,59 +0,0 @@
{
lib,
stdenv,
fetchFromGitHub,
pkg-config,
pidgin,
json-glib,
glib,
http-parser,
sqlite,
olm,
libgcrypt,
}:
stdenv.mkDerivation {
pname = "purple-matrix-unstable";
version = "2019-06-06";
src = fetchFromGitHub {
owner = "matrix-org";
repo = "purple-matrix";
rev = "4494ba22b479917f0b1f96a3019792d3d75bcff1";
sha256 = "1gjm0z4wa5vi9x1xk43rany5pffrwg958n180ahdj9a7sa8a4hpm";
};
env.NIX_CFLAGS_COMPILE = builtins.toString [
# glib-2.62 deprecations
"-DGLIB_DISABLE_DEPRECATION_WARNINGS"
# override "-O0 -Werror" set by build system
"-O3"
"-Wno-error"
];
nativeBuildInputs = [ pkg-config ];
buildInputs = [
pidgin
json-glib
glib
http-parser
sqlite
olm
libgcrypt
];
makeFlags = [
"PLUGIN_DIR_PURPLE=${placeholder "out"}/lib/purple-2"
"DATA_ROOT_DIR_PURPLE=${placeholder "out"}/share"
];
buildFlags = [ "CC=${stdenv.cc.targetPrefix}cc" ]; # fix build on darwin
meta = with lib; {
homepage = "https://github.com/matrix-org/purple-matrix";
description = "Matrix support for Pidgin / libpurple";
license = licenses.gpl2;
platforms = platforms.unix;
maintainers = with maintainers; [ symphorien ];
};
}

View File

@@ -3,23 +3,23 @@
{
"kicad" = {
kicadVersion = {
version = "9.0.3";
version = "9.0.4";
src = {
rev = "08e2e9df692929a2087bbf1340a915aa2365c622";
sha256 = "19rij2hz79rsmikdbygxzll2l7im5qi3i6phz4sdiagkc5k8b3rb";
rev = "aa7c96856b2c145cc83727304a0d86be6648efa0";
sha256 = "0736hhf8rs4g8cyhy3xyamyr4iszlvf18a1hwfpcv6qxy0hcbdcv";
};
};
libVersion = {
version = "9.0.3";
version = "9.0.4";
libSources = {
symbols.rev = "77ee421d180de82fce2d8c00f1b13a9456b43526";
symbols.sha256 = "0r9aimyrv7p4ykqnwb9ac3fd0dv11zmv2ll6qkmm5s875s35hhfl";
templates.rev = "6e651a795134380ac0dc3df1417d11cfab228033";
symbols.rev = "13ded2487d953fa9db16181eae38f890618296f7";
symbols.sha256 = "0qm1zq8bq6r7l1pssb9isnm5a03kixf5p3x7670ap4xwligdn3wg";
templates.rev = "c324e461b7e279eaaa3e15348fab15b6b36c02ba";
templates.sha256 = "0zs29zn8qjgxv0w1vyr8yxmj02m8752zagn4vcraqgik46dwg2id";
footprints.rev = "3ef8a3e0691599c633864118a3241e1cbeb873f1";
footprints.sha256 = "1ysnj0973y05nn016hxrghccfv65cas772i369xflay0sns8anqf";
packages3d.rev = "4c91925fde1402cc6da61d97cfb30a3de08d5bb6";
packages3d.sha256 = "0njv4y31k62qhcx0xxcl94p34jgna8z4bs3hwjwzjfmp7ddl2dyx";
footprints.rev = "0037953aada2d21e595bd82a5c44fcb20164191f";
footprints.sha256 = "15kdg661pq79npwb4j28hllqrvwygsz5rblzbdishiikysrba8wl";
packages3d.rev = "1ca1b14c02301828b539d756cf6306e5f1e13b5e";
packages3d.sha256 = "0ngf0k5f0a073k5v4q78zk6gj6xjjxzbb6551qf9k9wy8bsmgr2k";
};
};
};

View File

@@ -10,13 +10,13 @@
}:
stdenv.mkDerivation rec {
pname = "obs-pipewire-audio-capture";
version = "1.2.0";
version = "1.2.1";
src = fetchFromGitHub {
owner = "dimtpap";
repo = pname;
rev = version;
sha256 = "sha256-nkd/AoMsEUUxQQH5CjbnPbNwAwkd1y6j2nCa1GIAFPs=";
sha256 = "sha256-GrfogPsqpQ976Gcc4JVdslAAWTj49PdspwVp/JXYXSQ=";
};
nativeBuildInputs = [

View File

@@ -0,0 +1,45 @@
{
lib,
stdenv,
fetchFromGitHub,
cmake,
gtest,
static ? stdenv.hostPlatform.isStatic,
cxxStandard ? null,
}:
stdenv.mkDerivation (finalAttrs: {
pname = "abseil-cpp";
version = "20250814.0";
src = fetchFromGitHub {
owner = "abseil";
repo = "abseil-cpp";
tag = finalAttrs.version;
hash = "sha256-6Ro7miql9+wcArsOKTjlyDSyD91rmmPsIfO5auk9kiI=";
};
cmakeFlags = [
(lib.cmakeBool "ABSL_BUILD_TEST_HELPERS" true)
(lib.cmakeBool "ABSL_USE_EXTERNAL_GOOGLETEST" true)
(lib.cmakeBool "BUILD_SHARED_LIBS" (!static))
]
++ lib.optionals (cxxStandard != null) [
(lib.cmakeFeature "CMAKE_CXX_STANDARD" cxxStandard)
];
strictDeps = true;
nativeBuildInputs = [ cmake ];
buildInputs = [ gtest ];
meta = {
description = "Open-source collection of C++ code designed to augment the C++ standard library";
homepage = "https://abseil.io/";
changelog = "https://github.com/abseil/abseil-cpp/releases/tag/${finalAttrs.version}";
license = lib.licenses.asl20;
platforms = lib.platforms.all;
maintainers = [ lib.maintainers.GaetanLepage ];
};
})

View File

@@ -8,11 +8,11 @@
stdenv.mkDerivation rec {
pname = "aespipe";
version = "2.4i";
version = "2.4j";
src = fetchurl {
url = "mirror://sourceforge/loop-aes/aespipe/aespipe-v${version}.tar.bz2";
sha256 = "sha256-tBx6qsJULlnY/1jB/52HtS1KjBhHt5nIr+yR2UUXx14=";
sha256 = "sha256-RI/h5YYSwYSVFkXd2Sb8W9tk/E8vgox2bIKqESfpo+I=";
};
nativeBuildInputs = [ makeWrapper ];

View File

@@ -114,13 +114,13 @@ let
in
stdenv.mkDerivation rec {
pname = "airgeddon";
version = "11.51";
version = "11.52";
src = fetchFromGitHub {
owner = "v1s1t0r1sh3r3";
repo = "airgeddon";
tag = "v${version}";
hash = "sha256-PkP8sPpX/z3yjvTpsRYJ9fKzUaMsnCp+p6AAoTlcAA0=";
hash = "sha256-FQB348wOXi89CnjS32cwZwTewjkguTbhK5Izvh/74Q0=";
};
strictDeps = true;

View File

@@ -11,6 +11,7 @@
bzip2,
celt,
ffmpeg,
gmp,
jack2,
lame,
libX11,
@@ -37,7 +38,6 @@
x264,
xvidcore,
}:
rustPlatform.buildRustPackage rec {
pname = "alvr";
version = "20.14.1";
@@ -90,6 +90,7 @@ rustPlatform.buildRustPackage rec {
bzip2
celt
ffmpeg
gmp
jack2
lame
libX11

File diff suppressed because it is too large Load Diff

View File

@@ -8,12 +8,12 @@
buildDotnetModule rec {
dotnet-sdk = dotnetCorePackages.sdk_8_0;
dotnet-runtime = dotnetCorePackages.runtime_8_0;
version = "1.3.0";
version = "1.4.1";
src = fetchFromGitHub {
owner = "microsoft";
repo = "artifacts-credprovider";
rev = "v${version}";
sha256 = "sha256-JbcoDs4c/+uKIgVWZkuo4jqd1hlqe+H949jNfkDwZls=";
sha256 = "sha256-MYOl+UfRExeZsozcPJynWbx5JpYL0dxTADycAt6Wm7o=";
};
pname = "azure-artifacts-credprovider";
projectFile = "CredentialProvider.Microsoft/CredentialProvider.Microsoft.csproj";

View File

@@ -22,16 +22,16 @@ let
in
buildNpmPackage' rec {
pname = "balena-cli";
version = "22.3.0";
version = "22.4.1";
src = fetchFromGitHub {
owner = "balena-io";
repo = "balena-cli";
rev = "v${version}";
hash = "sha256-V/y2gsHjcWyduIYq+lddRqAC5ECafNBXQ0tiK/dLHOI=";
hash = "sha256-eUkH2rzYiNkMAB3NrDzd20nmeP12uJxTcFISE+6Ty7o=";
};
npmDepsHash = "sha256-SWtWXvWUuIzMqLoEDRTqVJyWNK/FXOA/LF73kCWfuz4=";
npmDepsHash = "sha256-WEYhtsCOdupU7nYF7scy4QP3/PVcchJX9CRRWjSkBJQ=";
postPatch = ''
ln -s npm-shrinkwrap.json package-lock.json

View File

@@ -11,7 +11,7 @@
stdenv.mkDerivation rec {
pname = "blackfire";
version = "2.28.31";
version = "2.29.0";
src =
passthru.sources.${stdenv.hostPlatform.system}
@@ -60,23 +60,23 @@ stdenv.mkDerivation rec {
sources = {
"x86_64-linux" = fetchurl {
url = "https://packages.blackfire.io/debian/pool/any/main/b/blackfire/blackfire_${version}_amd64.deb";
sha256 = "v2MkLqTNG9creNZBBshTuhrMxlF4OIDlzQOgrEyKmMY=";
sha256 = "i5R/O9pcIGpJqSl+txjDKl6qBDQmjNO2VJpDm6En4b8=";
};
"i686-linux" = fetchurl {
url = "https://packages.blackfire.io/debian/pool/any/main/b/blackfire/blackfire_${version}_i386.deb";
sha256 = "hFJdO/YFxwCpWBmR8jZwmak2Fc9wgMwLax5puaLVEbs=";
sha256 = "orymOOYL7/VwuN8itOhBSINNaBJ9gI7Zn2mTN55BpUA=";
};
"aarch64-linux" = fetchurl {
url = "https://packages.blackfire.io/debian/pool/any/main/b/blackfire/blackfire_${version}_arm64.deb";
sha256 = "RDk4DE0x24m5yBBezlGuLQ/l7yugyd5SYvfYoDFfJuI=";
sha256 = "d4GML/5uvMnx6g+PNaNEqD3pXVwzWLHFiV8VzSSvIHw=";
};
"aarch64-darwin" = fetchurl {
url = "https://packages.blackfire.io/blackfire/${version}/blackfire-darwin_arm64.pkg.tar.gz";
sha256 = "Xq0G0w2FJXEGVmXmyhbdzXY9OiZn4+5i8GypMOdeAhA=";
sha256 = "YZ61kEVr40BwiOCsQUz1UfiScNvWW1/ZF/daR+8BV+Q=";
};
"x86_64-darwin" = fetchurl {
url = "https://packages.blackfire.io/blackfire/${version}/blackfire-darwin_amd64.pkg.tar.gz";
sha256 = "cHmh1JyPa2u2c6KGlZC1/hlDlxMSrDKY1jwaEcJVq0M=";
sha256 = "IuFmV3FjMMY6bPooFU9egJplyP0fzi032QCZxr6Y1tw=";
};
};

View File

@@ -19,20 +19,20 @@
buildNpmPackage rec {
pname = "bruno";
version = "2.10.0";
version = "2.11.0";
src = fetchFromGitHub {
owner = "usebruno";
repo = "bruno";
tag = "v${version}";
hash = "sha256-NHl9+Z8r1dALs/epNO+bYLuETPH8MiFBI5x2kdg2gKQ=";
hash = "sha256-U6q82T/xqwREGsUcCdeAzvk9DWu9579MtF/JE0OIBW4=";
postFetch = ''
${lib.getExe npm-lockfile-fix} $out/package-lock.json
'';
};
npmDepsHash = "sha256-VMcSsaUmUJ4WcuBPoYxfmVpfvQQXY57LFpPiYdfFp2M=";
npmDepsHash = "sha256-i7bT6ZvdkHwqw+LkMqCdSMCNUsz1LPOHuF+u//lUYJ8=";
npmFlags = [ "--legacy-peer-deps" ];
nativeBuildInputs = [

View File

@@ -7,12 +7,12 @@
}:
let
pname = "capacities";
version = "1.50.4";
version = "1.52.6";
name = "${pname}-${version}";
src = fetchurl {
url = "https://web.archive.org/web/20250519011655/https://capacities-desktop-app.fra1.cdn.digitaloceanspaces.com/capacities-${version}.AppImage";
hash = "sha256-8sp6q86C36FbJ1azhX5QWbb2PmqJ/bbZQX31yzNByU4=";
hash = "sha256-M5K2TxrB2Ut/wYKasl8EqbzLjFJrqjWfPIJTZV4fi4s=";
};
appimageContents = appimageTools.extractType2 {

View File

@@ -14,13 +14,13 @@
stdenv.mkDerivation rec {
pname = "cdogs-sdl";
version = "2.3.1";
version = "2.3.2";
src = fetchFromGitHub {
repo = "cdogs-sdl";
owner = "cxong";
rev = version;
sha256 = "sha256-jdrmtI/FADZ0vJDtX4Kq0A9RJ1ELjsQZjO2nMDf/fT8=";
sha256 = "sha256-g1eLFdHsmqnz6zTlmaiLOXgX5dnS94k/PvaFJE3gfLo=";
};
postPatch = ''

View File

@@ -21,13 +21,13 @@
stdenv.mkDerivation (finalAttrs: {
pname = "cherrytree";
version = "1.5.0";
version = "1.6.0";
src = fetchFromGitHub {
owner = "giuspen";
repo = "cherrytree";
tag = "v${finalAttrs.version}";
hash = "sha256-WsxN2VGESrDUv0sSTsMSpZr6Ca7yDwGNR2aB3BrmfkY=";
hash = "sha256-VzY91ZyHL1gNj5liTzisA6iL74294CPLLa8duJy0m8A=";
};
nativeBuildInputs = [

View File

@@ -6,13 +6,13 @@
"packages": {
"": {
"dependencies": {
"@anthropic-ai/claude-code": "^1.0.109"
"@anthropic-ai/claude-code": "^1.0.113"
}
},
"node_modules/@anthropic-ai/claude-code": {
"version": "1.0.109",
"resolved": "https://registry.npmjs.org/@anthropic-ai/claude-code/-/claude-code-1.0.109.tgz",
"integrity": "sha512-gjj76f/+M5KfI+ORA9VNVJgR7s8eyur66XCWIIO66q4poNWcszxidXo+TDTqokLLwuNV+qFGx4JkK/PDmtwqMA==",
"version": "1.0.113",
"resolved": "https://registry.npmjs.org/@anthropic-ai/claude-code/-/claude-code-1.0.113.tgz",
"integrity": "sha512-K/+N/rECfWa1ZauWLD6C/CnX6bxxAck5CFDuK58JjRN8v6QDuJVX7HZcNCanB0ucxEkaczAwvWnEM+UjFQsdqw==",
"license": "SEE LICENSE IN README.md",
"bin": {
"claude": "cli.js"

View File

@@ -7,16 +7,16 @@
buildNpmPackage rec {
pname = "claude-code";
version = "1.0.109";
version = "1.0.113";
nodejs = nodejs_20; # required for sandboxed Nix builds on Darwin
src = fetchzip {
url = "https://registry.npmjs.org/@anthropic-ai/claude-code/-/claude-code-${version}.tgz";
hash = "sha256-bmva84iO0iDf8V537DX6Ggh1PyjKEkfebx4CSB3f4/U=";
hash = "sha256-N3lKbu3OtF1X65Dr9JghMdgsqQD2RYS/YJUNtPJVyyw=";
};
npmDepsHash = "sha256-jpvy7b4A+E5iI7Y7kYnwH51BZAQGVXKaf3lQjI9e3OM=";
npmDepsHash = "sha256-z+EXesi9nfoTE+eX7BUZv50BzCWSxqKFfvRlJWWdWDU=";
postPatch = ''
cp ${./package-lock.json} package-lock.json

View File

@@ -29,11 +29,11 @@ let
in
stdenv.mkDerivation rec {
pname = "clightning";
version = "25.05";
version = "25.09";
src = fetchurl {
url = "https://github.com/ElementsProject/lightning/releases/download/v${version}/clightning-v${version}.zip";
hash = "sha256-ANYzpjVw9kGdsNvXW1A7sEug9utGmJTab87SqJSdgAc=";
hash = "sha256-qX9EZHuDtEcYCU8YOMbHTo3JDAAJ8nc6N7F/+AAEpn4=";
};
# when building on darwin we need cctools to provide the correct libtool

View File

@@ -41,13 +41,13 @@
stdenv.mkDerivation (finalAttrs: {
pname = "cockpit";
version = "345";
version = "346";
src = fetchFromGitHub {
owner = "cockpit-project";
repo = "cockpit";
tag = finalAttrs.version;
hash = "sha256-dK2sMqPmxNKMKrlNkr7FoSaHY5Qn1dtNHX32d/IYu8U=";
hash = "sha256-ZTVcZ1a43cwm8y74XKp9z8tqSK1wxlW9lfoLN/cSFcs=";
fetchSubmodules = true;
};

View File

@@ -5,7 +5,7 @@
"packages": {
"": {
"dependencies": {
"codebuff": "^1.0.451"
"codebuff": "^1.0.473"
}
},
"node_modules/chownr": {
@@ -18,9 +18,9 @@
}
},
"node_modules/codebuff": {
"version": "1.0.451",
"resolved": "https://registry.npmjs.org/codebuff/-/codebuff-1.0.451.tgz",
"integrity": "sha512-LYzX+cu1zMnU/qntnRMQzQ+iPT436OYphFyIrEvx5DarfEEns5UIMDyWp0E9PWxbU4WsJfHJnL6srYxC/T8hUg==",
"version": "1.0.473",
"resolved": "https://registry.npmjs.org/codebuff/-/codebuff-1.0.473.tgz",
"integrity": "sha512-maT1kgspsqqDBYZ5K2Fpze3cB3NwTzl5nQZ9EY7CFjkiypYRjlFosrkxY8eUXs6hCZy/WmuBEYRjdyobEaqrcA==",
"cpu": [
"x64",
"arm64"

View File

@@ -6,14 +6,14 @@
buildNpmPackage rec {
pname = "codebuff";
version = "1.0.451";
version = "1.0.473";
src = fetchzip {
url = "https://registry.npmjs.org/codebuff/-/codebuff-${version}.tgz";
hash = "sha256-98NiHDb0PrK71I28y7DwDJf2i+mKTQBp22PY4WJh5ig=";
hash = "sha256-SlSS1++pEVtvY3sSDZzoD4K6PiFA8Z6tBX+hfIDrhrY=";
};
npmDepsHash = "sha256-qtBi5OT7UBsCIriO6Fk33gLOFNp5Ae0bT9qN+37b2sg=";
npmDepsHash = "sha256-fEK3CzK66AwHlZyt9ax+r5QGqUCfOw11EMMzA2d337c=";
postPatch = ''
cp ${./package-lock.json} package-lock.json

View File

@@ -50,6 +50,7 @@ stdenv.mkDerivation (finalAttrs: {
mainProgram = "cpu-info";
maintainers = with lib.maintainers; [ pawelchcki ];
pkgConfigModules = [ "libcpuinfo" ];
platforms = lib.platforms.all;
# https://github.com/pytorch/cpuinfo/blob/877328f188a3c7d1fa855871a278eb48d530c4c0/CMakeLists.txt#L98
platforms = lib.platforms.x86 ++ lib.platforms.aarch ++ lib.platforms.riscv;
};
})

View File

@@ -15,18 +15,16 @@
}:
stdenv.mkDerivation (finalAttrs: {
pname = "dashy-ui";
# This is like 3.1.1 but the latest working yarn.lock.
# All other changes are for docs with the exception of 768d746cbfcf365c58ad1194c5ccc74c14f3ed3a, which simply adds no-referrer meta tag
version = "3.1.1-unstable-2024-07-14";
version = "3.1.1-unstable-2025-09-12";
src = fetchFromGitHub {
owner = "lissy93";
repo = "dashy";
rev = "0b1af9db483f80323e782e7834da2a337393e111";
hash = "sha256-lRJ3lI9UUIaw9GWPEy81Dbf4cu6rClA4VjdWejVQN+g=";
rev = "e70ade555fdccf4e723a90f8a2d46fcf83645c4f";
hash = "sha256-edsGHc6Hi306aq+TA2g5FL/ZYNfExbcgHS5PWF9O0+0=";
};
yarnOfflineCache = fetchYarnDeps {
yarnLock = finalAttrs.src + "/yarn.lock";
hash = "sha256-KVAZIBM47yp1NWYc2esvTwfoAev4q7Wgi0c73PUZRNw=";
hash = "sha256-r36w3Cz/V7E/xPYYpvfQsdk2QXfCVDYE9OxiFNyKP2s=";
};
# - If no settings are passed, use the default config provided by upstream
# - Despite JSON being valid YAML (and the JSON passing the config validator),

View File

@@ -8,13 +8,13 @@
stdenv.mkDerivation rec {
pname = "diamond";
version = "2.1.13";
version = "2.1.14";
src = fetchFromGitHub {
owner = "bbuchfink";
repo = "diamond";
rev = "v${version}";
sha256 = "sha256-1y1eBOmWxZSPKX/VWo1tyimDxqyabOpyP6fX1CcL9sU=";
sha256 = "sha256-w6+lSc2YZ1PwPj3p6ieI/yTkoiSLWH2Za863n4BTClo=";
};
nativeBuildInputs = [ cmake ];

View File

@@ -9,14 +9,14 @@
}:
python3.pkgs.buildPythonApplication rec {
pname = "discover-overlay";
version = "0.7.8";
version = "0.7.9";
pyproject = true;
src = fetchFromGitHub {
owner = "trigg";
repo = "Discover";
tag = "v${version}";
hash = "sha256-0b0uZDa9Q3pQ6X65C+E31dMpdTPt4vvHDEqFEtRoedg=";
hash = "sha256-Z554/zRikZztdD4NZiDDjMWgIlnQDGkemlA3ONRhqR8=";
};
buildInputs = [

View File

@@ -1,68 +0,0 @@
{
lib,
stdenv,
fetchFromGitHub,
fetchpatch2,
autoreconfHook,
pkg-config,
file,
libuv,
raft-canonical,
sqlite,
lxd-lts,
}:
stdenv.mkDerivation (finalAttrs: {
pname = "dqlite";
version = "1.18.1";
src = fetchFromGitHub {
owner = "canonical";
repo = "dqlite";
tag = "v${finalAttrs.version}";
hash = "sha256-7ou077ozbpH21PcvEEcprr4UYJ/X398Ph9dh5C3YyBQ=";
};
patches = [
(fetchpatch2 {
url = "https://github.com/canonical/dqlite/commit/be453628ce782167f6652c055e600908e2641da7.patch?full_index=1";
hash = "sha256-5DvZ1TW6QmE/heh/RjV395gSgwKM5XnqxqznfYQPC/Y=";
})
];
nativeBuildInputs = [
autoreconfHook
file
pkg-config
];
buildInputs = [
libuv
raft-canonical.dev
sqlite
];
enableParallelBuilding = true;
# tests fail
doCheck = false;
outputs = [
"dev"
"out"
];
passthru.tests = {
inherit lxd-lts;
};
meta = {
description = ''
Expose a SQLite database over the network and replicate it across a
cluster of peers
'';
homepage = "https://dqlite.io/";
license = lib.licenses.asl20;
maintainers = [ ];
platforms = lib.platforms.linux;
};
})

View File

@@ -8,7 +8,7 @@
let
themeName = "Dracula";
version = "4.0.0-unstable-2025-08-04";
version = "4.0.0-unstable-2025-08-31";
in
stdenvNoCC.mkDerivation {
pname = "dracula-theme";
@@ -17,8 +17,8 @@ stdenvNoCC.mkDerivation {
src = fetchFromGitHub {
owner = "dracula";
repo = "gtk";
rev = "646918e419c98747f4e6f8305c0ecaf6bbc860c6";
hash = "sha256-8p9IS5aMZGP/VCuFTjQU+D3wfFIwfT/lcY7ujUv3SRc=";
rev = "1e6067a6b9246c0bddd4655d962f90e110f842f6";
hash = "sha256-UWon2riHFD1szSQglwUBePuiTsk0znj01MZmJwvPGGM=";
};
propagatedUserEnvPkgs = [

View File

@@ -6,13 +6,13 @@
buildGoModule rec {
pname = "amazon-ecs-agent";
version = "1.97.1";
version = "1.98.0";
src = fetchFromGitHub {
rev = "v${version}";
owner = "aws";
repo = "amazon-ecs-agent";
hash = "sha256-o4jvB15YZdZGbUyvKkHvJRKpzBBsuSt1cObADRw6dco=";
hash = "sha256-zp1Rdl6Gl24hXFNMZX3qcf7p3eAdzE0EA5ZnwjW4guU=";
};
vendorHash = null;

View File

@@ -8,10 +8,17 @@
yarnBuildHook,
nix-update-script,
extraBuildEnv ? { },
# This package contains serveral sub-applications. This specifies which of them you want to build.
enteApp ? "photos",
# Accessing some apps (such as account) directly will result in a hardcoded redirect to ente.io.
# To prevent users from accidentally logging in to ente.io instead of the selfhosted instance, you
# can set this parameter to override these occurrences with your own url. Must include the schema.
# Example: https://my-ente.example.com
enteMainUrl ? null,
}:
stdenv.mkDerivation (finalAttrs: {
pname = "ente-web";
pname = "ente-web-${enteApp}";
version = "1.2.4";
src = fetchFromGitHub {
@@ -38,14 +45,31 @@ stdenv.mkDerivation (finalAttrs: {
# See: https://github.com/ente-io/ente/blob/main/web/apps/photos/.env
env = extraBuildEnv;
installPhase = ''
runHook preInstall
# Replace hardcoded ente.io urls if desired
postPatch = lib.optionalString (enteMainUrl != null) ''
substituteInPlace \
apps/payments/src/services/billing.ts \
apps/photos/src/pages/shared-albums.tsx \
--replace-fail "https://ente.io" ${lib.escapeShellArg enteMainUrl}
cp -r apps/photos/out $out
runHook postInstall
substituteInPlace \
apps/accounts/src/pages/index.tsx \
--replace-fail "https://web.ente.io" ${lib.escapeShellArg enteMainUrl}
'';
yarnBuildScript = "build:${enteApp}";
installPhase =
let
distName = if enteApp == "payments" then "dist" else "out";
in
''
runHook preInstall
cp -r apps/${enteApp}/${distName} $out
runHook postInstall
'';
passthru.updateScript = nix-update-script {
extraArgs = [
"--version-regex"
@@ -54,12 +78,13 @@ stdenv.mkDerivation (finalAttrs: {
};
meta = {
description = "Web client for Ente Photos";
description = "Ente application web frontends";
homepage = "https://ente.io/";
changelog = "https://github.com/ente-io/ente/releases";
license = lib.licenses.agpl3Only;
maintainers = with lib.maintainers; [
pinpox
oddlama
];
platforms = lib.platforms.all;
};

View File

@@ -8,16 +8,16 @@
buildGoModule rec {
pname = "envconsul";
version = "0.13.3";
version = "0.13.4";
src = fetchFromGitHub {
owner = "hashicorp";
repo = "envconsul";
rev = "v${version}";
hash = "sha256-hPq+r4DOMu2elOpaT0xDQoelUb1D/zYM/a6fZZdu/AY=";
hash = "sha256-7F+Zsvh13r38FTxgwKbHSaH9cdnnOl8A+nXSbW4XyXA=";
};
vendorHash = "sha256-0hrZsh08oWqhVqvM6SwUskYToH6Z4YWmV/i0V2MkFMw=";
vendorHash = "sha256-7AXWQ/+rWBGvjkSSWIIGLFY32t3v05GXE7IJwFFsJt4=";
ldflags = [
"-s"

View File

@@ -26,13 +26,13 @@
stdenv.mkDerivation (finalAttrs: {
pname = "euphonica";
version = "0.96.3-beta";
version = "0.96.4-beta";
src = fetchFromGitHub {
owner = "htkhiem";
repo = "euphonica";
tag = "v${finalAttrs.version}";
hash = "sha256-IxU0LXSh516I2x8keLuuoFwfjVF+Xp0Dc56ryYY6w10=";
hash = "sha256-iPkqTnC5Gg2hnzQ2Lul5aXF5QhYpHQ1MiilvNiKHFdc=";
fetchSubmodules = true;
};
@@ -45,7 +45,7 @@ stdenv.mkDerivation (finalAttrs: {
cargoDeps = rustPlatform.fetchCargoVendor {
inherit (finalAttrs) pname version src;
hash = "sha256-j4btvkBIQ+SppqE1rvIHWbQSgBn8ORcKGFDXYypEqsA=";
hash = "sha256-AISBkWJ0ZZy2HdZCwW6S5DcD09nVJOmglsoevCaD/3g=";
};
mesonBuildType = "release";

View File

@@ -8,13 +8,13 @@
stdenv.mkDerivation rec {
pname = "fatrace";
version = "0.18.0";
version = "0.19.1";
src = fetchFromGitHub {
owner = "martinpitt";
repo = "fatrace";
rev = version;
sha256 = "sha256-QkavhxORENQSLUg4tq+W1WB7zSYYinWOOYPqNbv+pEI=";
sha256 = "sha256-ncLmO7DwkB2nC4K/40ctwRheVVSPDK+zfcGJZvYyuVI=";
};
buildInputs = [

View File

@@ -12,13 +12,13 @@
stdenv.mkDerivation rec {
pname = "fio";
version = "3.40";
version = "3.41";
src = fetchFromGitHub {
owner = "axboe";
repo = "fio";
rev = "fio-${version}";
sha256 = "sha256-rfO4JEZ+B15NvR2AiTnlbQq++UchPYiXz3vVsFaG6r4=";
sha256 = "sha256-m4JskjSc/KHjID+6j/hbhnGzehPxMxA3m2Iyn49bJDU=";
};
buildInputs = [

View File

@@ -6,13 +6,13 @@
buildGoModule rec {
pname = "olm";
version = "1.1.0";
version = "1.1.1";
src = fetchFromGitHub {
owner = "fosrl";
repo = "olm";
tag = version;
hash = "sha256-/sDWsWOMgDcBYerBbxKWMfWlOUaeQeKQ+OIcE7LJg00=";
hash = "sha256-yGknbxoBMaI6GwIf8hVfWmgFAgI4kxYrNq/puy4aG2M=";
};
vendorHash = "sha256-DqZU64jwg2AHmze1oWOmDgltB+k1mLSHQyAxnovLaVo=";

View File

@@ -28,16 +28,16 @@ in
buildNpmPackage (finalAttrs: {
pname = "pangolin";
version = "1.9.1";
version = "1.9.4";
src = fetchFromGitHub {
owner = "fosrl";
repo = "pangolin";
tag = finalAttrs.version;
hash = "sha256-r0/HtRWdlDV749yT2pMnKqQKKYm6FPpcy3eul6M8iDQ=";
hash = "sha256-2tTe8HlkTMHK6W+rLaiOKA/m4yLq63BQP5Pu+Jcxu88=";
};
npmDepsHash = "sha256-OygskQhveT9CiymOOd5gx+aR9v3nMUZj72k/om3IF/c=";
npmDepsHash = "sha256-K3G8t+RwG87Sc7zp/zQZNJmBgezk95bpUpHzqnVaThQ=";
nativeBuildInputs = [
esbuild

View File

@@ -7,16 +7,16 @@
buildGoModule rec {
pname = "gcsfuse";
version = "3.2.0";
version = "3.3.0";
src = fetchFromGitHub {
owner = "googlecloudplatform";
repo = "gcsfuse";
rev = "v${version}";
hash = "sha256-O+wkNb48+8kI8PBhDCkQvhVmApXFrVgNd31B6IMe8JA=";
hash = "sha256-uHLfK6z2Ck38kxGtz91yyWV9YUW/Bft7S/MOUEHMf3o=";
};
vendorHash = "sha256-NUT1VGQ17cBDjdZVxBEqh7UJUGw5G5EtqpF7LXkXAH8=";
vendorHash = "sha256-M4hI9ciDe49siQhFVRFTXNfQBMt9aBulu9+HvCQeVHA=";
subPackages = [
"."

View File

@@ -8,13 +8,13 @@
buildGoModule (finalAttrs: {
pname = "github-mcp-server";
version = "0.13.0";
version = "0.14.0";
src = fetchFromGitHub {
owner = "github";
repo = "github-mcp-server";
tag = "v${finalAttrs.version}";
hash = "sha256-E1ta3qt0xXOFw9KhQYKt6cLolJ2wkH6JU22NbCWeuf0=";
hash = "sha256-IGjbuW4gPOZQrLMhyrZ3ii/GeRrgEyNVb0w3/Z0hihU=";
};
vendorHash = "sha256-F6PR4bxFSixgYQX65zjrVxcxEQxCoavQqa5mBGrZH8o=";

View File

@@ -13,12 +13,12 @@
}:
runCommand "gitwatch"
rec {
version = "0.3";
version = "0.4";
src = fetchFromGitHub {
owner = "gitwatch";
repo = "gitwatch";
rev = "v${version}";
hash = "sha256-dKXKuqUQhFUXMwPs7Uilzn2yKH6DIlBTOFztKo+PqVU=";
hash = "sha256-DEHhwQvI8i+8ExAQvfY+zL5epmhOkht3a69XOn0cKqY=";
};
nativeBuildInputs = [ makeWrapper ];

View File

@@ -2,7 +2,7 @@
lib,
fetchurl,
fetchFromGitea,
buildGoModule,
buildGo124Module,
nixosTests,
}:
let
@@ -17,7 +17,7 @@ let
hash = "sha256-et1jguboadjJJdUpugmRvkAtpdfHxn4+ftXUH/hWTdE=";
};
in
buildGoModule rec {
buildGo124Module rec {
inherit version;
pname = repo;

View File

@@ -12,14 +12,14 @@
}:
buildGoModule rec {
version = "3.5.3";
version = "3.5.5";
pname = "grafana-loki";
src = fetchFromGitHub {
owner = "grafana";
repo = "loki";
rev = "v${version}";
hash = "sha256-3/cI5KiSuHMDe+YqPOnygTbZfWdG9G6dz5RAIXeT4S4=";
hash = "sha256-nNmY3LrRhrTGQBtnjQ2V252cAvJp2F6D2XHCkcvFfn8=";
};
vendorHash = null;

View File

@@ -10,13 +10,13 @@
stdenv.mkDerivation (finalAttrs: {
pname = "graphene-hardened-malloc";
version = "2025041100";
version = "2025090300";
src = fetchFromGitHub {
owner = "GrapheneOS";
repo = "hardened_malloc";
rev = finalAttrs.version;
hash = "sha256-HCuH5SUiw/+3T1dv+IKKsQEC1GbuG0Se376bw2fG5u8=";
hash = "sha256-SpcdZIo9YOmVBq2yG4n174gjAoThnnWbQcWK2BfQ2BM=";
};
nativeCheckInputs = [ python3 ];

View File

@@ -68,13 +68,13 @@ lib.checkListOfEnum "${pname}: colorVariants" colorVariantList colorVariants lib
stdenvNoCC.mkDerivation
{
inherit pname;
version = "0-unstable-2025-08-13";
version = "0-unstable-2025-08-28";
src = fetchFromGitHub {
owner = "Fausto-Korpsvart";
repo = "Gruvbox-GTK-Theme";
rev = "f9f56cb51ba06d27f5ee8e7b88e20b0b4de6bf4c";
hash = "sha256-gKJQ2TTh0MJB0SULA2ND8gvZ/YlC1dSxCOr0K2X4So0=";
rev = "2034b4ce9aa30281ff8f3af49b0e53e34a029fd7";
hash = "sha256-OM5ZkXZxDSYtLS2CbqAjXXJM95plC7VWgvUG0qYbEJI=";
};
propagatedUserEnvPkgs = [ gtk-engine-murrine ];

View File

@@ -12,11 +12,11 @@
}:
stdenv.mkDerivation rec {
pname = "guile-goblins";
version = "0.15.1";
version = "0.16.1";
src = fetchurl {
url = "https://spritely.institute/files/releases/guile-goblins/guile-goblins-${version}.tar.gz";
hash = "sha256-2oPS6Ar0ee7BQBtjvhJCCQYXK2TLIiADiCwnDaHPGBc=";
hash = "sha256-MLuCcarwqgRtxsMONBsvfvrLz30KF6ztLWAyi1JuzoE=";
};
strictDeps = true;

View File

@@ -5,11 +5,11 @@
}:
let
pname = "handheld-daemon-ui";
version = "3.3.14";
version = "3.4.0";
src = fetchurl {
url = "https://github.com/hhd-dev/hhd-ui/releases/download/v${version}/hhd-ui.Appimage";
hash = "sha256-WZ0renrLt2OHg0p0JApyuctujinerzPnidlN85LyMi0=";
hash = "sha256-OeZMh3lC3fluwz1pU3JnLRkwFYiIkthGuclYkOJm430=";
};
extractedFiles = appimageTools.extractType2 { inherit pname version src; };
in

View File

@@ -7,18 +7,18 @@
rustPlatform.buildRustPackage rec {
pname = "harper";
version = "0.62.0";
version = "0.63.0";
src = fetchFromGitHub {
owner = "Automattic";
repo = "harper";
rev = "v${version}";
hash = "sha256-rtd/cuTy5n89NZtZf+lbP7jGySYjUnOfgzfziMYg+40=";
hash = "sha256-c24JekkvV+utJoHvpZO8z1XAwbQBBIrGIO+os5NW9Y4=";
};
buildAndTestSubdir = "harper-ls";
cargoHash = "sha256-ZSyAnlekjBGb0SJW4Ae1EEGwSnsDWXVUfYA0d87Ug1w=";
cargoHash = "sha256-iwESdSCmZIA96ECS4weqxx3n1u8UzYte06Vk/svmm/g=";
passthru.updateScript = nix-update-script { };

View File

@@ -15,14 +15,14 @@ let
in
py.pkgs.buildPythonApplication rec {
pname = "healthchecks";
version = "3.11";
version = "3.11.2";
format = "other";
src = fetchFromGitHub {
owner = "healthchecks";
repo = "healthchecks";
tag = "v${version}";
sha256 = "sha256-s8qhCp+6d2rixgrduWXopiWEpBCLVKkoDjTYT0eLSN8=";
sha256 = "sha256-EHXxb5T5+WFvhBZQ6d6abSzpBEUBz6F1ftqMWECmdpg=";
};
propagatedBuildInputs = with py.pkgs; [

View File

@@ -5,10 +5,10 @@
}:
let
pname = "heptabase";
version = "1.69.0";
version = "1.73.2";
src = fetchurl {
url = "https://github.com/heptameta/project-meta/releases/download/v${version}/Heptabase-${version}.AppImage";
hash = "sha256-QB2N/RJ4o6IN25qSRbiB69/qGHEKA4GRbLdMYS2cRIQ=";
hash = "sha256-cwKNFGoBKr2RM51uL4uiNHf6oCa0NPBgsXHBe7EPoNM=";
};
appimageContents = appimageTools.extractType2 { inherit pname version src; };

View File

@@ -13,12 +13,12 @@
stdenv.mkDerivation rec {
pname = "htmldoc";
version = "1.9.20";
version = "1.9.21";
src = fetchFromGitHub {
owner = "michaelrsweet";
repo = "htmldoc";
rev = "v${version}";
hash = "sha256-nEDvG2Q6uMYWyb49EKOZimkOfEavCjvfFgucwi3u64k=";
hash = "sha256-MZKXEwJdQzn49JIUm4clqKBTtjKu6tBU5Sdq6ESn1k4=";
};
nativeBuildInputs = [ pkg-config ];

Some files were not shown because too many files have changed in this diff Show More