mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 05:48:47 +01:00
68eccbdc5b
* flake.lock: update to hotfix CUDA::cuda_driver Required to support https://github.com/ggerganov/llama.cpp/pull/4606 * flake.nix: rewrite 1. Split into separate files per output. 2. Added overlays, so that this flake can be integrated into others. The names in the overlay are `llama-cpp`, `llama-cpp-opencl`, `llama-cpp-cuda`, and `llama-cpp-rocm` so that they fit into the broader set of Nix packages from [nixpkgs](https://github.com/nixos/nixpkgs). 3. Use [callPackage](https://summer.nixos.org/blog/callpackage-a-tool-for-the-lazy/) rather than `with pkgs;` so that there's dependency injection rather than dependency lookup. 4. Add a description and meta information for each package. The description includes a bit about what's trying to accelerate each one. 5. Use specific CUDA packages instead of cudatoolkit on the advice of SomeoneSerge. 6. Format with `serokell/nixfmt` for a consistent style. 7. Update `flake.lock` with the latest goods. * flake.nix: use finalPackage instead of passing it manually * nix: unclutter darwin support * nix: pass most darwin frameworks unconditionally ...for simplicity * *.nix: nixfmt nix shell github:piegamesde/nixfmt/rfc101-style --command \ nixfmt flake.nix .devops/nix/*.nix * flake.nix: add maintainers * nix: move meta down to follow Nixpkgs style more closely * nix: add missing meta attributes nix: clarify the interpretation of meta.maintainers nix: clarify the meaning of "broken" and "badPlatforms" nix: passthru: expose the use* flags for inspection E.g.: ``` ❯ nix eval .#cuda.useCuda true ``` * flake.nix: avoid re-evaluating nixpkgs too many times * flake.nix: use flake-parts * nix: migrate to pname+version * flake.nix: overlay: expose both the namespace and the default attribute * ci: add the (Nix) flakestry workflow * nix: cmakeFlags: explicit OFF bools * nix: cuda: reduce runtime closure * nix: fewer rebuilds * nix: respect config.cudaCapabilities * nix: add the impure driver's location to the DT_RUNPATHs * nix: clean sources 
more thoroughly ...this way outPaths change less frequently, and so there are fewer rebuilds * nix: explicit mpi support * nix: explicit jetson support * flake.nix: darwin: only expose the default --------- Co-authored-by: Someone Serge <sergei.kozlukov@aalto.fi>
100 lines
3.5 KiB
Nix
100 lines
3.5 KiB
Nix
{
  description = "Port of Facebook's LLaMA model in C/C++";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-parts.url = "github:hercules-ci/flake-parts";
  };

  # For inspection, use `nix flake show github:ggerganov/llama.cpp` or the nix repl:
  #
  # ```bash
  # ❯ nix repl
  # nix-repl> :lf github:ggerganov/llama.cpp
  # Added 13 variables.
  # nix-repl> outputs.apps.x86_64-linux.quantize
  # { program = "/nix/store/00000000000000000000000000000000-llama.cpp/bin/quantize"; type = "app"; }
  # ```
  outputs =
    { self, flake-parts, ... }@inputs:
    let
      # We could include the git revisions in the package names but those would
      # needlessly trigger rebuilds:
      # llamaVersion = self.dirtyShortRev or self.shortRev;

      # Nix already uses cryptographic hashes for versioning, so we'll just fix
      # the fake semver for now:
      llamaVersion = "0.0.0";
    in
    flake-parts.lib.mkFlake { inherit inputs; }

      {

        imports = [
          ./.devops/nix/nixpkgs-instances.nix
          ./.devops/nix/apps.nix
          ./.devops/nix/devshells.nix
          ./.devops/nix/jetson-support.nix
        ];

        # An overlay can be used to have a more granular control over llama-cpp's
        # dependencies and configuration, than that offered by the `.override`
        # mechanism. Cf. https://nixos.org/manual/nixpkgs/stable/#chap-overlays.
        #
        # E.g. in a flake:
        # ```
        # { nixpkgs, llama-cpp, ... }:
        # let pkgs = import nixpkgs {
        #     overlays = [ (llama-cpp.overlays.default) ];
        #     system = "aarch64-linux";
        #     config.allowUnfree = true;
        #     config.cudaSupport = true;
        #     config.cudaCapabilities = [ "7.2" ];
        #     config.cudaEnableForwardCompat = false;
        # }; in {
        #     packages.aarch64-linux.llamaJetsonXavier = pkgs.llamaPackages.llama-cpp;
        # }
        # ```
        #
        # Cf. https://nixos.org/manual/nix/unstable/command-ref/new-cli/nix3-flake.html?highlight=flake#flake-format
        flake.overlays.default = (
          final: prev: {
            llamaPackages = final.callPackage ./.devops/nix/scope.nix { inherit llamaVersion; };
            inherit (final.llamaPackages) llama-cpp;
          }
        );

        systems = [
          "aarch64-darwin"
          "aarch64-linux"
          "x86_64-darwin" # x86_64-darwin isn't tested (and likely isn't relevant)
          "x86_64-linux"
        ];

        perSystem =
          {
            config,
            lib,
            pkgs,
            pkgsCuda,
            pkgsRocm,
            ...
          }:
          {
            # We don't use the overlay here so as to avoid making too many instances of nixpkgs,
            # cf. https://zimbatm.com/notes/1000-instances-of-nixpkgs
            packages =
              {
                default = (pkgs.callPackage ./.devops/nix/scope.nix { inherit llamaVersion; }).llama-cpp;
              }
              // lib.optionalAttrs pkgs.stdenv.isLinux {
                opencl = config.packages.default.override { useOpenCL = true; };
                cuda = (pkgsCuda.callPackage ./.devops/nix/scope.nix { inherit llamaVersion; }).llama-cpp;
                rocm = (pkgsRocm.callPackage ./.devops/nix/scope.nix { inherit llamaVersion; }).llama-cpp;

                mpi-cpu = config.packages.default.override { useMpi = true; };
                # Derive the MPI+CUDA variant from the CUDA build; the previous
                # definition overrode `default` (a CPU-only build), which made
                # `mpi-cuda` identical to `mpi-cpu` and never enabled CUDA.
                mpi-cuda = config.packages.cuda.override { useMpi = true; };
              };
          };
      };
}