# llama.cpp/.gitignore
# 2024-06-08 13:42:01 +01:00
#
# 127 lines
# 1.6 KiB
# Plaintext

*.o
*.a
*.so
*.gguf
*.gguf.json
*.bin
*.exe
*.dll
*.log
*.gcov
*.gcno
*.gcda
*.dot
*.bat
*.tmp
*.metallib
*.etag
*.lastModified
.DS_Store
.build/
.cache/
.ccls-cache/
.direnv/
.envrc
.swiftpm
.venv
.clang-tidy
.vs/
.vscode/
.idea/
ggml-metal-embed.metal
lcov-report/
gcovr-report/
tags
build*
!build.zig
cmake-build-*
android-ndk-*
out/
tmp/
models/*
models-mnt
/Pipfile
/llama-baby
/llama-beam-search
/llama-benchmark-matmult
/llama-convert-llama2c-to-ggml
/llama-embedding
/llama-eval-callback
/llama-gguf
/llama-gguf-llama-simple
/llama-gguf-split
/llama-gritlm
/llama-imatrix
/llama-infill
/libllama.so
/llama
/llama-bench
/llava
/llama-server
/llama-lookahead
/llama-lookup
/llama-lookup-create
/llama-lookup-merge
/llama-lookup-stats
/llama-passkey
/llama-perplexity
/llama-q8dot
/llama-quantize
/llama-quantize-stats
/llama-save-load-state
/llama-simple
/llama-batched
/llama-batched-bench
/llama-export-lora
/llama-finetune
/llama-retrieval
/llama-speculative
/llama-parallel
/llama-train-text-from-scratch
/llama-tokenize
/llama-vdot
/common/build-info.cpp
arm_neon.h
compile_commands.json
CMakeSettings.json
__pycache__
dist
zig-out/
zig-cache/
ppl-*.txt
qnt-*.txt
perf-*.txt
examples/jeopardy/results.txt
examples/server/*.html.hpp
examples/server/*.js.hpp
examples/server/*.mjs.hpp
examples/server/*.css.hpp
poetry.lock
poetry.toml
nppBackup
# Test binaries
/tests/test-grammar-parser
/tests/test-llama-grammar
/tests/test-double-float
/tests/test-grad0
/tests/test-opt
/tests/test-quantize-fns
/tests/test-quantize-perf
/tests/test-sampling
/tests/test-tokenizer-0
/tests/test-tokenizer-1-spm
/tests/test-tokenizer-1-bpe
/tests/test-rope
/tests/test-backend-ops