2023-10-10 13:31:13 +02:00
|
|
|
// swift-tools-version:5.5
|
2023-03-28 18:39:01 +02:00
|
|
|
|
|
|
|
import PackageDescription
|
|
|
|
|
2024-04-15 12:14:46 +02:00
|
|
|
/// C/C++/Obj-C translation units compiled into the `llama` target.
/// `ggml-metal.m` is appended to this list on Darwin platforms below.
var sources = [
    "ggml.c",
    "sgemm.cpp",
    "llama.cpp",
    "unicode.cpp",
    "unicode-data.cpp",
    "ggml-alloc.c",
    "ggml-backend.c",
    "ggml-quants.c",
]
// MARK: - Build configuration (extended per-platform below)

/// Bundled resources for the target; the Metal shader source is added on Darwin.
var resources: [Resource] = []

/// Linker settings; the Accelerate framework is linked on Darwin.
var linkerSettings: [LinkerSetting] = []

/// Compiler flags applied to every C-family source in the target.
var cSettings: [CSetting] = [
    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
    .unsafeFlags(["-fno-objc-arc"]),
    // NOTE: NEW_LAPACK requires iOS 16.4+
    // We should consider adding this in the future when we drop support for iOS 14
    // (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
    // .define("ACCELERATE_NEW_LAPACK"),
    // .define("ACCELERATE_LAPACK_ILP64")
]
|
|
|
#if canImport(Darwin)
// Apple platforms: compile the Metal backend, bundle its shader source,
// link Accelerate, and enable both backends via preprocessor defines.
sources.append("ggml-metal.m")
resources.append(.process("ggml-metal.metal"))
linkerSettings.append(.linkedFramework("Accelerate"))
cSettings.append(contentsOf: [
    .define("GGML_USE_ACCELERATE"),
    .define("GGML_USE_METAL")
])
#endif
|
|
|
|
|
|
|
|
#if os(Linux)
// glibc hides GNU-specific APIs unless _GNU_SOURCE is defined.
cSettings.append(.define("_GNU_SOURCE"))
#endif
|
|
|
|
|
2023-03-28 18:39:01 +02:00
|
|
|
/// Package manifest: a single `llama` library target built from the
/// repository root, using the source list, resources, and settings
/// assembled above.
let package = Package(
    name: "llama",
    platforms: [
        .macOS(.v12),
        .iOS(.v14),
        .watchOS(.v4),
        .tvOS(.v14)
    ],
    products: [
        .library(name: "llama", targets: ["llama"]),
    ],
    targets: [
        .target(
            name: "llama",
            path: ".",
            // Non-source directories and files that live under the target path
            // and must be excluded from the build.
            exclude: [
                "cmake",
                "examples",
                "scripts",
                "models",
                "tests",
                "CMakeLists.txt",
                "ggml-cuda.cu",
                "ggml-cuda.h",
                "Makefile"
            ],
            sources: sources,
            resources: resources,
            publicHeadersPath: "spm-headers",
            cSettings: cSettings,
            linkerSettings: linkerSettings
        )
    ],
    cxxLanguageStandard: .cxx11
)
|