# Define the default target now so that it is always the first target
default: main quantize quantize-stats perplexity embedding vdot

# Allow UNAME_* to be overridden from the command line (e.g. for cross builds);
# otherwise detect the host system once, at parse time.
ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif

ifndef UNAME_P
UNAME_P := $(shell uname -p)
endif

ifndef UNAME_M
UNAME_M := $(shell uname -m)
endif

# Compiler version banners, printed in the build-info block below.
CCV  := $(shell $(CC)  --version | head -n 1)
CXXV := $(shell $(CXX) --version | head -n 1)

# Mac OS + Arm can report x86_64
# ref: https://github.com/ggerganov/whisper.cpp/issues/66#issuecomment-1282546789
ifeq ($(UNAME_S),Darwin)
ifneq ($(UNAME_P),arm)
SYSCTL_M := $(shell sysctl -n hw.optional.arm64 2>/dev/null)
ifeq ($(SYSCTL_M),1)
# UNAME_P := arm
# UNAME_M := arm64
warn := $(warning Your arch is announced as x86_64, but it seems to actually be ARM64. Not fixing that can lead to bad performance. For more info see: https://github.com/ggerganov/whisper.cpp/issues/66\#issuecomment-1282546789)
endif
endif
endif
#
# Compile flags
#

# keep standard at C11 and C++11
CFLAGS   = -I.              -O3 -std=c11   -fPIC
CXXFLAGS = -I. -I./examples -O3 -std=c++11 -fPIC
LDFLAGS  =

# Strip asserts unless the user explicitly asks for a debug build.
ifndef LLAMA_DEBUG
CFLAGS   += -DNDEBUG
CXXFLAGS += -DNDEBUG
endif

# warnings
CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar
# OS specific
# TODO: support Windows
ifeq ($(UNAME_S),Linux)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Darwin)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),FreeBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),NetBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),OpenBSD)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
ifeq ($(UNAME_S),Haiku)
CFLAGS   += -pthread
CXXFLAGS += -pthread
endif
# Architecture specific
# TODO: probably these flags need to be tweaked on some architectures
#       feel free to update the Makefile for your architecture and send a pull request or issue
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
# Use all CPU extensions that are available:
CFLAGS   += -march=native -mtune=native
CXXFLAGS += -march=native -mtune=native

# Usage AVX-only
#CFLAGS   += -mfma -mf16c -mavx
#CXXFLAGS += -mfma -mf16c -mavx
endif

ifneq ($(filter ppc64%,$(UNAME_M)),)
POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
ifneq (,$(findstring POWER9,$(POWER9_M)))
CFLAGS   += -mcpu=power9
CXXFLAGS += -mcpu=power9
endif
# Require c++23's std::byteswap for big-endian support.
ifeq ($(UNAME_M),ppc64)
CXXFLAGS += -std=c++23 -DGGML_BIG_ENDIAN
endif
endif
ifndef LLAMA_NO_ACCELERATE
# Mac M1 - include Accelerate framework.
# `-framework Accelerate` works on Mac Intel as well, with negligible performance boost (as of the predict time).
ifeq ($(UNAME_S),Darwin)
CFLAGS  += -DGGML_USE_ACCELERATE
LDFLAGS += -framework Accelerate
endif
endif

ifdef LLAMA_OPENBLAS
CFLAGS  += -DGGML_USE_OPENBLAS -I/usr/local/include/openblas
LDFLAGS += -lopenblas
endif
ifdef LLAMA_CUBLAS
# Search the common CUDA install locations plus $(CUDA_PATH) for headers/libs.
CFLAGS    += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
CXXFLAGS  += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
LDFLAGS   += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
OBJS      += ggml-cuda.o
NVCC       = nvcc
# -arch=native targets the GPU(s) present on the build machine.
NVCCFLAGS  = --forward-unknown-to-host-compiler -arch=native

ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
	$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -Wno-pedantic -c $< -o $@
endif
ifdef LLAMA_CLBLAST
CFLAGS += -DGGML_USE_CLBLAST

# Mac provides OpenCL as a framework
ifeq ($(UNAME_S),Darwin)
LDFLAGS += -lclblast -framework OpenCL
else
LDFLAGS += -lclblast -lOpenCL
endif

OBJS += ggml-opencl.o

ggml-opencl.o: ggml-opencl.c ggml-opencl.h
	$(CC) $(CFLAGS) -c $< -o $@
endif
ifdef LLAMA_GPROF
CFLAGS   += -pg
CXXFLAGS += -pg
endif

ifdef LLAMA_PERF
CFLAGS   += -DGGML_PERF
CXXFLAGS += -DGGML_PERF
endif

ifneq ($(filter aarch64%,$(UNAME_M)),)
# Apple M1, M2, etc.
# Raspberry Pi 3, 4, Zero 2 (64-bit)
CFLAGS   += -mcpu=native
CXXFLAGS += -mcpu=native
endif

ifneq ($(filter armv6%,$(UNAME_M)),)
# Raspberry Pi 1, Zero
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access
endif

ifneq ($(filter armv7%,$(UNAME_M)),)
# Raspberry Pi 2
CFLAGS += -mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations
endif

ifneq ($(filter armv8%,$(UNAME_M)),)
# Raspberry Pi 3, 4, Zero 2 (32-bit)
CFLAGS += -mfp16-format=ieee -mno-unaligned-access
endif
#
# Print build information
#

$(info I llama.cpp build info: )
$(info I UNAME_S:  $(UNAME_S))
$(info I UNAME_P:  $(UNAME_P))
$(info I UNAME_M:  $(UNAME_M))
$(info I CFLAGS:   $(CFLAGS))
$(info I CXXFLAGS: $(CXXFLAGS))
$(info I LDFLAGS:  $(LDFLAGS))
$(info I CC:       $(CCV))
$(info I CXX:      $(CXXV))
$(info )
#
# Build library
#

# ggml-cuda.h is an unconditional prerequisite: touching it must rebuild
# ggml.o/llama.o even when CUDA is disabled (the sources #include it behind a guard).
ggml.o: ggml.c ggml.h ggml-cuda.h
	$(CC) $(CFLAGS) -c $< -o $@

llama.o: llama.cpp ggml.h ggml-cuda.h llama.h llama-util.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

common.o: examples/common.cpp examples/common.h
	$(CXX) $(CXXFLAGS) -c $< -o $@

libllama.so: llama.o ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)

# `clean` is a command, not a file: mark it phony so a stray file named
# "clean" can never mask it.
.PHONY: clean
clean:
	rm -vf *.o main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state build-info.h
#
# Examples
#

# $(filter-out %.h,$^) drops header prerequisites (build-info.h) from the
# link line while keeping them as rebuild triggers.
main: examples/main/main.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	@echo
	@echo '====  Run ./main -h for help.  ===='
	@echo

quantize: examples/quantize/quantize.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.h ggml.o llama.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

perplexity: examples/perplexity/perplexity.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

embedding: examples/embedding/embedding.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

save-load-state: examples/save-load-state/save-load-state.cpp build-info.h ggml.o llama.o common.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

# Regenerate build-info.h whenever the git index changes, but only replace
# the header if its content actually differs, to avoid needless rebuilds.
build-info.h: $(wildcard .git/index) scripts/build-info.sh
	@sh scripts/build-info.sh > $@.tmp
	@if ! cmp -s $@.tmp $@; then \
		mv $@.tmp $@; \
	else \
		rm $@.tmp; \
	fi
#
# Tests
#

# Builds and then immediately runs the matmul benchmark.
benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
	./$@

vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)

.PHONY: tests
tests:
	bash ./tests/run-tests.sh