Merge branch 'master' into compilade/refactor-kv-cache
@@ -10,14 +10,12 @@ WORKDIR /app

 COPY . .

-RUN mkdir build && \
-    cd build && \
-    if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
+RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
        echo "LLAMA_SYCL_F16 is set" && \
        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
    fi && \
-    cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
-    cmake --build . --config Release --target main
+    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+    cmake --build build --config Release --target main

 FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
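The same build change recurs through the rest of this merge: the in-tree `mkdir build && cd build && cmake ..` sequence is replaced by CMake's `-B`/`--build` flags, so configuration and compilation both run from the source root. A minimal generic sketch of the before/after invocation (options and target shown here are illustrative, not taken from any particular file):

```sh
# old pattern: create and enter the build directory by hand
mkdir -p build && cd build
cmake .. -DCMAKE_BUILD_TYPE=Release
cmake --build . --config Release
cd ..

# new pattern: let cmake manage the build directory; no cd needed
cmake -B build -DCMAKE_BUILD_TYPE=Release
cmake --build build --config Release
```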
@@ -14,10 +14,8 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 # Build it
 WORKDIR /app
 COPY . .
-RUN mkdir build && \
-    cd build && \
-    cmake .. -DLLAMA_VULKAN=1 && \
-    cmake --build . --config Release --target main
+RUN cmake -B build -DLLAMA_VULKAN=1 && \
+    cmake --build build --config Release --target main

 # Clean up
 WORKDIR /
@@ -10,14 +10,12 @@ WORKDIR /app

 COPY . .

-RUN mkdir build && \
-    cd build && \
-    if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
+RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
        echo "LLAMA_SYCL_F16 is set" && \
        export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \
    fi && \
-    cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
-    cmake --build . --config Release --target server
+    cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
+    cmake --build build --config Release --target server

 FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
@@ -18,10 +18,8 @@ RUN apt-get update && \
 # Build it
 WORKDIR /app
 COPY . .
-RUN mkdir build && \
-    cd build && \
-    cmake .. -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
-    cmake --build . --config Release --target server
+RUN cmake -B build -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
+    cmake --build build --config Release --target server

 # Clean up
 WORKDIR /
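For reference, any of these Dockerfiles is turned into an image with a plain `docker build`; the tag and the `-f` path below are placeholders rather than names taken from this diff, and 8080 is only the assumed server port:

```sh
# build the image from the repository root, pointing -f at the Dockerfile of interest
docker build -t local/llama.cpp:server-vulkan -f path/to/server-vulkan.Dockerfile .

# run the resulting server image, publishing its HTTP port
docker run --rm -p 8080:8080 local/llama.cpp:server-vulkan
```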
.flake8
@@ -1,3 +1,17 @@
 [flake8]
 max-line-length = 125
-ignore = W503
+ignore = E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503
+exclude =
+    # Do not traverse examples
+    examples,
+    # Do not include package initializers
+    __init__.py,
+    # No need to traverse our git directory
+    .git,
+    # There's no value in checking cache directories
+    __pycache__,
+    # No need to include the build path
+    build,
+    # This contains builds that we don't want to check
+    dist # This is generated with `python build .` for package releases
+    # max-complexity = 10
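Since the `ignore` and `exclude` lists now live in `.flake8`, a local lint run picks them up automatically; a quick sketch (the `flake8-no-print` plugin matches the workflow and pre-commit changes further down):

```sh
# install the linter plus the plugin CI uses, then lint from the repository root;
# .flake8 supplies ignore/exclude, so no extra flags are needed
pip install flake8 flake8-no-print
flake8 .
```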
.github/workflows/bench.yml
@@ -52,7 +52,19 @@ jobs:
      ftype: q4_0
      pr_comment_enabled: "true"

-    if: ${{ github.event.inputs.gpu-series == 'Standard_NC4as_T4_v3' || github.event.schedule || github.event.pull_request || github.head_ref == 'master' || github.ref_name == 'master' || github.event.push.ref == 'refs/heads/master' }}
+    if: |
+      inputs.gpu-series == 'Standard_NC4as_T4_v3'
+      || (
+        github.event_name == 'schedule'
+        && github.ref_name == 'master'
+        && github.repository_owner == 'ggerganov'
+      )
+      || github.event_name == 'pull_request_target'
+      || (
+        github.event_name == 'push'
+        && github.event.ref == 'refs/heads/master'
+        && github.repository_owner == 'ggerganov'
+      )
    steps:
      - name: Clone
        id: checkout
@@ -96,9 +108,7 @@ jobs:
        id: cmake_build
        run: |
          set -eux
-          mkdir build
-          cd build
-          cmake .. \
+          cmake -B build \
            -DLLAMA_NATIVE=OFF \
            -DLLAMA_BUILD_SERVER=ON \
            -DLLAMA_CURL=ON \
@@ -109,7 +119,7 @@ jobs:
            -DLLAMA_FATAL_WARNINGS=OFF \
            -DLLAMA_ALL_WARNINGS=OFF \
            -DCMAKE_BUILD_TYPE=Release;
-          cmake --build . --config Release -j $(nproc) --target server
+          cmake --build build --config Release -j $(nproc) --target server

      - name: Download the dataset
        id: download_dataset
.github/workflows/close-issue.yml
@@ -12,7 +12,7 @@ jobs:
    steps:
      - uses: actions/stale@v5
        with:
-          exempt-issue-labels: "refactor,help wanted,good first issue,research"
+          exempt-issue-labels: "refactor,help wanted,good first issue,research,bug"
          days-before-issue-stale: 30
          days-before-issue-close: 14
          stale-issue-label: "stale"
.github/workflows/python-lint.yml
@@ -20,5 +20,4 @@ jobs:
      - name: flake8 Lint
        uses: py-actions/flake8@v2
        with:
-          ignore: "E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503"
-          exclude: "examples/*,examples/*/**,*/**/__init__.py,convert-hf-to-gguf-update.py"
+          plugins: "flake8-no-print"
.github/workflows/server.yml
@@ -94,15 +94,13 @@ jobs:
      - name: Build
        id: cmake_build
        run: |
-          mkdir build
-          cd build
-          cmake .. \
+          cmake -B build \
            -DLLAMA_NATIVE=OFF \
            -DLLAMA_BUILD_SERVER=ON \
            -DLLAMA_CURL=ON \
            -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
            -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON ;
-          cmake --build . --config ${{ matrix.build_type }} -j $(nproc) --target server
+          cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target server


      - name: Tests
@@ -143,10 +141,8 @@ jobs:
      - name: Build
        id: cmake_build
        run: |
-          mkdir build
-          cd build
-          cmake .. -DLLAMA_CURL=ON -DCURL_LIBRARY="$env:RUNNER_TEMP/libcurl/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:RUNNER_TEMP/libcurl/include"
-          cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS} --target server
+          cmake -B build -DLLAMA_CURL=ON -DCURL_LIBRARY="$env:RUNNER_TEMP/libcurl/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:RUNNER_TEMP/libcurl/include"
+          cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} --target server

      - name: Python setup
        id: setup_python
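The two commands the workflows now run can be reproduced locally to build just the `server` target; a sketch using the same flags as above, with an example build type substituted for the matrix value:

```sh
# configure once into ./build with the options used by the CI jobs
cmake -B build \
    -DLLAMA_NATIVE=OFF \
    -DLLAMA_BUILD_SERVER=ON \
    -DLLAMA_CURL=ON \
    -DCMAKE_BUILD_TYPE=Release

# build only the server binary, in parallel
cmake --build build --config Release -j "$(nproc)" --target server
```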
.gitignore
@@ -2,6 +2,7 @@
 *.a
 *.so
 *.gguf
+*.gguf.json
 *.bin
 *.exe
 *.dll
@@ -3,13 +3,14 @@
 exclude: prompts/.*.txt
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v3.2.0
+  rev: v4.6.0
   hooks:
     - id: trailing-whitespace
     - id: end-of-file-fixer
     - id: check-yaml
     - id: check-added-large-files
 - repo: https://github.com/PyCQA/flake8
-  rev: 6.0.0
+  rev: 7.0.0
   hooks:
     - id: flake8
+      additional_dependencies: [flake8-no-print]
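To exercise the bumped hook revisions and the newly added `flake8-no-print` dependency locally, the standard `pre-commit` workflow applies (nothing project-specific assumed):

```sh
# install pre-commit, register the git hook once, then run every hook against the whole tree
pip install pre-commit
pre-commit install
pre-commit run --all-files
```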
@@ -103,6 +103,8 @@ set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for
 set(LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING
     "llama: max. batch size for using peer access")
 option(LLAMA_CUDA_NO_PEER_COPY "llama: do not use peer to peer copies" OFF)
+option(LLAMA_CUDA_NO_VMM "llama: do not try to use CUDA VMM" OFF)
+
 option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
 option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF)
 option(LLAMA_HIP_UMA "llama: use HIP unified memory architecture" OFF)
@@ -294,7 +296,7 @@ if (LLAMA_BLAS)
         if (LLAMA_STATIC)
             set(BLA_STATIC ON)
         endif()
-        if ($(CMAKE_VERSION) VERSION_GREATER_EQUAL 3.22)
+        if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.22)
             set(BLA_SIZEOF_INTEGER 8)
         endif()

@@ -403,12 +405,16 @@ if (LLAMA_CUDA)
         list(APPEND GGML_SOURCES_CUDA "ggml-cuda.cu")

         add_compile_definitions(GGML_USE_CUDA)
+        add_compile_definitions(GGML_CUDA_USE_GRAPHS)
         if (LLAMA_CUDA_FORCE_DMMV)
             add_compile_definitions(GGML_CUDA_FORCE_DMMV)
         endif()
         if (LLAMA_CUDA_FORCE_MMQ)
             add_compile_definitions(GGML_CUDA_FORCE_MMQ)
         endif()
+        if (LLAMA_CUDA_NO_VMM)
+            add_compile_definitions(GGML_CUDA_NO_VMM)
+        endif()
         add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
         add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
         if (DEFINED LLAMA_CUDA_DMMV_Y)
@@ -425,7 +431,7 @@ if (LLAMA_CUDA)

         if (LLAMA_STATIC)
             if (WIN32)
-                # As of 12.3.1 CUDA Tookit for Windows does not offer a static cublas library
+                # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library
                 set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas CUDA::cublasLt)
             else ()
                 set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
@@ -434,7 +440,11 @@ if (LLAMA_CUDA)
             set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
         endif()

-        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver)
+        if (LLAMA_CUDA_NO_VMM)
+            # No VMM requested, no need to link directly with the cuda driver lib (libcuda.so)
+        else()
+            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cuda_driver) # required by cuDeviceGetAttribute(), cuMemGetAllocationGranularity(...), ...
+        endif()

         if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
             # 52 == lowest CUDA 12 standard
@@ -1271,17 +1281,6 @@ install(
         WORLD_READ
         WORLD_EXECUTE
     DESTINATION ${CMAKE_INSTALL_BINDIR})
-install(
-    FILES convert-lora-to-ggml.py
-    PERMISSIONS
-        OWNER_READ
-        OWNER_WRITE
-        OWNER_EXECUTE
-        GROUP_READ
-        GROUP_EXECUTE
-        WORLD_READ
-        WORLD_EXECUTE
-    DESTINATION ${CMAKE_INSTALL_BINDIR})
 if (LLAMA_METAL)
     install(
         FILES ggml-metal.metal
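The new `LLAMA_CUDA_NO_VMM` switch is toggled at configure time like the other CUDA options above; a hedged example of using it (the Release build type is just a typical companion, not required):

```sh
# build the CUDA backend without virtual memory management;
# per the change above this also skips linking against the CUDA driver library (libcuda.so)
cmake -B build -DLLAMA_CUDA=ON -DLLAMA_CUDA_NO_VMM=ON
cmake --build build --config Release
```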
Makefile
@@ -77,11 +77,10 @@ test: $(TEST_TARGETS)
 		./$$test_target $(CURDIR)/models/ggml-vocab-llama-bpe.gguf; \
 		./$$test_target $(CURDIR)/models/ggml-vocab-phi-3.gguf; \
 		./$$test_target $(CURDIR)/models/ggml-vocab-falcon.gguf; \
-		./$$test_target $(CURDIR)/models/ggml-vocab-deepseek-coder.gguf; \
-		./$$test_target $(CURDIR)/models/ggml-vocab-deepseek-llm.gguf; \
 		./$$test_target $(CURDIR)/models/ggml-vocab-bert-bge.gguf; \
 		./$$test_target $(CURDIR)/models/ggml-vocab-starcoder.gguf; \
 		./$$test_target $(CURDIR)/models/ggml-vocab-gpt-2.gguf; \
+		./$$test_target $(CURDIR)/models/ggml-vocab-refact.gguf; \
 	elif [ "$$test_target" = "tests/test-tokenizer-1-spm" ]; then \
 		continue; \
 	elif [ "$$test_target" = "tests/test-tokenizer-1-bpe" ]; then \
@@ -434,7 +433,7 @@ ifdef LLAMA_CUDA
 	else
 		CUDA_PATH ?= /usr/local/cuda
 	endif
-	MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
+	MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include -DGGML_CUDA_USE_GRAPHS
 	MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
 	OBJS += ggml-cuda.o
 	OBJS += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
@@ -185,9 +185,8 @@ Upon a successful installation, SYCL is enabled for the available intel devices,
 ```sh
 git clone https://github.com/oneapi-src/oneMKL
 cd oneMKL
-mkdir -p buildWithCublas && cd buildWithCublas
-cmake ../ -DCMAKE_CXX_COMPILER=icpx -DCMAKE_C_COMPILER=icx -DENABLE_MKLGPU_BACKEND=OFF -DENABLE_MKLCPU_BACKEND=OFF -DENABLE_CUBLAS_BACKEND=ON -DTARGET_DOMAINS=blas
-make
+cmake -B buildWithCublas -DCMAKE_CXX_COMPILER=icpx -DCMAKE_C_COMPILER=icx -DENABLE_MKLGPU_BACKEND=OFF -DENABLE_MKLCPU_BACKEND=OFF -DENABLE_CUBLAS_BACKEND=ON -DTARGET_DOMAINS=blas
+cmake --build buildWithCublas --config Release
 ```

@@ -227,16 +226,15 @@ Similarly, user targeting Nvidia GPUs should expect at least one SYCL-CUDA devic
 source /opt/intel/oneapi/setvars.sh

 # Build LLAMA with MKL BLAS acceleration for intel GPU
-mkdir -p build && cd build

 # Option 1: Use FP32 (recommended for better performance in most cases)
-cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
+cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx

 # Option 2: Use FP16
-cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON
+cmake -B build -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON

-#build all binary
-cmake --build . --config Release -j -v
+# build all binary
+cmake --build build --config Release -j -v
 ```

 #### Nvidia GPU
@@ -248,16 +246,15 @@ export CPLUS_INCLUDE_DIR=/path/to/oneMKL/buildWithCublas/include:$CPLUS_INCLUDE_
 export CPLUS_INCLUDE_DIR=/path/to/oneMKL/include:$CPLUS_INCLUDE_DIR

 # Build LLAMA with Nvidia BLAS acceleration through SYCL
-mkdir -p build && cd build

 # Option 1: Use FP32 (recommended for better performance in most cases)
-cmake .. -DLLAMA_SYCL=ON -DLLAMA_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
+cmake -B build -DLLAMA_SYCL=ON -DLLAMA_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx

 # Option 2: Use FP16
-cmake .. -DLLAMA_SYCL=ON -DLLAMA_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON
+cmake -B build -DLLAMA_SYCL=ON -DLLAMA_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON

-#build all binary
-cmake --build . --config Release -j -v
+# build all binary
+cmake --build build --config Release -j -v

 ```

@@ -412,17 +409,15 @@ b. Download & install mingw-w64 make for Windows provided by w64devkit
 On the oneAPI command line window, step into the llama.cpp main directory and run the following:

 ```
-mkdir -p build
-cd build
 @call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force

 # Option 1: Use FP32 (recommended for better performance in most cases)
-cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release
+cmake -B build -G "MinGW Makefiles" -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release

 # Option 2: Or FP16
-cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release -DLLAMA_SYCL_F16=ON
+cmake -B build -G "MinGW Makefiles" -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release -DLLAMA_SYCL_F16=ON

-make -j
+cmake --build build --config Release -j
 ```

 Otherwise, run the `win-build-sycl.bat` wrapper which encapsulates the former instructions:
README.md
@@ -2,7 +2,7 @@

 ![llama](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png)

-[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Server](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml/badge.svg?branch=master&event=schedule)](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml)

 [Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml)

@@ -20,7 +20,8 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)

 ### Hot topics

-- **BPE pre-tokenization support has been added: https://github.com/ggerganov/llama.cpp/pull/6920**
+- **Initial Flash-Attention support: https://github.com/ggerganov/llama.cpp/pull/5021**
+- BPE pre-tokenization support has been added: https://github.com/ggerganov/llama.cpp/pull/6920
 - MoE memory layout has been updated - reconvert models for `mmap` support and regenerate `imatrix` https://github.com/ggerganov/llama.cpp/pull/6387
 - Model sharding instructions using `gguf-split` https://github.com/ggerganov/llama.cpp/discussions/6404
 - Fix major bug in Metal batched inference https://github.com/ggerganov/llama.cpp/pull/6225
@@ -175,6 +176,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:
 - [nat/openplayground](https://github.com/nat/openplayground)
 - [Faraday](https://faraday.dev/) (proprietary)
 - [LMStudio](https://lmstudio.ai/) (proprietary)
+- [Layla](https://play.google.com/store/apps/details?id=com.laylalite) (proprietary)
 - [LocalAI](https://github.com/mudler/LocalAI) (MIT)
 - [LostRuins/koboldcpp](https://github.com/LostRuins/koboldcpp) (AGPL)
 - [Mozilla-Ocho/llamafile](https://github.com/Mozilla-Ocho/llamafile)
@@ -308,6 +310,8 @@ In order to build llama.cpp you have three different options.
     make
     ```

+    **Note**: for `Debug` builds, run `make LLAMA_DEBUG=1`
+
 - On Windows:

   1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
@@ -322,12 +326,26 @@ In order to build llama.cpp you have three different options.
 - Using `CMake`:

   ```bash
-  mkdir build
-  cd build
-  cmake ..
-  cmake --build . --config Release
+  cmake -B build
+  cmake --build build --config Release
   ```

+  **Note**: for `Debug` builds, there are two cases:
+
+  - Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag):
+
+    ```bash
+    cmake -B build -DCMAKE_BUILD_TYPE=Debug
+    cmake --build build
+    ```
+
+  - Multi-config generators (`-G` param set to Visual Studio, XCode...):
+
+    ```bash
+    cmake -B build -G "Xcode"
+    cmake --build build --config Debug
+    ```
+
 - Using `Zig` (version 0.11 or later):

   Building for optimization levels and CPU features can be accomplished using standard build arguments, for example AVX2, FMA, F16C,
@@ -439,10 +457,8 @@ Building the program with BLAS support may lead to some performance improvements
 - Using `CMake` on Linux:

   ```bash
-  mkdir build
-  cd build
-  cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS
-  cmake --build . --config Release
+  cmake -B build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS
+  cmake --build build --config Release
   ```

 - #### BLIS
@@ -462,11 +478,9 @@ Building the program with BLAS support may lead to some performance improvements
 - Using manual oneAPI installation:
   By default, `LLAMA_BLAS_VENDOR` is set to `Generic`, so if you already sourced intel environment script and assign `-DLLAMA_BLAS=ON` in cmake, the mkl version of Blas will automatically been selected. Otherwise please install oneAPI and follow the below steps:
   ```bash
-  mkdir build
-  cd build
   source /opt/intel/oneapi/setvars.sh # You can skip this step if in oneapi-basekit docker image, only required for manual installation
-  cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_NATIVE=ON
-  cmake --build . --config Release
+  cmake -B build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_NATIVE=ON
+  cmake --build build --config Release
   ```

 - Using oneAPI docker image:
@@ -487,10 +501,8 @@ Building the program with BLAS support may lead to some performance improvements
 - Using `CMake`:

   ```bash
-  mkdir build
-  cd build
-  cmake .. -DLLAMA_CUDA=ON
-  cmake --build . --config Release
+  cmake -B build -DLLAMA_CUDA=ON
+  cmake --build build --config Release
   ```

 The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used. The following compilation options are also available to tweak performance:
@@ -517,8 +529,8 @@ Building the program with BLAS support may lead to some performance improvements
 - Using `CMake` for Linux (assuming a gfx1030-compatible AMD GPU):
   ```bash
   CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ \
-      cmake -H. -Bbuild -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
-      && cmake --build build -- -j 16
+      cmake -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
+      && cmake --build build --config Release -- -j 16
   ```
   On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DLLAMA_HIP_UMA=ON"`.
   However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).
@@ -564,15 +576,14 @@ Building the program with BLAS support may lead to some performance improvements

   ```sh
   git clone --recurse-submodules https://github.com/KhronosGroup/OpenCL-SDK.git
-  mkdir OpenCL-SDK/build
-  cd OpenCL-SDK/build
-  cmake .. -DBUILD_DOCS=OFF \
+  cd OpenCL-SDK
+  cmake -B build -DBUILD_DOCS=OFF \
     -DBUILD_EXAMPLES=OFF \
     -DBUILD_TESTING=OFF \
     -DOPENCL_SDK_BUILD_SAMPLES=OFF \
     -DOPENCL_SDK_TEST_SAMPLES=OFF
-  cmake --build . --config Release
-  cmake --install . --prefix /some/path
+  cmake --build build
+  cmake --install build --prefix /some/path
   ```
   </details>

@@ -594,23 +605,23 @@ Building the program with BLAS support may lead to some performance improvements
   ```cmd
   set OPENCL_SDK_ROOT="C:/OpenCL-SDK-v2023.04.17-Win-x64"
   git clone https://github.com/CNugteren/CLBlast.git
-  mkdir CLBlast\build
-  cd CLBlast\build
-  cmake .. -DBUILD_SHARED_LIBS=OFF -DOVERRIDE_MSVC_FLAGS_TO_MT=OFF -DTUNERS=OFF -DOPENCL_ROOT=%OPENCL_SDK_ROOT% -G "Visual Studio 17 2022" -A x64
-  cmake --build . --config Release
-  cmake --install . --prefix C:/CLBlast
+  cd CLBlast
+  cmake -B build -DBUILD_SHARED_LIBS=OFF -DOVERRIDE_MSVC_FLAGS_TO_MT=OFF -DTUNERS=OFF -DOPENCL_ROOT=%OPENCL_SDK_ROOT% -G "Visual Studio 17 2022" -A x64
+  cmake --build build --config Release
+  cmake --install build --prefix C:/CLBlast
   ```

+  (note: `--config Release` at build time is the default and only relevant for Visual Studio builds - or multi-config Ninja builds)
+
 - <details>
   <summary>Unix:</summary>

   ```sh
   git clone https://github.com/CNugteren/CLBlast.git
-  mkdir CLBlast/build
-  cd CLBlast/build
-  cmake .. -DBUILD_SHARED_LIBS=OFF -DTUNERS=OFF
-  cmake --build . --config Release
-  cmake --install . --prefix /some/path
+  cd CLBlast
+  cmake -B build -DBUILD_SHARED_LIBS=OFF -DTUNERS=OFF
+  cmake --build build --config Release
+  cmake --install build --prefix /some/path
   ```

 Where `/some/path` is where the built library will be installed (default is `/usr/local`).
@@ -624,21 +635,17 @@ Building the program with BLAS support may lead to some performance improvements
   ```
 - CMake (Unix):
   ```sh
-  mkdir build
-  cd build
-  cmake .. -DLLAMA_CLBLAST=ON -DCLBlast_DIR=/some/path
-  cmake --build . --config Release
+  cmake -B build -DLLAMA_CLBLAST=ON -DCLBlast_DIR=/some/path
+  cmake --build build --config Release
   ```
 - CMake (Windows):
   ```cmd
   set CL_BLAST_CMAKE_PKG="C:/CLBlast/lib/cmake/CLBlast"
   git clone https://github.com/ggerganov/llama.cpp
   cd llama.cpp
-  mkdir build
-  cd build
-  cmake .. -DBUILD_SHARED_LIBS=OFF -DLLAMA_CLBLAST=ON -DCMAKE_PREFIX_PATH=%CL_BLAST_CMAKE_PKG% -G "Visual Studio 17 2022" -A x64
-  cmake --build . --config Release
-  cmake --install . --prefix C:/LlamaCPP
+  cmake -B build -DBUILD_SHARED_LIBS=OFF -DLLAMA_CLBLAST=ON -DCMAKE_PREFIX_PATH=%CL_BLAST_CMAKE_PKG% -G "Visual Studio 17 2022" -A x64
+  cmake --build build --config Release
+  cmake --install build --prefix C:/LlamaCPP
   ```

 ##### Running Llama with CLBlast
@@ -694,10 +701,8 @@ Building the program with BLAS support may lead to some performance improvements
 Then, build llama.cpp using the cmake command below:

 ```bash
-mkdir -p build
-cd build
-cmake .. -DLLAMA_VULKAN=1
-cmake --build . --config Release
+cmake -B build -DLLAMA_VULKAN=1
+cmake --build build --config Release
 # Test the output binary (with "-ngl 33" to offload all layers to GPU)
 ./bin/main -m "PATH_TO_MODEL" -p "Hi you how are you" -n 50 -e -ngl 33 -t 4

@@ -709,6 +714,8 @@ Building the program with BLAS support may lead to some performance improvements

 To obtain the official LLaMA 2 weights please see the <a href="#obtaining-and-using-the-facebook-llama-2-model">Obtaining and using the Facebook LLaMA 2 model</a> section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face.

+Note: `convert.py` does not support LLaMA 3, you can use `convert-hf-to-gguf.py` with LLaMA 3 downloaded from Hugging Face.
+
 ```bash
 # obtain the official LLaMA model weights and place them in ./models
 ls ./models
@@ -930,17 +937,25 @@ If your issue is with model generation quality, then please at least scan the fo

 ### Android

+#### Build on Android using Termux
+[Termux](https://github.com/termux/termux-app#installation) is a method to execute `llama.cpp` on an Android device (no root required).
+```
+apt update && apt upgrade -y
+apt install git make cmake
+```
+
+It's recommended to move your model inside the `~/` directory for best performance:
+```
+cd storage/downloads
+mv model.gguf ~/
+```
+
+[Get the code](https://github.com/ggerganov/llama.cpp#get-the-code) & [follow the Linux build instructions](https://github.com/ggerganov/llama.cpp#build) to build `llama.cpp`.
+
 #### Building the Project using Android NDK
-You can easily run `llama.cpp` on Android device with [termux](https://termux.dev/).
+Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake.

-First, install the essential packages for termux:
-```
-pkg install clang wget git cmake
-```
-Second, obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake:
-
-You can execute the following commands on your computer to avoid downloading the NDK to your mobile. Of course, you can also do this in Termux.
-
+Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux:
 ```
 $ mkdir build-android
 $ cd build-android
@@ -948,7 +963,9 @@ $ export NDK=<your_ndk_directory>
 $ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod ..
 $ make
 ```
-Install [termux](https://termux.dev/) on your device and run `termux-setup-storage` to get access to your SD card.
+
+Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (if Android 11+ then run the command twice).
+
 Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission:

 (Assumed that you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`)
@@ -970,53 +987,10 @@ $cd /data/data/com.termux/files/home/bin
 $./main -m ../model/llama-2-7b-chat.Q4_K_M.gguf -n 128 -cml
 ```

-Here is a demo of an interactive session running on Pixel 5 phone:
+Here's a demo of an interactive session running on Pixel 5 phone:

 https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4

-#### Building the Project using Termux (F-Droid)
-Termux from F-Droid offers an alternative route to execute the project on an Android device. This method empowers you to construct the project right from within the terminal, negating the requirement for a rooted device or SD Card.
-
-Outlined below are the directives for installing the project using OpenBLAS and CLBlast. This combination is specifically designed to deliver peak performance on recent devices that feature a GPU.
-
-If you opt to utilize OpenBLAS, you'll need to install the corresponding package.
-```
-apt install libopenblas
-```
-
-Subsequently, if you decide to incorporate CLBlast, you'll first need to install the requisite OpenCL packages:
-```
-apt install ocl-icd opencl-headers opencl-clhpp clinfo
-```
-
-In order to compile CLBlast, you'll need to first clone the respective Git repository, which can be found at this URL: https://github.com/CNugteren/CLBlast. Alongside this, clone this repository into your home directory. Once this is done, navigate to the CLBlast folder and execute the commands detailed below:
-```
-cmake .
-make
-cp libclblast.so* $PREFIX/lib
-cp ./include/clblast.h ../llama.cpp
-```
-
-Following the previous steps, navigate to the LlamaCpp directory. To compile it with OpenBLAS and CLBlast, execute the command provided below:
-```
-cp /data/data/com.termux/files/usr/include/openblas/cblas.h .
-cp /data/data/com.termux/files/usr/include/openblas/openblas_config.h .
-make LLAMA_CLBLAST=1 //(sometimes you need to run this command twice)
-```
-
-Upon completion of the aforementioned steps, you will have successfully compiled the project. To run it using CLBlast, a slight adjustment is required: a command must be issued to direct the operations towards your device's physical GPU, rather than the virtual one. The necessary command is detailed below:
-```
-GGML_OPENCL_PLATFORM=0
-GGML_OPENCL_DEVICE=0
-export LD_LIBRARY_PATH=/vendor/lib64:$LD_LIBRARY_PATH
-```
-
-(Note: some Android devices, like the Zenfone 8, need the following command instead - "export LD_LIBRARY_PATH=/system/vendor/lib64:$LD_LIBRARY_PATH". Source: https://www.reddit.com/r/termux/comments/kc3ynp/opencl_working_in_termux_more_in_comments/ )
-
-For easy and swift re-execution, consider documenting this final part in a .sh script file. This will enable you to rerun the process with minimal hassle.
-
-Place your desired model into the `~/llama.cpp/models/` directory and execute the `./main (...)` script.
-
 ### Docker

 #### Prerequisites
ci/run.sh
@@ -160,9 +160,8 @@ function gg_run_test_scripts_debug {

     set -e

-    # TODO: too slow, run on dedicated node
     (cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
-    #(cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
+    (cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log

     set +e
 }
@@ -336,7 +335,8 @@ function gg_run_open_llama_3b_v2 {

     (time ./bin/imatrix --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log

     (time ./bin/save-load-state --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/save-load-state -fa --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log

     function check_ppl {
         qnt="$1"
@@ -365,47 +365,6 @@ function gg_run_open_llama_3b_v2 {

     cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log

-    # lora
-    function compare_ppl {
-        qnt="$1"
-        ppl1=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
-        ppl2=$(echo "$3" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
-
-        if [ $(echo "$ppl1 < $ppl2" | bc) -eq 1 ]; then
-            printf ' - %s @ %s (FAIL: %s > %s)\n' "$qnt" "$ppl" "$ppl1" "$ppl2"
-            return 20
-        fi
-
-        printf ' - %s @ %s %s OK\n' "$qnt" "$ppl1" "$ppl2"
-        return 0
-    }
-
-    path_lora="../models-mnt/open-llama/3B-v2/lora"
-    path_shakespeare="../models-mnt/shakespeare"
-
-    shakespeare="${path_shakespeare}/shakespeare.txt"
-    lora_shakespeare="${path_lora}/ggml-adapter-model.bin"
-
-    gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_3b_v2_shakespeare_lora/resolve/main/adapter_config.json
-    gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_3b_v2_shakespeare_lora/resolve/main/adapter_model.bin
-    gg_wget ${path_shakespeare} https://huggingface.co/slaren/open_llama_3b_v2_shakespeare_lora/resolve/main/shakespeare.txt
-
-    python3 ../convert-lora-to-ggml.py ${path_lora}
-
-    # f16
-    (time ./bin/perplexity --model ${model_f16} -f ${shakespeare} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-f16.log
-    (time ./bin/perplexity --model ${model_f16} -f ${shakespeare} --lora ${lora_shakespeare} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-f16.log
-    compare_ppl "f16 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-f16.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
-    # q8_0
-    (time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-q8_0.log
-    (time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} --lora ${lora_shakespeare} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-q8_0.log
-    compare_ppl "q8_0 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
-    # q8_0 + f16 lora-base
-    (time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} --lora ${lora_shakespeare} --lora-base ${model_f16} -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log
-    compare_ppl "q8_0 / f16 base shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
     set +e
 }

@@ -416,7 +375,6 @@ function gg_sum_open_llama_3b_v2 {
     gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)"
     gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)"
     gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)"
-    gg_printf '- lora:\n%s\n' "$(cat $OUT/${ci}-lora-ppl.log)"
     gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)"
     gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)"
     gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_0.log)"
@@ -429,11 +387,6 @@ function gg_sum_open_llama_3b_v2 {
     gg_printf '- q5_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_k.log)"
     gg_printf '- q6_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q6_k.log)"
     gg_printf '- save-load-state: \n```\n%s\n```\n' "$(cat $OUT/${ci}-save-load-state.log)"
-    gg_printf '- shakespeare (f16):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-f16.log)"
-    gg_printf '- shakespeare (f16 lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-f16.log)"
-    gg_printf '- shakespeare (q8_0):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log)"
-    gg_printf '- shakespeare (q8_0 lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0.log)"
-    gg_printf '- shakespeare (q8_0 / f16 base lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log)"
 }

 # open_llama_7b_v2
@@ -517,7 +470,10 @@ function gg_run_open_llama_7b_v2 {

     (time ./bin/imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log

-    (time ./bin/save-load-state --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/save-load-state -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/save-load-state -fa -ngl 10 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/save-load-state -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/save-load-state -fa -ngl 99 --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log

     function check_ppl {
         qnt="$1"
@@ -546,48 +502,6 @@ function gg_run_open_llama_7b_v2 {

     cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log

-    # lora
-    function compare_ppl {
-        qnt="$1"
-        ppl1=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
-        ppl2=$(echo "$3" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)
-
-        if [ $(echo "$ppl1 < $ppl2" | bc) -eq 1 ]; then
-            printf ' - %s @ %s (FAIL: %s > %s)\n' "$qnt" "$ppl" "$ppl1" "$ppl2"
-            return 20
-        fi
-
-        printf ' - %s @ %s %s OK\n' "$qnt" "$ppl1" "$ppl2"
-        return 0
-    }
-
-    path_lora="../models-mnt/open-llama/7B-v2/lora"
-    path_shakespeare="../models-mnt/shakespeare"
-
-    shakespeare="${path_shakespeare}/shakespeare.txt"
-    lora_shakespeare="${path_lora}/ggml-adapter-model.bin"
-
-    gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_7b_v2_shakespeare_lora/resolve/main/adapter_config.json
-    gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_7b_v2_shakespeare_lora/resolve/main/adapter_model.bin
-    gg_wget ${path_shakespeare} https://huggingface.co/slaren/open_llama_7b_v2_shakespeare_lora/resolve/main/shakespeare.txt
-
-    python3 ../convert-lora-to-ggml.py ${path_lora}
-
-    # f16
-    (time ./bin/perplexity --model ${model_f16} -f ${shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-f16.log
-    (time ./bin/perplexity --model ${model_f16} -f ${shakespeare} --lora ${lora_shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-f16.log
-    compare_ppl "f16 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-f16.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
-    # currently not supported by the CUDA backend
-    # q8_0
-    #(time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-q8_0.log
-    #(time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} --lora ${lora_shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-q8_0.log
-    #compare_ppl "q8_0 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
-    # q8_0 + f16 lora-base
-    #(time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} --lora ${lora_shakespeare} --lora-base ${model_f16} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log
-    #compare_ppl "q8_0 / f16 shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log
-
     set +e
 }

@@ -598,7 +512,6 @@ function gg_sum_open_llama_7b_v2 {
     gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)"
     gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)"
     gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)"
-    gg_printf '- lora:\n%s\n' "$(cat $OUT/${ci}-lora-ppl.log)"
     gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)"
     gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)"
     gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q4_0.log)"
@@ -611,11 +524,6 @@ function gg_sum_open_llama_7b_v2 {
     gg_printf '- q5_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q5_k.log)"
     gg_printf '- q6_k:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q6_k.log)"
     gg_printf '- save-load-state: \n```\n%s\n```\n' "$(cat $OUT/${ci}-save-load-state.log)"
-    gg_printf '- shakespeare (f16):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-f16.log)"
-    gg_printf '- shakespeare (f16 lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-f16.log)"
-    #gg_printf '- shakespeare (q8_0):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log)"
-    #gg_printf '- shakespeare (q8_0 lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0.log)"
-    #gg_printf '- shakespeare (q8_0 / f16 base lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log)"
 }

 # bge-small
@ -691,8 +599,10 @@ test $ret -eq 0 && gg_run ctest_release
|
|||||||
if [ -z ${GG_BUILD_LOW_PERF} ]; then
|
if [ -z ${GG_BUILD_LOW_PERF} ]; then
|
||||||
test $ret -eq 0 && gg_run embd_bge_small
|
test $ret -eq 0 && gg_run embd_bge_small
|
||||||
|
|
||||||
test $ret -eq 0 && gg_run test_scripts_debug
|
if [ -z ${GG_BUILD_CLOUD} ] || [ ${GG_BUILD_EXTRA_TESTS_0} ]; then
|
||||||
test $ret -eq 0 && gg_run test_scripts_release
|
test $ret -eq 0 && gg_run test_scripts_debug
|
||||||
|
test $ret -eq 0 && gg_run test_scripts_release
|
||||||
|
fi
|
||||||
|
|
||||||
if [ -z ${GG_BUILD_VRAM_GB} ] || [ ${GG_BUILD_VRAM_GB} -ge 8 ]; then
|
if [ -z ${GG_BUILD_VRAM_GB} ] || [ ${GG_BUILD_VRAM_GB} -ge 8 ]; then
|
||||||
if [ -z ${GG_BUILD_CUDA} ]; then
|
if [ -z ${GG_BUILD_CUDA} ]; then
|
||||||
|
@@ -1,4 +1,6 @@
#include "common.h"
+// Change JSON_ASSERT from assert() to GGML_ASSERT:
+#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"
#include "json-schema-to-grammar.h"
#include "llama.h"
@@ -67,7 +69,6 @@
#include <sys/syslimits.h>
#endif
#define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083
-#define LLAMA_CURL_MAX_HEADER_LENGTH 256
#endif // LLAMA_USE_CURL

using json = nlohmann::ordered_json;
@@ -77,7 +78,7 @@ int32_t get_num_physical_cores() {
// enumerate the set of thread siblings, num entries is num cores
std::unordered_set<std::string> siblings;
for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) {
-std::ifstream thread_siblings("/sys/devices/system/cpu"
+std::ifstream thread_siblings("/sys/devices/system/cpu/cpu"
    + std::to_string(cpu) + "/topology/thread_siblings");
if (!thread_siblings.is_open()) {
break; // no more cpus
@@ -893,13 +894,17 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
invalid_param = true;
return true;
}
-params.image = argv[i];
+params.image.emplace_back(argv[i]);
return true;
}
if (arg == "-i" || arg == "--interactive") {
params.interactive = true;
return true;
}
+if (arg == "--interactive-specials") {
+params.interactive_specials = true;
+return true;
+}
if (arg == "--embedding") {
params.embedding = true;
return true;
@@ -912,6 +917,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.instruct = true;
return true;
}
+if (arg == "-cnv" || arg == "--conversation") {
+params.conversation = true;
+return true;
+}
if (arg == "-cml" || arg == "--chatml") {
params.chatml = true;
return true;
@@ -948,6 +957,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.cont_batching = true;
return true;
}
+if (arg == "-fa" || arg == "--flash-attn") {
+params.flash_attn = true;
+return true;
+}
if (arg == "--color") {
params.use_color = true;
return true;
@@ -1324,6 +1337,29 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
return false;
}

+void gpt_params_handle_model_default(gpt_params & params) {
+if (!params.hf_repo.empty()) {
+// short-hand to avoid specifying --hf-file -> default it to --model
+if (params.hf_file.empty()) {
+if (params.model.empty()) {
+throw std::invalid_argument("error: --hf-repo requires either --hf-file or --model\n");
+}
+params.hf_file = params.model;
+} else if (params.model.empty()) {
+params.model = "models/" + string_split(params.hf_file, '/').back();
+}
+} else if (!params.model_url.empty()) {
+if (params.model.empty()) {
+auto f = string_split(params.model_url, '#').front();
+f = string_split(f, '?').front();
+f = string_split(f, '/').back();
+params.model = "models/" + f;
+}
+} else if (params.model.empty()) {
+params.model = DEFAULT_MODEL_PATH;
+}
+}

bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
bool invalid_param = false;
std::string arg;
@@ -1335,14 +1371,12 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
std::replace(arg.begin(), arg.end(), '_', '-');
}

if (!gpt_params_find_arg(argc, argv, arg, params, i, invalid_param)) {
throw std::invalid_argument("error: unknown argument: " + arg);
}
-}
-if (invalid_param) {
-throw std::invalid_argument("error: invalid parameter for argument: " + arg);
+if (invalid_param) {
+throw std::invalid_argument("error: invalid parameter for argument: " + arg);
+}
}

if (params.prompt_cache_all &&
@@ -1352,10 +1386,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
}

-// short-hand to avoid specifying --hf-file -> default it to --model
-if (!params.hf_repo.empty() && params.hf_file.empty()) {
-params.hf_file = params.model;
-}
+gpt_params_handle_model_default(params);

if (params.escape) {
process_escapes(params.prompt);
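As a side note on the hunk above: `gpt_params_handle_model_default()` now derives a default `--model` path from `--hf-file` or `--model-url` when none is given, falling back to `DEFAULT_MODEL_PATH`. Below is a minimal standalone sketch of that defaulting rule, assuming the behavior shown in the patch; the helper names and example paths are hypothetical and not part of the change itself.

```cpp
#include <cstdio>
#include <string>

// hypothetical helper: basename after the last '/'
static std::string basename_of(const std::string & s) {
    const auto pos = s.find_last_of('/');
    return pos == std::string::npos ? s : s.substr(pos + 1);
}

// sketch of the defaulting rule: "models/" + basename of --hf-file or --model-url,
// otherwise the compiled-in default path
static std::string default_model_path(std::string hf_file, std::string model_url) {
    if (!hf_file.empty()) {
        return "models/" + basename_of(hf_file);
    }
    if (!model_url.empty()) {
        // strip a #fragment and ?query before taking the basename, as the patch does
        model_url = model_url.substr(0, model_url.find('#'));
        model_url = model_url.substr(0, model_url.find('?'));
        return "models/" + basename_of(model_url);
    }
    return "models/7B/ggml-model-f16.gguf"; // DEFAULT_MODEL_PATH
}

int main() {
    // illustrative inputs only
    std::printf("%s\n", default_model_path("ggml-org/phi-2.gguf", "").c_str());
    std::printf("%s\n", default_model_path("", "https://example.com/x.gguf?download=true").c_str());
    return 0;
}
```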
@@ -1393,7 +1424,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" -h, --help show this help message and exit\n");
printf(" --version show version and build info\n");
printf(" -i, --interactive run in interactive mode\n");
+printf(" --interactive-specials allow special tokens in user text, in interactive mode\n");
printf(" --interactive-first run in interactive mode and wait for input right away\n");
+printf(" -cnv, --conversation run in conversation mode (does not print special tokens and suffix/prefix)\n");
printf(" -ins, --instruct run in instruction mode (use with Alpaca models)\n");
printf(" -cml, --chatml run in chatml mode (use with ChatML-compatible models)\n");
printf(" --multiline-input allows you to write or paste multiple lines without ending each in '\\'\n");
@@ -1494,8 +1527,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" -ns N, --sequences N number of sequences to decode (default: %d)\n", params.n_sequences);
printf(" -ps N, --p-split N speculative decoding split probability (default: %.1f)\n", (double)params.p_split);
printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
+printf(" -fa, --flash-attn enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
printf(" --mmproj MMPROJ_FILE path to a multimodal projector file for LLaVA. see examples/llava/README.md\n");
-printf(" --image IMAGE_FILE path to an image file. use with multimodal models\n");
+printf(" --image IMAGE_FILE path to an image file. use with multimodal models. Specify multiple times for batching\n");
if (llama_supports_mlock()) {
printf(" --mlock force system to keep model in RAM rather than swapping or compressing\n");
}
@@ -1548,7 +1582,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" --control-vector-layer-range START END\n");
printf(" layer range to apply the control vector(s) to, start and end inclusive\n");
printf(" -m FNAME, --model FNAME\n");
-printf(" model path (default: %s)\n", params.model.c_str());
+printf(" model path (default: models/$filename with filename from --hf-file or --model-url if set, otherwise %s)\n", DEFAULT_MODEL_PATH);
printf(" -md FNAME, --model-draft FNAME\n");
printf(" draft model for speculative decoding (default: unused)\n");
printf(" -mu MODEL_URL, --model-url MODEL_URL\n");
@@ -1866,6 +1900,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
cparams.cb_eval = params.cb_eval;
cparams.cb_eval_user_data = params.cb_eval_user_data;
cparams.offload_kqv = !params.no_kv_offload;
+cparams.flash_attn = params.flash_attn;

cparams.type_k = kv_cache_type_from_str(params.cache_type_k);
cparams.type_v = kv_cache_type_from_str(params.cache_type_v);
@@ -1896,59 +1931,75 @@ void llama_batch_add(

#ifdef LLAMA_USE_CURL

-static bool llama_download_file(CURL * curl, const char * url, const char * path) {
+static bool starts_with(const std::string & str, const std::string & prefix) {
+// While we wait for C++20's std::string::starts_with...
+return str.rfind(prefix, 0) == 0;
+}

+static bool llama_download_file(const std::string & url, const std::string & path) {

+// Initialize libcurl
+std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> curl(curl_easy_init(), &curl_easy_cleanup);
+if (!curl) {
+fprintf(stderr, "%s: error initializing libcurl\n", __func__);
+return false;
+}

bool force_download = false;

// Set the URL, allow to follow http redirection
-curl_easy_setopt(curl, CURLOPT_URL, url);
+curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
-curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);

#if defined(_WIN32)
// CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
// operating system. Currently implemented under MS-Windows.
-curl_easy_setopt(curl, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
+curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif

// Check if the file already exists locally
struct stat model_file_info;
-auto file_exists = (stat(path, &model_file_info) == 0);
+auto file_exists = (stat(path.c_str(), &model_file_info) == 0);

-// If the file exists, check for ${path_model}.etag or ${path_model}.lastModified files
+// If the file exists, check its JSON metadata companion file.
-char etag[LLAMA_CURL_MAX_HEADER_LENGTH] = {0};
+std::string metadata_path = path + ".json";
-char etag_path[PATH_MAX] = {0};
+nlohmann::json metadata;
-snprintf(etag_path, sizeof(etag_path), "%s.etag", path);
+std::string etag;
+std::string last_modified;
-char last_modified[LLAMA_CURL_MAX_HEADER_LENGTH] = {0};
-char last_modified_path[PATH_MAX] = {0};
-snprintf(last_modified_path, sizeof(last_modified_path), "%s.lastModified", path);

if (file_exists) {
-auto * f_etag = fopen(etag_path, "r");
+// Try and read the JSON metadata file (note: stream autoclosed upon exiting this block).
-if (f_etag) {
+std::ifstream metadata_in(metadata_path);
-if (!fgets(etag, sizeof(etag), f_etag)) {
+if (metadata_in.good()) {
-fprintf(stderr, "%s: unable to read file %s\n", __func__, etag_path);
+try {
-} else {
+metadata_in >> metadata;
-fprintf(stderr, "%s: previous file found %s: %s\n", __func__, etag_path, etag);
+fprintf(stderr, "%s: previous metadata file found %s: %s\n", __func__, metadata_path.c_str(), metadata.dump().c_str());
+if (metadata.contains("url") && metadata.at("url").is_string()) {
+auto previous_url = metadata.at("url").get<std::string>();
+if (previous_url != url) {
+fprintf(stderr, "%s: Model URL mismatch: %s != %s\n", __func__, url.c_str(), previous_url.c_str());
+return false;
+}
+}
+if (metadata.contains("etag") && metadata.at("etag").is_string()) {
+etag = metadata.at("etag");
+}
+if (metadata.contains("lastModified") && metadata.at("lastModified").is_string()) {
+last_modified = metadata.at("lastModified");
+}
+} catch (const nlohmann::json::exception & e) {
+fprintf(stderr, "%s: error reading metadata file %s: %s\n", __func__, metadata_path.c_str(), e.what());
+return false;
}
-fclose(f_etag);
-}

-auto * f_last_modified = fopen(last_modified_path, "r");
-if (f_last_modified) {
-if (!fgets(last_modified, sizeof(last_modified), f_last_modified)) {
-fprintf(stderr, "%s: unable to read file %s\n", __func__, last_modified_path);
-} else {
-fprintf(stderr, "%s: previous file found %s: %s\n", __func__, last_modified_path,
-last_modified);
-}
-fclose(f_last_modified);
}
+} else {
+fprintf(stderr, "%s: no previous model file found %s\n", __func__, path.c_str());
}

// Send a HEAD request to retrieve the etag and last-modified headers
struct llama_load_model_from_url_headers {
-char etag[LLAMA_CURL_MAX_HEADER_LENGTH] = {0};
+std::string etag;
-char last_modified[LLAMA_CURL_MAX_HEADER_LENGTH] = {0};
+std::string last_modified;
};
llama_load_model_from_url_headers headers;
{
@@ -1956,38 +2007,37 @@ static bool llama_download_file(CURL * curl, const char * url, const char * path
auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t {
llama_load_model_from_url_headers *headers = (llama_load_model_from_url_headers *) userdata;

-// Convert header field name to lowercase
+static std::regex header_regex("([^:]+): (.*)\r\n");
-for (size_t i = 0; i < n_items && buffer[i] != ':'; ++i) {
+static std::regex etag_regex("ETag", std::regex_constants::icase);
-buffer[i] = tolower(buffer[i]);
+static std::regex last_modified_regex("Last-Modified", std::regex_constants::icase);
-}

-const char * etag_prefix = "etag: ";
+std::string header(buffer, n_items);
-if (strncmp(buffer, etag_prefix, strlen(etag_prefix)) == 0) {
+std::smatch match;
-strncpy(headers->etag, buffer + strlen(etag_prefix), n_items - strlen(etag_prefix) - 2); // Remove CRLF
+if (std::regex_match(header, match, header_regex)) {
-}
+const std::string & key = match[1];
+const std::string & value = match[2];
-const char * last_modified_prefix = "last-modified: ";
+if (std::regex_match(key, match, etag_regex)) {
-if (strncmp(buffer, last_modified_prefix, strlen(last_modified_prefix)) == 0) {
+headers->etag = value;
-strncpy(headers->last_modified, buffer + strlen(last_modified_prefix),
+} else if (std::regex_match(key, match, last_modified_regex)) {
-n_items - strlen(last_modified_prefix) - 2); // Remove CRLF
+headers->last_modified = value;
+}
}
return n_items;
};

-curl_easy_setopt(curl, CURLOPT_NOBODY, 1L); // will trigger the HEAD verb
+curl_easy_setopt(curl.get(), CURLOPT_NOBODY, 1L); // will trigger the HEAD verb
-curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 1L); // hide head request progress
+curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L); // hide head request progress
-curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, static_cast<CURLOPT_HEADERFUNCTION_PTR>(header_callback));
+curl_easy_setopt(curl.get(), CURLOPT_HEADERFUNCTION, static_cast<CURLOPT_HEADERFUNCTION_PTR>(header_callback));
-curl_easy_setopt(curl, CURLOPT_HEADERDATA, &headers);
+curl_easy_setopt(curl.get(), CURLOPT_HEADERDATA, &headers);

-CURLcode res = curl_easy_perform(curl);
+CURLcode res = curl_easy_perform(curl.get());
if (res != CURLE_OK) {
-curl_easy_cleanup(curl);
fprintf(stderr, "%s: curl_easy_perform() failed: %s\n", __func__, curl_easy_strerror(res));
return false;
}

long http_code = 0;
-curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
+curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
if (http_code != 200) {
// HEAD not supported, we don't know if the file has changed
// force trigger downloading
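For reference, the regex-based header parsing introduced in the hunk above can be exercised in isolation. The following is a small self-contained sketch using the same patterns as the new `header_callback`; the sample header values are made up for illustration.

```cpp
#include <cstdio>
#include <regex>
#include <string>

int main() {
    // same patterns as the patched header_callback
    const std::regex header_regex("([^:]+): (.*)\r\n");
    const std::regex etag_regex("ETag", std::regex_constants::icase);
    const std::regex last_modified_regex("Last-Modified", std::regex_constants::icase);

    // made-up sample headers, as libcurl would hand them to the callback
    const std::string raw_headers[] = {
        "etag: \"abc123\"\r\n",
        "Last-Modified: Tue, 07 May 2024 00:00:00 GMT\r\n",
    };

    std::string etag, last_modified;
    for (const std::string & header : raw_headers) {
        std::smatch match;
        if (std::regex_match(header, match, header_regex)) {
            const std::string key   = match[1];
            const std::string value = match[2];
            if (std::regex_match(key, etag_regex)) {
                etag = value;                 // header names compare case-insensitively
            } else if (std::regex_match(key, last_modified_regex)) {
                last_modified = value;
            }
        }
    }
    std::printf("etag=%s last_modified=%s\n", etag.c_str(), last_modified.c_str());
    return 0;
}
```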
@@ -1996,28 +2046,30 @@ static bool llama_download_file(CURL * curl, const char * url, const char * path
}
}

-// If the ETag or the Last-Modified headers are different: trigger a new download
+bool should_download = !file_exists || force_download;
-bool should_download = !file_exists
+if (!should_download) {
-|| force_download
+if (!etag.empty() && etag != headers.etag) {
-|| (strlen(headers.etag) > 0 && strcmp(etag, headers.etag) != 0)
+fprintf(stderr, "%s: ETag header is different (%s != %s): triggering a new download\n", __func__, etag.c_str(), headers.etag.c_str());
-|| (strlen(headers.last_modified) > 0 && strcmp(last_modified, headers.last_modified) != 0);
+should_download = true;
+} else if (!last_modified.empty() && last_modified != headers.last_modified) {
+fprintf(stderr, "%s: Last-Modified header is different (%s != %s): triggering a new download\n", __func__, last_modified.c_str(), headers.last_modified.c_str());
+should_download = true;
+}
+}
if (should_download) {
-char path_temporary[PATH_MAX] = {0};
+std::string path_temporary = path + ".downloadInProgress";
-snprintf(path_temporary, sizeof(path_temporary), "%s.downloadInProgress", path);
if (file_exists) {
-fprintf(stderr, "%s: deleting previous downloaded file: %s\n", __func__, path);
+fprintf(stderr, "%s: deleting previous downloaded file: %s\n", __func__, path.c_str());
-if (remove(path) != 0) {
+if (remove(path.c_str()) != 0) {
-curl_easy_cleanup(curl);
+fprintf(stderr, "%s: unable to delete file: %s\n", __func__, path.c_str());
-fprintf(stderr, "%s: unable to delete file: %s\n", __func__, path);
return false;
}
}

// Set the output file
-auto * outfile = fopen(path_temporary, "wb");
+std::unique_ptr<FILE, decltype(&fclose)> outfile(fopen(path_temporary.c_str(), "wb"), fclose);
if (!outfile) {
-curl_easy_cleanup(curl);
+fprintf(stderr, "%s: error opening local file for writing: %s\n", __func__, path.c_str());
-fprintf(stderr, "%s: error opening local file for writing: %s\n", __func__, path);
return false;
}

@@ -2025,12 +2077,12 @@ static bool llama_download_file(CURL * curl, const char * url, const char * path
auto write_callback = [](void * data, size_t size, size_t nmemb, void * fd) -> size_t {
return fwrite(data, size, nmemb, (FILE *)fd);
};
-curl_easy_setopt(curl, CURLOPT_NOBODY, 0L);
+curl_easy_setopt(curl.get(), CURLOPT_NOBODY, 0L);
-curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, static_cast<CURLOPT_WRITEFUNCTION_PTR>(write_callback));
+curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast<CURLOPT_WRITEFUNCTION_PTR>(write_callback));
-curl_easy_setopt(curl, CURLOPT_WRITEDATA, outfile);
+curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, outfile.get());

// display download progress
-curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
+curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 0L);

// helper function to hide password in URL
auto llama_download_hide_password_in_url = [](const std::string & url) -> std::string {
@@ -2049,51 +2101,34 @@ static bool llama_download_file(CURL * curl, const char * url, const char * path

// start the download
fprintf(stderr, "%s: downloading from %s to %s (server_etag:%s, server_last_modified:%s)...\n", __func__,
-llama_download_hide_password_in_url(url).c_str(), path, headers.etag, headers.last_modified);
+llama_download_hide_password_in_url(url).c_str(), path.c_str(), headers.etag.c_str(), headers.last_modified.c_str());
-auto res = curl_easy_perform(curl);
+auto res = curl_easy_perform(curl.get());
if (res != CURLE_OK) {
-fclose(outfile);
-curl_easy_cleanup(curl);
fprintf(stderr, "%s: curl_easy_perform() failed: %s\n", __func__, curl_easy_strerror(res));
return false;
}

long http_code = 0;
-curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &http_code);
+curl_easy_getinfo (curl.get(), CURLINFO_RESPONSE_CODE, &http_code);
if (http_code < 200 || http_code >= 400) {
-fclose(outfile);
-curl_easy_cleanup(curl);
fprintf(stderr, "%s: invalid http status code received: %ld\n", __func__, http_code);
return false;
}

-// Clean up
+// Causes file to be closed explicitly here before we rename it.
-fclose(outfile);
+outfile.reset();

-// Write the new ETag to the .etag file
+// Write the updated JSON metadata file.
-if (strlen(headers.etag) > 0) {
+metadata.update({
-auto * etag_file = fopen(etag_path, "w");
+{"url", url},
-if (etag_file) {
+{"etag", headers.etag},
-fputs(headers.etag, etag_file);
+{"lastModified", headers.last_modified}
-fclose(etag_file);
+});
-fprintf(stderr, "%s: file etag saved %s: %s\n", __func__, etag_path, headers.etag);
+std::ofstream(metadata_path) << metadata.dump(4);
-}
+fprintf(stderr, "%s: file metadata saved: %s\n", __func__, metadata_path.c_str());
-}

-// Write the new lastModified to the .etag file
+if (rename(path_temporary.c_str(), path.c_str()) != 0) {
-if (strlen(headers.last_modified) > 0) {
+fprintf(stderr, "%s: unable to rename file: %s to %s\n", __func__, path_temporary.c_str(), path.c_str());
-auto * last_modified_file = fopen(last_modified_path, "w");
-if (last_modified_file) {
-fputs(headers.last_modified, last_modified_file);
-fclose(last_modified_file);
-fprintf(stderr, "%s: file last modified saved %s: %s\n", __func__, last_modified_path,
-headers.last_modified);
-}
-}

-if (rename(path_temporary, path) != 0) {
-curl_easy_cleanup(curl);
-fprintf(stderr, "%s: unable to rename file: %s to %s\n", __func__, path_temporary, path);
return false;
}
}
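The hunks above replace the old `.etag`/`.lastModified` sidecar files with a single `<model>.json` metadata companion. A standalone sketch of the shape of that file and of the read-back check follows; it assumes the `url`/`etag`/`lastModified` keys used in the patch, while the file name and values here are illustrative only.

```cpp
#include <fstream>
#include <string>
#include <nlohmann/json.hpp>   // vendored as "json.hpp" in the repository

int main() {
    nlohmann::json metadata;

    // what llama_download_file() records after a successful download
    metadata.update({
        {"url",          "https://example.com/model.gguf"},   // illustrative URL
        {"etag",         "\"0123456789abcdef\""},             // illustrative ETag
        {"lastModified", "Tue, 07 May 2024 00:00:00 GMT"}
    });
    std::ofstream("model.gguf.json") << metadata.dump(4);

    // on the next run the companion file is read back and compared against
    // the server's HEAD response to decide whether to re-download
    nlohmann::json previous;
    std::ifstream metadata_in("model.gguf.json");
    if (metadata_in.good()) {
        metadata_in >> previous;
    }
    const bool unchanged = previous.value("etag", "") == metadata["etag"];
    return unchanged ? 0 : 1;
}
```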
@@ -2111,15 +2146,7 @@ struct llama_model * llama_load_model_from_url(
return NULL;
}

-// Initialize libcurl
+if (!llama_download_file(model_url, path_model)) {
-auto * curl = curl_easy_init();

-if (!curl) {
-fprintf(stderr, "%s: error initializing libcurl\n", __func__);
-return NULL;
-}

-if (!llama_download_file(curl, model_url, path_model)) {
return NULL;
}

@@ -2133,7 +2160,6 @@ struct llama_model * llama_load_model_from_url(
auto * ctx_gguf = gguf_init_from_file(path_model, gguf_params);
if (!ctx_gguf) {
fprintf(stderr, "\n%s: failed to load input GGUF from %s\n", __func__, path_model);
-curl_easy_cleanup(curl);
return NULL;
}

@@ -2145,8 +2171,6 @@ struct llama_model * llama_load_model_from_url(
gguf_free(ctx_gguf);
}

-curl_easy_cleanup(curl);

if (n_split > 1) {
char split_prefix[PATH_MAX] = {0};
char split_url_prefix[LLAMA_CURL_MAX_URL_LENGTH] = {0};
@@ -2177,11 +2201,7 @@ struct llama_model * llama_load_model_from_url(
char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
llama_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split);

-auto * curl = curl_easy_init();
+return llama_download_file(split_url, split_path);
-bool res = llama_download_file(curl, split_url, split_path);
-curl_easy_cleanup(curl);

-return res;
}, idx));
}
@@ -2635,6 +2655,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
dump_string_yaml_multiline(stream, "in_suffix", params.input_prefix.c_str());
fprintf(stream, "instruct: %s # default: false\n", params.instruct ? "true" : "false");
fprintf(stream, "interactive: %s # default: false\n", params.interactive ? "true" : "false");
+fprintf(stream, "interactive_specials: %s # default: false\n", params.interactive_specials ? "true" : "false");
fprintf(stream, "interactive_first: %s # default: false\n", params.interactive_first ? "true" : "false");
fprintf(stream, "keep: %d # default: 0\n", params.n_keep);
fprintf(stream, "logdir: %s # default: unset (no logging)\n", params.logdir.c_str());
@@ -2668,7 +2689,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
fprintf(stream, "mirostat_ent: %f # default: 5.0\n", sparams.mirostat_tau);
fprintf(stream, "mirostat_lr: %f # default: 0.1\n", sparams.mirostat_eta);
fprintf(stream, "mlock: %s # default: false\n", params.use_mlock ? "true" : "false");
-fprintf(stream, "model: %s # default: models/7B/ggml-model.bin\n", params.model.c_str());
+fprintf(stream, "model: %s # default: %s\n", params.model.c_str(), DEFAULT_MODEL_PATH);
fprintf(stream, "model_draft: %s # default:\n", params.model_draft.c_str());
fprintf(stream, "multiline_input: %s # default: false\n", params.multiline_input ? "true" : "false");
fprintf(stream, "n_gpu_layers: %d # default: -1\n", params.n_gpu_layers);
@@ -2703,6 +2724,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
fprintf(stream, "seed: %u # default: -1 (random seed)\n", params.seed);
fprintf(stream, "simple_io: %s # default: false\n", params.simple_io ? "true" : "false");
fprintf(stream, "cont_batching: %s # default: false\n", params.cont_batching ? "true" : "false");
+fprintf(stream, "flash_attn: %s # default: false\n", params.flash_attn ? "true" : "false");
fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);

const std::vector<float> tensor_split_vector(params.tensor_split, params.tensor_split + llama_max_devices());
@@ -31,6 +31,8 @@
fprintf(stderr, "%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); \
} while(0)

+#define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf"
+
// build info
extern int LLAMA_BUILD_NUMBER;
extern char const *LLAMA_COMMIT;
@@ -92,7 +94,7 @@ struct gpt_params {
// // sampling parameters
struct llama_sampling_params sparams;

-std::string model = "models/7B/ggml-model-f16.gguf"; // model path
+std::string model = ""; // model path
std::string model_draft = ""; // draft model for speculative decoding
std::string model_alias = "unknown"; // model alias
std::string model_url = ""; // model url to download
@@ -133,11 +135,13 @@ struct gpt_params {
bool multiple_choice = false; // compute TruthfulQA score over random tasks from datafile supplied in prompt
size_t multiple_choice_tasks = 0; // number of tasks to use when computing the TruthfulQA score. If 0, all tasks will be computed

-bool kl_divergence = false; // compute KL-divergence
+bool kl_divergence = false; // compute KL divergence

bool random_prompt = false; // do not randomize prompt if none provided
bool use_color = false; // use color to distinguish generations and inputs
bool interactive = false; // interactive mode
+bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
+bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
bool chatml = false; // chatml mode (used for models trained on chatml syntax)
bool prompt_cache_all = false; // save user input and generations to prompt cache
bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
@@ -148,6 +152,7 @@ struct gpt_params {
bool multiline_input = false; // reverse the usage of `\`
bool simple_io = false; // improves compatibility with subprocesses and limited consoles
bool cont_batching = true; // insert new sequences for decoding on-the-fly
+bool flash_attn = false; // flash attention

bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
bool ignore_eos = false; // ignore generated EOS tokens
@@ -167,10 +172,12 @@ struct gpt_params {
std::string cache_type_v = "f16"; // KV cache data type for the V

// multimodal models (see examples/llava)
std::string mmproj = ""; // path to multimodal projector
-std::string image = ""; // path to an image file
+std::vector<std::string> image; // path to image file(s)
};

+void gpt_params_handle_model_default(gpt_params & params);
+
bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);

bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params);
@@ -142,6 +142,9 @@ namespace grammar_parser {
pos++;
last_sym_start = out_elements.size();
while (*pos != '"') {
+if (!*pos) {
+throw std::runtime_error("unexpected end of input");
+}
auto char_pair = parse_char(pos);
pos = char_pair.second;
out_elements.push_back({LLAMA_GRETYPE_CHAR, char_pair.first});
@@ -156,6 +159,9 @@ namespace grammar_parser {
}
last_sym_start = out_elements.size();
while (*pos != ']') {
+if (!*pos) {
+throw std::runtime_error("unexpected end of input");
+}
auto char_pair = parse_char(pos);
pos = char_pair.second;
enum llama_gretype type = last_sym_start < out_elements.size()
@@ -164,6 +170,9 @@ namespace grammar_parser {

out_elements.push_back({type, char_pair.first});
if (pos[0] == '-' && pos[1] != ']') {
+if (!pos[1]) {
+throw std::runtime_error("unexpected end of input");
+}
auto endchar_pair = parse_char(pos + 1);
pos = endchar_pair.second;
out_elements.push_back({LLAMA_GRETYPE_CHAR_RNG_UPPER, endchar_pair.first});
@@ -1,4 +1,8 @@
#pragma once

+#include "ggml.h"
+// Change JSON_ASSERT from assert() to GGML_ASSERT:
+#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

std::string json_schema_to_grammar(const nlohmann::ordered_json& schema);
@@ -234,7 +234,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
// INTERNAL, DO NOT USE
// USE LOG() INSTEAD
//
-#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER)
+#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER) || defined(__clang__)
#define LOG_IMPL(str, ...) \
do { \
if (LOG_TARGET != nullptr) \
@@ -257,7 +257,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
// INTERNAL, DO NOT USE
// USE LOG_TEE() INSTEAD
//
-#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER)
+#if !defined(_MSC_VER) || defined(__INTEL_LLVM_COMPILER) || defined(__clang__)
#define LOG_TEE_IMPL(str, ...) \
do { \
if (LOG_TARGET != nullptr) \
@@ -35,6 +35,8 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_

result->prev.resize(params.n_prev);

+result->n_valid = 0;
+
llama_sampling_set_rng_seed(result, params.seed);

return result;
@@ -64,6 +66,7 @@ void llama_sampling_reset(llama_sampling_context * ctx) {

std::fill(ctx->prev.begin(), ctx->prev.end(), 0);
ctx->cur.clear();
+ctx->n_valid = 0;
}

void llama_sampling_set_rng_seed(struct llama_sampling_context * ctx, uint32_t seed) {
@@ -253,6 +256,8 @@ static llama_token llama_sampling_sample_impl(
}
}

+ctx_sampling->n_valid = temp == 0.0f ? 0 : cur_p.size;
+
return id;
}

@@ -81,6 +81,7 @@ struct llama_sampling_context {
// TODO: replace with ring-buffer
std::vector<llama_token> prev;
std::vector<llama_token_data> cur;
+size_t n_valid; // Number of correct top tokens with correct probabilities.

std::mt19937 rng;
};
207 convert-hf-to-gguf-update.py (Normal file → Executable file)
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3

# This script downloads the tokenizer models of the specified models from Huggingface and
# generates the get_vocab_base_pre() function for convert-hf-to-gguf.py
#
@@ -21,6 +23,7 @@
# TODO: automate the update of convert-hf-to-gguf.py
#

+import logging
import os
import requests
import sys
@@ -28,49 +31,69 @@ import json

from hashlib import sha256
from enum import IntEnum, auto
+from transformers import AutoTokenizer

+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger("convert-hf-to-gguf-update")


class TOKENIZER_TYPE(IntEnum):
SPM = auto()
BPE = auto()
WPM = auto()


# TODO: this string has to exercise as much pre-tokenizer functionality as possible
# will be updated with time - contributions welcome
chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL'

if len(sys.argv) == 2:
token = sys.argv[1]
+if not token.startswith("hf_"):
+logger.info("Huggingface token seems invalid")
+logger.info("Usage: python convert-hf-to-gguf-update.py <huggingface_token>")
+sys.exit(1)
else:
-print("Usage: python convert-hf-to-gguf-update.py <huggingface_token>")
+logger.info("Usage: python convert-hf-to-gguf-update.py <huggingface_token>")
sys.exit(1)

# TODO: add models here, base models preferred
models = [
-{ "name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
+{"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", },
-{ "name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", },
+{"name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", },
-{ "name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", },
+{"name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", },
-{ "name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", },
+{"name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", },
-{ "name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", },
+{"name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", },
-{ "name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", },
+{"name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", },
-{ "name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", },
+{"name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", },
-{ "name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
+{"name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", },
-{ "name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
+{"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", },
-{ "name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
+{"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", },
-]
+{"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", },
+{"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", },
+{"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", },
+{"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
+{"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", },
+{"name": "jina-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM!
+{"name": "jina-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
+{"name": "jina-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
+]

# make directory "models/tokenizers" if it doesn't exist
if not os.path.exists("models/tokenizers"):
os.makedirs("models/tokenizers")


def download_file_with_auth(url, token, save_path):
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(url, headers=headers)
if response.status_code == 200:
with open(save_path, 'wb') as f:
f.write(response.content)
-print(f"File {save_path} downloaded successfully")
+logger.info(f"File {save_path} downloaded successfully")
else:
-print(f"Failed to download file. Status code: {response.status_code}")
+logger.info(f"Failed to download file. Status code: {response.status_code}")


# download the tokenizer models
for model in models:
@@ -81,10 +104,10 @@ for model in models:
if not os.path.exists(f"models/tokenizers/{name}"):
os.makedirs(f"models/tokenizers/{name}")
else:
-print(f"Directory models/tokenizers/{name} already exists - skipping")
+logger.info(f"Directory models/tokenizers/{name} already exists - skipping")
continue

-print(f"Downloading {name} to models/tokenizers/{name}")
+logger.info(f"Downloading {name} to models/tokenizers/{name}")

url = f"{repo}/raw/main/config.json"
save_path = f"models/tokenizers/{name}/config.json"
@@ -94,6 +117,14 @@ for model in models:
save_path = f"models/tokenizers/{name}/tokenizer.json"
download_file_with_auth(url, token, save_path)

+# if downloaded file is less than 1KB, we likely need to download an LFS instead
+if os.path.getsize(save_path) < 1024:
+# remove the file
+os.remove(save_path)
+url = f"{repo}/resolve/main/tokenizer.json"
+save_path = f"models/tokenizers/{name}/tokenizer.json"
+download_file_with_auth(url, token, save_path)

if tokt == TOKENIZER_TYPE.SPM:
url = f"{repo}/resolve/main/tokenizer.model"
save_path = f"models/tokenizers/{name}/tokenizer.model"
@@ -114,77 +145,96 @@ for model in models:
if tokt == TOKENIZER_TYPE.SPM:
continue

+# Skip if the tokenizer folder does not exist or there are other download issues previously
+if not os.path.exists(f"models/tokenizers/{name}"):
+logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
+continue

# create the tokenizer
-from transformers import AutoTokenizer
+try:
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
+except OSError as e:
+logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
+continue  # Skip to the next model if the tokenizer can't be loaded

chktok = tokenizer.encode(chktxt)
chkhsh = sha256(str(chktok).encode()).hexdigest()

-print(f"model: {name}")
+logger.info(f"model: {name}")
-print(f"tokt: {tokt}")
+logger.info(f"tokt: {tokt}")
-print(f"repo: {model['repo']}")
+logger.info(f"repo: {model['repo']}")
-print(f"chktok: {chktok}")
+logger.info(f"chktok: {chktok}")
-print(f"chkhsh: {chkhsh}")
+logger.info(f"chkhsh: {chkhsh}")

# print the "pre_tokenizer" content from the tokenizer.json
-with open(f"models/tokenizers/{name}/tokenizer.json", "r") as f:
+with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f:
cfg = json.load(f)
+normalizer = cfg["normalizer"]
+logger.info("normalizer: " + json.dumps(normalizer, indent=4))
pre_tokenizer = cfg["pre_tokenizer"]
-print("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
+logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
+if "ignore_merges" in cfg["model"]:
+logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4))

-print(f"\n")
+logger.info("")

src_ifs += f" if chkhsh == \"{chkhsh}\":\n"
src_ifs += f" # ref: {model['repo']}\n"
src_ifs += f" res = \"{name}\"\n"

-src_func = ""
+src_func = f"""
-src_func += " def get_vocab_base_pre(self, tokenizer) -> str:\n"
+def get_vocab_base_pre(self, tokenizer) -> str:
-src_func += " # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that\n"
+# encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that
-src_func += " # is specific for the BPE pre-tokenizer used by the model\n"
+# is specific for the BPE pre-tokenizer used by the model
-src_func += " # we will use this unique identifier to write a \"tokenizer.ggml.pre\" entry in the GGUF file which we can\n"
+# we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can
-src_func += " # use in llama.cpp to implement the same pre-tokenizer\n"
+# use in llama.cpp to implement the same pre-tokenizer
-src_func += "\n"
-src_func += f" chktxt = {repr(chktxt)}\n"
-src_func += "\n"
-src_func += " chktok = tokenizer.encode(chktxt)\n"
-src_func += " chkhsh = sha256(str(chktok).encode()).hexdigest()\n"
-src_func += "\n"
-src_func += " print(f\"chktok: {chktok}\")\n"
-src_func += " print(f\"chkhsh: {chkhsh}\")\n"
-src_func += "\n"
-src_func += " res = None\n"
-src_func += "\n"
-src_func += " # NOTE: if you get an error here, you need to add the model to the if-elif chain below\n"
-src_func += " # don't do this manually - use the convert-hf-to-gguf-update.py script!\n"
-src_func += f"{src_ifs}\n"
-src_func += " if res is None:\n"
-src_func += " print(\"\\n\")\n"
-src_func += " print(\"**************************************************************************************\")\n"
-src_func += " print(\"** WARNING: The BPE pre-tokenizer was not recognized!\")\n"
-src_func += " print(\"** This means that it was not added yet or you are using an older version.\")\n"
-src_func += " print(\"** Check convert-hf-to-gguf-update.py and update it accordingly.\")\n"
-src_func += " print(\"**\")\n"
-src_func += " print(f\"** chkhsh: {chkhsh}\")\n"
-src_func += " print(\"**************************************************************************************\")\n"
-src_func += " print(\"\\n\")\n"
-src_func += " raise NotImplementedError(\"BPE pre-tokenizer was not recognized - update get_vocab_base_pre()\")\n"
-src_func += "\n"
-src_func += " print(f\"tokenizer.ggml.pre: {res}\")\n"
-src_func += " print(f\"chkhsh: {chkhsh}\")\n"
-src_func += "\n"
-src_func += " return res\n"

-print(src_func)
+chktxt = {repr(chktxt)}

-print("\n")
+chktok = tokenizer.encode(chktxt)
-print("!!! Copy-paste the function above into convert-hf-to-gguf.py !!!")
+chkhsh = sha256(str(chktok).encode()).hexdigest()
-print("\n")
+logger.debug(f"chktok: {{chktok}}")
+logger.debug(f"chkhsh: {{chkhsh}}")

+res = None
|
||||||
|
|
||||||
|
# NOTE: if you get an error here, you need to update the convert-hf-to-gguf-update.py script
|
||||||
|
# or pull the latest version of the model from Huggingface
|
||||||
|
# don't edit the hashes manually!
|
||||||
|
{src_ifs}
|
||||||
|
if res is None:
|
||||||
|
logger.warning("\\n")
|
||||||
|
logger.warning("**************************************************************************************")
|
||||||
|
logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
|
||||||
|
logger.warning("** There are 2 possible reasons for this:")
|
||||||
|
logger.warning("** - the model has not been added to convert-hf-to-gguf-update.py yet")
|
||||||
|
logger.warning("** - the pre-tokenization config has changed upstream")
|
||||||
|
logger.warning("** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.")
|
||||||
|
logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
|
||||||
|
logger.warning("**")
|
||||||
|
logger.warning(f"** chkhsh: {{chkhsh}}")
|
||||||
|
logger.warning("**************************************************************************************")
|
||||||
|
logger.warning("\\n")
|
||||||
|
raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()")
|
||||||
|
|
||||||
|
logger.debug(f"tokenizer.ggml.pre: {{repr(res)}}")
|
||||||
|
logger.debug(f"chkhsh: {{chkhsh}}")
|
||||||
|
|
||||||
|
return res
|
||||||
|
"""
|
||||||
|
|
||||||
|
print(src_func) # noqa: NP100
|
||||||
|
|
||||||
|
logger.info("\n")
|
||||||
|
logger.info("!!! Copy-paste the function above into convert-hf-to-gguf.py !!!")
|
||||||
|
logger.info("\n")
|
||||||
|
|
||||||
# generate tests for each tokenizer model
|
# generate tests for each tokenizer model
|
||||||
|
|
||||||
tests = [
|
tests = [
|
||||||
|
"ied 4 ½ months",
|
||||||
|
"Führer",
|
||||||
"",
|
"",
|
||||||
" ",
|
" ",
|
||||||
" ",
|
" ",
|
||||||
@ -225,6 +275,7 @@ tests = [
|
|||||||
"3333333",
|
"3333333",
|
||||||
"33333333",
|
"33333333",
|
||||||
"333333333",
|
"333333333",
|
||||||
|
# "Cửa Việt", # llama-bpe fails on this
|
||||||
chktxt,
|
chktxt,
|
||||||
]
|
]
|
||||||
|
|
||||||
@ -245,11 +296,19 @@ for model in models:
|
|||||||
name = model["name"]
|
name = model["name"]
|
||||||
tokt = model["tokt"]
|
tokt = model["tokt"]
|
||||||
|
|
||||||
# create the tokenizer
|
# Skip if the tokenizer folder does not exist or there are other download issues previously
|
||||||
from transformers import AutoTokenizer
|
if not os.path.exists(f"models/tokenizers/{name}"):
|
||||||
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
|
logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
|
||||||
|
continue
|
||||||
|
|
||||||
with open(f"models/ggml-vocab-{name}.gguf.inp", "w") as f:
|
# create the tokenizer
|
||||||
|
try:
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
|
||||||
|
except OSError as e:
|
||||||
|
logger.error(f"Failed to load tokenizer for model {name}. Error: {e}")
|
||||||
|
continue # Skip this model and continue with the next one in the loop
|
||||||
|
|
||||||
|
with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f:
|
||||||
for text in tests:
|
for text in tests:
|
||||||
f.write(f"{text}")
|
f.write(f"{text}")
|
||||||
f.write("\n__ggml_vocab_test__\n")
|
f.write("\n__ggml_vocab_test__\n")
|
||||||
@ -261,15 +320,15 @@ for model in models:
|
|||||||
f.write(f" {r}")
|
f.write(f" {r}")
|
||||||
f.write("\n")
|
f.write("\n")
|
||||||
|
|
||||||
print(f"Tests for {name} written in ./models/ggml-vocab-{name}.gguf.*")
|
logger.info(f"Tests for {name} written in ./models/ggml-vocab-{name}.gguf.*")
|
||||||
|
|
||||||
# generate commands for creating vocab files
|
# generate commands for creating vocab files
|
||||||
|
|
||||||
print("\nRun the following commands to generate the vocab files for testing:\n")
|
logger.info("\nRun the following commands to generate the vocab files for testing:\n")
|
||||||
|
|
||||||
for model in models:
|
for model in models:
|
||||||
name = model["name"]
|
name = model["name"]
|
||||||
|
|
||||||
print(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only")
|
print(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only") # noqa: NP100
|
||||||
|
|
||||||
print("\n")
|
logger.info("\n")
|
||||||
|
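The update script above fingerprints each BPE pre-tokenizer by hashing the token IDs produced for a fixed check string; matching hashes later select the right `tokenizer.ggml.pre` value. A minimal sketch of the same idea, assuming a tokenizer has already been downloaded under `models/tokenizers/` (the directory name and check string below are placeholders, not taken from the PR):

```python
from hashlib import sha256
from transformers import AutoTokenizer

def pre_tokenizer_fingerprint(tokenizer_dir: str, chktxt: str) -> str:
    # encode the check string and hash the resulting token IDs;
    # two tokenizers that pre-tokenize identically produce the same digest
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir)
    chktok = tokenizer.encode(chktxt)
    return sha256(str(chktok).encode()).hexdigest()

# hypothetical usage: compare the digest against the hashes hard-coded in convert-hf-to-gguf.py
print(pre_tokenizer_fingerprint("models/tokenizers/llama-bpe", "Hello\n\n 3 33 333"))
```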
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 from __future__ import annotations

+import logging
 import argparse
 import os
 import struct
@@ -14,6 +15,8 @@ if 'NO_LOCAL_GGUF' not in os.environ:
     sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
 import gguf

+logger = logging.getLogger("ggml-to-gguf")
+

 class GGMLFormat(IntEnum):
     GGML = 0
@@ -125,7 +128,6 @@ class Tensor:
         self.start_offset = offset
         self.len_bytes = n_bytes
         offset += n_bytes
-        # print(n_dims, name_len, dtype, self.dims, self.name, pad)
         return offset - orig_offset

@@ -175,7 +177,7 @@ class GGMLModel:
         offset += self.validate_header(data, offset)
         hp = Hyperparameters()
         offset += hp.load(data, offset)
-        print(f'* File format: {self.file_format.name}v{self.format_version} with ftype {hp.ftype.name}')
+        logger.info(f'* File format: {self.file_format.name}v{self.format_version} with ftype {hp.ftype.name}')
         self.validate_conversion(hp.ftype)
         vocab = Vocab(load_scores = self.file_format > GGMLFormat.GGML)
         offset += vocab.load(data, offset, hp.n_vocab)
@@ -215,12 +217,12 @@ class GGMLToGGUF:
                 if float(hp.n_head) / float(x) == gqa:
                     n_kv_head = x
             assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param"
-            print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}')
+            logger.info(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}')
         self.n_kv_head = n_kv_head
         self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, ggml_model.hyperparameters.n_layer)

     def save(self):
-        print('* Preparing to save GGUF file')
+        logger.info('* Preparing to save GGUF file')
         gguf_writer = gguf.GGUFWriter(
             self.cfg.output,
             gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA],
@@ -230,11 +232,11 @@ class GGMLToGGUF:
         if self.special_vocab is not None:
             self.special_vocab.add_to_gguf(gguf_writer)
         self.add_tensors(gguf_writer)
-        print("    gguf: write header")
+        logger.info("    gguf: write header")
         gguf_writer.write_header_to_file()
-        print("    gguf: write metadata")
+        logger.info("    gguf: write metadata")
         gguf_writer.write_kv_data_to_file()
-        print("    gguf: write tensors")
+        logger.info("    gguf: write tensors")
         gguf_writer.write_tensors_to_file()
         gguf_writer.close()

@@ -250,7 +252,7 @@ class GGMLToGGUF:
             name = cfg.name if cfg.name is not None else cfg.input.name
         except UnicodeDecodeError:
             name = None
-        print('* Adding model parameters and KV items')
+        logger.info('* Adding model parameters and KV items')
         if name is not None:
             gguf_writer.add_name(name)
         gguf_writer.add_description(desc)
@@ -287,7 +289,7 @@ class GGMLToGGUF:
         toktypes = []
         if self.vocab_override is not None:
             vo = self.vocab_override
-            print('* Adding vocab item(s)')
+            logger.info('* Adding vocab item(s)')
             for (idx, (vbytes, score, ttype)) in enumerate(vo.all_tokens()):
                 tokens.append(vbytes)
                 scores.append(score)
@@ -299,7 +301,7 @@ class GGMLToGGUF:
             if len(toktypes) > 0:
                 gguf_writer.add_token_types(toktypes)
             return
-        print(f'* Adding {hp.n_vocab} vocab item(s)')
+        logger.info(f'* Adding {hp.n_vocab} vocab item(s)')
         assert len(self.model.vocab.items) >= 3, 'Cannot handle unexpectedly short model vocab'
         for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items):
             tt = 1 # Normal
@@ -334,7 +336,7 @@ class GGMLToGGUF:
     def add_tensors(self, gguf_writer):
         tensor_map = self.name_map
         data = self.data
-        print(f'* Adding {len(self.model.tensors)} tensor(s)')
+        logger.info(f'* Adding {len(self.model.tensors)} tensor(s)')
         for tensor in self.model.tensors:
             name = str(tensor.name, 'UTF-8')
             mapped_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias"))
@@ -344,7 +346,6 @@ class GGMLToGGUF:
                 temp = tempdims[1]
                 tempdims[1] = tempdims[0]
                 tempdims[0] = temp
-            # print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}')
             gguf_writer.add_tensor(
                 mapped_name,
                 data[tensor.start_offset:tensor.start_offset + tensor.len_bytes],
@@ -401,33 +402,35 @@ def handle_args():
         help="directory containing tokenizer.model, if separate from model file - only meaningful with --model-metadata-dir")
     parser.add_argument("--vocabtype", default="spm,hfft",
         help="vocab format - only meaningful with --model-metadata-dir and/or --vocab-dir (default: spm,hfft)")
+    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
     return parser.parse_args()


 def main():
     cfg = handle_args()
-    print(f'* Using config: {cfg}')
-    print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n')
+    logging.basicConfig(level=logging.DEBUG if cfg.verbose else logging.INFO)
+    logger.info(f'* Using config: {cfg}')
+    logger.warning('=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===')
     if cfg.model_metadata_dir is None and (cfg.gqa == 1 or cfg.eps == '5.0e-06'):
-        print('- Note: If converting LLaMA2, specifying "--eps 1e-5" is required. 70B models also need "--gqa 8".')
+        logger.info('- Note: If converting LLaMA2, specifying "--eps 1e-5" is required. 70B models also need "--gqa 8".')
     data = np.memmap(cfg.input, mode = 'r')
     model = GGMLModel()
-    print('* Scanning GGML input file')
+    logger.info('* Scanning GGML input file')
     offset = model.load(data, 0) # noqa
-    print(f'* GGML model hyperparameters: {model.hyperparameters}')
+    logger.info(f'* GGML model hyperparameters: {model.hyperparameters}')
     vocab_override = None
     params_override = None
     special_vocab = None
     if cfg.model_metadata_dir is not None:
         (params_override, vocab_override, special_vocab) = handle_metadata(cfg, model.hyperparameters)
-        print('!! Note: When overriding params the --gqa, --eps and --context-length options are ignored.')
-        print(f'* Overriding params: {params_override}')
-        print(f'* Overriding vocab: {vocab_override}')
-        print(f'* Special vocab: {special_vocab}')
+        logger.info('!! Note: When overriding params the --gqa, --eps and --context-length options are ignored.')
+        logger.info(f'* Overriding params: {params_override}')
+        logger.info(f'* Overriding vocab: {vocab_override}')
+        logger.info(f'* Special vocab: {special_vocab}')
     else:
-        print('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n')
+        logger.warning('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n')
         if model.file_format == GGMLFormat.GGML:
-            print('! This is a very old GGML file that does not contain vocab scores. Strongly recommend using model metadata!')
+            logger.info('! This is a very old GGML file that does not contain vocab scores. Strongly recommend using model metadata!')
     converter = GGMLToGGUF(
         model, data, cfg,
         params_override = params_override,
@@ -435,7 +438,7 @@ def main():
         special_vocab = special_vocab
     )
     converter.save()
-    print(f'* Successful completion. Output saved to: {cfg.output}')
+    logger.info(f'* Successful completion. Output saved to: {cfg.output}')


 if __name__ == '__main__':
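The hunk above keeps the existing GQA heuristic: `n_kv_head` is recovered by scanning for a head count whose ratio to `n_head` matches the `--gqa` parameter. A small self-contained sketch of that guess, with the function name chosen here purely for illustration:

```python
def guess_n_kv_head(n_head: int, gqa: float) -> int:
    # scan candidate KV head counts and keep the one whose ratio matches the GQA parameter,
    # mirroring the converter's check `float(hp.n_head) / float(x) == gqa`
    n_kv_head = None
    for x in range(1, n_head + 1):
        if float(n_head) / float(x) == gqa:
            n_kv_head = x
    assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param"
    return n_kv_head

# e.g. a 64-head model converted with --gqa 8 resolves to 8 KV heads
print(guess_n_kv_head(64, 8.0))
```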
@@ -1,148 +0,0 @@
-#!/usr/bin/env python3
-from __future__ import annotations
-
-import json
-import os
-import struct
-import sys
-from pathlib import Path
-from typing import Any, BinaryIO, Sequence
-
-import numpy as np
-import torch
-
-if 'NO_LOCAL_GGUF' not in os.environ:
-    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
-import gguf
-
-NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1}
-
-
-def write_file_header(fout: BinaryIO, params: dict[str, Any]) -> None:
-    fout.write(b"ggla"[::-1]) # magic (ggml lora)
-    fout.write(struct.pack("i", 1)) # file version
-    fout.write(struct.pack("i", params["r"]))
-    # https://opendelta.readthedocs.io/en/latest/modules/deltas.html says that `lora_alpha` is an int
-    # but some models ship a float value instead
-    # let's convert to int, but fail if lossless conversion is not possible
-    assert (
-        int(params["lora_alpha"]) == params["lora_alpha"]
-    ), "cannot convert float to int losslessly"
-    fout.write(struct.pack("i", int(params["lora_alpha"])))
-
-
-def write_tensor_header(fout: BinaryIO, name: str, shape: Sequence[int], data_type: np.dtype[Any]) -> None:
-    sname = name.encode("utf-8")
-    fout.write(
-        struct.pack(
-            "iii",
-            len(shape),
-            len(sname),
-            NUMPY_TYPE_TO_FTYPE[data_type.name],
-        )
-    )
-    fout.write(struct.pack("i" * len(shape), *shape[::-1]))
-    fout.write(sname)
-    fout.seek((fout.tell() + 31) & -32)
-
-
-if __name__ == '__main__':
-    if len(sys.argv) < 2:
-        print(f"Usage: python {sys.argv[0]} <path> [arch]")
-        print(
-            "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
-        )
-        print(f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)")
-        sys.exit(1)
-
-    input_json = os.path.join(sys.argv[1], "adapter_config.json")
-    input_model = os.path.join(sys.argv[1], "adapter_model.bin")
-    output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin")
-
-    if os.path.exists(input_model):
-        model = torch.load(input_model, map_location="cpu")
-    else:
-        input_model = os.path.join(sys.argv[1], "adapter_model.safetensors")
-        # lazy import load_file only if lora is in safetensors format.
-        from safetensors.torch import load_file
-        model = load_file(input_model, device="cpu")
-
-    arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
-
-    if arch_name not in gguf.MODEL_ARCH_NAMES.values():
-        print(f"Error: unsupported architecture {arch_name}")
-        sys.exit(1)
-
-    arch = list(gguf.MODEL_ARCH_NAMES.keys())[list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)]
-    name_map = gguf.TensorNameMap(arch, 200) # 200 layers ought to be enough for anyone
-
-    with open(input_json, "r") as f:
-        params = json.load(f)
-
-    if params["peft_type"] != "LORA":
-        print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
-        sys.exit(1)
-
-    if params["fan_in_fan_out"] is True:
-        print("Error: param fan_in_fan_out is not supported")
-        sys.exit(1)
-
-    if params["bias"] is not None and params["bias"] != "none":
-        print("Error: param bias is not supported")
-        sys.exit(1)
-
-    # TODO: these seem to be layers that have been trained but without lora.
-    # doesn't seem widely used but eventually should be supported
-    if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0:
-        print("Error: param modules_to_save is not supported")
-        sys.exit(1)
-
-    with open(output_path, "wb") as fout:
-        fout.truncate()
-
-        write_file_header(fout, params)
-        for k, v in model.items():
-            orig_k = k
-            if k.endswith(".default.weight"):
-                k = k.replace(".default.weight", ".weight")
-            if k in ["llama_proj.weight", "llama_proj.bias"]:
-                continue
-            if k.endswith("lora_A.weight"):
-                if v.dtype != torch.float16 and v.dtype != torch.float32:
-                    v = v.float()
-                v = v.T
-            else:
-                v = v.float()
-
-            t = v.detach().numpy()
-
-            prefix = "base_model.model."
-            if k.startswith(prefix):
-                k = k[len(prefix) :]
-
-            lora_suffixes = (".lora_A.weight", ".lora_B.weight")
-            if k.endswith(lora_suffixes):
-                suffix = k[-len(lora_suffixes[0]):]
-                k = k[: -len(lora_suffixes[0])]
-            else:
-                print(f"Error: unrecognized tensor name {orig_k}")
-                sys.exit(1)
-
-            tname = name_map.get_name(k)
-            if tname is None:
-                print(f"Error: could not map tensor name {orig_k}")
-                print(" Note: the arch parameter must be specified if the model is not llama")
-                sys.exit(1)
-
-            if suffix == ".lora_A.weight":
-                tname += ".weight.loraA"
-            elif suffix == ".lora_B.weight":
-                tname += ".weight.loraB"
-            else:
-                assert False
-
-            print(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
-            write_tensor_header(fout, tname, t.shape, t.dtype)
-            t.tofile(fout)
-
-    print(f"Converted {input_json} and {input_model} to {output_path}")
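The removed script wrote a small binary header (the reversed "ggla" magic, a file version, the LoRA rank `r`, and `lora_alpha`) in front of the tensor data. For reference, a hedged sketch of reading that header back with the same struct layout; the field order follows the pack calls above, and the function name is illustrative only:

```python
import struct
from typing import BinaryIO

def read_ggla_header(fin: BinaryIO) -> dict:
    # the writer emitted b"ggla"[::-1], then three native int32 values:
    # file version, LoRA rank r, and lora_alpha (converted to int)
    magic = fin.read(4)
    assert magic == b"ggla"[::-1], "not a ggla adapter file"
    version, r, lora_alpha = struct.unpack("iii", fin.read(12))
    return {"version": version, "r": r, "lora_alpha": lora_alpha}

# hypothetical usage:
# with open("ggml-adapter-model.bin", "rb") as f:
#     print(read_ggla_header(f))
```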
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 from __future__ import annotations

+import logging
 import argparse
 import os
 import sys
@@ -14,6 +15,8 @@ if 'NO_LOCAL_GGUF' not in os.environ:
     sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
 import gguf

+logger = logging.getLogger("persimmon-to-gguf")
+

 def _flatten_dict(dct, tensors, prefix=None):
     assert isinstance(dct, dict)
@@ -30,9 +33,9 @@ def _flatten_dict(dct, tensors, prefix=None):

 def _get_sentencepiece_tokenizer_info(dir_model: Path):
     tokenizer_path = dir_model / 'adept_vocab.model'
-    print('gguf: getting sentencepiece tokenizer from', tokenizer_path)
+    logger.info('getting sentencepiece tokenizer from', tokenizer_path)
     tokenizer = SentencePieceProcessor(str(tokenizer_path))
-    print('gguf: adding tokens')
+    logger.info('adding tokens')
     tokens: list[bytes] = []
     scores: list[float] = []
     toktypes: list[int] = []
@@ -67,8 +70,10 @@ def main():
     parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
     parser.add_argument("--ckpt-path", type=Path, help="path to persimmon checkpoint .pt file")
     parser.add_argument("--model-dir", type=Path, help="directory containing model e.g. 8b_chat_model_release")
     parser.add_argument("--adept-inference-dir", type=str, help="path to adept-inference code directory")
+    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
     args = parser.parse_args()
+    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
     sys.path.append(str(args.adept_inference_dir))
     persimmon_model = torch.load(args.ckpt_path)
     hparams = persimmon_model['args']
@@ -107,7 +112,7 @@ def main():
     gguf_writer.add_eos_token_id(71013)

     tensor_map = gguf.get_tensor_name_map(arch, block_count)
-    print(tensor_map)
+    logger.info(tensor_map)
     for name in tensors.keys():
         data_torch = tensors[name]
         if name.endswith(".self_attention.rotary_emb.inv_freq"):
@@ -117,22 +122,21 @@ def main():
         data = data_torch.to(torch.float32).squeeze().numpy()
         new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias"))
         if new_name is None:
-            print("Can not map tensor '" + name + "'")
-            sys.exit()
+            raise ValueError(f"Can not map tensor '{name}'")
         n_dims = len(data.shape)
-        print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))
+        logger.debug(f"{new_name}, n_dims = {str(n_dims)}, {str(old_dtype)} --> {str(data.dtype)}")
         gguf_writer.add_tensor(new_name, data)
-    print("gguf: write header")
+    logger.info("gguf: write header")
     gguf_writer.write_header_to_file()
-    print("gguf: write metadata")
+    logger.info("gguf: write metadata")
     gguf_writer.write_kv_data_to_file()
-    print("gguf: write tensors")
+    logger.info("gguf: write tensors")
     gguf_writer.write_tensors_to_file()

     gguf_writer.close()

-    print(f"gguf: model successfully exported to '{args.outfile}'")
-    print("")
+    logger.info(f"gguf: model successfully exported to '{args.outfile}'")


 if __name__ == '__main__':

convert.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 from __future__ import annotations

+import logging
 import argparse
 import concurrent.futures
 import enum
@@ -35,6 +36,8 @@ import gguf
 if TYPE_CHECKING:
     from typing_extensions import Self, TypeAlias

+logger = logging.getLogger("convert")
+
 if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
     faulthandler.register(signal.SIGUSR1)

@@ -281,6 +284,7 @@ class Params:
         n_experts = None
         n_experts_used = None
         f_rope_freq_base = None
+        n_ff = None

         # hack to determine LLaMA v1 vs v2 vs CodeLlama
         if config.get("moe"):
@@ -305,6 +309,8 @@ class Params:
             n_experts_used = config["moe"]["num_experts_per_tok"]
             f_rope_freq_base = 1e6

+        assert n_ff is not None
+
         return Params(
             n_vocab = model["tok_embeddings.weight"].shape[0],
             n_embd = config["dim"],
@@ -459,7 +465,8 @@ class SentencePieceVocab(Vocab):
                 # not found in alternate location either
                 raise FileNotFoundError('Cannot find tokenizer.model')

-        self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
+        self.sentencepiece_tokenizer = SentencePieceProcessor()
+        self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer))
        vocab_size = self.sentencepiece_tokenizer.vocab_size()

        new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
@@ -479,23 +486,23 @@ class SentencePieceVocab(Vocab):
     def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
         tokenizer = self.sentencepiece_tokenizer
         for i in range(tokenizer.vocab_size()):
-            piece = tokenizer.id_to_piece(i)
+            piece = tokenizer.IdToPiece(i)
             text = piece.encode("utf-8")
-            score: float = tokenizer.get_score(i)
+            score: float = tokenizer.GetScore(i)

             toktype = gguf.TokenType.NORMAL
-            if tokenizer.is_unknown(i):
+            if tokenizer.IsUnknown(i):
                 toktype = gguf.TokenType.UNKNOWN
-            if tokenizer.is_control(i):
+            if tokenizer.IsControl(i):
                 toktype = gguf.TokenType.CONTROL

             # NOTE: I think added_tokens are user defined.
             # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
             # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED

-            if tokenizer.is_unused(i):
+            if tokenizer.IsUnused(i):
                 toktype = gguf.TokenType.UNUSED
-            if tokenizer.is_byte(i):
+            if tokenizer.IsByte(i):
                 toktype = gguf.TokenType.BYTE

             yield text, score, toktype
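The vocab hunk above moves to the CamelCase SentencePieceProcessor API (`LoadFromFile`, `IdToPiece`, `GetScore`, `Is*`). A condensed sketch of iterating a SentencePiece vocab with those calls, assuming the `sentencepiece` package is installed; the model path is a placeholder:

```python
from sentencepiece import SentencePieceProcessor

sp = SentencePieceProcessor()
sp.LoadFromFile("tokenizer.model")  # placeholder path

for i in range(sp.vocab_size()):
    piece = sp.IdToPiece(i)
    score = sp.GetScore(i)
    # classify the token the same way the converter does
    if sp.IsUnknown(i):
        kind = "unknown"
    elif sp.IsControl(i):
        kind = "control"
    elif sp.IsUnused(i):
        kind = "unused"
    elif sp.IsByte(i):
        kind = "byte"
    else:
        kind = "normal"
    print(i, piece, score, kind)
```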
@@ -643,7 +650,6 @@ class LlamaHfVocab(Vocab):


 def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray:
-    # print( "permute debug " + str(weights.shape[0]) + " x " + str(weights.shape[1]) + " nhead " + str(n_head) + " nheadkv " + str(n_kv_head) )
     if n_head_kv is not None and n_head != n_head_kv:
         n_head = n_head_kv
     return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
@@ -904,7 +910,7 @@ class LazyUnpickler(pickle.Unpickler):
     def rebuild_from_type_v2(func, new_type, args, state):
         return func(*args)

-    CLASSES = {
+    CLASSES: dict[tuple[str, str], type[LazyTensor] | LazyStorageKind] = {
         # getattr used here as a workaround for mypy not being smart enough to determine
         # the staticmethods have a __func__ attribute.
         ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'),
@@ -1033,12 +1039,12 @@ def check_vocab_size(params: Params, vocab: BaseVocab, pad_vocab: bool = False)

     # Check for a vocab size mismatch
     if params.n_vocab == vocab.vocab_size:
-        print("Ignoring added_tokens.json since model matches vocab size without it.")
+        logger.warning("Ignoring added_tokens.json since model matches vocab size without it.")
         return

     if pad_vocab and params.n_vocab > vocab.vocab_size:
         pad_count = params.n_vocab - vocab.vocab_size
-        print(
+        logger.debug(
             f"Padding vocab with {pad_count} token(s) - <dummy00001> through <dummy{pad_count:05}>"
         )
         for i in range(1, pad_count + 1):
@@ -1166,7 +1172,7 @@ class OutputFile:
             elapsed = time.time() - start
             size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
             padi = len(str(len(model)))
-            print(
+            logger.info(
                 f"[{i + 1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type.name:4} | T+{int(elapsed):4}"
             )
             self.gguf.write_tensor_data(ndarray)
@@ -1281,12 +1287,12 @@ def convert_model_names(model: LazyModel, params: Params, skip_unknown: bool) ->
     # HF models permut or pack some of the tensors, so we need to undo that
     for i in itertools.count():
         if f"model.layers.{i}.self_attn.q_proj.weight" in model:
-            print(f"Permuting layer {i}")
+            logger.debug(f"Permuting layer {i}")
             tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head, params.n_head)
             tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_head_kv)
             # tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
         elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
-            print(f"Unpacking and permuting layer {i}")
+            logger.debug(f"Unpacking and permuting layer {i}")
             tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head)
             tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv)
             tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
@@ -1299,15 +1305,15 @@ def convert_model_names(model: LazyModel, params: Params, skip_unknown: bool) ->
         tensor_type, name_new = tmap.get_type_and_name(name, try_suffixes = (".weight", ".bias")) or (None, None)
         if name_new is None:
             if skip_unknown:
-                print(f"Unexpected tensor name: {name} - skipping")
+                logger.warning(f"Unexpected tensor name: {name} - skipping")
                 continue
             raise ValueError(f"Unexpected tensor name: {name}. Use --skip-unknown to ignore it (e.g. LLaVA)")

         if tensor_type in should_skip:
-            print(f"skipping tensor {name_new}")
+            logger.debug(f"skipping tensor {name_new}")
             continue

-        print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}")
+        logger.debug(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}")
         out[name_new] = lazy_tensor

     return out
@@ -1372,7 +1378,7 @@ def load_some_model(path: Path) -> ModelPlus:
     paths = find_multifile_paths(path)
     models_plus: list[ModelPlus] = []
     for path in paths:
-        print(f"Loading model file {path}")
+        logger.info(f"Loading model file {path}")
         models_plus.append(lazy_load_file(path))

     model_plus = merge_multifile_models(models_plus)
@@ -1413,7 +1419,7 @@ class VocabFactory:
         else:
             raise FileNotFoundError(f"Could not find a tokenizer matching any of {vocab_types}")

-        print(f"Loaded vocab file {vocab.fname_tokenizer!r}, type {vocab.name!r}")
+        logger.info(f"Loaded vocab file {vocab.fname_tokenizer!r}, type {vocab.name!r}")
         return vocab

     def load_vocab(self, vocab_types: list[str] | None, model_parent_path: Path) -> tuple[BaseVocab, gguf.SpecialVocab]:
@@ -1438,19 +1444,19 @@ def default_outfile(model_paths: list[Path], file_type: GGMLFileType) -> Path:
     }[file_type]
     ret = model_paths[0].parent / f"ggml-model-{namestr}.gguf"
     if ret in model_paths:
-        sys.stderr.write(
+        logger.error(
             f"Error: Default output path ({ret}) would overwrite the input. "
-            "Please explicitly specify a path using --outfile.\n")
+            "Please explicitly specify a path using --outfile.")
         sys.exit(1)
     return ret


 def do_dump_model(model_plus: ModelPlus) -> None:
-    print(f"model_plus.paths = {model_plus.paths!r}")
-    print(f"model_plus.format = {model_plus.format!r}")
-    print(f"model_plus.vocab = {model_plus.vocab!r}")
+    print(f"model_plus.paths = {model_plus.paths!r}") # noqa: NP100
+    print(f"model_plus.format = {model_plus.format!r}") # noqa: NP100
+    print(f"model_plus.vocab = {model_plus.vocab!r}") # noqa: NP100
     for name, lazy_tensor in model_plus.model.items():
-        print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}")
+        print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}") # noqa: NP100


 def main(args_in: list[str] | None = None) -> None:
@@ -1473,8 +1479,18 @@ def main(args_in: list[str] | None = None) -> None:
     parser.add_argument("--big-endian", action="store_true", help="model is executed on big endian machine")
     parser.add_argument("--pad-vocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides")
     parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing")
+    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")

     args = parser.parse_args(args_in)

+    if args.verbose:
+        logging.basicConfig(level=logging.DEBUG)
+    elif args.dump_single or args.dump:
+        # Avoid printing anything besides the dump output
+        logging.basicConfig(level=logging.WARNING)
+    else:
+        logging.basicConfig(level=logging.INFO)
+
     if args.no_vocab and args.vocab_only:
         raise ValueError("--vocab-only does not make sense with --no-vocab")

@@ -1491,29 +1507,32 @@ def main(args_in: list[str] | None = None) -> None:
     if args.dump:
         do_dump_model(model_plus)
         return

     endianess = gguf.GGUFEndian.LITTLE
     if args.big_endian:
         endianess = gguf.GGUFEndian.BIG

-    params = Params.load(model_plus)
-    if params.n_ctx == -1:
-        if args.ctx is None:
-            msg = """\
-The model doesn't have a context size, and you didn't specify one with --ctx
-Please specify one with --ctx:
- - LLaMA v1: --ctx 2048
- - LLaMA v2: --ctx 4096"""
-            parser.error(textwrap.dedent(msg))
-        params.n_ctx = args.ctx
+    params = None
+    if args.pad_vocab or not args.vocab_only:
+        params = Params.load(model_plus)
+        if params.n_ctx == -1:
+            if args.ctx is None:
+                msg = """\
+The model doesn't have a context size, and you didn't specify one with --ctx
+Please specify one with --ctx:
+ - LLaMA v1: --ctx 2048
+ - LLaMA v2: --ctx 4096"""
+                parser.error(textwrap.dedent(msg))
+            params.n_ctx = args.ctx

     if args.outtype:
         params.ftype = {
             "f32": GGMLFileType.AllF32,
             "f16": GGMLFileType.MostlyF16,
             "q8_0": GGMLFileType.MostlyQ8_0,
         }[args.outtype]

-    print(f"params = {params}")
+    logger.info(f"params = {params}")

     model_parent_path = model_plus.paths[0].parent
     vocab_path = Path(args.vocab_dir or args.model or model_parent_path)
@@ -1526,17 +1545,27 @@ def main(args_in: list[str] | None = None) -> None:
         if not args.outfile:
             raise ValueError("need --outfile if using --vocab-only")
         outfile = args.outfile
+        if params is None:
+            params = Params(
+                n_vocab    = vocab.vocab_size,
+                n_embd     = 1,
+                n_layer    = 1,
+                n_ctx      = 1,
+                n_ff       = 1,
+                n_head     = 1,
+                n_head_kv  = 1,
+                f_norm_eps = 1e-5,
+            )
         OutputFile.write_vocab_only(outfile, params, vocab, special_vocab,
                                     endianess=endianess, pad_vocab=args.pad_vocab)
-        print(f"Wrote {outfile}")
+        logger.info(f"Wrote {outfile}")
         return

     if model_plus.vocab is not None and args.vocab_dir is None and not args.no_vocab:
         vocab = model_plus.vocab

-    print(f"Vocab info: {vocab}")
-    print(f"Special vocab info: {special_vocab}")
+    logger.info(f"Vocab info: {vocab}")
+    logger.info(f"Special vocab info: {special_vocab}")

     model   = model_plus.model
     model   = convert_model_names(model, params, args.skip_unknown)
     ftype   = pick_output_type(model, args.outtype)
@@ -1544,11 +1573,11 @@ def main(args_in: list[str] | None = None) -> None:
     outfile = args.outfile or default_outfile(model_plus.paths, ftype)

     params.ftype = ftype
-    print(f"Writing {outfile}, format {ftype}")
+    logger.info(f"Writing {outfile}, format {ftype}")

     OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab,
                          concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab)
-    print(f"Wrote {outfile}")
+    logger.info(f"Wrote {outfile}")


 if __name__ == '__main__':
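convert.py now picks its log level from the flags rather than printing unconditionally: --verbose selects DEBUG, a dump run stays at WARNING so only the dump output is shown, and everything else defaults to INFO. A minimal standalone sketch of that pattern (the argument names mirror the diff; the logger name and messages are illustrative):

```python
import argparse
import logging

logger = logging.getLogger("convert")

parser = argparse.ArgumentParser()
parser.add_argument("--dump", action="store_true")
parser.add_argument("--dump-single", action="store_true")
parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
args = parser.parse_args()

if args.verbose:
    logging.basicConfig(level=logging.DEBUG)
elif args.dump_single or args.dump:
    # avoid printing anything besides the dump output
    logging.basicConfig(level=logging.WARNING)
else:
    logging.basicConfig(level=logging.INFO)

logger.info("visible at INFO and DEBUG")
logger.debug("visible only with --verbose")
```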
@@ -23,7 +23,7 @@ Install BLIS:
 sudo make install
 ```

-We recommend using openmp since it's easier to modify the cores been used.
+We recommend using openmp since it's easier to modify the cores being used.

 ### llama.cpp compilation

@@ -96,9 +96,9 @@ NOTE: The dimensions in `ggml` are typically in the reverse order of the `pytorch` dimensions.

 This is the funniest part, you have to provide the inference graph implementation of the new model architecture in `llama_build_graph`.

-Have a look to existing implementation like `build_llama`, `build_dbrx` or `build_bert`.
+Have a look at existing implementation like `build_llama`, `build_dbrx` or `build_bert`.

-When implementing a new graph, please note that the underlying `ggml` backends might not support them all, support of missing backend operations can be added in another PR.
+When implementing a new graph, please note that the underlying `ggml` backends might not support them all, support for missing backend operations can be added in another PR.

 Note: to debug the inference graph: you can use [eval-callback](../examples/eval-callback).

docs/debugging-tests.md (new file)
@@ -0,0 +1,88 @@
+# Debugging Tests Tips
+
+## How to run & debug a specific test without anything else to keep the feedback loop short?
+
+There is a script called debug-test.sh in the scripts folder whose parameter takes a REGEX and an optional test number.
+
+For example, running the following command will output an interactive list from which you can select a test. It takes this form:
+
+`debug-test.sh [OPTION]... <test_regex> <test_number>`
+
+It will then build & run in the debugger for you.
+
+```bash
+./scripts/debug-test.sh test-tokenizer
+
+# Once in the debugger, i.e. at the chevrons prompt, setting a breakpoint could be as follows:
+>>> b main
+```
+
+For further reference use `debug-test.sh -h` to print help.
+
+### How does the script work?
+If you want to be able to use the concepts contained in the script separately, the important ones are briefly outlined below.
+
+#### Step 1: Reset and Setup folder context
+
+From base of this repository, let's create `build-ci-debug` as our build context.
+
+```bash
+rm -rf build-ci-debug && mkdir build-ci-debug && cd build-ci-debug
+```
+
+#### Step 2: Setup Build Environment and Compile Test Binaries
+
+Setup and trigger a build under debug mode. You may adapt the arguments as needed, but in this case these are sane defaults.
+
+```bash
+cmake -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON ..
+make -j
+```
+
+#### Step 3.1: Identify Test Command for Debugging
+
+The output of this command will give you the command & arguments needed to run GDB.
+
+* `-R test-tokenizer` : looks for all the test files named `test-tokenizer*` (R=Regex)
+* `-N` : "show-only" disables test execution & shows test commands that you can feed to GDB.
+* `-V` : Verbose Mode
+
+```bash
+ctest -R "test-tokenizer" -V -N
+```
+
+This may return output similar to below (focusing on key lines to pay attention to):
+
+```bash
+...
+1: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
+1: Working Directory: .
+Labels: main
+  Test #1: test-tokenizer-0-llama-spm
+...
+4: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-falcon.gguf"
+4: Working Directory: .
+Labels: main
+  Test #4: test-tokenizer-0-falcon
+...
+```
+
+So for test #1 we can tell these two pieces of relevant information:
+* Test Binary: `~/llama.cpp/build-ci-debug/bin/test-tokenizer-0`
+* Test GGUF Model: `~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf`
+
+#### Step 3.2: Run GDB on test command
+
+Based on the ctest 'test command' report above we can then run a gdb session via this command below:
+
+```bash
+gdb --args ${Test Binary} ${Test GGUF Model}
+```
+
+Example:
+
+```bash
+gdb --args ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
+```
@@ -32,7 +32,7 @@ int main(int argc, char ** argv) {
 gpt_params params;

 if (argc == 1 || argv[1][0] == '-') {
-printf("usage: %s MODEL_PATH [N_KV_MAX] [N_BATCH] [N_UBATCH] [IS_PP_SHARED] [NGL] <PP> <TG> <PL>\n" , argv[0]);
+printf("usage: %s MODEL_PATH [N_KV_MAX] [N_BATCH] [N_UBATCH] [FATTN] [IS_PP_SHARED] [NGL] <PP> <TG> <PL>\n" , argv[0]);
 printf(" <PP>, <TG> and PL are comma-separated lists of numbers without spaces\n\n");
 printf(" example: %s ggml-model-f16.gguf 2048 2048 512 0 999 128,256,512 128,256 1,2,4,8,16,32\n\n", argv[0]);
 return 1 ;
@@ -41,6 +41,7 @@ int main(int argc, char ** argv) {
 int n_kv_max = 2048;
 int n_batch = 2048;
 int n_ubatch = 512;
+bool flash_attn = false;
 int is_pp_shared = 0;
 int n_gpu_layers = 0;

@@ -66,23 +67,27 @@ int main(int argc, char ** argv) {
 }

 if (argc >= 6) {
-is_pp_shared = std::atoi(argv[5]);
+flash_attn = std::atoi(argv[5]);
 }

 if (argc >= 7) {
-n_gpu_layers = std::atoi(argv[6]);
+is_pp_shared = std::atoi(argv[6]);
 }

 if (argc >= 8) {
-n_pp = parse_list(argv[7]);
+n_gpu_layers = std::atoi(argv[7]);
 }

 if (argc >= 9) {
-n_tg = parse_list(argv[8]);
+n_pp = parse_list(argv[8]);
 }

 if (argc >= 10) {
-n_pl = parse_list(argv[9]);
+n_tg = parse_list(argv[9]);
+}
+
+if (argc >= 11) {
+n_pl = parse_list(argv[10]);
 }

 // init LLM
@@ -108,10 +113,11 @@ int main(int argc, char ** argv) {

 llama_context_params ctx_params = llama_context_default_params();

 ctx_params.seed = 1234;
 ctx_params.n_ctx = n_kv_max;
 ctx_params.n_batch = n_batch;
 ctx_params.n_ubatch = n_ubatch;
+ctx_params.flash_attn = flash_attn;

 ctx_params.n_threads = params.n_threads;
 ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
@@ -169,7 +175,7 @@ int main(int argc, char ** argv) {
 }

 LOG_TEE("\n");
-LOG_TEE("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, is_pp_shared = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, n_batch, n_ubatch, is_pp_shared, n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch);
+LOG_TEE("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, is_pp_shared = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, n_batch, n_ubatch, flash_attn, is_pp_shared, n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch);
 LOG_TEE("\n");

 LOG_TEE("|%6s | %6s | %4s | %6s | %8s | %8s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "B", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s", "T s", "S t/s");
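With `FATTN` inserted as the fifth positional argument, the example invocation from the usage text shifts accordingly. A sketch (the binary name/path and the chosen values are illustrative, not taken from the patch):

```bash
# MODEL_PATH N_KV_MAX N_BATCH N_UBATCH FATTN IS_PP_SHARED NGL <PP> <TG> <PL>
./batched-bench ggml-model-f16.gguf 2048 2048 512 1 0 999 128,256,512 128,256 1,2,4,8,16,32
```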
@@ -2,7 +2,7 @@

 This example reads weights from project [llama2.c](https://github.com/karpathy/llama2.c) and saves them in ggml compatible format. The vocab that is available in `models/ggml-vocab.bin` is used by default.

-To convert the model first download the models from the [llma2.c](https://github.com/karpathy/llama2.c) repository:
+To convert the model first download the models from the [llama2.c](https://github.com/karpathy/llama2.c) repository:

 `$ make -j`

@@ -49,6 +49,12 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
 }

 float * out = output + batch.seq_id[i][0] * n_embd;
+//TODO: I would also add a parameter here to enable normalization or not.
+/*fprintf(stdout, "unnormalized_embedding:");
+for (int hh = 0; hh < n_embd; hh++) {
+fprintf(stdout, "%9.6f ", embd[hh]);
+}
+fprintf(stdout, "\n");*/
 llama_embd_normalize(embd, out, n_embd);
 }
 }
@@ -123,10 +129,12 @@ int main(int argc, char ** argv) {
 inputs.push_back(inp);
 }

-// add SEP if not present
+// check if the last token is SEP
+// it should be automatically added by the tokenizer when 'tokenizer.ggml.add_eos_token' is set to 'true'
 for (auto & inp : inputs) {
 if (inp.empty() || inp.back() != llama_token_sep(model)) {
-inp.push_back(llama_token_sep(model));
+fprintf(stderr, "%s: warning: last token in the prompt is not SEP\n", __func__);
+fprintf(stderr, "%s: 'tokenizer.ggml.add_eos_token' should be set to 'true' in the GGUF header\n", __func__);
 }
 }

@@ -52,15 +52,15 @@ static void ggml_print_tensor(uint8_t * data, ggml_type type, const int64_t * ne
 size_t i = i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + i0 * nb[0];
 float v;
 if (type == GGML_TYPE_F16) {
-v = ggml_fp16_to_fp32(*(ggml_fp16_t *) data + i);
+v = ggml_fp16_to_fp32(*(ggml_fp16_t *) &data[i]);
 } else if (type == GGML_TYPE_F32) {
-v = *(float *) data + i;
+v = *(float *) &data[i];
 } else if (type == GGML_TYPE_I32) {
-v = (float) *(int32_t *) data + i;
+v = (float) *(int32_t *) &data[i];
 } else if (type == GGML_TYPE_I16) {
-v = (float) *(int16_t *) data + i;
+v = (float) *(int16_t *) &data[i];
 } else if (type == GGML_TYPE_I8) {
-v = (float) *(int8_t *) data + i;
+v = (float) *(int8_t *) &data[i];
 } else {
 GGML_ASSERT(false);
 }
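The replacements above fix an operator-precedence bug: a cast binds tighter than `+`, so `*(float *) data + i` dereferences the start of the buffer and then adds `i` to the resulting *value*, whereas `*(float *) &data[i]` reinterprets the bytes at offset `i`. A minimal standalone illustration of the difference (buffer layout and values are hypothetical, not project code):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    alignas(alignof(float)) uint8_t data[8] = {0};
    const float stored = 3.5f;
    std::memcpy(data + 4, &stored, sizeof(stored)); // a float placed at byte offset 4

    const std::size_t i = 4;
    float wrong = *(float *) data + i;   // reads the float at offset 0 (0.0f), then adds 4 -> 4.0f
    float right = *(float *) &data[i];   // reinterprets the 4 bytes at offset 4 -> 3.5f
    std::printf("wrong = %.1f, right = %.1f\n", wrong, right);
    return 0;
}
```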
@@ -575,7 +575,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
 GGML_ASSERT(tokens_input->type == GGML_TYPE_I32);

 auto add_to_f32 = [] (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
-if (ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16) {
+if (ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16 || a->type == GGML_TYPE_BF16) {
 return ggml_add_cast(ctx, a, b, GGML_TYPE_F32);
 } else if (a->type == GGML_TYPE_F32) {
 return ggml_add(ctx, a, b);
@@ -32,6 +32,7 @@ struct split_params {
 int n_split_tensors = 128;
 std::string input;
 std::string output;
+bool no_tensor_first_split = false;
 bool dry_run = false;
 };

@@ -49,6 +50,7 @@ static void split_print_usage(const char * executable) {
 printf(" --merge merge multiple GGUF to a single GGUF\n");
 printf(" --split-max-tensors max tensors in each split (default: %d)\n", default_params.n_split_tensors);
 printf(" --split-max-size N(M|G) max size per split\n");
+printf(" --no-tensor-first-split do not add tensors to the first split (disabled by default)\n");
 printf(" --dry-run only print out a split plan and exit, without writing any new files\n");
 printf("\n");
 }
@@ -100,6 +102,10 @@ static void split_params_parse_ex(int argc, const char ** argv, split_params & p
 arg_found = true;
 params.dry_run = true;
 }
+if (arg == "--no-tensor-first-split") {
+arg_found = true;
+params.no_tensor_first_split = true;
+}

 if (is_op_set) {
 throw std::invalid_argument("error: either --split or --merge can be specified, but not both");
@@ -200,10 +206,10 @@ struct split_strategy {
 // because we need to know list of tensors for each file in advance, we will build all the ctx_out for all output splits
 int i_split = -1;
 struct gguf_context * ctx_out = NULL;
-auto new_ctx_out = [&]() {
+auto new_ctx_out = [&](bool allow_no_tensors) {
 i_split++;
 if (ctx_out != NULL) {
-if (gguf_get_n_tensors(ctx_out) == 0) {
+if (gguf_get_n_tensors(ctx_out) == 0 && !allow_no_tensors) {
 fprintf(stderr, "error: one of splits have 0 tensors. Maybe size or tensors limit is too small\n");
 exit(EXIT_FAILURE);
 }
@@ -220,7 +226,12 @@ struct split_strategy {
 };

 // initialize ctx_out for the first split
-new_ctx_out();
+new_ctx_out(false);
+
+// skip first split if no_tensor_first_split is set
+if (params.no_tensor_first_split) {
+new_ctx_out(true);
+}

 // process tensors one by one
 size_t curr_tensors_size = 0; // current size by counting only tensors size (without metadata)
@@ -230,7 +241,7 @@ struct split_strategy {
 size_t n_bytes = GGML_PAD(ggml_nbytes(t), GGUF_DEFAULT_ALIGNMENT);
 size_t next_tensors_size = curr_tensors_size + n_bytes;
 if (should_split(i, next_tensors_size)) {
-new_ctx_out();
+new_ctx_out(false);
 curr_tensors_size = n_bytes;
 } else {
 curr_tensors_size = next_tensors_size;
@@ -55,15 +55,15 @@ $MAIN --model $WORK_PATH/ggml-model-merge.gguf --random-prompt --n-predict 32
 echo PASS
 echo

-# 4. Split with no tensor in metadata
-#$SPLIT --split-max-tensors 32 --no-tensor-in-metadata $WORK_PATH/ggml-model-merge.gguf $WORK_PATH/ggml-model-split-32-tensors
-#echo PASS
-#echo
+# 4. Split with no tensors in the first split
+$SPLIT --split-max-tensors 32 --no-tensor-first-split $WORK_PATH/ggml-model-merge.gguf $WORK_PATH/ggml-model-split-32-tensors
+echo PASS
+echo

 # 4b. Test the sharded model is loading properly
-#$MAIN --model $WORK_PATH/ggml-model-split-32-tensors-00001-of-00006.gguf --random-prompt --n-predict 32
-#echo PASS
-#echo
+$MAIN --model $WORK_PATH/ggml-model-split-32-tensors-00001-of-00007.gguf --random-prompt --n-predict 32
+echo PASS
+echo

 # 5. Merge
 #$SPLIT --merge $WORK_PATH/ggml-model-split-32-tensors-00001-of-00006.gguf $WORK_PATH/ggml-model-merge-2.gguf
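Outside of the test script, the new flag composes with the existing split/merge options in the obvious way. A rough sketch of a round trip (binary name, paths and the resulting shard count are illustrative; the `-of-NNNNN` suffix depends on how many splits are actually produced):

```bash
# keep the first shard metadata-only, capping the remaining shards at 32 tensors each
./gguf-split --split-max-tensors 32 --no-tensor-first-split ggml-model.gguf ggml-model-split

# merge the shards back into a single file, starting from the first shard
./gguf-split --merge ggml-model-split-00001-of-00007.gguf ggml-model-merged.gguf
```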
@@ -19,6 +19,7 @@

 struct Stats {
 std::vector<float> values;
+std::vector<int> counts;
 int ncall = 0;
 };

@@ -121,12 +122,10 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
 auto & e = m_stats[wname];

 ++e.ncall;
-// NOTE: since we select top-k experts, the number of calls for the expert tensors will be k times larger
-// using the following line, we can correct for that if needed by replacing the line above with:
-//if (idx == t->src[0]->ne[0] - 1) ++e.ncall;

 if (e.values.empty()) {
 e.values.resize(src1->ne[0]*n_as, 0);
+e.counts.resize(src1->ne[0]*n_as, 0);
 }
 else if (e.values.size() != (size_t)src1->ne[0]*n_as) {
 fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)src1->ne[0]*n_as);
@@ -153,6 +152,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *

 for (int j = 0; j < (int)src1->ne[0]; ++j) {
 e.values[e_start + j] += x[j]*x[j];
+e.counts[e_start + j]++;
 }
 }
 }
@@ -170,6 +170,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
 auto& e = m_stats[wname];
 if (e.values.empty()) {
 e.values.resize(src1->ne[0], 0);
+e.counts.resize(src1->ne[0], 0);
 }
 else if (e.values.size() != (size_t)src1->ne[0]) {
 fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", wname.c_str(), (int)e.values.size(), (int)src1->ne[0]);
@@ -183,6 +184,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
 const float * x = data + row * src1->ne[0];
 for (int j = 0; j < (int)src1->ne[0]; ++j) {
 e.values[j] += x[j]*x[j];
+e.counts[j]++;
 }
 }
 if (e.ncall > m_last_call) {
@@ -222,7 +224,13 @@ void IMatrixCollector::save_imatrix(const char * fname, const char * dataset) co
 out.write((const char *) &p.second.ncall, sizeof(p.second.ncall));
 int nval = p.second.values.size();
 out.write((const char *) &nval, sizeof(nval));
-if (nval > 0) out.write((const char *) p.second.values.data(), nval * sizeof(float));
+if (nval > 0) {
+std::vector<float> tmp(nval);
+for (int i = 0; i < nval; i++) {
+tmp[i] = (p.second.values[i] / static_cast<float>(p.second.counts[i])) * static_cast<float>(p.second.ncall);
+}
+out.write((const char*)tmp.data(), nval*sizeof(float));
+}
 }

 // Write the number of call the matrix was computed with
@@ -270,14 +278,28 @@ bool IMatrixCollector::load_imatrix(const char * imatrix_file, std::unordered_ma
 imatrix_data = {};
 return false;
 }
-e.values.resize(nval);
-in.read((char*)e.values.data(), nval*sizeof(float));
+
+// When re-called from load_imatrix() with add set, this will already be created.
+if (e.values.empty()) {
+e.values.resize(nval, 0);
+e.counts.resize(nval, 0);
+}
+
+std::vector<float> tmp(nval);
+in.read((char*)tmp.data(), nval*sizeof(float));
 if (in.fail()) {
 printf("%s: failed reading data for entry %d\n",__func__,i);
 imatrix_data = {};
 return false;
 }
-e.ncall = ncall;
+
+// Recreate the state as expected by save_imatrix(), and corerct for weighted sum.
+for (int i = 0; i < nval; i++) {
+e.values[i] += tmp[i];
+e.counts[i] += ncall;
+}
+e.ncall += ncall;
+
 }
 return true;
 }
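The net effect of the `counts` bookkeeping is a change in what gets written to the imatrix file: instead of the raw running sum of squared activations, each entry now stores the per-element mean scaled by the call count, i.e. `values[i] / counts[i] * ncall`. A consumer that wants the plain mean of x² back therefore only has to divide by `ncall`; a small sketch of that relationship (the function and its names are mine, not from the project):

```cpp
#include <cstddef>
#include <vector>

// stored[i] is what save_imatrix() writes above: (sum_sq[i] / counts[i]) * ncall,
// so the per-element mean of the squared activations is simply stored[i] / ncall.
float mean_squared_activation(const std::vector<float> & stored, int ncall, std::size_t i) {
    return stored[i] / static_cast<float>(ncall);
}
```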
@@ -26,16 +26,21 @@ options:
 -m, --model <filename> (default: models/7B/ggml-model-q4_0.gguf)
 -p, --n-prompt <n> (default: 512)
 -n, --n-gen <n> (default: 128)
--b, --batch-size <n> (default: 512)
--ctk <t>, --cache-type-k <t> (default: f16)
--ctv <t>, --cache-type-v <t> (default: f16)
--t, --threads <n> (default: 112)
+-pg <pp,tg> (default: 512,128)
+-b, --batch-size <n> (default: 2048)
+-ub, --ubatch-size <n> (default: 512)
+-ctk, --cache-type-k <t> (default: f16)
+-ctv, --cache-type-v <t> (default: f16)
+-t, --threads <n> (default: 16)
 -ngl, --n-gpu-layers <n> (default: 99)
 -sm, --split-mode <none|layer|row> (default: layer)
 -mg, --main-gpu <i> (default: 0)
 -nkvo, --no-kv-offload <0|1> (default: 0)
+-fa, --flash-attn <0|1> (default: 0)
 -mmp, --mmap <0|1> (default: 1)
--ts, --tensor_split <ts0/ts1/..> (default: 0)
+--numa <distribute|isolate|numactl> (default: disabled)
+-embd, --embeddings <0|1> (default: 0)
+-ts, --tensor-split <ts0/ts1/..> (default: 0)
 -r, --repetitions <n> (default: 5)
 -o, --output <csv|json|md|sql> (default: md)
 -v, --verbose (default: 0)
@@ -43,10 +48,11 @@ options:
 Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.
 ```

-llama-bench can perform two types of tests:
+llama-bench can perform three types of tests:

 - Prompt processing (pp): processing a prompt in batches (`-p`)
 - Text generation (tg): generating a sequence of tokens (`-n`)
+- Prompt processing + text generation (pg): processing a prompt followed by generating a sequence of tokens (`-pg`)

 With the exception of `-r`, `-o` and `-v`, all options can be specified multiple times to run multiple tests. Each pp and tg test is run with all combinations of the specified options. To specify multiple values for an option, the values can be separated by commas (e.g. `-n 16,32`), or the option can be specified multiple times (e.g. `-n 16 -n 32`).

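Putting the new options together, a sweep that exercises the combined pp+tg test with flash attention both off and on could look like this (model path and chosen values are just for illustration):

```bash
./llama-bench -m models/7B/ggml-model-q4_0.gguf -pg 512,128 -fa 0,1 -ngl 99
```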
@ -161,10 +161,17 @@ static const char * split_mode_str(llama_split_mode mode) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static std::string pair_str(const std::pair<int, int> & p) {
|
||||||
|
static char buf[32];
|
||||||
|
snprintf(buf, sizeof(buf), "%d,%d", p.first, p.second);
|
||||||
|
return buf;
|
||||||
|
}
|
||||||
|
|
||||||
struct cmd_params {
|
struct cmd_params {
|
||||||
std::vector<std::string> model;
|
std::vector<std::string> model;
|
||||||
std::vector<int> n_prompt;
|
std::vector<int> n_prompt;
|
||||||
std::vector<int> n_gen;
|
std::vector<int> n_gen;
|
||||||
|
std::vector<std::pair<int, int>> n_pg;
|
||||||
std::vector<int> n_batch;
|
std::vector<int> n_batch;
|
||||||
std::vector<int> n_ubatch;
|
std::vector<int> n_ubatch;
|
||||||
std::vector<ggml_type> type_k;
|
std::vector<ggml_type> type_k;
|
||||||
@ -174,9 +181,11 @@ struct cmd_params {
|
|||||||
std::vector<llama_split_mode> split_mode;
|
std::vector<llama_split_mode> split_mode;
|
||||||
std::vector<int> main_gpu;
|
std::vector<int> main_gpu;
|
||||||
std::vector<bool> no_kv_offload;
|
std::vector<bool> no_kv_offload;
|
||||||
|
std::vector<bool> flash_attn;
|
||||||
std::vector<std::vector<float>> tensor_split;
|
std::vector<std::vector<float>> tensor_split;
|
||||||
std::vector<bool> use_mmap;
|
std::vector<bool> use_mmap;
|
||||||
std::vector<bool> embeddings;
|
std::vector<bool> embeddings;
|
||||||
|
ggml_numa_strategy numa;
|
||||||
int reps;
|
int reps;
|
||||||
bool verbose;
|
bool verbose;
|
||||||
output_formats output_format;
|
output_formats output_format;
|
||||||
@ -186,6 +195,7 @@ static const cmd_params cmd_params_defaults = {
|
|||||||
/* model */ {"models/7B/ggml-model-q4_0.gguf"},
|
/* model */ {"models/7B/ggml-model-q4_0.gguf"},
|
||||||
/* n_prompt */ {512},
|
/* n_prompt */ {512},
|
||||||
/* n_gen */ {128},
|
/* n_gen */ {128},
|
||||||
|
/* n_pg */ {{512, 128}},
|
||||||
/* n_batch */ {2048},
|
/* n_batch */ {2048},
|
||||||
/* n_ubatch */ {512},
|
/* n_ubatch */ {512},
|
||||||
/* type_k */ {GGML_TYPE_F16},
|
/* type_k */ {GGML_TYPE_F16},
|
||||||
@ -195,9 +205,11 @@ static const cmd_params cmd_params_defaults = {
|
|||||||
/* split_mode */ {LLAMA_SPLIT_MODE_LAYER},
|
/* split_mode */ {LLAMA_SPLIT_MODE_LAYER},
|
||||||
/* main_gpu */ {0},
|
/* main_gpu */ {0},
|
||||||
/* no_kv_offload */ {false},
|
/* no_kv_offload */ {false},
|
||||||
|
/* flash_attn */ {false},
|
||||||
/* tensor_split */ {std::vector<float>(llama_max_devices(), 0.0f)},
|
/* tensor_split */ {std::vector<float>(llama_max_devices(), 0.0f)},
|
||||||
/* use_mmap */ {true},
|
/* use_mmap */ {true},
|
||||||
/* embeddings */ {false},
|
/* embeddings */ {false},
|
||||||
|
/* numa */ GGML_NUMA_STRATEGY_DISABLED,
|
||||||
/* reps */ 5,
|
/* reps */ 5,
|
||||||
/* verbose */ false,
|
/* verbose */ false,
|
||||||
/* output_format */ MARKDOWN
|
/* output_format */ MARKDOWN
|
||||||
@ -211,16 +223,19 @@ static void print_usage(int /* argc */, char ** argv) {
|
|||||||
printf(" -m, --model <filename> (default: %s)\n", join(cmd_params_defaults.model, ",").c_str());
|
printf(" -m, --model <filename> (default: %s)\n", join(cmd_params_defaults.model, ",").c_str());
|
||||||
printf(" -p, --n-prompt <n> (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str());
|
printf(" -p, --n-prompt <n> (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str());
|
||||||
printf(" -n, --n-gen <n> (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str());
|
printf(" -n, --n-gen <n> (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str());
|
||||||
|
printf(" -pg <pp,tg> (default: %s)\n", join(transform_to_str(cmd_params_defaults.n_pg, pair_str), ",").c_str());
|
||||||
printf(" -b, --batch-size <n> (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str());
|
printf(" -b, --batch-size <n> (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str());
|
||||||
printf(" -ub N, --ubatch-size <n> (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str());
|
printf(" -ub, --ubatch-size <n> (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str());
|
||||||
printf(" -ctk <t>, --cache-type-k <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
|
printf(" -ctk, --cache-type-k <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
|
||||||
printf(" -ctv <t>, --cache-type-v <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
|
printf(" -ctv, --cache-type-v <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
|
||||||
printf(" -t, --threads <n> (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
|
printf(" -t, --threads <n> (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
|
||||||
printf(" -ngl, --n-gpu-layers <n> (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
|
printf(" -ngl, --n-gpu-layers <n> (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
|
||||||
printf(" -sm, --split-mode <none|layer|row> (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
|
printf(" -sm, --split-mode <none|layer|row> (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
|
||||||
printf(" -mg, --main-gpu <i> (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
|
printf(" -mg, --main-gpu <i> (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
|
||||||
printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
|
printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
|
||||||
|
printf(" -fa, --flash-attn <0|1> (default: %s)\n", join(cmd_params_defaults.flash_attn, ",").c_str());
|
||||||
printf(" -mmp, --mmap <0|1> (default: %s)\n", join(cmd_params_defaults.use_mmap, ",").c_str());
|
printf(" -mmp, --mmap <0|1> (default: %s)\n", join(cmd_params_defaults.use_mmap, ",").c_str());
|
||||||
|
printf(" --numa <distribute|isolate|numactl> (default: disabled)\n");
|
||||||
printf(" -embd, --embeddings <0|1> (default: %s)\n", join(cmd_params_defaults.embeddings, ",").c_str());
|
printf(" -embd, --embeddings <0|1> (default: %s)\n", join(cmd_params_defaults.embeddings, ",").c_str());
|
||||||
printf(" -ts, --tensor-split <ts0/ts1/..> (default: 0)\n");
|
printf(" -ts, --tensor-split <ts0/ts1/..> (default: 0)\n");
|
||||||
printf(" -r, --repetitions <n> (default: %d)\n", cmd_params_defaults.reps);
|
printf(" -r, --repetitions <n> (default: %d)\n", cmd_params_defaults.reps);
|
||||||
@ -298,6 +313,17 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
|||||||
}
|
}
|
||||||
auto p = split<int>(argv[i], split_delim);
|
auto p = split<int>(argv[i], split_delim);
|
||||||
params.n_gen.insert(params.n_gen.end(), p.begin(), p.end());
|
params.n_gen.insert(params.n_gen.end(), p.begin(), p.end());
|
||||||
|
} else if (arg == "-pg") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
auto p = split<std::string>(argv[i], ',');
|
||||||
|
if (p.size() != 2) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
params.n_pg.push_back({std::stoi(p[0]), std::stoi(p[1])});
|
||||||
} else if (arg == "-b" || arg == "--batch-size") {
|
} else if (arg == "-b" || arg == "--batch-size") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
@ -393,6 +419,24 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
|||||||
}
|
}
|
||||||
auto p = split<bool>(argv[i], split_delim);
|
auto p = split<bool>(argv[i], split_delim);
|
||||||
params.no_kv_offload.insert(params.no_kv_offload.end(), p.begin(), p.end());
|
params.no_kv_offload.insert(params.no_kv_offload.end(), p.begin(), p.end());
|
||||||
|
} else if (arg == "--numa") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
std::string value(argv[i]);
|
||||||
|
/**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
|
||||||
|
else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
|
||||||
|
else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
|
||||||
|
else { invalid_param = true; break; }
|
||||||
|
}
|
||||||
|
} else if (arg == "-fa" || arg == "--flash-attn") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
auto p = split<bool>(argv[i], split_delim);
|
||||||
|
params.flash_attn.insert(params.flash_attn.end(), p.begin(), p.end());
|
||||||
} else if (arg == "-mmp" || arg == "--mmap") {
|
} else if (arg == "-mmp" || arg == "--mmap") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
@ -469,6 +513,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
|||||||
if (params.model.empty()) { params.model = cmd_params_defaults.model; }
|
if (params.model.empty()) { params.model = cmd_params_defaults.model; }
|
||||||
if (params.n_prompt.empty()) { params.n_prompt = cmd_params_defaults.n_prompt; }
|
if (params.n_prompt.empty()) { params.n_prompt = cmd_params_defaults.n_prompt; }
|
||||||
if (params.n_gen.empty()) { params.n_gen = cmd_params_defaults.n_gen; }
|
if (params.n_gen.empty()) { params.n_gen = cmd_params_defaults.n_gen; }
|
||||||
|
if (params.n_pg.empty()) { params.n_pg = cmd_params_defaults.n_pg; }
|
||||||
if (params.n_batch.empty()) { params.n_batch = cmd_params_defaults.n_batch; }
|
if (params.n_batch.empty()) { params.n_batch = cmd_params_defaults.n_batch; }
|
||||||
if (params.n_ubatch.empty()) { params.n_ubatch = cmd_params_defaults.n_ubatch; }
|
if (params.n_ubatch.empty()) { params.n_ubatch = cmd_params_defaults.n_ubatch; }
|
||||||
if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; }
|
if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; }
|
||||||
@ -477,6 +522,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
|||||||
if (params.split_mode.empty()) { params.split_mode = cmd_params_defaults.split_mode; }
|
if (params.split_mode.empty()) { params.split_mode = cmd_params_defaults.split_mode; }
|
||||||
if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; }
|
if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; }
|
||||||
if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
|
if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
|
||||||
|
if (params.flash_attn.empty()) { params.flash_attn = cmd_params_defaults.flash_attn; }
|
||||||
if (params.tensor_split.empty()) { params.tensor_split = cmd_params_defaults.tensor_split; }
|
if (params.tensor_split.empty()) { params.tensor_split = cmd_params_defaults.tensor_split; }
|
||||||
if (params.use_mmap.empty()) { params.use_mmap = cmd_params_defaults.use_mmap; }
|
if (params.use_mmap.empty()) { params.use_mmap = cmd_params_defaults.use_mmap; }
|
||||||
if (params.embeddings.empty()) { params.embeddings = cmd_params_defaults.embeddings; }
|
if (params.embeddings.empty()) { params.embeddings = cmd_params_defaults.embeddings; }
|
||||||
@ -498,6 +544,7 @@ struct cmd_params_instance {
|
|||||||
llama_split_mode split_mode;
|
llama_split_mode split_mode;
|
||||||
int main_gpu;
|
int main_gpu;
|
||||||
bool no_kv_offload;
|
bool no_kv_offload;
|
||||||
|
bool flash_attn;
|
||||||
std::vector<float> tensor_split;
|
std::vector<float> tensor_split;
|
||||||
bool use_mmap;
|
bool use_mmap;
|
||||||
bool embeddings;
|
bool embeddings;
|
||||||
@ -532,6 +579,7 @@ struct cmd_params_instance {
|
|||||||
cparams.type_k = type_k;
|
cparams.type_k = type_k;
|
||||||
cparams.type_v = type_v;
|
cparams.type_v = type_v;
|
||||||
cparams.offload_kqv = !no_kv_offload;
|
cparams.offload_kqv = !no_kv_offload;
|
||||||
|
cparams.flash_attn = flash_attn;
|
||||||
cparams.embeddings = embeddings;
|
cparams.embeddings = embeddings;
|
||||||
|
|
||||||
return cparams;
|
return cparams;
|
||||||
@ -554,6 +602,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
|||||||
for (const auto & tk : params.type_k)
|
for (const auto & tk : params.type_k)
|
||||||
for (const auto & tv : params.type_v)
|
for (const auto & tv : params.type_v)
|
||||||
for (const auto & nkvo : params.no_kv_offload)
|
for (const auto & nkvo : params.no_kv_offload)
|
||||||
|
for (const auto & fa : params.flash_attn)
|
||||||
for (const auto & nt : params.n_threads) {
|
for (const auto & nt : params.n_threads) {
|
||||||
for (const auto & n_prompt : params.n_prompt) {
|
for (const auto & n_prompt : params.n_prompt) {
|
||||||
if (n_prompt == 0) {
|
if (n_prompt == 0) {
|
||||||
@ -572,6 +621,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
|||||||
/* .split_mode = */ sm,
|
/* .split_mode = */ sm,
|
||||||
/* .main_gpu = */ mg,
|
/* .main_gpu = */ mg,
|
||||||
/* .no_kv_offload= */ nkvo,
|
/* .no_kv_offload= */ nkvo,
|
||||||
|
/* .flash_attn = */ fa,
|
||||||
/* .tensor_split = */ ts,
|
/* .tensor_split = */ ts,
|
||||||
/* .use_mmap = */ mmp,
|
/* .use_mmap = */ mmp,
|
||||||
/* .embeddings = */ embd,
|
/* .embeddings = */ embd,
|
||||||
@ -596,6 +646,32 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
|
|||||||
/* .split_mode = */ sm,
|
/* .split_mode = */ sm,
|
||||||
/* .main_gpu = */ mg,
|
/* .main_gpu = */ mg,
|
||||||
/* .no_kv_offload= */ nkvo,
|
/* .no_kv_offload= */ nkvo,
|
||||||
|
/* .flash_attn = */ fa,
|
||||||
|
/* .tensor_split = */ ts,
|
||||||
|
/* .use_mmap = */ mmp,
|
||||||
|
/* .embeddings = */ embd,
|
||||||
|
};
|
||||||
|
instances.push_back(instance);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const auto & n_pg : params.n_pg) {
|
||||||
|
if (n_pg.first == 0 && n_pg.second == 0) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
cmd_params_instance instance = {
|
||||||
|
/* .model = */ m,
|
||||||
|
/* .n_prompt = */ n_pg.first,
|
||||||
|
/* .n_gen = */ n_pg.second,
|
||||||
|
/* .n_batch = */ nb,
|
||||||
|
/* .n_ubatch = */ nub,
|
||||||
|
/* .type_k = */ tk,
|
||||||
|
/* .type_v = */ tv,
|
||||||
|
/* .n_threads = */ nt,
|
||||||
|
/* .n_gpu_layers = */ nl,
|
||||||
|
/* .split_mode = */ sm,
|
||||||
|
/* .main_gpu = */ mg,
|
||||||
|
/* .no_kv_offload= */ nkvo,
|
||||||
|
/* .flash_attn = */ fa,
|
||||||
/* .tensor_split = */ ts,
|
/* .tensor_split = */ ts,
|
||||||
/* .use_mmap = */ mmp,
|
/* .use_mmap = */ mmp,
|
||||||
/* .embeddings = */ embd,
|
/* .embeddings = */ embd,
|
||||||
@ -633,6 +709,7 @@ struct test {
|
|||||||
llama_split_mode split_mode;
|
llama_split_mode split_mode;
|
||||||
int main_gpu;
|
int main_gpu;
|
||||||
bool no_kv_offload;
|
bool no_kv_offload;
|
||||||
|
bool flash_attn;
|
||||||
std::vector<float> tensor_split;
|
std::vector<float> tensor_split;
|
||||||
bool use_mmap;
|
bool use_mmap;
|
||||||
bool embeddings;
|
bool embeddings;
|
||||||
@ -657,6 +734,7 @@ struct test {
|
|||||||
split_mode = inst.split_mode;
|
split_mode = inst.split_mode;
|
||||||
main_gpu = inst.main_gpu;
|
main_gpu = inst.main_gpu;
|
||||||
no_kv_offload = inst.no_kv_offload;
|
no_kv_offload = inst.no_kv_offload;
|
||||||
|
flash_attn = inst.flash_attn;
|
||||||
tensor_split = inst.tensor_split;
|
tensor_split = inst.tensor_split;
|
||||||
use_mmap = inst.use_mmap;
|
use_mmap = inst.use_mmap;
|
||||||
embeddings = inst.embeddings;
|
embeddings = inst.embeddings;
|
||||||
@ -731,7 +809,7 @@ struct test {
|
|||||||
"n_batch", "n_ubatch",
|
"n_batch", "n_ubatch",
|
||||||
"n_threads", "type_k", "type_v",
|
"n_threads", "type_k", "type_v",
|
||||||
"n_gpu_layers", "split_mode",
|
"n_gpu_layers", "split_mode",
|
||||||
"main_gpu", "no_kv_offload",
|
"main_gpu", "no_kv_offload", "flash_attn",
|
||||||
"tensor_split", "use_mmap", "embeddings",
|
"tensor_split", "use_mmap", "embeddings",
|
||||||
"n_prompt", "n_gen", "test_time",
|
"n_prompt", "n_gen", "test_time",
|
||||||
"avg_ns", "stddev_ns",
|
"avg_ns", "stddev_ns",
|
||||||
@ -753,7 +831,7 @@ struct test {
|
|||||||
}
|
}
|
||||||
if (field == "cuda" || field == "opencl" || field == "vulkan" || field == "kompute" || field == "metal" ||
|
if (field == "cuda" || field == "opencl" || field == "vulkan" || field == "kompute" || field == "metal" ||
|
||||||
field == "gpu_blas" || field == "blas" || field == "sycl" ||field == "f16_kv" || field == "no_kv_offload" ||
|
field == "gpu_blas" || field == "blas" || field == "sycl" ||field == "f16_kv" || field == "no_kv_offload" ||
|
||||||
field == "use_mmap" || field == "embeddings") {
|
field == "flash_attn" || field == "use_mmap" || field == "embeddings") {
|
||||||
return BOOL;
|
return BOOL;
|
||||||
}
|
}
|
||||||
if (field == "avg_ts" || field == "stddev_ts") {
|
if (field == "avg_ts" || field == "stddev_ts") {
|
||||||
@ -787,7 +865,7 @@ struct test {
|
|||||||
std::to_string(n_batch), std::to_string(n_ubatch),
|
std::to_string(n_batch), std::to_string(n_ubatch),
|
||||||
std::to_string(n_threads), ggml_type_name(type_k), ggml_type_name(type_v),
|
std::to_string(n_threads), ggml_type_name(type_k), ggml_type_name(type_v),
|
||||||
std::to_string(n_gpu_layers), split_mode_str(split_mode),
|
std::to_string(n_gpu_layers), split_mode_str(split_mode),
|
||||||
std::to_string(main_gpu), std::to_string(no_kv_offload),
|
std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(flash_attn),
|
||||||
tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
|
tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
|
||||||
std::to_string(n_prompt), std::to_string(n_gen), test_time,
|
std::to_string(n_prompt), std::to_string(n_gen), test_time,
|
||||||
std::to_string(avg_ns()), std::to_string(stdev_ns()),
|
std::to_string(avg_ns()), std::to_string(stdev_ns()),
|
||||||
@ -933,6 +1011,9 @@ struct markdown_printer : public printer {
|
|||||||
if (field == "n_gpu_layers") {
|
if (field == "n_gpu_layers") {
|
||||||
return 3;
|
return 3;
|
||||||
}
|
}
|
||||||
|
if (field == "test") {
|
||||||
|
return 13;
|
||||||
|
}
|
||||||
|
|
||||||
int width = std::max((int)field.length(), 10);
|
int width = std::max((int)field.length(), 10);
|
||||||
|
|
||||||
@ -955,6 +1036,9 @@ struct markdown_printer : public printer {
|
|||||||
if (field == "no_kv_offload") {
|
if (field == "no_kv_offload") {
|
||||||
return "nkvo";
|
return "nkvo";
|
||||||
}
|
}
|
||||||
|
if (field == "flash_attn") {
|
||||||
|
return "fa";
|
||||||
|
}
|
||||||
if (field == "use_mmap") {
|
if (field == "use_mmap") {
|
||||||
return "mmap";
|
return "mmap";
|
||||||
}
|
}
|
||||||
@ -1001,6 +1085,9 @@ struct markdown_printer : public printer {
|
|||||||
if (params.no_kv_offload.size() > 1 || params.no_kv_offload != cmd_params_defaults.no_kv_offload) {
|
if (params.no_kv_offload.size() > 1 || params.no_kv_offload != cmd_params_defaults.no_kv_offload) {
|
||||||
fields.emplace_back("no_kv_offload");
|
fields.emplace_back("no_kv_offload");
|
||||||
}
|
}
|
||||||
|
if (params.flash_attn.size() > 1 || params.flash_attn != cmd_params_defaults.flash_attn) {
|
||||||
|
fields.emplace_back("flash_attn");
|
||||||
|
}
|
||||||
if (params.tensor_split.size() > 1 || params.tensor_split != cmd_params_defaults.tensor_split) {
|
if (params.tensor_split.size() > 1 || params.tensor_split != cmd_params_defaults.tensor_split) {
|
||||||
fields.emplace_back("tensor_split");
|
fields.emplace_back("tensor_split");
|
||||||
}
|
}
|
||||||
@ -1053,12 +1140,11 @@ struct markdown_printer : public printer {
|
|||||||
value = test::get_backend();
|
value = test::get_backend();
|
||||||
} else if (field == "test") {
|
} else if (field == "test") {
|
||||||
if (t.n_prompt > 0 && t.n_gen == 0) {
|
if (t.n_prompt > 0 && t.n_gen == 0) {
|
||||||
snprintf(buf, sizeof(buf), "pp %d", t.n_prompt);
|
snprintf(buf, sizeof(buf), "pp%d", t.n_prompt);
|
||||||
} else if (t.n_gen > 0 && t.n_prompt == 0) {
|
} else if (t.n_gen > 0 && t.n_prompt == 0) {
|
||||||
snprintf(buf, sizeof(buf), "tg %d", t.n_gen);
|
snprintf(buf, sizeof(buf), "tg%d", t.n_gen);
|
||||||
} else {
|
} else {
|
||||||
assert(false);
|
snprintf(buf, sizeof(buf), "pp%d+tg%d", t.n_prompt, t.n_gen);
|
||||||
exit(1);
|
|
||||||
}
|
}
|
||||||
value = buf;
|
value = buf;
|
||||||
} else if (field == "t/s") {
|
} else if (field == "t/s") {
|
||||||
@ -1191,6 +1277,7 @@ int main(int argc, char ** argv) {
|
|||||||
llama_log_set(llama_null_log_callback, NULL);
|
llama_log_set(llama_null_log_callback, NULL);
|
||||||
}
|
}
|
||||||
llama_backend_init();
|
llama_backend_init();
|
||||||
|
llama_numa_init(params.numa);
|
||||||
|
|
||||||
// initialize printer
|
// initialize printer
|
||||||
std::unique_ptr<printer> p;
|
std::unique_ptr<printer> p;
|
||||||
@ -1258,6 +1345,7 @@ int main(int argc, char ** argv) {
|
|||||||
llama_kv_cache_clear(ctx);
|
llama_kv_cache_clear(ctx);
|
||||||
|
|
||||||
uint64_t t_start = get_time_ns();
|
uint64_t t_start = get_time_ns();
|
||||||
|
|
||||||
if (t.n_prompt > 0) {
|
if (t.n_prompt > 0) {
|
||||||
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
|
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
|
||||||
}
|
}
|
||||||
|
@@ -56,7 +56,7 @@ python ./examples/llava/convert-image-encoder-to-gguf.py -m ../clip-vit-large-pa
 python ./convert.py ../llava-v1.5-7b --skip-unknown
 ```

-Now both the LLaMA part and the image encoder is in the `llava-v1.5-7b` directory.
+Now both the LLaMA part and the image encoder are in the `llava-v1.5-7b` directory.

 ## LLaVA 1.6 gguf conversion
 1) First clone a LLaVA 1.6 model:
@@ -573,13 +573,13 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
 struct ggml_tensor * embeddings = inp;
 if (ctx->has_class_embedding) {
 embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
+ggml_set_name(embeddings, "embeddings");
+ggml_set_input(embeddings);
 embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
 embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);
 embeddings = ggml_acc(ctx0, embeddings, inp,
 embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
 }
-ggml_set_name(embeddings, "embeddings");
-ggml_set_input(embeddings);


 struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
@@ -1846,7 +1846,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
 const int image_size = hparams.image_size;
 const int patch_size = hparams.patch_size;
 const int num_patches = ((image_size / patch_size) * (image_size / patch_size));
-const int num_positions = num_patches + 1;
+const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);

 {
 struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw");
@@ -1874,12 +1874,14 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
 }

 {
-struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings");
+if (ctx->has_class_embedding) {
+struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings");

 void* zero_mem = malloc(ggml_nbytes(embeddings));
 memset(zero_mem, 0, ggml_nbytes(embeddings));
 ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings));
 free(zero_mem);
+}
 }

 {
@ -113,11 +113,11 @@ struct llava_context {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static void show_additional_info(int /*argc*/, char ** argv) {
|
static void show_additional_info(int /*argc*/, char ** argv) {
|
||||||
LOG_TEE("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
|
LOG_TEE("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
|
||||||
LOG_TEE(" note: a lower temperature value like 0.1 is recommended for better quality.\n");
|
LOG_TEE(" note: a lower temperature value like 0.1 is recommended for better quality.\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params) {
|
static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params, const std::string & fname) {
|
||||||
|
|
||||||
// load and preprocess the image
|
// load and preprocess the image
|
||||||
llava_image_embed * embed = NULL;
|
llava_image_embed * embed = NULL;
|
||||||
@ -133,9 +133,9 @@ static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_para
|
|||||||
}
|
}
|
||||||
params->prompt = remove_image_from_prompt(prompt);
|
params->prompt = remove_image_from_prompt(prompt);
|
||||||
} else {
|
} else {
|
||||||
embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->n_threads, params->image.c_str());
|
embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->n_threads, fname.c_str());
|
||||||
if (!embed) {
|
if (!embed) {
|
||||||
LOG_TEE("%s: is %s really an image file?\n", __func__, params->image.c_str());
|
fprintf(stderr, "%s: is %s really an image file?\n", __func__, fname.c_str());
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -189,6 +189,11 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
|
|||||||
LOG_TEE("\n");
|
LOG_TEE("\n");
|
||||||
|
|
||||||
struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
|
struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
|
||||||
|
if (!ctx_sampling) {
|
||||||
|
fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
std::string response = "";
|
std::string response = "";
|
||||||
for (int i = 0; i < max_tgt_len; i++) {
|
for (int i = 0; i < max_tgt_len; i++) {
|
||||||
const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
|
const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
|
||||||
@ -207,17 +212,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
|
|||||||
printf("\n");
|
printf("\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static struct llama_model * llava_init(gpt_params * params) {
|
||||||
static struct llava_context * llava_init(gpt_params * params) {
|
|
||||||
const char * clip_path = params->mmproj.c_str();
|
|
||||||
|
|
||||||
auto prompt = params->prompt;
|
|
||||||
if (prompt.empty()) {
|
|
||||||
prompt = "describe the image in detail.";
|
|
||||||
}
|
|
||||||
|
|
||||||
auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
|
|
||||||
|
|
||||||
llama_backend_init();
|
llama_backend_init();
|
||||||
llama_numa_init(params->numa);
|
llama_numa_init(params->numa);
|
||||||
|
|
||||||
@ -228,6 +223,19 @@ static struct llava_context * llava_init(gpt_params * params) {
|
|||||||
LOG_TEE("%s: error: unable to load model\n" , __func__);
|
LOG_TEE("%s: error: unable to load model\n" , __func__);
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
return model;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct llava_context * llava_init_context(gpt_params * params, llama_model * model) {
|
||||||
|
const char * clip_path = params->mmproj.c_str();
|
||||||
|
|
||||||
|
auto prompt = params->prompt;
|
||||||
|
if (prompt.empty()) {
|
||||||
|
prompt = "describe the image in detail.";
|
||||||
|
}
|
||||||
|
|
||||||
|
auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
|
||||||
|
|
||||||
|
|
||||||
llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
|
llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
|
||||||
ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings
|
ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings
|
||||||
@ -286,24 +294,30 @@ int main(int argc, char ** argv) {
|
|||||||
show_additional_info(argc, argv);
|
show_additional_info(argc, argv);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
auto model = llava_init(¶ms);
|
||||||
auto ctx_llava = llava_init(¶ms);
|
if (model == NULL) {
|
||||||
if (ctx_llava == NULL) {
|
fprintf(stderr, "%s: error: failed to init llava model\n", __func__);
|
||||||
LOG_TEE("%s: error: failed to init llava\n", __func__);
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
auto image_embed = load_image(ctx_llava, ¶ms);
|
for (auto & image : params.image) {
|
||||||
if (!image_embed) {
|
auto ctx_llava = llava_init_context(¶ms, model);
|
||||||
return 1;
|
|
||||||
|
auto image_embed = load_image(ctx_llava, ¶ms, image);
|
||||||
|
if (!image_embed) {
|
||||||
|
std::cerr << "error: failed to load image " << image << ". Terminating\n\n";
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// process the prompt
|
||||||
|
process_prompt(ctx_llava, image_embed, ¶ms, params.prompt);
|
||||||
|
|
||||||
|
llama_print_timings(ctx_llava->ctx_llama);
|
||||||
|
llava_image_embed_free(image_embed);
|
||||||
|
ctx_llava->model = NULL;
|
||||||
|
llava_free(ctx_llava);
|
||||||
}
|
}
|
||||||
|
llama_free_model(model);
|
||||||
|
|
||||||
// process the prompt
|
|
||||||
process_prompt(ctx_llava, image_embed, ¶ms, params.prompt);
|
|
||||||
|
|
||||||
llama_print_timings(ctx_llava->ctx_llama);
|
|
||||||
|
|
||||||
llava_image_embed_free(image_embed);
|
|
||||||
llava_free(ctx_llava);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@@ -17,11 +17,9 @@ In this case, CLBlast was already installed so the CMake package is referenced i
 ```cmd
 git clone https://github.com/ggerganov/llama.cpp
 cd llama.cpp
-mkdir build
-cd build
-cmake .. -DBUILD_SHARED_LIBS=OFF -DLLAMA_CLBLAST=ON -DCMAKE_PREFIX_PATH=C:/CLBlast/lib/cmake/CLBlast -G "Visual Studio 17 2022" -A x64
-cmake --build . --config Release
-cmake --install . --prefix C:/LlamaCPP
+cmake -B build -DBUILD_SHARED_LIBS=OFF -DLLAMA_CLBLAST=ON -DCMAKE_PREFIX_PATH=C:/CLBlast/lib/cmake/CLBlast -G "Visual Studio 17 2022" -A x64
+cmake --build build --config Release
+cmake --install build --prefix C:/LlamaCPP
 ```

 ### Build main-cmake-pkg
@@ -29,9 +27,7 @@ cmake --install . --prefix C:/LlamaCPP

 ```cmd
 cd ..\examples\main-cmake-pkg
-mkdir build
-cd build
-cmake .. -DBUILD_SHARED_LIBS=OFF -DCMAKE_PREFIX_PATH="C:/CLBlast/lib/cmake/CLBlast;C:/LlamaCPP/lib/cmake/Llama" -G "Visual Studio 17 2022" -A x64
-cmake --build . --config Release
-cmake --install . --prefix C:/MyLlamaApp
+cmake -B build -DBUILD_SHARED_LIBS=OFF -DCMAKE_PREFIX_PATH="C:/CLBlast/lib/cmake/CLBlast;C:/LlamaCPP/lib/cmake/Llama" -G "Visual Studio 17 2022" -A x64
+cmake --build build --config Release
+cmake --install build --prefix C:/MyLlamaApp
 ```
@@ -66,7 +66,7 @@ main.exe -m models\7B\ggml-model.bin --ignore-eos -n -1 --random-prompt

 In this section, we cover the most commonly used options for running the `main` program with the LLaMA models:

-- `-m FNAME, --model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
+- `-m FNAME, --model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`; inferred from `--model-url` if set).
 - `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (e.g https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf).
 - `-i, --interactive`: Run the program in interactive mode, allowing you to provide input directly and receive real-time responses.
 - `-ins, --instruct`: Run the program in instruction mode, which is particularly useful when working with Alpaca models.
@ -143,7 +143,7 @@ The `--ctx-size` option allows you to set the size of the prompt context used by
|
|||||||
|
|
||||||
### Extended Context Size
|
### Extended Context Size
|
||||||
|
|
||||||
Some fine-tuned models have extended the context length by scaling RoPE. For example, if the original pre-trained model have a context length (max sequence length) of 4096 (4k) and the fine-tuned model have 32k. That is a scaling factor of 8, and should work by setting the above `--ctx-size` to 32768 (32k) and `--rope-scale` to 8.
|
Some fine-tuned models have extended the context length by scaling RoPE. For example, if the original pre-trained model has a context length (max sequence length) of 4096 (4k) and the fine-tuned model has 32k, that is a scaling factor of 8; it should work by setting the above `--ctx-size` to 32768 (32k) and `--rope-scale` to 8.
|
||||||
|
|
||||||
- `--rope-scale N`: Where N is the linear scaling factor used by the fine-tuned model.
|
- `--rope-scale N`: Where N is the linear scaling factor used by the fine-tuned model.
|
||||||
|
|
||||||
@ -286,7 +286,7 @@ These options help improve the performance and memory usage of the LLaMA models.
|
|||||||
|
|
||||||
- `--numa distribute`: Pin an equal proportion of the threads to the cores on each NUMA node. This will spread the load amongst all cores on the system, utilizing all memory channels at the expense of potentially requiring memory to travel over the slow links between nodes.
|
- `--numa distribute`: Pin an equal proportion of the threads to the cores on each NUMA node. This will spread the load amongst all cores on the system, utilizing all memory channels at the expense of potentially requiring memory to travel over the slow links between nodes.
|
||||||
- `--numa isolate`: Pin all threads to the NUMA node that the program starts on. This limits the number of cores and amount of memory that can be used, but guarantees all memory access remains local to the NUMA node.
|
- `--numa isolate`: Pin all threads to the NUMA node that the program starts on. This limits the number of cores and amount of memory that can be used, but guarantees all memory access remains local to the NUMA node.
|
||||||
- `--numa numactl`: Pin threads to the CPUMAP that is passed to the program by starting it with the numactl utility. This is the most flexible mode, and allow arbitraty core usage patterns, for example a map that uses all the cores on one NUMA nodes, and just enough cores on a second node to saturate the inter-node memory bus.
|
- `--numa numactl`: Pin threads to the CPUMAP that is passed to the program by starting it with the numactl utility. This is the most flexible mode, and allows arbitrary core usage patterns, for example a map that uses all the cores on one NUMA node, and just enough cores on a second node to saturate the inter-node memory bus.
|
||||||
|
|
||||||
These flags attempt optimizations that help on some systems with non-uniform memory access. This currently consists of one of the above strategies, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root.
|
These flags attempt optimizations that help on some systems with non-uniform memory access. This currently consists of one of the above strategies, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root.
|
||||||
|
|
||||||
|
@ -324,7 +324,7 @@ int main(int argc, char ** argv) {
|
|||||||
log_tostr(embd_inp.empty()), n_matching_session_tokens, embd_inp.size(), session_tokens.size(), embd_inp.size());
|
log_tostr(embd_inp.empty()), n_matching_session_tokens, embd_inp.size(), session_tokens.size(), embd_inp.size());
|
||||||
|
|
||||||
// if we will use the cache for the full prompt without reaching the end of the cache, force
|
// if we will use the cache for the full prompt without reaching the end of the cache, force
|
||||||
// reevaluation of the last token token to recalculate the cached logits
|
// reevaluation of the last token to recalculate the cached logits
|
||||||
if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
|
if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
|
||||||
LOGLN("recalculate the cached logits (do): session_tokens.resize( %zu )", embd_inp.size() - 1);
|
LOGLN("recalculate the cached logits (do): session_tokens.resize( %zu )", embd_inp.size() - 1);
|
||||||
|
|
||||||
@ -362,6 +362,9 @@ int main(int argc, char ** argv) {
|
|||||||
params.interactive_first = true;
|
params.interactive_first = true;
|
||||||
params.antiprompt.emplace_back("<|im_start|>user\n");
|
params.antiprompt.emplace_back("<|im_start|>user\n");
|
||||||
}
|
}
|
||||||
|
else if (params.conversation) {
|
||||||
|
params.interactive_first = true;
|
||||||
|
}
|
||||||
|
|
||||||
// enable interactive mode if interactive start is specified
|
// enable interactive mode if interactive start is specified
|
||||||
if (params.interactive_first) {
|
if (params.interactive_first) {
|
||||||
@ -520,6 +523,10 @@ int main(int argc, char ** argv) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
|
struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
|
||||||
|
if (!ctx_sampling) {
|
||||||
|
fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
|
while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
|
||||||
// predict
|
// predict
|
||||||
@ -544,7 +551,7 @@ int main(int argc, char ** argv) {
|
|||||||
// if we run out of context:
|
// if we run out of context:
|
||||||
// - take the n_keep first tokens from the original prompt (via n_past)
|
// - take the n_keep first tokens from the original prompt (via n_past)
|
||||||
// - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
|
// - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
|
||||||
if (n_past + (int) embd.size() + std::max<int>(0, guidance_offset) > n_ctx) {
|
if (n_past + (int) embd.size() + std::max<int>(0, guidance_offset) >= n_ctx) {
|
||||||
if (params.n_predict == -2) {
|
if (params.n_predict == -2) {
|
||||||
LOG_TEE("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
|
LOG_TEE("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
|
||||||
break;
|
break;
|
||||||
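For reference, the "take half" step described in the comments above amounts to roughly the following arithmetic. This is an illustrative sketch only; the variable names mirror the comments rather than the actual code in `main.cpp`.

```cpp
// Sketch: once the context window is full, keep the first n_keep tokens and
// discard half of everything after them before generation continues.
#include <cstdio>

int main() {
    const int n_ctx  = 4096;  // context size
    const int n_keep = 256;   // tokens from the original prompt that are always kept
    int       n_past = 4096;  // tokens currently in the KV cache (the window is full)

    const int n_left    = n_past - n_keep; // tokens eligible for eviction
    const int n_discard = n_left / 2;      // drop half of them

    n_past -= n_discard; // generation resumes with this many cached tokens

    printf("discarding %d tokens, %d/%d slots of the context remain in use\n",
           n_discard, n_past, n_ctx);
    return 0;
}
```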
@ -733,7 +740,7 @@ int main(int argc, char ** argv) {
|
|||||||
// display text
|
// display text
|
||||||
if (input_echo && display) {
|
if (input_echo && display) {
|
||||||
for (auto id : embd) {
|
for (auto id : embd) {
|
||||||
const std::string token_str = llama_token_to_piece(ctx, id);
|
const std::string token_str = llama_token_to_piece(ctx, id, !params.conversation);
|
||||||
printf("%s", token_str.c_str());
|
printf("%s", token_str.c_str());
|
||||||
|
|
||||||
if (embd.size() > 1) {
|
if (embd.size() > 1) {
|
||||||
@ -796,7 +803,7 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
// deal with end of generation tokens in interactive mode
|
// deal with end of generation tokens in interactive mode
|
||||||
if (llama_token_is_eog(model, llama_sampling_last(ctx_sampling))) {
|
if (llama_token_is_eog(model, llama_sampling_last(ctx_sampling))) {
|
||||||
LOG("found EOS token\n");
|
LOG("found an EOG token\n");
|
||||||
|
|
||||||
if (params.interactive) {
|
if (params.interactive) {
|
||||||
if (!params.antiprompt.empty()) {
|
if (!params.antiprompt.empty()) {
|
||||||
@ -816,7 +823,7 @@ int main(int argc, char ** argv) {
|
|||||||
if (n_past > 0 && is_interacting) {
|
if (n_past > 0 && is_interacting) {
|
||||||
LOG("waiting for user input\n");
|
LOG("waiting for user input\n");
|
||||||
|
|
||||||
if (params.instruct || params.chatml) {
|
if (params.conversation || params.instruct || params.chatml) {
|
||||||
printf("\n> ");
|
printf("\n> ");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -826,7 +833,7 @@ int main(int argc, char ** argv) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
std::string buffer;
|
std::string buffer;
|
||||||
if (!params.input_prefix.empty()) {
|
if (!params.input_prefix.empty() && !params.conversation) {
|
||||||
LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
|
LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
|
||||||
printf("%s", params.input_prefix.c_str());
|
printf("%s", params.input_prefix.c_str());
|
||||||
}
|
}
|
||||||
@ -850,7 +857,7 @@ int main(int argc, char ** argv) {
|
|||||||
// Entering a empty line lets the user pass control back
|
// Entering a empty line lets the user pass control back
|
||||||
if (buffer.length() > 1) {
|
if (buffer.length() > 1) {
|
||||||
// append input suffix if any
|
// append input suffix if any
|
||||||
if (!params.input_suffix.empty()) {
|
if (!params.input_suffix.empty() && !params.conversation) {
|
||||||
LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
|
LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
|
||||||
printf("%s", params.input_suffix.c_str());
|
printf("%s", params.input_suffix.c_str());
|
||||||
}
|
}
|
||||||
@ -876,7 +883,7 @@ int main(int argc, char ** argv) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
|
const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
|
||||||
const auto line_inp = ::llama_tokenize(ctx, buffer, false, false);
|
const auto line_inp = ::llama_tokenize(ctx, buffer, false, params.interactive_specials);
|
||||||
const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
|
const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
|
||||||
|
|
||||||
LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
|
LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
|
||||||
|
@ -1,8 +1,118 @@
|
|||||||
# perplexity
|
# Perplexity
|
||||||
|
|
||||||
TODO
|
The `perplexity` example can be used to calculate the so-called perplexity value of a language model over a given text corpus.
|
||||||
|
Perplexity measures how well the model can predict the next token, with lower values being better.
|
||||||
|
Note that perplexity is **not** directly comparable between models, especially if they use different tokenizers.
|
||||||
|
Also note that finetunes typically result in a higher perplexity value even though the human-rated quality of outputs increases.
|
||||||
|
|
||||||
|
Within llama.cpp the perplexity of base models is used primarily to judge the quality loss from e.g. quantized models vs. FP16.
|
||||||
|
The convention among contributors is to use the Wikitext-2 test set for testing unless noted otherwise (can be obtained with `scripts/get-wikitext-2.sh`).
|
||||||
|
|
||||||
|
By default, only the mean perplexity value and the corresponding uncertainty are calculated.
|
||||||
|
The uncertainty is determined empirically by assuming a Gaussian distribution of the "correct" logits per token and then applying error propagation.
|
||||||
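In generic notation (a sketch of the error propagation, not taken from the repository): with $\mathrm{NLL}_i$ the negative log-likelihood of the $i$-th "correct" token over $N$ evaluated positions,

$$
\mathrm{PPL} = \exp\!\left(\overline{\mathrm{NLL}}\right),\qquad
\overline{\mathrm{NLL}} = \frac{1}{N}\sum_{i=1}^{N}\mathrm{NLL}_i,\qquad
\sigma_{\overline{\mathrm{NLL}}} = \sqrt{\frac{\overline{\mathrm{NLL}^2}-\overline{\mathrm{NLL}}^{\,2}}{N-1}},\qquad
\sigma_{\mathrm{PPL}} \approx \mathrm{PPL}\cdot\sigma_{\overline{\mathrm{NLL}}} .
$$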
|
|
||||||
|
More statistics can be obtained by recording the logits from the FP16 version of a model.
|
||||||
|
To do this, supply `perplexity` with `--kl-divergence-base path/to/logit/binary/file.kld`.
|
||||||
|
The program will then record all logits and save them to the provided path in binary format.
|
||||||
|
**The logit file will be very large, 11 GiB for LLaMA 2 or 37 GiB for LLaMA 3 when using the Wikitext-2 test set.**
|
||||||
|
Once you have the file, supply `perplexity` with the quantized model, the logits file via `--kl-divergence-base`,
|
||||||
|
and finally the `--kl-divergence` argument to indicate that the program should calculate the so-called Kullback-Leibler divergence.
|
||||||
|
This is a measure of how similar the FP16 and the quantized logit distributions are, with a value of 0 indicating that the distributions are the same.
|
||||||
|
The uncertainty on the mean KL divergence is calculated by assuming the KL divergence per token follows a Gaussian distribution.
|
||||||
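As a rough illustration, the per-token value can be computed from the two logit vectors as in the following self-contained sketch (an example for clarity only; it does not reuse code or names from `perplexity.cpp`):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// KL(base || quantized) for a single token position, computed from raw logits.
static double kl_divergence_token(const std::vector<float> & logits_base,
                                  const std::vector<float> & logits_quant) {
    auto log_softmax = [](const std::vector<float> & logits) {
        const float max_logit = *std::max_element(logits.begin(), logits.end());
        double sum_exp = 0.0;
        for (float l : logits) sum_exp += std::exp(l - max_logit);
        std::vector<double> log_p(logits.size());
        for (size_t i = 0; i < logits.size(); ++i) {
            log_p[i] = (logits[i] - max_logit) - std::log(sum_exp);
        }
        return log_p;
    };

    const auto log_p = log_softmax(logits_base);   // FP16 reference distribution
    const auto log_q = log_softmax(logits_quant);  // quantized model distribution

    double kld = 0.0;
    for (size_t i = 0; i < log_p.size(); ++i) {
        kld += std::exp(log_p[i]) * (log_p[i] - log_q[i]);
    }
    return kld; // >= 0, and 0 only if the two distributions are identical
}

int main() {
    const std::vector<float> base  = {2.0f, 0.1f, -1.0f};
    const std::vector<float> quant = {1.8f, 0.3f, -0.9f};
    printf("per-token KLD: %.6f\n", kl_divergence_token(base, quant));
    return 0;
}
```

The per-token values are then averaged over the test set in the same way as the NLL above.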
|
|
||||||
|
In addition to the KL divergence the following statistics are calculated with `--kl-divergence`:
|
||||||
|
|
||||||
|
* Ratio of mean FP16 PPL and quantized PPL. Uncertainty is estimated on logits, then propagated. The logarithm of this metric is also calculated and printed; it is 0 if the logit distributions are the same.
|
||||||
|
* Difference of mean FP16 PPL and quantized PPL. Uncertainty is estimated on logits, then propagated.
|
||||||
|
* Mean change in "correct" token probability. Positive values mean the model gets better at prediction, negative values mean it gets worse.
|
||||||
|
* Pearson correlation coefficient of the "correct" token probabilities between models.
|
||||||
|
* Percentiles of change in "correct" token probability. Positive values mean the model gets better at prediction, negative values mean it gets worse. Can be used to judge noise vs. quality loss from quantization. If the percentiles are symmetric then the quantization is essentially just adding noise. If the negative values are significantly larger than the positive values then this indicates that the model is actually becoming worse from the quantization.
|
||||||
|
* The root mean square of the change in token probabilities. If you were to assume that the quantization simply causes Gaussian noise on the token probabilities then this would be the standard deviation of said noise. The uncertainty on the value is calculated by assuming that the change in token probabilities follows a Gaussian distribution. Related discussion: https://github.com/ggerganov/llama.cpp/discussions/2875 .
|
||||||
|
* Same top p: Percentage of how often the token was assigned the highest probability by both models. The uncertainty is calculated from the Gaussian approximation of the binomial distribution. (A short numerical sketch of these statistics follows this list.)
|
||||||
|
|
||||||
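As referenced above, the following toy sketch shows how the mean Δp, the RMS Δp and the "same top p" uncertainty can be derived from simple running sums (the numbers and names are made up for illustration):

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    // toy per-token changes in the "correct" token probability (quantized minus base)
    const std::vector<double> p_diff = {-0.02, 0.01, -0.05, 0.00, -0.01};
    // toy count of positions where both models assign the top probability to the same token
    const size_t n_same_top = 4;

    double sum = 0.0, sum2 = 0.0;
    for (double d : p_diff) {
        sum  += d;
        sum2 += d*d;
    }
    const size_t n = p_diff.size();

    const double mean_dp = sum / n;                                // mean change in probability
    const double rms_dp  = std::sqrt(sum2 / n);                    // RMS of the change ("noise" std. dev.)
    const double p_top   = double(n_same_top) / n;                 // "same top p"
    const double d_p_top = std::sqrt(p_top*(1.0 - p_top)/(n - 1)); // binomial (Gaussian approx.) uncertainty

    printf("mean dp = %+.4f, RMS dp = %.4f, same top p = %.3f +/- %.3f\n",
           mean_dp, rms_dp, p_top, d_p_top);
    return 0;
}
```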
|
## LLaMA 3 8b Scoreboard
|
||||||
|
|
||||||
|
Results are sorted by Kullback-Leibler divergence relative to FP16.
|
||||||
|
The "WT" importance matrices were created using varying numbers of Wikitext tokens and can be found [here](https://huggingface.co/JohannesGaessler/llama.cpp_importance_matrices/blob/main/imatrix-llama_3-8b-f16-2.7m_tokens.dat).
|
||||||
|
|
||||||
|
| Quantization | imatrix | Model size [GiB] | PPL | ΔPPL | KLD | Mean Δp | RMS Δp |
|
||||||
|
|--------------|---------|------------------|------------------------|------------------------|-----------------------|-------------------|------------------|
|
||||||
|
| f16 | None | 14.97 | 6.233160 ± 0.037828 | - | - | - | - |
|
||||||
|
| q8_0 | None | 7.96 | 6.234284 ± 0.037878 | 0.002650 ± 0.001006 | 0.001355 ± 0.000006 | -0.019 ± 0.003 % | 1.198 ± 0.007 % |
|
||||||
|
| q6_K | None | 6.14 | 6.253382 ± 0.038078 | 0.021748 ± 0.001852 | 0.005452 ± 0.000035 | -0.007 ± 0.006 % | 2.295 ± 0.019 % |
|
||||||
|
| q5_K_M | None | 5.33 | 6.288607 ± 0.038338 | 0.056974 ± 0.002598 | 0.010762 ± 0.000079 | -0.114 ± 0.008 % | 3.160 ± 0.031 % |
|
||||||
|
| q5_K_S | None | 5.21 | 6.336598 ± 0.038755 | 0.104964 ± 0.003331 | 0.016595 ± 0.000122 | -0.223 ± 0.010 % | 3.918 ± 0.036 % |
|
||||||
|
| q5_1 | None | 5.65 | 6.337857 ± 0.038677 | 0.106223 ± 0.003476 | 0.018045 ± 0.000139 | -0.287 ± 0.011 % | 4.123 ± 0.039 % |
|
||||||
|
| q5_0 | None | 5.21 | 6.363224 ± 0.038861 | 0.131591 ± 0.003894 | 0.022239 ± 0.000166 | -0.416 ± 0.012 % | 4.634 ± 0.043 % |
|
||||||
|
| q4_K_M | WT 10m | 4.58 | 6.382937 ± 0.039055 | 0.151303 ± 0.004429 | 0.028152 ± 0.000240 | -0.389 ± 0.014 % | 5.251 ± 0.049 % |
|
||||||
|
| q4_K_M | None | 4.58 | 6.407115 ± 0.039119 | 0.175482 ± 0.004620 | 0.031273 ± 0.000238 | -0.596 ± 0.014 % | 5.519 ± 0.050 % |
|
||||||
|
| q4_K_S | WT 10m | 4.37 | 6.409697 ± 0.039189 | 0.178064 ± 0.004744 | 0.031951 ± 0.000259 | -0.531 ± 0.015 % | 5.645 ± 0.051 % |
|
||||||
|
| iq4_NL | WT 10m | 4.35 | 6.455593 ± 0.039630 | 0.223959 ± 0.005201 | 0.035742 ± 0.000288 | -0.590 ± 0.016 % | 5.998 ± 0.054 % |
|
||||||
|
| iq4_XS | WT 10m | 4.14 | 6.459705 ± 0.039595 | 0.228071 ± 0.005207 | 0.036334 ± 0.000284 | -0.668 ± 0.016 % | 6.044 ± 0.054 % |
|
||||||
|
| q4_K_S | None | 4.37 | 6.500529 ± 0.039778 | 0.268895 ± 0.005638 | 0.043136 ± 0.000314 | -0.927 ± 0.017 % | 6.562 ± 0.055 % |
|
||||||
|
| q4_1 | None | 4.78 | 6.682737 ± 0.041285 | 0.451103 ± 0.008030 | 0.071683 ± 0.000505 | -0.927 ± 0.017 % | 8.512 ± 0.063 % |
|
||||||
|
| q4_0 | None | 4.34 | 6.700147 ± 0.041226 | 0.468514 ± 0.007951 | 0.071940 ± 0.000491 | -1.588 ± 0.022 % | 8.434 ± 0.061 % |
|
||||||
|
| q3_K_L | WT 10m | 4.03 | 6.671223 ± 0.041427 | 0.439590 ± 0.008154 | 0.073077 ± 0.000529 | -0.940 ± 0.023 % | 8.662 ± 0.064 % |
|
||||||
|
| q3_K_M | WT 10m | 3.74 | 6.734255 ± 0.041838 | 0.502622 ± 0.008901 | 0.084358 ± 0.000588 | -1.198 ± 0.024 % | 9.292 ± 0.065 % |
|
||||||
|
| q3_K_L | None | 4.03 | 6.787876 ± 0.042104 | 0.556242 ± 0.009171 | 0.087176 ± 0.000614 | -1.532 ± 0.025 % | 9.432 ± 0.067 % |
|
||||||
|
| q3_K_M | None | 3.74 | 6.888498 ± 0.042669 | 0.656864 ± 0.010071 | 0.101913 ± 0.000677 | -1.990 ± 0.026 % | 10.203 ± 0.068 % |
|
||||||
|
| iq3_M | WT 10m | 3.53 | 6.898327 ± 0.041643 | 0.666694 ± 0.009449 | 0.102534 ± 0.000663 | -3.178 ± 0.026 % | 10.513 ± 0.066 % |
|
||||||
|
| iq3_S | WT 10m | 3.42 | 6.965501 ± 0.042406 | 0.733867 ± 0.010245 | 0.111278 ± 0.000710 | -3.066 ± 0.027 % | 10.845 ± 0.068 % |
|
||||||
|
| iq3_XS | WT 10m | 3.28 | 7.163043 ± 0.043772 | 0.931409 ± 0.012084 | 0.138693 ± 0.000857 | -3.667 ± 0.031 % | 12.148 ± 0.070 % |
|
||||||
|
| iq3_XXS | WT 10m | 3.05 | 7.458436 ± 0.046404 | 1.226803 ± 0.015234 | 0.183625 ± 0.001042 | -3.918 ± 0.035 % | 13.836 ± 0.074 % |
|
||||||
|
| q3_K_S | WT 10m | 3.41 | 7.602878 ± 0.046848 | 1.371244 ± 0.015688 | 0.199821 ± 0.001008 | -5.046 ± 0.037 % | 14.980 ± 0.070 % |
|
||||||
|
| q3_K_S | None | 3.41 | 7.863786 ± 0.048885 | 1.632152 ± 0.017733 | 0.228217 ± 0.001079 | -5.604 ± 0.038 % | 15.541 ± 0.070 % |
|
||||||
|
| iq2_M | WT 10m | 2.74 | 8.600799 ± 0.055124 | 2.369166 ± 0.025244 | 0.325989 ± 0.00160 | -6.463 ± 0.046 % | 18.519 ± 0.080 % |
|
||||||
|
| q2_K | WT 10k | 2.96 | 8.652290 ± 0.055572 | 2.420657 ± 0.025587 | 0.331393 ± 0.001562 | -6.606 ± 0.046 % | 18.790 ± 0.078 % |
|
||||||
|
| q2_K | WT 100k | 2.96 | 8.641993 ± 0.055406 | 2.410359 ± 0.025495 | 0.331672 ± 0.001569 | -6.628 ± 0.047 % | 18.856 ± 0.078 % |
|
||||||
|
| q2_K | WT 10m | 2.96 | 8.647825 ± 0.055610 | 2.416191 ± 0.025683 | 0.332223 ± 0.001572 | -6.500 ± 0.047 % | 18.881 ± 0.078 % |
|
||||||
|
| q2_K | WT 1m | 2.96 | 8.674365 ± 0.055743 | 2.442732 ± 0.025843 | 0.335308 ± 0.001576 | -6.634 ± 0.047 % | 19.009 ± 0.079 % |
|
||||||
|
| q2_K | WT 1k | 2.96 | 8.682605 ± 0.055916 | 2.450972 ± 0.026069 | 0.337093 ± 0.001596 | -6.596 ± 0.047 % | 18.977 ± 0.079 % |
|
||||||
|
| q2_K_S | WT 10m | 2.96 | 9.323778 ± 0.061551 | 3.092145 ± 0.031914 | 0.403360 ± 0.001787 | -7.131 ± 0.049 % | 20.050 ± 0.081 % |
|
||||||
|
| q2_K_S | WT 1m | 2.96 | 9.329321 ± 0.061378 | 3.097688 ± 0.031816 | 0.403590 ± 0.001797 | -7.289 ± 0.049 % | 20.123 ± 0.081 % |
|
||||||
|
| q2_K_S | WT 100k | 2.96 | 9.362973 ± 0.061740 | 3.131339 ± 0.032169 | 0.408367 ± 0.001802 | -7.198 ± 0.050 % | 20.132 ± 0.081 % |
|
||||||
|
| q2_K_S | WT 10k | 2.96 | 9.376479 ± 0.062045 | 3.144846 ± 0.032464 | 0.408662 ± 0.001819 | -7.141 ± 0.050 % | 20.120 ± 0.081 % |
|
||||||
|
| q2_K_S | WT 1k | 2.96 | 9.415200 ± 0.062475 | 3.183567 ± 0.032993 | 0.415865 ± 0.001846 | -7.153 ± 0.050 % | 20.311 ± 0.082 % |
|
||||||
|
| iq2_S | WT 10m | 2.56 | 9.650781 ± 0.063209 | 3.419148 ± 0.034017 | 0.439197 ± 0.001976 | -8.319 ± 0.052 % | 21.491 ± 0.083 % |
|
||||||
|
| q2_K | None | 2.96 | 9.751568 ± 0.063312 | 3.519934 ± 0.033863 | 0.445132 ± 0.001835 | -9.123 ± 0.051 % | 21.421 ± 0.079 % |
|
||||||
|
| iq2_XS | WT 10m | 2.43 | 10.761424 ± 0.071056 | 4.529791 ± 0.042229 | 0.546290 ± 0.002133 | -10.576 ± 0.056 % | 23.872 ± 0.082 % |
|
||||||
|
| iq2_XXS | WT 10m | 2.24 | 14.091782 ± 0.098396 | 7.860148 ± 0.070752 | 0.812022 ± 0.002741 | -14.363 ± 0.065 % | 28.576 ± 0.084 % |
|
||||||
|
| iq1_M | WT 10m | 2.01 | 25.493722 ± 0.177903 | 19.262089 ± 0.152396 | 1.393084 ± 0.003529 | -24.672 ± 0.077 % | 38.287 ± 0.084 % |
|
||||||
|
| iq1_S | WT 1m | 1.88 | 58.097760 ± 0.438604 | 51.866126 ± 0.416604 | 2.211278 ± 0.004688 | -32.471 ± 0.087 % | 46.418 ± 0.085 % |
|
||||||
|
| iq1_S | WT 1k | 1.88 | 58.267851 ± 0.446208 | 52.036218 ± 0.424373 | 2.214858 ± 0.004778 | -31.880 ± 0.089 % | 46.330 ± 0.086 % |
|
||||||
|
| iq1_S | WT 100k | 1.88 | 58.581498 ± 0.453145 | 52.349864 ± 0.431360 | 2.220834 ± 0.004818 | -32.261 ± 0.089 % | 46.002 ± 0.086 % |
|
||||||
|
| iq1_S | WT 10m | 1.88 | 60.694593 ± 0.471290 | 54.462959 ± 0.449644 | 2.254554 ± 0.004868 | -31.973 ± 0.088 % | 46.271 ± 0.086 % |
|
||||||
|
| iq1_S | WT 10k | 1.88 | 63.221324 ± 0.493077 | 56.989691 ± 0.471423 | 2.293527 ± 0.004885 | -32.261 ± 0.089 % | 46.562 ± 0.086 % |
|
||||||
|
|
||||||
|
There seems to be no consistent improvement from using more Wikitext tokens for the importance matrix.
|
||||||
|
K-quants score better on mean Δp relative to the legacy quants than e.g. the KL divergence would suggest.
|
||||||
|
|
||||||
|
## LLaMA 2 vs. LLaMA 3 Quantization comparison
|
||||||
|
|
||||||
|
| Metric | L2 7b q2_K | L3 8b q2_K | L2 7b q4_K_M | L3 8b q4_K_M | L2 7b q6_K | L3 8b q6_K | L2 7b q8_0 | L3 8b q8_0 |
|
||||||
|
|-----------------|---------------------|---------------------|---------------------|---------------------|---------------------|---------------------|---------------------|---------------------|
|
||||||
|
| Mean PPL | 5.794552 ± 0.032298 | 9.751568 ± 0.063312 | 5.877078 ± 0.032781 | 6.407115 ± 0.039119 | 5.808494 ± 0.032425 | 6.253382 ± 0.038078 | 5.798542 ± 0.032366 | 6.234284 ± 0.037878 |
|
||||||
|
| Mean PPL ratio | 1.107955 ± 0.001427 | 1.564849 ± 0.004525 | 1.014242 ± 0.000432 | 1.028160 ± 0.000723 | 1.002406 ± 0.000191 | 1.003490 ± 0.000296 | 1.000689 ± 0.000107 | 1.000425 ± 0.000161 |
|
||||||
|
| Mean ΔPPL | 0.625552 ± 0.008725 | 3.519934 ± 0.033863 | 0.082526 ± 0.002530 | 0.175482 ± 0.004620 | 0.013941 ± 0.001110 | 0.021748 ± 0.001852 | 0.003990 ± 0.000624 | 0.002650 ± 0.001006 |
|
||||||
|
| PPL correlation | 97.36% | 89.62% | 99.71% | 99.34% | 99.94% | 99.88% | 99.98% | 99.96% |
|
||||||
|
| Mean KLD | 0.108903 ± 0.000645 | 0.445132 ± 0.001835 | 0.012686 ± 0.000079 | 0.031273 ± 0.000238 | 0.002098 ± 0.000014 | 0.005452 ± 0.000035 | 0.000369 ± 0.000007 | 0.001355 ± 0.000006 |
|
||||||
|
| Mean Δp | -2.710 ± 0.023 % | -9.123 ± 0.051 % | -0.416 ± 0.008 % | -0.596 ± 0.014 % | -0.035 ± 0.003 % | -0.007 ± 0.006 % | -0.005 ± 0.002 % | -0.019 ± 0.003 % |
|
||||||
|
| Maximum Δp | 85.136% | 94.268% | 45.209% | 95.054% | 23.593% | 53.601% | 43.925% | 28.734% |
|
||||||
|
| 99.9% Δp | 37.184% | 50.003% | 17.461% | 27.084% | 7.798% | 13.613% | 3.387% | 6.402% |
|
||||||
|
| 99.0% Δp | 18.131% | 25.875% | 7.798% | 12.084% | 3.838% | 6.407% | 1.867% | 3.544% |
|
||||||
|
| Median Δp | -0.391% | -2.476% | -0.026% | -0.024% | -0.001% | 0.000% | -0.000% | -0.000% |
|
||||||
|
| 1.0% Δp | -39.762% | -87.173% | -11.433% | -19.567% | -4.222% | -6.767% | -1.862% | -3.698% |
|
||||||
|
| 0.1% Δp | -79.002% | -98.897% | -26.433% | -56.054% | -9.091% | -16.584% | -3.252% | -6.579% |
|
||||||
|
| Minimum Δp | -99.915% | -99.965% | -83.383% | -98.699% | -43.142% | -68.487% | -9.343% | -24.301% |
|
||||||
|
| RMS Δp | 9.762 ± 0.053 % | 21.421 ± 0.079 % | 3.252 ± 0.024 % | 5.519 ± 0.050 % | 1.339 ± 0.010 % | 2.295 ± 0.019 % | 0.618 ± 0.011 % | 1.198 ± 0.007 % |
|
||||||
|
| Same top p | 85.584 ± 0.086 % | 71.138 ± 0.119 % | 94.665 ± 0.055 % | 91.901 ± 0.072 % | 97.520 ± 0.038 % | 96.031 ± 0.051 % | 98.846 ± 0.026 % | 97.674 ± 0.040 % |
|
||||||
|
|
||||||
|
|
||||||
|
## Old Numbers
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Llama 2 70B Scoreboard</summary>
|
||||||
|
|
||||||
## Llama 2 70B Scorechart
|
|
||||||
| Quantization | Model size (GiB) | Perplexity | Delta to fp16 |
|
| Quantization | Model size (GiB) | Perplexity | Delta to fp16 |
|
||||||
|--------------|------------------|------------|---------------|
|
|--------------|------------------|------------|---------------|
|
||||||
| Q4_0 | 36.20 | 3.5550 | 3.61% |
|
| Q4_0 | 36.20 | 3.5550 | 3.61% |
|
||||||
@ -18,3 +128,5 @@ TODO
|
|||||||
| Q5_K_M | 45.41 | 3.4451 | 0.40% |
|
| Q5_K_M | 45.41 | 3.4451 | 0.40% |
|
||||||
| Q6_K | 52.70 | 3.4367 | 0.16% |
|
| Q6_K | 52.70 | 3.4367 | 0.16% |
|
||||||
| fp16 | 128.5 | 3.4313 | - |
|
| fp16 | 128.5 | 3.4313 | - |
|
||||||
|
|
||||||
|
</details>
|
||||||
|
@ -216,17 +216,22 @@ static void process_logits(std::ostream& out, int n_vocab, const float * logits,
|
|||||||
}
|
}
|
||||||
|
|
||||||
struct kl_divergence_result {
|
struct kl_divergence_result {
|
||||||
double sum_nll = 0;
|
double sum_nll = 0.0;
|
||||||
double sum_nll2 = 0;
|
double sum_nll2 = 0.0;
|
||||||
double sum_kld = 0;
|
double sum_nll_base = 0.0;
|
||||||
double sum_kld2 = 0;
|
double sum_nll_base2 = 0.0;
|
||||||
double sum_nll_diff = 0;
|
double sum_nll_nll_base = 0.0;
|
||||||
double sum_nll_diff2 = 0;
|
double sum_kld = 0.0;
|
||||||
size_t n_same_top = 0;
|
double sum_kld2 = 0.0;
|
||||||
size_t count = 0;
|
double sum_p_diff = 0.0;
|
||||||
|
double sum_p_diff2 = 0.0;
|
||||||
|
double sum_p_diff4 = 0.0;
|
||||||
|
float max_p_diff = 0.0f;
|
||||||
|
size_t n_same_top = 0.0;
|
||||||
|
size_t count = 0.0;
|
||||||
};
|
};
|
||||||
|
|
||||||
static double log_softmax(int n_vocab, const float * logits, const uint16_t * base_log_prob, int tok, kl_divergence_result & kld) {
|
static std::pair<double, float> log_softmax(int n_vocab, const float * logits, const uint16_t * base_log_prob, int tok, kl_divergence_result & kld) {
|
||||||
float max_logit = logits[0];
|
float max_logit = logits[0];
|
||||||
int imax = 0;
|
int imax = 0;
|
||||||
for (int i = 1; i < n_vocab; ++i) {
|
for (int i = 1; i < n_vocab; ++i) {
|
||||||
@ -244,12 +249,17 @@ static double log_softmax(int n_vocab, const float * logits, const uint16_t * ba
|
|||||||
const float scale = d[0];
|
const float scale = d[0];
|
||||||
const float min_log_prob = d[1];
|
const float min_log_prob = d[1];
|
||||||
base_log_prob += 4;
|
base_log_prob += 4;
|
||||||
float nll = max_logit + log_sum_exp - logits[tok];
|
|
||||||
|
const float nll = max_logit + log_sum_exp - logits[tok];
|
||||||
kld.sum_nll += nll;
|
kld.sum_nll += nll;
|
||||||
kld.sum_nll2 += nll*nll;
|
kld.sum_nll2 += nll*nll;
|
||||||
nll += (scale*base_log_prob[tok] + min_log_prob);
|
|
||||||
kld.sum_nll_diff += nll;
|
const float nll_base = -(scale*base_log_prob[tok] + min_log_prob);
|
||||||
kld.sum_nll_diff2 += nll*nll;
|
kld.sum_nll_base += nll_base;
|
||||||
|
kld.sum_nll_base2 += nll_base*nll_base;
|
||||||
|
|
||||||
|
kld.sum_nll_nll_base += nll*nll_base;
|
||||||
|
|
||||||
max_logit += log_sum_exp;
|
max_logit += log_sum_exp;
|
||||||
double sum = 0;
|
double sum = 0;
|
||||||
int imax_base = -1;
|
int imax_base = -1;
|
||||||
@ -269,34 +279,50 @@ static double log_softmax(int n_vocab, const float * logits, const uint16_t * ba
|
|||||||
kld.sum_kld2 += sum*sum;
|
kld.sum_kld2 += sum*sum;
|
||||||
++kld.count;
|
++kld.count;
|
||||||
if (imax == imax_base) ++kld.n_same_top;
|
if (imax == imax_base) ++kld.n_same_top;
|
||||||
return sum;
|
|
||||||
|
const float p_base = expf(-nll_base);
|
||||||
|
const float p = expf(-nll);
|
||||||
|
const float p_diff = p - p_base;
|
||||||
|
kld.sum_p_diff += p_diff;
|
||||||
|
const double p_diff2 = p_diff*p_diff;
|
||||||
|
kld.sum_p_diff2 += p_diff2;
|
||||||
|
kld.sum_p_diff4 += p_diff2*p_diff2;
|
||||||
|
kld.max_p_diff = std::max(kld.max_p_diff, std::fabs(p_diff));
|
||||||
|
|
||||||
|
return std::make_pair(sum, p_diff);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void process_logits(int n_vocab, const float * logits, const int * tokens, int n_token,
|
static void process_logits(int n_vocab, const float * logits, const int * tokens, int n_token,
|
||||||
std::vector<std::thread> & workers, const std::vector<uint16_t> & base_log_probs, kl_divergence_result & kld,
|
std::vector<std::thread> & workers, const std::vector<uint16_t> & base_log_probs, kl_divergence_result & kld,
|
||||||
float * kld_values) {
|
float * kld_values, float * p_diff_values) {
|
||||||
std::mutex mutex;
|
std::mutex mutex;
|
||||||
const int nv = 2*((n_vocab + 1)/2) + 4;
|
const int nv = 2*((n_vocab + 1)/2) + 4;
|
||||||
int counter = 0;
|
int counter = 0;
|
||||||
auto compute = [&mutex, &counter, &base_log_probs, &kld, n_vocab, logits, tokens, n_token, nv, kld_values] () {
|
auto compute = [&mutex, &counter, &base_log_probs, &kld, n_vocab, logits, tokens, n_token, nv, kld_values, p_diff_values] () {
|
||||||
kl_divergence_result local_kld;
|
kl_divergence_result local_kld;
|
||||||
while (true) {
|
while (true) {
|
||||||
std::unique_lock<std::mutex> lock(mutex);
|
std::unique_lock<std::mutex> lock(mutex);
|
||||||
int i = counter++;
|
int i = counter++;
|
||||||
if (i >= n_token) {
|
if (i >= n_token) {
|
||||||
kld.sum_nll += local_kld.sum_nll;
|
kld.sum_nll += local_kld.sum_nll;
|
||||||
kld.sum_nll2 += local_kld.sum_nll2;
|
kld.sum_nll2 += local_kld.sum_nll2;
|
||||||
kld.sum_kld += local_kld.sum_kld;
|
kld.sum_nll_base += local_kld.sum_nll_base;
|
||||||
kld.sum_kld2 += local_kld.sum_kld2;
|
kld.sum_nll_base2 += local_kld.sum_nll_base2;
|
||||||
kld.sum_nll_diff += local_kld.sum_nll_diff;
|
kld.sum_nll_nll_base += local_kld.sum_nll_nll_base;
|
||||||
kld.sum_nll_diff2 += local_kld.sum_nll_diff2;
|
kld.sum_kld += local_kld.sum_kld;
|
||||||
kld.n_same_top += local_kld.n_same_top;
|
kld.sum_kld2 += local_kld.sum_kld2;
|
||||||
kld.count += local_kld.count;
|
kld.sum_p_diff += local_kld.sum_p_diff;
|
||||||
|
kld.sum_p_diff2 += local_kld.sum_p_diff2;
|
||||||
|
kld.sum_p_diff4 += local_kld.sum_p_diff4;
|
||||||
|
kld.n_same_top += local_kld.n_same_top;
|
||||||
|
kld.max_p_diff = std::max(kld.max_p_diff, local_kld.max_p_diff);
|
||||||
|
kld.count += local_kld.count;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
lock.unlock();
|
lock.unlock();
|
||||||
double v = log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
|
std::pair<double, float> v = log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
|
||||||
kld_values[i] = (float)v;
|
kld_values[i] = (float)v.first;
|
||||||
|
p_diff_values[i] = v.second;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
for (auto & w : workers) {
|
for (auto & w : workers) {
|
||||||
@ -1711,7 +1737,8 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
|||||||
GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);
|
GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);
|
||||||
|
|
||||||
std::vector<uint16_t> log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv);
|
std::vector<uint16_t> log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv);
|
||||||
std::vector<float> kld_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk);
|
std::vector<float> kld_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk);
|
||||||
|
std::vector<float> p_diff_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk);
|
||||||
std::vector<float> logits;
|
std::vector<float> logits;
|
||||||
if (num_batches > 1) {
|
if (num_batches > 1) {
|
||||||
logits.reserve(n_ctx * n_vocab);
|
logits.reserve(n_ctx * n_vocab);
|
||||||
@ -1728,9 +1755,18 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
|||||||
df = df > 0 && count > 10 ? sqrt(df/(count-1)) : 0.;
|
df = df > 0 && count > 10 ? sqrt(df/(count-1)) : 0.;
|
||||||
return std::make_pair(f, df);
|
return std::make_pair(f, df);
|
||||||
};
|
};
|
||||||
|
auto covariance = [] (double suma, double sumb, double sumab, size_t count) {
|
||||||
|
if (count < 10) {
|
||||||
|
return 0.0;
|
||||||
|
}
|
||||||
|
double var = sumab/count - (suma/count)*(sumb/count);
|
||||||
|
var /= count - 1;
|
||||||
|
return var;
|
||||||
|
};
|
||||||
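As a side note on the `covariance` helper introduced above (a sketch of the underlying statistics, not text from the patch itself): it supplies the cross term in the standard error-propagation formula for a difference of correlated quantities,

$$
\sigma^2\!\big(\ln \mathrm{PPL}(Q) - \ln \mathrm{PPL}(\mathrm{base})\big)
= \sigma^2\!\big(\ln \mathrm{PPL}(Q)\big)
+ \sigma^2\!\big(\ln \mathrm{PPL}(\mathrm{base})\big)
- 2\,\mathrm{Cov}\!\big(\ln \mathrm{PPL}(Q),\, \ln \mathrm{PPL}(\mathrm{base})\big),
$$

which is the expression evaluated for `log_ppl_ratio_unc` further down in this diff.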
|
|
||||||
kl_divergence_result kld;
|
kl_divergence_result kld;
|
||||||
auto kld_ptr = kld_values.data();
|
auto kld_ptr = kld_values.data();
|
||||||
|
auto p_diff_ptr = p_diff_values.data();
|
||||||
|
|
||||||
for (int i = 0; i < n_chunk; ++i) {
|
for (int i = 0; i < n_chunk; ++i) {
|
||||||
const int start = i * n_ctx;
|
const int start = i * n_ctx;
|
||||||
@ -1785,24 +1821,42 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
|||||||
}
|
}
|
||||||
fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
|
fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
|
||||||
|
|
||||||
printf("\nchunk PPL ln(PPL(Q)/PPL(base)) KL-Divergence Same top\n");
|
printf("\nchunk PPL ln(PPL(Q)/PPL(base)) KL Divergence Δp RMS Same top p\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
const int first = n_ctx/2;
|
const int first = n_ctx/2;
|
||||||
const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
|
const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
|
||||||
process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
|
process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
|
||||||
workers, log_probs_uint16, kld, kld_ptr);
|
workers, log_probs_uint16, kld, kld_ptr, p_diff_ptr);
|
||||||
kld_ptr += n_ctx - 1 - first;
|
p_diff_ptr += n_ctx - 1 - first;
|
||||||
|
kld_ptr += n_ctx - 1 - first;
|
||||||
|
|
||||||
auto ppl = mean_and_uncertainty(kld.sum_nll, kld.sum_nll2, kld.count);
|
printf("%4d", i+1);
|
||||||
auto log_ppl_ratio = mean_and_uncertainty(kld.sum_nll_diff, kld.sum_nll_diff2, kld.count);
|
|
||||||
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
|
|
||||||
auto p_top = 1.*kld.n_same_top/kld.count;
|
|
||||||
auto d_p_top = sqrt(p_top*(1 - p_top)/(kld.count - 1));
|
|
||||||
|
|
||||||
printf("%4d %10.4lf %10.5lf ± %10.5f %10.5f ± %10.5lf %.5f ± %.5f\n", i+1, exp(ppl.first),
|
auto log_ppl = mean_and_uncertainty(kld.sum_nll, kld.sum_nll2, kld.count);
|
||||||
log_ppl_ratio.first, log_ppl_ratio.second, kl_div.first, kl_div.second,
|
const double ppl_val = exp(log_ppl.first);
|
||||||
p_top, d_p_top);
|
const double ppl_unc = ppl_val * log_ppl.second; // ppl_unc = sqrt( (dexp(x) / dx) ** 2 * log_ppl.second ** 2 )
|
||||||
|
printf(" %9.4lf ± %9.4lf", ppl_val, ppl_unc);
|
||||||
|
|
||||||
|
auto log_ppl_base = mean_and_uncertainty(kld.sum_nll_base, kld.sum_nll_base2, kld.count);
|
||||||
|
const double log_ppl_cov = covariance(kld.sum_nll, kld.sum_nll_base, kld.sum_nll_nll_base, kld.count);
|
||||||
|
const double log_ppl_ratio_val = log_ppl.first - log_ppl_base.first;
|
||||||
|
const double log_ppl_ratio_unc = sqrt(log_ppl.second*log_ppl.second + log_ppl_base.second*log_ppl_base.second - 2.0*log_ppl_cov);
|
||||||
|
printf(" %10.5lf ± %10.5lf", log_ppl_ratio_val, log_ppl_ratio_unc);
|
||||||
|
|
||||||
|
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
|
||||||
|
printf(" %10.5lf ± %10.5lf", kl_div.first, kl_div.second);
|
||||||
|
|
||||||
|
auto p_diff_mse = mean_and_uncertainty(kld.sum_p_diff2, kld.sum_p_diff4, kld.count);
|
||||||
|
const double p_diff_rms_val = sqrt(p_diff_mse.first);
|
||||||
|
const double p_diff_rms_unc = 0.5/p_diff_rms_val * p_diff_mse.second;
|
||||||
|
printf(" %6.3lf ± %6.3lf %%", 100.0*p_diff_rms_val, 100.0*p_diff_rms_unc);
|
||||||
|
|
||||||
|
double p_top_val = 1.*kld.n_same_top/kld.count;
|
||||||
|
double p_top_unc = sqrt(p_top_val*(1 - p_top_val)/(kld.count - 1));
|
||||||
|
printf(" %6.3lf ± %6.3lf %%", 100.0*p_top_val, 100.0*p_top_unc);
|
||||||
|
|
||||||
|
printf("\n");
|
||||||
|
|
||||||
fflush(stdout);
|
fflush(stdout);
|
||||||
|
|
||||||
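As a side note, the `0.5/p_diff_rms_val` factor in the RMS Δp uncertainty above is first-order error propagation through the square root (with $x$ the mean squared Δp):

$$
\sigma_{\sqrt{x}} \approx \left|\frac{\mathrm{d}\sqrt{x}}{\mathrm{d}x}\right|\,\sigma_x = \frac{\sigma_x}{2\sqrt{x}} .
$$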
@ -1813,31 +1867,97 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
|||||||
if (kld.count < 100) return; // we do not wish to do statistics on so few values
|
if (kld.count < 100) return; // we do not wish to do statistics on so few values
|
||||||
|
|
||||||
std::sort(kld_values.begin(), kld_values.end());
|
std::sort(kld_values.begin(), kld_values.end());
|
||||||
|
std::sort(p_diff_values.begin(), p_diff_values.end());
|
||||||
|
|
||||||
printf("===== KL-divergence statistics\n");
|
printf("====== Perplexity statistics ======\n");
|
||||||
|
|
||||||
|
auto log_ppl = mean_and_uncertainty(kld.sum_nll, kld.sum_nll2, kld.count);
|
||||||
|
const double ppl_val = exp(log_ppl.first);
|
||||||
|
const double ppl_unc = ppl_val * log_ppl.second; // ppl_unc = sqrt( (dexp(x) / dx) ** 2 * log_ppl.second ** 2 )
|
||||||
|
printf("Mean PPL(Q) : %10.6lf ± %10.6lf\n", ppl_val, ppl_unc);
|
||||||
|
|
||||||
|
auto log_ppl_base = mean_and_uncertainty(kld.sum_nll_base, kld.sum_nll_base2, kld.count);
|
||||||
|
const double ppl_base_val = exp(log_ppl_base.first);
|
||||||
|
const double ppl_base_unc = ppl_base_val * log_ppl_base.second; // ppl_base_unc = sqrt( (dexp(x) / dx) ** 2 * log_ppl_base.second ** 2 )
|
||||||
|
printf("Mean PPL(base) : %10.6lf ± %10.6lf\n", ppl_base_val, ppl_base_unc);
|
||||||
|
|
||||||
|
const double log_ppl_cov = covariance(kld.sum_nll, kld.sum_nll_base, kld.sum_nll_nll_base, kld.count);
|
||||||
|
// printf("Cov(ln(PPL(Q)), ln(PPL(base))): %10.6lf\n", log_ppl_cov);
|
||||||
|
const double log_ppl_cor = log_ppl_cov / (log_ppl.second*log_ppl_base.second);
|
||||||
|
printf("Cor(ln(PPL(Q)), ln(PPL(base))): %6.2lf%%\n", 100.0*log_ppl_cor);
|
||||||
|
|
||||||
|
const double log_ppl_ratio_val = log_ppl.first - log_ppl_base.first;
|
||||||
|
const double log_ppl_ratio_unc = sqrt(log_ppl.second*log_ppl.second + log_ppl_base.second*log_ppl_base.second - 2.0*log_ppl_cov);
|
||||||
|
printf("Mean ln(PPL(Q)/PPL(base)) : %10.6lf ± %10.6lf\n", log_ppl_ratio_val, log_ppl_ratio_unc);
|
||||||
|
|
||||||
|
const double ppl_ratio_val = exp(log_ppl_ratio_val);
|
||||||
|
const double ppl_ratio_unc = ppl_ratio_val * log_ppl_ratio_unc; // ppl_ratio_unc = sqrt( (dexp(x) / dx) ** 2 * log_ppl_ratio.second ** 2 )
|
||||||
|
printf("Mean PPL(Q)/PPL(base) : %10.6lf ± %10.6lf\n", ppl_ratio_val, ppl_ratio_unc);
|
||||||
|
|
||||||
|
const double ppl_cov = ppl_val * ppl_base_val * log_ppl_cov;
|
||||||
|
const double ppl_diff_val = ppl_val - ppl_base_val;
|
||||||
|
const double ppl_diff_unc = sqrt(ppl_unc*ppl_unc + ppl_base_unc*ppl_base_unc - 2.0*ppl_cov);
|
||||||
|
printf("Mean PPL(Q)-PPL(base) : %10.6lf ± %10.6lf\n", ppl_diff_val, ppl_diff_unc);
|
||||||
|
|
||||||
|
printf("\n");
|
||||||
|
|
||||||
|
printf("====== KL divergence statistics ======\n");
|
||||||
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
|
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
|
||||||
printf("Average: %10.6f ±%10.6lf\n", kl_div.first, kl_div.second);
|
printf("Mean KLD: %10.6lf ± %10.6lf\n", kl_div.first, kl_div.second);
|
||||||
auto kld_median = kld_values.size()%2 == 0 ? 0.5f*(kld_values[kld_values.size()/2] + kld_values[kld_values.size()/2-1])
|
auto kld_median = kld_values.size()%2 == 0 ? 0.5f*(kld_values[kld_values.size()/2] + kld_values[kld_values.size()/2-1])
|
||||||
: kld_values[kld_values.size()/2];
|
: kld_values[kld_values.size()/2];
|
||||||
printf("Median : %10.6f\n", kld_median);
|
|
||||||
|
|
||||||
auto percentile = [&kld_values] (float fraction) {
|
auto percentile = [] (std::vector<float> values, float fraction) {
|
||||||
if (fraction <= 0) return kld_values.front();
|
if (fraction <= 0) return values.front();
|
||||||
if (fraction >= 1) return kld_values.back();
|
if (fraction >= 1) return values.back();
|
||||||
float p = fraction*(kld_values.size() - 1);
|
float p = fraction*(values.size() - 1);
|
||||||
size_t ip = size_t(p); p -= ip;
|
size_t ip = size_t(p); p -= ip;
|
||||||
return (1 - p)*kld_values[ip] + p*kld_values[std::min(ip+1, kld_values.size()-1)];
|
return (1 - p)*values[ip] + p*values[std::min(ip+1, values.size()-1)];
|
||||||
};
|
};
|
||||||
|
|
||||||
printf("Maximum: %10.6f\n", kld_values.back());
|
printf("Maximum KLD: %10.6f\n", kld_values.back());
|
||||||
printf("KLD_99 : %10.6f\n", percentile(0.99f));
|
printf("99.9%% KLD: %10.6f\n", percentile(kld_values, 0.999f));
|
||||||
printf("KLD_95 : %10.6f\n", percentile(0.95f));
|
printf("99.0%% KLD: %10.6f\n", percentile(kld_values, 0.990f));
|
||||||
printf("KLD_90 : %10.6f\n", percentile(0.90f));
|
printf("99.0%% KLD: %10.6f\n", percentile(kld_values, 0.990f));
|
||||||
|
printf("Median KLD: %10.6f\n", kld_median);
|
||||||
|
printf("10.0%% KLD: %10.6f\n", percentile(kld_values, 0.100f));
|
||||||
|
printf(" 5.0%% KLD: %10.6f\n", percentile(kld_values, 0.050f));
|
||||||
|
printf(" 1.0%% KLD: %10.6f\n", percentile(kld_values, 0.010f));
|
||||||
|
printf("Minimum KLD: %10.6f\n", kld_values.front());
|
||||||
|
|
||||||
printf("Minimum: %10.6f\n", kld_values.front());
|
printf("\n");
|
||||||
printf("KLD_01 : %10.6f\n", percentile(0.01f));
|
|
||||||
printf("KLD_05 : %10.6f\n", percentile(0.05f));
|
printf("====== Token probability statistics ======\n");
|
||||||
printf("KLD_10 : %10.6f\n", percentile(0.10f));
|
|
||||||
|
auto p_diff = mean_and_uncertainty(kld.sum_p_diff, kld.sum_p_diff2, kld.count);
|
||||||
|
printf("Mean Δp: %6.3lf ± %5.3lf %%\n", 100.0*p_diff.first, 100.0*p_diff.second);
|
||||||
|
|
||||||
|
auto p_diff_median = p_diff_values.size()%2 == 0 ? 0.5f*(p_diff_values[p_diff_values.size()/2] + p_diff_values[p_diff_values.size()/2-1])
|
||||||
|
: p_diff_values[p_diff_values.size()/2];
|
||||||
|
|
||||||
|
printf("Maximum Δp: %6.3lf%%\n", 100.0*p_diff_values.back());
|
||||||
|
printf("99.9%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.999f));
|
||||||
|
printf("99.0%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.990f));
|
||||||
|
printf("95.0%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.950f));
|
||||||
|
printf("90.0%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.900f));
|
||||||
|
printf("75.0%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.750f));
|
||||||
|
printf("Median Δp: %6.3lf%%\n", 100.0*p_diff_median);
|
||||||
|
printf("25.0%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.250f));
|
||||||
|
printf("10.0%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.100f));
|
||||||
|
printf(" 5.0%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.050f));
|
||||||
|
printf(" 1.0%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.010f));
|
||||||
|
printf(" 0.1%% Δp: %6.3lf%%\n", 100.0*percentile(p_diff_values, 0.001f));
|
||||||
|
printf("Minimum Δp: %6.3lf%%\n", 100.0*p_diff_values.front());
|
||||||
|
|
||||||
|
auto p_diff_mse = mean_and_uncertainty(kld.sum_p_diff2, kld.sum_p_diff4, kld.count);
|
||||||
|
// printf("MSE Δp : %10.6lf ± %10.6lf\n", p_diff_mse.first, p_diff_mse.second);
|
||||||
|
|
||||||
|
const double p_diff_rms_val = sqrt(p_diff_mse.first);
|
||||||
|
const double p_diff_rms_unc = 0.5/p_diff_rms_val * p_diff_mse.second;
|
||||||
|
printf("RMS Δp : %6.3lf ± %5.3lf %%\n", 100.0*p_diff_rms_val, 100.0*p_diff_rms_unc);
|
||||||
|
|
||||||
|
const double same_top_p = 1.0*kld.n_same_top/kld.count;
|
||||||
|
printf("Same top p: %6.3lf ± %5.3lf %%\n", 100.0*same_top_p, 100.0*sqrt(same_top_p*(1.0 - same_top_p)/(kld.count - 1)));
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -23,7 +23,7 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
struct quantize_stats_params {
|
struct quantize_stats_params {
|
||||||
std::string model = "models/7B/ggml-model-f16.gguf";
|
std::string model = DEFAULT_MODEL_PATH;
|
||||||
bool verbose = false;
|
bool verbose = false;
|
||||||
bool per_layer_stats = false;
|
bool per_layer_stats = false;
|
||||||
bool print_histogram = false;
|
bool print_histogram = false;
|
||||||
|
@ -46,7 +46,8 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
|
|||||||
{ "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0122 ppl @ LLaMA-v1-7B", },
|
{ "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0122 ppl @ LLaMA-v1-7B", },
|
||||||
{ "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, +0.0008 ppl @ LLaMA-v1-7B", },
|
{ "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, +0.0008 ppl @ LLaMA-v1-7B", },
|
||||||
{ "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ LLaMA-v1-7B", },
|
{ "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ LLaMA-v1-7B", },
|
||||||
{ "F16", LLAMA_FTYPE_MOSTLY_F16, "13.00G @ 7B", },
|
{ "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, -0.0020 ppl @ Mistral-7B", },
|
||||||
|
{ "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
|
||||||
{ "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
|
{ "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
|
||||||
// Note: Ensure COPY comes after F32 to avoid ftype 0 from matching.
|
// Note: Ensure COPY comes after F32 to avoid ftype 0 from matching.
|
||||||
{ "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
|
{ "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
|
||||||
|
@ -62,6 +62,18 @@ page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/
|
|||||||
- `--chat-template JINJA_TEMPLATE`: Set custom jinja chat template. This parameter accepts a string, not a file name. Default: template taken from model's metadata. We only support [some pre-defined templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template)
|
- `--chat-template JINJA_TEMPLATE`: Set custom jinja chat template. This parameter accepts a string, not a file name. Default: template taken from model's metadata. We only support [some pre-defined templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template)
|
||||||
- `--log-disable`: Output logs to stdout only, not to `llama.log`. Default: enabled
|
- `--log-disable`: Output logs to stdout only, not to `llama.log`. Default: enabled
|
||||||
- `--log-format FORMAT`: Define the log output to FORMAT: json or text Default: `json`
|
- `--log-format FORMAT`: Define the log output to FORMAT: json or text Default: `json`
|
||||||
|
- `--rope-scaling` : RoPE scaling method. Defaults to linear unless otherwise specified by the model. Options are `none`, `linear`, `yarn`
|
||||||
|
- `--rope-freq-base N` : RoPE frequency base (default: loaded from model)
|
||||||
|
- `--rope-freq-scale N`: RoPE frequency scaling factor, expands context by a factor of 1/N (e.g. 0.25)
|
||||||
|
- `--yarn-ext-factor N` : YaRN: extrapolation mix factor (Default: 1.0, 0.0 = full interpolation)
|
||||||
|
- `--yarn-attn-factor N` : YaRN: scale sqrt(t) or attention magnitude (default: 1.0)
|
||||||
|
- `--yarn-beta-slow N`: YaRN: High correction dim or alpha (default: 1.0)
|
||||||
|
- `--yarn-beta-fast N`: YaRN: low correction dim or beta (default: 32.0)
|
||||||
|
- `--pooling` : Pooling type for embeddings, use model default if unspecified. Options are `none`, `mean`, `cls`
|
||||||
|
- `-dt N`, `--defrag-thold N`: KV cache defragmentation threshold (default: -1.0, < 0 = disabled)
|
||||||
|
- `-fa`, `--flash-attn` : enable flash attention (default: disabled).
|
||||||
|
- `-ctk TYPE`, `--cache-type-k TYPE` : KV cache data type for K (default: `f16`, options `f32`, `f16`, `q8_0`, `q4_0`, `q4_1`, `iq4_nl`, `q5_0`, or `q5_1`)
|
||||||
|
- `-ctv TYPE`, `--cache-type-v TYPE` : KV cache type for V (default `f16`, see `-ctk` for options)
|
||||||
|
|
||||||
**If compiled with `LLAMA_SERVER_SSL=ON`**
|
**If compiled with `LLAMA_SERVER_SSL=ON`**
|
||||||
- `--ssl-key-file FNAME`: path to a file containing a PEM-encoded SSL private key
|
- `--ssl-key-file FNAME`: path to a file containing a PEM-encoded SSL private key
|
||||||
@ -74,15 +86,18 @@ page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/
|
|||||||
- Using `make`:
|
- Using `make`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
make
|
make server
|
||||||
```
|
```
|
||||||
|
|
||||||
- Using `CMake`:
|
- Using `CMake`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cmake --build . --config Release
|
cmake -B build
|
||||||
|
cmake --build build --config Release -t server
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Binary is at `./build/bin/server`
|
||||||
|
|
||||||
## Build with SSL
|
## Build with SSL
|
||||||
|
|
||||||
`server` can also be built with SSL support using OpenSSL 3
|
`server` can also be built with SSL support using OpenSSL 3
|
||||||
@ -99,10 +114,8 @@ page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/
|
|||||||
- Using `CMake`:
|
- Using `CMake`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
mkdir build
|
cmake -B build -DLLAMA_SERVER_SSL=ON
|
||||||
cd build
|
cmake --build build --config Release -t server
|
||||||
cmake .. -DLLAMA_SERVER_SSL=ON
|
|
||||||
make server
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Quick Start
|
## Quick Start
|
||||||
@ -259,7 +272,7 @@ node index.js
|
|||||||
|
|
||||||
`logit_bias`: Modify the likelihood of a token appearing in the generated text completion. For example, use `"logit_bias": [[15043,1.0]]` to increase the likelihood of the token 'Hello', or `"logit_bias": [[15043,-1.0]]` to decrease its likelihood. Setting the value to false, `"logit_bias": [[15043,false]]` ensures that the token `Hello` is never produced. The tokens can also be represented as strings, e.g. `[["Hello, World!",-0.5]]` will reduce the likelihood of all the individual tokens that represent the string `Hello, World!`, just like the `presence_penalty` does. Default: `[]`
|
`logit_bias`: Modify the likelihood of a token appearing in the generated text completion. For example, use `"logit_bias": [[15043,1.0]]` to increase the likelihood of the token 'Hello', or `"logit_bias": [[15043,-1.0]]` to decrease its likelihood. Setting the value to false, `"logit_bias": [[15043,false]]` ensures that the token `Hello` is never produced. The tokens can also be represented as strings, e.g. `[["Hello, World!",-0.5]]` will reduce the likelihood of all the individual tokens that represent the string `Hello, World!`, just like the `presence_penalty` does. Default: `[]`
|
||||||
|
|
||||||
`n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token. Default: `0`
|
`n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token given the sampling settings. Note that for temperature < 0 the tokens are sampled greedily but token probabilities are still being calculated via a simple softmax of the logits without considering any other sampler settings. Default: `0`
|
||||||
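As a rough illustration of the note on `n_probs` with temperature < 0 (a hypothetical sketch, not the server's actual code path): the token is picked greedily, yet probabilities can still be reported from a plain softmax over the logits.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <iterator>
#include <vector>

int main() {
    // toy logits for a 4-token vocabulary
    const std::vector<float> logits = {2.0f, 0.5f, -1.0f, 1.2f};

    // numerically stable softmax over the raw logits
    const float max_logit = *std::max_element(logits.begin(), logits.end());
    std::vector<float> probs(logits.size());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i) {
        probs[i] = std::exp(logits[i] - max_logit);
        sum += probs[i];
    }
    for (float & p : probs) {
        p /= sum;
    }

    // greedy "sampling": simply pick the token with the highest logit
    const int best = (int) std::distance(logits.begin(),
                                         std::max_element(logits.begin(), logits.end()));

    printf("sampled token %d; reported probabilities:", best);
    for (float p : probs) {
        printf(" %.3f", p);
    }
    printf("\n");
    return 0;
}
```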
|
|
||||||
`min_keep`: If greater than 0, force samplers to return N possible tokens at minimum. Default: `0`
|
`min_keep`: If greater than 0, force samplers to return N possible tokens at minimum. Default: `0`
|
||||||
|
|
||||||
@ -318,7 +331,7 @@ Notice that each `probs` is an array of length `n_probs`.
|
|||||||
|
|
||||||
`content`: Set the text to tokenize.
|
`content`: Set the text to tokenize.
|
||||||
|
|
||||||
Note that a special `BOS` token is never inserted.
|
`add_special`: Boolean indicating if special tokens, i.e. `BOS`, should be inserted. Default: `false`
|
||||||
|
|
||||||
- **POST** `/detokenize`: Convert tokens to text.
|
- **POST** `/detokenize`: Convert tokens to text.
|
||||||
|
|
||||||
|
@ -268,6 +268,7 @@ def start_server_background(args):
|
|||||||
server_args.extend(['--defrag-thold', "0.1"])
|
server_args.extend(['--defrag-thold', "0.1"])
|
||||||
server_args.append('--cont-batching')
|
server_args.append('--cont-batching')
|
||||||
server_args.append('--metrics')
|
server_args.append('--metrics')
|
||||||
|
server_args.append('--flash-attn')
|
||||||
server_args.extend(['--log-format', "text"])
|
server_args.extend(['--log-format', "text"])
|
||||||
args = [str(arg) for arg in [server_path, *server_args]]
|
args = [str(arg) for arg in [server_path, *server_args]]
|
||||||
print(f"bench: starting server with: {' '.join(args)}")
|
print(f"bench: starting server with: {' '.join(args)}")
|
||||||
|
BIN examples/server/public/favicon.ico (new file, 4.0 KiB)
@ -12,6 +12,8 @@
|
|||||||
// increase max payload length to allow use of larger context size
|
// increase max payload length to allow use of larger context size
|
||||||
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
|
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
|
||||||
#include "httplib.h"
|
#include "httplib.h"
|
||||||
|
// Change JSON_ASSERT from assert() to GGML_ASSERT:
|
||||||
|
#define JSON_ASSERT GGML_ASSERT
|
||||||
#include "json.hpp"
|
#include "json.hpp"
|
||||||
|
|
||||||
// auto generated files (update with ./deps.sh)
|
// auto generated files (update with ./deps.sh)
|
||||||
@ -649,9 +651,6 @@ struct server_context {
|
|||||||
std::string system_prompt;
|
std::string system_prompt;
|
||||||
std::vector<llama_token> system_tokens;
|
std::vector<llama_token> system_tokens;
|
||||||
|
|
||||||
std::string name_user; // this should be the antiprompt
|
|
||||||
std::string name_assistant;
|
|
||||||
|
|
||||||
// slots / clients
|
// slots / clients
|
||||||
std::vector<server_slot> slots;
|
std::vector<server_slot> slots;
|
||||||
json default_generation_settings_for_props;
|
json default_generation_settings_for_props;
|
||||||
@ -671,6 +670,8 @@ struct server_context {
|
|||||||
llama_free_model(model);
|
llama_free_model(model);
|
||||||
model = nullptr;
|
model = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
llama_batch_free(batch);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool load_model(const gpt_params & params_) {
|
bool load_model(const gpt_params & params_) {
|
||||||
@ -859,7 +860,7 @@ struct server_context {
|
|||||||
slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
|
slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
|
||||||
|
|
||||||
// process "json_schema" and "grammar"
|
// process "json_schema" and "grammar"
|
||||||
if (data.contains("json_schema") && !data["json_schema"].is_null() && data.contains("grammar") && !data["grammar"].is_null()) {
|
if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
|
||||||
send_error(task, "Either \"json_schema\" or \"grammar\" can be specified, but not both", ERROR_TYPE_INVALID_REQUEST);
|
send_error(task, "Either \"json_schema\" or \"grammar\" can be specified, but not both", ERROR_TYPE_INVALID_REQUEST);
|
||||||
return false;
|
return false;
|
||||||
} else if (data.contains("json_schema") && !data.contains("grammar")) {
|
} else if (data.contains("json_schema") && !data.contains("grammar")) {
|
||||||
@@ -1096,15 +1097,11 @@ struct server_context {
         system_need_update = false;
     }
 
-    void system_prompt_set(const json & sys_props) {
-        system_prompt  = sys_props.value("prompt", "");
-        name_user      = sys_props.value("anti_prompt", "");
-        name_assistant = sys_props.value("assistant_name", "");
+    bool system_prompt_set(const std::string & sys_prompt) {
+        system_prompt = sys_prompt;
 
         LOG_VERBOSE("system prompt process", {
             {"system_prompt", system_prompt},
-            {"name_user", name_user},
-            {"name_assistant", name_assistant},
         });
 
         // release all slots
@@ -1113,6 +1110,7 @@ struct server_context {
         }
 
         system_need_update = true;
+        return true;
     }
 
     bool process_token(completion_token_output & result, server_slot & slot) {
@@ -1383,9 +1381,10 @@ struct server_context {
             if (!slot.params.stream && slot.stopped_word) {
                 const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
 
+                size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
                 probs = std::vector<completion_token_output>(
                     slot.generated_token_probs.begin(),
-                    slot.generated_token_probs.end() - stop_word_toks.size());
+                    slot.generated_token_probs.end() - safe_offset);
             } else {
                 probs = std::vector<completion_token_output>(
                     slot.generated_token_probs.begin(),
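The safe_offset clamp above guards the case where the stop word tokenizes to more tokens than were actually recorded, which would otherwise move the end iterator before begin. A minimal sketch of the failure mode and the fix, using a toy vector<int> in place of the server's token-prob type and illustrative lengths:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
        std::vector<int> generated = {1, 2};   // e.g. two recorded token probs
        size_t stop_word_len = 5;              // stop word tokenized to more tokens than that

        // generated.end() - stop_word_len would be an invalid iterator here.
        size_t safe_offset = std::min(generated.size(), stop_word_len);
        std::vector<int> trimmed(generated.begin(), generated.end() - safe_offset);

        assert(trimmed.empty());               // clamped to an empty, but valid, range
        return 0;
    }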
@@ -1511,7 +1510,7 @@ struct server_context {
         // add subtasks
         for (int i = 0; i < prompt_count; i++) {
             json subtask_data = multiprompt_task.data;
-            subtask_data["prompt"] = subtask_data["prompt"][i];
+            subtask_data["prompt"] = subtask_data.at("prompt")[i];
 
             // subtasks inherit everything else (infill mode, embedding mode, etc.)
             request_completion(subtask_ids[i], id_multi, subtask_data, multiprompt_task.infill, multiprompt_task.embedding);
@@ -1531,7 +1530,8 @@ struct server_context {
                     }
 
                     if (task.data.contains("system_prompt")) {
-                        system_prompt_set(task.data["system_prompt"]);
+                        std::string sys_prompt = json_value(task.data, "system_prompt", std::string());
+                        system_prompt_set(sys_prompt);
 
                         for (server_slot & slot : slots) {
                             slot.n_past = 0;
@@ -1643,7 +1643,7 @@ struct server_context {
                 } break;
             case SERVER_TASK_TYPE_SLOT_SAVE:
                 {
-                    int id_slot = task.data["id_slot"];
+                    int id_slot = task.data.at("id_slot");
                     server_slot * slot = get_slot(id_slot);
                     if (slot == nullptr) {
                         send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
@@ -1653,8 +1653,8 @@ struct server_context {
                     const size_t token_count = slot->cache_tokens.size();
                     const int64_t t_start = ggml_time_us();
 
-                    std::string filename = task.data["filename"];
-                    std::string filepath = task.data["filepath"];
+                    std::string filename = task.data.at("filename");
+                    std::string filepath = task.data.at("filepath");
 
                     const size_t nwrite = llama_state_seq_save_file(ctx, filepath.c_str(), slot->id + 1, slot->cache_tokens.data(), token_count);
 
@@ -1678,7 +1678,7 @@ struct server_context {
                 } break;
             case SERVER_TASK_TYPE_SLOT_RESTORE:
                 {
-                    int id_slot = task.data["id_slot"];
+                    int id_slot = task.data.at("id_slot");
                     server_slot * slot = get_slot(id_slot);
                     if (slot == nullptr) {
                         send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
@@ -1687,8 +1687,8 @@ struct server_context {
 
                     const int64_t t_start = ggml_time_us();
 
-                    std::string filename = task.data["filename"];
-                    std::string filepath = task.data["filepath"];
+                    std::string filename = task.data.at("filename");
+                    std::string filepath = task.data.at("filepath");
 
                     slot->cache_tokens.resize(slot->n_ctx);
                     size_t token_count = 0;
@@ -1720,7 +1720,7 @@ struct server_context {
                 } break;
             case SERVER_TASK_TYPE_SLOT_ERASE:
                 {
-                    int id_slot = task.data["id_slot"];
+                    int id_slot = task.data.at("id_slot");
                     server_slot * slot = get_slot(id_slot);
                     if (slot == nullptr) {
                         send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
@@ -2265,17 +2265,31 @@ struct server_context {
             llama_token_data_array cur_p = { slot.ctx_sampling->cur.data(), slot.ctx_sampling->cur.size(), false };
             result.tok = id;
 
-            const int32_t n_probs = slot.sparams.n_probs;
-            if (slot.sparams.temp <= 0 && n_probs > 0) {
-                // for llama_sample_token_greedy we need to sort candidates
-                llama_sample_softmax(ctx, &cur_p);
-            }
+            const size_t n_probs = std::min(cur_p.size, (size_t) slot.sparams.n_probs);
+            if (n_probs > 0) {
+                const size_t n_valid = slot.ctx_sampling->n_valid;
 
-            for (size_t i = 0; i < std::min(cur_p.size, (size_t) n_probs); ++i) {
-                result.probs.push_back({
-                    cur_p.data[i].id,
-                    cur_p.data[i].p
-                });
+                // Make sure at least n_probs top tokens are at the front of the vector:
+                if (slot.sparams.temp == 0.0f && n_probs > n_valid) {
+                    llama_sample_top_k(ctx, &cur_p, n_probs, 0);
+                }
+
+                if (slot.sparams.temp == 0.0f) {
+                    // With greedy sampling the probabilities have possibly not been calculated.
+                    for (size_t i = 0; i < n_probs; ++i) {
+                        result.probs.push_back({
+                            cur_p.data[i].id,
+                            i == 0 ? 1.0f : 0.0f
+                        });
+                    }
+                } else {
+                    for (size_t i = 0; i < n_probs; ++i) {
+                        result.probs.push_back({
+                            cur_p.data[i].id,
+                            i >= n_valid ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
+                        });
+                    }
+                }
             }
 
             if (!process_token(result, slot)) {
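The reworked block above reports token probabilities differently under greedy sampling: when temp == 0 the candidate list may be neither sorted nor normalized, so the top n_probs entries are moved to the front first, the picked token is reported with probability 1 and the rest with 0, and tokens filtered out by samplers such as top_k report 0 in the non-greedy path. A reduced sketch of that reporting rule, using a toy candidate struct and hard-coded values rather than the server's own types:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Cand { int id; float p; };

    int main() {
        std::vector<Cand> cur = {{7, 0.1f}, {3, 0.6f}, {9, 0.3f}};
        size_t n_probs = 2;
        size_t n_valid = cur.size();   // candidates that survived the samplers

        // Analogue of the top-k reorder: put the n_probs best candidates in front.
        std::partial_sort(cur.begin(), cur.begin() + n_probs, cur.end(),
                          [](const Cand & a, const Cand & b) { return a.p > b.p; });

        bool greedy = true;            // temp == 0.0f
        for (size_t i = 0; i < n_probs; ++i) {
            // Greedy: the picked token gets probability 1, the rest 0.
            // Otherwise: report p, but 0 for tokens filtered out (i >= n_valid).
            float reported = greedy ? (i == 0 ? 1.0f : 0.0f)
                                    : (i >= n_valid ? 0.0f : cur[i].p);
            std::printf("token %d -> %.2f\n", cur[i].id, reported);
        }
        return 0;
    }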
@@ -2353,7 +2367,7 @@ static void server_print_usage(const char * argv0, const gpt_params & params, const server_params & sparams) {
        printf("                            disable KV offload\n");
    }
    printf("  -m FNAME, --model FNAME\n");
-   printf("                            model path (default: %s)\n", params.model.c_str());
+   printf("                            model path (default: models/$filename with filename from --hf-file or --model-url if set, otherwise %s)\n", DEFAULT_MODEL_PATH);
    printf("  -mu MODEL_URL, --model-url MODEL_URL\n");
    printf("                            model download url (default: unused)\n");
    printf("  -hfr REPO, --hf-repo REPO\n");
@@ -2377,6 +2391,7 @@ static void server_print_usage(const char * argv0, const gpt_params & params, const server_params & sparams) {
    printf("  --embeddings              enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
    printf("  -np N, --parallel N       number of slots for process requests (default: %d)\n", params.n_parallel);
    printf("  -cb, --cont-batching      enable continuous batching (a.k.a dynamic batching) (default: enabled)\n");
+   printf("  -fa, --flash-attn         enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
    printf("  -spf FNAME, --system-prompt-file FNAME\n");
    printf("                            set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
    printf("  -ctk TYPE, --cache-type-k TYPE\n");
@@ -2742,6 +2757,8 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams, gpt_params & params) {
        params.embedding = true;
    } else if (arg == "-cb" || arg == "--cont-batching") {
        params.cont_batching = true;
+   } else if (arg == "-fa" || arg == "--flash-attn") {
+       params.flash_attn = true;
    } else if (arg == "-np" || arg == "--parallel") {
        if (++i >= argc) {
            invalid_param = true;
@@ -2835,6 +2852,8 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams, gpt_params & params) {
        }
    }
 
+   gpt_params_handle_model_default(params);
+
    if (!params.kv_overrides.empty()) {
        params.kv_overrides.emplace_back();
        params.kv_overrides.back().key[0] = 0;
@@ -2896,7 +2915,7 @@ int main(int argc, char ** argv) {
    server_params_parse(argc, argv, sparams, params);
 
    if (!sparams.system_prompt.empty()) {
-       ctx_server.system_prompt_set(json::parse(sparams.system_prompt));
+       ctx_server.system_prompt_set(sparams.system_prompt);
    }
 
    if (params.model_alias == "unknown") {
@@ -3116,8 +3135,8 @@ int main(int argc, char ** argv) {
        server_task_result result = ctx_server.queue_results.recv(task.id);
        ctx_server.queue_results.remove_waiting_task_id(task.id);
 
-       const int n_idle_slots       = result.data["idle"];
-       const int n_processing_slots = result.data["processing"];
+       const int n_idle_slots       = result.data.at("idle");
+       const int n_processing_slots = result.data.at("processing");
 
        json health = {
            {"status", "ok"},
@@ -3127,7 +3146,7 @@ int main(int argc, char ** argv) {
 
        res.status = 200; // HTTP OK
        if (sparams.slots_endpoint && req.has_param("include_slots")) {
-           health["slots"] = result.data["slots"];
+           health["slots"] = result.data.at("slots");
        }
 
        if (n_idle_slots == 0) {
@@ -3171,7 +3190,7 @@ int main(int argc, char ** argv) {
        server_task_result result = ctx_server.queue_results.recv(task.id);
        ctx_server.queue_results.remove_waiting_task_id(task.id);
 
-       res.set_content(result.data["slots"].dump(), "application/json");
+       res.set_content(result.data.at("slots").dump(), "application/json");
        res.status = 200; // HTTP OK
    };
 
@@ -3198,32 +3217,32 @@ int main(int argc, char ** argv) {
 
        json data = result.data;
 
-       const uint64_t n_prompt_tokens_processed = data["n_prompt_tokens_processed"];
-       const uint64_t t_prompt_processing       = data["t_prompt_processing"];
+       const uint64_t n_prompt_tokens_processed = data.at("n_prompt_tokens_processed");
+       const uint64_t t_prompt_processing       = data.at("t_prompt_processing");
 
-       const uint64_t n_tokens_predicted  = data["n_tokens_predicted"];
-       const uint64_t t_tokens_generation = data["t_tokens_generation"];
+       const uint64_t n_tokens_predicted  = data.at("n_tokens_predicted");
+       const uint64_t t_tokens_generation = data.at("t_tokens_generation");
 
-       const int32_t kv_cache_used_cells = data["kv_cache_used_cells"];
+       const int32_t kv_cache_used_cells = data.at("kv_cache_used_cells");
 
        // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
        json all_metrics_def = json {
            {"counter", {{
                    {"name",  "prompt_tokens_total"},
                    {"help",  "Number of prompt tokens processed."},
-                   {"value", (uint64_t) data["n_prompt_tokens_processed_total"]}
+                   {"value", (uint64_t) data.at("n_prompt_tokens_processed_total")}
            }, {
                    {"name",  "prompt_seconds_total"},
                    {"help",  "Prompt process time"},
-                   {"value", (uint64_t) data["t_prompt_processing_total"] / 1.e3}
+                   {"value", (uint64_t) data.at("t_prompt_processing_total") / 1.e3}
            }, {
                    {"name",  "tokens_predicted_total"},
                    {"help",  "Number of generation tokens processed."},
-                   {"value", (uint64_t) data["n_tokens_predicted_total"]}
+                   {"value", (uint64_t) data.at("n_tokens_predicted_total")}
            }, {
                    {"name",  "tokens_predicted_seconds_total"},
                    {"help",  "Predict process time"},
-                   {"value", (uint64_t) data["t_tokens_generation_total"] / 1.e3}
+                   {"value", (uint64_t) data.at("t_tokens_generation_total") / 1.e3}
            }}},
            {"gauge", {{
                    {"name",  "prompt_tokens_seconds"},
@@ -3240,15 +3259,15 @@ int main(int argc, char ** argv) {
            },{
                    {"name",  "kv_cache_tokens"},
                    {"help",  "KV-cache tokens."},
-                   {"value", (uint64_t) data["kv_cache_tokens_count"]}
+                   {"value", (uint64_t) data.at("kv_cache_tokens_count")}
            },{
                    {"name",  "requests_processing"},
                    {"help",  "Number of request processing."},
-                   {"value", (uint64_t) data["processing"]}
+                   {"value", (uint64_t) data.at("processing")}
            },{
                    {"name",  "requests_deferred"},
                    {"help",  "Number of request deferred."},
-                   {"value", (uint64_t) data["deferred"]}
+                   {"value", (uint64_t) data.at("deferred")}
            }}}
        };
 
@@ -3259,8 +3278,8 @@ int main(int argc, char ** argv) {
            const auto & metrics_def = el.value();
 
            for (const auto & metric_def : metrics_def) {
-               const std::string name = metric_def["name"];
-               const std::string help = metric_def["help"];
+               const std::string name = metric_def.at("name");
+               const std::string help = metric_def.at("help");
 
                auto value = json_value(metric_def, "value", 0.);
                prometheus << "# HELP llamacpp:" << name << " " << help << "\n"
@@ -3269,7 +3288,7 @@ int main(int argc, char ** argv) {
            }
        }
 
-       const int64_t t_start = data["t_start"];
+       const int64_t t_start = data.at("t_start");
        res.set_header("Process-Start-Time-Unix", std::to_string(t_start));
 
        res.set_content(prometheus.str(), "text/plain; version=0.0.4");
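The /metrics handler above walks all_metrics_def and streams each entry in the Prometheus text exposition format before serving it as text/plain; version=0.0.4. A reduced sketch of that formatting loop with hard-coded sample values and no HTTP; the "# TYPE" and value lines are an assumption based on the standard exposition format, only the "# HELP llamacpp:" prefix is taken from the patch:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <utility>
    #include <vector>

    struct metric { std::string name; std::string help; double value; };

    int main() {
        std::vector<std::pair<std::string, std::vector<metric>>> all_metrics_def = {
            {"counter", {{"prompt_tokens_total", "Number of prompt tokens processed.", 128.0}}},
            {"gauge",   {{"requests_processing", "Number of request processing.",        2.0}}},
        };

        std::stringstream prometheus;
        for (const auto & group : all_metrics_def) {
            for (const auto & m : group.second) {
                prometheus << "# HELP llamacpp:" << m.name << " " << m.help << "\n"
                           << "# TYPE llamacpp:" << m.name << " " << group.first << "\n"
                           << "llamacpp:" << m.name << " " << m.value << "\n";
            }
        }
        std::cout << prometheus.str();   // what the endpoint would return as its body
        return 0;
    }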
@@ -3278,7 +3297,7 @@ int main(int argc, char ** argv) {
 
    const auto handle_slots_save = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) {
        json request_data = json::parse(req.body);
-       std::string filename = request_data["filename"];
+       std::string filename = request_data.at("filename");
        if (!validate_file_name(filename)) {
            res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
            return;
@@ -3308,7 +3327,7 @@ int main(int argc, char ** argv) {
 
    const auto handle_slots_restore = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) {
        json request_data = json::parse(req.body);
-       std::string filename = request_data["filename"];
+       std::string filename = request_data.at("filename");
        if (!validate_file_name(filename)) {
            res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
            return;
@@ -3385,8 +3404,7 @@ int main(int argc, char ** argv) {
    const auto handle_props = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        json data = {
-           { "user_name",                   ctx_server.name_user.c_str() },
-           { "assistant_name",              ctx_server.name_assistant.c_str() },
+           { "system_prompt",               ctx_server.system_prompt.c_str() },
            { "default_generation_settings", ctx_server.default_generation_settings_for_props },
            { "total_slots",                 ctx_server.params.n_parallel }
        };
@@ -3627,7 +3645,8 @@ int main(int argc, char ** argv) {
 
        std::vector<llama_token> tokens;
        if (body.count("content") != 0) {
-           tokens = ctx_server.tokenize(body["content"], false);
+           const bool add_special = json_value(body, "add_special", false);
+           tokens = ctx_server.tokenize(body.at("content"), add_special);
        }
        const json data = format_tokenizer_response(tokens);
        return res.set_content(data.dump(), "application/json; charset=utf-8");
@@ -3639,7 +3658,7 @@ int main(int argc, char ** argv) {
 
        std::string content;
        if (body.count("tokens") != 0) {
-           const std::vector<llama_token> tokens = body["tokens"];
+           const std::vector<llama_token> tokens = body.at("tokens");
            content = tokens_to_str(ctx_server.ctx, tokens.cbegin(), tokens.cend());
        }
 
@@ -3662,10 +3681,10 @@ int main(int argc, char ** argv) {
        json prompt;
        if (body.count("input") != 0) {
            is_openai = true;
-           prompt = body["input"];
+           prompt = body.at("input");
        } else if (body.count("content") != 0) {
            // with "content", we only support single prompt
-           prompt = std::vector<std::string>{body["content"]};
+           prompt = std::vector<std::string>{body.at("content")};
        } else {
            res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST));
            return;
@@ -3684,7 +3703,7 @@ int main(int argc, char ** argv) {
        if (!result.error) {
            if (result.data.count("results")) {
                // result for multi-task
-               responses = result.data["results"];
+               responses = result.data.at("results");
            } else {
                // result for single task
                responses = std::vector<json>{result.data};
@@ -5,7 +5,7 @@ Feature: llama.cpp server
   Background: Server startup
     Given a server listening on localhost:8080
     And   a model url https://huggingface.co/ggml-org/models/resolve/main/bert-bge-small/ggml-model-f16.gguf
-    And   a model file ggml-model-f16.gguf
+    And   a model file bert-bge-small.gguf
     And   a model alias bert-bge-small
     And   42 as server seed
     And   2 slots
@@ -7,44 +7,16 @@ Feature: Results
     And   a model file tinyllamas/split/stories15M-00001-of-00003.gguf from HF repo ggml-org/models
     And   a model file test-model-00001-of-00003.gguf
     And   128 as batch size
-    And   256 KV cache size
+    And   1024 KV cache size
     And   128 max tokens to predict
 
-  Scenario Outline: Multi users completion
-    Given <n_slots> slots
     And   continuous batching
 
+  Scenario Outline: consistent results with same seed
+    Given <n_slots> slots
     Then  the server is starting
     Then  the server is healthy
 
-    Given 42 as seed
-    And a prompt:
-      """
-      Write a very long story about AI.
-      """
-
-    Given 42 as seed
-    And a prompt:
-      """
-      Write a very long story about AI.
-      """
-
-    Given 42 as seed
-    And a prompt:
-      """
-      Write a very long story about AI.
-      """
-
-    Given 42 as seed
-    And a prompt:
-      """
-      Write a very long story about AI.
-      """
-
-    Given 42 as seed
-    And a prompt:
-      """
-      Write a very long story about AI.
-      """
+    Given 4 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 42
 
     Given concurrent completion requests
     Then  the server is busy
@@ -55,3 +27,55 @@ Feature: Results
       | n_slots |
       | 1       |
       | 2       |
+
+  Scenario Outline: different results with different seed
+    Given <n_slots> slots
+    Then  the server is starting
+    Then  the server is healthy
+
+    Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 42
+    Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 43
+    Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 44
+    Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 45
+
+    Given concurrent completion requests
+    Then  the server is busy
+    Then  the server is idle
+    And   all slots are idle
+    Then  all predictions are different
+    Examples:
+      | n_slots |
+      | 1       |
+      | 2       |
+
+  Scenario Outline: consistent results with same seed and varying batch size
+    Given 4 slots
+    And   <temp> temperature
+    # And   0 as draft
+    Then  the server is starting
+    Then  the server is healthy
+
+    Given 1 prompts "Write a very long story about AI." with seed 42
+    And   concurrent completion requests
+    # Then  the server is busy # Not all slots will be utilized.
+    Then  the server is idle
+    And   all slots are idle
+
+    Given <n_parallel> prompts "Write a very long story about AI." with seed 42
+    And   concurrent completion requests
+    # Then  the server is busy # Not all slots will be utilized.
+    Then  the server is idle
+    And   all slots are idle
+
+    Then  all predictions are equal
+    Examples:
+      | n_parallel | temp |
+      | 1          | 0.0  |
+      | 2          | 0.0  |
+      | 4          | 0.0  |
+      | 1          | 1.0  |
+      # FIXME: These tests fail on master. The problem seems to be the unified KV cache.
+      # See https://github.com/ggerganov/whisper.cpp/issues/1941#issuecomment-1986923227
+      # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574 .
+      # | 2          | 1.0  |
+      # | 4          | 1.0  |
@@ -7,6 +7,7 @@ Feature: llama.cpp server
     And   a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
     And   a model file test-model.gguf
     And   a model alias tinyllama-2
+    And   BOS token is 1
     And   42 as server seed
     # KV Cache corresponds to the total amount of tokens
     # that can be stored across all independent sequences: #4130
@@ -91,7 +92,18 @@ Feature: llama.cpp server
      """
      What is the capital of France ?
      """
-    Then tokens can be detokenize
+    Then  tokens can be detokenized
+    And   tokens do not begin with BOS
+
+  Scenario: Tokenize w/ BOS
+    Given adding special tokens
+    When  tokenizing:
+      """
+      What is the capital of Germany?
+      """
+    Then  tokens begin with BOS
+    Given first token is removed
+    Then  tokens can be detokenized
 
   Scenario: Models available
     Given available models
@@ -65,6 +65,7 @@ def step_server_config(context, server_fqdn, server_port):
     context.server_seed = None
     context.user_api_key = None
     context.response_format = None
+    context.temperature = None
 
     context.tasks_result = []
     context.concurrent_tasks = []
@@ -232,15 +233,17 @@ async def step_all_slots_status(context, expected_slot_status_string):
 @async_run_until_complete
 async def step_request_completion(context, api_error):
     expect_api_error = api_error == 'raised'
+    seeds = await completions_seed(context, num_seeds=1)
     completion = await request_completion(context.prompts.pop(),
+                                          seeds[0] if seeds is not None else seeds,
                                           context.base_url,
                                           debug=context.debug,
                                           n_predict=context.n_predict,
                                           cache_prompt=context.cache_prompt,
                                           id_slot=context.id_slot,
-                                          seed=await completions_seed(context),
                                           expect_api_error=expect_api_error,
-                                          user_api_key=context.user_api_key)
+                                          user_api_key=context.user_api_key,
+                                          temperature=context.temperature)
     context.tasks_result.append(completion)
     if context.debug:
         print(f"Completion response: {completion}")
@@ -269,6 +272,15 @@ async def step_predictions_equal(context):
     context.tasks_result = []
 
 
+@step('all predictions are different')
+@async_run_until_complete
+async def step_predictions_equal(context):
+    n_completions = await gather_tasks_results(context)
+    assert n_completions >= 2, "need at least 2 completions"
+    assert_all_predictions_different(context.tasks_result)
+    context.tasks_result = []
+
+
 @step('the completion is truncated')
 def step_assert_completion_truncated(context):
     step_assert_completion_truncated(context, '')
@@ -311,6 +323,11 @@ def step_response_format(context, response_format):
     context.response_format = json.loads(response_format)
 
 
+@step('{temperature:f} temperature')
+def step_temperature(context, temperature):
+    context.temperature = temperature
+
+
 @step('streaming is {enable_streaming}')
 def step_streaming(context, enable_streaming):
     context.enable_streaming = enable_streaming == 'enabled'
@@ -353,7 +370,15 @@ def step_n_ubatch(context, n_ubatch):
 
 @step('{seed:d} as seed')
 def step_seed(context, seed):
-    context.seed = seed
+    if context.seed is None:
+        context.seed = [seed]
+    else:
+        context.seed.append(seed)
+
+
+@step('BOS token is {bos:d}')
+def step_bos_token(context, bos):
+    context.bos = bos
 
 
 @step('a prefix prompt')
@@ -413,7 +438,9 @@ async def step_oai_chat_completions(context, api_error):
     if context.debug:
         print(f"Submitting OAI compatible completions request...")
     expect_api_error = api_error == 'raised'
+    seeds = await completions_seed(context, num_seeds=1),
     completion = await oai_chat_completions(context.prompts.pop(),
+                                            seeds[0] if seeds is not None else seeds,
                                             context.system_prompt,
                                             context.base_url,
                                             '/v1/chat',
@@ -429,8 +456,6 @@ async def step_oai_chat_completions(context, api_error):
                                             response_format=context.response_format
                                             if hasattr(context, 'response_format') else None,
-                                            seed=await completions_seed(context),
-
                                             user_api_key=context.user_api_key
                                             if hasattr(context, 'user_api_key') else None,
 
@@ -457,20 +482,31 @@ def step_a_prompt_prompt(context, prompt):
     context.n_prompts = len(context.prompts)
 
 
+@step('{num_prompts:d} prompts {prompt} with seed {seed:d}')
+def step_many_prompts(context, num_prompts, prompt, seed):
+    if context.seed is None:
+        context.seed = []
+    for _ in range(num_prompts):
+        context.seed.append(seed)
+        context.prompts.append(prompt)
+    context.n_prompts = len(context.prompts)
+
+
 @step('concurrent completion requests')
 @async_run_until_complete()
 async def step_concurrent_completion_requests(context):
-    await concurrent_requests(context,
-                              request_completion,
-                              # prompt is inserted automatically
-                              context.base_url,
-                              debug=context.debug,
-                              prompt_prefix=context.prompt_prefix,
-                              prompt_suffix=context.prompt_suffix,
-                              n_predict=context.n_predict if hasattr(context, 'n_predict') else None,
-                              seed=await completions_seed(context),
-                              user_api_key=context.user_api_key if hasattr(context,
-                                                                           'user_api_key') else None)
+    await concurrent_requests(
+        context,
+        request_completion,
+        # prompt is inserted automatically
+        context.base_url,
+        debug=context.debug,
+        prompt_prefix=context.prompt_prefix,
+        prompt_suffix=context.prompt_suffix,
+        n_predict=context.n_predict if hasattr(context, 'n_predict') else None,
+        user_api_key=context.user_api_key if hasattr(context, 'user_api_key') else None,
+        temperature=context.temperature,
+    )
 
 
 @step('concurrent OAI completions requests')
@@ -490,7 +526,6 @@ async def step_oai_chat_completions(context):
                              if hasattr(context, 'enable_streaming') else None,
                              response_format=context.response_format
                              if hasattr(context, 'response_format') else None,
-                             seed=await completions_seed(context),
                              user_api_key=context.user_api_key
                              if hasattr(context, 'user_api_key') else None)
 
@@ -512,10 +547,6 @@ async def step_oai_chat_completions(context):
                              if hasattr(context, 'enable_streaming') else None,
                              response_format=context.response_format
                              if hasattr(context, 'response_format') else None,
-                             seed=context.seed
-                             if hasattr(context, 'seed') else
-                             context.server_seed
-                             if hasattr(context, 'server_seed') else None,
                              user_api_key=context.user_api_key
                              if hasattr(context, 'user_api_key') else None)
 
@@ -544,7 +575,7 @@ async def all_prompts_are_predicted(context, expected_predicted_n=None):
 @async_run_until_complete
 async def step_compute_embedding(context):
     context.n_prompts = 1
-    context.embeddings = await request_embedding(context_text(context), base_url=context.base_url)
+    context.embeddings = await request_embedding(context_text(context), None, base_url=context.base_url)
 
 
 @step('all embeddings are the same')
@@ -585,7 +616,7 @@ def step_assert_embeddings(context):
 @async_run_until_complete
 async def step_oai_compute_embeddings(context):
     context.n_prompts = 1
-    context.embeddings = await request_oai_embeddings(context_text(context),
+    context.embeddings = await request_oai_embeddings(context_text(context), None,
                                                       base_url=context.base_url,
                                                       user_api_key=context.user_api_key,
                                                       model=context.model)
@@ -594,7 +625,7 @@ async def step_oai_compute_embeddings(context):
 @step('an OAI compatible embeddings computation request for multiple inputs')
 @async_run_until_complete
 async def step_oai_compute_embeddings_multiple_inputs(context):
-    context.embeddings = await request_oai_embeddings(context.prompts,
+    context.embeddings = await request_oai_embeddings(context.prompts, None,
                                                       base_url=context.base_url,
                                                       user_api_key=context.user_api_key,
                                                       model=context.model)
@@ -630,21 +661,29 @@ async def all_embeddings_are_generated(context):
     assert_embeddings(context.tasks_result.pop().pop())
 
 
+@step('adding special tokens')
+def step_tokenize_set_add_special(context):
+    context.tokenize_add_special = True
+
+
 @step('tokenizing')
 @async_run_until_complete
 async def step_tokenize(context):
     context.tokenized_text = context_text(context)
     async with aiohttp.ClientSession() as session:
+        tokenize_args = {
+            "content": context.tokenized_text,
+        }
+        if getattr(context, 'tokenize_add_special', None) is not None:
+            tokenize_args['add_special'] = context.tokenize_add_special
         async with session.post(f'{context.base_url}/tokenize',
-                                json={
-                                    "content": context.tokenized_text,
-                                }) as response:
+                                json=tokenize_args) as response:
            assert response.status == 200
            tokenize_json = await response.json()
            context.tokens = tokenize_json['tokens']
 
 
-@step('tokens can be detokenize')
+@step('tokens can be detokenized')
 @async_run_until_complete
 async def step_detokenize(context):
     assert len(context.tokens) > 0
@@ -659,6 +698,21 @@ async def step_detokenize(context):
     assert context.tokenized_text == detokenize_json['content'].strip()
 
 
+@step('tokens begin with BOS')
+def step_strings_for_tokenization(context):
+    assert context.tokens[0] == context.bos
+
+
+@step('tokens do not begin with BOS')
+def step_strings_for_tokenization(context):
+    assert context.tokens[0] != context.bos
+
+
+@step('first token is removed')
+def step_strings_for_tokenization(context):
+    context.tokens = context.tokens[1:]
+
+
 @step('an OPTIONS request is sent from {origin}')
 @async_run_until_complete
 async def step_options_request(context, origin):
@@ -740,8 +794,9 @@ async def concurrent_requests(context, f_completion, *args, **kwargs):
     if context.debug:
         print(f"starting {context.n_prompts} concurrent completion requests...")
     assert context.n_prompts > 0
+    seeds = await completions_seed(context)
     for prompt_no in range(context.n_prompts):
-        shifted_args = [context.prompts.pop(), *args]
+        shifted_args = [context.prompts.pop(), seeds[prompt_no], *args]
         context.concurrent_tasks.append(asyncio.create_task(f_completion(*shifted_args, **kwargs)))
     await asyncio.sleep(0.1)
 
@@ -781,6 +836,7 @@ def step_server_responds_with_status_code(context, status_code):
 
 
 async def request_completion(prompt,
+                             seed,
                              base_url,
                              debug=False,
                              prompt_prefix=None,
@@ -788,9 +844,9 @@ async def request_completion(prompt,
                             n_predict=None,
                             cache_prompt=False,
                             id_slot=None,
-                             seed=None,
                             expect_api_error=None,
-                             user_api_key=None):
+                             user_api_key=None,
+                             temperature=None):
     if debug:
         print(f"Sending completion request: {prompt}")
     origin = "my.super.domain"
@@ -811,7 +867,8 @@ async def request_completion(prompt,
                                     "n_predict": n_predict if n_predict is not None else -1,
                                     "cache_prompt": cache_prompt,
                                     "id_slot": id_slot,
-                                    "seed": seed if seed is not None else 42
+                                    "seed": seed if seed is not None else 42,
+                                    "temperature": temperature if temperature is not None else "0.8f",
                                 },
                                 headers=headers,
                                 timeout=3600) as response:
@@ -824,6 +881,7 @@ async def request_completion(prompt,
 
 
 async def oai_chat_completions(user_prompt,
+                               seed,
                                system_prompt,
                                base_url,
                                base_path,
@@ -833,7 +891,6 @@ async def oai_chat_completions(user_prompt,
                               n_predict=None,
                               enable_streaming=None,
                               response_format=None,
-                               seed=None,
                               user_api_key=None,
                               expect_api_error=None):
     if debug:
@@ -882,7 +939,7 @@ async def oai_chat_completions(user_prompt,
             while event_received:
                 event_received = False
                 async for line_in_bytes in response.content:
-                    line = line_in_bytes.decode('utf8')
+                    line = line_in_bytes.decode('utf-8')
                     line = line.rstrip('\n').rstrip('\r')
                     if line == '':
                         continue
@@ -952,7 +1009,7 @@ async def oai_chat_completions(user_prompt,
     return completion_response
 
 
-async def request_embedding(content, base_url=None):
+async def request_embedding(content, seed, base_url=None):
     async with aiohttp.ClientSession() as session:
         async with session.post(f'{base_url}/embedding',
                                 json={
@@ -963,7 +1020,7 @@ async def request_embedding(content, base_url=None):
             return [response_json['embedding']]
 
 
-async def request_oai_embeddings(input,
+async def request_oai_embeddings(input, seed,
                                  base_url=None, user_api_key=None,
                                  model=None, async_client=False):
     # openai client always expects an api_key
@@ -1036,21 +1093,31 @@ def assert_n_tokens_predicted(completion_response, expected_predicted_n=None, re
                                           f' {n_predicted} <> {expected_predicted_n}')
 
 
 def assert_all_predictions_equal(completion_responses):
-    content_0 = completion_responses[0]['content']
-
     if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
-        print(f"content 0: {content_0}")
-
-    i = 1
-    for response in completion_responses[1:]:
-        content = response['content']
-
-        if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
-            print(f"content {i}: {content}")
-
-        assert content == content_0, "contents not equal"
-
-        i += 1
+        for i, response_i in enumerate(completion_responses):
+            content_i = response_i['content']
+            print(f"content {i}: {content_i}")
+    for i, response_i in enumerate(completion_responses):
+        content_i = response_i['content']
+        for j, response_j in enumerate(completion_responses):
+            if i == j:
+                continue
+            content_j = response_j['content']
+            assert content_i == content_j, "contents not equal"
+
+
+def assert_all_predictions_different(completion_responses):
+    if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON':
+        for i, response_i in enumerate(completion_responses):
+            content_i = response_i['content']
+            print(f"content {i}: {content_i}")
+    for i, response_i in enumerate(completion_responses):
+        content_i = response_i['content']
+        for j, response_j in enumerate(completion_responses):
+            if i == j:
+                continue
+            content_j = response_j['content']
+            assert content_i != content_j, "contents not different"
 
 
 async def gather_tasks_results(context):
@@ -1145,9 +1212,22 @@ def assert_slots_status(slots, expected_slots):
                                      f" = {expected[key]} != {slot[key]}")
 
 
-async def completions_seed(context):
-    return context.seed if hasattr(context, 'seed') and context.seed is not None \
-        else context.server_seed if hasattr(context, 'server_seed') else None
+async def completions_seed(context, num_seeds=None):
+    if hasattr(context, "seed") and context.seed is not None:
+        assert len(context.seed) == context.n_prompts
+        if num_seeds is None:
+            num_seeds = context.n_prompts
+        assert num_seeds <= context.n_prompts
+        seeds = context.seed[:num_seeds]
+        context.seed = context.seed[num_seeds:] if num_seeds < context.n_prompts else None
+        return seeds
+
+    if hasattr(context, "server_seed") and context.server_seed is not None:
+        if num_seeds is None:
+            return [context.server_seed] * context.n_prompts
+        else:
+            return [context.server_seed] * num_seeds
+    return None
 
 
 def context_text(context):
examples/server/themes/README.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+# LLaMA.cpp Server Wild Theme
+
+Simple themes directory of sample "public" directories. To try any of these add --path to your run like `server --path=wild`.
+
+![image](wild/wild.png)

examples/server/themes/buttons-top/README.md (new file, 7 lines)
@@ -0,0 +1,7 @@
+# LLaMA.cpp Server Buttons Top Theme
+
+Simple tweaks to the UI. Chat buttons at the top of the page instead of bottom so you can hit Stop instead of chasing it down the page.
+
+To use simply run server with `--path=themes/buttons_top`
+
+![image](buttons_top.png)

BIN  examples/server/themes/buttons-top/buttons_top.png  (new binary file, 117 KiB)
BIN  examples/server/themes/buttons-top/favicon.ico      (new binary file, 4.0 KiB)
     examples/server/themes/buttons-top/index.html       (new file, 1057 lines; contents omitted)

examples/server/themes/wild/README.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+# LLaMA.cpp Server Wild Theme
+
+Simple tweaks to the UI. To use simply run server with `--path=themes/wild`
+
+![image](wild.png)

BIN  examples/server/themes/wild/favicon.ico       (new binary file, 4.0 KiB)
     examples/server/themes/wild/index.html        (new file, 1061 lines; contents omitted)
BIN  examples/server/themes/wild/llama_cpp.png     (new binary file, 75 KiB)
BIN  examples/server/themes/wild/llamapattern.png  (new binary file, 254 KiB)
BIN  examples/server/themes/wild/wild.png          (new binary file, 485 KiB)
@@ -3,6 +3,8 @@
 #include "llama.h"
 #include "common.h"
 
+// Change JSON_ASSERT from assert() to GGML_ASSERT:
+#define JSON_ASSERT GGML_ASSERT
 #include "json.hpp"
 
 #include <string>
@@ -49,18 +51,18 @@ extern bool server_log_json;
 #define LOG_WARNING(MSG, ...) server_log("WARN",  __func__, __LINE__, MSG, __VA_ARGS__)
 #define LOG_INFO(   MSG, ...) server_log("INFO",  __func__, __LINE__, MSG, __VA_ARGS__)
 
-static inline void server_log(const char *level, const char *function, int line, const char *message, const nlohmann::ordered_json &extra);
+static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra);
 
 template <typename T>
-static T json_value(const json &body, const std::string &key, const T &default_value) {
+static T json_value(const json & body, const std::string & key, const T & default_value) {
     // Fallback null to default value
-    if (body.contains(key) && !body.at(key).is_null()){
+    if (body.contains(key) && !body.at(key).is_null()) {
         try {
-            return body.value(key, default_value);
-        }
-        catch (nlohmann::json_abi_v3_11_3::detail::type_error const&){
-            std::string message = "Wrong type supplied for parameter '" + key + "'. Expected '" + typeid(default_value).name() + "', using default value.";
-            server_log("WARN", __func__, __LINE__, message.c_str(), body);
+            return body.at(key);
+        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
+            std::stringstream ss;
+            ss << "Wrong type supplied for parameter '" << key << "'. Expected '" << json(default_value).type_name() << "', using default value.";
+            LOG_WARNING(ss.str().c_str(), body);
             return default_value;
         }
     } else {
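One behaviour of the reworked json_value() worth calling out: a present-but-wrong-typed field falls back to the default (with a warning) instead of throwing, because the json type_error raised by the conversion is caught; a missing or null field also yields the default. A standalone sketch of that contract, with simplified logging to stderr and hypothetical field names:

    #include <iostream>
    #include <string>
    #include "json.hpp"   // nlohmann::json single header

    using json = nlohmann::json;

    template <typename T>
    static T json_value_sketch(const json & body, const std::string & key, const T & default_value) {
        if (body.contains(key) && !body.at(key).is_null()) {
            try {
                return body.at(key);                   // may throw type_error on a type mismatch
            } catch (const json::type_error &) {
                std::cerr << "wrong type for '" << key << "', using default\n";
                return default_value;
            }
        }
        return default_value;
    }

    int main() {
        json body = json::parse(R"({"n_predict": "sixteen", "temperature": 0.7})");

        int    n_predict = json_value_sketch(body, "n_predict", 128);     // wrong type -> 128
        double temp      = json_value_sketch(body, "temperature", 0.8);   // present    -> 0.7
        int    top_k     = json_value_sketch(body, "top_k", 40);          // missing    -> 40

        std::cout << n_predict << " " << temp << " " << top_k << "\n";
        return 0;
    }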
@ -68,16 +70,16 @@ static T json_value(const json &body, const std::string &key, const T &default_v
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void server_log(const char *level, const char *function, int line, const char *message, const nlohmann::ordered_json &extra) {
|
static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra) {
|
||||||
std::stringstream ss_tid;
|
std::stringstream ss_tid;
|
||||||
ss_tid << std::this_thread::get_id();
|
ss_tid << std::this_thread::get_id();
|
||||||
json log = nlohmann::ordered_json{
|
json log = json{
|
||||||
{"tid", ss_tid.str()},
|
{"tid", ss_tid.str()},
|
||||||
{"timestamp", time(nullptr)},
|
{"timestamp", time(nullptr)},
|
||||||
};
|
};
|
||||||
|
|
||||||
if (server_log_json) {
|
if (server_log_json) {
|
||||||
log.merge_patch( {
|
log.merge_patch({
|
||||||
{"level", level},
|
{"level", level},
|
||||||
{"function", function},
|
{"function", function},
|
||||||
{"line", line},
|
{"line", line},
|
||||||
@ -98,7 +100,7 @@ static inline void server_log(const char *level, const char *function, int line,
|
|||||||
}
|
}
|
||||||
std::stringstream ss;
|
std::stringstream ss;
|
||||||
ss << buf << " |";
|
ss << buf << " |";
|
||||||
for (const auto& el : log.items())
|
for (const auto & el : log.items())
|
||||||
{
|
{
|
||||||
const std::string value = el.value().dump(-1, ' ', false, json::error_handler_t::replace);
|
const std::string value = el.value().dump(-1, ' ', false, json::error_handler_t::replace);
|
||||||
ss << " " << el.key() << "=" << value;
|
ss << " " << el.key() << "=" << value;
|
||||||
@ -373,11 +375,11 @@ static json oaicompat_completion_params_parse(
|
|||||||
llama_params["top_p"] = json_value(body, "top_p", 1.0);
|
llama_params["top_p"] = json_value(body, "top_p", 1.0);
|
||||||
|
|
||||||
// Apply chat template to the list of messages
|
// Apply chat template to the list of messages
|
||||||
llama_params["prompt"] = format_chat(model, chat_template, body["messages"]);
|
llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));
|
||||||
|
|
||||||
// Handle "stop" field
|
// Handle "stop" field
|
||||||
if (body.contains("stop") && body["stop"].is_string()) {
|
if (body.contains("stop") && body.at("stop").is_string()) {
|
||||||
llama_params["stop"] = json::array({body["stop"].get<std::string>()});
|
llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
|
||||||
} else {
|
} else {
|
||||||
llama_params["stop"] = json_value(body, "stop", json::array());
|
llama_params["stop"] = json_value(body, "stop", json::array());
|
||||||
}
|
}
|
||||||
|
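For reference, a minimal standalone sketch of the behaviour the updated json_value() helper now gives: a key that is present but carries the wrong type falls back to the default value (the real code also logs a warning). The helper name and sample keys below are illustrative only and are not part of the server sources; the sketch assumes the nlohmann::json single-header library.

    #include <nlohmann/json.hpp>
    #include <iostream>
    #include <string>

    using json = nlohmann::ordered_json;

    // Simplified stand-in for the server's json_value(): missing or wrong-typed
    // keys resolve to the supplied default instead of throwing.
    template <typename T>
    static T json_value_sketch(const json & body, const std::string & key, const T & default_value) {
        if (body.contains(key) && !body.at(key).is_null()) {
            try {
                return body.at(key);                 // may throw json::type_error
            } catch (const json::type_error &) {
                return default_value;                // wrong type -> default
            }
        }
        return default_value;
    }

    int main() {
        json body = {{"top_p", "not-a-number"}, {"n_predict", 64}};
        std::cout << json_value_sketch(body, "top_p", 1.0)     << "\n"; // 1.0  (wrong type)
        std::cout << json_value_sketch(body, "n_predict", 128) << "\n"; // 64   (present)
        std::cout << json_value_sketch(body, "seed", 42)       << "\n"; // 42   (missing)
    }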
@ -1,6 +1,6 @@
 # llama.cpp/example/sycl

-This example program provide the tools for llama.cpp for SYCL on Intel GPU.
+This example program provides the tools for llama.cpp for SYCL on Intel GPU.

 ## Tool
flake.lock (generated, 30 lines changed)
@ -5,11 +5,11 @@
       "nixpkgs-lib": "nixpkgs-lib"
     },
     "locked": {
-      "lastModified": 1712014858,
-      "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
+      "lastModified": 1714641030,
+      "narHash": "sha256-yzcRNDoyVP7+SCNX0wmuDju1NUCt8Dz9+lyUXEI0dbI=",
       "owner": "hercules-ci",
       "repo": "flake-parts",
-      "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
+      "rev": "e5d10a24b66c3ea8f150e47dfdb0416ab7c3390e",
       "type": "github"
     },
     "original": {
@ -20,11 +20,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1714076141,
-        "narHash": "sha256-Drmja/f5MRHZCskS6mvzFqxEaZMeciScCTFxWVLqWEY=",
+        "lastModified": 1714635257,
+        "narHash": "sha256-4cPymbty65RvF1DWQfc+Bc8B233A1BWxJnNULJKQ1EY=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "7bb2ccd8cdc44c91edba16c48d2c8f331fb3d856",
+        "rev": "63c3a29ca82437c87573e4c6919b09a24ea61b0f",
         "type": "github"
       },
       "original": {
@ -36,20 +36,14 @@
     },
     "nixpkgs-lib": {
       "locked": {
-        "dir": "lib",
-        "lastModified": 1711703276,
-        "narHash": "sha256-iMUFArF0WCatKK6RzfUJknjem0H9m4KgorO/p3Dopkk=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "d8fe5e6c92d0d190646fb9f1056741a229980089",
-        "type": "github"
+        "lastModified": 1714640452,
+        "narHash": "sha256-QBx10+k6JWz6u7VsohfSw8g8hjdBZEf8CFzXH1/1Z94=",
+        "type": "tarball",
+        "url": "https://github.com/NixOS/nixpkgs/archive/50eb7ecf4cd0a5756d7275c8ba36790e5bd53e33.tar.gz"
       },
       "original": {
-        "dir": "lib",
-        "owner": "NixOS",
-        "ref": "nixos-unstable",
-        "repo": "nixpkgs",
-        "type": "github"
+        "type": "tarball",
+        "url": "https://github.com/NixOS/nixpkgs/archive/50eb7ecf4cd0a5756d7275c8ba36790e5bd53e33.tar.gz"
       }
     },
     "root": {
@ -1182,9 +1182,9 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
 static char * fmt_size(size_t size) {
     static char buffer[128];
     if (size >= 1024*1024) {
-        sprintf(buffer, "%zuM", size/1024/1024);
+        snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024);
     } else {
-        sprintf(buffer, "%zuK", size/1024);
+        snprintf(buffer, sizeof(buffer), "%zuK", size/1024);
     }
     return buffer;
 }
ggml-cuda.cu (330 lines changed)
@ -4,7 +4,6 @@

 #include "ggml-cuda/common.cuh"
 #include "ggml-cuda/acc.cuh"
-#include "ggml-cuda/alibi.cuh"
 #include "ggml-cuda/arange.cuh"
 #include "ggml-cuda/argsort.cuh"
 #include "ggml-cuda/binbcast.cuh"
@ -14,6 +13,7 @@
 #include "ggml-cuda/cpy.cuh"
 #include "ggml-cuda/diagmask.cuh"
 #include "ggml-cuda/dmmv.cuh"
+#include "ggml-cuda/fattn.cuh"
 #include "ggml-cuda/getrows.cuh"
 #include "ggml-cuda/im2col.cuh"
 #include "ggml-cuda/mmq.cuh"
@ -112,7 +112,7 @@ static ggml_cuda_device_info ggml_cuda_init() {
     for (int id = 0; id < info.device_count; ++id) {
         int device_vmm = 0;

-#if !defined(GGML_USE_HIPBLAS)
+#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_CUDA_NO_VMM)
         CUdevice device;
         CU_CHECK(cuDeviceGet(&device, id));
         CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device));
@ -140,6 +140,7 @@ static ggml_cuda_device_info ggml_cuda_init() {
         info.devices[id].cc = 100*prop.major + 10*prop.minor;
 #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
         info.devices[id].smpb = prop.sharedMemPerBlock;
+        info.devices[id].nsm = prop.multiProcessorCount;
     }

     for (int id = 0; id < info.device_count; ++id) {
@ -257,7 +258,7 @@ struct ggml_cuda_pool_leg : public ggml_cuda_pool {
 };

 // pool with virtual memory
-#if !defined(GGML_USE_HIPBLAS)
+#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_CUDA_NO_VMM)
 struct ggml_cuda_pool_vmm : public ggml_cuda_pool {
     static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 35; // 32 GB

@ -354,7 +355,7 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool {
 #endif // !defined(GGML_USE_HIPBLAS)

 std::unique_ptr<ggml_cuda_pool> ggml_backend_cuda_context::new_pool_for_device(int device) {
-#if !defined(GGML_USE_HIPBLAS)
+#if !defined(GGML_USE_HIPBLAS) && !defined(GGML_CUDA_NO_VMM)
     if (ggml_cuda_info().devices[device].vmm) {
         return std::unique_ptr<ggml_cuda_pool>(new ggml_cuda_pool_vmm(device));
     }
@ -1645,7 +1646,7 @@ static void ggml_cuda_op_mul_mat(
     }
 }

-static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
+static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
     GGML_ASSERT(ggml_backend_buffer_is_cuda(src0->buffer));
     GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation
@ -1668,7 +1669,7 @@ static void ggml_cuda_mul_mat_vec_p021(ggml_backend_cuda_context & ctx, const gg
     ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, main_stream);
 }

-static void ggml_cuda_mul_mat_vec_nc(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
+static void ggml_cuda_mul_mat_vec_nc(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     GGML_ASSERT(!ggml_is_transposed(src0));
     GGML_ASSERT(!ggml_is_transposed(src1));
     GGML_ASSERT(!ggml_is_permuted(src0));
@ -2203,6 +2204,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_UNARY_OP_RELU:
             ggml_cuda_op_relu(ctx, dst);
             break;
+        case GGML_UNARY_OP_SIGMOID:
+            ggml_cuda_op_sigmoid(ctx, dst);
+            break;
         case GGML_UNARY_OP_HARDSIGMOID:
             ggml_cuda_op_hardsigmoid(ctx, dst);
             break;
@ -2275,9 +2279,6 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_ROPE:
             ggml_cuda_op_rope(ctx, dst);
             break;
-        case GGML_OP_ALIBI:
-            ggml_cuda_op_alibi(ctx, dst);
-            break;
         case GGML_OP_IM2COL:
             ggml_cuda_op_im2col(ctx, dst);
             break;
@ -2290,6 +2291,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_ARGSORT:
             ggml_cuda_op_argsort(ctx, dst);
             break;
+        case GGML_OP_FLASH_ATTN_EXT:
+            ggml_cuda_flash_attn_ext(ctx, dst);
+            break;
         default:
             return false;
     }
@ -2405,44 +2409,318 @@ GGML_CALL static void ggml_backend_cuda_synchronize(ggml_backend_t backend) {
     GGML_UNUSED(backend);
 }

+static void set_ggml_graph_node_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) {
+    graph_node_properties->node_address = node->data;
+    graph_node_properties->node_op = node->op;
+    for (int i = 0; i < GGML_MAX_DIMS; i++) {
+        graph_node_properties->ne[i] = node->ne[i];
+        graph_node_properties->nb[i] = node->nb[i];
+    }
+    for (int i = 0; i < GGML_MAX_SRC; i++) {
+        graph_node_properties->src_address[i] = node->src[i] ? node->src[i]->data : nullptr;
+    }
+}
+
+static bool ggml_graph_node_has_matching_properties(ggml_tensor * node, ggml_graph_node_properties * graph_node_properties) {
+    if (node->data != graph_node_properties->node_address &&
+        node->op != GGML_OP_CPY &&
+        node->op != GGML_OP_VIEW) {
+        return false;
+    }
+
+    if (node->op != graph_node_properties->node_op) {
+        return false;
+    }
+
+    for (int i = 0; i < GGML_MAX_DIMS; i++) {
+        if (node->ne[i] != graph_node_properties->ne[i]) {
+            return false;
+        }
+        if (node->nb[i] != graph_node_properties->nb[i]) {
+            return false;
+        }
+    }
+
+    for (int i = 0; i < GGML_MAX_SRC; i++) {
+        if (node->src[i] &&
+            node->src[i]->data != graph_node_properties->src_address[i] &&
+            node->op != GGML_OP_CPY &&
+            node->op != GGML_OP_VIEW
+        ) {
+            return false;
+        }
+    }
+    return true;
+}
+
 GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
     ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;

     ggml_cuda_set_device(cuda_ctx->device);

-    for (int i = 0; i < cgraph->n_nodes; i++) {
-        ggml_tensor * node = cgraph->nodes[i];
-
-        if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
-            continue;
-        }
-
+#ifdef USE_CUDA_GRAPH
+    static const bool disable_cuda_graphs_due_to_env = (getenv("GGML_CUDA_DISABLE_GRAPHS") != nullptr);
+
+    // Objects required for CUDA Graph
+    if (cuda_ctx->cuda_graph == nullptr) {
+        cuda_ctx->cuda_graph.reset(new ggml_cuda_graph());
+    }
+
+    bool use_cuda_graph = true;
+    bool cuda_graph_update_required = false;
+    // pointer to CUDA cpy kernel, which is required to identify
+    // kernel parameters which need updated in the graph for each token
+    void * ggml_cuda_cpy_fn_ptr = nullptr;
+
+    if (cuda_ctx->cuda_graph->graph == nullptr) {
+        if (ggml_cuda_info().devices[cuda_ctx->device].cc < CC_AMPERE) {
+            cuda_ctx->cuda_graph->disable_due_to_gpu_arch = true;
+#ifndef NDEBUG
+            fprintf(stderr, "%s: disabling CUDA graphs due to GPU architecture\n", __func__);
+#endif
+        }
+    }
+
+    // Disable CUDA graphs in presence of env var, old GPU, use-case which is changing too rapidly,
+    // or previous graph capture failure.
+    // Also disable for multi-gpu for now. TO DO investigate
+    if (disable_cuda_graphs_due_to_env
+        || cuda_ctx->cuda_graph->disable_due_to_gpu_arch
+        || cuda_ctx->cuda_graph->disable_due_to_too_many_updates
+        || cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture) {
+        use_cuda_graph = false;
+    }
+
+    if (use_cuda_graph) {
+        if (cuda_ctx->cuda_graph->instance == nullptr) {
+            cuda_graph_update_required = true;
         }

+        // Check if the graph size has changed
+        if (cuda_ctx->cuda_graph->ggml_graph_properties.size() != (size_t)cgraph->n_nodes) {
+            cuda_graph_update_required = true;
+            cuda_ctx->cuda_graph->ggml_graph_properties.resize(cgraph->n_nodes);
+        }
+
+        // Loop over nodes in GGML graph to determine if CUDA graph update is required
+        // and store properties to allow this comparison for the next token
+        for (int i = 0; i < cgraph->n_nodes; i++) {
+            bool has_matching_properties = true;
+            if (!cuda_graph_update_required) {
+                has_matching_properties = ggml_graph_node_has_matching_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]);
+            }
+            if (!has_matching_properties) {
+                cuda_graph_update_required = true;
+            }
+            set_ggml_graph_node_properties(cgraph->nodes[i], &cuda_ctx->cuda_graph->ggml_graph_properties[i]);
+        }
+
+        // Loop over nodes in GGML graph to obtain info needed for CUDA graph
+        cuda_ctx->cuda_graph->updated_kernel_arg.clear();
+        for (int i = 0; i < cgraph->n_nodes; i++) {
+            ggml_tensor * node = cgraph->nodes[i];
+
+            if (node->src[0] && ggml_backend_buffer_is_cuda_split(node->src[0]->buffer)) {
+                use_cuda_graph = false; // Split buffers are not supported by CUDA graph capture
 #ifndef NDEBUG
-        assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device));
-        for (int j = 0; j < GGML_MAX_SRC; j++) {
-            if (node->src[j] != nullptr) {
-                assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || ggml_backend_buffer_is_cuda_split(node->src[j]->buffer));
+                fprintf(stderr, "%s: disabling CUDA graphs due to split buffer\n", __func__);
+#endif
+            }
+
+            if (node->op == GGML_OP_MUL_MAT_ID) {
+                use_cuda_graph = false; // This node type is not supported by CUDA graph capture
+#ifndef NDEBUG
+                fprintf(stderr, "%s: disabling CUDA graphs due to mul_mat_id\n", __func__);
+#endif
+            }
+
+            if (node->op == GGML_OP_ADD && node->src[1] && node->src[1]->ne[1] > 1) {
+                // disable CUDA graphs for batch size > 1 for now.
+                // Changes in batch size or context size can cause changes to the grid size of some kernels.
+                use_cuda_graph = false;
+#ifndef NDEBUG
+                fprintf(stderr, "%s: disabling CUDA graphs due to batch size > 1 [%s] [%ld %ld %ld %ld]\n", __func__, node->name, node->ne[0], node->ne[1], node->ne[2], node->ne[3]);
+#endif
+            }
+
+            if (node->op == GGML_OP_CPY) {
+                // store the copy op parameter which changes with each token.
+                cuda_ctx->cuda_graph->updated_kernel_arg.push_back((char **) &(node->src[1]->data));
+                if (ggml_cuda_cpy_fn_ptr == nullptr) {
+                    // store a pointer to the copy op CUDA kernel to identify it later
+                    ggml_cuda_cpy_fn_ptr = ggml_cuda_cpy_fn(node->src[0], node->src[1]);
+                }
+            }
+
+            if (!use_cuda_graph) {
+                break;
             }
         }

+        // Disable CUDA graphs (from the next token) if the use-case is demanding too many consecutive graph updates.
+        if (cuda_graph_update_required) {
+            cuda_ctx->cuda_graph->number_consecutive_updates++;
+        } else {
+            cuda_ctx->cuda_graph->number_consecutive_updates = 0;
+        }
+
+        if (cuda_ctx->cuda_graph->number_consecutive_updates >= 4) {
+            cuda_ctx->cuda_graph->disable_due_to_too_many_updates = true;
+#ifndef NDEBUG
+            fprintf(stderr, "%s: disabling CUDA graphs due to too many consecutive updates\n", __func__);
+#endif
+        }
+    }
+
+    if (use_cuda_graph && cuda_graph_update_required) { // Start CUDA graph capture
+        CUDA_CHECK(cudaStreamBeginCapture(cuda_ctx->stream(), cudaStreamCaptureModeRelaxed));
+    }
+
+#else
+    bool use_cuda_graph = false;
+    bool cuda_graph_update_required = false;
+#endif // USE_CUDA_GRAPH
+
+    bool graph_evaluated_or_captured = false;
+
+    while (!graph_evaluated_or_captured) {
+        // Only perform the graph execution if CUDA graphs are not enabled, or we are capturing the graph.
+        // With the use of CUDA graphs, the execution will be performed by the graph launch.
+        if (!use_cuda_graph || cuda_graph_update_required) {
+            for (int i = 0; i < cgraph->n_nodes; i++) {
+                ggml_tensor * node = cgraph->nodes[i];
+
+                if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
+                    continue;
+                }
+
+#ifndef NDEBUG
+                assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device));
+                for (int j = 0; j < GGML_MAX_SRC; j++) {
+                    if (node->src[j] != nullptr) {
+                        assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || ggml_backend_buffer_is_cuda_split(node->src[j]->buffer));
+                    }
+                }
 #endif

-        bool ok = ggml_cuda_compute_forward(*cuda_ctx, node);
-        if (!ok) {
-            fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
+                bool ok = ggml_cuda_compute_forward(*cuda_ctx, node);
+                if (!ok) {
+                    fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
+                }
+                GGML_ASSERT(ok);
+            }
         }
-        GGML_ASSERT(ok);
+
+#ifdef USE_CUDA_GRAPH
+        if (use_cuda_graph && cuda_graph_update_required) { // End CUDA graph capture
+            if (cuda_ctx->cuda_graph->graph != nullptr) {
+                CUDA_CHECK(cudaGraphDestroy(cuda_ctx->cuda_graph->graph));
+                cuda_ctx->cuda_graph->graph = nullptr;
+            }
+            CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cuda_ctx->cuda_graph->graph));
+
+#if 0
+            if (disable_cuda_graphs_due_to_failed_capture) {
+                use_cuda_graph = false;
+                cuda_ctx->cuda_graph->disable_due_to_failed_graph_capture = true;
+#ifndef NDEBUG
+                fprintf(stderr, "%s: disabling CUDA graphs due to failed graph capture\n", __func__);
+#endif
+            } else {
+                graph_evaluated_or_captured = true; // CUDA graph has been captured
+            }
+#endif
+            graph_evaluated_or_captured = true; // CUDA graph has been captured
+        } else {
+            graph_evaluated_or_captured = true; // ggml graph has been directly evaluated
+        }
+    }
+
+    if (use_cuda_graph) {
+        if (cuda_ctx->cuda_graph->instance == nullptr) { // Create executable graph from captured graph.
+            CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0));
+        }
+
+        // Perform update to graph (if required for this token), and change copy parameter (required for every token)
+
+        if (cuda_graph_update_required) {
+            // Extract nodes from graph
+            if (cuda_ctx->cuda_graph->num_nodes == 0) {
+                // First call with null argument gets number of nodes in graph
+                CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, nullptr, &cuda_ctx->cuda_graph->num_nodes));
+            }
+            // Subsequent call with non-null argument gets nodes
+            cuda_ctx->cuda_graph->nodes.resize(cuda_ctx->cuda_graph->num_nodes);
+            cuda_ctx->cuda_graph->params.resize(cuda_ctx->cuda_graph->num_nodes);
+            if (cuda_ctx->cuda_graph->num_nodes > 0) {
+                CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, cuda_ctx->cuda_graph->nodes.data(), &cuda_ctx->cuda_graph->num_nodes));
+
+                // Loop over nodes, and extract kernel parameters from each node
+                for (size_t i = 0; i < cuda_ctx->cuda_graph->num_nodes; i++) {
+                    cudaGraphNodeType node_type;
+                    CUDA_CHECK(cudaGraphNodeGetType(cuda_ctx->cuda_graph->nodes[i], &node_type));
+                    if (node_type == cudaGraphNodeTypeKernel) {
+                        cudaError_t stat = cudaGraphKernelNodeGetParams(cuda_ctx->cuda_graph->nodes[i], &cuda_ctx->cuda_graph->params[i]); // Get params using runtime
+                        if (stat == cudaErrorInvalidDeviceFunction) {
+                            // Fails due to incorrect handling by CUDA runtime of CUDA BLAS node.
+                            // We don't need to update blas nodes, so clear error and move on.
+                            cudaGetLastError();
+                        } else {
+                            GGML_ASSERT(stat == cudaSuccess);
+                        }
+                    }
+                }
+            }
+        }
+
+        // One of the arguments to the copy kernel is updated for each token, hence we need to
+        // replace that argument with the updated value in the CUDA graph
+        if (!cuda_graph_update_required) { // on update steps, the live parameters will already be captured
+            int k = 0;
+            for (size_t i = 0; i < cuda_ctx->cuda_graph->num_nodes; i++) {
+                if (cuda_ctx->cuda_graph->params[i].func == ggml_cuda_cpy_fn_ptr) {
+                    char ** updated_kernel_arg_ptr = cuda_ctx->cuda_graph->updated_kernel_arg.at(k++);
+                    cuda_ctx->cuda_graph->params[i].kernelParams[1] = updated_kernel_arg_ptr;
+                    CUDA_CHECK(cudaGraphKernelNodeSetParams(cuda_ctx->cuda_graph->nodes[i], &cuda_ctx->cuda_graph->params[i]));
+                }
+            }
+        }
+
+        // Update graph executable
+        cudaGraphExecUpdateResultInfo result_info;
+        cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &result_info);
+        if (stat == cudaErrorGraphExecUpdateFailure) {
+#ifndef NDEBUG
+            fprintf(stderr, "%s: CUDA graph update failed\n", __func__);
+#endif
+            // The pre-existing graph exec cannot be updated due to violated constraints
+            // so instead clear error and re-instantiate
+            cudaGetLastError();
+            CUDA_CHECK(cudaGraphExecDestroy(cuda_ctx->cuda_graph->instance));
+            cuda_ctx->cuda_graph->instance = nullptr;
+            CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0));
+        } else {
+            GGML_ASSERT(stat == cudaSuccess);
+        }
+        // Launch graph
+        CUDA_CHECK(cudaGraphLaunch(cuda_ctx->cuda_graph->instance, cuda_ctx->stream()));
+#else
+        graph_evaluated_or_captured = true;
+#endif // USE_CUDA_GRAPH
     }

     return GGML_STATUS_SUCCESS;
 }
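As an aside, a minimal host-side sketch of the capture/instantiate/launch pattern the new ggml_backend_cuda_graph_compute() path is built around: record the per-token work into a graph once with cudaStreamBeginCapture/cudaStreamEndCapture, instantiate it, then re-launch (and, when constraints allow, cudaGraphExecUpdate) it on later tokens. The buffer and loop below are illustrative only and error handling is omitted; the real code additionally tracks node properties to decide when a re-capture or parameter update is needed.

    #include <cuda_runtime.h>

    int main() {
        const size_t n = 1 << 20;
        void * buf = nullptr;
        cudaMalloc(&buf, n);

        cudaStream_t stream;
        cudaStreamCreate(&stream);

        // 1. Record the work into a graph once.
        cudaGraph_t graph = nullptr;
        cudaStreamBeginCapture(stream, cudaStreamCaptureModeRelaxed);
        cudaMemsetAsync(buf, 0, n, stream);   // stands in for the captured kernel launches
        cudaStreamEndCapture(stream, &graph);

        // 2. Instantiate an executable graph from the capture.
        cudaGraphExec_t instance = nullptr;
        cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);

        // 3. Re-launch the instantiated graph instead of re-issuing the work each iteration.
        for (int iter = 0; iter < 8; ++iter) {
            cudaGraphLaunch(instance, stream);
        }
        cudaStreamSynchronize(stream);

        cudaGraphExecDestroy(instance);
        cudaGraphDestroy(graph);
        cudaStreamDestroy(stream);
        cudaFree(buf);
        return 0;
    }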

 GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
+    ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *) backend->context;
     switch (op->op) {
         case GGML_OP_UNARY:
             switch (ggml_get_unary_op(op)) {
                 case GGML_UNARY_OP_GELU:
                 case GGML_UNARY_OP_SILU:
                 case GGML_UNARY_OP_RELU:
+                case GGML_UNARY_OP_SIGMOID:
                 case GGML_UNARY_OP_HARDSIGMOID:
                 case GGML_UNARY_OP_HARDSWISH:
                 case GGML_UNARY_OP_GELU_QUICK:
@ -2552,7 +2830,6 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_OP_DIAG_MASK_INF:
         case GGML_OP_SOFT_MAX:
         case GGML_OP_ROPE:
-        case GGML_OP_ALIBI:
         case GGML_OP_IM2COL:
         case GGML_OP_POOL_2D:
         case GGML_OP_SUM_ROWS:
@ -2565,6 +2842,15 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_OP_TIMESTEP_EMBEDDING:
         case GGML_OP_LEAKY_RELU:
             return true;
+        case GGML_OP_FLASH_ATTN_EXT:
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+            return op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128;
+#else
+            if (op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128) {
+                return true;
+            }
+            return ggml_cuda_info().devices[cuda_ctx->device].cc >= CC_VOLTA;
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
         default:
             return false;
     }
@ -1,63 +0,0 @@
-#include "alibi.cuh"
-
-static __global__ void alibi_f32(const float * x, float * dst, const int ncols, const int k_rows,
-                                 const int n_heads_log2_floor, const float m0, const float m1) {
-    const int col = blockDim.x*blockIdx.x + threadIdx.x;
-
-    if (col >= ncols) {
-        return;
-    }
-
-    const int row = blockDim.y*blockIdx.y + threadIdx.y;
-    const int i = row*ncols + col;
-
-    const int k = row/k_rows;
-
-    float m_k;
-    if (k < n_heads_log2_floor) {
-        m_k = powf(m0, k + 1);
-    } else {
-        m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
-    }
-
-    dst[i] = col * m_k + x[i];
-}
-
-static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows,
-                           const int k_rows, const int n_heads_log2_floor, const float m0,
-                           const float m1, cudaStream_t stream) {
-    const dim3 block_dims(CUDA_ALIBI_BLOCK_SIZE, 1, 1);
-    const int num_blocks_x = (ncols + CUDA_ALIBI_BLOCK_SIZE - 1) / (CUDA_ALIBI_BLOCK_SIZE);
-    const dim3 block_nums(num_blocks_x, nrows, 1);
-    alibi_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, k_rows, n_heads_log2_floor, m0, m1);
-}
-
-void ggml_cuda_op_alibi(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    const ggml_tensor * src0 = dst->src[0];
-    const float * src0_d = (const float *)src0->data;
-    float * dst_d = (float *)dst->data;
-    cudaStream_t stream = ctx.stream();
-
-    GGML_ASSERT(src0->type == GGML_TYPE_F32);
-    GGML_ASSERT( dst->type == GGML_TYPE_F32);
-
-    const int64_t ne00 = src0->ne[0];
-    const int64_t ne01 = src0->ne[1];
-    const int64_t ne02 = src0->ne[2];
-    const int64_t nrows = ggml_nrows(src0);
-
-    //const int n_past = ((int32_t *) dst->op_params)[0];
-    const int n_head = ((int32_t *) dst->op_params)[1];
-    float max_bias;
-    memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
-
-    //GGML_ASSERT(ne01 + n_past == ne00);
-    GGML_ASSERT(n_head == ne02);
-
-    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
-
-    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
-    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
-
-    alibi_f32_cuda(src0_d, dst_d, ne00, nrows, ne01, n_heads_log2_floor, m0, m1, stream);
-}
@ -1,5 +0,0 @@
-#include "common.cuh"
-
-#define CUDA_ALIBI_BLOCK_SIZE 32
-
-void ggml_cuda_op_alibi(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@ -31,5 +31,4 @@ void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     memcpy(&max, (float *) dst->op_params + 1, sizeof(float));

     clamp_f32_cuda(src0_d, dst_d, min, max, ggml_nelements(src0), stream);
-    CUDA_CHECK(cudaGetLastError());
 }
@ -19,6 +19,7 @@
 #include <cassert>
 #include <cfloat>
 #include <string>
+#include <vector>

 #if defined(GGML_USE_HIPBLAS)
 #include <hip/hip_runtime.h>
@ -137,11 +138,13 @@
 #define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)

 #define WARP_SIZE 32
 #define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
+#define CUDART_HMASK 12000 // CUDA 12.0, min. ver. for half2 -> uint mask comparisons

 #define CC_PASCAL 600
 #define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
 #define CC_VOLTA 700
+#define CC_AMPERE 800
 #define CC_OFFSET_AMD 1000000
 #define CC_RDNA1 (CC_OFFSET_AMD + 1010)
 #define CC_RDNA2 (CC_OFFSET_AMD + 1030)
@ -231,83 +234,6 @@ typedef float dfloat; // dequantize float
 typedef float2 dfloat2;
 #endif //GGML_CUDA_F16

-[[noreturn]]
-static __device__ void no_device_code(
-    const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) {
-
-#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
-    printf("%s:%d: ERROR: HIP kernel %s has no device code compatible with HIP arch %d.\n",
-           file_name, line, function_name, arch);
-    GGML_UNUSED(arch_list);
-#else
-    printf("%s:%d: ERROR: CUDA kernel %s has no device code compatible with CUDA arch %d. ggml-cuda.cu was compiled for: %s\n",
-           file_name, line, function_name, arch, arch_list);
-#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
-    __trap();
-
-    GGML_UNUSED(no_device_code); // suppress unused function warning
-}
-
-#ifdef __CUDA_ARCH__
-#define NO_DEVICE_CODE no_device_code(__FILE__, __LINE__, __FUNCTION__, __CUDA_ARCH__, STRINGIZE(__CUDA_ARCH_LIST__))
-#else
-#define NO_DEVICE_CODE //GGML_ASSERT(false && "NO_DEVICE_CODE not valid in host code.")
-#endif // __CUDA_ARCH__
-
-static __device__ __forceinline__ float warp_reduce_sum(float x) {
-#pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
-        x += __shfl_xor_sync(0xffffffff, x, mask, 32);
-    }
-    return x;
-}
-
-static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) {
-#pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
-        a.x += __shfl_xor_sync(0xffffffff, a.x, mask, 32);
-        a.y += __shfl_xor_sync(0xffffffff, a.y, mask, 32);
-    }
-    return a;
-}
-
-#ifdef GGML_CUDA_F16
-static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) {
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
-#pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
-        a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, mask, 32));
-    }
-    return a;
-#else
-    GGML_UNUSED(a);
-    NO_DEVICE_CODE;
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
-}
-#endif // GGML_CUDA_F16
-
-static __device__ __forceinline__ float warp_reduce_max(float x) {
-#pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
-        x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
-    }
-    return x;
-}
-
-//static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
-//#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
-//#pragma unroll
-//   for (int mask = 16; mask > 0; mask >>= 1) {
-//       x = __hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
-//   }
-//   return x;
-//#else
-//   GGML_UNUSED(x);
-//   NO_DEVICE_CODE;
-//#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
-//}
-

 #if defined(GGML_USE_HIPBLAS)
 #define __CUDA_ARCH__ 1300

@ -391,6 +317,147 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
 }
 #endif // defined(GGML_USE_HIPBLAS)

+#define FP16_AVAILABLE (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= CC_PASCAL
+
+#define FP16_MMA_AVAILABLE !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_VOLTA
+
+static bool fast_fp16_available(const int cc) {
+    return cc >= CC_PASCAL && cc != 610;
+}
+
+static bool fp16_mma_available(const int cc) {
+    return cc < CC_OFFSET_AMD && cc >= CC_VOLTA;
+}
+
+[[noreturn]]
+static __device__ void no_device_code(
+    const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) {
+
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+    printf("%s:%d: ERROR: HIP kernel %s has no device code compatible with HIP arch %d.\n",
+           file_name, line, function_name, arch);
+    GGML_UNUSED(arch_list);
+#else
+    printf("%s:%d: ERROR: CUDA kernel %s has no device code compatible with CUDA arch %d. ggml-cuda.cu was compiled for: %s\n",
+           file_name, line, function_name, arch, arch_list);
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+    __trap();
+
+    GGML_UNUSED(no_device_code); // suppress unused function warning
+}
+
+#ifdef __CUDA_ARCH__
+#define NO_DEVICE_CODE no_device_code(__FILE__, __LINE__, __FUNCTION__, __CUDA_ARCH__, STRINGIZE(__CUDA_ARCH_LIST__))
+#else
+#define NO_DEVICE_CODE //GGML_ASSERT(false && "NO_DEVICE_CODE not valid in host code.")
+#endif // __CUDA_ARCH__
+
+static __device__ __forceinline__ float warp_reduce_sum(float x) {
+#pragma unroll
+    for (int mask = 16; mask > 0; mask >>= 1) {
+        x += __shfl_xor_sync(0xffffffff, x, mask, 32);
+    }
+    return x;
+}
+
+static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) {
+#pragma unroll
+    for (int mask = 16; mask > 0; mask >>= 1) {
+        a.x += __shfl_xor_sync(0xffffffff, a.x, mask, 32);
+        a.y += __shfl_xor_sync(0xffffffff, a.y, mask, 32);
+    }
+    return a;
+}
+
+static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) {
+#if FP16_AVAILABLE
+
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#pragma unroll
+    for (int mask = 16; mask > 0; mask >>= 1) {
+        const half2 a_other = __shfl_xor_sync(0xffffffff, a, mask, 32);
+        reinterpret_cast<half&>(a.x) += __low2half(a_other);
+        reinterpret_cast<half&>(a.y) += __high2half(a_other);
+    }
+    return a;
+#else
+#pragma unroll
+    for (int mask = 16; mask > 0; mask >>= 1) {
+        a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, mask, 32));
+    }
+    return a;
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)

+#else
+    NO_DEVICE_CODE;
+    return a;
+#endif // FP16_AVAILABLE
+}
+
+static __device__ __forceinline__ float warp_reduce_max(float x) {
+#pragma unroll
+    for (int mask = 16; mask > 0; mask >>= 1) {
+        x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
+    }
+    return x;
+}
+
+static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b) {
+#if FP16_AVAILABLE
+
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+    return __float2half(fmaxf(__half2float(a), __half2float(b)));
+#else
+    return __hmax(a, b);
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+
+#else
+    NO_DEVICE_CODE;
+    GGML_UNUSED(b);
+    return a;
+#endif // FP16_AVAILABLE
+}
+
+static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const half2 b) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+
+#if CUDART_VERSION >= CUDART_HMAX
+    return __hmax2(a, b);
+#else
+    half2 ret;
+    reinterpret_cast<half&>(ret.x) = __float2half(fmaxf( __low2float(a), __low2float(b)));
+    reinterpret_cast<half&>(ret.y) = __float2half(fmaxf(__high2float(a), __high2float(b)));
+    return ret;
+#endif // CUDART_VERSION >= CUDART_HMAX
+
+#else
+    GGML_UNUSED(a);
+    GGML_UNUSED(b);
+    NO_DEVICE_CODE;
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+}
+
+static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
+#pragma unroll
+    for (int mask = 16; mask > 0; mask >>= 1) {
+        x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
+    }
+    return x;
+#else
+    GGML_UNUSED(x);
+    NO_DEVICE_CODE;
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
+}
+
+#if CUDART_VERSION < CUDART_HMASK
+static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half2 b) {
+    const uint32_t mask_low  = 0x0000FFFF * (float( __low2half(a)) > float( __low2half(b)));
+    const uint32_t mask_high = 0xFFFF0000 * (float(__high2half(a)) > float(__high2half(b)));
+    return mask_low | mask_high;
+}
+#endif // CUDART_VERSION < 12000
+
 // TODO: move to ggml-common.h
 static const __device__ int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
@ -404,6 +471,7 @@ struct ggml_cuda_device_info {

     struct cuda_device_info {
         int cc; // compute capability
+        int nsm; // number of streaming multiprocessors
         size_t smpb; // max. shared memory per block
         bool vmm; // virtual memory support
         size_t vmm_granularity; // granularity of virtual memory
@ -479,6 +547,43 @@ struct ggml_tensor_extra_gpu {
     cudaEvent_t events[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS]; // events for synchronizing multiple GPUs
 };

+#if (CUDART_VERSION >= 12000) && defined(GGML_CUDA_USE_GRAPHS)
+#define USE_CUDA_GRAPH
+#endif
+
+struct ggml_graph_node_properties {
+    void * node_address;
+    ggml_op node_op;
+    int64_t ne[GGML_MAX_DIMS];
+    size_t nb[GGML_MAX_DIMS];
+    void * src_address[GGML_MAX_SRC];
+};
+
+struct ggml_cuda_graph {
+#ifdef USE_CUDA_GRAPH
+    ~ggml_cuda_graph() {
+        if (instance != nullptr) {
+            CUDA_CHECK(cudaGraphExecDestroy(instance));
+        }
+        if (graph != nullptr) {
+            CUDA_CHECK(cudaGraphDestroy(graph));
+        }
+    }
+    cudaGraph_t graph = nullptr;
+    cudaGraphExec_t instance = nullptr;
+    size_t num_nodes = 0;
+    std::vector<cudaGraphNode_t> nodes;
+    std::vector<cudaKernelNodeParams> params;
+    bool disable_due_to_gpu_arch = false;
+    bool disable_due_to_too_many_updates = false;
+    bool disable_due_to_failed_graph_capture = false;
+    int number_consecutive_updates = 0;
+    std::vector<ggml_graph_node_properties> ggml_graph_properties;
+    std::vector<char **> updated_kernel_arg;
+#endif
+};
+
 struct ggml_backend_cuda_context {
     int device;
     std::string name;
@ -487,6 +592,8 @@ struct ggml_backend_cuda_context {
     cudaStream_t streams[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { { nullptr } };
     cublasHandle_t cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};

+    std::unique_ptr<ggml_cuda_graph> cuda_graph;
+
     explicit ggml_backend_cuda_context(int device) :
         device(device),
         name(GGML_CUDA_NAME + std::to_string(device)) {
@ -727,7 +727,6 @@ static void convert_unary_cuda(const void * __restrict__ vx, dst_t * __restrict_
 }

 to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
-    int id;
     switch (type) {
         case GGML_TYPE_Q4_0:
             return dequantize_row_q4_0_cuda;
@ -738,8 +737,7 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
         case GGML_TYPE_Q5_1:
             return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
         case GGML_TYPE_Q8_0:
-            CUDA_CHECK(cudaGetDevice(&id));
-            if (ggml_cuda_info().devices[id].cc >= CC_PASCAL) {
+            if (ggml_cuda_info().devices[ggml_cuda_get_device()].cc >= CC_PASCAL) {
                 return dequantize_block_q8_0_f16_cuda;
             }
             return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
@ -459,3 +459,32 @@ void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     ggml_cuda_cpy(ctx, src0, dst);
 }
+
+void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
+    if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
+        return (void*) cpy_f32_f16<cpy_1_f32_f32>;
+    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
+        return (void*) cpy_f32_f16<cpy_1_f32_f16>;
+    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) {
+        return (void*) cpy_f32_q<cpy_blck_f32_q8_0, QK8_0>;
+    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) {
+        return (void*) cpy_f32_q<cpy_blck_f32_q4_0, QK4_0>;
+    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) {
+        return (void*) cpy_f32_q<cpy_blck_f32_q4_1, QK4_1>;
+    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_0) {
+        return (void*) cpy_f32_q<cpy_blck_f32_q5_0, QK5_0>;
+    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_IQ4_NL) {
+        return (void*) cpy_f32_q<cpy_blck_f32_iq4_nl, QK4_NL>;
+    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_1) {
+        return (void*) cpy_f32_q<cpy_blck_f32_q5_1, QK5_1>;
+    } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) {
+        return (void*) cpy_f32_f16<cpy_1_f32_f16>;
+    } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) {
+        return (void*) cpy_f32_f16<cpy_1_f16_f32>;
+    } else {
+        fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__,
+            ggml_type_name(src0->type), ggml_type_name(src1->type));
+        GGML_ASSERT(false);
+    }
+}
@ -5,3 +5,5 @@
 void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1);

 void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1);
ggml-cuda/fattn-common.cuh (new file, 47 lines)
@ -0,0 +1,47 @@
+#define FATTN_KQ_STRIDE 256
+#define HALF_MAX_HALF __float2half(65504.0f/2) // Use neg. of this instead of -INFINITY to initialize KQ max vals to avoid NaN upon subtraction.
+#define SOFTMAX_FTZ_THRESHOLD -20.0f // Softmax exp. of values smaller than this are flushed to zero to avoid NaNs.
+
+template<int D, int parallel_blocks> // D == head size
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+__launch_bounds__(D, 1)
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+static __global__ void flash_attn_combine_results(
+        const float * __restrict__ VKQ_parts,
+        const float2 * __restrict__ VKQ_meta,
+        float * __restrict__ dst) {
+    VKQ_parts += parallel_blocks*D * gridDim.y*blockIdx.x;
+    VKQ_meta  += parallel_blocks   * gridDim.y*blockIdx.x;
+    dst       += D * gridDim.y*blockIdx.x;
+
+    const int tid = threadIdx.x;
+    __builtin_assume(tid < D);
+
+    __shared__ float2 meta[parallel_blocks];
+    if (tid < 2*parallel_blocks) {
+        ((float *) meta)[threadIdx.x] = ((const float *)VKQ_meta) [blockIdx.y*(2*parallel_blocks) + tid];
+    }
+
+    __syncthreads();
+
+    float kqmax = meta[0].x;
+#pragma unroll
+    for (int l = 1; l < parallel_blocks; ++l) {
+        kqmax = max(kqmax, meta[l].x);
+    }
+
+    float VKQ_numerator   = 0.0f;
+    float VKQ_denominator = 0.0f;
+#pragma unroll
+    for (int l = 0; l < parallel_blocks; ++l) {
+        const float diff = meta[l].x - kqmax;
+        const float KQ_max_scale = expf(diff);
+        const uint32_t ftz_mask = 0xFFFFFFFF * (diff > SOFTMAX_FTZ_THRESHOLD);
+        *((uint32_t *) &KQ_max_scale) &= ftz_mask;
+
+        VKQ_numerator   += KQ_max_scale * VKQ_parts[l*gridDim.y*D + blockIdx.y*D + tid];
+        VKQ_denominator += KQ_max_scale * meta[l].y;
+    }
+
+    dst[blockIdx.y*D + tid] = VKQ_numerator / VKQ_denominator;
+}
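The flash_attn_combine_results kernel above merges the partial outputs of parallel_blocks independent blocks using the usual max-shifted (numerically stable) softmax accumulation: each part contributes a partial numerator plus its running maximum and denominator in VKQ_meta. A scalar host-side sketch of the same reduction per output element follows; it is illustrative only, and the flush-to-zero masking via SOFTMAX_FTZ_THRESHOLD is omitted.

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    // One partial result per parallel block: numerator, running max, denominator.
    struct Part { float numer; float max; float denom; };

    static float combine(const std::vector<Part> & parts) {
        float m = parts[0].max;
        for (const Part & p : parts) m = std::max(m, p.max);  // global running max

        float numer = 0.0f, denom = 0.0f;
        for (const Part & p : parts) {
            const float scale = std::exp(p.max - m);          // rescale each part to the global max
            numer += scale * p.numer;
            denom += scale * p.denom;
        }
        return numer / denom;
    }

    int main() {
        std::vector<Part> parts = {{1.5f, 0.2f, 2.0f}, {0.7f, 1.1f, 1.3f}};
        printf("%f\n", combine(parts));
    }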
ggml-cuda/fattn-vec-f16.cu (new file, 430 lines)
@ -0,0 +1,430 @@
+#include "common.cuh"
+#include "fattn-common.cuh"
+#include "fattn-vec-f16.cuh"
+
+template<int D, int ncols, int parallel_blocks> // D == head size
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+__launch_bounds__(D, 1)
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+static __global__ void flash_attn_vec_ext_f16(
+        const char * __restrict__ Q,
+        const char * __restrict__ K,
+        const char * __restrict__ V,
+        const char * __restrict__ mask,
+        float * __restrict__ dst,
+        float2 * __restrict__ dst_meta,
+        const float scale,
+        const float max_bias,
+        const float m0,
+        const float m1,
+        const uint32_t n_head_log2,
+        const int ne00,
+        const int ne01,
+        const int ne02,
+        const int ne03,
+        const int ne10,
+        const int ne11,
+        const int ne12,
+        const int ne13,
+        const int ne31,
+        const int nb31,
+        const int nb01,
+        const int nb02,
+        const int nb03,
+        const int nb11,
+        const int nb12,
+        const int nb13,
+        const int ne0,
+        const int ne1,
+        const int ne2,
+        const int ne3) {
+#if FP16_AVAILABLE
+    //In this kernel Q, K, V are matrices while i, j, k are matrix indices.
+
+    const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on.
+    const int ip  =  blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
+
+    const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
+    const float2 * Q_f2  = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0);
+    const half2  * K_h2  = (const half2  *) (K + nb12*(blockIdx.y / gqa_ratio));
+    const half   * V_h   = (const half   *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape
+    const half   * maskh = (const half   *)  mask + ne11*ic0;
+
+    const int stride_KV  = nb11 / sizeof(half);
+    const int stride_KV2 = nb11 / sizeof(half2);
+
+    half slopeh = __float2half(1.0f);
+
+    // ALiBi
+    if (max_bias > 0.0f) {
+        const int h = blockIdx.y;
+
+        const float base = h < n_head_log2 ? m0 : m1;
+        const int   exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
+
+        slopeh = __float2half(powf(base, exph));
+    }
+
+    static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64.");
+    constexpr int nwarps = D / WARP_SIZE;
+    const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
+    __builtin_assume(tid < D);
+
+    __shared__ half KQ[ncols*D];
+#pragma unroll
+    for (int j = 0; j < ncols; ++j) {
+        KQ[j*D + tid] = -HALF_MAX_HALF;
+    }
+    half2 * KQ2 = (half2 *) KQ;
+
+    half kqmax[ncols];
+#pragma unroll
+    for (int j = 0; j < ncols; ++j) {
+        kqmax[j] = -HALF_MAX_HALF;
+    }
+    half kqsum[ncols] = {0.0f};
+
+    __shared__ half kqmax_shared[ncols][WARP_SIZE];
+    __shared__ half kqsum_shared[ncols][WARP_SIZE];
+#pragma unroll
+    for (int j = 0; j < ncols; ++j) {
+        if (threadIdx.y == 0) {
+            kqmax_shared[j][threadIdx.x] = -HALF_MAX_HALF;
+            kqsum_shared[j][threadIdx.x] = 0.0f;
+        }
+    }
+    __syncthreads();
+
+    // Convert Q to half2 and store in registers:
+    half2 Q_h2[ncols][D/(2*WARP_SIZE)];
+#pragma unroll
+    for (int j = 0; j < ncols; ++j) {
+#pragma unroll
+        for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
+            const int i = i0 + threadIdx.x;
+
+            const float2 tmp = Q_f2[j*(nb01/sizeof(float2)) + i];
+            Q_h2[j][i0/WARP_SIZE] = make_half2(scale, scale) * make_half2(tmp.x, tmp.y);
+        }
+    }
+
+    half2 VKQ[ncols] = {{0.0f, 0.0f}};
+
+    const int k_start = parallel_blocks == 1 ? 0 : ip*D;
+    for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) {
+        // Calculate KQ tile and keep track of new maximum KQ values:
+
+        // For unknown reasons using a half array of size 1 for kqmax_new causes a performance regression,
+        // see https://github.com/ggerganov/llama.cpp/pull/7061 .
+        // Therefore this variable is defined twice but only used once (so that the compiler can optimize out the unused variable).
+        half kqmax_new = kqmax[0];
+        half kqmax_new_arr[ncols];
+#pragma unroll
+        for (int j = 0; j < ncols; ++j) {
+            kqmax_new_arr[j] = kqmax[j];
+        }
+
+#pragma unroll
+        for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += nwarps) {
+            const int i_KQ = i_KQ_0 + threadIdx.y;
+
+            if ((i_KQ_0 + nwarps > D && i_KQ >= D) || (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + i_KQ >= ne11)) {
+                break;
+            }
+
+            half2 sum2[ncols] = {{0.0f, 0.0f}};
+#pragma unroll
+            for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) {
+                const int k_KQ = k_KQ_0 + threadIdx.x;
+
+                const half2 K_ik = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ];
+#pragma unroll
+                for (int j = 0; j < ncols; ++j) {
+                    sum2[j] += K_ik * Q_h2[j][k_KQ_0/WARP_SIZE];
+                }
+            }
+
+#pragma unroll
+            for (int j = 0; j < ncols; ++j) {
+                sum2[j] = warp_reduce_sum(sum2[j]);
+                half sum = __low2half(sum2[j]) + __high2half(sum2[j]);
+                sum += mask ? slopeh*maskh[j*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f);
+
+                if (ncols == 1) {
+                    kqmax_new        = ggml_cuda_hmax(kqmax_new, sum);
+                } else {
+                    kqmax_new_arr[j] = ggml_cuda_hmax(kqmax_new_arr[j], sum);
+                }
+
+                if (threadIdx.x == 0) {
+                    KQ[j*D + i_KQ] = sum;
+                }
+            }
+        }
+
+#pragma unroll
+        for (int j = 0; j < ncols; ++j) {
+            half kqmax_new_j = ncols == 1 ? kqmax_new : kqmax_new_arr[j];
+
+            kqmax_new_j = warp_reduce_max(kqmax_new_j);
+            if (threadIdx.x == 0) {
+                kqmax_shared[j][threadIdx.y] = kqmax_new_j;
+            }
+        }
+
+        __syncthreads();
+
+#pragma unroll
+        for (int j = 0; j < ncols; ++j) {
+            half kqmax_new_j = kqmax_shared[j][threadIdx.x];
+            kqmax_new_j = warp_reduce_max(kqmax_new_j);
||||||
|
|
||||||
|
const half KQ_max_scale = hexp(kqmax[j] - kqmax_new_j);
|
||||||
|
kqmax[j] = kqmax_new_j;
|
||||||
|
|
||||||
|
const half val = hexp(KQ[j*D + tid] - kqmax[j]);
|
||||||
|
kqsum[j] = kqsum[j]*KQ_max_scale + val;
|
||||||
|
KQ[j*D + tid] = val;
|
||||||
|
|
||||||
|
VKQ[j] *= __half2half2(KQ_max_scale);
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int k0 = 0; k0 < D; k0 += 2) {
|
||||||
|
if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k0 >= ne11) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
half2 V_k;
|
||||||
|
reinterpret_cast<half&>(V_k.x) = V_h[(k_VKQ_0 + k0 + 0)*stride_KV + tid];
|
||||||
|
reinterpret_cast<half&>(V_k.y) = V_h[(k_VKQ_0 + k0 + 1)*stride_KV + tid];
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
VKQ[j] += V_k*KQ2[j*(D/2) + k0/2];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
kqsum[j] = warp_reduce_sum(kqsum[j]);
|
||||||
|
if (threadIdx.x == 0) {
|
||||||
|
kqsum_shared[j][threadIdx.y] = kqsum[j];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) {
|
||||||
|
kqsum[j_VKQ] = kqsum_shared[j_VKQ][threadIdx.x];
|
||||||
|
kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]);
|
||||||
|
|
||||||
|
half dst_val = (__low2half(VKQ[j_VKQ]) + __high2half(VKQ[j_VKQ]));
|
||||||
|
if (parallel_blocks == 1) {
|
||||||
|
dst_val /= kqsum[j_VKQ];
|
||||||
|
}
|
||||||
|
const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
|
||||||
|
dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (parallel_blocks != 1 && tid != 0) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
dst_meta[(ic0 + j)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j], kqsum[j]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
NO_DEVICE_CODE;
|
||||||
|
#endif // FP16_AVAILABLE
|
||||||
|
}
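
The loop above is a streaming ("online") softmax: whenever a larger logit appears, the running exp-sum (kqsum) and the running V accumulator (VKQ) are rescaled by exp(old_max - new_max) before the new term is added, so no second pass over the keys is needed. A minimal host-side C++ sketch of that update, with hypothetical names and a scalar value instead of a D-wide vector, purely for illustration:

#include <cmath>
#include <cstdio>
#include <vector>

// Streaming softmax-weighted sum: computes sum_i softmax(s)_i * v_i in one pass.
// Mirrors the kqmax/kqsum/VKQ update in flash_attn_vec_ext_f16, but in scalar form.
static float streaming_softmax_dot(const std::vector<float> & s, const std::vector<float> & v) {
    float running_max = -INFINITY;
    float running_sum = 0.0f;  // corresponds to kqsum[j]
    float running_acc = 0.0f;  // corresponds to VKQ[j] (a scalar here instead of a D-wide vector)

    for (size_t i = 0; i < s.size(); ++i) {
        const float new_max = fmaxf(running_max, s[i]);
        const float scale   = expf(running_max - new_max); // rescale old terms to the new max
        const float p       = expf(s[i] - new_max);        // weight of the current key

        running_sum = running_sum*scale + p;
        running_acc = running_acc*scale + p*v[i];
        running_max = new_max;
    }
    return running_acc / running_sum; // the kernel defers this division when parallel_blocks > 1
}

int main() {
    const std::vector<float> s = {0.5f, 2.0f, -1.0f, 3.0f};
    const std::vector<float> v = {1.0f, 2.0f,  3.0f, 4.0f};
    printf("%f\n", streaming_softmax_dot(s, v)); // ~3.32, dominated by the largest logit
    return 0;
}
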
|
||||||
|
|
||||||
|
template <int D, int cols_per_block, int parallel_blocks> void launch_fattn_vec_f16(
|
||||||
|
const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V, ggml_tensor * KQV, const ggml_tensor * mask,
|
||||||
|
ggml_cuda_pool & pool, cudaStream_t main_stream
|
||||||
|
) {
|
||||||
|
ggml_cuda_pool_alloc<float> dst_tmp(pool);
|
||||||
|
ggml_cuda_pool_alloc<float2> dst_tmp_meta(pool);
|
||||||
|
|
||||||
|
if (parallel_blocks > 1) {
|
||||||
|
dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV));
|
||||||
|
dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV));
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr int nwarps = (D + WARP_SIZE - 1) / WARP_SIZE;
|
||||||
|
const dim3 block_dim(WARP_SIZE, nwarps, 1);
|
||||||
|
const dim3 blocks_num(parallel_blocks*((Q->ne[1] + cols_per_block - 1) / cols_per_block), Q->ne[2], Q->ne[3]);
|
||||||
|
const int shmem = 0;
|
||||||
|
|
||||||
|
float scale = 1.0f;
|
||||||
|
float max_bias = 0.0f;
|
||||||
|
|
||||||
|
memcpy(&scale, (float *) KQV->op_params + 0, sizeof(float));
|
||||||
|
memcpy(&max_bias, (float *) KQV->op_params + 1, sizeof(float));
|
||||||
|
|
||||||
|
const uint32_t n_head = Q->ne[2];
|
||||||
|
const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
|
||||||
|
|
||||||
|
const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
|
||||||
|
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
|
||||||
|
|
||||||
|
flash_attn_vec_ext_f16<D, cols_per_block, parallel_blocks>
|
||||||
|
<<<blocks_num, block_dim, shmem, main_stream>>> (
|
||||||
|
(const char *) Q->data,
|
||||||
|
(const char *) K->data,
|
||||||
|
(const char *) V->data,
|
||||||
|
mask ? ((const char *) mask->data) : nullptr,
|
||||||
|
parallel_blocks == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr,
|
||||||
|
scale, max_bias, m0, m1, n_head_log2,
|
||||||
|
Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3],
|
||||||
|
K->ne[0], K->ne[1], K->ne[2], K->ne[3],
|
||||||
|
mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0,
|
||||||
|
Q->nb[1], Q->nb[2], Q->nb[3],
|
||||||
|
K->nb[1], K->nb[2], K->nb[3],
|
||||||
|
KQV->ne[0], KQV->ne[1], KQV->ne[2], KQV->ne[3]
|
||||||
|
);
|
||||||
|
CUDA_CHECK(cudaGetLastError());
|
||||||
|
|
||||||
|
if (parallel_blocks == 1) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const dim3 block_dim_combine(D, 1, 1);
|
||||||
|
const dim3 blocks_num_combine(Q->ne[1], blocks_num.y, blocks_num.z);
|
||||||
|
const int shmem_combine = 0;
|
||||||
|
|
||||||
|
flash_attn_combine_results<D, parallel_blocks>
|
||||||
|
<<<blocks_num_combine, block_dim_combine, shmem_combine, main_stream>>>
|
||||||
|
(dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data);
|
||||||
|
CUDA_CHECK(cudaGetLastError());
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
const ggml_tensor * Q = dst->src[0];
|
||||||
|
const ggml_tensor * K = dst->src[1];
|
||||||
|
const ggml_tensor * V = dst->src[2];
|
||||||
|
|
||||||
|
const ggml_tensor * mask = dst->src[3];
|
||||||
|
|
||||||
|
ggml_tensor * KQV = dst;
|
||||||
|
|
||||||
|
const int32_t precision = KQV->op_params[2];
|
||||||
|
GGML_ASSERT(precision == GGML_PREC_DEFAULT);
|
||||||
|
|
||||||
|
constexpr int cols_per_block = 1;
|
||||||
|
constexpr int parallel_blocks = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 256:
|
||||||
|
launch_fattn_vec_f16<256, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_flash_attn_ext_vec_f16_no_mma(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
const ggml_tensor * Q = dst->src[0];
|
||||||
|
const ggml_tensor * K = dst->src[1];
|
||||||
|
const ggml_tensor * V = dst->src[2];
|
||||||
|
|
||||||
|
const ggml_tensor * mask = dst->src[3];
|
||||||
|
|
||||||
|
ggml_tensor * KQV = dst;
|
||||||
|
|
||||||
|
const int32_t precision = KQV->op_params[2];
|
||||||
|
GGML_ASSERT(precision == GGML_PREC_DEFAULT);
|
||||||
|
GGML_ASSERT(Q->ne[0] == 64 || Q->ne[0] == 128 && "FlashAttention without tensor cores only supports head sizes 64 and 128.");
|
||||||
|
|
||||||
|
if (Q->ne[1] == 1) {
|
||||||
|
constexpr int cols_per_block = 1;
|
||||||
|
constexpr int parallel_blocks = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Q->ne[1] == 2) {
|
||||||
|
constexpr int cols_per_block = 2;
|
||||||
|
constexpr int parallel_blocks = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Q->ne[1] <= 4) {
|
||||||
|
constexpr int cols_per_block = 4;
|
||||||
|
constexpr int parallel_blocks = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Q->ne[1] <= 8) {
|
||||||
|
constexpr int cols_per_block = 8;
|
||||||
|
constexpr int parallel_blocks = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr int cols_per_block = 8;
|
||||||
|
constexpr int parallel_blocks = 1;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
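
Both dispatch paths above offset the K and V pointers by blockIdx.y / gqa_ratio because, with grouped-query attention, several query heads share one K/V head (gqa_ratio = ne02 / ne12). A small standalone C++ sketch of that head mapping, with hypothetical helper names:

#include <cassert>
#include <cstdio>

// Map a query head to the K/V head it attends with under grouped-query attention.
// In the kernels above: gqa_ratio = ne02 / ne12 and the K/V pointers use blockIdx.y / gqa_ratio.
static int kv_head_for_q_head(int q_head, int n_head_q, int n_head_kv) {
    assert(n_head_kv > 0 && n_head_q % n_head_kv == 0);
    const int gqa_ratio = n_head_q / n_head_kv; // > 1 means several Q heads per K/V head
    return q_head / gqa_ratio;
}

int main() {
    // e.g. 32 query heads sharing 8 K/V heads: heads 0..3 -> 0, heads 4..7 -> 1, ...
    for (int h = 0; h < 8; ++h) {
        printf("q head %d -> kv head %d\n", h, kv_head_for_q_head(h, 32, 8));
    }
    return 0;
}
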
5
ggml-cuda/fattn-vec-f16.cuh
Normal file
@ -0,0 +1,5 @@
#include "common.cuh"

void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_flash_attn_ext_vec_f16_no_mma(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
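
The m0, m1 and n_head_log2 values consumed by the "// ALiBi" blocks in these kernels are computed on the host in the launch functions; the per-head slope is then base^exponent, with the base chosen by whether the head index is below the largest power of two not exceeding n_head. A self-contained C++ sketch of that slope computation (standalone helper, assumed names):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Per-head ALiBi slope as derived from max_bias, mirroring the m0/m1/n_head_log2
// computation in the launch functions and the "// ALiBi" block in the kernels.
static float alibi_slope(float max_bias, uint32_t n_head, int h) {
    if (max_bias <= 0.0f) {
        return 1.0f; // slope of 1 means the mask is applied unscaled
    }
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    const float base = (uint32_t) h < n_head_log2 ? m0 : m1;
    const int   exph = (uint32_t) h < n_head_log2 ? h + 1 : 2*(h - (int) n_head_log2) + 1;

    return powf(base, exph);
}

int main() {
    // With max_bias = 8 and 8 heads this reproduces the usual 1/2, 1/4, ..., 1/256 sequence.
    for (int h = 0; h < 8; ++h) {
        printf("head %d: slope %g\n", h, alibi_slope(8.0f, 8, h));
    }
    return 0;
}
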
384
ggml-cuda/fattn-vec-f32.cu
Normal file
@ -0,0 +1,384 @@
#include "common.cuh"
#include "fattn-common.cuh"
#include "fattn-vec-f32.cuh"

template<int D, int ncols, int parallel_blocks> // D == head size
#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
__launch_bounds__(D, 1)
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
static __global__ void flash_attn_vec_ext_f32(
        const char * __restrict__ Q,
        const char * __restrict__ K,
        const char * __restrict__ V,
        const char * __restrict__ mask,
        float  * __restrict__ dst,
        float2 * __restrict__ dst_meta,
        const float scale,
        const float max_bias,
        const float m0,
        const float m1,
        const uint32_t n_head_log2,
        const int ne00, const int ne01, const int ne02, const int ne03,
        const int ne10, const int ne11, const int ne12, const int ne13,
        const int ne31, const int nb31,
        const int nb01, const int nb02, const int nb03,
        const int nb11, const int nb12, const int nb13,
        const int ne0,  const int ne1,  const int ne2,  const int ne3) {
//In this kernel Q, K, V are matrices while i, j, k are matrix indices.
|
||||||
|
|
||||||
|
const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on.
|
||||||
|
const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
|
||||||
|
|
||||||
|
const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
|
||||||
|
const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0);
|
||||||
|
const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio));
|
||||||
|
const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape
|
||||||
|
const half * maskh = (const half *) mask + ne11*ic0;
|
||||||
|
|
||||||
|
const int stride_KV = nb11 / sizeof(half);
|
||||||
|
const int stride_KV2 = nb11 / sizeof(half2);
|
||||||
|
|
||||||
|
float slope = 1.0f;
|
||||||
|
|
||||||
|
// ALiBi
|
||||||
|
if (max_bias > 0.0f) {
|
||||||
|
const int h = blockIdx.y;
|
||||||
|
|
||||||
|
const float base = h < n_head_log2 ? m0 : m1;
|
||||||
|
const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
|
||||||
|
|
||||||
|
slope = powf(base, exph);
|
||||||
|
}
|
||||||
|
|
||||||
|
static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64.");
|
||||||
|
constexpr int nwarps = D / WARP_SIZE;
|
||||||
|
const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
|
||||||
|
__builtin_assume(tid < D);
|
||||||
|
|
||||||
|
__shared__ float KQ[ncols*D];
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
KQ[j*D + tid] = -FLT_MAX/2.0f;
|
||||||
|
}
|
||||||
|
|
||||||
|
float kqmax[ncols];
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
kqmax[j] = -FLT_MAX/2.0f;
|
||||||
|
}
|
||||||
|
float kqsum[ncols] = {0.0f};
|
||||||
|
|
||||||
|
__shared__ float kqmax_shared[ncols][WARP_SIZE];
|
||||||
|
__shared__ float kqsum_shared[ncols][WARP_SIZE];
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
if (threadIdx.y == 0) {
|
||||||
|
kqmax_shared[j][threadIdx.x] = -FLT_MAX/2.0f;
|
||||||
|
kqsum_shared[j][threadIdx.x] = 0.0f;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
// Convert Q to half2 and store in registers:
|
||||||
|
float2 Q_h2[ncols][D/(2*WARP_SIZE)];
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
|
||||||
|
const int i = i0 + threadIdx.x;
|
||||||
|
|
||||||
|
Q_h2[j][i0/WARP_SIZE] = Q_f2[j*(nb01/sizeof(float2)) + i];
|
||||||
|
Q_h2[j][i0/WARP_SIZE].x *= scale;
|
||||||
|
Q_h2[j][i0/WARP_SIZE].y *= scale;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
float VKQ[ncols] = {0.0f};
|
||||||
|
|
||||||
|
const int k_start = parallel_blocks == 1 ? 0 : ip*D;
|
||||||
|
for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) {
|
||||||
|
// Calculate KQ tile and keep track of new maximum KQ values:
|
||||||
|
|
||||||
|
float kqmax_new_arr[ncols];
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
kqmax_new_arr[j] = kqmax[j];
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += nwarps) {
|
||||||
|
const int i_KQ = i_KQ_0 + threadIdx.y;
|
||||||
|
|
||||||
|
if ((i_KQ_0 + nwarps > D && i_KQ >= D) || (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + i_KQ >= ne11)) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
float sum[ncols] = {0.0f};
|
||||||
|
#pragma unroll
|
||||||
|
for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) {
|
||||||
|
const int k_KQ = k_KQ_0 + threadIdx.x;
|
||||||
|
|
||||||
|
const half2 K_ik = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ];
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
sum[j] += __low2float(K_ik) * Q_h2[j][k_KQ_0/WARP_SIZE].x;
|
||||||
|
sum[j] += __high2float(K_ik) * Q_h2[j][k_KQ_0/WARP_SIZE].y;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
sum[j] = warp_reduce_sum(sum[j]);
|
||||||
|
sum[j] += mask ? slope*__half2float(maskh[j*ne11 + k_VKQ_0 + i_KQ]) : 0.0f;
|
||||||
|
|
||||||
|
kqmax_new_arr[j] = fmaxf(kqmax_new_arr[j], sum[j]);
|
||||||
|
|
||||||
|
if (threadIdx.x == 0) {
|
||||||
|
KQ[j*D + i_KQ] = sum[j];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
float kqmax_new_j = kqmax_new_arr[j];
|
||||||
|
|
||||||
|
kqmax_new_j = warp_reduce_max(kqmax_new_j);
|
||||||
|
if (threadIdx.x == 0) {
|
||||||
|
kqmax_shared[j][threadIdx.y] = kqmax_new_j;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
float kqmax_new_j = kqmax_shared[j][threadIdx.x];
|
||||||
|
kqmax_new_j = warp_reduce_max(kqmax_new_j);
|
||||||
|
|
||||||
|
const float KQ_max_scale = expf(kqmax[j] - kqmax_new_j);
|
||||||
|
kqmax[j] = kqmax_new_j;
|
||||||
|
|
||||||
|
const float val = expf(KQ[j*D + tid] - kqmax[j]);
|
||||||
|
kqsum[j] = kqsum[j]*KQ_max_scale + val;
|
||||||
|
KQ[j*D + tid] = val;
|
||||||
|
|
||||||
|
VKQ[j] *= KQ_max_scale;
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int k = 0; k < D; ++k) {
|
||||||
|
if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k >= ne11) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
const float V_ki = __half2float(V_h[(k_VKQ_0 + k)*stride_KV + tid]);
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
VKQ[j] += V_ki*KQ[j*D + k];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
kqsum[j] = warp_reduce_sum(kqsum[j]);
|
||||||
|
if (threadIdx.x == 0) {
|
||||||
|
kqsum_shared[j][threadIdx.y] = kqsum[j];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) {
|
||||||
|
kqsum[j_VKQ] = kqsum_shared[j_VKQ][threadIdx.x];
|
||||||
|
kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]);
|
||||||
|
|
||||||
|
float dst_val = VKQ[j_VKQ];
|
||||||
|
if (parallel_blocks == 1) {
|
||||||
|
dst_val /= kqsum[j_VKQ];
|
||||||
|
}
|
||||||
|
const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
|
||||||
|
dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (parallel_blocks != 1 && tid != 0) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols; ++j) {
|
||||||
|
dst_meta[(ic0 + j)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j], kqsum[j]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <int D, int cols_per_block, int parallel_blocks> void launch_fattn_vec_f32(
|
||||||
|
const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V, ggml_tensor * KQV, const ggml_tensor * mask,
|
||||||
|
ggml_cuda_pool & pool, cudaStream_t main_stream
|
||||||
|
) {
|
||||||
|
ggml_cuda_pool_alloc<float> dst_tmp(pool);
|
||||||
|
ggml_cuda_pool_alloc<float2> dst_tmp_meta(pool);
|
||||||
|
|
||||||
|
if (parallel_blocks > 1) {
|
||||||
|
dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV));
|
||||||
|
dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV));
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr int nwarps = (D + WARP_SIZE - 1) / WARP_SIZE;
|
||||||
|
const dim3 block_dim(WARP_SIZE, nwarps, 1);
|
||||||
|
const dim3 blocks_num(parallel_blocks*((Q->ne[1] + cols_per_block - 1) / cols_per_block), Q->ne[2], Q->ne[3]);
|
||||||
|
const int shmem = 0;
|
||||||
|
|
||||||
|
float scale = 1.0f;
|
||||||
|
float max_bias = 0.0f;
|
||||||
|
|
||||||
|
memcpy(&scale, (float *) KQV->op_params + 0, sizeof(float));
|
||||||
|
memcpy(&max_bias, (float *) KQV->op_params + 1, sizeof(float));
|
||||||
|
|
||||||
|
const uint32_t n_head = Q->ne[2];
|
||||||
|
const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
|
||||||
|
|
||||||
|
const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
|
||||||
|
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
|
||||||
|
|
||||||
|
flash_attn_vec_ext_f32<D, cols_per_block, parallel_blocks>
|
||||||
|
<<<blocks_num, block_dim, shmem, main_stream>>> (
|
||||||
|
(const char *) Q->data,
|
||||||
|
(const char *) K->data,
|
||||||
|
(const char *) V->data,
|
||||||
|
mask ? ((const char *) mask->data) : nullptr,
|
||||||
|
parallel_blocks == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr,
|
||||||
|
scale, max_bias, m0, m1, n_head_log2,
|
||||||
|
Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3],
|
||||||
|
K->ne[0], K->ne[1], K->ne[2], K->ne[3],
|
||||||
|
mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0,
|
||||||
|
Q->nb[1], Q->nb[2], Q->nb[3],
|
||||||
|
K->nb[1], K->nb[2], K->nb[3],
|
||||||
|
KQV->ne[0], KQV->ne[1], KQV->ne[2], KQV->ne[3]
|
||||||
|
);
|
||||||
|
CUDA_CHECK(cudaGetLastError());
|
||||||
|
|
||||||
|
if (parallel_blocks == 1) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const dim3 block_dim_combine(D, 1, 1);
|
||||||
|
const dim3 blocks_num_combine(Q->ne[1], blocks_num.y, blocks_num.z);
|
||||||
|
const int shmem_combine = 0;
|
||||||
|
|
||||||
|
flash_attn_combine_results<D, parallel_blocks>
|
||||||
|
<<<blocks_num_combine, block_dim_combine, shmem_combine, main_stream>>>
|
||||||
|
(dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data);
|
||||||
|
CUDA_CHECK(cudaGetLastError());
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
const ggml_tensor * Q = dst->src[0];
|
||||||
|
const ggml_tensor * K = dst->src[1];
|
||||||
|
const ggml_tensor * V = dst->src[2];
|
||||||
|
|
||||||
|
const ggml_tensor * mask = dst->src[3];
|
||||||
|
|
||||||
|
ggml_tensor * KQV = dst;
|
||||||
|
|
||||||
|
GGML_ASSERT(Q->ne[0] == 64 || Q->ne[0] == 128 && "FlashAttention without tensor cores only supports head sizes 64 and 128.");
|
||||||
|
|
||||||
|
if (Q->ne[1] == 1) {
|
||||||
|
constexpr int cols_per_block = 1;
|
||||||
|
constexpr int parallel_blocks = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f32< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f32<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Q->ne[1] == 2) {
|
||||||
|
constexpr int cols_per_block = 2;
|
||||||
|
constexpr int parallel_blocks = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f32< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f32<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Q->ne[1] <= 4) {
|
||||||
|
constexpr int cols_per_block = 4;
|
||||||
|
constexpr int parallel_blocks = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f32< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f32<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Q->ne[1] <= 8) {
|
||||||
|
constexpr int cols_per_block = 8;
|
||||||
|
constexpr int parallel_blocks = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f32< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f32<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr int cols_per_block = 8;
|
||||||
|
constexpr int parallel_blocks = 1;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_vec_f32< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 128:
|
||||||
|
launch_fattn_vec_f32<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
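
When parallel_blocks > 1, each block above processes a strided slice of the KV sequence, writes its unnormalized accumulator to dst, and stores its local (max, exp-sum) pair in dst_meta; flash_attn_combine_results then rescales every partial result to the global maximum before the final division. A host-side C++ sketch of that merge for a single output element, with hypothetical struct and function names:

#include <cmath>
#include <cstdio>
#include <vector>

struct Partial {
    float acc;  // unnormalized softmax-weighted sum produced by one block (what dst holds)
    float max;  // local logit maximum (dst_meta.x)
    float sum;  // local exp-sum       (dst_meta.y)
};

// Merge partial flash-attention results, mirroring the numerator/denominator
// rescaling done by flash_attn_combine_results. Sketch only, per output element.
static float combine_partials(const std::vector<Partial> & parts) {
    float max_all = -INFINITY;
    for (const Partial & p : parts) {
        max_all = fmaxf(max_all, p.max);
    }
    float numerator   = 0.0f;
    float denominator = 0.0f;
    for (const Partial & p : parts) {
        const float scale = expf(p.max - max_all); // rescale each block to the global max
        numerator   += scale*p.acc;
        denominator += scale*p.sum;
    }
    return numerator / denominator;
}

int main() {
    // Two blocks that each saw half of the keys; the merged value is a properly normalized softmax average.
    const std::vector<Partial> parts = {{4.0f, 1.0f, 2.0f}, {1.0f, 3.0f, 1.5f}};
    printf("%f\n", combine_partials(parts));
    return 0;
}
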
3
ggml-cuda/fattn-vec-f32.cuh
Normal file
@ -0,0 +1,3 @@
#include "common.cuh"

void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
699
ggml-cuda/fattn.cu
Normal file
@ -0,0 +1,699 @@
#include "common.cuh"
#include "fattn-common.cuh"
#include "fattn-vec-f16.cuh"
#include "fattn-vec-f32.cuh"
#include "fattn.cuh"

#include <cstdint>

#if FP16_MMA_AVAILABLE
#include <mma.h>
#endif

// D == head size, VKQ_stride == num VKQ rows calculated in parallel:
template<int D, int ncols, int nwarps, int VKQ_stride, int parallel_blocks, typename KQ_acc_t>
#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
__launch_bounds__(nwarps*WARP_SIZE, 1)
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
static __global__ void flash_attn_ext_f16(
        const char * __restrict__ Q,
        const char * __restrict__ K,
        const char * __restrict__ V,
        const char * __restrict__ mask,
        float  * __restrict__ dst,
        float2 * __restrict__ dst_meta,
        const float scale,
        const float max_bias,
        const float m0,
        const float m1,
        const uint32_t n_head_log2,
        const int ne00, const int ne01, const int ne02, const int ne03,
        const int ne10, const int ne11, const int ne12, const int ne13,
        const int ne31, const int nb31,
        const int nb01, const int nb02, const int nb03,
        const int nb11, const int nb12, const int nb13,
        const int ne0,  const int ne1,  const int ne2,  const int ne3) {
#if FP16_MMA_AVAILABLE
|
||||||
|
//In this kernel Q, K, V are matrices while i, j, k are matrix indices.
|
||||||
|
|
||||||
|
const int ic0 = ncols*(blockIdx.x / parallel_blocks); // Index of the first Q/QKV column to work on.
|
||||||
|
const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
|
||||||
|
|
||||||
|
static_assert(D <= FATTN_KQ_STRIDE, "D must be <= FATTN_KQ_STRIDE.");
|
||||||
|
static_assert(ncols == 8 || ncols % 16 == 0, "ncols must be 8 or a multiple of 16.");
|
||||||
|
constexpr int frag_m = ncols == 8 ? 32 : 16;
|
||||||
|
constexpr int frag_n = ncols == 8 ? 8 : 16;
|
||||||
|
static_assert(D % frag_m == 0, "If ncols == 8 then D % frag_m must be 0.");
|
||||||
|
typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, frag_m, frag_n, 16, half, nvcuda::wmma::row_major> frag_a_K;
|
||||||
|
typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, frag_m, frag_n, 16, half, nvcuda::wmma::col_major> frag_a_V;
|
||||||
|
typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, frag_m, frag_n, 16, half, nvcuda::wmma::col_major> frag_b;
|
||||||
|
typedef nvcuda::wmma::fragment<nvcuda::wmma::accumulator, frag_m, frag_n, 16, KQ_acc_t> frag_c_KQ;
|
||||||
|
typedef nvcuda::wmma::fragment<nvcuda::wmma::accumulator, frag_m, frag_n, 16, half> frag_c_VKQ;
|
||||||
|
|
||||||
|
constexpr int KQ_stride_tc = nwarps*frag_m; // Number of KQ rows calculated in parallel.
|
||||||
|
constexpr int VKQ_ratio = KQ_stride_tc/VKQ_stride; // Number of parallel VKQ accumulators needed to keep all warps busy.
|
||||||
|
static_assert(VKQ_ratio <= nwarps, "VKQ_ratio must be <= nwarps.");
|
||||||
|
|
||||||
|
// Pad internal representation of KQ, KQV to reduce shared memory bank conflicts:
|
||||||
|
constexpr int D_padded = D + 8;
|
||||||
|
constexpr int kqs_padded = FATTN_KQ_STRIDE + 8;
|
||||||
|
constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half);
|
||||||
|
|
||||||
|
const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
|
||||||
|
const float * Q_f = (const float *) (Q + nb02* blockIdx.y + nb01*ic0);
|
||||||
|
const half * K_h = (const half *) (K + nb12*(blockIdx.y / gqa_ratio));
|
||||||
|
const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape
|
||||||
|
const half * maskh = (const half *) mask + (nb31/sizeof(half))* ic0;
|
||||||
|
const half2 * mask2 = (const half2 *) mask + (nb31/sizeof(half))*(ic0/2);
|
||||||
|
|
||||||
|
const int stride_Q = nb01 / sizeof(float);
|
||||||
|
const int stride_KV = nb11 / sizeof(half);
|
||||||
|
|
||||||
|
half slopeh = __float2half(1.0f);
|
||||||
|
half2 slope2 = make_half2(1.0f, 1.0f);
|
||||||
|
|
||||||
|
// ALiBi
|
||||||
|
if (max_bias > 0.0f) {
|
||||||
|
const int h = blockIdx.y;
|
||||||
|
|
||||||
|
const float base = h < n_head_log2 ? m0 : m1;
|
||||||
|
const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
|
||||||
|
|
||||||
|
slopeh = __float2half(powf(base, exph));
|
||||||
|
slope2 = make_half2(slopeh, slopeh);
|
||||||
|
}
|
||||||
|
|
||||||
|
frag_b Q_b[D/16][ncols/frag_n];
|
||||||
|
|
||||||
|
// A single buffer for temporarily holding tiles of KQ and VKQ parts:
|
||||||
|
constexpr int mem_KQ = ncols*kqs_padded*kqar;
|
||||||
|
constexpr int mem_VKQ_parts = VKQ_ratio*ncols*D_padded;
|
||||||
|
__shared__ half KQ[mem_KQ >= mem_VKQ_parts ? mem_KQ : mem_VKQ_parts];
|
||||||
|
float * KQ_f = (float *) KQ;
|
||||||
|
half2 * KQ2 = (half2 *) KQ;
|
||||||
|
|
||||||
|
float KQ_rowsum_f[ncols/nwarps] = {0.0f};
|
||||||
|
float KQ_max_f[ncols/nwarps];
|
||||||
|
float KQ_max_scale_f[ncols/nwarps] = {0.0f};
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols/nwarps; ++j) {
|
||||||
|
KQ_max_f[j] = -FLT_MAX/2.0f;
|
||||||
|
}
|
||||||
|
|
||||||
|
half2 KQ_rowsum_h2[ncols/nwarps] = {{0.0f, 0.0f}};
|
||||||
|
half2 KQ_max_h2[ncols/nwarps];
|
||||||
|
half2 KQ_max_scale_h2[ncols/nwarps] = {{0.0f, 0.0f}};
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols/nwarps; ++j) {
|
||||||
|
KQ_max_h2[j] = make_half2(-HALF_MAX_HALF, -HALF_MAX_HALF);
|
||||||
|
}
|
||||||
|
|
||||||
|
__shared__ half VKQ[ncols*D_padded]; // Accumulator for final VKQ slice.
|
||||||
|
half2 * VKQ2 = (half2 *) VKQ;
|
||||||
|
#pragma unroll
|
||||||
|
for (int j0 = 0; j0 < ncols; j0 += nwarps) {
|
||||||
|
const int j = j0 + threadIdx.y;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
|
||||||
|
const int i = i0 + threadIdx.x;
|
||||||
|
if (i0 + WARP_SIZE > D/2 && i >= D/2) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
VKQ2[j*(D_padded/2) + i] = make_half2(0.0f, 0.0f);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert Q to half and apply scale, temporarily store in KQ:
|
||||||
|
#pragma unroll
|
||||||
|
for (int j0 = 0; j0 < ncols; j0 += nwarps) {
|
||||||
|
const int j = j0 + threadIdx.y;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i0 = 0; i0 < D; i0 += WARP_SIZE) {
|
||||||
|
const int i = i0 + threadIdx.x;
|
||||||
|
if (i0 + WARP_SIZE > D && i >= D) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
KQ[j*D_padded + i] = ic0 + j < ne01 ? Q_f[j*stride_Q + i] * scale : 0.0f;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
// Load Q into tensor core fragments/registers since it will be used frequently:
|
||||||
|
#pragma unroll
|
||||||
|
for (int i0 = 0; i0 < D; i0 += 16) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int j0 = 0; j0 < ncols; j0 += frag_n) {
|
||||||
|
nvcuda::wmma::load_matrix_sync(Q_b[i0/16][j0/frag_n], KQ + j0*D_padded + i0, D_padded);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
// Iterate over ne11 == previous tokens:
|
||||||
|
for (int k_VKQ_0 = ip*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE) {
|
||||||
|
// Calculate tile of KQ:
|
||||||
|
#pragma unroll
|
||||||
|
for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) {
|
||||||
|
frag_c_KQ KQ_c[ncols/frag_n];
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols/frag_n; ++j) {
|
||||||
|
nvcuda::wmma::fill_fragment(KQ_c[j], 0.0f);
|
||||||
|
}
|
||||||
|
#pragma unroll
|
||||||
|
for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 16) {
|
||||||
|
frag_a_K K_a;
|
||||||
|
nvcuda::wmma::load_matrix_sync(K_a, K_h + (k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV);
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols/frag_n; ++j) {
|
||||||
|
nvcuda::wmma::mma_sync(KQ_c[j], K_a, Q_b[k_KQ_0/16][j], KQ_c[j]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#pragma unroll
|
||||||
|
for (int j0 = 0; j0 < ncols; j0 += frag_n) {
|
||||||
|
nvcuda::wmma::store_matrix_sync((KQ_acc_t *) KQ + j0*kqs_padded + i_KQ_0 + frag_m*threadIdx.y, KQ_c[j0/frag_n], kqs_padded, nvcuda::wmma::mem_col_major);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
// Calculate softmax for each KQ column using the current max. value.
|
||||||
|
// The divisor is stored in KQ_rowsum and will be applied at the end.
|
||||||
|
#pragma unroll
|
||||||
|
for (int j0 = 0; j0 < ncols; j0 += nwarps) {
|
||||||
|
const int j = j0 + threadIdx.y;
|
||||||
|
|
||||||
|
if (std::is_same<KQ_acc_t, float>::value) {
|
||||||
|
float KQ_f_tmp[FATTN_KQ_STRIDE / WARP_SIZE];
|
||||||
|
#pragma unroll
|
||||||
|
for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
|
||||||
|
const int k = k0 + threadIdx.x;
|
||||||
|
|
||||||
|
KQ_f_tmp[k0/WARP_SIZE] = KQ_f[j*kqs_padded + k];
|
||||||
|
}
|
||||||
|
|
||||||
|
float KQ_max_new = KQ_max_f[j0/nwarps];
|
||||||
|
#pragma unroll
|
||||||
|
for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
|
||||||
|
const int k = k0 + threadIdx.x;
|
||||||
|
|
||||||
|
KQ_f_tmp[k0/WARP_SIZE] += mask ? __half2float(slopeh*maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f;
|
||||||
|
KQ_max_new = max(KQ_max_new, KQ_f_tmp[k0/WARP_SIZE]);
|
||||||
|
}
|
||||||
|
KQ_max_new = warp_reduce_max(KQ_max_new);
|
||||||
|
|
||||||
|
const float diff = KQ_max_f[j0/nwarps] - KQ_max_new;
|
||||||
|
KQ_max_scale_f[j0/nwarps] = expf(diff);
|
||||||
|
if (diff <= SOFTMAX_FTZ_THRESHOLD) {
|
||||||
|
KQ_max_scale_f[j0/nwarps] = 0.0f;
|
||||||
|
}
|
||||||
|
KQ_max_f[j0/nwarps] = KQ_max_new;
|
||||||
|
|
||||||
|
float KQ_rowsum_add = 0.0f;
|
||||||
|
#pragma unroll
|
||||||
|
for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
|
||||||
|
const int k = k0 + threadIdx.x;
|
||||||
|
|
||||||
|
const float diff = KQ_f_tmp[k0/WARP_SIZE] - KQ_max_f[j0/nwarps];
|
||||||
|
KQ_f_tmp[k0/WARP_SIZE] = expf(diff);
|
||||||
|
if (diff <= SOFTMAX_FTZ_THRESHOLD) {
|
||||||
|
KQ_f_tmp[k0/WARP_SIZE] = 0.0f;
|
||||||
|
}
|
||||||
|
KQ_rowsum_add += KQ_f_tmp[k0/WARP_SIZE];
|
||||||
|
KQ[j*(kqar*kqs_padded) + k] = KQ_f_tmp[k0/WARP_SIZE];
|
||||||
|
}
|
||||||
|
KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add);
|
||||||
|
|
||||||
|
// Scale previous KQ_rowsum to account for a potential increase in KQ_max:
|
||||||
|
KQ_rowsum_f[j0/nwarps] = KQ_max_scale_f[j0/nwarps]*KQ_rowsum_f[j0/nwarps] + KQ_rowsum_add;
|
||||||
|
} else {
|
||||||
|
half2 KQ2_tmp[FATTN_KQ_STRIDE/(2*WARP_SIZE)];
|
||||||
|
#pragma unroll
|
||||||
|
for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
|
||||||
|
const int k = k0 + threadIdx.x;
|
||||||
|
|
||||||
|
KQ2_tmp[k0/WARP_SIZE] = KQ2[j*(kqs_padded/2) + k];
|
||||||
|
}
|
||||||
|
|
||||||
|
half2 KQ_max_new = KQ_max_h2[j0/nwarps];
|
||||||
|
#pragma unroll
|
||||||
|
for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
|
||||||
|
const int k = k0 + threadIdx.x;
|
||||||
|
|
||||||
|
KQ2_tmp[k0/WARP_SIZE] += mask ? slope2*mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f);
|
||||||
|
KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
|
||||||
|
}
|
||||||
|
KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
|
||||||
|
const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new;
|
||||||
|
KQ_max_scale_h2[j0/nwarps] = h2exp(diff);
|
||||||
|
const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
|
||||||
|
*((uint32_t *) &KQ_max_scale_h2[j0/nwarps]) &= ftz_mask;
|
||||||
|
KQ_max_h2[j0/nwarps] = KQ_max_new;
|
||||||
|
|
||||||
|
half2 KQ_rowsum_add = make_half2(0.0f, 0.0f);
|
||||||
|
#pragma unroll
|
||||||
|
for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
|
||||||
|
const int k = k0 + threadIdx.x;
|
||||||
|
|
||||||
|
const half2 diff = KQ2_tmp[k0/WARP_SIZE] - KQ_max_h2[j0/nwarps];
|
||||||
|
KQ2_tmp[k0/WARP_SIZE] = h2exp(diff);
|
||||||
|
const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
|
||||||
|
*((uint32_t *) &KQ2_tmp[k0/WARP_SIZE]) &= ftz_mask;
|
||||||
|
KQ_rowsum_add += KQ2_tmp[k0/WARP_SIZE];
|
||||||
|
KQ2[j*(kqs_padded/2) + k] = KQ2_tmp[k0/WARP_SIZE];
|
||||||
|
}
|
||||||
|
KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add);
|
||||||
|
|
||||||
|
// Scale previous KQ_rowsum to account for a potential increase in KQ_max:
|
||||||
|
KQ_rowsum_h2[j0/nwarps] = KQ_max_scale_h2[j0/nwarps]*KQ_rowsum_h2[j0/nwarps] + KQ_rowsum_add;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
frag_b KQ_b[FATTN_KQ_STRIDE/(VKQ_ratio*16)][ncols/frag_n];
|
||||||
|
#pragma unroll
|
||||||
|
for (int j0 = 0; j0 < ncols; j0 += frag_n) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) {
|
||||||
|
const int k = k0 + (threadIdx.y % VKQ_ratio)*16;
|
||||||
|
nvcuda::wmma::load_matrix_sync(
|
||||||
|
KQ_b[k0/(VKQ_ratio*16)][j0/frag_n],
|
||||||
|
KQ + j0*(kqar*kqs_padded) + k,
|
||||||
|
kqar*kqs_padded);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
frag_c_VKQ VKQ_c[D/VKQ_stride][ncols/frag_n];
|
||||||
|
#pragma unroll
|
||||||
|
for (int i_VKQ_0 = 0; i_VKQ_0 < D; i_VKQ_0 += VKQ_stride) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols/frag_n; ++j) {
|
||||||
|
nvcuda::wmma::fill_fragment(VKQ_c[i_VKQ_0/VKQ_stride][j], 0.0f);
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) {
|
||||||
|
const int k = k0 + (threadIdx.y % VKQ_ratio)*16;
|
||||||
|
|
||||||
|
frag_a_V v_a;
|
||||||
|
nvcuda::wmma::load_matrix_sync(v_a, V_h + (k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV);
|
||||||
|
#pragma unroll
|
||||||
|
for (int j = 0; j < ncols/frag_n; ++j) {
|
||||||
|
nvcuda::wmma::mma_sync(VKQ_c[i_VKQ_0/VKQ_stride][j], v_a, KQ_b[k0/(VKQ_ratio*16)][j], VKQ_c[i_VKQ_0/VKQ_stride][j]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
const int offset_k = (threadIdx.y % VKQ_ratio) * (ncols*D_padded);
|
||||||
|
#pragma unroll
|
||||||
|
for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += VKQ_stride) {
|
||||||
|
#pragma unroll
|
||||||
|
for (int j0 = 0; j0 < ncols; j0 += frag_n) {
|
||||||
|
nvcuda::wmma::store_matrix_sync(
|
||||||
|
KQ + offset_k + j0*D_padded + i_KQ_0 + frag_m*(threadIdx.y/VKQ_ratio),
|
||||||
|
VKQ_c[i_KQ_0/VKQ_stride][j0/frag_n],
|
||||||
|
D_padded, nvcuda::wmma::mem_col_major);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j0 = 0; j0 < ncols; j0 += nwarps) {
|
||||||
|
const int j = j0 + threadIdx.y;
|
||||||
|
|
||||||
|
half2 VKQ_scale;
|
||||||
|
if (std::is_same<KQ_acc_t, float>::value) {
|
||||||
|
VKQ_scale = make_half2(KQ_max_scale_f[j0/nwarps], KQ_max_scale_f[j0/nwarps]);
|
||||||
|
} else {
|
||||||
|
VKQ_scale = KQ_max_scale_h2[j0/nwarps];
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
|
||||||
|
const int i = i0 + threadIdx.x;
|
||||||
|
if (i0 + WARP_SIZE > D/2 && i >= D/2) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
half2 VKQ_add = make_half2(0.0f, 0.0f);
|
||||||
|
#pragma unroll
|
||||||
|
for (int l = 0; l < VKQ_ratio; ++l) {
|
||||||
|
VKQ_add += KQ2[l*(ncols*D_padded/2) + j*(D_padded/2) + i];
|
||||||
|
}
|
||||||
|
VKQ2[j*(D_padded/2) + i] = VKQ_scale*VKQ2[j*(D_padded/2) + i] + VKQ_add;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
__syncthreads();
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int j0 = 0; j0 < ncols; j0 += nwarps) {
|
||||||
|
const int j_VKQ = j0 + threadIdx.y;
|
||||||
|
if (ic0 + j_VKQ >= ne01) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
|
||||||
|
|
||||||
|
float KQ_rowsum_j;
|
||||||
|
if (std::is_same<KQ_acc_t, float>::value) {
|
||||||
|
KQ_rowsum_j = KQ_rowsum_f[j0/nwarps];
|
||||||
|
} else {
|
||||||
|
KQ_rowsum_j = __low2float(KQ_rowsum_h2[j0/nwarps]) + __high2float(KQ_rowsum_h2[j0/nwarps]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int i0 = 0; i0 < D; i0 += WARP_SIZE) {
|
||||||
|
const int i = i0 + threadIdx.x;
|
||||||
|
if (i0 + WARP_SIZE > D && i >= D) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
float dst_val = VKQ[j_VKQ*D_padded + i];
|
||||||
|
if (parallel_blocks == 1) {
|
||||||
|
dst_val /= KQ_rowsum_j;
|
||||||
|
}
|
||||||
|
dst[j_dst*gridDim.y*D + blockIdx.y*D + i] = dst_val;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (parallel_blocks == 1 || threadIdx.x != 0) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
float2 dst_meta_val;
|
||||||
|
if (std::is_same<KQ_acc_t, float>::value) {
|
||||||
|
dst_meta_val.x = KQ_max_f[j0/nwarps];
|
||||||
|
} else {
|
||||||
|
dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]);
|
||||||
|
}
|
||||||
|
dst_meta_val.y = KQ_rowsum_j;
|
||||||
|
dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = dst_meta_val;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
NO_DEVICE_CODE;
|
||||||
|
#endif // FP16_MMA_AVAILABLE
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr int get_max_power_of_2(int x) {
|
||||||
|
return x % 2 == 0 ? 2*get_max_power_of_2(x/2) : 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static_assert(get_max_power_of_2(1) == 1, "Test failed.");
|
||||||
|
static_assert(get_max_power_of_2(2) == 2, "Test failed.");
|
||||||
|
static_assert(get_max_power_of_2(4) == 4, "Test failed.");
|
||||||
|
static_assert(get_max_power_of_2(6) == 2, "Test failed.");
|
||||||
|
|
||||||
|
// Number of VKQ rows calculated in parallel:
|
||||||
|
constexpr int get_VKQ_stride(int D, int nwarps, int frag_m) {
|
||||||
|
return (get_max_power_of_2(D/frag_m) < nwarps ? get_max_power_of_2(D/frag_m) : nwarps)*frag_m;
|
||||||
|
}
|
||||||
|
|
||||||
|
static_assert(get_VKQ_stride(128, 1, 32) == 32, "Test failed.");
|
||||||
|
static_assert(get_VKQ_stride(128, 2, 32) == 64, "Test failed.");
|
||||||
|
static_assert(get_VKQ_stride(128, 4, 32) == 128, "Test failed.");
|
||||||
|
static_assert(get_VKQ_stride( 64, 1, 32) == 32, "Test failed.");
|
||||||
|
static_assert(get_VKQ_stride( 64, 2, 32) == 64, "Test failed.");
|
||||||
|
static_assert(get_VKQ_stride( 64, 4, 32) == 64, "Test failed.");
|
||||||
|
static_assert(get_VKQ_stride( 80, 1, 16) == 16, "Test failed.");
|
||||||
|
static_assert(get_VKQ_stride( 80, 2, 16) == 16, "Test failed.");
|
||||||
|
static_assert(get_VKQ_stride( 80, 4, 16) == 16, "Test failed.");
|
||||||
|
|
||||||
|
template <int D, int cols_per_block, int nwarps, int parallel_blocks, typename KQ_acc_t> void launch_fattn_f16_impl(
|
||||||
|
const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V, ggml_tensor * KQV, const ggml_tensor * mask,
|
||||||
|
ggml_cuda_pool & pool, cudaStream_t main_stream
|
||||||
|
) {
|
||||||
|
ggml_cuda_pool_alloc<float> dst_tmp(pool);
|
||||||
|
ggml_cuda_pool_alloc<float2> dst_tmp_meta(pool);
|
||||||
|
|
||||||
|
if (parallel_blocks > 1) {
|
||||||
|
dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV));
|
||||||
|
dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV));
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr int frag_m = (cols_per_block) == 8 && (D) % 32 == 0 ? 32 : 16;
|
||||||
|
const dim3 block_dim(WARP_SIZE, nwarps, 1);
|
||||||
|
const dim3 blocks_num(parallel_blocks*(Q->ne[1] + cols_per_block - 1) / cols_per_block, Q->ne[2], Q->ne[3]);
|
||||||
|
const int shmem = 0;
|
||||||
|
|
||||||
|
float scale = 1.0f;
|
||||||
|
float max_bias = 0.0f;
|
||||||
|
|
||||||
|
memcpy(&scale, (float *) KQV->op_params + 0, sizeof(float));
|
||||||
|
memcpy(&max_bias, (float *) KQV->op_params + 1, sizeof(float));
|
||||||
|
|
||||||
|
const uint32_t n_head = Q->ne[2];
|
||||||
|
const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
|
||||||
|
|
||||||
|
const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
|
||||||
|
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
|
||||||
|
|
||||||
|
flash_attn_ext_f16<D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t>
|
||||||
|
<<<blocks_num, block_dim, shmem, main_stream>>> (
|
||||||
|
(const char *) Q->data,
|
||||||
|
(const char *) K->data,
|
||||||
|
(const char *) V->data,
|
||||||
|
mask ? ((const char *) mask->data) : nullptr,
|
||||||
|
(parallel_blocks) == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr,
|
||||||
|
scale, max_bias, m0, m1, n_head_log2,
|
||||||
|
Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3],
|
||||||
|
K->ne[0], K->ne[1], K->ne[2], K->ne[3],
|
||||||
|
mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0,
|
||||||
|
Q->nb[1], Q->nb[2], Q->nb[3],
|
||||||
|
K->nb[1], K->nb[2], K->nb[3],
|
||||||
|
KQV->ne[0], KQV->ne[1], KQV->ne[2], KQV->ne[3]
|
||||||
|
);
|
||||||
|
CUDA_CHECK(cudaGetLastError());
|
||||||
|
|
||||||
|
if ((parallel_blocks) == 1) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const dim3 block_dim_combine(D, 1, 1);
|
||||||
|
const dim3 blocks_num_combine(Q->ne[1], blocks_num.y, blocks_num.z);
|
||||||
|
const int shmem_combine = 0;
|
||||||
|
|
||||||
|
flash_attn_combine_results<D, parallel_blocks>
|
||||||
|
<<<blocks_num_combine, block_dim_combine, shmem_combine, main_stream>>>
|
||||||
|
(dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data);
|
||||||
|
CUDA_CHECK(cudaGetLastError());
|
||||||
|
}
|
||||||
|
|
||||||
|
template <int D, int cols_per_block, int nwarps, typename KQ_acc_t> void launch_fattn_f16(
|
||||||
|
const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V, ggml_tensor * KQV, const ggml_tensor * mask,
|
||||||
|
const int nsm, ggml_cuda_pool & pool, cudaStream_t main_stream
|
||||||
|
) {
|
||||||
|
const int blocks_num_pb1 = ((Q->ne[1] + cols_per_block - 1) / cols_per_block)*Q->ne[2]*Q->ne[3];
|
||||||
|
|
||||||
|
if (4*blocks_num_pb1 < 2*nsm) {
|
||||||
|
launch_fattn_f16_impl<D, cols_per_block, nwarps, 4, KQ_acc_t>(Q, K, V, KQV, mask, pool, main_stream);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (2*blocks_num_pb1 < 2*nsm) {
|
||||||
|
launch_fattn_f16_impl<D, cols_per_block, nwarps, 2, KQ_acc_t>(Q, K, V, KQV, mask, pool, main_stream);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
launch_fattn_f16_impl<D, cols_per_block, nwarps, 1, KQ_acc_t>(Q, K, V, KQV, mask, pool, main_stream);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
const ggml_tensor * Q = dst->src[0];
|
||||||
|
const ggml_tensor * K = dst->src[1];
|
||||||
|
const ggml_tensor * V = dst->src[2];
|
||||||
|
|
||||||
|
const ggml_tensor * mask = dst->src[3];
|
||||||
|
|
||||||
|
ggml_tensor * KQV = dst;
|
||||||
|
|
||||||
|
GGML_ASSERT(Q->type == GGML_TYPE_F32);
|
||||||
|
GGML_ASSERT(K->type == GGML_TYPE_F16);
|
||||||
|
GGML_ASSERT(V->type == GGML_TYPE_F16);
|
||||||
|
GGML_ASSERT(KQV->type == GGML_TYPE_F32);
|
||||||
|
|
||||||
|
GGML_ASSERT(!mask || mask->type == GGML_TYPE_F16);
|
||||||
|
GGML_ASSERT(!mask || mask->ne[1] >= GGML_PAD(Q->ne[1], 16) &&
|
||||||
|
"the Flash-Attention CUDA kernel requires the mask to be padded to 16 and at least n_queries big");
|
||||||
|
|
||||||
|
GGML_ASSERT(K->ne[1] % FATTN_KQ_STRIDE == 0 && "Incorrect KV cache padding.");
|
||||||
|
|
||||||
|
ggml_cuda_set_device(ctx.device);
|
||||||
|
|
||||||
|
const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
|
||||||
|
const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm;
|
||||||
|
|
||||||
|
const int32_t precision = KQV->op_params[2];
|
||||||
|
|
||||||
|
if (!fast_fp16_available(cc)) {
|
||||||
|
ggml_cuda_flash_attn_ext_vec_f32(ctx, dst);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!fp16_mma_available(cc)) {
|
||||||
|
ggml_cuda_flash_attn_ext_vec_f16_no_mma(ctx, dst);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (precision != GGML_PREC_DEFAULT) {
|
||||||
|
if (Q->ne[1] == 1 && (Q->ne[0] == 64 || Q->ne[0] == 128)) {
|
||||||
|
ggml_cuda_flash_attn_ext_vec_f32(ctx, dst);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Q->ne[1] <= 32 || Q->ne[0] > 128) {
|
||||||
|
constexpr int cols_per_block = 16;
|
||||||
|
constexpr int nwarps = 4;
|
||||||
|
switch (Q->ne[0]) {
|
||||||
|
case 64:
|
||||||
|
launch_fattn_f16< 64, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
case 80:
|
||||||
|
launch_fattn_f16< 80, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
|
||||||
|
break;
|
||||||
|
+                case 96:
+                    launch_fattn_f16< 96, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                    break;
+                case 112:
+                    launch_fattn_f16<112, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                    break;
+                case 128:
+                    launch_fattn_f16<128, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                    break;
+                case 256:
+                    launch_fattn_f16<256, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                    break;
+                default:
+                    GGML_ASSERT(false);
+                    break;
+            }
+        } else {
+            constexpr int cols_per_block = 32;
+            constexpr int nwarps = 4;
+            switch (Q->ne[0]) {
+                case 64:
+                    launch_fattn_f16< 64, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                    break;
+                case 80:
+                    launch_fattn_f16< 80, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                    break;
+                case 96:
+                    launch_fattn_f16< 96, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                    break;
+                case 112:
+                    launch_fattn_f16<112, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                    break;
+                case 128:
+                    launch_fattn_f16<128, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                    break;
+                // case 256:
+                //     launch_fattn_f16<256, cols_per_block, nwarps, float>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                //     break;
+                default:
+                    GGML_ASSERT(false);
+                    break;
+            }
+        }
+        return;
+    }
+
+    if (Q->ne[1] == 1 && Q->ne[0] % (2*WARP_SIZE) == 0) {
+        ggml_cuda_flash_attn_ext_vec_f16(ctx, dst);
+        return;
+    }
+
+    if (Q->ne[1] <= 8 && Q->ne[0] % WARP_SIZE == 0) {
+        constexpr int cols_per_block = 8;
+        constexpr int nwarps = 4;
+        switch (Q->ne[0]) {
+            case 64:
+                launch_fattn_f16< 64, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            case 96:
+                launch_fattn_f16< 96, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            case 128:
+                launch_fattn_f16<128, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            case 256:
+                launch_fattn_f16<256, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            default:
+                GGML_ASSERT(false);
+                break;
+        }
+        return;
+    }
+
+    if (Q->ne[1] <= 32) {
+        constexpr int cols_per_block = 16;
+        constexpr int nwarps = 4;
+        switch (Q->ne[0]) {
+            case 64:
+                launch_fattn_f16< 64, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            case 80:
+                launch_fattn_f16< 80, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            case 96:
+                launch_fattn_f16< 96, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            case 112:
+                launch_fattn_f16<112, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            case 128:
+                launch_fattn_f16<128, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            case 256:
+                launch_fattn_f16<256, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+                break;
+            default:
+                GGML_ASSERT(false);
+                break;
+        }
+        return;
+    }
+
+    constexpr int cols_per_block = 32;
+    constexpr int nwarps = 4;
+    switch (Q->ne[0]) {
+        case 64:
+            launch_fattn_f16< 64, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+            break;
+        case 80:
+            launch_fattn_f16< 80, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+            break;
+        case 96:
+            launch_fattn_f16< 96, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+            break;
+        case 112:
+            launch_fattn_f16<112, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+            break;
+        case 128:
+            launch_fattn_f16<128, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+            break;
+        case 256:
+            launch_fattn_f16<256, cols_per_block, nwarps, half>(Q, K, V, KQV, mask, nsm, ctx.pool(), ctx.stream());
+            break;
+        default:
+            GGML_ASSERT(false);
+            break;
+    }
+    return;
+}
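In the dispatch above, the number of query columns Q->ne[1] selects cols_per_block (a single column goes to the dedicated vector kernel, up to 8 columns use cols_per_block = 8, up to 32 use 16, larger batches use 32), while the head size Q->ne[0] selects one of a fixed set of template instantiations. The following is a minimal, self-contained sketch of that selection pattern only; launch() is a hypothetical stand-in for launch_fattn_f16 with the tensor and stream arguments elided, so it illustrates the idea rather than reproducing the patch's code:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // hypothetical stand-in for launch_fattn_f16<D, cols_per_block, nwarps, KQ_acc_t>(...);
    // it only reports which instantiation would be launched
    template <int D, int cols_per_block>
    static void launch() {
        printf("would launch kernel: head size %d, %d columns per block\n", D, cols_per_block);
    }

    static void dispatch(int64_t n_cols, int64_t head_size) {
        const int cols = n_cols <= 8 ? 8 : n_cols <= 32 ? 16 : 32; // mirrors the Q->ne[1] checks above
        if (cols == 8) {
            switch (head_size) {            // the head size must be a compile-time constant
                case  64: launch< 64, 8>(); break;
                case  96: launch< 96, 8>(); break;
                case 128: launch<128, 8>(); break;
                case 256: launch<256, 8>(); break;
                default:  assert(false);    break; // head size not instantiated
            }
            return;
        }
        // ... analogous switches for cols_per_block = 16 and 32 ...
    }

    int main() {
        dispatch(4, 128); // -> head size 128, 8 columns per block
        return 0;
    }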
3  ggml-cuda/fattn.cuh  Normal file
@@ -0,0 +1,3 @@
+#include "common.cuh"
+
+void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@@ -1735,8 +1735,7 @@ static void ggml_mul_mat_q4_0_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
     const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -1780,8 +1779,7 @@ static void ggml_mul_mat_q4_1_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
     const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -1825,8 +1823,7 @@ static void ggml_mul_mat_q5_0_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
     const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -1870,8 +1867,7 @@ static void ggml_mul_mat_q5_1_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
     const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -1915,8 +1911,7 @@ static void ggml_mul_mat_q8_0_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
     const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -1960,8 +1955,7 @@ static void ggml_mul_mat_q2_K_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
     const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -2007,8 +2001,7 @@ static void ggml_mul_mat_q3_K_q8_1_cuda(
 
 #if QK_K == 256
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -2053,8 +2046,7 @@ static void ggml_mul_mat_q4_K_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
     const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -2098,8 +2090,7 @@ static void ggml_mul_mat_q5_K_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
     const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -2143,8 +2134,7 @@ static void ggml_mul_mat_q6_K_q8_1_cuda(
     const void * vx, const void * vy, float * dst, const int ncols_x, const int nrows_x,
     const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
 
     int mmq_x, mmq_y, nwarps;
@@ -89,8 +89,7 @@ static void mul_mat_vec_q_cuda(
     GGML_ASSERT(ncols_x % qk == 0);
     GGML_ASSERT(ncols_y <= MMVQ_MAX_BATCH_SIZE);
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
 
     int64_t nwarps = 1;
     int64_t rows_per_cuda_block = 1;
@@ -328,8 +327,7 @@ void ggml_cuda_op_mul_mat_vec_q(
 
     const int64_t ne0 = dst->ne[0];
 
-    int id;
-    CUDA_CHECK(cudaGetDevice(&id));
+    int id = ggml_cuda_get_device();
 
     // the main device has a larger memory buffer to hold the results from all GPUs
     // nrows_dst == nrows of the matrix that the kernel writes into
@@ -28,5 +28,4 @@ void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     memcpy(&scale, dst->op_params, sizeof(float));
 
     scale_f32_cuda(src0_d, dst_d, scale, ggml_nelements(src0), stream);
-    CUDA_CHECK(cudaGetLastError());
 }
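The mul_mat_q*/mul_mat_vec_q hunks above all make the same mechanical change: the two-line pattern of declaring an id and checking cudaGetDevice() is collapsed into a single call to ggml_cuda_get_device(). The helper itself is not shown in this excerpt (it presumably lives in common.cuh); the sketch below is only a plausible minimal form of such a helper, not the patch's actual definition, and it reports errors with a plain fprintf instead of CUDA_CHECK:

    #include <cstdio>
    #include <cuda_runtime.h>

    // hedged sketch of a ggml_cuda_get_device()-style helper
    static int ggml_cuda_get_device_sketch() {
        int id = 0;
        const cudaError_t err = cudaGetDevice(&id); // the check the call sites used to do inline
        if (err != cudaSuccess) {
            fprintf(stderr, "cudaGetDevice failed: %s\n", cudaGetErrorString(err));
        }
        return id;
    }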
@@ -1,7 +1,17 @@
 #include "softmax.cuh"
 
-template <bool vals_smem, int ncols_template, int block_size_template>
-static __global__ void soft_max_f32(const float * x, const float * mask, const float * pos, float * dst, const int ncols_par, const int nrows_y, const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2) {
+template <typename T>
+static __device__ __forceinline__ float t2f32(T val) {
+    return (float) val;
+}
+
+template <>
+__device__ float __forceinline__ t2f32<half>(half val) {
+    return __half2float(val);
+}
+
+template <bool vals_smem, int ncols_template, int block_size_template, typename T>
+static __global__ void soft_max_f32(const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y, const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2) {
     const int ncols = ncols_template == 0 ? ncols_par : ncols_template;
 
     const int tid = threadIdx.x;
@@ -13,16 +23,16 @@ static __global__ void soft_max_f32(const float * x, const float * mask, const f
     const int warp_id = threadIdx.x / WARP_SIZE;
     const int lane_id = threadIdx.x % WARP_SIZE;
 
-    float slope = 0.0f;
+    float slope = 1.0f;
 
     // ALiBi
     if (max_bias > 0.0f) {
         const int h = rowx/nrows_y; // head index
 
         const float base = h < n_head_log2 ? m0 : m1;
-        const int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
+        const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
 
-        slope = powf(base, exp);
+        slope = powf(base, exph);
     }
 
     extern __shared__ float data_soft_max_f32[];
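The slope logic above implements the ALiBi bias: with n_head_log2 the largest power of two not exceeding the head count, heads below that threshold use base m0 = 2^(-max_bias/n_head_log2) raised to (h+1), and the remaining heads use m1 = 2^(-(max_bias/2)/n_head_log2) raised to odd exponents. A small host-side check of the same arithmetic, illustrative only and with made-up values for max_bias and the head count:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const float    max_bias    = 8.0f; // example value, not taken from the patch
        const uint32_t n_head      = 12;   // example head count
        const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); // = 8

        const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
        const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

        for (uint32_t h = 0; h < n_head; ++h) {
            const float base = h < n_head_log2 ? m0 : m1;
            const int   exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
            printf("head %2u: slope = %f\n", h, powf(base, exph));
        }
        return 0;
    }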
@@ -43,7 +53,7 @@ static __global__ void soft_max_f32(const float * x, const float * mask, const f
         const int64_t ix = (int64_t)rowx*ncols + col;
         const int64_t iy = (int64_t)rowy*ncols + col;
 
-        const float val = x[ix]*scale + (mask ? mask[iy] : 0.0f) + (pos ? slope*pos[col] : 0.0f);
+        const float val = x[ix]*scale + (mask ? slope*t2f32(mask[iy]) : 0.0f);
 
         vals[col] = val;
         max_val = max(max_val, val);
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void soft_max_f32_cuda(const float * x, const float * mask, const float * pos, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, const float max_bias, cudaStream_t stream) {
|
template<typename T>
|
||||||
|
static void soft_max_f32_cuda(const float * x, const T * mask, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, const float max_bias, cudaStream_t stream) {
|
||||||
int nth = WARP_SIZE;
|
int nth = WARP_SIZE;
|
||||||
while (nth < ncols_x && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2;
|
while (nth < ncols_x && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2;
|
||||||
const dim3 block_dims(nth, 1, 1);
|
const dim3 block_dims(nth, 1, 1);
|
||||||
@ -122,8 +133,8 @@ static void soft_max_f32_cuda(const float * x, const float * mask, const float *
|
|||||||
const size_t shmem = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float);
|
const size_t shmem = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float);
|
||||||
static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted.");
|
static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted.");
|
||||||
|
|
||||||
const uint32_t n_head_kv = nrows_x/nrows_y;
|
const uint32_t n_head = nrows_x/nrows_y;
|
||||||
const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
|
const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
|
||||||
|
|
||||||
const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
|
const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
|
||||||
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
|
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
|
||||||
@@ -131,51 +142,53 @@ static void soft_max_f32_cuda(const float * x, const float * mask, const float *
     if (shmem < ggml_cuda_info().devices[ggml_cuda_get_device()].smpb) {
         switch (ncols_x) {
             case 32:
-                soft_max_f32<true, 32, 32><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 32, 32><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 64:
-                soft_max_f32<true, 64, 64><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 64, 64><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 128:
-                soft_max_f32<true, 128, 128><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 128, 128><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 256:
-                soft_max_f32<true, 256, 256><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 256, 256><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 512:
-                soft_max_f32<true, 512, 512><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 512, 512><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 1024:
-                soft_max_f32<true, 1024, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 1024, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
            case 2048:
-                soft_max_f32<true, 2048, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 2048, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
            case 4096:
-                soft_max_f32<true, 4096, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 4096, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
            default:
-                soft_max_f32<true, 0, 0><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 0, 0><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
         }
     } else {
         const size_t shmem_low = WARP_SIZE*sizeof(float);
-        soft_max_f32<false, 0, 0><<<block_nums, block_dims, shmem_low, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+        soft_max_f32<false, 0, 0><<<block_nums, block_dims, shmem_low, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
     }
 }
 
 void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const ggml_tensor * src1 = dst->src[1];
 
     const float * src0_d = (const float *)src0->data;
-    const float * src1_d = src1 ? (const float *)src1->data : nullptr;
+    const void * src1_d = src1 ? (const void *)src1->data : nullptr;
 
     float * dst_d = (float *)dst->data;
     cudaStream_t stream = ctx.stream();
 
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
-    GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional
+    GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional
 
     const int64_t ne00 = src0->ne[0];
     const int64_t nrows_x = ggml_nrows(src0);
@@ -187,15 +200,15 @@ void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
     memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));
 
-    // positions tensor
-    float * src2_dd = nullptr;
-
-    ggml_tensor * src2 = dst->src[2];
-    const bool use_src2 = src2 != nullptr;
-
-    if (use_src2) {
-        src2_dd = (float *)src2->data;
-    }
-
-    soft_max_f32_cuda(src0_d, src1_d, src2_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
+    const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);
+
+    if (use_f16) {
+        const half * src1_dd = (const half *)src1_d;
+
+        soft_max_f32_cuda(src0_d, src1_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
+    } else {
+        const float * src1_dd = (const float *)src1_d;
+
+        soft_max_f32_cuda(src0_d, src1_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
+    }
 }
@@ -48,6 +48,15 @@ static __global__ void relu_f32(const float * x, float * dst, const int k) {
     dst[i] = fmaxf(x[i], 0);
 }
 
+static __global__ void sigmoid_f32(const float * x, float * dst, const int k) {
+    const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+    if (i >= k) {
+        return;
+    }
+    dst[i] = 1.0f / (1.0f + expf(-x[i]));
+}
+
 static __global__ void hardsigmoid_f32(const float * x, float * dst, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
 
@@ -108,6 +117,11 @@ static void relu_f32_cuda(const float * x, float * dst, const int k, cudaStream_
     relu_f32<<<num_blocks, CUDA_RELU_BLOCK_SIZE, 0, stream>>>(x, dst, k);
 }
 
+static void sigmoid_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
+    const int num_blocks = (k + CUDA_SIGMOID_BLOCK_SIZE - 1) / CUDA_SIGMOID_BLOCK_SIZE;
+    sigmoid_f32<<<num_blocks, CUDA_SIGMOID_BLOCK_SIZE, 0, stream>>>(x, dst, k);
+}
+
 static void hardsigmoid_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_HARDSIGMOID_BLOCK_SIZE - 1) / CUDA_HARDSIGMOID_BLOCK_SIZE;
     hardsigmoid_f32<<<num_blocks, CUDA_HARDSIGMOID_BLOCK_SIZE, 0, stream>>>(x, dst, k);
@@ -188,6 +202,18 @@ void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     relu_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
 }
 
+void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    const float * src0_d = (const float *)src0->data;
+    float * dst_d = (float *)dst->data;
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+    sigmoid_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
+}
+
 void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const float * src0_d = (const float *)src0->data;
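The new ggml_cuda_op_sigmoid follows the same shape as the other unary ops: validate F32 in and out, then hand the flat element count to the element-wise launcher. Below is a hedged, illustrative sketch of driving that launcher directly outside of ggml; it assumes sigmoid_f32_cuda() and CUDA_CHECK from the surrounding sources are visible, and the buffer sizes are made up:

    // illustrative only: exercises the launcher added above on a standalone buffer
    static void run_sigmoid_example(cudaStream_t stream) {
        const int n = 1024;                          // example element count
        float * d_in = nullptr;
        float * d_out = nullptr;
        CUDA_CHECK(cudaMalloc(&d_in,  n*sizeof(float)));
        CUDA_CHECK(cudaMalloc(&d_out, n*sizeof(float)));
        // ... fill d_in with inputs ...
        sigmoid_f32_cuda(d_in, d_out, n, stream);    // dst[i] = 1 / (1 + exp(-x[i]))
        CUDA_CHECK(cudaStreamSynchronize(stream));
        CUDA_CHECK(cudaFree(d_in));
        CUDA_CHECK(cudaFree(d_out));
    }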
@@ -4,6 +4,7 @@
 #define CUDA_SILU_BLOCK_SIZE 256
 #define CUDA_TANH_BLOCK_SIZE 256
 #define CUDA_RELU_BLOCK_SIZE 256
+#define CUDA_SIGMOID_BLOCK_SIZE 256
 #define CUDA_HARDSIGMOID_BLOCK_SIZE 256
 #define CUDA_HARDSWISH_BLOCK_SIZE 256
 #define CUDA_SQR_BLOCK_SIZE 256
@@ -18,6 +19,8 @@ void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
 
 void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
 
+void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
 void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
 
 void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
79  ggml-impl.h
@@ -17,6 +17,83 @@
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
+/**
+ * Converts brain16 to float32.
+ *
+ * The bfloat16 floating point format has the following structure:
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───┐
+ *     0b0000000000000000 brain16
+ *
+ * Since bf16 has the same number of exponent bits as a 32bit float,
+ * encoding and decoding numbers becomes relatively straightforward.
+ *
+ *       ┌sign
+ *       │
+ *       │   ┌exponent
+ *       │   │
+ *       │   │      ┌mantissa
+ *       │   │      │
+ *       │┌──┴───┐┌─┴───────────────────┐
+ *     0b00000000000000000000000000000000 IEEE binary32
+ *
+ * For comparison, the standard fp16 format has fewer exponent bits.
+ *
+ *       ┌sign
+ *       │
+ *       │  ┌exponent
+ *       │  │
+ *       │  │    ┌mantissa
+ *       │  │    │
+ *       │┌─┴─┐┌─┴──────┐
+ *     0b0000000000000000 IEEE binary16
+ *
+ * @see IEEE 754-2008
+ */
+static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) {
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.i = (uint32_t)h.bits << 16;
+    return u.f;
+}
+
+/**
+ * Converts float32 to brain16.
+ *
+ * This function is binary identical to AMD Zen4 VCVTNEPS2BF16.
+ * Subnormals shall be flushed to zero, and NANs will be quiet.
+ * This code should vectorize nicely if using modern compilers.
+ */
+static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
+    ggml_bf16_t h;
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.f = s;
+    if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
+        h.bits = (u.i >> 16) | 64; /* force to quiet */
+        return h;
+    }
+    if (!(u.i & 0x7f800000)) { /* subnormal */
+        h.bits = (u.i & 0x80000000) >> 16; /* flush to zero */
+        return h;
+    }
+    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
+    return h;
+}
+
+#define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
+#define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
+
 #ifdef __cplusplus
 extern "C" {
 #endif
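The fp32-to-bf16 conversion above is a truncation of the high 16 bits with round-to-nearest-even: adding 0x7fff plus the lowest kept bit before shifting rounds ties toward an even mantissa, NaNs get their quiet bit forced, and subnormals are flushed to signed zero. A small host round-trip using exactly the two helpers from this hunk; illustrative only, and it assumes it is compiled inside the llama.cpp tree so that ggml-impl.h and its includes resolve:

    #include <cstdio>
    #include "ggml-impl.h"

    int main() {
        const float x = 3.1415927f;
        const ggml_bf16_t b = ggml_compute_fp32_to_bf16(x);      // keeps 8 exponent bits, 7 mantissa bits
        const float y = ggml_compute_bf16_to_fp32(b);            // widen back by shifting into the top half
        printf("%f -> 0x%04x -> %f\n", x, (unsigned) b.bits, y); // y is x rounded to bf16 precision
        return 0;
    }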
@@ -313,7 +390,7 @@ inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b)
 
 #endif // defined(__ARM_NEON)
 
-#if defined(__ARM_NEON) && !defined(__MSC_VER)
+#if defined(__ARM_NEON) && !defined(_MSC_VER)
 
 #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
 #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
@@ -1427,6 +1427,7 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml
         for (int i = node_start; i < node_end; ++i) {
             struct ggml_tensor * src0 = gf->nodes[i]->src[0];
             struct ggml_tensor * src1 = gf->nodes[i]->src[1];
+            struct ggml_tensor * src2 = gf->nodes[i]->src[2]; GGML_UNUSED(src2);
             struct ggml_tensor * dst = gf->nodes[i];
             GGML_ASSERT(dst->data != nullptr);
 
@@ -1558,7 +1559,19 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml
             case GGML_OP_SOFT_MAX:
                 {
                     float scale;
-                    memcpy(&scale, dst->op_params, sizeof(float));
+                    float max_bias;
+
+                    memcpy(&scale, (float *)dst->op_params + 0, sizeof(float));
+                    memcpy(&max_bias, (float *)dst->op_params + 1, sizeof(float));
+
+#pragma message("TODO: add ggml_vk_soft_max() F16 src1 support")
+#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021")
+                    GGML_ASSERT(!src1 || src1t == GGML_TYPE_F32);
+
+#pragma message("TODO: add ALiBi support")
+#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/7192")
+                    GGML_ASSERT(max_bias == 0.0f);
+
                     ggml_vk_soft_max(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, ne02, ne03, scale);
                 } break;
             case GGML_OP_DIAG_MASK_INF:
709  ggml-metal.m
@@ -40,14 +40,17 @@ enum ggml_metal_kernel_type {
     GGML_METAL_KERNEL_TYPE_CLAMP,
     GGML_METAL_KERNEL_TYPE_TANH,
     GGML_METAL_KERNEL_TYPE_RELU,
+    GGML_METAL_KERNEL_TYPE_SIGMOID,
     GGML_METAL_KERNEL_TYPE_GELU,
     GGML_METAL_KERNEL_TYPE_GELU_4,
     GGML_METAL_KERNEL_TYPE_GELU_QUICK,
     GGML_METAL_KERNEL_TYPE_GELU_QUICK_4,
     GGML_METAL_KERNEL_TYPE_SILU,
     GGML_METAL_KERNEL_TYPE_SILU_4,
-    GGML_METAL_KERNEL_TYPE_SOFT_MAX,
-    GGML_METAL_KERNEL_TYPE_SOFT_MAX_4,
+    GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16,
+    GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4,
+    GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32,
+    GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4,
     GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF,
     GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8,
     GGML_METAL_KERNEL_TYPE_GET_ROWS_F32,
@@ -167,7 +170,6 @@ enum ggml_metal_kernel_type {
     GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32,
     GGML_METAL_KERNEL_TYPE_ROPE_F32,
     GGML_METAL_KERNEL_TYPE_ROPE_F16,
-    GGML_METAL_KERNEL_TYPE_ALIBI_F32,
     GGML_METAL_KERNEL_TYPE_IM2COL_F16,
     GGML_METAL_KERNEL_TYPE_IM2COL_F32,
     GGML_METAL_KERNEL_TYPE_UPSCALE_F32,
@@ -177,6 +179,14 @@ enum ggml_metal_kernel_type {
     GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC,
     GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC,
     GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32,
+    GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64,
+    GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H80,
+    GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H96,
+    GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H112,
+    GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H128,
+    GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256,
+    GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128,
+    GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H256,
     GGML_METAL_KERNEL_TYPE_CPY_F32_F16,
     GGML_METAL_KERNEL_TYPE_CPY_F32_F32,
     GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0,
@@ -255,11 +265,20 @@ static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
 
 static void * ggml_metal_host_malloc(size_t n) {
     void * data = NULL;
 
+#if TARGET_OS_OSX
+    kern_return_t err = vm_allocate((vm_map_t) mach_task_self(), (void *) &data, n, VM_FLAGS_ANYWHERE);
+    if (err != KERN_SUCCESS) {
+        GGML_METAL_LOG_ERROR("%s: error: vm_allocate failed\n", __func__);
+        return NULL;
+    }
+#else
     const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
     if (result != 0) {
         GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
         return NULL;
     }
+#endif
 
     return data;
 }
@@ -443,7 +462,7 @@ static struct ggml_metal_context * ggml_metal_init(int n_cb) {
     }
 
 /*
-        GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) kernel->pipeline, \
+        GGML_METAL_LOG_INFO("%s: loaded %-40s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) kernel->pipeline, \
                 (int) kernel->pipeline.maxTotalThreadsPerThreadgroup, \
                 (int) kernel->pipeline.threadExecutionWidth); \
 */
@@ -459,172 +478,182 @@ static struct ggml_metal_context * ggml_metal_init(int n_cb) {
                 return NULL; \
             } \
         } else { \
-            GGML_METAL_LOG_WARN("%s: skipping %-32s (not supported)\n", __func__, "kernel_"#name); \
+            GGML_METAL_LOG_WARN("%s: skipping %-40s (not supported)\n", __func__, "kernel_"#name); \
         }
 
         // simd_sum and simd_max requires MTLGPUFamilyApple7
 
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD, add, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW, add_row, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL, mul, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_ROW, mul_row, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV, div, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV_ROW, div_row, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE, scale, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE_4, scale_4, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CLAMP, clamp, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TANH, tanh, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RELU, relu, true);
+        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIGMOID, sigmoid, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU, gelu, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_4, gelu_4, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK, gelu_quick, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK_4, gelu_quick_4, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU, silu, true);
         GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU_4, silu_4, true);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX, soft_max, ctx->support_simdgroup_reduction);
-        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_4, soft_max_4, ctx->support_simdgroup_reduction);
+        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16, soft_max_f16, ctx->support_simdgroup_reduction);
+        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4, soft_max_f16_4, ctx->support_simdgroup_reduction);
+        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32, soft_max_f32, ctx->support_simdgroup_reduction);
+        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4, soft_max_f32_4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF, diag_mask_inf, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8, diag_mask_inf_8, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F32, get_rows_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F16, get_rows_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0, get_rows_q4_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1, get_rows_q4_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0, get_rows_q5_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1, get_rows_q5_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0, get_rows_q8_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K, get_rows_q2_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K, get_rows_q3_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K, get_rows_q4_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K, get_rows_q5_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K, get_rows_q6_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS, get_rows_iq2_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS, get_rows_iq2_xs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS, get_rows_iq3_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S, get_rows_iq3_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S, get_rows_iq2_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S, get_rows_iq1_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M, get_rows_iq1_m, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM, norm, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, mul_mv_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, mul_mv_f16_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, mul_mv_f16_f32_1row, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, mul_mv_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32, mul_mv_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32, mul_mv_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, mul_mv_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, mul_mv_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, mul_mv_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32, mul_mv_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32, mul_mv_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32, mul_mv_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32, mul_mv_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32, mul_mv_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32, mul_mv_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32, mul_mv_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32, mul_mv_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32, mul_mv_iq3_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32, mul_mv_iq2_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32, mul_mv_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32, mul_mv_iq1_m_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32, mul_mv_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32, mul_mv_iq4_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32, mul_mv_id_f32_f32, ctx->support_simdgroup_reduction);
        //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16, mul_mv_id_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32, mul_mv_id_f16_f32, ctx->support_simdgroup_reduction);
        //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW, mul_mv_id_f16_f32_1row, ctx->support_simdgroup_reduction);
        //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4, mul_mv_id_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32, mul_mv_id_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32, mul_mv_id_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32, mul_mv_id_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32, mul_mv_id_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32, mul_mv_id_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32, mul_mv_id_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32, mul_mv_id_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32, mul_mv_id_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32, mul_mv_id_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32, mul_mv_id_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32, mul_mv_id_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32, mul_mv_id_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32, mul_mv_id_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32, mul_mv_id_iq3_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32, mul_mv_id_iq2_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32, mul_mv_id_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32, mul_mv_id_iq1_m_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, mul_mm_f32_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, mul_mm_f16_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32, mul_mm_q4_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32, mul_mm_q4_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32, mul_mm_q5_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32, mul_mm_q5_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32, mul_mm_q8_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32, mul_mm_q2_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32, mul_mm_q3_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32, mul_mm_q4_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32, mul_mm_q5_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32, mul_mm_q6_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32, mul_mm_iq2_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32, mul_mm_iq2_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32, mul_mm_iq3_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32, mul_mm_iq3_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32, mul_mm_iq2_s_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32, mul_mm_iq1_s_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32, mul_mm_iq3_xxs_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, mul_mm_iq1_m_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32, mul_mm_iq3_s_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32, mul_mm_iq2_s_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32, mul_mm_iq1_s_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32, mul_mm_id_f32_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, mul_mm_iq1_m_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32, mul_mm_id_f16_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32, mul_mm_id_q4_0_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32, mul_mm_id_q4_1_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32, mul_mm_id_f32_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32, mul_mm_id_q5_0_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32, mul_mm_id_f16_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32, mul_mm_id_q5_1_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32, mul_mm_id_q4_0_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32, mul_mm_id_q8_0_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32, mul_mm_id_q4_1_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32, mul_mm_id_q2_K_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32, mul_mm_id_q5_0_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32, mul_mm_id_q3_K_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32, mul_mm_id_q5_1_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32, mul_mm_id_q4_K_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32, mul_mm_id_q8_0_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32, mul_mm_id_q5_K_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32, mul_mm_id_q2_K_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32, mul_mm_id_q6_K_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32, mul_mm_id_q3_K_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32, mul_mm_id_iq2_xxs_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32, mul_mm_id_q4_K_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32, mul_mm_id_iq2_xs_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32, mul_mm_id_q5_K_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32, mul_mm_id_iq3_xxs_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32, mul_mm_id_q6_K_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32, mul_mm_id_iq3_s_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32, mul_mm_id_iq2_xxs_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32, mul_mm_id_iq2_s_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32, mul_mm_id_iq2_xs_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32, mul_mm_id_iq1_s_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32, mul_mm_id_iq3_xxs_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32, mul_mm_id_iq1_m_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32, mul_mm_id_iq3_s_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32, mul_mm_id_iq2_s_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32, mul_mm_id_iq1_s_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32, mul_mm_id_iq1_m_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ALIBI_F32, alibi_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, timestep_embedding_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARANGE_F32, arange_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, timestep_embedding_f32, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, argsort_f32_i32_desc, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARANGE_F32, arange_f32, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, leaky_relu_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, argsort_f32_i32_desc, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, leaky_relu_f32, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, cpy_f32_q8_0, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64, flash_attn_ext_f16_h64, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0, cpy_f32_q4_0, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H80, flash_attn_ext_f16_h80, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1, cpy_f32_q4_1, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H96, flash_attn_ext_f16_h96, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0, cpy_f32_q5_0, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H112, flash_attn_ext_f16_h112, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1, cpy_f32_q5_1, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H128, flash_attn_ext_f16_h128, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL, cpy_f32_iq4_nl, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256, flash_attn_ext_f16_h256, ctx->support_simdgroup_mm);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F16, cpy_f16_f16, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128, flash_attn_ext_vec_f16_h128, ctx->support_simdgroup_reduction);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F32, cpy_f16_f32, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H256, flash_attn_ext_vec_f16_h256, ctx->support_simdgroup_reduction);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONCAT, concat, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQR, sqr, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true);
|
||||||
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true);
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, cpy_f32_q8_0, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0, cpy_f32_q4_0, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1, cpy_f32_q4_1, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0, cpy_f32_q5_0, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1, cpy_f32_q5_1, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL, cpy_f32_iq4_nl, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F16, cpy_f16_f16, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F32, cpy_f16_f32, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONCAT, concat, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQR, sqr, true);
|
||||||
|
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
[metal_library release];
|
[metal_library release];
|
||||||
@@ -703,6 +732,7 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
 switch (ggml_get_unary_op(op)) {
 case GGML_UNARY_OP_TANH:
 case GGML_UNARY_OP_RELU:
+case GGML_UNARY_OP_SIGMOID:
 case GGML_UNARY_OP_GELU:
 case GGML_UNARY_OP_GELU_QUICK:
 case GGML_UNARY_OP_SILU:
@@ -730,7 +760,6 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
 case GGML_OP_GROUP_NORM:
 return ctx->support_simdgroup_reduction;
 case GGML_OP_NORM:
-case GGML_OP_ALIBI:
 case GGML_OP_ROPE:
 case GGML_OP_IM2COL:
 return true;
@@ -744,6 +773,8 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
 case GGML_OP_ARGSORT:
 case GGML_OP_LEAKY_RELU:
 return true;
+case GGML_OP_FLASH_ATTN_EXT:
+return ctx->support_simdgroup_mm; // TODO: over-restricted for vec-kernels
 case GGML_OP_MUL_MAT:
 case GGML_OP_MUL_MAT_ID:
 return ctx->support_simdgroup_reduction &&
@@ -782,7 +813,7 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
 case GGML_OP_DIAG_MASK_INF:
 case GGML_OP_GET_ROWS:
 {
-return op->ne[3] == 1;
+return op->src[0]->type != GGML_TYPE_BF16 && op->ne[3] == 1;
 }
 default:
 return false;
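(Annotation, not part of the merge: a tiny plain-C sketch of the tightened GET_ROWS support rule above — BF16 sources are rejected and only tensors with a single 4th dimension are handled; the enum and helper names here are illustrative, not ggml API.)

```c
#include <stdbool.h>
#include <stdio.h>

// Illustrative stand-in for the ggml type enum; only the BF16 case matters here.
enum tensor_type { TYPE_F32, TYPE_F16, TYPE_BF16 };

// Mirror of the updated support check: src0 must not be BF16 and ne[3] must be 1.
static bool supports_get_rows(enum tensor_type src0_type, long ne3) {
    return src0_type != TYPE_BF16 && ne3 == 1;
}

int main(void) {
    printf("%d\n", supports_get_rows(TYPE_F32, 1));  // 1
    printf("%d\n", supports_get_rows(TYPE_BF16, 1)); // 0
    return 0;
}
```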
@@ -1164,24 +1195,24 @@ static enum ggml_status ggml_metal_graph_compute(
 [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
 } break;
 case GGML_OP_CLAMP:
 {
 id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline;

 float min;
 float max;
 memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float));
 memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float));

 [encoder setComputePipelineState:pipeline];
 [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
 [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
 [encoder setBytes:&min length:sizeof(min) atIndex:2];
 [encoder setBytes:&max length:sizeof(max) atIndex:3];

 const int64_t n = ggml_nelements(dst);

 [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
 } break;
 case GGML_OP_UNARY:
 switch (ggml_get_unary_op(gf->nodes[i])) {
 // we are not taking into account the strides, so for now require contiguous tensors
@@ -1209,6 +1240,18 @@ static enum ggml_status ggml_metal_graph_compute(

 const int64_t n = ggml_nelements(dst);

+[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+} break;
+case GGML_UNARY_OP_SIGMOID:
+{
+id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SIGMOID].pipeline;
+
+[encoder setComputePipelineState:pipeline];
+[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+
+const int64_t n = ggml_nelements(dst);
+
 [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
 } break;
 case GGML_UNARY_OP_GELU:
@@ -1326,20 +1369,32 @@ static enum ggml_status ggml_metal_graph_compute(
 } break;
 case GGML_OP_SOFT_MAX:
 {
+GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32);
+
 int nth = 32; // SIMD width

 id<MTLComputePipelineState> pipeline = nil;

+const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);
+
 if (ne00%4 == 0) {
 while (nth < ne00/4 && nth < 256) {
 nth *= 2;
 }
-pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_4].pipeline;
+if (use_f16) {
+pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4].pipeline;
+} else {
+pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4].pipeline;
+}
 } else {
 while (nth < ne00 && nth < 1024) {
 nth *= 2;
 }
-pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX].pipeline;
+if (use_f16) {
+pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16].pipeline;
+} else {
+pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32].pipeline;
+}
 }

 float scale;
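(Annotation, not part of the merge: a minimal plain-C sketch of the soft-max pipeline selection above, assuming the only inputs to the choice are the row length and the mask type; the enum and helper names here are made up for illustration.)

```c
#include <stdbool.h>
#include <stdio.h>

// Hypothetical mirror of the GGML_METAL_KERNEL_TYPE_SOFT_MAX_* choice above:
// a 4-wide variant when the row length is divisible by 4, and an F16 variant
// when the (optional) mask tensor is half precision.
enum soft_max_kernel { SOFT_MAX_F16, SOFT_MAX_F32, SOFT_MAX_F16_4, SOFT_MAX_F32_4 };

static enum soft_max_kernel pick_soft_max_kernel(long ne00, bool mask_is_f16) {
    if (ne00 % 4 == 0) {
        return mask_is_f16 ? SOFT_MAX_F16_4 : SOFT_MAX_F32_4;
    }
    return mask_is_f16 ? SOFT_MAX_F16 : SOFT_MAX_F32;
}

int main(void) {
    printf("%d\n", pick_soft_max_kernel(4096, true)); // -> SOFT_MAX_F16_4
    printf("%d\n", pick_soft_max_kernel(33, false));  // -> SOFT_MAX_F32
    return 0;
}
```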
@@ -1351,8 +1406,8 @@ static enum ggml_status ggml_metal_graph_compute(
 const int64_t nrows_x = ggml_nrows(src0);
 const int64_t nrows_y = src0->ne[1];

-const uint32_t n_head_kv = nrows_x/nrows_y;
-const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
+const uint32_t n_head = nrows_x/nrows_y;
+const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));

 const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
 const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
@@ -1364,20 +1419,15 @@ static enum ggml_status ggml_metal_graph_compute(
 } else {
 [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
 }
-if (id_src2) {
-[encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
-} else {
-[encoder setBuffer:id_src0 offset:offs_src0 atIndex:2];
-}
-[encoder setBuffer:id_dst offset:offs_dst atIndex:3];
-[encoder setBytes:&ne00 length:sizeof(ne00) atIndex:4];
-[encoder setBytes:&ne01 length:sizeof(ne01) atIndex:5];
-[encoder setBytes:&ne02 length:sizeof(ne02) atIndex:6];
-[encoder setBytes:&scale length:sizeof(scale) atIndex:7];
-[encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:8];
-[encoder setBytes:&m0 length:sizeof(m0) atIndex:9];
-[encoder setBytes:&m1 length:sizeof(m1) atIndex:10];
-[encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:11];
+[encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+[encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
+[encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
+[encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
+[encoder setBytes:&scale length:sizeof(scale) atIndex:6];
+[encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:7];
+[encoder setBytes:&m0 length:sizeof(m0) atIndex:8];
+[encoder setBytes:&m1 length:sizeof(m1) atIndex:9];
+[encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:10];
 [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];

 [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
@@ -2182,49 +2232,6 @@ static enum ggml_status ggml_metal_graph_compute(

 [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
 } break;
-case GGML_OP_ALIBI:
-{
-GGML_ASSERT((src0t == GGML_TYPE_F32));
-
-const int nth = MIN(1024, ne00);
-
-//const int n_past = ((int32_t *) dst->op_params)[0];
-const int n_head = ((int32_t *) dst->op_params)[1];
-
-float max_bias;
-memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
-
-const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
-const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
-const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
-
-id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ALIBI_F32].pipeline;
-
-[encoder setComputePipelineState:pipeline];
-[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
-[encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
-[encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
-[encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
-[encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
-[encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
-[encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
-[encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
-[encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
-[encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
-[encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
-[encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
-[encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
-[encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
-[encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
-[encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
-[encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
-[encoder setBytes:&m0 length:sizeof( float) atIndex:18];
-[encoder setBytes:&m1 length:sizeof( float) atIndex:19];
-[encoder setBytes:&n_heads_log2_floor length:sizeof(int) atIndex:20];
-
-[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
-} break;
 case GGML_OP_ROPE:
 {
 GGML_ASSERT(ne10 == ne02);
@@ -2503,6 +2510,173 @@ static enum ggml_status ggml_metal_graph_compute(

 [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
 } break;
+case GGML_OP_FLASH_ATTN_EXT:
+{
+GGML_ASSERT(ne00 % 4 == 0);
+GGML_ASSERT(src0->type == GGML_TYPE_F32);
+
+struct ggml_tensor * src3 = gf->nodes[i]->src[3];
+
+GGML_ASSERT(ggml_are_same_shape(src1, src2));
+GGML_ASSERT(src3);
+
+size_t offs_src3 = 0;
+
+id<MTLBuffer> id_src3 = src3 ? ggml_metal_get_buffer(src3, &offs_src3) : nil;
+
+GGML_ASSERT(!src3 || src3->type == GGML_TYPE_F16);
+GGML_ASSERT(!src3 || src3->ne[1] >= GGML_PAD(src0->ne[1], 8) &&
+"the Flash-Attention Metal kernel requires the mask to be padded to 8 and at least n_queries big");
+
+const int64_t ne30 = src3 ? src3->ne[0] : 0; GGML_UNUSED(ne30);
+//const int64_t ne31 = src3 ? src3->ne[1] : 0;
+const int64_t ne32 = src3 ? src3->ne[2] : 0; GGML_UNUSED(ne32);
+const int64_t ne33 = src3 ? src3->ne[3] : 0; GGML_UNUSED(ne33);
+
+const uint64_t nb30 = src3 ? src3->nb[0] : 0; GGML_UNUSED(nb30);
+const uint64_t nb31 = src3 ? src3->nb[1] : 0;
+const uint64_t nb32 = src3 ? src3->nb[2] : 0; GGML_UNUSED(nb32);
+const uint64_t nb33 = src3 ? src3->nb[3] : 0; GGML_UNUSED(nb33);
+
+const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT; GGML_UNUSED(src2t);
+
+float scale;
+float max_bias;
+
+memcpy(&scale, ((int32_t *) dst->op_params) + 0, sizeof(scale));
+memcpy(&max_bias, ((int32_t *) dst->op_params) + 1, sizeof(max_bias));
+
+const uint32_t n_head = src0->ne[2];
+const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
+
+const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
+const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
+id<MTLComputePipelineState> pipeline = nil;
+
+bool use_vec_kernel = false;
+
+if (ne01 >= 4 || (ne00%128 != 0)) {
+switch (ne00) {
+case 64: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64 ].pipeline; break;
+case 80: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H80 ].pipeline; break;
+case 96: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H96 ].pipeline; break;
+case 112: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H112].pipeline; break;
+case 128: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H128].pipeline; break;
+case 256: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256].pipeline; break;
+default:
+{
+GGML_METAL_LOG_ERROR("unsupported size: %lld\n", ne00);
+GGML_METAL_LOG_ERROR("add template specialization for this size\n");
+GGML_ASSERT(false && "add template specialization for this size");
+}
+}
+} else {
+use_vec_kernel = true;
+
+switch (ne00) {
+case 128: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128].pipeline; break;
+case 256: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H256].pipeline; break;
+default:
+{
+GGML_METAL_LOG_ERROR("unsupported size: %lld\n", ne00);
+GGML_METAL_LOG_ERROR("add template specialization for this size\n");
+GGML_ASSERT(false && "add template specialization for this size");
+}
+}
+}
+
+[encoder setComputePipelineState:pipeline];
+[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+[encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+[encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
+[encoder setBuffer:id_src3 offset:offs_src3 atIndex:3];
+[encoder setBuffer:id_dst offset:offs_dst atIndex:4];
+[encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:5];
+[encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:6];
+[encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:7];
+[encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:8];
+[encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:9];
+[encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:10];
+[encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:11];
+[encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:12];
+[encoder setBytes:&ne10 length:sizeof( int64_t) atIndex:13];
+[encoder setBytes:&ne11 length:sizeof( int64_t) atIndex:14];
+[encoder setBytes:&ne12 length:sizeof( int64_t) atIndex:15];
+[encoder setBytes:&ne13 length:sizeof( int64_t) atIndex:16];
+[encoder setBytes:&nb10 length:sizeof(uint64_t) atIndex:17];
+[encoder setBytes:&nb11 length:sizeof(uint64_t) atIndex:18];
+[encoder setBytes:&nb12 length:sizeof(uint64_t) atIndex:19];
+[encoder setBytes:&nb13 length:sizeof(uint64_t) atIndex:20];
+[encoder setBytes:&nb31 length:sizeof(uint64_t) atIndex:21];
+[encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:22];
+[encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:23];
+[encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:24];
+[encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:25];
+[encoder setBytes:&scale length:sizeof( float) atIndex:26];
+[encoder setBytes:&max_bias length:sizeof( float) atIndex:27];
+[encoder setBytes:&m0 length:sizeof(m0) atIndex:28];
+[encoder setBytes:&m1 length:sizeof(m1) atIndex:29];
+[encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:30];
+
+if (!use_vec_kernel) {
+// half8x8 kernel
+const int64_t nqptg = 8; // queries per threadgroup !! sync with kernel template arguments !!
+const int64_t ncpsg = 32; // cache values per simdgroup !! sync with kernel template arguments !!
+
+GGML_ASSERT(nqptg <= 32);
+GGML_ASSERT(nqptg % 8 == 0);
+GGML_ASSERT(ncpsg % 32 == 0);
+
+int64_t nsgmax = 2;
+
+while (true) {
+const size_t smem = nqptg*(ne00 + 2*nsgmax*(ncpsg + nqptg))*(sizeof(float)/2);
+if (smem > ctx->device.maxThreadgroupMemoryLength) {
+break;
+}
+nsgmax *= 2;
+}
+nsgmax /= 2;
+
+// simdgroups per threadgroup (a.k.a. warps)
+const int64_t nsg = ne01 <= nqptg ? MAX(4, MIN(nsgmax, MIN(ne11/ncpsg, (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32))) : 4;
+
+const size_t smem = nqptg*(ne00 + 2*nsg*(ncpsg + nqptg))*(sizeof(float)/2);
+
+//printf("smem: %zu, max: %zu\n", smem, ctx->device.maxThreadgroupMemoryLength);
+GGML_ASSERT(smem <= ctx->device.maxThreadgroupMemoryLength);
+
+[encoder setThreadgroupMemoryLength:GGML_PAD(smem, 16) atIndex:0];
+
+[encoder dispatchThreadgroups:MTLSizeMake((ne01 + nqptg - 1)/nqptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
+} else {
+// half1x4 kernel
+const int64_t nqptg = 1; // queries per threadgroup !! sync with kernel template arguments !!
+const int64_t ncpsg = 32; // cache values per simdgroup !! sync with kernel template arguments !!
+
+GGML_ASSERT(nqptg <= 32);
+GGML_ASSERT(nqptg % 1 == 0);
+GGML_ASSERT(ncpsg % 32 == 0);
+
+// simdgroups per threadgroup (a.k.a. warps)
+const int64_t nsgt = MAX(2, MIN(ne11/ncpsg, (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32));
+
+int64_t nsg = 1;
+while (nsg <= nsgt) {
+nsg *= 2;
+}
+nsg /= 2;
+
+const size_t smem = (nqptg*(ne00 + 2*nsg*(ncpsg + nqptg)) + nsg*ne00)*(sizeof(float)/2);
+
+//printf("smem: %zu, max: %zu\n", smem, ctx->device.maxThreadgroupMemoryLength);
+GGML_ASSERT(smem <= ctx->device.maxThreadgroupMemoryLength);
+[encoder setThreadgroupMemoryLength:GGML_PAD(smem, 16) atIndex:0];
+
+[encoder dispatchThreadgroups:MTLSizeMake((ne01 + nqptg - 1)/nqptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)];
+}
+} break;
 case GGML_OP_DUP:
 case GGML_OP_CPY:
 case GGML_OP_CONT:
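(Annotation, not part of the merge: the threadgroup-memory sizing in the FLASH_ATTN_EXT block above reduces to a small formula. A plain-C sketch, assuming half-precision storage — sizeof(float)/2 == 2 bytes — and the same nqptg/ncpsg constants as the half8x8 kernel:)

```c
#include <stdio.h>

// Threadgroup memory for the half8x8 flash-attention kernel, per the diff:
// nqptg query rows of head size ne00, plus two scratch panels of
// (ncpsg + nqptg) per simdgroup, all stored as 2-byte halves.
static size_t flash_attn_smem(long ne00, long nsg, long nqptg, long ncpsg) {
    return (size_t) (nqptg*(ne00 + 2*nsg*(ncpsg + nqptg))*2);
}

int main(void) {
    const long nqptg = 8, ncpsg = 32;
    for (long nsg = 2; nsg <= 8; nsg *= 2) {
        printf("head 128, %ld simdgroups: %zu bytes\n",
               nsg, flash_attn_smem(128, nsg, nqptg, ncpsg));
    }
    return 0;
}
```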
@@ -2590,6 +2764,11 @@ static enum ggml_status ggml_metal_graph_compute(
 MTLCommandBufferStatus status = [command_buffer status];
 if (status != MTLCommandBufferStatusCompleted) {
 GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status);
+if (status == MTLCommandBufferStatusError) {
+NSString * error_code = [command_buffer error].localizedDescription;
+GGML_METAL_LOG_INFO("error: %s\n", [error_code UTF8String]);
+}
+
 return GGML_STATUS_FAILED;
 }
 }
@@ -2646,7 +2825,11 @@ GGML_CALL static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_
 ggml_backend_metal_free_device();

 if (ctx->owned) {
+#if TARGET_OS_OSX
+vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)ctx->all_data, ctx->all_size);
+#else
 free(ctx->all_data);
+#endif
 }

 free(ctx);
@@ -2706,10 +2889,13 @@ GGML_CALL static const char * ggml_backend_metal_buffer_type_get_name(ggml_backe
 UNUSED(buft);
 }

-static void ggml_backend_metal_log_allocated_size(id<MTLDevice> device) {
+static void ggml_backend_metal_log_allocated_size(id<MTLDevice> device, size_t size_aligned) {
+#ifndef GGML_METAL_NDEBUG
 #if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
 if (@available(macOS 10.12, iOS 16.0, *)) {
-GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)",
+GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, (%8.2f / %8.2f)",
+__func__,
+size_aligned / 1024.0 / 1024.0,
 device.currentAllocatedSize / 1024.0 / 1024.0,
 device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);

@@ -2719,10 +2905,15 @@ static void ggml_backend_metal_log_allocated_size(id<MTLDevice> device) {
 GGML_METAL_LOG_INFO("\n");
 }
 } else {
-GGML_METAL_LOG_INFO(", (%8.2f)\n", device.currentAllocatedSize / 1024.0 / 1024.0);
+GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, (%8.2f)\n",
+__func__,
+size_aligned / 1024.0 / 1024.0,
+device.currentAllocatedSize / 1024.0 / 1024.0);
 }
+#endif
 #endif
 UNUSED(device);
+UNUSED(size_aligned);
 }

 GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
@@ -2742,22 +2933,23 @@ GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buff
 ctx->owned = true;
 ctx->n_buffers = 1;

-ctx->buffers[0].data = ctx->all_data;
-ctx->buffers[0].size = size;
-ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
-length:size_aligned
-options:MTLResourceStorageModeShared
-deallocator:nil];
+if (ctx->all_data != NULL) {
+ctx->buffers[0].data = ctx->all_data;
+ctx->buffers[0].size = size;
+ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
+length:size_aligned
+options:MTLResourceStorageModeShared
+deallocator:nil];
+}

-if (ctx->buffers[0].metal == nil) {
+if (ctx->all_data == NULL || ctx->buffers[0].metal == nil) {
 GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
 free(ctx);
 ggml_backend_metal_free_device();
 return NULL;
 }

-GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);
-ggml_backend_metal_log_allocated_size(device);
+//ggml_backend_metal_log_allocated_size(device, size_aligned);

 return ggml_backend_buffer_init(buft, ggml_backend_metal_buffer_i, ctx, size);
 }
@@ -2844,7 +3036,7 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data,
 return false;
 }

-GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);
+ggml_backend_metal_log_allocated_size(device, size_aligned);

 ++ctx->n_buffers;
 } else {
@@ -2867,7 +3059,8 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data,
 return false;
 }

-GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, offs = %12ld", __func__, size_step_aligned / 1024.0 / 1024.0, i);
+ggml_backend_metal_log_allocated_size(device, size_step_aligned);
+
 if (i + size_step < size) {
 GGML_METAL_LOG_INFO("\n");
 }
@@ -2876,8 +3069,6 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data,
 }
 }

-ggml_backend_metal_log_allocated_size(device);
-
 return ggml_backend_buffer_init(ggml_backend_metal_buffer_type(), ggml_backend_metal_buffer_i, ctx, size);
 }
775
ggml-metal.metal
@@ -229,6 +229,13 @@ kernel void kernel_relu(
 dst[tpig] = max(0.0f, src0[tpig]);
 }

+kernel void kernel_sigmoid(
+device const float * src0,
+device float * dst,
+uint tpig[[thread_position_in_grid]]) {
+dst[tpig] = 1.0f / (1.0f + exp(-src0[tpig]));
+}
+
 kernel void kernel_tanh(
 device const float * src0,
 device float * dst,
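(Annotation, not part of the merge: kernel_sigmoid above is a plain element-wise map, launched with one thread per element by the host-side GGML_UNARY_OP_SIGMOID case. An equivalent CPU reference in C:)

```c
#include <math.h>
#include <stdio.h>

// CPU reference for the new sigmoid kernel: dst[i] = 1 / (1 + exp(-src[i])).
static void sigmoid_f32(const float * src, float * dst, long n) {
    for (long i = 0; i < n; ++i) {
        dst[i] = 1.0f / (1.0f + expf(-src[i]));
    }
}

int main(void) {
    const float src[4] = { -2.0f, -0.5f, 0.0f, 3.0f };
    float dst[4];
    sigmoid_f32(src, dst, 4);
    for (int i = 0; i < 4; ++i) {
        printf("%.4f\n", dst[i]);
    }
    return 0;
}
```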
@@ -352,11 +359,11 @@ kernel void kernel_sum_rows(
 dst_row[0] = row_sum;
 }

+template<typename T>
 kernel void kernel_soft_max(
-device const float * src0,
-device const float * src1,
-device const float * src2,
-device float * dst,
+device const char * src0,
+device const char * src1,
+device char * dst,
 constant int64_t & ne00,
 constant int64_t & ne01,
 constant int64_t & ne02,
@@ -375,12 +382,11 @@ kernel void kernel_soft_max(
 const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01;
 const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01);

-device const float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
-device const float * pmask = src1 != src0 ? src1 + i01*ne00 : nullptr;
-device const float * ppos = src2 != src0 ? src2 : nullptr;
-device float * pdst = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
+device const float * psrc0 = (device const float *) src0 + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);
+device const T * pmask = src1 != src0 ? (device const T *) src1 + i01*ne00 : nullptr;
+device float * pdst = (device float *) dst + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);

-float slope = 0.0f;
+float slope = 1.0f;

 // ALiBi
 if (max_bias > 0.0f) {
@@ -396,7 +402,7 @@ kernel void kernel_soft_max(
 float lmax = -INFINITY;

 for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
-lmax = MAX(lmax, psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f));
+lmax = MAX(lmax, psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f));
 }

 // find the max value in the block
@@ -421,7 +427,7 @@ kernel void kernel_soft_max(
 // parallel sum
 float lsum = 0.0f;
 for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
-const float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f)) - max_val);
+const float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)) - max_val);
 lsum += exp_psrc0;
 pdst[i00] = exp_psrc0;
 }
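(Annotation, not part of the merge: after this change the kernel folds ALiBi into the mask term, computing softmax over x*scale + slope*mask per row instead of reading a separate positions tensor. A CPU reference of that row operation in C, with the same max-subtraction for numerical stability:)

```c
#include <math.h>
#include <stdio.h>

// Row-wise softmax over x*scale + slope*mask, mirroring the kernel above.
// mask may be NULL, in which case only the scaled logits are used.
static void soft_max_row(const float * x, const float * mask, float * out,
                         long n, float scale, float slope) {
    float max_val = -INFINITY;
    for (long i = 0; i < n; ++i) {
        const float v = x[i]*scale + (mask ? slope*mask[i] : 0.0f);
        if (v > max_val) max_val = v;
    }

    float sum = 0.0f;
    for (long i = 0; i < n; ++i) {
        const float v = x[i]*scale + (mask ? slope*mask[i] : 0.0f);
        out[i] = expf(v - max_val);
        sum += out[i];
    }

    for (long i = 0; i < n; ++i) {
        out[i] /= sum;
    }
}

int main(void) {
    const float x[4]    = { 0.1f, 0.2f, 0.3f, 0.4f };
    const float mask[4] = { 0.0f, -1.0f, -2.0f, -3.0f };
    float out[4];
    soft_max_row(x, mask, out, 4, 1.0f, 0.5f);
    for (int i = 0; i < 4; ++i) {
        printf("%.4f\n", out[i]);
    }
    return 0;
}
```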
@@ -456,11 +462,11 @@ kernel void kernel_soft_max(
 }
 }

+template<typename T>
 kernel void kernel_soft_max_4(
-device const float * src0,
-device const float * src1,
-device const float * src2,
-device float * dst,
+device const char * src0,
+device const char * src1,
+device char * dst,
 constant int64_t & ne00,
 constant int64_t & ne01,
 constant int64_t & ne02,
@@ -479,12 +485,11 @@ kernel void kernel_soft_max_4(
 const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01;
 const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01);

-device const float4 * psrc4 = (device const float4 *)(src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);
-device const float4 * pmask = src1 != src0 ? (device const float4 *)(src1 + i01*ne00) : nullptr;
-device const float4 * ppos = src2 != src0 ? (device const float4 *)(src2) : nullptr;
-device float4 * pdst4 = (device float4 *)(dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);
+device const float4 * psrc4 = (device const float4 *) src0 + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00)/4;
+device const T * pmask = src1 != src0 ? (device const T *) src1 + i01*ne00/4 : nullptr;
+device float4 * pdst4 = (device float4 *) dst + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00)/4;

-float slope = 0.0f;
+float slope = 1.0f;

 if (max_bias > 0.0f) {
 const int64_t h = i02;
@@ -499,7 +504,7 @@ kernel void kernel_soft_max_4(
 float4 lmax4 = -INFINITY;

 for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
-lmax4 = fmax(lmax4, psrc4[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f));
+lmax4 = fmax(lmax4, psrc4[i00]*scale + (float4)((pmask ? slope*pmask[i00] : 0.0f)));
 }

 const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3]));
@@ -525,7 +530,7 @@ kernel void kernel_soft_max_4(
 // parallel sum
 float4 lsum4 = 0.0f;
 for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
-const float4 exp_psrc4 = exp((psrc4[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f)) - max_val);
+const float4 exp_psrc4 = exp((psrc4[i00]*scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))) - max_val);
 lsum4 += exp_psrc4;
 pdst4[i00] = exp_psrc4;
 }
@@ -562,6 +567,14 @@ kernel void kernel_soft_max_4(
 }
 }

+typedef decltype(kernel_soft_max<float>) kernel_soft_max_t;
+typedef decltype(kernel_soft_max_4<float4>) kernel_soft_max_4_t;
+
+template [[host_name("kernel_soft_max_f16")]] kernel kernel_soft_max_t kernel_soft_max<half>;
+template [[host_name("kernel_soft_max_f32")]] kernel kernel_soft_max_t kernel_soft_max<float>;
+template [[host_name("kernel_soft_max_f16_4")]] kernel kernel_soft_max_4_t kernel_soft_max_4<half4>;
+template [[host_name("kernel_soft_max_f32_4")]] kernel kernel_soft_max_4_t kernel_soft_max_4<float4>;
+
 kernel void kernel_diag_mask_inf(
 device const float * src0,
 device float * dst,
@@ -1585,60 +1598,6 @@ kernel void kernel_mul_mv_f16_f32_l4(
 }
 }

-kernel void kernel_alibi_f32(
-device const float * src0,
-device float * dst,
-constant int64_t & ne00,
-constant int64_t & ne01,
-constant int64_t & ne02,
-constant int64_t & ne03,
-constant uint64_t & nb00,
-constant uint64_t & nb01,
-constant uint64_t & nb02,
-constant uint64_t & nb03,
-constant int64_t & ne0,
-constant int64_t & ne1,
-constant int64_t & ne2,
-constant int64_t & ne3,
-constant uint64_t & nb0,
-constant uint64_t & nb1,
-constant uint64_t & nb2,
-constant uint64_t & nb3,
-constant float & m0,
-constant float & m1,
-constant int & n_heads_log2_floor,
-uint3 tgpig[[threadgroup_position_in_grid]],
-uint3 tpitg[[thread_position_in_threadgroup]],
-uint3 ntg[[threads_per_threadgroup]]) {
-const int64_t i03 = tgpig[2];
-const int64_t i02 = tgpig[1];
-const int64_t i01 = tgpig[0];
-
-const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
-
-const int64_t i3 = n / (ne2*ne1*ne0);
-const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
-const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
-//const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);
-
-const int64_t k = i3*ne3 + i2;
-
-float m_k;
-if (k < n_heads_log2_floor) {
-m_k = pow(m0, k + 1);
-} else {
-m_k = pow(m1, 2 * (k - n_heads_log2_floor) + 1);
-}
-
-device char * dst_row = (device char *) dst + i3*nb3 + i2*nb2 + i1*nb1;
-device const char * src_row = (device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01;
-for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
-const float src_v = *(device float *)(src_row + i00*nb00);
-device float * dst_v = (device float *)(dst_row + i00*nb0);
-*dst_v = i00 * m_k + src_v;
-}
-}
-
 static float rope_yarn_ramp(const float low, const float high, const int i0) {
 const float y = (i0 / 2 - low) / max(0.001f, high - low);
 return 1.0f - min(1.0f, max(0.0f, y));
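(Annotation, not part of the merge: with kernel_alibi_f32 removed, the equivalent bias is produced as slope*position and fed to soft_max/flash-attention through the mask term. A C sketch of building one such bias row, reusing the per-head slope schedule from the removed kernel — the helper name is illustrative:)

```c
#include <math.h>
#include <stdio.h>

// Build one head's ALiBi bias row: bias[j] = m_k * j, where m_k follows the
// same schedule the removed kernel used (powers of m0 below the power-of-two
// head count, odd powers of m1 above it).
static void alibi_bias_row(float * bias, long n_kv, float max_bias, int n_head, int k) {
    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

    const float m_k = k < n_heads_log2_floor ? powf(m0, k + 1)
                                             : powf(m1, 2*(k - n_heads_log2_floor) + 1);

    for (long j = 0; j < n_kv; ++j) {
        bias[j] = j * m_k;
    }
}

int main(void) {
    float bias[8];
    alibi_bias_row(bias, 8, 8.0f, 8, 0);
    for (int j = 0; j < 8; ++j) {
        printf("%.3f ", bias[j]);
    }
    printf("\n");
    return 0;
}
```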
@@ -2084,6 +2043,667 @@ kernel void kernel_leaky_relu_f32(
 dst[tpig] = src0[tpig] > 0.0f ? src0[tpig] : src0[tpig] * slope;
 }

+typedef void (flash_attn_ext_f16_t)(
+device const char * q,
+device const char * k,
+device const char * v,
+device const char * mask,
+device float * dst,
+constant int64_t & ne00,
+constant int64_t & ne01,
+constant int64_t & ne02,
+constant int64_t & ne03,
+constant uint64_t & nb00,
+constant uint64_t & nb01,
+constant uint64_t & nb02,
+constant uint64_t & nb03,
+constant int64_t & ne10,
+constant int64_t & ne11,
+constant int64_t & ne12,
+constant int64_t & ne13,
+constant uint64_t & nb10,
+constant uint64_t & nb11,
+constant uint64_t & nb12,
+constant uint64_t & nb13,
+constant uint64_t & nb31,
+constant int64_t & ne0,
+constant int64_t & ne1,
+constant int64_t & ne2,
+constant int64_t & ne3,
+constant float & scale,
+constant float & max_bias,
+constant float & m0,
+constant float & m1,
+constant uint32_t & n_head_log2,
+threadgroup half * shared,
+uint3 tgpig[[threadgroup_position_in_grid]],
+uint3 tpitg[[thread_position_in_threadgroup]],
+uint3 ntg[[threads_per_threadgroup]],
+ushort tiisg[[thread_index_in_simdgroup]],
+ushort sgitg[[simdgroup_index_in_threadgroup]]);
+
+// ref: https://arxiv.org/pdf/2307.08691.pdf
+template<int64_t D, int64_t Q = 8, int64_t C = 32> // head size, queries per threadgroup, cache items per threadgroup
+kernel void kernel_flash_attn_ext_f16(
+device const char * q,
+device const char * k,
+device const char * v,
+device const char * mask,
+device float * dst,
+constant int64_t & ne00,
+constant int64_t & ne01,
+constant int64_t & ne02,
+constant int64_t & ne03,
+constant uint64_t & nb00,
+constant uint64_t & nb01,
+constant uint64_t & nb02,
+constant uint64_t & nb03,
+constant int64_t & ne10,
+constant int64_t & ne11,
+constant int64_t & ne12,
+constant int64_t & ne13,
+constant uint64_t & nb10,
+constant uint64_t & nb11,
+constant uint64_t & nb12,
+constant uint64_t & nb13,
+constant uint64_t & nb31,
+constant int64_t & ne0,
+constant int64_t & ne1,
+constant int64_t & ne2,
+constant int64_t & ne3,
+constant float & scale,
+constant float & max_bias,
+constant float & m0,
+constant float & m1,
+constant uint32_t & n_head_log2,
+threadgroup half * shared [[threadgroup(0)]],
+uint3 tgpig[[threadgroup_position_in_grid]],
+uint3 tpitg[[thread_position_in_threadgroup]],
+uint3 ntg[[threads_per_threadgroup]],
+ushort tiisg[[thread_index_in_simdgroup]],
+ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+const short nsg = ntg.y; // number of simdgroups
+
+const short iq3 = tgpig[2];
+const short iq2 = tgpig[1];
+const short iq1 = tgpig[0]*Q;
+
+const short D4 = D/4;
+const short D8 = D/8;
+//const short Q8 = Q/8;
+const short NW = N_SIMDWIDTH;
+const short SH = (C + Q); // shared memory per simdgroup in (half)
+
+const short T = D + 2*nsg*SH; // shared memory size per query in (half)
+const short TF = T/2; // shared memory size per query in (float)
+const short T4 = T/4; // shared memory size per query in (half4)
+
+threadgroup half * sq = (threadgroup half *) (shared + 0*D); // holds the query data
+threadgroup half4 * sq4 = (threadgroup half4 *) (shared + 0*D); // same as above but in half4
+threadgroup float * ss = (threadgroup float *) (shared + 2*sgitg*SH + 1*D); // scratch buffer for attention and diagonal matrix
+
+// store the result for all queries in local memory in 8x8 matrices (the O matrix from the paper)
+simdgroup_half8x8 lo[D8];
+
+// load heads from Q to shared memory
+for (short j = sgitg; j < Q; j += nsg) {
+device const float4 * q4 = (device const float4 *) ((device const char *) q + ((iq1 + j)*nb01 + iq2*nb02 + iq3*nb03));
+
+for (short i = tiisg; i < D4; i += NW) {
+if (iq1 + j < ne01) {
+sq4[j*T4 + i] = (half4) q4[i];
+} else {
+sq4[j*T4 + i] = 0.0h;
+}
+}
+}
+
+// zero out lo
+for (short i = 0; i < D8; ++i) {
+lo[i] = make_filled_simdgroup_matrix<half, 8>(0.0h);
+}
+
+// zero out shared memory SH
+for (short j = 0; j < Q; ++j) {
+for (short i = tiisg; i < SH; i += NW) {
+ss[j*TF + i] = 0.0f;
+}
+}
+
+threadgroup_barrier(mem_flags::mem_threadgroup);
+
+{
+float S[Q] = { [0 ... Q-1] = 0.0h };
+float M[Q] = { [0 ... Q-1] = -FLT_MAX/2 };
+
+// assume K and V are same shape
+const short ne22 = ne12;
+const short ne23 = ne13;
+
+const uint nb21 = nb11;
+const uint nb22 = nb12;
+const uint nb23 = nb13;
+
+// broadcast
+const short rk2 = ne02/ne12;
+const short rk3 = ne03/ne13;
+
+const short rv2 = ne02/ne22;
+const short rv3 = ne03/ne23;
+
+// k indices
+const short ik2 = iq2/rk2;
+const short ik3 = iq3/rk3;
+
+// v indices
+const short iv2 = iq2/rv2;
+const short iv3 = iq3/rv3;
+
+// load the queries from shared memory into local memory
+simdgroup_half8x8 mq[D8];
+
+for (short i = 0; i < D8; ++i) {
+simdgroup_load(mq[i], sq + i*8, T);
+}
+
+// pointer to the mask
+device const half * mp = (device const half *) (mask + iq1*nb31);
+
+// prepare diagonal scale matrix
+simdgroup_float8x8 mscale(scale);
+
+// prepare diagonal slope matrix
+simdgroup_float8x8 mslope(1.0f);
|
||||||
|
|
||||||
|
// ALiBi
|
||||||
|
if (max_bias > 0.0f) {
|
||||||
|
const uint32_t h = iq2;
|
||||||
|
|
||||||
|
const float base = h < n_head_log2 ? m0 : m1;
|
||||||
|
const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
|
||||||
|
|
||||||
|
mslope = simdgroup_float8x8(pow(base, exph));
|
||||||
|
}
|
||||||
|
|
||||||
|
// loop over the KV cache
|
||||||
|
// each simdgroup handles blocks of Q rows and C columns
|
||||||
|
for (int ic0 = 0; ic0 < ne11; ic0 += C*nsg) {
|
||||||
|
const int ic = ic0 + C*sgitg;
|
||||||
|
if (ic >= ne11) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Q*K^T
|
||||||
|
{
|
||||||
|
for (short cc = 0; cc < C/8; ++cc) {
|
||||||
|
simdgroup_float8x8 mqk = make_filled_simdgroup_matrix<float, 8>(0.h);
|
||||||
|
|
||||||
|
device const half * pk = (device const half *) ((device const char *) k + ((ic + 8*cc)*nb11 + ik2*nb12 + ik3*nb13));
|
||||||
|
|
||||||
|
for (short i = 0; i < D8; ++i) {
|
||||||
|
simdgroup_half8x8 mk;
|
||||||
|
simdgroup_load(mk, pk + i*8, nb11/sizeof(half), 0, true); // transpose
|
||||||
|
|
||||||
|
simdgroup_multiply_accumulate(mqk, mq[i], mk, mqk);
|
||||||
|
}
|
||||||
|
|
||||||
|
// mqk = mqk*scale + mask*slope
|
||||||
|
simdgroup_half8x8 mm;
|
||||||
|
simdgroup_load(mm, mp + ic + 8*cc, nb31/sizeof(half), 0, false);
|
||||||
|
simdgroup_multiply(mm, mslope, mm);
|
||||||
|
simdgroup_multiply_accumulate(mqk, mqk, mscale, mm);
|
||||||
|
|
||||||
|
simdgroup_store(mqk, ss + 8*cc, TF, 0, false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// used to detect blocks full of -INF
|
||||||
|
float smax = -INFINITY;
|
||||||
|
|
||||||
|
// online softmax
|
||||||
|
{
|
||||||
|
float ms[Q];
|
||||||
|
|
||||||
|
for (short j = 0; j < Q; ++j) {
|
||||||
|
const short p = tiisg;
|
||||||
|
|
||||||
|
const float m = M[j];
|
||||||
|
const float s = ss[j*TF + p];
|
||||||
|
|
||||||
|
smax = simd_max(max(smax, s));
|
||||||
|
M[j] = simd_max(max(M[j], s));
|
||||||
|
|
||||||
|
ms[j] = exp(m - M[j]);
|
||||||
|
const float vs = exp(s - M[j]);
|
||||||
|
|
||||||
|
S[j] = S[j]*ms[j] + simd_sum(vs);
|
||||||
|
|
||||||
|
// the P matrix from the paper (Q rows, C columns)
|
||||||
|
ss[j*TF + p] = vs;
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a QxQ diagonal matrix for rescaling the output
|
||||||
|
if (tiisg < Q) {
|
||||||
|
ss[tiisg*TF + C + tiisg] = ms[tiisg];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// skip -INF blocks
|
||||||
|
if (smax == -INFINITY) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// O = diag(ms)*O
|
||||||
|
{
|
||||||
|
simdgroup_float8x8 mm;
|
||||||
|
simdgroup_load(mm, ss + C, TF, 0, false);
|
||||||
|
|
||||||
|
for (short i = 0; i < D8; ++i) {
|
||||||
|
simdgroup_multiply(lo[i], mm, lo[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// O = O + (Q*K^T)*V
|
||||||
|
{
|
||||||
|
for (short cc = 0; cc < C/8; ++cc) {
|
||||||
|
device const half * pv = (device const half *) ((device const char *) v + ((ic + 8*cc)*nb21 + iv2*nb22 + iv3*nb23));
|
||||||
|
|
||||||
|
for (short i = 0; i < D8; ++i) {
|
||||||
|
simdgroup_half8x8 mk;
|
||||||
|
simdgroup_load(mk, pv + i*8, nb21/sizeof(half), 0, false);
|
||||||
|
|
||||||
|
simdgroup_float8x8 mv;
|
||||||
|
simdgroup_load(mv, ss + 8*cc, TF, 0, false);
|
||||||
|
|
||||||
|
simdgroup_multiply_accumulate(lo[i], mv, mk, lo[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// these are needed for reducing the results from the simdgroups (reuse the ss buffer)
|
||||||
|
for (short j = 0; j < Q; ++j) {
|
||||||
|
if (tiisg == 0) {
|
||||||
|
ss[j*TF + 0] = S[j];
|
||||||
|
ss[j*TF + 1] = M[j];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// reduce the warps sequentially
|
||||||
|
for (short sg = 1; sg < nsg; ++sg) {
|
||||||
|
float S = { 0.0h };
|
||||||
|
float M = { -FLT_MAX/2 };
|
||||||
|
|
||||||
|
threadgroup_barrier(mem_flags::mem_threadgroup);
|
||||||
|
|
||||||
|
// each simdgroup stores its output to shared memory, reusing sq
|
||||||
|
if (sgitg == sg) {
|
||||||
|
for (short i = 0; i < D8; ++i) {
|
||||||
|
simdgroup_store(lo[i], sq + i*8, T, 0, false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
threadgroup_barrier(mem_flags::mem_threadgroup);
|
||||||
|
|
||||||
|
// the first simdgroup accumulates the results from the other simdgroups
|
||||||
|
if (sgitg == 0) {
|
||||||
|
for (short j = 0; j < Q; ++j) {
|
||||||
|
const float S0 = ss[j*TF + 0];
|
||||||
|
const float S1 = ss[j*TF + sg*SH + 0];
|
||||||
|
|
||||||
|
const float M0 = ss[j*TF + 1];
|
||||||
|
const float M1 = ss[j*TF + sg*SH + 1];
|
||||||
|
|
||||||
|
M = max(M0, M1);
|
||||||
|
|
||||||
|
const float ms0 = exp(M0 - M);
|
||||||
|
const float ms1 = exp(M1 - M);
|
||||||
|
|
||||||
|
S = S0*ms0 + S1*ms1;
|
||||||
|
|
||||||
|
if (tiisg == 0) {
|
||||||
|
ss[j*TF + 0] = S;
|
||||||
|
ss[j*TF + 1] = M;
|
||||||
|
|
||||||
|
ss[j*TF + C + j ] = ms0;
|
||||||
|
ss[j*TF + C + j + sg*SH] = ms1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// O_0 = diag(ms0)*O_0 + diag(ms1)*O_1
|
||||||
|
{
|
||||||
|
simdgroup_half8x8 t;
|
||||||
|
simdgroup_float8x8 ms0;
|
||||||
|
simdgroup_float8x8 ms1;
|
||||||
|
|
||||||
|
simdgroup_load(ms0, ss + C, TF, 0, false);
|
||||||
|
simdgroup_load(ms1, ss + C + sg*SH, TF, 0, false);
|
||||||
|
|
||||||
|
for (short i = 0; i < D8; ++i) {
|
||||||
|
simdgroup_load (t, sq + i*8, T, 0, false);
|
||||||
|
simdgroup_multiply(t, ms1, t);
|
||||||
|
|
||||||
|
simdgroup_multiply_accumulate(lo[i], ms0, lo[i], t);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// store result to shared memory (reuse sq)
|
||||||
|
if (sgitg == 0) {
|
||||||
|
for (short i = 0; i < D8; ++i) {
|
||||||
|
simdgroup_store(lo[i], sq + i*8, T, 0, false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
device float4 * dst4 = (device float4 *) dst;
|
||||||
|
|
||||||
|
// final rescale with 1/S and store to global memory
|
||||||
|
if (sgitg == 0) {
|
||||||
|
for (short j = 0; j < Q && iq1 + j < ne01; ++j) {
|
||||||
|
const float S = ss[j*TF + 0];
|
||||||
|
|
||||||
|
for (short i = tiisg; i < D4; i += NW) {
|
||||||
|
dst4[(iq3*ne2*ne1 + iq2 + (iq1 + j)*ne1)*D4 + i] = (float4) sq4[j*T4 + i]/S;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template [[host_name("kernel_flash_attn_ext_f16_h64" )]] kernel flash_attn_ext_f16_t kernel_flash_attn_ext_f16<64>;
|
||||||
|
template [[host_name("kernel_flash_attn_ext_f16_h80" )]] kernel flash_attn_ext_f16_t kernel_flash_attn_ext_f16<80>;
|
||||||
|
template [[host_name("kernel_flash_attn_ext_f16_h96" )]] kernel flash_attn_ext_f16_t kernel_flash_attn_ext_f16<96>;
|
||||||
|
template [[host_name("kernel_flash_attn_ext_f16_h112")]] kernel flash_attn_ext_f16_t kernel_flash_attn_ext_f16<112>;
|
||||||
|
template [[host_name("kernel_flash_attn_ext_f16_h128")]] kernel flash_attn_ext_f16_t kernel_flash_attn_ext_f16<128>;
|
||||||
|
template [[host_name("kernel_flash_attn_ext_f16_h256")]] kernel flash_attn_ext_f16_t kernel_flash_attn_ext_f16<256>;
|
||||||
|
|
||||||
|
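For reference, the `M`, `S`, `ms` and `vs` variables in the kernel's online-softmax block follow the usual streaming-softmax recurrence from the FlashAttention paper linked in the `// ref:` comment. A compact restatement of what the kernel computes per KV block (notation is ours, not part of the diff):

$$
\begin{aligned}
M' &= \max\!\bigl(M, \max_j s_j\bigr) \\
m_s &= e^{M - M'}, \qquad v_j = e^{s_j - M'} \\
S' &= S \cdot m_s + \sum_j v_j \\
O' &= \operatorname{diag}(m_s)\,O + P\,V, \qquad P_{j} = v_j
\end{aligned}
$$

After the last KV block, the stored output is $O'/S'$, which is exactly the final rescale with `1/S` at the end of the kernel.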
template<int64_t D, int64_t Q = 1, int64_t C = 32> // head size, queries per threadgroup, cache items per threadgroup
kernel void kernel_flash_attn_ext_vec_f16(
        device const  char * q,
        device const  char * k,
        device const  char * v,
        device const  char * mask,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne10,
        constant   int64_t & ne11,
        constant   int64_t & ne12,
        constant   int64_t & ne13,
        constant  uint64_t & nb10,
        constant  uint64_t & nb11,
        constant  uint64_t & nb12,
        constant  uint64_t & nb13,
        constant  uint64_t & nb31,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant     float & scale,
        constant     float & max_bias,
        constant     float & m0,
        constant     float & m1,
        constant  uint32_t & n_head_log2,
        threadgroup   half * shared [[threadgroup(0)]],
        uint3  tgpig[[threadgroup_position_in_grid]],
        uint3  tpitg[[thread_position_in_threadgroup]],
        uint3    ntg[[threads_per_threadgroup]],
        ushort tiisg[[thread_index_in_simdgroup]],
        ushort sgitg[[simdgroup_index_in_threadgroup]]) {
    const short nsg = ntg.y; // number of simdgroups

    const short iq3 = tgpig[2];
    const short iq2 = tgpig[1];
    const short iq1 = tgpig[0];

    const short D4 = D/4;
    const short NW = N_SIMDWIDTH;
    const short SH = (C + Q); // shared memory per simdgroup in (half)

    const short T = D + 2*nsg*SH; // shared memory size per query in (half)

    float slope = 1.0f;

    // ALiBi
    if (max_bias > 0.0f) {
        const uint32_t h = iq2;

        const float base = h < n_head_log2 ? m0 : m1;
        const int   exp  = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;

        slope = pow(base, exp);
    }

  //threadgroup half   * sq  = (threadgroup half   *) (shared +              0*D); // holds the query data
    threadgroup half4  * sq4 = (threadgroup half4  *) (shared +              0*D); // same as above but in half4
    threadgroup float  * ss  = (threadgroup float  *) (shared + 2*sgitg*SH + 1*D); // scratch buffer for attention and diagonal matrix
    threadgroup float4 * ss4 = (threadgroup float4 *) (shared + 2*sgitg*SH + 1*D); // same as above but in half4
    threadgroup half4  * sr4 = (threadgroup half4  *) (shared +   sgitg*D  + 1*T); // scratch buffer for the results

    // store the result for all queries in local memory in 8x8 matrices (the O matrix from the paper)
    half4 lo[D4/NW];

    // load heads from Q to shared memory
    device const float4 * q4 = (device const float4 *) ((device const char *) q + (iq1*nb01 + iq2*nb02 + iq3*nb03));

    for (short i = tiisg; i < D4; i += NW) {
        if (iq1 < ne01) {
            sq4[i] = (half4) q4[i];
        } else {
            sq4[i] = 0.0h;
        }
    }

    // zero out lo
    for (short i = tiisg; i < D4; i += NW) {
        lo[i/NW] = 0.0h;
    }

    // zero out shared memory SH
    for (short i = tiisg; i < SH/4; i += NW) {
        ss4[i] = 0.0h;
    }

    threadgroup_barrier(mem_flags::mem_threadgroup);

    {
        float S = { 0.0h };
        float M = { -FLT_MAX/2 };

        // assume K and V are same shape
        const short ne22 = ne12;
        const short ne23 = ne13;

        const uint nb21 = nb11;
        const uint nb22 = nb12;
        const uint nb23 = nb13;

        // broadcast
        const short rk2 = ne02/ne12;
        const short rk3 = ne03/ne13;

        const short rv2 = ne02/ne22;
        const short rv3 = ne03/ne23;

        // k indices
        const short ik2 = iq2 / rk2;
        const short ik3 = iq3 / rk3;

        // v indices
        const short iv2 = iq2 / rv2;
        const short iv3 = iq3 / rv3;

        // load the queries from shared memory into local memory
        half4 mq[D4];

        for (short ii = 0; ii < D4; ii += NW) {
            short i = ii + tiisg;
            mq[i] = sq4[i];
        }

        // pointer to the mask
        device const half4 * mp4 = (device const half4 *) (mask + iq1*nb31);

        // loop over the KV cache
        // each simdgroup handles blocks of Q rows and C columns
        for (int ic0 = 0; ic0 < ne11; ic0 += C*nsg) {
            const int ic = ic0 + C*sgitg;
            if (ic >= ne11) {
                break;
            }

            // Q*K^T
            {
#pragma unroll
                for (short cc = 0; cc < C/4; ++cc) {
                    float4 mqk = { 0.0h };

                    device const half4 * pk4 = (device const half4 *) ((device const char *) k + ((ic + 4*cc)*nb11 + ik2*nb12 + ik3*nb13));

#pragma unroll
                    for (short ii = 0; ii < D4; ii += NW) {
                        const short i = ii + tiisg;

                        half4x4 mk;
                        mk[0] = pk4[i + 0*(nb11/8)];
                        mk[1] = pk4[i + 1*(nb11/8)];
                        mk[2] = pk4[i + 2*(nb11/8)];
                        mk[3] = pk4[i + 3*(nb11/8)];

                        mqk += (float4) (mq[i] * mk);
                    }

                    // reduce the results from the threads in the simdgroup
                    mqk += simd_shuffle_down(mqk, 16);
                    mqk += simd_shuffle_down(mqk,  8);
                    mqk += simd_shuffle_down(mqk,  4);
                    mqk += simd_shuffle_down(mqk,  2);
                    mqk += simd_shuffle_down(mqk,  1);

                    // mqk = mqk*scale + mask*slope
                    if (tiisg == 0) {
                        float4 mm = (float4) mp4[ic/4 + cc];
                        mqk = mqk*scale + mm*slope;

                        ss4[cc] = mqk;
                    }
                }
            }

            // online softmax
            {
                const short p = tiisg;

                const float m = M;
                const float s = ss[p];

                M = simd_max(max(M, s));

                const float ms = exp(m - M);
                const float vs = exp(s - M);

                S = S*ms + simd_sum(vs);

                // the P matrix from the paper (Q rows, C columns)
                ss[p] = vs;

                // O = diag(ms)*O
#pragma unroll
                for (short ii = 0; ii < D4; ii += NW) {
                    const short i = ii + tiisg;
                    lo[i/NW] *= ms;
                }
            }

            // O = O + (Q*K^T)*V
            {
#pragma unroll
                for (short cc = 0; cc < C/4; ++cc) {
                    device const half4 * pv4 = (device const half4 *) ((device const char *) v + ((ic + 4*cc)*nb21 + iv2*nb22 + iv3*nb23));

#pragma unroll
                    for (short ii = 0; ii < D4; ii += NW) {
                        const short i = ii + tiisg;

                        lo[i/NW] += pv4[i + 0*(nb21/8)] * ss[4*cc + 0];
                        lo[i/NW] += pv4[i + 1*(nb21/8)] * ss[4*cc + 1];
                        lo[i/NW] += pv4[i + 2*(nb21/8)] * ss[4*cc + 2];
                        lo[i/NW] += pv4[i + 3*(nb21/8)] * ss[4*cc + 3];
                    }
                }
            }

        }

        // these are needed for reducing the results from the simdgroups (reuse the ss buffer)
        if (tiisg == 0) {
            ss[0] = S;
            ss[1] = M;
        }
    }

    // store results to shared memory
    for (short ii = 0; ii < D4; ii += NW) {
        short i = ii + tiisg;
        sr4[i] = lo[ii/NW];
    }

    threadgroup_barrier(mem_flags::mem_threadgroup);

    // parallel reduce
    for (short r = nsg/2; r > 0; r >>= 1) {
        if (sgitg < r) {
            const float S0 = ss[       0];
            const float S1 = ss[r*SH + 0];

            const float M0 = ss[       1];
            const float M1 = ss[r*SH + 1];

            const float M = max(M0, M1);

            const float ms0 = exp(M0 - M);
            const float ms1 = exp(M1 - M);

            const float S = S0*ms0 + S1*ms1;

            if (tiisg == 0) {
                ss[0] = S;
                ss[1] = M;
            }

            // O_0 = diag(ms0)*O_0 + diag(ms1)*O_1
            for (short ii = 0; ii < D4; ii += NW) {
                short i = ii + tiisg;
                sr4[i] = sr4[i]*ms0 + sr4[i + r*D4]*ms1;
            }
        }

        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    device float4 * dst4 = (device float4 *) dst;

    // final rescale with 1/S and store to global memory
    if (sgitg == 0) {
        const float S = ss[0];

        for (short ii = 0; ii < D4; ii += NW) {
            short i = ii + tiisg;
            dst4[(iq3*ne2*ne1 + iq2 + (iq1)*ne1)*D4 + i] = (float4) sr4[i]/S;
        }
    }
}

template [[host_name("kernel_flash_attn_ext_vec_f16_h128")]] kernel flash_attn_ext_f16_t kernel_flash_attn_ext_vec_f16<128>;
template [[host_name("kernel_flash_attn_ext_vec_f16_h256")]] kernel flash_attn_ext_f16_t kernel_flash_attn_ext_vec_f16<256>;

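The `// parallel reduce` step in the vector kernel merges per-simdgroup partial results pairwise, rescaling each side by `exp(M_i - M)` before summing. A minimal host-side C++ sketch of the same merge rule, with hypothetical type and function names chosen only to make the rescaling explicit:

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Partial softmax state produced by one simdgroup:
// M = running maximum of the attention scores seen so far,
// S = sum of exp(score - M),
// O = accumulated (unnormalized) output vector of length D.
struct PartialState {
    float M;
    float S;
    std::vector<float> O;
};

// Merge two partial states, mirroring the kernel's ms0/ms1 rescaling.
PartialState merge(const PartialState & a, const PartialState & b) {
    const float M   = std::max(a.M, b.M);
    const float ms0 = std::exp(a.M - M);
    const float ms1 = std::exp(b.M - M);

    PartialState out;
    out.M = M;
    out.S = a.S*ms0 + b.S*ms1;

    out.O.resize(a.O.size());
    for (std::size_t i = 0; i < a.O.size(); ++i) {
        // O_0 = diag(ms0)*O_0 + diag(ms1)*O_1
        out.O[i] = a.O[i]*ms0 + b.O[i]*ms1;
    }
    return out;
}

// After all partial states are merged, the attention output is out.O[i] / out.S,
// matching the kernel's final rescale with 1/S.
```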
kernel void kernel_cpy_f16_f16(
        device const half * src0,
        device       half * dst,
@ -2204,7 +2824,8 @@ kernel void kernel_cpy_f32_f16(
        for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
            device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);

            dst_data[i00] = src[0];
            // TODO: is there a better way to handle -INFINITY?
            dst_data[i00] = src[0] == -INFINITY ? -MAXHALF : src[0];
        }
    }

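The `-MAXHALF` clamp added in `kernel_cpy_f32_f16` presumably exists because the f32 source (for example an attention mask) can contain `-INFINITY`, which the f16 flash-attention path handles poorly; clamping to the most negative finite half keeps the masked scores effectively at zero probability after `exp()` while avoiding infinity-related edge cases. A C++ sketch of the same idea, with a hypothetical helper name (65504 is the largest finite IEEE half value, which is what Metal's `MAXHALF` denotes):

```cpp
#include <cmath>

// Largest finite value representable in IEEE half precision.
constexpr float kMaxHalf = 65504.0f;

// Hypothetical helper mirroring the clamp in kernel_cpy_f32_f16:
// -INFINITY in the f32 source becomes the most negative finite half
// instead of propagating -inf into the f16 computation.
static float clamp_neg_inf_for_f16(float x) {
    return x == -INFINITY ? -kMaxHalf : x;
}
```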