mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-27 06:39:25 +01:00
Merge branch 'master' into gg/flash-attn
This commit is contained in:
commit
013721df2b
@ -12,6 +12,7 @@ Checks: >
|
|||||||
-readability-implicit-bool-conversion,
|
-readability-implicit-bool-conversion,
|
||||||
-readability-magic-numbers,
|
-readability-magic-numbers,
|
||||||
-readability-uppercase-literal-suffix,
|
-readability-uppercase-literal-suffix,
|
||||||
|
-readability-simplify-boolean-expr,
|
||||||
clang-analyzer-*,
|
clang-analyzer-*,
|
||||||
-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,
|
-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,
|
||||||
performance-*,
|
performance-*,
|
||||||
|
@ -26,8 +26,8 @@ COPY . .
|
|||||||
|
|
||||||
# Set nvcc architecture
|
# Set nvcc architecture
|
||||||
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
||||||
# Enable cuBLAS
|
# Enable CUDA
|
||||||
ENV LLAMA_CUBLAS=1
|
ENV LLAMA_CUDA=1
|
||||||
|
|
||||||
RUN make
|
RUN make
|
||||||
|
|
||||||
|
@ -12,7 +12,7 @@
|
|||||||
# 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries.
|
# 4. OpenCL/CLBLAST support simply requires the ICD loader and basic opencl libraries.
|
||||||
# It is up to the user to install the correct vendor-specific support.
|
# It is up to the user to install the correct vendor-specific support.
|
||||||
|
|
||||||
Name: llama.cpp-cublas
|
Name: llama.cpp-cuda
|
||||||
Version: %( date "+%%Y%%m%%d" )
|
Version: %( date "+%%Y%%m%%d" )
|
||||||
Release: 1%{?dist}
|
Release: 1%{?dist}
|
||||||
Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
|
Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
|
||||||
@ -32,16 +32,16 @@ CPU inference for Meta's Lllama2 models using default options.
|
|||||||
%setup -n llama.cpp-master
|
%setup -n llama.cpp-master
|
||||||
|
|
||||||
%build
|
%build
|
||||||
make -j LLAMA_CUBLAS=1
|
make -j LLAMA_CUDA=1
|
||||||
|
|
||||||
%install
|
%install
|
||||||
mkdir -p %{buildroot}%{_bindir}/
|
mkdir -p %{buildroot}%{_bindir}/
|
||||||
cp -p main %{buildroot}%{_bindir}/llamacppcublas
|
cp -p main %{buildroot}%{_bindir}/llamacppcuda
|
||||||
cp -p server %{buildroot}%{_bindir}/llamacppcublasserver
|
cp -p server %{buildroot}%{_bindir}/llamacppcudaserver
|
||||||
cp -p simple %{buildroot}%{_bindir}/llamacppcublassimple
|
cp -p simple %{buildroot}%{_bindir}/llamacppcudasimple
|
||||||
|
|
||||||
mkdir -p %{buildroot}/usr/lib/systemd/system
|
mkdir -p %{buildroot}/usr/lib/systemd/system
|
||||||
%{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llamacublas.service
|
%{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llamacuda.service
|
||||||
[Unit]
|
[Unit]
|
||||||
Description=Llama.cpp server, CPU only (no GPU support in this build).
|
Description=Llama.cpp server, CPU only (no GPU support in this build).
|
||||||
After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target
|
After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target
|
||||||
@ -49,7 +49,7 @@ After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.t
|
|||||||
[Service]
|
[Service]
|
||||||
Type=simple
|
Type=simple
|
||||||
EnvironmentFile=/etc/sysconfig/llama
|
EnvironmentFile=/etc/sysconfig/llama
|
||||||
ExecStart=/usr/bin/llamacppcublasserver $LLAMA_ARGS
|
ExecStart=/usr/bin/llamacppcudaserver $LLAMA_ARGS
|
||||||
ExecReload=/bin/kill -s HUP $MAINPID
|
ExecReload=/bin/kill -s HUP $MAINPID
|
||||||
Restart=never
|
Restart=never
|
||||||
|
|
||||||
@ -67,10 +67,10 @@ rm -rf %{buildroot}
|
|||||||
rm -rf %{_builddir}/*
|
rm -rf %{_builddir}/*
|
||||||
|
|
||||||
%files
|
%files
|
||||||
%{_bindir}/llamacppcublas
|
%{_bindir}/llamacppcuda
|
||||||
%{_bindir}/llamacppcublasserver
|
%{_bindir}/llamacppcudaserver
|
||||||
%{_bindir}/llamacppcublassimple
|
%{_bindir}/llamacppcudasimple
|
||||||
/usr/lib/systemd/system/llamacublas.service
|
/usr/lib/systemd/system/llamacuda.service
|
||||||
%config /etc/sysconfig/llama
|
%config /etc/sysconfig/llama
|
||||||
|
|
||||||
%pre
|
%pre
|
@ -20,8 +20,8 @@ COPY . .
|
|||||||
|
|
||||||
# Set nvcc architecture
|
# Set nvcc architecture
|
||||||
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
||||||
# Enable cuBLAS
|
# Enable CUDA
|
||||||
ENV LLAMA_CUBLAS=1
|
ENV LLAMA_CUDA=1
|
||||||
|
|
||||||
RUN make
|
RUN make
|
||||||
|
|
||||||
|
@ -4,13 +4,14 @@
|
|||||||
config,
|
config,
|
||||||
stdenv,
|
stdenv,
|
||||||
mkShell,
|
mkShell,
|
||||||
|
runCommand,
|
||||||
cmake,
|
cmake,
|
||||||
ninja,
|
ninja,
|
||||||
pkg-config,
|
pkg-config,
|
||||||
git,
|
git,
|
||||||
python3,
|
python3,
|
||||||
mpi,
|
mpi,
|
||||||
openblas, # TODO: Use the generic `blas` so users could switch between alternative implementations
|
blas,
|
||||||
cudaPackages,
|
cudaPackages,
|
||||||
darwin,
|
darwin,
|
||||||
rocmPackages,
|
rocmPackages,
|
||||||
@ -35,7 +36,8 @@
|
|||||||
# It's necessary to consistently use backendStdenv when building with CUDA support,
|
# It's necessary to consistently use backendStdenv when building with CUDA support,
|
||||||
# otherwise we get libstdc++ errors downstream.
|
# otherwise we get libstdc++ errors downstream.
|
||||||
effectiveStdenv ? if useCuda then cudaPackages.backendStdenv else stdenv,
|
effectiveStdenv ? if useCuda then cudaPackages.backendStdenv else stdenv,
|
||||||
enableStatic ? effectiveStdenv.hostPlatform.isStatic
|
enableStatic ? effectiveStdenv.hostPlatform.isStatic,
|
||||||
|
precompileMetalShaders ? false
|
||||||
}@inputs:
|
}@inputs:
|
||||||
|
|
||||||
let
|
let
|
||||||
@ -87,6 +89,11 @@ let
|
|||||||
]
|
]
|
||||||
);
|
);
|
||||||
|
|
||||||
|
xcrunHost = runCommand "xcrunHost" {} ''
|
||||||
|
mkdir -p $out/bin
|
||||||
|
ln -s /usr/bin/xcrun $out/bin
|
||||||
|
'';
|
||||||
|
|
||||||
# apple_sdk is supposed to choose sane defaults, no need to handle isAarch64
|
# apple_sdk is supposed to choose sane defaults, no need to handle isAarch64
|
||||||
# separately
|
# separately
|
||||||
darwinBuildInputs =
|
darwinBuildInputs =
|
||||||
@ -150,6 +157,8 @@ effectiveStdenv.mkDerivation (
|
|||||||
postPatch = ''
|
postPatch = ''
|
||||||
substituteInPlace ./ggml-metal.m \
|
substituteInPlace ./ggml-metal.m \
|
||||||
--replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
|
--replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
|
||||||
|
substituteInPlace ./ggml-metal.m \
|
||||||
|
--replace '[bundle pathForResource:@"default" ofType:@"metallib"];' "@\"$out/bin/default.metallib\";"
|
||||||
|
|
||||||
# TODO: Package up each Python script or service appropriately.
|
# TODO: Package up each Python script or service appropriately.
|
||||||
# If we were to migrate to buildPythonPackage and prepare the `pyproject.toml`,
|
# If we were to migrate to buildPythonPackage and prepare the `pyproject.toml`,
|
||||||
@ -157,6 +166,14 @@ effectiveStdenv.mkDerivation (
|
|||||||
substituteInPlace ./*.py --replace "/usr/bin/env python" "${llama-python}/bin/python"
|
substituteInPlace ./*.py --replace "/usr/bin/env python" "${llama-python}/bin/python"
|
||||||
'';
|
'';
|
||||||
|
|
||||||
|
# With PR#6015 https://github.com/ggerganov/llama.cpp/pull/6015,
|
||||||
|
# `default.metallib` may be compiled with Metal compiler from XCode
|
||||||
|
# and we need to escape sandbox on MacOS to access Metal compiler.
|
||||||
|
# `xcrun` is used find the path of the Metal compiler, which is varible
|
||||||
|
# and not on $PATH
|
||||||
|
# see https://github.com/ggerganov/llama.cpp/pull/6118 for discussion
|
||||||
|
__noChroot = effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders;
|
||||||
|
|
||||||
nativeBuildInputs =
|
nativeBuildInputs =
|
||||||
[
|
[
|
||||||
cmake
|
cmake
|
||||||
@ -173,6 +190,8 @@ effectiveStdenv.mkDerivation (
|
|||||||
]
|
]
|
||||||
++ optionals (effectiveStdenv.hostPlatform.isGnu && enableStatic) [
|
++ optionals (effectiveStdenv.hostPlatform.isGnu && enableStatic) [
|
||||||
glibc.static
|
glibc.static
|
||||||
|
] ++ optionals (effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders) [
|
||||||
|
xcrunHost
|
||||||
];
|
];
|
||||||
|
|
||||||
buildInputs =
|
buildInputs =
|
||||||
@ -181,6 +200,7 @@ effectiveStdenv.mkDerivation (
|
|||||||
++ optionals useMpi [ mpi ]
|
++ optionals useMpi [ mpi ]
|
||||||
++ optionals useOpenCL [ clblast ]
|
++ optionals useOpenCL [ clblast ]
|
||||||
++ optionals useRocm rocmBuildInputs
|
++ optionals useRocm rocmBuildInputs
|
||||||
|
++ optionals useBlas [ blas ]
|
||||||
++ optionals useVulkan vulkanBuildInputs;
|
++ optionals useVulkan vulkanBuildInputs;
|
||||||
|
|
||||||
cmakeFlags =
|
cmakeFlags =
|
||||||
@ -191,7 +211,7 @@ effectiveStdenv.mkDerivation (
|
|||||||
(cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
|
(cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
|
||||||
(cmakeBool "LLAMA_BLAS" useBlas)
|
(cmakeBool "LLAMA_BLAS" useBlas)
|
||||||
(cmakeBool "LLAMA_CLBLAST" useOpenCL)
|
(cmakeBool "LLAMA_CLBLAST" useOpenCL)
|
||||||
(cmakeBool "LLAMA_CUBLAS" useCuda)
|
(cmakeBool "LLAMA_CUDA" useCuda)
|
||||||
(cmakeBool "LLAMA_HIPBLAS" useRocm)
|
(cmakeBool "LLAMA_HIPBLAS" useRocm)
|
||||||
(cmakeBool "LLAMA_METAL" useMetalKit)
|
(cmakeBool "LLAMA_METAL" useMetalKit)
|
||||||
(cmakeBool "LLAMA_MPI" useMpi)
|
(cmakeBool "LLAMA_MPI" useMpi)
|
||||||
@ -216,8 +236,10 @@ effectiveStdenv.mkDerivation (
|
|||||||
# Should likely use `rocmPackages.clr.gpuTargets`.
|
# Should likely use `rocmPackages.clr.gpuTargets`.
|
||||||
"-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102"
|
"-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102"
|
||||||
]
|
]
|
||||||
++ optionals useMetalKit [ (lib.cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1") ]
|
++ optionals useMetalKit [
|
||||||
++ optionals useBlas [ (lib.cmakeFeature "LLAMA_BLAS_VENDOR" "OpenBLAS") ];
|
(lib.cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
|
||||||
|
(cmakeBool "LLAMA_METAL_EMBED_LIBRARY" (!precompileMetalShaders))
|
||||||
|
];
|
||||||
|
|
||||||
# TODO(SomeoneSerge): It's better to add proper install targets at the CMake level,
|
# TODO(SomeoneSerge): It's better to add proper install targets at the CMake level,
|
||||||
# if they haven't been added yet.
|
# if they haven't been added yet.
|
||||||
|
@ -20,8 +20,8 @@ COPY . .
|
|||||||
|
|
||||||
# Set nvcc architecture
|
# Set nvcc architecture
|
||||||
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
||||||
# Enable cuBLAS
|
# Enable CUDA
|
||||||
ENV LLAMA_CUBLAS=1
|
ENV LLAMA_CUDA=1
|
||||||
|
|
||||||
RUN make
|
RUN make
|
||||||
|
|
||||||
|
51
.github/workflows/build.yml
vendored
51
.github/workflows/build.yml
vendored
@ -15,6 +15,10 @@ on:
|
|||||||
types: [opened, synchronize, reopened]
|
types: [opened, synchronize, reopened]
|
||||||
paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m']
|
paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m']
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
|
BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
|
||||||
GGML_NLOOP: 3
|
GGML_NLOOP: 3
|
||||||
@ -221,6 +225,17 @@ jobs:
|
|||||||
cd build
|
cd build
|
||||||
ctest -L main --verbose --timeout 900
|
ctest -L main --verbose --timeout 900
|
||||||
|
|
||||||
|
- name: Test llama2c conversion
|
||||||
|
id: llama2c_test
|
||||||
|
run: |
|
||||||
|
cd build
|
||||||
|
echo "Fetch tokenizer"
|
||||||
|
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories260K/tok512.bin
|
||||||
|
echo "Fetch llama2c model"
|
||||||
|
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories260K/stories260K.bin
|
||||||
|
./bin/convert-llama2c-to-ggml --copy-vocab-from-model ./tok512.bin --llama2c-model stories260K.bin --llama2c-output-model stories260K.gguf
|
||||||
|
./bin/main -m stories260K.gguf -p "One day, Lily met a Shoggoth" -n 500 -c 256
|
||||||
|
|
||||||
# ubuntu-latest-cmake-sanitizer:
|
# ubuntu-latest-cmake-sanitizer:
|
||||||
# runs-on: ubuntu-latest
|
# runs-on: ubuntu-latest
|
||||||
#
|
#
|
||||||
@ -713,13 +728,13 @@ jobs:
|
|||||||
path: |
|
path: |
|
||||||
llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-x64.zip
|
llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-x64.zip
|
||||||
|
|
||||||
windows-latest-cmake-cublas:
|
windows-latest-cmake-cuda:
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
cuda: ['12.2.0', '11.7.1']
|
cuda: ['12.2.0', '11.7.1']
|
||||||
build: ['cublas']
|
build: ['cuda']
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
@ -740,7 +755,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
mkdir build
|
mkdir build
|
||||||
cd build
|
cd build
|
||||||
cmake .. -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUBLAS=ON -DBUILD_SHARED_LIBS=ON
|
cmake .. -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CUDA=ON -DBUILD_SHARED_LIBS=ON
|
||||||
cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS}
|
cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS}
|
||||||
|
|
||||||
- name: Determine tag name
|
- name: Determine tag name
|
||||||
@ -785,6 +800,7 @@ jobs:
|
|||||||
|
|
||||||
windows-latest-cmake-sycl:
|
windows-latest-cmake-sycl:
|
||||||
runs-on: windows-latest
|
runs-on: windows-latest
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
shell: bash
|
shell: bash
|
||||||
@ -793,7 +809,6 @@ jobs:
|
|||||||
WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/62641e01-1e8d-4ace-91d6-ae03f7f8a71f/w_BaseKit_p_2024.0.0.49563_offline.exe
|
WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/62641e01-1e8d-4ace-91d6-ae03f7f8a71f/w_BaseKit_p_2024.0.0.49563_offline.exe
|
||||||
WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel
|
WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel
|
||||||
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Clone
|
- name: Clone
|
||||||
id: checkout
|
id: checkout
|
||||||
@ -808,6 +823,32 @@ jobs:
|
|||||||
id: cmake_build
|
id: cmake_build
|
||||||
run: examples/sycl/win-build-sycl.bat
|
run: examples/sycl/win-build-sycl.bat
|
||||||
|
|
||||||
|
- name: Determine tag name
|
||||||
|
id: tag
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
BUILD_NUMBER="$(git rev-list --count HEAD)"
|
||||||
|
SHORT_HASH="$(git rev-parse --short=7 HEAD)"
|
||||||
|
if [[ "${{ env.BRANCH_NAME }}" == "master" ]]; then
|
||||||
|
echo "name=b${BUILD_NUMBER}" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
SAFE_NAME=$(echo "${{ env.BRANCH_NAME }}" | tr '/' '-')
|
||||||
|
echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Pack artifacts
|
||||||
|
id: pack_artifacts
|
||||||
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
|
run: |
|
||||||
|
7z a llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip ./build/bin/*
|
||||||
|
|
||||||
|
- name: Upload artifacts
|
||||||
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
|
uses: actions/upload-artifact@v3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip
|
||||||
|
|
||||||
ios-xcode-build:
|
ios-xcode-build:
|
||||||
runs-on: macos-latest
|
runs-on: macos-latest
|
||||||
|
|
||||||
@ -870,7 +911,7 @@ jobs:
|
|||||||
- macOS-latest-make
|
- macOS-latest-make
|
||||||
- macOS-latest-cmake
|
- macOS-latest-cmake
|
||||||
- windows-latest-cmake
|
- windows-latest-cmake
|
||||||
- windows-latest-cmake-cublas
|
- windows-latest-cmake-cuda
|
||||||
- macOS-latest-cmake-arm64
|
- macOS-latest-cmake-arm64
|
||||||
- macOS-latest-cmake-x64
|
- macOS-latest-cmake-x64
|
||||||
|
|
||||||
|
2
.github/workflows/close-issue.yml
vendored
2
.github/workflows/close-issue.yml
vendored
@ -19,5 +19,5 @@ jobs:
|
|||||||
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
|
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
|
||||||
days-before-pr-stale: -1
|
days-before-pr-stale: -1
|
||||||
days-before-pr-close: -1
|
days-before-pr-close: -1
|
||||||
operations-per-run: 1000
|
operations-per-run: 10000
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
4
.github/workflows/code-coverage.yml
vendored
4
.github/workflows/code-coverage.yml
vendored
@ -5,6 +5,10 @@ env:
|
|||||||
GGML_NLOOP: 3
|
GGML_NLOOP: 3
|
||||||
GGML_N_THREADS: 1
|
GGML_N_THREADS: 1
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
run:
|
run:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-20.04
|
||||||
|
4
.github/workflows/docker.yml
vendored
4
.github/workflows/docker.yml
vendored
@ -15,6 +15,10 @@ on:
|
|||||||
branches:
|
branches:
|
||||||
- master
|
- master
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
push_to_registry:
|
push_to_registry:
|
||||||
name: Push Docker image to Docker Hub
|
name: Push Docker image to Docker Hub
|
||||||
|
4
.github/workflows/editorconfig.yml
vendored
4
.github/workflows/editorconfig.yml
vendored
@ -14,6 +14,10 @@ on:
|
|||||||
branches:
|
branches:
|
||||||
- master
|
- master
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
editorconfig:
|
editorconfig:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
4
.github/workflows/nix-ci-aarch64.yml
vendored
4
.github/workflows/nix-ci-aarch64.yml
vendored
@ -17,6 +17,10 @@ on:
|
|||||||
types: [opened, synchronize, reopened]
|
types: [opened, synchronize, reopened]
|
||||||
paths: ['**/*.nix', 'flake.lock']
|
paths: ['**/*.nix', 'flake.lock']
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
nix-build-aarch64:
|
nix-build-aarch64:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
4
.github/workflows/nix-ci.yml
vendored
4
.github/workflows/nix-ci.yml
vendored
@ -8,6 +8,10 @@ on:
|
|||||||
pull_request:
|
pull_request:
|
||||||
types: [opened, synchronize, reopened]
|
types: [opened, synchronize, reopened]
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
nix-eval:
|
nix-eval:
|
||||||
strategy:
|
strategy:
|
||||||
|
@ -16,6 +16,10 @@ on:
|
|||||||
- 'requirements.txt'
|
- 'requirements.txt'
|
||||||
- 'requirements/*.txt'
|
- 'requirements/*.txt'
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
python-check-requirements:
|
python-check-requirements:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
4
.github/workflows/python-lint.yml
vendored
4
.github/workflows/python-lint.yml
vendored
@ -2,6 +2,10 @@ name: flake8 Lint
|
|||||||
|
|
||||||
on: [push, pull_request]
|
on: [push, pull_request]
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
flake8-lint:
|
flake8-lint:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
5
.github/workflows/server.yml
vendored
5
.github/workflows/server.yml
vendored
@ -18,6 +18,10 @@ on:
|
|||||||
schedule:
|
schedule:
|
||||||
- cron: '0 0 * * *'
|
- cron: '0 0 * * *'
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
server:
|
server:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@ -31,7 +35,6 @@ jobs:
|
|||||||
include:
|
include:
|
||||||
- build_type: Release
|
- build_type: Release
|
||||||
sanitizer: ""
|
sanitizer: ""
|
||||||
disabled_on_pr: true
|
|
||||||
fail-fast: false # While -DLLAMA_SANITIZE_THREAD=ON is broken
|
fail-fast: false # While -DLLAMA_SANITIZE_THREAD=ON is broken
|
||||||
|
|
||||||
container:
|
container:
|
||||||
|
4
.github/workflows/zig-build.yml
vendored
4
.github/workflows/zig-build.yml
vendored
@ -6,6 +6,10 @@ on:
|
|||||||
branches:
|
branches:
|
||||||
- master
|
- master
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
strategy:
|
strategy:
|
||||||
|
5
.gitignore
vendored
5
.gitignore
vendored
@ -50,6 +50,7 @@ models-mnt
|
|||||||
/embedding
|
/embedding
|
||||||
/gguf
|
/gguf
|
||||||
/gguf-llama-simple
|
/gguf-llama-simple
|
||||||
|
/gguf-split
|
||||||
/gritlm
|
/gritlm
|
||||||
/imatrix
|
/imatrix
|
||||||
/infill
|
/infill
|
||||||
@ -58,6 +59,9 @@ models-mnt
|
|||||||
/llava-cli
|
/llava-cli
|
||||||
/lookahead
|
/lookahead
|
||||||
/lookup
|
/lookup
|
||||||
|
/lookup-create
|
||||||
|
/lookup-merge
|
||||||
|
/lookup-stats
|
||||||
/main
|
/main
|
||||||
/metal
|
/metal
|
||||||
/passkey
|
/passkey
|
||||||
@ -73,6 +77,7 @@ models-mnt
|
|||||||
/batched-bench
|
/batched-bench
|
||||||
/export-lora
|
/export-lora
|
||||||
/finetune
|
/finetune
|
||||||
|
/retrieval
|
||||||
/speculative
|
/speculative
|
||||||
/parallel
|
/parallel
|
||||||
/train-text-from-scratch
|
/train-text-from-scratch
|
||||||
|
@ -89,8 +89,8 @@ endif()
|
|||||||
option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON)
|
option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON)
|
||||||
option(LLAMA_BLAS "llama: use BLAS" OFF)
|
option(LLAMA_BLAS "llama: use BLAS" OFF)
|
||||||
set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
|
set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
|
||||||
option(LLAMA_CUBLAS "llama: use CUDA" OFF)
|
option(LLAMA_CUDA "llama: use CUDA" OFF)
|
||||||
#option(LLAMA_CUDA_CUBLAS "llama: use cuBLAS for prompt processing" OFF)
|
option(LLAMA_CUBLAS "llama: use CUDA (deprecated, use LLAMA_CUDA)" OFF)
|
||||||
option(LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF)
|
option(LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF)
|
||||||
option(LLAMA_CUDA_FORCE_MMQ "llama: use mmq kernels instead of cuBLAS" OFF)
|
option(LLAMA_CUDA_FORCE_MMQ "llama: use mmq kernels instead of cuBLAS" OFF)
|
||||||
set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
|
set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
|
||||||
@ -360,18 +360,25 @@ if (LLAMA_QKK_64)
|
|||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (LLAMA_CUBLAS)
|
if (LLAMA_CUBLAS)
|
||||||
|
message(WARNING "LLAMA_CUBLAS is deprecated and will be removed in the future.\nUse LLAMA_CUDA instead")
|
||||||
|
set(LLAMA_CUDA ON)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if (LLAMA_CUDA)
|
||||||
cmake_minimum_required(VERSION 3.17)
|
cmake_minimum_required(VERSION 3.17)
|
||||||
|
|
||||||
find_package(CUDAToolkit)
|
find_package(CUDAToolkit)
|
||||||
if (CUDAToolkit_FOUND)
|
if (CUDAToolkit_FOUND)
|
||||||
message(STATUS "cuBLAS found")
|
message(STATUS "CUDA found")
|
||||||
|
|
||||||
enable_language(CUDA)
|
enable_language(CUDA)
|
||||||
|
|
||||||
set(GGML_HEADERS_CUDA ggml-cuda.h)
|
set(GGML_HEADERS_CUDA ggml-cuda.h)
|
||||||
set(GGML_SOURCES_CUDA ggml-cuda.cu)
|
|
||||||
|
|
||||||
add_compile_definitions(GGML_USE_CUBLAS)
|
file(GLOB GGML_SOURCES_CUDA "ggml-cuda/*.cu")
|
||||||
|
list(APPEND GGML_SOURCES_CUDA "ggml-cuda.cu")
|
||||||
|
|
||||||
|
add_compile_definitions(GGML_USE_CUDA)
|
||||||
if (LLAMA_CUDA_FORCE_DMMV)
|
if (LLAMA_CUDA_FORCE_DMMV)
|
||||||
add_compile_definitions(GGML_CUDA_FORCE_DMMV)
|
add_compile_definitions(GGML_CUDA_FORCE_DMMV)
|
||||||
endif()
|
endif()
|
||||||
@ -420,7 +427,7 @@ if (LLAMA_CUBLAS)
|
|||||||
message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
|
message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
|
||||||
|
|
||||||
else()
|
else()
|
||||||
message(WARNING "cuBLAS not found")
|
message(WARNING "CUDA not found")
|
||||||
endif()
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
@ -519,9 +526,11 @@ if (LLAMA_HIPBLAS)
|
|||||||
message(STATUS "HIP and hipBLAS found")
|
message(STATUS "HIP and hipBLAS found")
|
||||||
|
|
||||||
set(GGML_HEADERS_ROCM ggml-cuda.h)
|
set(GGML_HEADERS_ROCM ggml-cuda.h)
|
||||||
set(GGML_SOURCES_ROCM ggml-cuda.cu)
|
|
||||||
|
|
||||||
add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUBLAS)
|
file(GLOB GGML_SOURCES_ROCM "ggml-cuda/*.cu")
|
||||||
|
list(APPEND GGML_SOURCES_ROCM "ggml-cuda.cu")
|
||||||
|
|
||||||
|
add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUDA)
|
||||||
|
|
||||||
if (LLAMA_HIP_UMA)
|
if (LLAMA_HIP_UMA)
|
||||||
add_compile_definitions(GGML_HIP_UMA)
|
add_compile_definitions(GGML_HIP_UMA)
|
||||||
@ -543,7 +552,7 @@ if (LLAMA_HIPBLAS)
|
|||||||
add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
|
add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
|
||||||
add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
|
add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
|
||||||
|
|
||||||
set_source_files_properties(ggml-cuda.cu PROPERTIES LANGUAGE CXX)
|
set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX)
|
||||||
|
|
||||||
if (LLAMA_STATIC)
|
if (LLAMA_STATIC)
|
||||||
message(FATAL_ERROR "Static linking not supported for HIP/ROCm")
|
message(FATAL_ERROR "Static linking not supported for HIP/ROCm")
|
||||||
@ -826,7 +835,7 @@ endif()
|
|||||||
|
|
||||||
set(CUDA_CXX_FLAGS "")
|
set(CUDA_CXX_FLAGS "")
|
||||||
|
|
||||||
if (LLAMA_CUBLAS)
|
if (LLAMA_CUDA)
|
||||||
set(CUDA_FLAGS -use_fast_math)
|
set(CUDA_FLAGS -use_fast_math)
|
||||||
|
|
||||||
if (LLAMA_FATAL_WARNINGS)
|
if (LLAMA_FATAL_WARNINGS)
|
||||||
@ -1051,7 +1060,7 @@ endif()
|
|||||||
add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:${ARCH_FLAGS}>")
|
add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:${ARCH_FLAGS}>")
|
||||||
add_compile_options("$<$<COMPILE_LANGUAGE:C>:${ARCH_FLAGS}>")
|
add_compile_options("$<$<COMPILE_LANGUAGE:C>:${ARCH_FLAGS}>")
|
||||||
|
|
||||||
if (LLAMA_CUBLAS)
|
if (LLAMA_CUDA)
|
||||||
list(APPEND CUDA_CXX_FLAGS ${ARCH_FLAGS})
|
list(APPEND CUDA_CXX_FLAGS ${ARCH_FLAGS})
|
||||||
list(JOIN CUDA_CXX_FLAGS " " CUDA_CXX_FLAGS_JOINED) # pass host compiler flags as a single argument
|
list(JOIN CUDA_CXX_FLAGS " " CUDA_CXX_FLAGS_JOINED) # pass host compiler flags as a single argument
|
||||||
if (NOT CUDA_CXX_FLAGS_JOINED STREQUAL "")
|
if (NOT CUDA_CXX_FLAGS_JOINED STREQUAL "")
|
||||||
@ -1161,6 +1170,7 @@ add_library(llama
|
|||||||
llama.h
|
llama.h
|
||||||
unicode.h
|
unicode.h
|
||||||
unicode.cpp
|
unicode.cpp
|
||||||
|
unicode-data.cpp
|
||||||
)
|
)
|
||||||
|
|
||||||
target_include_directories(llama PUBLIC .)
|
target_include_directories(llama PUBLIC .)
|
||||||
@ -1256,6 +1266,12 @@ if (LLAMA_METAL)
|
|||||||
GROUP_READ
|
GROUP_READ
|
||||||
WORLD_READ
|
WORLD_READ
|
||||||
DESTINATION ${CMAKE_INSTALL_BINDIR})
|
DESTINATION ${CMAKE_INSTALL_BINDIR})
|
||||||
|
if (NOT LLAMA_METAL_EMBED_LIBRARY)
|
||||||
|
install(
|
||||||
|
FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
|
||||||
|
DESTINATION ${CMAKE_INSTALL_BINDIR}
|
||||||
|
)
|
||||||
|
endif()
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
#
|
#
|
||||||
|
75
Makefile
75
Makefile
@ -1,8 +1,8 @@
|
|||||||
# Define the default target now so that it is always the first target
|
# Define the default target now so that it is always the first target
|
||||||
BUILD_TARGETS = \
|
BUILD_TARGETS = \
|
||||||
main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
|
main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
|
||||||
simple batched batched-bench save-load-state server gguf llama-bench libllava.a llava-cli baby-llama beam-search \
|
simple batched batched-bench save-load-state server gguf gguf-split llama-bench libllava.a llava-cli baby-llama beam-search \
|
||||||
speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey gritlm tests/test-c.o
|
retrieval speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey gritlm tests/test-c.o
|
||||||
|
|
||||||
# Binaries only useful for tests
|
# Binaries only useful for tests
|
||||||
TEST_TARGETS = \
|
TEST_TARGETS = \
|
||||||
@ -390,14 +390,20 @@ ifdef LLAMA_BLIS
|
|||||||
endif # LLAMA_BLIS
|
endif # LLAMA_BLIS
|
||||||
|
|
||||||
ifdef LLAMA_CUBLAS
|
ifdef LLAMA_CUBLAS
|
||||||
|
# LLAMA_CUBLAS is deprecated and will be removed in the future
|
||||||
|
LLAMA_CUDA := 1
|
||||||
|
endif
|
||||||
|
|
||||||
|
ifdef LLAMA_CUDA
|
||||||
ifneq ('', '$(wildcard /opt/cuda)')
|
ifneq ('', '$(wildcard /opt/cuda)')
|
||||||
CUDA_PATH ?= /opt/cuda
|
CUDA_PATH ?= /opt/cuda
|
||||||
else
|
else
|
||||||
CUDA_PATH ?= /usr/local/cuda
|
CUDA_PATH ?= /usr/local/cuda
|
||||||
endif
|
endif
|
||||||
MK_CPPFLAGS += -DGGML_USE_CUBLAS -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
|
MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include
|
||||||
MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
|
MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
|
||||||
OBJS += ggml-cuda.o
|
OBJS += ggml-cuda.o
|
||||||
|
OBJS += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
|
||||||
MK_NVCCFLAGS += -use_fast_math
|
MK_NVCCFLAGS += -use_fast_math
|
||||||
ifdef LLAMA_FATAL_WARNINGS
|
ifdef LLAMA_FATAL_WARNINGS
|
||||||
MK_NVCCFLAGS += -Werror all-warnings
|
MK_NVCCFLAGS += -Werror all-warnings
|
||||||
@ -458,13 +464,24 @@ endif # LLAMA_CUDA_NO_PEER_COPY
|
|||||||
ifdef LLAMA_CUDA_CCBIN
|
ifdef LLAMA_CUDA_CCBIN
|
||||||
MK_NVCCFLAGS += -ccbin $(LLAMA_CUDA_CCBIN)
|
MK_NVCCFLAGS += -ccbin $(LLAMA_CUDA_CCBIN)
|
||||||
endif
|
endif
|
||||||
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml-common.h
|
|
||||||
ifdef JETSON_EOL_MODULE_DETECT
|
ifdef JETSON_EOL_MODULE_DETECT
|
||||||
$(NVCC) -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I/usr/local/cuda/targets/aarch64-linux/include -std=c++11 -O3 $(NVCCFLAGS) $(CPPFLAGS) -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@
|
define NVCC_COMPILE
|
||||||
|
$(NVCC) -I. -Icommon -D_XOPEN_SOURCE=600 -D_GNU_SOURCE -DNDEBUG -DGGML_USE_CUDA -I/usr/local/cuda/include -I/opt/cuda/include -I/usr/local/cuda/targets/aarch64-linux/include -std=c++11 -O3 $(NVCCFLAGS) $(CPPFLAGS) -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@
|
||||||
|
endef # NVCC_COMPILE
|
||||||
else
|
else
|
||||||
|
define NVCC_COMPILE
|
||||||
$(NVCC) $(NVCCFLAGS) $(CPPFLAGS) -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@
|
$(NVCC) $(NVCCFLAGS) $(CPPFLAGS) -Xcompiler "$(CUDA_CXXFLAGS)" -c $< -o $@
|
||||||
|
endef # NVCC_COMPILE
|
||||||
endif # JETSON_EOL_MODULE_DETECT
|
endif # JETSON_EOL_MODULE_DETECT
|
||||||
endif # LLAMA_CUBLAS
|
|
||||||
|
ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/common.cuh
|
||||||
|
$(NVCC_COMPILE)
|
||||||
|
|
||||||
|
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh)
|
||||||
|
$(NVCC_COMPILE)
|
||||||
|
|
||||||
|
endif # LLAMA_CUDA
|
||||||
|
|
||||||
ifdef LLAMA_CLBLAST
|
ifdef LLAMA_CLBLAST
|
||||||
|
|
||||||
@ -510,7 +527,6 @@ ggml-vulkan.o: ggml-vulkan.cpp ggml-vulkan.h
|
|||||||
endif # LLAMA_VULKAN
|
endif # LLAMA_VULKAN
|
||||||
|
|
||||||
ifdef LLAMA_HIPBLAS
|
ifdef LLAMA_HIPBLAS
|
||||||
|
|
||||||
ifeq ($(wildcard /opt/rocm),)
|
ifeq ($(wildcard /opt/rocm),)
|
||||||
ROCM_PATH ?= /usr
|
ROCM_PATH ?= /usr
|
||||||
GPU_TARGETS ?= $(shell $(shell which amdgpu-arch))
|
GPU_TARGETS ?= $(shell $(shell which amdgpu-arch))
|
||||||
@ -522,7 +538,7 @@ ifdef LLAMA_HIPBLAS
|
|||||||
LLAMA_CUDA_DMMV_X ?= 32
|
LLAMA_CUDA_DMMV_X ?= 32
|
||||||
LLAMA_CUDA_MMV_Y ?= 1
|
LLAMA_CUDA_MMV_Y ?= 1
|
||||||
LLAMA_CUDA_KQUANTS_ITER ?= 2
|
LLAMA_CUDA_KQUANTS_ITER ?= 2
|
||||||
MK_CPPFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUBLAS
|
MK_CPPFLAGS += -DGGML_USE_HIPBLAS -DGGML_USE_CUDA
|
||||||
ifdef LLAMA_HIP_UMA
|
ifdef LLAMA_HIP_UMA
|
||||||
MK_CPPFLAGS += -DGGML_HIP_UMA
|
MK_CPPFLAGS += -DGGML_HIP_UMA
|
||||||
endif # LLAMA_HIP_UMA
|
endif # LLAMA_HIP_UMA
|
||||||
@ -539,8 +555,13 @@ ifdef LLAMA_CUDA_NO_PEER_COPY
|
|||||||
HIPFLAGS += -DGGML_CUDA_NO_PEER_COPY
|
HIPFLAGS += -DGGML_CUDA_NO_PEER_COPY
|
||||||
endif # LLAMA_CUDA_NO_PEER_COPY
|
endif # LLAMA_CUDA_NO_PEER_COPY
|
||||||
OBJS += ggml-cuda.o
|
OBJS += ggml-cuda.o
|
||||||
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
|
OBJS += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
|
||||||
|
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh)
|
||||||
$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
|
$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
|
||||||
|
|
||||||
|
ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/common.cuh
|
||||||
|
$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
|
||||||
|
|
||||||
endif # LLAMA_HIPBLAS
|
endif # LLAMA_HIPBLAS
|
||||||
|
|
||||||
ifdef LLAMA_METAL
|
ifdef LLAMA_METAL
|
||||||
@ -593,7 +614,7 @@ override NVCCFLAGS := $(MK_NVCCFLAGS) $(NVCCFLAGS)
|
|||||||
override LDFLAGS := $(MK_LDFLAGS) $(LDFLAGS)
|
override LDFLAGS := $(MK_LDFLAGS) $(LDFLAGS)
|
||||||
|
|
||||||
# identify CUDA host compiler
|
# identify CUDA host compiler
|
||||||
ifdef LLAMA_CUBLAS
|
ifdef LLAMA_CUDA
|
||||||
GF_CC := $(NVCC) $(NVCCFLAGS) 2>/dev/null .c -Xcompiler
|
GF_CC := $(NVCC) $(NVCCFLAGS) 2>/dev/null .c -Xcompiler
|
||||||
include scripts/get-flags.mk
|
include scripts/get-flags.mk
|
||||||
CUDA_CXXFLAGS := $(BASE_CXXFLAGS) $(GF_CXXFLAGS) -Wno-pedantic
|
CUDA_CXXFLAGS := $(BASE_CXXFLAGS) $(GF_CXXFLAGS) -Wno-pedantic
|
||||||
@ -618,7 +639,7 @@ $(info I NVCCFLAGS: $(NVCCFLAGS))
|
|||||||
$(info I LDFLAGS: $(LDFLAGS))
|
$(info I LDFLAGS: $(LDFLAGS))
|
||||||
$(info I CC: $(shell $(CC) --version | head -n 1))
|
$(info I CC: $(shell $(CC) --version | head -n 1))
|
||||||
$(info I CXX: $(shell $(CXX) --version | head -n 1))
|
$(info I CXX: $(shell $(CXX) --version | head -n 1))
|
||||||
ifdef LLAMA_CUBLAS
|
ifdef LLAMA_CUDA
|
||||||
$(info I NVCC: $(shell $(NVCC) --version | tail -n 1))
|
$(info I NVCC: $(shell $(NVCC) --version | tail -n 1))
|
||||||
CUDA_VERSION := $(shell $(NVCC) --version | grep -oP 'release (\K[0-9]+\.[0-9])')
|
CUDA_VERSION := $(shell $(NVCC) --version | grep -oP 'release (\K[0-9]+\.[0-9])')
|
||||||
ifeq ($(shell awk -v "v=$(CUDA_VERSION)" 'BEGIN { print (v < 11.7) }'),1)
|
ifeq ($(shell awk -v "v=$(CUDA_VERSION)" 'BEGIN { print (v < 11.7) }'),1)
|
||||||
@ -628,9 +649,16 @@ $(error I ERROR: For CUDA versions < 11.7 a target CUDA architecture must be exp
|
|||||||
endif # CUDA_POWER_ARCH
|
endif # CUDA_POWER_ARCH
|
||||||
endif # CUDA_DOCKER_ARCH
|
endif # CUDA_DOCKER_ARCH
|
||||||
endif # eq ($(shell echo "$(CUDA_VERSION) < 11.7" | bc),1)
|
endif # eq ($(shell echo "$(CUDA_VERSION) < 11.7" | bc),1)
|
||||||
endif # LLAMA_CUBLAS
|
endif # LLAMA_CUDA
|
||||||
$(info )
|
$(info )
|
||||||
|
|
||||||
|
ifdef LLAMA_CUBLAS
|
||||||
|
$(info !!!!)
|
||||||
|
$(info LLAMA_CUBLAS is deprecated and will be removed in the future. Use LLAMA_CUDA instead.)
|
||||||
|
$(info !!!!)
|
||||||
|
$(info )
|
||||||
|
endif
|
||||||
|
|
||||||
#
|
#
|
||||||
# Build library
|
# Build library
|
||||||
#
|
#
|
||||||
@ -650,7 +678,10 @@ ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h ggml-common.h
|
|||||||
unicode.o: unicode.cpp unicode.h
|
unicode.o: unicode.cpp unicode.h
|
||||||
$(CXX) $(CXXFLAGS) -c $< -o $@
|
$(CXX) $(CXXFLAGS) -c $< -o $@
|
||||||
|
|
||||||
OBJS += ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o
|
unicode-data.o: unicode-data.cpp unicode-data.h
|
||||||
|
$(CXX) $(CXXFLAGS) -c $< -o $@
|
||||||
|
|
||||||
|
OBJS += ggml-alloc.o ggml-backend.o ggml-quants.o unicode.o unicode-data.o
|
||||||
|
|
||||||
llama.o: llama.cpp unicode.h ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml-metal.h llama.h
|
llama.o: llama.cpp unicode.h ggml.h ggml-alloc.h ggml-backend.h ggml-cuda.h ggml-metal.h llama.h
|
||||||
$(CXX) $(CXXFLAGS) -c $< -o $@
|
$(CXX) $(CXXFLAGS) -c $< -o $@
|
||||||
@ -676,6 +707,9 @@ json-schema-to-grammar.o: common/json-schema-to-grammar.cpp common/json-schema-t
|
|||||||
train.o: common/train.cpp common/train.h
|
train.o: common/train.cpp common/train.h
|
||||||
$(CXX) $(CXXFLAGS) -c $< -o $@
|
$(CXX) $(CXXFLAGS) -c $< -o $@
|
||||||
|
|
||||||
|
ngram-cache.o: common/ngram-cache.cpp common/ngram-cache.h
|
||||||
|
$(CXX) $(CXXFLAGS) -c $< -o $@
|
||||||
|
|
||||||
libllama.so: llama.o ggml.o $(OBJS)
|
libllama.so: llama.o ggml.o $(OBJS)
|
||||||
$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
|
$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
|
||||||
|
|
||||||
@ -683,7 +717,8 @@ libllama.a: llama.o ggml.o $(OBJS) $(COMMON_DEPS)
|
|||||||
ar rcs libllama.a llama.o ggml.o $(OBJS) $(COMMON_DEPS)
|
ar rcs libllama.a llama.o ggml.o $(OBJS) $(COMMON_DEPS)
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -vrf *.o tests/*.o *.so *.a *.dll benchmark-matmult common/build-info.cpp *.dot $(COV_TARGETS) $(BUILD_TARGETS) $(TEST_TARGETS)
|
rm -vrf *.o tests/*.o *.so *.a *.dll benchmark-matmult lookup-create lookup-merge lookup-stats common/build-info.cpp *.dot $(COV_TARGETS) $(BUILD_TARGETS) $(TEST_TARGETS)
|
||||||
|
rm -vrf ggml-cuda/*.o
|
||||||
find examples pocs -type f -name "*.o" -delete
|
find examples pocs -type f -name "*.o" -delete
|
||||||
|
|
||||||
#
|
#
|
||||||
@ -801,6 +836,10 @@ export-lora: examples/export-lora/export-lora.cpp ggml.o common/common.h $(OBJS)
|
|||||||
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
||||||
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
|
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
|
||||||
|
|
||||||
|
retrieval: examples/retrieval/retrieval.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
|
||||||
|
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
||||||
|
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
|
||||||
|
|
||||||
speculative: examples/speculative/speculative.cpp ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS)
|
speculative: examples/speculative/speculative.cpp ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS)
|
||||||
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
||||||
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
|
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
|
||||||
@ -813,9 +852,15 @@ lookahead: examples/lookahead/lookahead.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS
|
|||||||
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
||||||
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
|
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
|
||||||
|
|
||||||
lookup: examples/lookup/lookup.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
|
lookup: examples/lookup/lookup.cpp ggml.o llama.o ngram-cache.o $(COMMON_DEPS) $(OBJS)
|
||||||
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
||||||
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
|
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
|
||||||
|
$(CXX) $(CXXFLAGS) -c examples/lookup/lookup-create.cpp -o $(call GET_OBJ_FILE, examples/lookup/lookup-create.cpp)
|
||||||
|
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, examples/lookup/lookup-create.cpp) -o lookup-create $(LDFLAGS)
|
||||||
|
$(CXX) $(CXXFLAGS) -c examples/lookup/lookup-merge.cpp -o $(call GET_OBJ_FILE, examples/lookup/lookup-merge.cpp)
|
||||||
|
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, examples/lookup/lookup-merge.cpp) -o lookup-merge $(LDFLAGS)
|
||||||
|
$(CXX) $(CXXFLAGS) -c examples/lookup/lookup-stats.cpp -o $(call GET_OBJ_FILE, examples/lookup/lookup-stats.cpp)
|
||||||
|
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, examples/lookup/lookup-stats.cpp) -o lookup-stats $(LDFLAGS)
|
||||||
|
|
||||||
passkey: examples/passkey/passkey.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
|
passkey: examples/passkey/passkey.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
|
||||||
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
|
||||||
|
@ -32,6 +32,7 @@ let package = Package(
|
|||||||
"ggml.c",
|
"ggml.c",
|
||||||
"llama.cpp",
|
"llama.cpp",
|
||||||
"unicode.cpp",
|
"unicode.cpp",
|
||||||
|
"unicode-data.cpp",
|
||||||
"ggml-alloc.c",
|
"ggml-alloc.c",
|
||||||
"ggml-backend.c",
|
"ggml-backend.c",
|
||||||
"ggml-quants.c",
|
"ggml-quants.c",
|
||||||
|
23
README.md
23
README.md
@ -10,6 +10,7 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)
|
|||||||
|
|
||||||
### Recent API changes
|
### Recent API changes
|
||||||
|
|
||||||
|
- [2024 Mar 26] Logits and embeddings API updated for compactness https://github.com/ggerganov/llama.cpp/pull/6122
|
||||||
- [2024 Mar 13] Add `llama_synchronize()` + `llama_context_params.n_ubatch` https://github.com/ggerganov/llama.cpp/pull/6017
|
- [2024 Mar 13] Add `llama_synchronize()` + `llama_context_params.n_ubatch` https://github.com/ggerganov/llama.cpp/pull/6017
|
||||||
- [2024 Mar 8] `llama_kv_cache_seq_rm()` returns a `bool` instead of `void`, and new `llama_n_seq_max()` returns the upper limit of acceptable `seq_id` in batches (relevant when dealing with multiple sequences) https://github.com/ggerganov/llama.cpp/pull/5328
|
- [2024 Mar 8] `llama_kv_cache_seq_rm()` returns a `bool` instead of `void`, and new `llama_n_seq_max()` returns the upper limit of acceptable `seq_id` in batches (relevant when dealing with multiple sequences) https://github.com/ggerganov/llama.cpp/pull/5328
|
||||||
- [2024 Mar 4] Embeddings API updated https://github.com/ggerganov/llama.cpp/pull/5796
|
- [2024 Mar 4] Embeddings API updated https://github.com/ggerganov/llama.cpp/pull/5796
|
||||||
@ -22,6 +23,7 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)
|
|||||||
- Looking for contributions to add Deepseek support: https://github.com/ggerganov/llama.cpp/issues/5981
|
- Looking for contributions to add Deepseek support: https://github.com/ggerganov/llama.cpp/issues/5981
|
||||||
- Quantization blind testing: https://github.com/ggerganov/llama.cpp/discussions/5962
|
- Quantization blind testing: https://github.com/ggerganov/llama.cpp/discussions/5962
|
||||||
- Initial Mamba support has been added: https://github.com/ggerganov/llama.cpp/pull/5328
|
- Initial Mamba support has been added: https://github.com/ggerganov/llama.cpp/pull/5328
|
||||||
|
- Support loading sharded model, using `gguf-split` CLI https://github.com/ggerganov/llama.cpp/pull/6187
|
||||||
|
|
||||||
----
|
----
|
||||||
|
|
||||||
@ -146,6 +148,7 @@ Typically finetunes of the base models below are supported as well.
|
|||||||
- Java: [kherud/java-llama.cpp](https://github.com/kherud/java-llama.cpp)
|
- Java: [kherud/java-llama.cpp](https://github.com/kherud/java-llama.cpp)
|
||||||
- Zig: [deins/llama.cpp.zig](https://github.com/Deins/llama.cpp.zig)
|
- Zig: [deins/llama.cpp.zig](https://github.com/Deins/llama.cpp.zig)
|
||||||
- Flutter/Dart: [netdur/llama_cpp_dart](https://github.com/netdur/llama_cpp_dart)
|
- Flutter/Dart: [netdur/llama_cpp_dart](https://github.com/netdur/llama_cpp_dart)
|
||||||
|
- PHP (API bindings and features built on top of llama.cpp): [distantmagic/resonance](https://github.com/distantmagic/resonance) [(more info)](https://github.com/ggerganov/llama.cpp/pull/6326)
|
||||||
|
|
||||||
**UI:**
|
**UI:**
|
||||||
|
|
||||||
@ -447,30 +450,27 @@ Building the program with BLAS support may lead to some performance improvements
|
|||||||
|
|
||||||
Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information.
|
Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information.
|
||||||
|
|
||||||
- #### cuBLAS
|
- #### CUDA
|
||||||
|
|
||||||
This provides BLAS acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads).
|
This provides GPU acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads).
|
||||||
|
|
||||||
For Jetson user, if you have Jetson Orin, you can try this: [Offical Support](https://www.jetson-ai-lab.com/tutorial_text-generation.html). If you are using an old model(nano/TX2), need some additional operations before compiling.
|
For Jetson user, if you have Jetson Orin, you can try this: [Offical Support](https://www.jetson-ai-lab.com/tutorial_text-generation.html). If you are using an old model(nano/TX2), need some additional operations before compiling.
|
||||||
|
|
||||||
- Using `make`:
|
- Using `make`:
|
||||||
```bash
|
```bash
|
||||||
make LLAMA_CUBLAS=1
|
make LLAMA_CUDA=1
|
||||||
```
|
```
|
||||||
- Using `CMake`:
|
- Using `CMake`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
mkdir build
|
mkdir build
|
||||||
cd build
|
cd build
|
||||||
cmake .. -DLLAMA_CUBLAS=ON
|
cmake .. -DLLAMA_CUDA=ON
|
||||||
cmake --build . --config Release
|
cmake --build . --config Release
|
||||||
```
|
```
|
||||||
|
|
||||||
The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used. The following compilation options are also available to tweak performance:
|
The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used. The following compilation options are also available to tweak performance:
|
||||||
|
|
||||||
<!---
|
|
||||||
| LLAMA_CUDA_CUBLAS | Boolean | false | Use cuBLAS instead of custom CUDA kernels for prompt processing. Faster for all quantization formats except for q4_0 and q8_0, especially for k-quants. Increases VRAM usage (700 MiB for 7b, 970 MiB for 13b, 1430 MiB for 33b). |
|
|
||||||
--->
|
|
||||||
| Option | Legal values | Default | Description |
|
| Option | Legal values | Default | Description |
|
||||||
|--------------------------------|------------------------|---------|-------------|
|
|--------------------------------|------------------------|---------|-------------|
|
||||||
| LLAMA_CUDA_FORCE_DMMV | Boolean | false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
|
| LLAMA_CUDA_FORCE_DMMV | Boolean | false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
|
||||||
@ -632,6 +632,15 @@ Building the program with BLAS support may lead to some performance improvements
|
|||||||
|
|
||||||
- #### Vulkan
|
- #### Vulkan
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
>
|
||||||
|
> Vulkan support has been broken in https://github.com/ggerganov/llama.cpp/pull/6122
|
||||||
|
> due to relying on `GGML_OP_GET_ROWS` which is not yet properly supported by the Vulkan backend,
|
||||||
|
> but should be fixed relatively soon (possibly in https://github.com/ggerganov/llama.cpp/pull/6155
|
||||||
|
> (ref: https://github.com/ggerganov/llama.cpp/pull/6122#issuecomment-2015327635)).
|
||||||
|
>
|
||||||
|
> Meanwhile, if you want to use the Vulkan backend, you should use the commit right before the breaking change, https://github.com/ggerganov/llama.cpp/commit/55c1b2a3bbd470e9e2a3a0618b92cf64a885f806
|
||||||
|
|
||||||
**With docker**:
|
**With docker**:
|
||||||
|
|
||||||
You don't need to install Vulkan SDK. It will be installed inside the container.
|
You don't need to install Vulkan SDK. It will be installed inside the container.
|
||||||
|
15
build.zig
15
build.zig
@ -116,6 +116,7 @@ pub fn build(b: *std.build.Builder) !void {
|
|||||||
const ggml_backend = make.obj("ggml-backend", "ggml-backend.c");
|
const ggml_backend = make.obj("ggml-backend", "ggml-backend.c");
|
||||||
const ggml_quants = make.obj("ggml-quants", "ggml-quants.c");
|
const ggml_quants = make.obj("ggml-quants", "ggml-quants.c");
|
||||||
const unicode = make.obj("unicode", "unicode.cpp");
|
const unicode = make.obj("unicode", "unicode.cpp");
|
||||||
|
const unicode_data = make.obj("unicode-data", "unicode-data.cpp");
|
||||||
const llama = make.obj("llama", "llama.cpp");
|
const llama = make.obj("llama", "llama.cpp");
|
||||||
const buildinfo = make.obj("common", "common/build-info.cpp");
|
const buildinfo = make.obj("common", "common/build-info.cpp");
|
||||||
const common = make.obj("common", "common/common.cpp");
|
const common = make.obj("common", "common/common.cpp");
|
||||||
@ -127,14 +128,14 @@ pub fn build(b: *std.build.Builder) !void {
|
|||||||
const clip = make.obj("clip", "examples/llava/clip.cpp");
|
const clip = make.obj("clip", "examples/llava/clip.cpp");
|
||||||
const llava = make.obj("llava", "examples/llava/llava.cpp");
|
const llava = make.obj("llava", "examples/llava/llava.cpp");
|
||||||
|
|
||||||
_ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, sampling, console, grammar_parser });
|
_ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, sampling, console, grammar_parser });
|
||||||
_ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
|
_ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo });
|
||||||
_ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
|
_ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo });
|
||||||
_ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
|
_ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo });
|
||||||
_ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, train });
|
_ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, train });
|
||||||
_ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, train });
|
_ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, train });
|
||||||
|
|
||||||
const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, sampling, grammar_parser, json_schema_to_grammar, clip, llava });
|
const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, sampling, grammar_parser, json_schema_to_grammar, clip, llava });
|
||||||
if (server.target.isWindows()) {
|
if (server.target.isWindows()) {
|
||||||
server.linkSystemLibrary("ws2_32");
|
server.linkSystemLibrary("ws2_32");
|
||||||
}
|
}
|
||||||
|
@ -40,7 +40,7 @@ if [ ! -z ${GG_BUILD_METAL} ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -z ${GG_BUILD_CUDA} ]; then
|
if [ ! -z ${GG_BUILD_CUDA} ]; then
|
||||||
CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_CUBLAS=1"
|
CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_CUDA=1"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ ! -z ${GG_BUILD_SYCL} ]; then
|
if [ ! -z ${GG_BUILD_SYCL} ]; then
|
||||||
@ -412,8 +412,8 @@ function gg_run_open_llama_7b_v2 {
|
|||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
(time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_CUBLAS=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
|
(time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
|
||||||
(time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log
|
(time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log
|
||||||
|
|
||||||
python3 ../convert.py ${path_models}
|
python3 ../convert.py ${path_models}
|
||||||
|
|
||||||
|
@ -65,6 +65,8 @@ add_library(${TARGET} STATIC
|
|||||||
json.hpp
|
json.hpp
|
||||||
train.h
|
train.h
|
||||||
train.cpp
|
train.cpp
|
||||||
|
ngram-cache.h
|
||||||
|
ngram-cache.cpp
|
||||||
)
|
)
|
||||||
|
|
||||||
if (BUILD_SHARED_LIBS)
|
if (BUILD_SHARED_LIBS)
|
||||||
|
@ -39,18 +39,21 @@
|
|||||||
#endif
|
#endif
|
||||||
#if defined(LLAMA_USE_CURL)
|
#if defined(LLAMA_USE_CURL)
|
||||||
#include <curl/curl.h>
|
#include <curl/curl.h>
|
||||||
|
#include <curl/easy.h>
|
||||||
|
#include <thread>
|
||||||
|
#include <future>
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(_MSC_VER)
|
#if defined(_MSC_VER)
|
||||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if (defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL))
|
#if (defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL))
|
||||||
#define GGML_USE_CUBLAS_SYCL
|
#define GGML_USE_CUDA_SYCL
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if (defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)) || defined(GGML_USE_VULKAN)
|
#if (defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)) || defined(GGML_USE_VULKAN)
|
||||||
#define GGML_USE_CUBLAS_SYCL_VULKAN
|
#define GGML_USE_CUDA_SYCL_VULKAN
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(LLAMA_USE_CURL)
|
#if defined(LLAMA_USE_CURL)
|
||||||
@ -61,7 +64,7 @@
|
|||||||
#else
|
#else
|
||||||
#include <sys/syslimits.h>
|
#include <sys/syslimits.h>
|
||||||
#endif
|
#endif
|
||||||
#define LLAMA_CURL_MAX_PATH_LENGTH PATH_MAX
|
#define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083
|
||||||
#define LLAMA_CURL_MAX_HEADER_LENGTH 256
|
#define LLAMA_CURL_MAX_HEADER_LENGTH 256
|
||||||
#endif // LLAMA_USE_CURL
|
#endif // LLAMA_USE_CURL
|
||||||
|
|
||||||
@ -154,7 +157,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param) {
|
bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param) {
|
||||||
llama_sampling_params& sparams = params.sparams;
|
llama_sampling_params& sparams = params.sparams;
|
||||||
|
|
||||||
if (arg == "-s" || arg == "--seed") {
|
if (arg == "-s" || arg == "--seed") {
|
||||||
@ -858,9 +861,9 @@ static bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg,
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
params.main_gpu = std::stoi(argv[i]);
|
params.main_gpu = std::stoi(argv[i]);
|
||||||
#ifndef GGML_USE_CUBLAS_SYCL
|
#ifndef GGML_USE_CUDA_SYCL
|
||||||
fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS/SYCL. Setting the main GPU has no effect.\n");
|
fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL. Setting the main GPU has no effect.\n");
|
||||||
#endif // GGML_USE_CUBLAS_SYCL
|
#endif // GGML_USE_CUDA_SYCL
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (arg == "--split-mode" || arg == "-sm") {
|
if (arg == "--split-mode" || arg == "-sm") {
|
||||||
@ -886,9 +889,9 @@ static bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg,
|
|||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
#ifndef GGML_USE_CUBLAS_SYCL
|
#ifndef GGML_USE_CUDA_SYCL
|
||||||
fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS/SYCL. Setting the split mode has no effect.\n");
|
fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL. Setting the split mode has no effect.\n");
|
||||||
#endif // GGML_USE_CUBLAS_SYCL
|
#endif // GGML_USE_CUDA_SYCL
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (arg == "--tensor-split" || arg == "-ts") {
|
if (arg == "--tensor-split" || arg == "-ts") {
|
||||||
@ -914,9 +917,9 @@ static bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg,
|
|||||||
params.tensor_split[i] = 0.0f;
|
params.tensor_split[i] = 0.0f;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#ifndef GGML_USE_CUBLAS_SYCL_VULKAN
|
#ifndef GGML_USE_CUDA_SYCL_VULKAN
|
||||||
fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS/SYCL/Vulkan. Setting a tensor split has no effect.\n");
|
fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting a tensor split has no effect.\n");
|
||||||
#endif // GGML_USE_CUBLAS_SYCL
|
#endif // GGML_USE_CUDA_SYCL_VULKAN
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (arg == "--no-mmap") {
|
if (arg == "--no-mmap") {
|
||||||
@ -963,6 +966,22 @@ static bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg,
|
|||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
if (arg == "-lcs" || arg == "--lookup-cache-static") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
params.lookup_cache_static = argv[i];
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (arg == "-lcd" || arg == "--lookup-cache-dynamic") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
params.lookup_cache_dynamic = argv[i];
|
||||||
|
return true;
|
||||||
|
}
|
||||||
if (arg == "--save-all-logits" || arg == "--kl-divergence-base") {
|
if (arg == "--save-all-logits" || arg == "--kl-divergence-base") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
@ -1043,8 +1062,8 @@ static bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg,
|
|||||||
params.ignore_eos = true;
|
params.ignore_eos = true;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (arg == "--no-penalize-nl") {
|
if (arg == "--penalize-nl") {
|
||||||
sparams.penalize_nl = false;
|
sparams.penalize_nl = true;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (arg == "-l" || arg == "--logit-bias") {
|
if (arg == "-l" || arg == "--logit-bias") {
|
||||||
@ -1220,9 +1239,11 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
|
|||||||
throw std::invalid_argument("error: unknown argument: " + arg);
|
throw std::invalid_argument("error: unknown argument: " + arg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (invalid_param) {
|
if (invalid_param) {
|
||||||
throw std::invalid_argument("error: invalid parameter for argument: " + arg);
|
throw std::invalid_argument("error: invalid parameter for argument: " + arg);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (params.prompt_cache_all &&
|
if (params.prompt_cache_all &&
|
||||||
(params.interactive || params.interactive_first ||
|
(params.interactive || params.interactive_first ||
|
||||||
params.instruct)) {
|
params.instruct)) {
|
||||||
@ -1230,6 +1251,11 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
|
|||||||
throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
|
throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// short-hand to avoid specifying --hf-file -> default it to --model
|
||||||
|
if (!params.hf_repo.empty() && params.hf_file.empty()) {
|
||||||
|
params.hf_file = params.model;
|
||||||
|
}
|
||||||
|
|
||||||
if (params.escape) {
|
if (params.escape) {
|
||||||
process_escapes(params.prompt);
|
process_escapes(params.prompt);
|
||||||
process_escapes(params.input_prefix);
|
process_escapes(params.input_prefix);
|
||||||
@ -1347,7 +1373,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
|
|||||||
printf(" -dt N, --defrag-thold N\n");
|
printf(" -dt N, --defrag-thold N\n");
|
||||||
printf(" KV cache defragmentation threshold (default: %.1f, < 0 - disabled)\n", params.defrag_thold);
|
printf(" KV cache defragmentation threshold (default: %.1f, < 0 - disabled)\n", params.defrag_thold);
|
||||||
printf(" --ignore-eos ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n");
|
printf(" --ignore-eos ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n");
|
||||||
printf(" --no-penalize-nl do not penalize newline token\n");
|
printf(" --penalize-nl penalize newline tokens\n");
|
||||||
printf(" --temp N temperature (default: %.1f)\n", (double)sparams.temp);
|
printf(" --temp N temperature (default: %.1f)\n", (double)sparams.temp);
|
||||||
printf(" --all-logits return logits for all tokens in the batch (default: disabled)\n");
|
printf(" --all-logits return logits for all tokens in the batch (default: disabled)\n");
|
||||||
printf(" --hellaswag compute HellaSwag score over random tasks from datafile supplied with -f\n");
|
printf(" --hellaswag compute HellaSwag score over random tasks from datafile supplied with -f\n");
|
||||||
@ -1429,6 +1455,10 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
|
|||||||
printf(" Hugging Face model file (default: unused)\n");
|
printf(" Hugging Face model file (default: unused)\n");
|
||||||
printf(" -ld LOGDIR, --logdir LOGDIR\n");
|
printf(" -ld LOGDIR, --logdir LOGDIR\n");
|
||||||
printf(" path under which to save YAML logs (no logging if unset)\n");
|
printf(" path under which to save YAML logs (no logging if unset)\n");
|
||||||
|
printf(" -lcs FNAME, --lookup-cache-static FNAME\n");
|
||||||
|
printf(" path to static lookup cache to use for lookup decoding (not updated by generation)\n");
|
||||||
|
printf(" -lcd FNAME, --lookup-cache-dynamic FNAME\n");
|
||||||
|
printf(" path to dynamic lookup cache to use for lookup decoding (updated by generation)\n");
|
||||||
printf(" --override-kv KEY=TYPE:VALUE\n");
|
printf(" --override-kv KEY=TYPE:VALUE\n");
|
||||||
printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
|
printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
|
||||||
printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
|
printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
|
||||||
@ -1675,27 +1705,13 @@ void llama_batch_add(
|
|||||||
|
|
||||||
#ifdef LLAMA_USE_CURL
|
#ifdef LLAMA_USE_CURL
|
||||||
|
|
||||||
struct llama_model * llama_load_model_from_url(
|
static bool llama_download_file(CURL * curl, const char * url, const char * path) {
|
||||||
const char * model_url,
|
bool force_download = false;
|
||||||
const char * path_model,
|
|
||||||
const struct llama_model_params & params) {
|
|
||||||
// Basic validation of the model_url
|
|
||||||
if (!model_url || strlen(model_url) == 0) {
|
|
||||||
fprintf(stderr, "%s: invalid model_url\n", __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize libcurl globally
|
|
||||||
auto curl = curl_easy_init();
|
|
||||||
|
|
||||||
if (!curl) {
|
|
||||||
fprintf(stderr, "%s: error initializing libcurl\n", __func__);
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the URL, allow to follow http redirection
|
// Set the URL, allow to follow http redirection
|
||||||
curl_easy_setopt(curl, CURLOPT_URL, model_url);
|
curl_easy_setopt(curl, CURLOPT_URL, url);
|
||||||
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
|
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
|
||||||
|
|
||||||
#if defined(_WIN32)
|
#if defined(_WIN32)
|
||||||
// CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
|
// CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
|
||||||
// operating system. Currently implemented under MS-Windows.
|
// operating system. Currently implemented under MS-Windows.
|
||||||
@ -1704,16 +1720,16 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
|
|
||||||
// Check if the file already exists locally
|
// Check if the file already exists locally
|
||||||
struct stat model_file_info;
|
struct stat model_file_info;
|
||||||
auto file_exists = (stat(path_model, &model_file_info) == 0);
|
auto file_exists = (stat(path, &model_file_info) == 0);
|
||||||
|
|
||||||
// If the file exists, check for ${path_model}.etag or ${path_model}.lastModified files
|
// If the file exists, check for ${path_model}.etag or ${path_model}.lastModified files
|
||||||
char etag[LLAMA_CURL_MAX_HEADER_LENGTH] = {0};
|
char etag[LLAMA_CURL_MAX_HEADER_LENGTH] = {0};
|
||||||
char etag_path[LLAMA_CURL_MAX_PATH_LENGTH] = {0};
|
char etag_path[PATH_MAX] = {0};
|
||||||
snprintf(etag_path, sizeof(etag_path), "%s.etag", path_model);
|
snprintf(etag_path, sizeof(etag_path), "%s.etag", path);
|
||||||
|
|
||||||
char last_modified[LLAMA_CURL_MAX_HEADER_LENGTH] = {0};
|
char last_modified[LLAMA_CURL_MAX_HEADER_LENGTH] = {0};
|
||||||
char last_modified_path[LLAMA_CURL_MAX_PATH_LENGTH] = {0};
|
char last_modified_path[PATH_MAX] = {0};
|
||||||
snprintf(last_modified_path, sizeof(last_modified_path), "%s.lastModified", path_model);
|
snprintf(last_modified_path, sizeof(last_modified_path), "%s.lastModified", path);
|
||||||
|
|
||||||
if (file_exists) {
|
if (file_exists) {
|
||||||
auto * f_etag = fopen(etag_path, "r");
|
auto * f_etag = fopen(etag_path, "r");
|
||||||
@ -1721,7 +1737,7 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
if (!fgets(etag, sizeof(etag), f_etag)) {
|
if (!fgets(etag, sizeof(etag), f_etag)) {
|
||||||
fprintf(stderr, "%s: unable to read file %s\n", __func__, etag_path);
|
fprintf(stderr, "%s: unable to read file %s\n", __func__, etag_path);
|
||||||
} else {
|
} else {
|
||||||
fprintf(stderr, "%s: previous model file found %s: %s\n", __func__, etag_path, etag);
|
fprintf(stderr, "%s: previous file found %s: %s\n", __func__, etag_path, etag);
|
||||||
}
|
}
|
||||||
fclose(f_etag);
|
fclose(f_etag);
|
||||||
}
|
}
|
||||||
@ -1731,7 +1747,7 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
if (!fgets(last_modified, sizeof(last_modified), f_last_modified)) {
|
if (!fgets(last_modified, sizeof(last_modified), f_last_modified)) {
|
||||||
fprintf(stderr, "%s: unable to read file %s\n", __func__, last_modified_path);
|
fprintf(stderr, "%s: unable to read file %s\n", __func__, last_modified_path);
|
||||||
} else {
|
} else {
|
||||||
fprintf(stderr, "%s: previous model file found %s: %s\n", __func__, last_modified_path,
|
fprintf(stderr, "%s: previous file found %s: %s\n", __func__, last_modified_path,
|
||||||
last_modified);
|
last_modified);
|
||||||
}
|
}
|
||||||
fclose(f_last_modified);
|
fclose(f_last_modified);
|
||||||
@ -1749,6 +1765,11 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t {
|
auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t {
|
||||||
llama_load_model_from_url_headers *headers = (llama_load_model_from_url_headers *) userdata;
|
llama_load_model_from_url_headers *headers = (llama_load_model_from_url_headers *) userdata;
|
||||||
|
|
||||||
|
// Convert header field name to lowercase
|
||||||
|
for (size_t i = 0; i < n_items && buffer[i] != ':'; ++i) {
|
||||||
|
buffer[i] = tolower(buffer[i]);
|
||||||
|
}
|
||||||
|
|
||||||
const char * etag_prefix = "etag: ";
|
const char * etag_prefix = "etag: ";
|
||||||
if (strncmp(buffer, etag_prefix, strlen(etag_prefix)) == 0) {
|
if (strncmp(buffer, etag_prefix, strlen(etag_prefix)) == 0) {
|
||||||
strncpy(headers->etag, buffer + strlen(etag_prefix), n_items - strlen(etag_prefix) - 2); // Remove CRLF
|
strncpy(headers->etag, buffer + strlen(etag_prefix), n_items - strlen(etag_prefix) - 2); // Remove CRLF
|
||||||
@ -1771,7 +1792,7 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
if (res != CURLE_OK) {
|
if (res != CURLE_OK) {
|
||||||
curl_easy_cleanup(curl);
|
curl_easy_cleanup(curl);
|
||||||
fprintf(stderr, "%s: curl_easy_perform() failed: %s\n", __func__, curl_easy_strerror(res));
|
fprintf(stderr, "%s: curl_easy_perform() failed: %s\n", __func__, curl_easy_strerror(res));
|
||||||
return NULL;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
long http_code = 0;
|
long http_code = 0;
|
||||||
@ -1779,30 +1800,34 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
if (http_code != 200) {
|
if (http_code != 200) {
|
||||||
// HEAD not supported, we don't know if the file has changed
|
// HEAD not supported, we don't know if the file has changed
|
||||||
// force trigger downloading
|
// force trigger downloading
|
||||||
file_exists = false;
|
force_download = true;
|
||||||
fprintf(stderr, "%s: HEAD invalid http status code received: %ld\n", __func__, http_code);
|
fprintf(stderr, "%s: HEAD invalid http status code received: %ld\n", __func__, http_code);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the ETag or the Last-Modified headers are different: trigger a new download
|
// If the ETag or the Last-Modified headers are different: trigger a new download
|
||||||
if (!file_exists || strcmp(etag, headers.etag) != 0 || strcmp(last_modified, headers.last_modified) != 0) {
|
bool should_download = !file_exists
|
||||||
char path_model_temporary[LLAMA_CURL_MAX_PATH_LENGTH] = {0};
|
|| force_download
|
||||||
snprintf(path_model_temporary, sizeof(path_model_temporary), "%s.downloadInProgress", path_model);
|
|| (strlen(headers.etag) > 0 && strcmp(etag, headers.etag) != 0)
|
||||||
|
|| (strlen(headers.last_modified) > 0 && strcmp(last_modified, headers.last_modified) != 0);
|
||||||
|
if (should_download) {
|
||||||
|
char path_temporary[PATH_MAX] = {0};
|
||||||
|
snprintf(path_temporary, sizeof(path_temporary), "%s.downloadInProgress", path);
|
||||||
if (file_exists) {
|
if (file_exists) {
|
||||||
fprintf(stderr, "%s: deleting previous downloaded model file: %s\n", __func__, path_model);
|
fprintf(stderr, "%s: deleting previous downloaded file: %s\n", __func__, path);
|
||||||
if (remove(path_model) != 0) {
|
if (remove(path) != 0) {
|
||||||
curl_easy_cleanup(curl);
|
curl_easy_cleanup(curl);
|
||||||
fprintf(stderr, "%s: unable to delete file: %s\n", __func__, path_model);
|
fprintf(stderr, "%s: unable to delete file: %s\n", __func__, path);
|
||||||
return NULL;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the output file
|
// Set the output file
|
||||||
auto * outfile = fopen(path_model_temporary, "wb");
|
auto * outfile = fopen(path_temporary, "wb");
|
||||||
if (!outfile) {
|
if (!outfile) {
|
||||||
curl_easy_cleanup(curl);
|
curl_easy_cleanup(curl);
|
||||||
fprintf(stderr, "%s: error opening local file for writing: %s\n", __func__, path_model);
|
fprintf(stderr, "%s: error opening local file for writing: %s\n", __func__, path);
|
||||||
return NULL;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * data, size_t size, size_t nmemb, void * fd);
|
typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * data, size_t size, size_t nmemb, void * fd);
|
||||||
@ -1816,15 +1841,30 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
// display download progress
|
// display download progress
|
||||||
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
|
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
|
||||||
|
|
||||||
|
// helper function to hide password in URL
|
||||||
|
auto llama_download_hide_password_in_url = [](const std::string & url) -> std::string {
|
||||||
|
std::size_t protocol_pos = url.find("://");
|
||||||
|
if (protocol_pos == std::string::npos) {
|
||||||
|
return url; // Malformed URL
|
||||||
|
}
|
||||||
|
|
||||||
|
std::size_t at_pos = url.find('@', protocol_pos + 3);
|
||||||
|
if (at_pos == std::string::npos) {
|
||||||
|
return url; // No password in URL
|
||||||
|
}
|
||||||
|
|
||||||
|
return url.substr(0, protocol_pos + 3) + "********" + url.substr(at_pos);
|
||||||
|
};
|
||||||
|
|
||||||
// start the download
|
// start the download
|
||||||
fprintf(stderr, "%s: downloading model from %s to %s (server_etag:%s, server_last_modified:%s)...\n", __func__,
|
fprintf(stderr, "%s: downloading from %s to %s (server_etag:%s, server_last_modified:%s)...\n", __func__,
|
||||||
model_url, path_model, headers.etag, headers.last_modified);
|
llama_download_hide_password_in_url(url).c_str(), path, headers.etag, headers.last_modified);
|
||||||
auto res = curl_easy_perform(curl);
|
auto res = curl_easy_perform(curl);
|
||||||
if (res != CURLE_OK) {
|
if (res != CURLE_OK) {
|
||||||
fclose(outfile);
|
fclose(outfile);
|
||||||
curl_easy_cleanup(curl);
|
curl_easy_cleanup(curl);
|
||||||
fprintf(stderr, "%s: curl_easy_perform() failed: %s\n", __func__, curl_easy_strerror(res));
|
fprintf(stderr, "%s: curl_easy_perform() failed: %s\n", __func__, curl_easy_strerror(res));
|
||||||
return NULL;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
long http_code = 0;
|
long http_code = 0;
|
||||||
@ -1833,7 +1873,7 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
fclose(outfile);
|
fclose(outfile);
|
||||||
curl_easy_cleanup(curl);
|
curl_easy_cleanup(curl);
|
||||||
fprintf(stderr, "%s: invalid http status code received: %ld\n", __func__, http_code);
|
fprintf(stderr, "%s: invalid http status code received: %ld\n", __func__, http_code);
|
||||||
return NULL;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clean up
|
// Clean up
|
||||||
@ -1845,7 +1885,7 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
if (etag_file) {
|
if (etag_file) {
|
||||||
fputs(headers.etag, etag_file);
|
fputs(headers.etag, etag_file);
|
||||||
fclose(etag_file);
|
fclose(etag_file);
|
||||||
fprintf(stderr, "%s: model etag saved %s: %s\n", __func__, etag_path, headers.etag);
|
fprintf(stderr, "%s: file etag saved %s: %s\n", __func__, etag_path, headers.etag);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1855,20 +1895,118 @@ struct llama_model * llama_load_model_from_url(
|
|||||||
if (last_modified_file) {
|
if (last_modified_file) {
|
||||||
fputs(headers.last_modified, last_modified_file);
|
fputs(headers.last_modified, last_modified_file);
|
||||||
fclose(last_modified_file);
|
fclose(last_modified_file);
|
||||||
fprintf(stderr, "%s: model last modified saved %s: %s\n", __func__, last_modified_path,
|
fprintf(stderr, "%s: file last modified saved %s: %s\n", __func__, last_modified_path,
|
||||||
headers.last_modified);
|
headers.last_modified);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (rename(path_model_temporary, path_model) != 0) {
|
if (rename(path_temporary, path) != 0) {
|
||||||
curl_easy_cleanup(curl);
|
curl_easy_cleanup(curl);
|
||||||
fprintf(stderr, "%s: unable to rename file: %s to %s\n", __func__, path_model_temporary, path_model);
|
fprintf(stderr, "%s: unable to rename file: %s to %s\n", __func__, path_temporary, path);
|
||||||
return NULL;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct llama_model * llama_load_model_from_url(
|
||||||
|
const char * model_url,
|
||||||
|
const char * path_model,
|
||||||
|
const struct llama_model_params & params) {
|
||||||
|
// Basic validation of the model_url
|
||||||
|
if (!model_url || strlen(model_url) == 0) {
|
||||||
|
fprintf(stderr, "%s: invalid model_url\n", __func__);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize libcurl
|
||||||
|
auto * curl = curl_easy_init();
|
||||||
|
|
||||||
|
if (!curl) {
|
||||||
|
fprintf(stderr, "%s: error initializing libcurl\n", __func__);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!curl) {
|
||||||
|
fprintf(stderr, "%s: error initializing libcurl\n", __func__);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!llama_download_file(curl, model_url, path_model)) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// check for additional GGUFs split to download
|
||||||
|
int n_split = 0;
|
||||||
|
{
|
||||||
|
struct gguf_init_params gguf_params = {
|
||||||
|
/*.no_alloc = */ true,
|
||||||
|
/*.ctx = */ NULL,
|
||||||
|
};
|
||||||
|
auto * ctx_gguf = gguf_init_from_file(path_model, gguf_params);
|
||||||
|
if (!ctx_gguf) {
|
||||||
|
fprintf(stderr, "\n%s: failed to load input GGUF from %s\n", __func__, path_model);
|
||||||
|
curl_easy_cleanup(curl);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto key_n_split = gguf_find_key(ctx_gguf, LLM_KV_SPLIT_COUNT);
|
||||||
|
if (key_n_split >= 0) {
|
||||||
|
n_split = gguf_get_val_u16(ctx_gguf, key_n_split);
|
||||||
|
}
|
||||||
|
|
||||||
|
gguf_free(ctx_gguf);
|
||||||
|
}
|
||||||
|
|
||||||
curl_easy_cleanup(curl);
|
curl_easy_cleanup(curl);
|
||||||
|
|
||||||
|
if (n_split > 1) {
|
||||||
|
char split_prefix[PATH_MAX] = {0};
|
||||||
|
char split_url_prefix[LLAMA_CURL_MAX_URL_LENGTH] = {0};
|
||||||
|
|
||||||
|
// Verify the first split file format
|
||||||
|
// and extract split URL and PATH prefixes
|
||||||
|
{
|
||||||
|
if (!llama_split_prefix(split_prefix, sizeof(split_prefix), path_model, 0, n_split)) {
|
||||||
|
fprintf(stderr, "\n%s: unexpected model file name: %s"
|
||||||
|
" n_split=%d\n", __func__, path_model, n_split);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url, 0, n_split)) {
|
||||||
|
fprintf(stderr, "\n%s: unexpected model url: %s"
|
||||||
|
" n_split=%d\n", __func__, model_url, n_split);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepare download in parallel
|
||||||
|
std::vector<std::future<bool>> futures_download;
|
||||||
|
for (int idx = 1; idx < n_split; idx++) {
|
||||||
|
futures_download.push_back(std::async(std::launch::async, [&split_prefix, &split_url_prefix, &n_split](int download_idx) -> bool {
|
||||||
|
char split_path[PATH_MAX] = {0};
|
||||||
|
llama_split_path(split_path, sizeof(split_path), split_prefix, download_idx, n_split);
|
||||||
|
|
||||||
|
char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
|
||||||
|
llama_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split);
|
||||||
|
|
||||||
|
auto * curl = curl_easy_init();
|
||||||
|
bool res = llama_download_file(curl, split_url, split_path);
|
||||||
|
curl_easy_cleanup(curl);
|
||||||
|
|
||||||
|
return res;
|
||||||
|
}, idx));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for all downloads to complete
|
||||||
|
for (auto & f : futures_download) {
|
||||||
|
if (!f.get()) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return llama_load_model_from_file(path_model, params);
|
return llama_load_model_from_file(path_model, params);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2249,7 +2387,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
|
|||||||
fprintf(stream, "cpu_has_avx512: %s\n", ggml_cpu_has_avx512() ? "true" : "false");
|
fprintf(stream, "cpu_has_avx512: %s\n", ggml_cpu_has_avx512() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_avx512_vbmi: %s\n", ggml_cpu_has_avx512_vbmi() ? "true" : "false");
|
fprintf(stream, "cpu_has_avx512_vbmi: %s\n", ggml_cpu_has_avx512_vbmi() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_avx512_vnni: %s\n", ggml_cpu_has_avx512_vnni() ? "true" : "false");
|
fprintf(stream, "cpu_has_avx512_vnni: %s\n", ggml_cpu_has_avx512_vnni() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_cublas: %s\n", ggml_cpu_has_cublas() ? "true" : "false");
|
fprintf(stream, "cpu_has_cuda: %s\n", ggml_cpu_has_cuda() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_vulkan: %s\n", ggml_cpu_has_vulkan() ? "true" : "false");
|
fprintf(stream, "cpu_has_vulkan: %s\n", ggml_cpu_has_vulkan() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_clblast: %s\n", ggml_cpu_has_clblast() ? "true" : "false");
|
fprintf(stream, "cpu_has_clblast: %s\n", ggml_cpu_has_clblast() ? "true" : "false");
|
||||||
fprintf(stream, "cpu_has_kompute: %s\n", ggml_cpu_has_kompute() ? "true" : "false");
|
fprintf(stream, "cpu_has_kompute: %s\n", ggml_cpu_has_kompute() ? "true" : "false");
|
||||||
@ -2351,7 +2489,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
|
|||||||
fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict);
|
fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict);
|
||||||
fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", sparams.n_probs);
|
fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", sparams.n_probs);
|
||||||
fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
|
fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
|
||||||
fprintf(stream, "no_penalize_nl: %s # default: false\n", !sparams.penalize_nl ? "true" : "false");
|
fprintf(stream, "penalize_nl: %s # default: false\n", sparams.penalize_nl ? "true" : "false");
|
||||||
fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
|
fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
|
||||||
fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);
|
fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);
|
||||||
fprintf(stream, "presence_penalty: %f # default: 0.0\n", sparams.penalty_present);
|
fprintf(stream, "presence_penalty: %f # default: 0.0\n", sparams.penalty_present);
|
||||||
|
@ -88,20 +88,22 @@ struct gpt_params {
|
|||||||
// // sampling parameters
|
// // sampling parameters
|
||||||
struct llama_sampling_params sparams;
|
struct llama_sampling_params sparams;
|
||||||
|
|
||||||
std::string model = "models/7B/ggml-model-f16.gguf"; // model path
|
std::string model = "models/7B/ggml-model-f16.gguf"; // model path
|
||||||
std::string model_draft = ""; // draft model for speculative decoding
|
std::string model_draft = ""; // draft model for speculative decoding
|
||||||
std::string model_alias = "unknown"; // model alias
|
std::string model_alias = "unknown"; // model alias
|
||||||
std::string model_url = ""; // model url to download
|
std::string model_url = ""; // model url to download
|
||||||
std::string hf_repo = ""; // HF repo
|
std::string hf_repo = ""; // HF repo
|
||||||
std::string hf_file = ""; // HF file
|
std::string hf_file = ""; // HF file
|
||||||
std::string prompt = "";
|
std::string prompt = "";
|
||||||
std::string prompt_file = ""; // store the external prompt file name
|
std::string prompt_file = ""; // store the external prompt file name
|
||||||
std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
|
std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
|
||||||
std::string input_prefix = ""; // string to prefix user inputs with
|
std::string input_prefix = ""; // string to prefix user inputs with
|
||||||
std::string input_suffix = ""; // string to suffix user inputs with
|
std::string input_suffix = ""; // string to suffix user inputs with
|
||||||
std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
|
std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
|
||||||
std::string logdir = ""; // directory in which to save YAML log files
|
std::string logdir = ""; // directory in which to save YAML log files
|
||||||
std::string logits_file = ""; // file for saving *all* logits
|
std::string lookup_cache_static = ""; // path of static ngram cache file for lookup decoding
|
||||||
|
std::string lookup_cache_dynamic = ""; // path of dynamic ngram cache file for lookup decoding
|
||||||
|
std::string logits_file = ""; // file for saving *all* logits
|
||||||
|
|
||||||
std::vector<llama_model_kv_override> kv_overrides;
|
std::vector<llama_model_kv_override> kv_overrides;
|
||||||
|
|
||||||
@ -169,6 +171,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
|
|||||||
|
|
||||||
void gpt_print_usage(int argc, char ** argv, const gpt_params & params);
|
void gpt_print_usage(int argc, char ** argv, const gpt_params & params);
|
||||||
|
|
||||||
|
bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param);
|
||||||
|
|
||||||
std::string get_system_info(const gpt_params & params);
|
std::string get_system_info(const gpt_params & params);
|
||||||
|
|
||||||
std::string gpt_random_prompt(std::mt19937 & rng);
|
std::string gpt_random_prompt(std::mt19937 & rng);
|
||||||
@ -304,3 +308,10 @@ struct llama_control_vector_load_info {
|
|||||||
// Load control vectors, scale each by strength, and add them together.
|
// Load control vectors, scale each by strength, and add them together.
|
||||||
// On error, returns {-1, empty}
|
// On error, returns {-1, empty}
|
||||||
llama_control_vector_data llama_control_vector_load(const std::vector<llama_control_vector_load_info> & load_infos);
|
llama_control_vector_data llama_control_vector_load(const std::vector<llama_control_vector_load_info> & load_infos);
|
||||||
|
|
||||||
|
//
|
||||||
|
// Split utils
|
||||||
|
//
|
||||||
|
static const char * const LLM_KV_SPLIT_NO = "split.no";
|
||||||
|
static const char * const LLM_KV_SPLIT_COUNT = "split.count";
|
||||||
|
static const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
|
||||||
|
@ -234,7 +234,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
|
|||||||
// INTERNAL, DO NOT USE
|
// INTERNAL, DO NOT USE
|
||||||
// USE LOG() INSTEAD
|
// USE LOG() INSTEAD
|
||||||
//
|
//
|
||||||
#ifndef _MSC_VER
|
#if !defined(_MSC_VER) or defined(__INTEL_LLVM_COMPILER)
|
||||||
#define LOG_IMPL(str, ...) \
|
#define LOG_IMPL(str, ...) \
|
||||||
do { \
|
do { \
|
||||||
if (LOG_TARGET != nullptr) \
|
if (LOG_TARGET != nullptr) \
|
||||||
@ -257,7 +257,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std::
|
|||||||
// INTERNAL, DO NOT USE
|
// INTERNAL, DO NOT USE
|
||||||
// USE LOG_TEE() INSTEAD
|
// USE LOG_TEE() INSTEAD
|
||||||
//
|
//
|
||||||
#ifndef _MSC_VER
|
#if !defined(_MSC_VER) or defined(__INTEL_LLVM_COMPILER)
|
||||||
#define LOG_TEE_IMPL(str, ...) \
|
#define LOG_TEE_IMPL(str, ...) \
|
||||||
do { \
|
do { \
|
||||||
if (LOG_TARGET != nullptr) \
|
if (LOG_TARGET != nullptr) \
|
||||||
@ -566,6 +566,7 @@ inline void log_print_usage()
|
|||||||
printf(" --log-new Create a separate new log file on start. "
|
printf(" --log-new Create a separate new log file on start. "
|
||||||
"Each log file will have unique name: \"<name>.<ID>.log\"\n");
|
"Each log file will have unique name: \"<name>.<ID>.log\"\n");
|
||||||
printf(" --log-append Don't truncate the old log file.\n");
|
printf(" --log-append Don't truncate the old log file.\n");
|
||||||
|
printf("\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
#define log_dump_cmdline(argc, argv) log_dump_cmdline_impl(argc, argv)
|
#define log_dump_cmdline(argc, argv) log_dump_cmdline_impl(argc, argv)
|
||||||
|
282
common/ngram-cache.cpp
Normal file
282
common/ngram-cache.cpp
Normal file
@ -0,0 +1,282 @@
|
|||||||
|
#include "ngram-cache.h"
|
||||||
|
#include "common.h"
|
||||||
|
#include "log.h"
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
#include <fstream>
|
||||||
|
|
||||||
|
void llama_ngram_cache_update(llama_ngram_cache & ngram_cache, int ngram_min, int ngram_max,
|
||||||
|
std::vector<llama_token> & inp, int nnew, bool print_progress) {
|
||||||
|
const int64_t t_start_ms = ggml_time_ms();
|
||||||
|
const int64_t inp_size = inp.size();
|
||||||
|
|
||||||
|
const int64_t n_todo = inp_size * (ngram_max - ngram_min + 1);
|
||||||
|
int64_t n_done = 0;
|
||||||
|
|
||||||
|
for (int64_t ngram_size = ngram_min; ngram_size <= ngram_max; ++ngram_size) {
|
||||||
|
const int64_t i_start = std::max(inp_size - nnew, ngram_size);
|
||||||
|
for (int64_t i = i_start; i < inp_size; ++i) {
|
||||||
|
const int64_t ngram_start = i - ngram_size;
|
||||||
|
llama_ngram ngram(&inp[ngram_start], ngram_size);
|
||||||
|
const llama_token token = inp[i];
|
||||||
|
|
||||||
|
llama_ngram_cache::iterator part_it = ngram_cache.find(ngram);
|
||||||
|
if (part_it == ngram_cache.end()) {
|
||||||
|
llama_ngram_cache_part part;
|
||||||
|
part.emplace(token, 1);
|
||||||
|
ngram_cache.emplace(ngram, part);
|
||||||
|
} else {
|
||||||
|
llama_ngram_cache_part::iterator token_count_it = part_it->second.find(token);
|
||||||
|
if (token_count_it == part_it->second.end()) {
|
||||||
|
part_it->second.emplace(token, 1);
|
||||||
|
} else {
|
||||||
|
token_count_it->second++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
++n_done;
|
||||||
|
|
||||||
|
if (print_progress && n_done % 10000000 == 0) {
|
||||||
|
const int64_t t_now_ms = ggml_time_ms();
|
||||||
|
const int64_t eta_ms = (inp_size*(ngram_max-ngram_min+1) - n_done) * (t_now_ms - t_start_ms) / n_done;
|
||||||
|
const int64_t eta_min = eta_ms / (60*1000);
|
||||||
|
const int64_t eta_s = (eta_ms - 60*1000*eta_min) / 1000;
|
||||||
|
|
||||||
|
fprintf(stderr, "%s: %" PRId64 "/%" PRId64 " done, ETA: %02" PRId64 ":%02" PRId64 "\n", __func__, n_done, n_todo, eta_min, eta_s);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper function to get a token from the combined, speculative sequence of inp and draft.
|
||||||
|
static llama_token get_token(const std::vector<llama_token> & inp, const std::vector<llama_token> & draft, const size_t i) {
|
||||||
|
return i < inp.size() ? inp[i] : draft[1 + i - inp.size()];
|
||||||
|
}
|
||||||
|
|
||||||
|
// If sample size or percentage are below these thresholds the draft is aborted early:
|
||||||
|
constexpr int draft_min_sample_size_lax[LLAMA_NGRAM_MAX] = { 2, 2, 1, 1};
|
||||||
|
constexpr int draft_min_percent_lax[LLAMA_NGRAM_MAX] = {66, 50, 50, 50};
|
||||||
|
constexpr int draft_min_sample_size_strict[LLAMA_NGRAM_MAX] = { 4, 3, 2, 2};
|
||||||
|
constexpr int draft_min_percent_strict[LLAMA_NGRAM_MAX] = {75, 66, 66, 66};
|
||||||
|
|
||||||
|
// Helper function that tries to draft a token from only the static ngram cache:
|
||||||
|
static llama_token try_draft(llama_ngram_cache & nc_static, const llama_ngram ngram_static) {
|
||||||
|
llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
|
||||||
|
if (part_static_it == nc_static.end()) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
const llama_ngram_cache_part part_static = part_static_it->second;
|
||||||
|
|
||||||
|
int max_count_static = 0;
|
||||||
|
int sum_count_static = 0;
|
||||||
|
llama_token max_token = -1;
|
||||||
|
|
||||||
|
for (std::pair<llama_token, int> token_count_static : part_static) {
|
||||||
|
const llama_token token = token_count_static.first;
|
||||||
|
const int32_t count_static = token_count_static.second;
|
||||||
|
|
||||||
|
if (count_static > max_count_static) {
|
||||||
|
max_token = token;
|
||||||
|
max_count_static = count_static;
|
||||||
|
}
|
||||||
|
sum_count_static += count_static;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sum_count_static < draft_min_sample_size_lax[LLAMA_NGRAM_STATIC-1]) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
if (100*max_count_static < draft_min_percent_lax[LLAMA_NGRAM_STATIC-1]*sum_count_static) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
return max_token;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to draft a token from primary cache (context/dynamic), validate with static cache:
|
||||||
|
static llama_token try_draft(
|
||||||
|
llama_ngram_cache & nc_primary, const std::vector<llama_ngram> & ngrams_primary, llama_ngram_cache_part & part_static,
|
||||||
|
const int * min_sample_size, const int * min_percent) {
|
||||||
|
|
||||||
|
llama_token drafted_token = -1;
|
||||||
|
|
||||||
|
for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == -1; --i) {
|
||||||
|
const llama_ngram ngram_primary = ngrams_primary[i];
|
||||||
|
|
||||||
|
llama_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
|
||||||
|
if (part_primary_it == nc_primary.end()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
const llama_ngram_cache_part part_primary = part_primary_it->second;
|
||||||
|
|
||||||
|
int max_count_primary = 0;
|
||||||
|
int max_count_static = 0;
|
||||||
|
int sum_count_primary = 0;
|
||||||
|
llama_token max_token = -1;
|
||||||
|
|
||||||
|
for (std::pair<llama_token, int> token_count_primary : part_primary) {
|
||||||
|
const llama_token token = token_count_primary.first;
|
||||||
|
|
||||||
|
llama_ngram_cache_part::iterator token_count_static_it = part_static.find(token);
|
||||||
|
|
||||||
|
const int32_t count_primary = token_count_primary.second;
|
||||||
|
const int32_t count_static = token_count_static_it != part_static.end() ? 100*token_count_static_it->second : 1;
|
||||||
|
|
||||||
|
if (count_primary*count_static > max_count_primary*max_count_static) {
|
||||||
|
max_token = token;
|
||||||
|
max_count_primary = count_primary;
|
||||||
|
max_count_static = count_static;
|
||||||
|
}
|
||||||
|
sum_count_primary += count_primary;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sum_count_primary < min_sample_size[i]) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (100*max_count_primary < min_percent[i]*sum_count_primary) {
|
||||||
|
continue;;
|
||||||
|
}
|
||||||
|
drafted_token = max_token;
|
||||||
|
}
|
||||||
|
|
||||||
|
return drafted_token;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Draft up to n_draft tokens by repeatedly looking up the trailing ngram of
// (inp + drafted tokens) in the three caches. Drafted tokens are appended to
// draft; drafting stops early as soon as no cache yields a confident candidate.
void llama_ngram_cache_draft(
    std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
    llama_ngram_cache & nc_context, llama_ngram_cache & nc_dynamic, llama_ngram_cache & nc_static
) {
    // draft is expected to contain exactly the previously sampled token on entry:
    GGML_ASSERT(draft.size() == 1);
    const int inp_size = inp.size();

    // Not enough context to form even a static-size ngram:
    if (inp_size < LLAMA_NGRAM_STATIC) {
        return;
    }

    // draft.size()-1 is the number of tokens drafted so far:
    while ((int) draft.size()-1 < n_draft) {
        llama_token drafted_token = -1;

        // Build the static-size ngram ending at the most recent (real or drafted) token:
        const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + draft.size()-1;
        llama_ngram ngram_static;
        for (int j = ngram_start_static; j < ngram_start_static + LLAMA_NGRAM_STATIC; ++j) {
            ngram_static.tokens[j-ngram_start_static] = get_token(inp, draft, j);
        }
        llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
        llama_ngram_cache_part part_static;
        if (part_static_it != nc_static.end()) {
            part_static = part_static_it->second;
        }

        // cd = context + dynamic
        // Build one trailing ngram per size in [ngram_min, ngram_max] for the primary caches:
        std::vector<llama_ngram> ngrams_cd;
        for (int ngram_size_cd = ngram_min; ngram_size_cd <= ngram_max; ++ngram_size_cd) {
            const int ngram_start_cd = inp_size-ngram_size_cd + draft.size()-1;
            llama_ngram ngram_cd;
            for (int j = ngram_start_cd; j < ngram_start_cd + ngram_size_cd; ++j) {
                ngram_cd.tokens[j-ngram_start_cd] = get_token(inp, draft, j);
            }
            ngrams_cd.push_back(ngram_cd);
        }
        // Try the caches in order: context (lax thresholds), then dynamic (strict
        // thresholds), then the static cache on its own as the last resort:
        if (drafted_token == -1) {
            drafted_token = try_draft(nc_context, ngrams_cd, part_static, draft_min_sample_size_lax, draft_min_percent_lax);
        }
        if (drafted_token == -1) {
            drafted_token = try_draft(nc_dynamic, ngrams_cd, part_static, draft_min_sample_size_strict, draft_min_percent_strict);
        }
        if (drafted_token == -1) {
            drafted_token = try_draft(nc_static, ngram_static);
        }

        // No cache produced a confident candidate -> stop drafting:
        if (drafted_token == -1) {
            break;
        }

        LOG(" - draft candidate: token=%d\n", drafted_token);
        draft.push_back(drafted_token);
    }
}
|
||||||
|
|
||||||
|
// Serialize an ngram cache to a binary file.
// On-disk format, repeated per ngram:
//   [llama_ngram][int32 ntokens][ntokens x ([llama_token][int32 count])]
void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename) {
    std::ofstream file_out(filename, std::ios::binary);

    for (const std::pair<const llama_ngram, llama_ngram_cache_part> & entry : ngram_cache) {
        const llama_ngram      ngram        = entry.first;
        llama_ngram_cache_part token_counts = entry.second;
        // An ngram without followers should never have been stored:
        GGML_ASSERT(!token_counts.empty());
        const int32_t ntokens = token_counts.size();
        GGML_ASSERT(ntokens > 0);

        file_out.write(reinterpret_cast<const char *>(&ngram),   sizeof(llama_ngram));
        file_out.write(reinterpret_cast<const char *>(&ntokens), sizeof(int32_t));

        for (const std::pair<const llama_token, int32_t> & tc : token_counts) {
            const llama_token token = tc.first;
            const int32_t     count = tc.second;
            GGML_ASSERT(count > 0);

            file_out.write(reinterpret_cast<const char *>(&token), sizeof(llama_token));
            file_out.write(reinterpret_cast<const char *>(&count), sizeof(int32_t));
        }
    }
}
|
||||||
|
|
||||||
|
// Deserialize an ngram cache written by llama_ngram_cache_save.
// Throws std::ifstream::failure if the file cannot be opened.
llama_ngram_cache llama_ngram_cache_load(std::string & filename) {
    std::ifstream hashmap_file(filename, std::ios::binary);
    if (!hashmap_file) {
        throw std::ifstream::failure("Unable to open file " + filename);
    }
    llama_ngram_cache ngram_cache;

    // Scratch variables reused for every record read from the file:
    llama_ngram ngram;
    int32_t ntokens;
    llama_token token;
    int32_t count;

    // Raw byte views of the scratch variables for istream::read:
    char * ngramc = reinterpret_cast<char*>(&ngram);
    char * ntokensc = reinterpret_cast<char*>(&ntokens);
    char * tokenc = reinterpret_cast<char*>(&token);
    char * countc = reinterpret_cast<char*>(&count);
    // One iteration per stored ngram; the read in the loop condition doubles as
    // the end-of-file check for a cleanly terminated file.
    // NOTE(review): the reads below are inside GGML_ASSERT, so correctness
    // assumes GGML_ASSERT always evaluates its argument (i.e. is never compiled
    // out like <cassert>'s assert) — confirm against ggml.h.
    while(hashmap_file.read(ngramc, sizeof(llama_ngram))) {
        GGML_ASSERT(!hashmap_file.eof());
        GGML_ASSERT(hashmap_file.read(ntokensc, sizeof(int32_t)));
        GGML_ASSERT(ntokens > 0);
        llama_ngram_cache_part token_counts;

        // Read ntokens (token, count) pairs for this ngram:
        for (int i = 0; i < ntokens; ++i) {
            GGML_ASSERT(!hashmap_file.eof());
            GGML_ASSERT(hashmap_file.read(tokenc, sizeof(llama_token)));
            GGML_ASSERT(!hashmap_file.eof());
            GGML_ASSERT(hashmap_file.read(countc, sizeof(int32_t)));
            GGML_ASSERT(count > 0);
            token_counts.emplace(token, count);
        }

        ngram_cache.emplace(ngram, token_counts);
    }
    // A well-formed file ends exactly on a record boundary:
    GGML_ASSERT(hashmap_file.eof());

    return ngram_cache;
}
|
||||||
|
|
||||||
|
// Merge ngram_cache_add into ngram_cache_target.
// Ngrams missing from the target are copied over wholesale; for ngrams present
// in both, per-token counts are summed.
void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add) {
    for (const std::pair<const llama_ngram, llama_ngram_cache_part> & src_entry : ngram_cache_add) {
        const llama_ngram ngram            = src_entry.first;
        const llama_ngram_cache_part part  = src_entry.second;

        llama_ngram_cache::iterator dst_it = ngram_cache_target.find(ngram);
        if (dst_it == ngram_cache_target.end()) {
            // Ngram not yet in the target -> take the whole follower map as-is:
            ngram_cache_target.emplace(ngram, part);
        } else {
            // Ngram exists in both -> merge follower counts one token at a time:
            for (const std::pair<const llama_token, int32_t> & token_count : part) {
                const llama_token token = token_count.first;
                const int32_t     count = token_count.second;
                GGML_ASSERT(count > 0);

                llama_ngram_cache_part::iterator dst_tc_it = dst_it->second.find(token);
                if (dst_tc_it == dst_it->second.end()) {
                    dst_it->second.emplace(token, count);
                } else {
                    dst_tc_it->second += count;
                }
            }
        }
    }
}
|
94
common/ngram-cache.h
Normal file
94
common/ngram-cache.h
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
#include <unordered_map>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#define LLAMA_NGRAM_MIN 1
|
||||||
|
#define LLAMA_NGRAM_MAX 4
|
||||||
|
#define LLAMA_NGRAM_STATIC 2
|
||||||
|
|
||||||
|
// Data structures to map n-grams to empirical token probabilities:
|
||||||
|
|
||||||
|
// A fixed-size ngram of tokens. Ngrams shorter than LLAMA_NGRAM_MAX pad the
// unused trailing slots with -1, so padding participates in equality/hashing.
struct llama_ngram {
    llama_token tokens[LLAMA_NGRAM_MAX];

    // Empty ngram: every slot set to the -1 sentinel.
    llama_ngram() {
        for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
            tokens[i] = -1;
        }
    }

    // Copy the first ngram_size tokens from input; pad the rest with -1.
    // Caller must ensure input has at least ngram_size elements.
    llama_ngram(const llama_token * input, const int ngram_size) {
        for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
            tokens[i] = i < ngram_size ? input[i] : -1;
        }
    }

    // Element-wise comparison over all LLAMA_NGRAM_MAX slots, padding included.
    bool operator==(const llama_ngram & other) const {
        for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
            if (tokens[i] != other.tokens[i]) {
                return false;
            }
        }
        return true;
    }
};
|
||||||
|
|
||||||
|
struct llama_ngram_hash_function {
|
||||||
|
size_t operator()(const llama_ngram & ngram) const {
|
||||||
|
size_t hash = 0;
|
||||||
|
for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
|
||||||
|
hash ^= std::hash<llama_token>{}(ngram.tokens[i]);
|
||||||
|
}
|
||||||
|
return hash;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// token -> number of times token has been seen following a given ngram
typedef std::unordered_map<llama_token, int32_t> llama_ngram_cache_part;

// n-gram -> empirical distribution of following tokens
typedef std::unordered_map<llama_ngram, llama_ngram_cache_part, llama_ngram_hash_function> llama_ngram_cache;


// Update an ngram cache with tokens.
// ngram_cache:         the cache to modify.
// ngram_min/ngram_max: the min/max size of the ngrams to extract from inp_data.
// inp_data:            the token sequence with which to update ngram_cache.
// nnew:                how many new tokens have been appended to inp_data since the last call to this function.
// print_progress:      whether to print progress to stderr.
//
// In order to get correct results inp_data can ONLY BE APPENDED TO.
// Changes in the middle need a complete rebuild.
void llama_ngram_cache_update(
    llama_ngram_cache & ngram_cache, int ngram_min, int ngram_max, std::vector<llama_token> & inp_data, int nnew, bool print_progress);

// Try to draft tokens from ngram caches.
// inp:                 the tokens generated so far.
// draft:               the token sequence to draft. Expected to initially contain the previously sampled token.
// n_draft:             maximum number of tokens to add to draft.
// ngram_min/ngram_max: the min/max size of the ngrams in nc_context and nc_dynamic.
// nc_context:          ngram cache based on current context.
// nc_dynamic:          ngram cache based on previous user generations.
// nc_static:           ngram cache generated from a large text corpus, used for validation.
// Drafted tokens are appended to draft in place.
void llama_ngram_cache_draft(
    std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
    llama_ngram_cache & nc_context, llama_ngram_cache & nc_dynamic, llama_ngram_cache & nc_static);

// Save an ngram cache to a file.
// ngram_cache: the ngram cache to save.
// filename:    the path under which to save the ngram cache.
void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename);

// Load an ngram cache saved with llama_ngram_cache_save.
// filename: the path from which to load the ngram cache.
// returns:  an ngram cache containing the information saved to filename.
llama_ngram_cache llama_ngram_cache_load(std::string & filename);

// Merge two ngram caches.
// ngram_cache_target: the ngram cache to which to add the information from ngram_cache_add.
// ngram_cache_add:    the ngram cache to add to ngram_cache_target.
void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add);
|
@ -168,77 +168,20 @@ static llama_token llama_sampling_sample_impl(
|
|||||||
bool is_resampling) { // Add a parameter to indicate if we are resampling
|
bool is_resampling) { // Add a parameter to indicate if we are resampling
|
||||||
const llama_sampling_params & params = ctx_sampling->params;
|
const llama_sampling_params & params = ctx_sampling->params;
|
||||||
|
|
||||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));
|
|
||||||
|
|
||||||
const float temp = params.temp;
|
const float temp = params.temp;
|
||||||
const int32_t penalty_last_n = params.penalty_last_n < 0 ? params.n_prev : params.penalty_last_n;
|
|
||||||
const float penalty_repeat = params.penalty_repeat;
|
|
||||||
const float penalty_freq = params.penalty_freq;
|
|
||||||
const float penalty_present = params.penalty_present;
|
|
||||||
const int mirostat = params.mirostat;
|
const int mirostat = params.mirostat;
|
||||||
const float mirostat_tau = params.mirostat_tau;
|
const float mirostat_tau = params.mirostat_tau;
|
||||||
const float mirostat_eta = params.mirostat_eta;
|
const float mirostat_eta = params.mirostat_eta;
|
||||||
const bool penalize_nl = params.penalize_nl;
|
|
||||||
|
|
||||||
auto & prev = ctx_sampling->prev;
|
|
||||||
auto & cur = ctx_sampling->cur;
|
|
||||||
|
|
||||||
|
std::vector<float> original_logits;
|
||||||
|
auto cur_p = llama_sampling_prepare(ctx_sampling, ctx_main, ctx_cfg, idx, !is_resampling, &original_logits);
|
||||||
|
if (!is_resampling) {
|
||||||
|
GGML_ASSERT(!original_logits.empty());
|
||||||
|
}
|
||||||
llama_token id = 0;
|
llama_token id = 0;
|
||||||
|
|
||||||
// Get a pointer to the logits
|
// Get a pointer to the logits
|
||||||
float * logits = llama_get_logits_ith(ctx_main, idx);
|
float * logits = llama_get_logits_ith(ctx_main, idx);
|
||||||
|
|
||||||
// Declare original_logits at the beginning of the function scope
|
|
||||||
std::vector<float> original_logits;
|
|
||||||
|
|
||||||
if (!is_resampling) {
|
|
||||||
// Only make a copy of the original logits if we are not in the resampling phase, not sure if I actually have to do this.
|
|
||||||
original_logits = std::vector<float>(logits, logits + llama_n_vocab(llama_get_model(ctx_main)));
|
|
||||||
}
|
|
||||||
|
|
||||||
// apply params.logit_bias map
|
|
||||||
for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
|
|
||||||
logits[it->first] += it->second;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ctx_cfg) {
|
|
||||||
float * logits_guidance = llama_get_logits_ith(ctx_cfg, idx);
|
|
||||||
llama_sample_apply_guidance(ctx_main, logits, logits_guidance, params.cfg_scale);
|
|
||||||
}
|
|
||||||
|
|
||||||
cur.clear();
|
|
||||||
|
|
||||||
for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
|
|
||||||
cur.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
|
|
||||||
}
|
|
||||||
|
|
||||||
llama_token_data_array cur_p = { cur.data(), cur.size(), false };
|
|
||||||
|
|
||||||
// apply penalties
|
|
||||||
const auto& penalty_tokens = params.use_penalty_prompt_tokens ? params.penalty_prompt_tokens : prev;
|
|
||||||
const int penalty_tokens_used_size = std::min((int)penalty_tokens.size(), penalty_last_n);
|
|
||||||
if (penalty_tokens_used_size) {
|
|
||||||
const float nl_logit = logits[llama_token_nl(llama_get_model(ctx_main))];
|
|
||||||
|
|
||||||
llama_sample_repetition_penalties(ctx_main, &cur_p,
|
|
||||||
penalty_tokens.data() + penalty_tokens.size() - penalty_tokens_used_size,
|
|
||||||
penalty_tokens_used_size, penalty_repeat, penalty_freq, penalty_present);
|
|
||||||
|
|
||||||
if (!penalize_nl) {
|
|
||||||
for (size_t idx = 0; idx < cur_p.size; idx++) {
|
|
||||||
if (cur_p.data[idx].id == llama_token_nl(llama_get_model(ctx_main))) {
|
|
||||||
cur_p.data[idx].logit = nl_logit;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we are in the resampling phase, apply grammar checks before sampling logic
|
|
||||||
if (is_resampling && ctx_sampling->grammar != NULL) {
|
|
||||||
llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (temp < 0.0) {
|
if (temp < 0.0) {
|
||||||
// greedy sampling, with probs
|
// greedy sampling, with probs
|
||||||
llama_sample_softmax(ctx_main, &cur_p);
|
llama_sample_softmax(ctx_main, &cur_p);
|
||||||
@ -302,11 +245,13 @@ static llama_token llama_sampling_sample_impl(
|
|||||||
return id;
|
return id;
|
||||||
}
|
}
|
||||||
|
|
||||||
static llama_token_data_array llama_sample_probability_distribution_impl(
|
static llama_token_data_array llama_sampling_prepare_impl(
|
||||||
struct llama_sampling_context * ctx_sampling,
|
struct llama_sampling_context * ctx_sampling,
|
||||||
struct llama_context * ctx_main,
|
struct llama_context * ctx_main,
|
||||||
struct llama_context * ctx_cfg,
|
struct llama_context * ctx_cfg,
|
||||||
const int idx) {
|
const int idx,
|
||||||
|
bool apply_grammar,
|
||||||
|
std::vector<float> * original_logits) {
|
||||||
const llama_sampling_params & params = ctx_sampling->params;
|
const llama_sampling_params & params = ctx_sampling->params;
|
||||||
|
|
||||||
const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));
|
const int n_vocab = llama_n_vocab(llama_get_model(ctx_main));
|
||||||
@ -315,6 +260,7 @@ static llama_token_data_array llama_sample_probability_distribution_impl(
|
|||||||
const float penalty_repeat = params.penalty_repeat;
|
const float penalty_repeat = params.penalty_repeat;
|
||||||
const float penalty_freq = params.penalty_freq;
|
const float penalty_freq = params.penalty_freq;
|
||||||
const float penalty_present = params.penalty_present;
|
const float penalty_present = params.penalty_present;
|
||||||
|
|
||||||
const bool penalize_nl = params.penalize_nl;
|
const bool penalize_nl = params.penalize_nl;
|
||||||
|
|
||||||
auto & prev = ctx_sampling->prev;
|
auto & prev = ctx_sampling->prev;
|
||||||
@ -323,8 +269,10 @@ static llama_token_data_array llama_sample_probability_distribution_impl(
|
|||||||
// Get a pointer to the logits
|
// Get a pointer to the logits
|
||||||
float * logits = llama_get_logits_ith(ctx_main, idx);
|
float * logits = llama_get_logits_ith(ctx_main, idx);
|
||||||
|
|
||||||
// Declare original_logits at the beginning of the function scope
|
if (apply_grammar && original_logits != NULL) {
|
||||||
std::vector<float> original_logits;
|
// Only make a copy of the original logits if we are not applying grammar checks, not sure if I actually have to do this.
|
||||||
|
*original_logits = {logits, logits + llama_n_vocab(llama_get_model(ctx_main))};
|
||||||
|
}
|
||||||
|
|
||||||
// apply params.logit_bias map
|
// apply params.logit_bias map
|
||||||
for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
|
for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
|
||||||
@ -364,12 +312,11 @@ static llama_token_data_array llama_sample_probability_distribution_impl(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// apply grammar checks
|
// apply grammar checks before sampling logic
|
||||||
if (ctx_sampling->grammar != NULL) {
|
if (apply_grammar && ctx_sampling->grammar != NULL) {
|
||||||
llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar);
|
llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar);
|
||||||
}
|
}
|
||||||
|
|
||||||
llama_sample_softmax(ctx_main, &cur_p);
|
|
||||||
return cur_p;
|
return cur_p;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -382,12 +329,14 @@ llama_token llama_sampling_sample(
|
|||||||
return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, false);
|
return llama_sampling_sample_impl(ctx_sampling, ctx_main, ctx_cfg, idx, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
llama_token_data_array llama_sampling_probability_distribution(
|
llama_token_data_array llama_sampling_prepare(
|
||||||
struct llama_sampling_context * ctx_sampling,
|
struct llama_sampling_context * ctx_sampling,
|
||||||
struct llama_context * ctx_main,
|
struct llama_context * ctx_main,
|
||||||
struct llama_context * ctx_cfg,
|
struct llama_context * ctx_cfg,
|
||||||
const int idx) {
|
const int idx,
|
||||||
return llama_sample_probability_distribution_impl(ctx_sampling,ctx_main, ctx_cfg, idx);
|
bool apply_grammar,
|
||||||
|
std::vector<float> * original_logits) {
|
||||||
|
return llama_sampling_prepare_impl(ctx_sampling,ctx_main, ctx_cfg, idx, apply_grammar, original_logits);
|
||||||
}
|
}
|
||||||
|
|
||||||
void llama_sampling_accept(
|
void llama_sampling_accept(
|
||||||
|
@ -131,12 +131,14 @@ llama_token llama_sampling_sample(
|
|||||||
struct llama_context * ctx_cfg,
|
struct llama_context * ctx_cfg,
|
||||||
int idx = 0);
|
int idx = 0);
|
||||||
|
|
||||||
// returns the probability that token of given id will be sampled
|
// Prepares and adjusts the set of token candidates for sampling based on penalties, biases, and sampling parameters.
|
||||||
llama_token_data_array llama_sampling_probability_distribution(
|
llama_token_data_array llama_sampling_prepare(
|
||||||
struct llama_sampling_context * ctx_sampling,
|
struct llama_sampling_context * ctx_sampling,
|
||||||
struct llama_context * ctx_main,
|
struct llama_context * ctx_main,
|
||||||
struct llama_context * ctx_cfg,
|
struct llama_context * ctx_cfg,
|
||||||
int idx = 0);
|
int idx = 0,
|
||||||
|
bool apply_grammar = true,
|
||||||
|
std::vector<float> * original_logits = nullptr);
|
||||||
|
|
||||||
void llama_sampling_accept(
|
void llama_sampling_accept(
|
||||||
struct llama_sampling_context * ctx_sampling,
|
struct llama_sampling_context * ctx_sampling,
|
||||||
|
@ -93,31 +93,42 @@ class Model(ABC):
|
|||||||
|
|
||||||
if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
|
if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None:
|
||||||
self.gguf_writer.add_context_length(n_ctx)
|
self.gguf_writer.add_context_length(n_ctx)
|
||||||
|
print(f"gguf: context length = {n_ctx}")
|
||||||
|
|
||||||
n_embd = self.find_hparam(["hidden_size", "n_embd"])
|
n_embd = self.find_hparam(["hidden_size", "n_embd"])
|
||||||
self.gguf_writer.add_embedding_length(n_embd)
|
self.gguf_writer.add_embedding_length(n_embd)
|
||||||
|
print(f"gguf: embedding length = {n_embd}")
|
||||||
|
|
||||||
if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
|
if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
|
||||||
self.gguf_writer.add_feed_forward_length(n_ff)
|
self.gguf_writer.add_feed_forward_length(n_ff)
|
||||||
|
print(f"gguf: feed forward length = {n_ff}")
|
||||||
|
|
||||||
n_head = self.find_hparam(["num_attention_heads", "n_head"])
|
n_head = self.find_hparam(["num_attention_heads", "n_head"])
|
||||||
self.gguf_writer.add_head_count(n_head)
|
self.gguf_writer.add_head_count(n_head)
|
||||||
|
print(f"gguf: head count = {n_head}")
|
||||||
|
|
||||||
if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
|
if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
|
||||||
self.gguf_writer.add_head_count_kv(n_head_kv)
|
self.gguf_writer.add_head_count_kv(n_head_kv)
|
||||||
|
print(f"gguf: key-value head count = {n_head_kv}")
|
||||||
|
|
||||||
if (rope_theta := self.hparams.get("rope_theta")) is not None:
|
if (rope_theta := self.hparams.get("rope_theta")) is not None:
|
||||||
self.gguf_writer.add_rope_freq_base(rope_theta)
|
self.gguf_writer.add_rope_freq_base(rope_theta)
|
||||||
|
print(f"gguf: rope theta = {rope_theta}")
|
||||||
if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
|
if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None:
|
||||||
self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
|
self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps)
|
||||||
|
print(f"gguf: rms norm epsilon = {f_rms_eps}")
|
||||||
if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
|
if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None:
|
||||||
self.gguf_writer.add_layer_norm_eps(f_norm_eps)
|
self.gguf_writer.add_layer_norm_eps(f_norm_eps)
|
||||||
|
print(f"gguf: layer norm epsilon = {f_norm_eps}")
|
||||||
if (n_experts := self.hparams.get("num_local_experts")) is not None:
|
if (n_experts := self.hparams.get("num_local_experts")) is not None:
|
||||||
self.gguf_writer.add_expert_count(n_experts)
|
self.gguf_writer.add_expert_count(n_experts)
|
||||||
|
print(f"gguf: expert count = {n_experts}")
|
||||||
if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
|
if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
|
||||||
self.gguf_writer.add_expert_used_count(n_experts_used)
|
self.gguf_writer.add_expert_used_count(n_experts_used)
|
||||||
|
print(f"gguf: experts used count = {n_experts_used}")
|
||||||
|
|
||||||
self.gguf_writer.add_file_type(self.ftype)
|
self.gguf_writer.add_file_type(self.ftype)
|
||||||
|
print(f"gguf: file type = {self.ftype}")
|
||||||
|
|
||||||
def write_tensors(self):
|
def write_tensors(self):
|
||||||
block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
|
block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
|
||||||
@ -320,7 +331,7 @@ class Model(ABC):
|
|||||||
tokenizer = SentencePieceProcessor(str(tokenizer_path))
|
tokenizer = SentencePieceProcessor(str(tokenizer_path))
|
||||||
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
|
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
|
||||||
|
|
||||||
for token_id in range(vocab_size):
|
for token_id in range(tokenizer.vocab_size()):
|
||||||
piece = tokenizer.id_to_piece(token_id)
|
piece = tokenizer.id_to_piece(token_id)
|
||||||
text = piece.encode("utf-8")
|
text = piece.encode("utf-8")
|
||||||
score = tokenizer.get_score(token_id)
|
score = tokenizer.get_score(token_id)
|
||||||
@ -345,9 +356,13 @@ class Model(ABC):
|
|||||||
added_tokens_json = json.load(f)
|
added_tokens_json = json.load(f)
|
||||||
|
|
||||||
for key in added_tokens_json:
|
for key in added_tokens_json:
|
||||||
tokens.append(key.encode("utf-8"))
|
key = key.encode("utf-8")
|
||||||
scores.append(-1000.0)
|
if key not in tokens:
|
||||||
toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
|
tokens.append(key)
|
||||||
|
scores.append(-1000.0)
|
||||||
|
toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
|
||||||
|
|
||||||
|
assert len(tokens) == vocab_size
|
||||||
|
|
||||||
self.gguf_writer.add_tokenizer_model("llama")
|
self.gguf_writer.add_tokenizer_model("llama")
|
||||||
self.gguf_writer.add_token_list(tokens)
|
self.gguf_writer.add_token_list(tokens)
|
||||||
@ -1051,6 +1066,21 @@ class MixtralModel(Model):
|
|||||||
self._set_vocab_sentencepiece()
|
self._set_vocab_sentencepiece()
|
||||||
|
|
||||||
|
|
||||||
|
@Model.register("GrokForCausalLM")
|
||||||
|
class GrokModel(Model):
|
||||||
|
model_arch = gguf.MODEL_ARCH.GROK
|
||||||
|
|
||||||
|
def set_vocab(self):
|
||||||
|
self._set_vocab_sentencepiece()
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
super().__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
def set_gguf_parameters(self):
|
||||||
|
super().set_gguf_parameters()
|
||||||
|
self.gguf_writer.add_name("Grok")
|
||||||
|
|
||||||
|
|
||||||
@Model.register("MiniCPMForCausalLM")
|
@Model.register("MiniCPMForCausalLM")
|
||||||
class MiniCPMModel(Model):
|
class MiniCPMModel(Model):
|
||||||
model_arch = gguf.MODEL_ARCH.MINICPM
|
model_arch = gguf.MODEL_ARCH.MINICPM
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
# Token generation performance troubleshooting
|
# Token generation performance troubleshooting
|
||||||
|
|
||||||
## Verifying that the model is running on the GPU with cuBLAS
|
## Verifying that the model is running on the GPU with CUDA
|
||||||
Make sure you compiled llama with the correct env variables according to [this guide](../README.md#cublas), so that llama accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running llama, you may configure `N` to be very large, and llama will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. For example:
|
Make sure you compiled llama with the correct env variables according to [this guide](../README.md#CUDA), so that llama accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running llama, you may configure `N` to be very large, and llama will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. For example:
|
||||||
```shell
|
```shell
|
||||||
./main -m "path/to/model.gguf" -ngl 200000 -p "Please sir, may I have some "
|
./main -m "path/to/model.gguf" -ngl 200000 -p "Please sir, may I have some "
|
||||||
```
|
```
|
||||||
|
@ -34,6 +34,7 @@ else()
|
|||||||
add_subdirectory(perplexity)
|
add_subdirectory(perplexity)
|
||||||
add_subdirectory(quantize)
|
add_subdirectory(quantize)
|
||||||
add_subdirectory(quantize-stats)
|
add_subdirectory(quantize-stats)
|
||||||
|
add_subdirectory(retrieval)
|
||||||
add_subdirectory(save-load-state)
|
add_subdirectory(save-load-state)
|
||||||
add_subdirectory(simple)
|
add_subdirectory(simple)
|
||||||
add_subdirectory(passkey)
|
add_subdirectory(passkey)
|
||||||
|
@ -21,6 +21,8 @@ An example command using a model from [karpathy/tinyllamas](https://huggingface.
|
|||||||
|
|
||||||
`$ ./convert-llama2c-to-ggml --copy-vocab-from-model llama-2-7b-chat.gguf.q2_K.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.gguf.bin`
|
`$ ./convert-llama2c-to-ggml --copy-vocab-from-model llama-2-7b-chat.gguf.q2_K.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.gguf.bin`
|
||||||
|
|
||||||
|
Note: The vocabulary for `stories260K.bin` should be its own tokenizer `tok512.bin` found in [karpathy/tinyllamas/stories260K](https://huggingface.co/karpathy/tinyllamas/tree/main/stories260K).
|
||||||
|
|
||||||
Now you can use the model with a command like:
|
Now you can use the model with a command like:
|
||||||
|
|
||||||
`$ ./main -m stories42M.gguf.bin -p "One day, Lily met a Shoggoth" -n 500 -c 256`
|
`$ ./main -m stories42M.gguf.bin -p "One day, Lily met a Shoggoth" -n 500 -c 256`
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
#include "ggml.h"
|
#include "ggml.h"
|
||||||
#include "llama.h"
|
#include "llama.h"
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
#include "log.h"
|
||||||
|
|
||||||
#include <unordered_map>
|
#include <unordered_map>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
@ -78,111 +79,101 @@ typedef struct {
|
|||||||
|
|
||||||
struct TransformerWeights {
|
struct TransformerWeights {
|
||||||
// token embedding table
|
// token embedding table
|
||||||
float* token_embedding_table; // (vocab_size, dim)
|
std::vector<float> token_embedding_table; // (vocab_size, dim)
|
||||||
// weights for rmsnorms
|
// weights for rmsnorms
|
||||||
float* rms_att_weight; // (layer, dim) rmsnorm weights
|
std::vector<float> rms_att_weight; // (layer, dim) rmsnorm weights
|
||||||
float* rms_ffn_weight; // (layer, dim)
|
std::vector<float> rms_ffn_weight; // (layer, dim)
|
||||||
// weights for matmuls
|
// weights for matmuls
|
||||||
float* wq; // (layer, dim, dim)
|
std::vector<float> wq; // (layer, dim, dim)
|
||||||
float* wk; // (layer, dim, dim)
|
std::vector<float> wk; // (layer, dim, dim)
|
||||||
float* wv; // (layer, dim, dim)
|
std::vector<float> wv; // (layer, dim, dim)
|
||||||
float* wo; // (layer, dim, dim)
|
std::vector<float> wo; // (layer, dim, dim)
|
||||||
// weights for ffn
|
// weights for ffn
|
||||||
float* w1; // (layer, hidden_dim, dim)
|
std::vector<float> w1; // (layer, hidden_dim, dim)
|
||||||
float* w2; // (layer, dim, hidden_dim)
|
std::vector<float> w2; // (layer, dim, hidden_dim)
|
||||||
float* w3; // (layer, hidden_dim, dim)
|
std::vector<float> w3; // (layer, hidden_dim, dim)
|
||||||
// final rmsnorm
|
// final rmsnorm
|
||||||
float* rms_final_weight; // (dim,)
|
std::vector<float> rms_final_weight; // (dim,)
|
||||||
// freq_cis for RoPE relatively positional embeddings
|
// freq_cis for RoPE relatively positional embeddings
|
||||||
// float* freq_cis_real; // (seq_len, dim/2)
|
// std::vector<float> freq_cis_real; // (seq_len, dim/2)
|
||||||
// float* freq_cis_imag; // (seq_len, dim/2)
|
// std::vector<float> freq_cis_imag; // (seq_len, dim/2)
|
||||||
// (optional) classifier weights for the logits, on the last layer
|
// (optional) classifier weights for the logits, on the last layer
|
||||||
float* wcls;
|
std::vector<float> wcls;
|
||||||
|
|
||||||
~TransformerWeights() {
|
|
||||||
delete[] token_embedding_table;
|
|
||||||
delete[] rms_att_weight;
|
|
||||||
delete[] rms_ffn_weight;
|
|
||||||
delete[] wq;
|
|
||||||
delete[] wk;
|
|
||||||
delete[] wv;
|
|
||||||
delete[] wo;
|
|
||||||
delete[] w1;
|
|
||||||
delete[] w2;
|
|
||||||
delete[] w3;
|
|
||||||
delete[] rms_final_weight;
|
|
||||||
delete[] wcls;
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
static void malloc_weights(TransformerWeights* w, Config* p, bool shared_weights) {
|
static void alloc_weights(TransformerWeights * w, const Config * p, bool shared_weights) {
|
||||||
// we calloc instead of malloc to keep valgrind happy
|
const int n_multiqueries = p->n_kv_heads <= 0 || p->n_kv_heads >= p->n_heads ? 1 : p->n_heads / p->n_kv_heads;
|
||||||
w->token_embedding_table = new float[p->vocab_size * p->dim]();
|
try {
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] = [%d] float space for w->token_embedding_table\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
|
w->token_embedding_table.resize(p->vocab_size * p->dim);
|
||||||
|
LOG("%s: Allocating [%d] x [%d] = [%d] float space for w->token_embedding_table\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
|
||||||
|
|
||||||
w->rms_att_weight = new float[p->n_layers * p->dim]();
|
w->rms_att_weight.resize(p->n_layers * p->dim);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] = [%d] float space for w->rms_att_weight\n",__func__,p->n_layers, p->dim, p->n_layers * p->dim);
|
LOG("%s: Allocating [%d] x [%d] = [%d] float space for w->rms_att_weight\n",__func__,p->n_layers, p->dim, p->n_layers * p->dim);
|
||||||
|
|
||||||
w->rms_ffn_weight = new float[p->n_layers * p->dim]();
|
w->rms_ffn_weight.resize(p->n_layers * p->dim);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] = [%d] float space for w->rms_ffn_weight\n",__func__,p->n_layers , p->dim, p->n_layers * p->dim);
|
LOG("%s: Allocating [%d] x [%d] = [%d] float space for w->rms_ffn_weight\n",__func__,p->n_layers , p->dim, p->n_layers * p->dim);
|
||||||
|
|
||||||
w->wq = new float[p->n_layers * p->dim * p->dim]();
|
w->wq.resize(p->n_layers * p->dim * p->dim);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] x [%d] = [%d] float space for w->wq\n",__func__,p->n_layers, p->dim, p->dim, p->n_layers * p->dim * p->dim);
|
LOG("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wq\n",__func__,p->n_layers, p->dim, p->dim, p->n_layers * p->dim * p->dim);
|
||||||
|
|
||||||
w->wk = new float[p->n_layers * p->dim * p->dim]();
|
w->wk.resize(p->n_layers * p->dim * p->dim / n_multiqueries);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] x [%d] = [%d] float space for w->wk\n",__func__,p->n_layers, p->dim, p->dim, p->n_layers * p->dim * p->dim);
|
LOG("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wk\n",__func__,p->n_layers, p->dim, p->dim / n_multiqueries, p->n_layers * p->dim * p->dim / n_multiqueries);
|
||||||
|
|
||||||
w->wv = new float[p->n_layers * p->dim * p->dim]();
|
w->wv.resize(p->n_layers * p->dim * p->dim / n_multiqueries);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] x [%d] = [%d] float space for w->wv\n",__func__, p->n_layers, p->dim, p->dim, p->n_layers * p->dim * p->dim);
|
LOG("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wv\n",__func__, p->n_layers, p->dim, p->dim / n_multiqueries, p->n_layers * p->dim * p->dim / n_multiqueries);
|
||||||
|
|
||||||
w->wo = new float[p->n_layers * p->dim * p->dim]();
|
w->wo.resize(p->n_layers * p->dim * p->dim);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] x [%d] = [%d] float space for w->wo\n",__func__,p->n_layers, p->dim, p->dim, p->n_layers * p->dim * p->dim);
|
LOG("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->wo\n",__func__,p->n_layers, p->dim, p->dim, p->n_layers * p->dim * p->dim);
|
||||||
|
|
||||||
w->w1 = new float[p->n_layers * p->hidden_dim * p->dim]();
|
w->w1.resize(p->n_layers * p->hidden_dim * p->dim);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] x [%d] = [%d] float space for w->w1\n",__func__,p->n_layers, p->hidden_dim, p->dim, p->n_layers * p->hidden_dim * p->dim);
|
LOG("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->w1\n",__func__,p->n_layers, p->hidden_dim, p->dim, p->n_layers * p->hidden_dim * p->dim);
|
||||||
|
|
||||||
w->w2 = new float[p->n_layers * p->hidden_dim * p->dim]();
|
w->w2.resize(p->n_layers * p->hidden_dim * p->dim);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] x [%d] = [%d] float space for w->w2\n",__func__,p->n_layers, p->dim, p->hidden_dim, p->n_layers * p->hidden_dim * p->dim);
|
LOG("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->w2\n",__func__,p->n_layers, p->dim, p->hidden_dim, p->n_layers * p->hidden_dim * p->dim);
|
||||||
|
|
||||||
w->w3 = new float[p->n_layers * p->hidden_dim * p->dim]();
|
w->w3.resize(p->n_layers * p->hidden_dim * p->dim);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] x [%d] = [%d] float space for w->w3\n",__func__,p->n_layers, p->hidden_dim, p->dim, p->n_layers * p->hidden_dim * p->dim);
|
LOG("%s: Allocating [%d] x [%d] x [%d] = [%d] float space for w->w3\n",__func__,p->n_layers, p->hidden_dim, p->dim, p->n_layers * p->hidden_dim * p->dim);
|
||||||
|
|
||||||
w->rms_final_weight = new float[p->dim]();
|
w->rms_final_weight.resize(p->dim);
|
||||||
printf("[%s:AK] Allocating [%d] float space for w->rms_final_weight\n",__func__,p->dim);
|
LOG("%s: Allocating [%d] float space for w->rms_final_weight\n",__func__,p->dim);
|
||||||
|
|
||||||
if (shared_weights) {
|
if (shared_weights) {
|
||||||
w->wcls = NULL;
|
w->wcls = {};
|
||||||
} else {
|
} else {
|
||||||
w->wcls = new float[p->vocab_size * p->dim]();
|
w->wcls.resize(p->vocab_size * p->dim);
|
||||||
printf("[%s:AK] Allocating [%d] x [%d] = [%d] float space for w->wcls\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
|
LOG("%s: Allocating [%d] x [%d] = [%d] float space for w->wcls\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
catch (std::length_error &) {
|
||||||
|
die("Invalid configuration. Failed to allocate memory for weights");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f, bool shared_weights) {
|
static int checkpoint_init_weights(TransformerWeights * w, const Config * p, FILE * f, bool shared_weights) {
|
||||||
if (fread(w->token_embedding_table, sizeof(float), p->vocab_size * p->dim, f) != static_cast<size_t>(p->vocab_size * p->dim)) return 1;
|
if (fread(w->token_embedding_table.data(), sizeof(float), w->token_embedding_table.size(), f) != w->token_embedding_table.size()) return 1;
|
||||||
if (fread(w->rms_att_weight, sizeof(float), p->n_layers * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim)) return 1;
|
if (fread(w->rms_att_weight.data(), sizeof(float), w->rms_att_weight.size(), f) != w->rms_att_weight.size()) return 1;
|
||||||
if (fread(w->wq, sizeof(float), p->n_layers * p->dim * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->dim)) return 1;
|
if (fread(w->wq.data(), sizeof(float), w->wq.size(), f) != w->wq.size()) return 1;
|
||||||
if (fread(w->wk, sizeof(float), p->n_layers * p->dim * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->dim)) return 1;
|
if (fread(w->wk.data(), sizeof(float), w->wk.size(), f) != w->wk.size()) return 1;
|
||||||
if (fread(w->wv, sizeof(float), p->n_layers * p->dim * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->dim)) return 1;
|
if (fread(w->wv.data(), sizeof(float), w->wv.size(), f) != w->wv.size()) return 1;
|
||||||
if (fread(w->wo, sizeof(float), p->n_layers * p->dim * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->dim)) return 1;
|
if (fread(w->wo.data(), sizeof(float), w->wo.size(), f) != w->wo.size()) return 1;
|
||||||
if (fread(w->rms_ffn_weight, sizeof(float), p->n_layers * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim)) return 1;
|
if (fread(w->rms_ffn_weight.data(), sizeof(float), w->rms_ffn_weight.size(), f) != w->rms_ffn_weight.size()) return 1;
|
||||||
if (fread(w->w1, sizeof(float), p->n_layers * p->dim * p->hidden_dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->hidden_dim)) return 1;
|
if (fread(w->w1.data(), sizeof(float), w->w1.size(), f) != w->w1.size()) return 1;
|
||||||
if (fread(w->w2, sizeof(float), p->n_layers * p->hidden_dim * p->dim, f) != static_cast<size_t>(p->n_layers * p->hidden_dim * p->dim)) return 1;
|
if (fread(w->w2.data(), sizeof(float), w->w2.size(), f) != w->w2.size()) return 1;
|
||||||
if (fread(w->w3, sizeof(float), p->n_layers * p->dim * p->hidden_dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->hidden_dim)) return 1;
|
if (fread(w->w3.data(), sizeof(float), w->w3.size(), f) != w->w3.size()) return 1;
|
||||||
if (fread(w->rms_final_weight, sizeof(float), p->dim, f) != static_cast<size_t>(p->dim)) return 1;
|
if (fread(w->rms_final_weight.data(), sizeof(float), w->rms_final_weight.size(), f) != w->rms_final_weight.size()) return 1;
|
||||||
|
|
||||||
// Skip freq_cis_real & freq_cis_imag
|
// Skip freq_cis_real & freq_cis_imag
|
||||||
int head_size = p->dim / p->n_heads;
|
int head_size = p->dim / p->n_heads;
|
||||||
fseek(f, p->seq_len * head_size * sizeof(float), SEEK_CUR);
|
fseek(f, p->seq_len * head_size * sizeof(float), SEEK_CUR);
|
||||||
|
|
||||||
if (!shared_weights && fread(w->wcls, sizeof(float), p->vocab_size * p->dim, f) != static_cast<size_t>(p->vocab_size * p->dim)) return 1;
|
if (!shared_weights && fread(w->wcls.data(), sizeof(float), w->wcls.size(), f) != w->wcls.size()) return 1;
|
||||||
|
|
||||||
// Check we didn't forget to read anything
|
// Check we didn't forget to read anything
|
||||||
auto curr = ftell(f);
|
auto curr = ftell(f);
|
||||||
fseek(f, 0, SEEK_END);
|
fseek(f, 0, SEEK_END);
|
||||||
auto end = ftell(f);
|
auto end = ftell(f);
|
||||||
if (curr != end) {
|
if (curr != end) {
|
||||||
printf("Error: failed to read the checkpoint file to the end (curr = %ld, end = %ld)\n", curr, end);
|
LOG("%s: Error: failed to read the checkpoint file to the end (curr = %ld, end = %ld)\n", __func__, curr, end);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -190,20 +181,20 @@ static int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f, bo
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void print_sample_weights(TransformerWeights *w){
|
static void print_sample_weights(TransformerWeights *w){
|
||||||
printf("----- Quick print of first of the weight vales of all the variables\n");
|
LOG("----- Quick print of first of the weight vales of all the variables\n");
|
||||||
printf("%f\n", w->token_embedding_table[0]);
|
LOG("%f\n", w->token_embedding_table[0]);
|
||||||
printf("%f\n", w->rms_att_weight[0]);
|
LOG("%f\n", w->rms_att_weight[0]);
|
||||||
printf("%f\n", w->rms_ffn_weight[0]);
|
LOG("%f\n", w->rms_ffn_weight[0]);
|
||||||
|
|
||||||
printf("%f\n", w->wq[0]);
|
LOG("%f\n", w->wq[0]);
|
||||||
printf("%f\n", w->wk[0]);
|
LOG("%f\n", w->wk[0]);
|
||||||
printf("%f\n", w->wv[0]);
|
LOG("%f\n", w->wv[0]);
|
||||||
printf("%f\n", w->wo[0]);
|
LOG("%f\n", w->wo[0]);
|
||||||
printf("%f\n", w->w1[0]);
|
LOG("%f\n", w->w1[0]);
|
||||||
printf("%f\n", w->w2[0]);
|
LOG("%f\n", w->w2[0]);
|
||||||
printf("%f\n", w->w3[0]);
|
LOG("%f\n", w->w3[0]);
|
||||||
printf("%f\n", w->rms_att_weight[0]);
|
LOG("%f\n", w->rms_att_weight[0]);
|
||||||
if (w->wcls) printf("%f\n", w->wcls[0]);
|
if (!w->wcls.empty()) LOG("%f\n", w->wcls[0]);
|
||||||
}
|
}
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
@ -225,14 +216,16 @@ struct llama_vocab {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct my_llama_hparams {
|
struct my_llama_hparams {
|
||||||
uint32_t n_vocab = 32000;
|
uint32_t n_vocab = 32000;
|
||||||
uint32_t n_ctx = 512; // this is provided as user input?
|
uint32_t n_ctx = 512; // this is provided as user input?
|
||||||
uint32_t n_embd = 4096;
|
uint32_t n_embd = 4096;
|
||||||
uint32_t n_ff = 11008;
|
uint32_t n_ff = 11008;
|
||||||
uint32_t n_mult = 4;
|
uint32_t n_mult = 4;
|
||||||
uint32_t n_head = 32;
|
uint32_t n_head = 32;
|
||||||
uint32_t n_layer = 32;
|
uint32_t n_head_kv = 32;
|
||||||
uint32_t n_rot = 64;
|
uint32_t n_layer = 32;
|
||||||
|
uint32_t n_rot = 64;
|
||||||
|
|
||||||
bool operator!=(const my_llama_hparams& other) const {
|
bool operator!=(const my_llama_hparams& other) const {
|
||||||
return memcmp(this, &other, sizeof(my_llama_hparams));
|
return memcmp(this, &other, sizeof(my_llama_hparams));
|
||||||
}
|
}
|
||||||
@ -325,14 +318,30 @@ struct train_params {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static void print_params(struct my_llama_hparams * params) {
|
static void print_params(struct my_llama_hparams * params) {
|
||||||
printf("%s: n_vocab: %u\n", __func__, params->n_vocab);
|
LOG("%s: n_vocab: %u\n", __func__, params->n_vocab);
|
||||||
printf("%s: n_ctx: %u\n", __func__, params->n_ctx);
|
LOG("%s: n_ctx: %u\n", __func__, params->n_ctx);
|
||||||
printf("%s: n_embd: %u\n", __func__, params->n_embd);
|
LOG("%s: n_embd: %u\n", __func__, params->n_embd);
|
||||||
printf("%s: n_mult: %u\n", __func__, params->n_mult);
|
LOG("%s: n_mult: %u\n", __func__, params->n_mult);
|
||||||
printf("%s: n_head: %u\n", __func__, params->n_head);
|
LOG("%s: n_head: %u\n", __func__, params->n_head);
|
||||||
printf("%s: n_ff: %u\n", __func__, params->n_ff);
|
LOG("%s: n_head_kv: %u\n", __func__, params->n_head_kv);
|
||||||
printf("%s: n_layer: %u\n", __func__, params->n_layer);
|
LOG("%s: n_ff: %u\n", __func__, params->n_ff);
|
||||||
printf("%s: n_rot: %u\n", __func__, params->n_rot);
|
LOG("%s: n_layer: %u\n", __func__, params->n_layer);
|
||||||
|
LOG("%s: n_rot: %u\n", __func__, params->n_rot);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void print_tensor_info(const struct ggml_context * ctx) {
|
||||||
|
for (auto t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
|
||||||
|
LOG("%s: Allocating ", __func__);
|
||||||
|
int64_t total = 1;
|
||||||
|
int i = 0;
|
||||||
|
for (; i < ggml_n_dims(t); ++i) {
|
||||||
|
if (i > 0) LOG("x ");
|
||||||
|
LOG("[%" PRId64 "] ", t->ne[i]);
|
||||||
|
total *= t->ne[i];
|
||||||
|
}
|
||||||
|
if (i > 1) LOG("= [%" PRId64 "] ", total);
|
||||||
|
LOG("float space for %s\n", ggml_get_name(t));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void init_model(struct my_llama_model * model) {
|
static void init_model(struct my_llama_model * model) {
|
||||||
@ -342,6 +351,8 @@ static void init_model(struct my_llama_model * model) {
|
|||||||
const uint32_t n_layer = hparams.n_layer;
|
const uint32_t n_layer = hparams.n_layer;
|
||||||
const uint32_t n_vocab = hparams.n_vocab;
|
const uint32_t n_vocab = hparams.n_vocab;
|
||||||
|
|
||||||
|
const uint32_t n_multiqueries = hparams.n_head_kv <= 0 || hparams.n_head_kv >= hparams.n_head ? 1 : hparams.n_head / hparams.n_head_kv;
|
||||||
|
|
||||||
const uint32_t n_ff = hparams.n_ff;
|
const uint32_t n_ff = hparams.n_ff;
|
||||||
struct ggml_context * ctx = model->ctx;
|
struct ggml_context * ctx = model->ctx;
|
||||||
|
|
||||||
@ -350,25 +361,8 @@ static void init_model(struct my_llama_model * model) {
|
|||||||
model->train_tokens = 0;
|
model->train_tokens = 0;
|
||||||
|
|
||||||
model->tok_embeddings = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);
|
model->tok_embeddings = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);
|
||||||
printf("[%s:GG] Allocating [%u] x [%u] = [%u] float space for model->tok_embeddings\n",__func__,n_embd , n_vocab, n_embd * n_vocab);
|
|
||||||
|
|
||||||
model->norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
model->norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||||
printf("[%s:GG] Allocating [%u] float space for model->norm\n",__func__,n_embd);
|
|
||||||
|
|
||||||
model->output = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);
|
model->output = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);
|
||||||
printf("[%s:GG] Allocating [%u] x[%u] = [%u] float space for model->output\n",__func__,n_embd, n_vocab, n_embd * n_vocab);
|
|
||||||
|
|
||||||
// printing the per-layer allocations here so we dont print in the for loop.
|
|
||||||
printf("[%s:GG] Allocating [%u] x[%u] = [%u] float space for layer.wq for [%u] layers\n",__func__, n_embd, n_embd, n_embd * n_embd, n_layer);
|
|
||||||
printf("[%s:GG] Allocating [%u] x[%u] = [%u] float space for layer.wk for [%u] layers\n",__func__, n_embd, n_embd, n_embd * n_embd, n_layer);
|
|
||||||
printf("[%s:GG] Allocating [%u] x[%u] = [%u] float space for layer.wv for [%u] layers\n",__func__, n_embd, n_embd, n_embd * n_embd, n_layer);
|
|
||||||
printf("[%s:GG] Allocating [%u] x[%u] = [%u] float space for layer.wo for [%u] layers\n",__func__, n_embd, n_embd, n_embd * n_embd, n_layer);
|
|
||||||
|
|
||||||
printf("[%s:GG] Allocating [%u] float space for layer.ffn_norm for [%u] layers\n",__func__,n_embd, n_layer);
|
|
||||||
|
|
||||||
printf("[%s:GG] Allocating [%u] x[%u] = [%u] float space for layer.w1 for [%u] layers\n",__func__, n_ff, n_embd, n_embd * n_ff, n_layer);
|
|
||||||
printf("[%s:GG] Allocating [%u] x[%u] = [%u] float space for layer.w2 for [%u] layers\n",__func__, n_embd, n_ff, n_ff * n_embd, n_layer);
|
|
||||||
printf("[%s:GG] Allocating [%u] x[%u] = [%u] float space for layer.w3 for [%u] layers\n",__func__, n_ff, n_embd, n_embd * n_ff, n_layer);
|
|
||||||
|
|
||||||
ggml_set_name(model->tok_embeddings, "tok_embeddings.weight");
|
ggml_set_name(model->tok_embeddings, "tok_embeddings.weight");
|
||||||
ggml_set_name(model->norm, "norm.weight");
|
ggml_set_name(model->norm, "norm.weight");
|
||||||
@ -383,8 +377,8 @@ static void init_model(struct my_llama_model * model) {
|
|||||||
layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||||
|
|
||||||
layer.wq = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
|
layer.wq = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
|
||||||
layer.wk = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
|
layer.wk = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd / n_multiqueries);
|
||||||
layer.wv = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
|
layer.wv = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd / n_multiqueries);
|
||||||
layer.wo = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
|
layer.wo = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
|
||||||
|
|
||||||
layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||||
@ -406,6 +400,8 @@ static void init_model(struct my_llama_model * model) {
|
|||||||
ggml_format_name(layer.w2, "%s.feed_forward.w2.weight", layers_i.c_str());
|
ggml_format_name(layer.w2, "%s.feed_forward.w2.weight", layers_i.c_str());
|
||||||
ggml_format_name(layer.w3, "%s.feed_forward.w3.weight", layers_i.c_str());
|
ggml_format_name(layer.w3, "%s.feed_forward.w3.weight", layers_i.c_str());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
print_tensor_info(ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
static float get_f32_2d(struct ggml_tensor * tensor, int64_t i0, int64_t i1) {
|
static float get_f32_2d(struct ggml_tensor * tensor, int64_t i0, int64_t i1) {
|
||||||
@ -421,9 +417,9 @@ static int32_t get_i32_2d(struct ggml_tensor * tensor, int64_t i0, int64_t i1) {
|
|||||||
static void print_row(struct ggml_tensor * probs, int i) {
|
static void print_row(struct ggml_tensor * probs, int i) {
|
||||||
for (int k = 0; k < probs->ne[0]; ++k) {
|
for (int k = 0; k < probs->ne[0]; ++k) {
|
||||||
float p = get_f32_2d(probs, k, i);
|
float p = get_f32_2d(probs, k, i);
|
||||||
printf(" %f", p);
|
LOG(" %f", p);
|
||||||
}
|
}
|
||||||
printf("\n");
|
LOG("\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
static void print_matrix(struct ggml_tensor * probs) {
|
static void print_matrix(struct ggml_tensor * probs) {
|
||||||
@ -431,33 +427,12 @@ static void print_matrix(struct ggml_tensor * probs) {
|
|||||||
for (int i = 0; i < probs->ne[1]; ++i) {
|
for (int i = 0; i < probs->ne[1]; ++i) {
|
||||||
for (int k = 0; k < probs->ne[0]; ++k) {
|
for (int k = 0; k < probs->ne[0]; ++k) {
|
||||||
float p = get_f32_2d(probs, k, i);
|
float p = get_f32_2d(probs, k, i);
|
||||||
printf(" %.2f", p);
|
LOG(" %.2f", p);
|
||||||
}
|
}
|
||||||
printf("\n");
|
LOG("\n");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef __GNUC__
|
|
||||||
#ifdef __MINGW32__
|
|
||||||
__attribute__((format(gnu_printf, 1, 2)))
|
|
||||||
#else
|
|
||||||
__attribute__((format(printf, 1, 2)))
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
static std::string format(const char * fmt, ...) {
|
|
||||||
va_list ap, ap2;
|
|
||||||
va_start(ap, fmt);
|
|
||||||
va_copy(ap2, ap);
|
|
||||||
int size = vsnprintf(NULL, 0, fmt, ap);
|
|
||||||
GGML_ASSERT(size >= 0 && size < INT_MAX);
|
|
||||||
std::vector<char> buf(size + 1);
|
|
||||||
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
|
|
||||||
GGML_ASSERT(size2 == size);
|
|
||||||
va_end(ap2);
|
|
||||||
va_end(ap);
|
|
||||||
return std::string(buf.data(), size);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct llama_file {
|
struct llama_file {
|
||||||
// use FILE * so we don't have to re-open the file to mmap
|
// use FILE * so we don't have to re-open the file to mmap
|
||||||
FILE * fp;
|
FILE * fp;
|
||||||
@ -549,8 +524,9 @@ static std::string llama_escape_whitespaces(const std::string & text) {
|
|||||||
return out.str();
|
return out.str();
|
||||||
}
|
}
|
||||||
|
|
||||||
static void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab) {
|
static void load_vocab(const char * filename, const Config * config, struct llama_vocab * vocab) {
|
||||||
if (is_ggml_file(filename)) {
|
if (is_ggml_file(filename)) {
|
||||||
|
LOG("%s: Loading vocabulary from gguf file %s\n", __func__, filename);
|
||||||
struct ggml_context * ctx_data = NULL;
|
struct ggml_context * ctx_data = NULL;
|
||||||
|
|
||||||
struct gguf_init_params params = {
|
struct gguf_init_params params = {
|
||||||
@ -578,6 +554,9 @@ static void load_vocab(const char *filename, Config *config, struct llama_vocab
|
|||||||
const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
|
const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
|
||||||
|
|
||||||
const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
|
const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
|
||||||
|
if (n_vocab != static_cast<uint32_t>(config->vocab_size)) {
|
||||||
|
die_fmt("vocab size mismatch: (gguf) %u != (llama2c) %d", n_vocab, config->vocab_size);
|
||||||
|
}
|
||||||
|
|
||||||
vocab->id_to_token.resize(n_vocab);
|
vocab->id_to_token.resize(n_vocab);
|
||||||
|
|
||||||
@ -595,7 +574,7 @@ static void load_vocab(const char *filename, Config *config, struct llama_vocab
|
|||||||
gguf_free(ctx);
|
gguf_free(ctx);
|
||||||
} else {
|
} else {
|
||||||
// assume llama2.c vocabulary
|
// assume llama2.c vocabulary
|
||||||
printf("Assuming llama2.c vocabulary since %s is not a gguf file\n", filename);
|
LOG("%s: Assuming llama2.c vocabulary since %s is not a gguf file\n", __func__, filename);
|
||||||
llama_file file(filename, "rb");
|
llama_file file(filename, "rb");
|
||||||
if (!file.fp) {
|
if (!file.fp) {
|
||||||
die_fmt("%s: %s", strerror(errno), filename);
|
die_fmt("%s: %s", strerror(errno), filename);
|
||||||
@ -638,38 +617,15 @@ static void load_vocab(const char *filename, Config *config, struct llama_vocab
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
|
static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
|
||||||
int ct;
|
int size = 1;
|
||||||
switch (ggml_n_dims(gg_weights)) {
|
for (int dim = 0; dim < ggml_n_dims(gg_weights); ++dim) {
|
||||||
case 1:
|
size *= gg_weights->ne[dim];
|
||||||
ct = 0;
|
}
|
||||||
for (int i0 = 0; i0 < gg_weights->ne[0]; i0++){
|
for (int ct = 0; ct < size; ++ct) {
|
||||||
float * ptr = (float *) ((char *) gg_weights->data + i0*gg_weights->nb[0]);
|
int64_t i0 = 0; int64_t i1 = 0;
|
||||||
*ptr = karpathy_weights[ct];
|
int64_t i2 = 0; int64_t i3 = 0;
|
||||||
ct++;
|
ggml_unravel_index(gg_weights, ct, &i0, &i1, &i2, &i3);
|
||||||
}
|
ggml_set_f32_nd(gg_weights, i0, i1, i2, i3, karpathy_weights[ct]);
|
||||||
break;
|
|
||||||
case 2:
|
|
||||||
ct = 0;
|
|
||||||
for (int i1 = 0; i1 < gg_weights->ne[1]; i1++) {
|
|
||||||
for (int i0 = 0; i0 < gg_weights->ne[0]; i0++) {
|
|
||||||
float * ptr = (float *) ((char *) gg_weights->data + i0*gg_weights->nb[0] + i1*gg_weights->nb[1]);
|
|
||||||
*ptr = karpathy_weights[ct];
|
|
||||||
ct++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
case 3:
|
|
||||||
ct = 0;
|
|
||||||
for (int i2 = 0; i2 < gg_weights->ne[2]; i2++) {
|
|
||||||
for (int i1 = 0; i1 < gg_weights->ne[1]; i1++) {
|
|
||||||
for (int i0 = 0; i0 < gg_weights->ne[0]; i0++) {
|
|
||||||
float * ptr = (float *) ((char *) gg_weights->data + i0*gg_weights->nb[0] + i1*gg_weights->nb[1] + i2*gg_weights->nb[2]);
|
|
||||||
*ptr = karpathy_weights[ct];
|
|
||||||
ct++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -679,16 +635,18 @@ static void save_as_llama_model(
|
|||||||
// convert AK weights into GG weights one by one.
|
// convert AK weights into GG weights one by one.
|
||||||
// w->token_embedding_table -> model->tok_embeddings
|
// w->token_embedding_table -> model->tok_embeddings
|
||||||
// float* -> struct ggml_tensor
|
// float* -> struct ggml_tensor
|
||||||
convert_weights_ak_to_gg(model->tok_embeddings, w->token_embedding_table);
|
convert_weights_ak_to_gg(model->tok_embeddings, w->token_embedding_table.data());
|
||||||
convert_weights_ak_to_gg(model->output, w->wcls ? w->wcls : w->token_embedding_table);
|
convert_weights_ak_to_gg(model->output, !w->wcls.empty() ? w->wcls.data() : w->token_embedding_table.data());
|
||||||
|
|
||||||
convert_weights_ak_to_gg(model->norm, w->rms_final_weight);
|
convert_weights_ak_to_gg(model->norm, w->rms_final_weight.data());
|
||||||
//print_row(model->norm, 0);
|
//print_row(model->norm, 0);
|
||||||
|
|
||||||
// for rms-att-weight
|
// for rms-att-weight
|
||||||
int row_length = model->hparams.n_embd;
|
int row_length = model->hparams.n_embd;
|
||||||
int n_ff = model->hparams.n_ff;
|
int n_ff = model->hparams.n_ff;
|
||||||
|
|
||||||
|
const uint32_t n_multiqueries = model->hparams.n_head_kv <= 0 || model->hparams.n_head_kv >= model->hparams.n_head ? 1 : model->hparams.n_head / model->hparams.n_head_kv;
|
||||||
|
|
||||||
for (uint32_t i = 0; i < model->hparams.n_layer; ++i){
|
for (uint32_t i = 0; i < model->hparams.n_layer; ++i){
|
||||||
auto & layer = model->layers[i];
|
auto & layer = model->layers[i];
|
||||||
// 1d
|
// 1d
|
||||||
@ -697,9 +655,10 @@ static void save_as_llama_model(
|
|||||||
|
|
||||||
// from 3d matrix layer x dim x dim to 2d matrix dim x dim
|
// from 3d matrix layer x dim x dim to 2d matrix dim x dim
|
||||||
convert_weights_ak_to_gg(layer.wq , &w->wq[i*row_length*row_length]);
|
convert_weights_ak_to_gg(layer.wq , &w->wq[i*row_length*row_length]);
|
||||||
convert_weights_ak_to_gg(layer.wk , &w->wk[i*row_length*row_length]);
|
|
||||||
convert_weights_ak_to_gg(layer.wv , &w->wv[i*row_length*row_length]);
|
|
||||||
convert_weights_ak_to_gg(layer.wo , &w->wo[i*row_length*row_length]);
|
convert_weights_ak_to_gg(layer.wo , &w->wo[i*row_length*row_length]);
|
||||||
|
// from 3d matrix layer x dim x dim to 2d matrix dim x dim / n_multiqueries
|
||||||
|
convert_weights_ak_to_gg(layer.wk , &w->wk[i*row_length*row_length/n_multiqueries]);
|
||||||
|
convert_weights_ak_to_gg(layer.wv , &w->wv[i*row_length*row_length/n_multiqueries]);
|
||||||
|
|
||||||
convert_weights_ak_to_gg(layer.w1 , &w->w1[i*row_length*n_ff]);
|
convert_weights_ak_to_gg(layer.w1 , &w->w1[i*row_length*n_ff]);
|
||||||
convert_weights_ak_to_gg(layer.w2 , &w->w2[i*n_ff*row_length]);
|
convert_weights_ak_to_gg(layer.w2 , &w->w2[i*n_ff*row_length]);
|
||||||
@ -736,8 +695,8 @@ static void save_as_llama_model(
|
|||||||
gguf_set_val_u32(ctx, KV_EMBEDDING_LENGTH, model->hparams.n_embd);
|
gguf_set_val_u32(ctx, KV_EMBEDDING_LENGTH, model->hparams.n_embd);
|
||||||
gguf_set_val_u32(ctx, KV_FEED_FORWARD_LENGTH, model->hparams.n_ff);
|
gguf_set_val_u32(ctx, KV_FEED_FORWARD_LENGTH, model->hparams.n_ff);
|
||||||
gguf_set_val_u32(ctx, KV_ATTENTION_HEAD_COUNT, model->hparams.n_head);
|
gguf_set_val_u32(ctx, KV_ATTENTION_HEAD_COUNT, model->hparams.n_head);
|
||||||
// n_head_kv is optional, default to n_head
|
gguf_set_val_u32(ctx, KV_ATTENTION_HEAD_COUNT, model->hparams.n_head);
|
||||||
// gguf_set_val_u32(ctx, KV_ATTENTION_HEAD_COUNT_KV, ...);
|
gguf_set_val_u32(ctx, KV_ATTENTION_HEAD_COUNT_KV, model->hparams.n_head_kv);
|
||||||
gguf_set_val_u32(ctx, KV_BLOCK_COUNT, model->hparams.n_layer);
|
gguf_set_val_u32(ctx, KV_BLOCK_COUNT, model->hparams.n_layer);
|
||||||
gguf_set_val_u32(ctx, KV_ROPE_DIMENSION_COUNT, model->hparams.n_rot);
|
gguf_set_val_u32(ctx, KV_ROPE_DIMENSION_COUNT, model->hparams.n_rot);
|
||||||
gguf_set_val_f32(ctx, KV_ATTENTION_LAYERNORM_RMS_EPS, 1e-5f);
|
gguf_set_val_f32(ctx, KV_ATTENTION_LAYERNORM_RMS_EPS, 1e-5f);
|
||||||
@ -789,12 +748,12 @@ static void save_as_llama_model(
|
|||||||
|
|
||||||
static struct train_params get_default_train_params() {
|
static struct train_params get_default_train_params() {
|
||||||
struct train_params params;
|
struct train_params params;
|
||||||
params.fn_vocab_model = "models/7B/ggml-model-f16.gguf";
|
params.fn_vocab_model = "models/7B/ggml-model-f16.gguf";
|
||||||
params.fn_llama2c_output_model = "ak_llama_model.bin";
|
params.fn_llama2c_output_model = "ak_llama_model.bin";
|
||||||
params.fn_train_data = "shakespeare.txt";
|
params.fn_train_data = "shakespeare.txt";
|
||||||
params.fn_checkpoint_in = "checkpoint.bin";
|
params.fn_checkpoint_in = "checkpoint.bin";
|
||||||
params.fn_checkpoint_out = "checkpoint.bin";
|
params.fn_checkpoint_out = "checkpoint.bin";
|
||||||
params.fn_model_out = "ggml-checkpoint-f32.bin";
|
params.fn_model_out = "ggml-checkpoint-f32.bin";
|
||||||
|
|
||||||
params.seed = -1;
|
params.seed = -1;
|
||||||
|
|
||||||
@ -829,8 +788,8 @@ static struct train_params get_default_train_params() {
|
|||||||
params.adam_alpha = 1e-3f;
|
params.adam_alpha = 1e-3f;
|
||||||
params.adam_decay = 1e-3f;
|
params.adam_decay = 1e-3f;
|
||||||
|
|
||||||
params.mem_model_gb = 2;
|
params.mem_model_gb = 2;
|
||||||
params.mem_compute_gb = 24;
|
params.mem_compute_gb = 24;
|
||||||
params.mem_compute0_gb = 8;
|
params.mem_compute0_gb = 8;
|
||||||
params.mem_compute1_gb = 2;
|
params.mem_compute1_gb = 2;
|
||||||
|
|
||||||
@ -916,19 +875,30 @@ int main(int argc, char ** argv) {
|
|||||||
if (!params_parse(argc, argv, ¶ms)) {
|
if (!params_parse(argc, argv, ¶ms)) {
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
log_set_target(stdout);
|
||||||
Config config;
|
Config config;
|
||||||
TransformerWeights weights = {};
|
TransformerWeights weights = {};
|
||||||
{
|
{
|
||||||
FILE *file = fopen(params.fn_llama2c_model, "rb");
|
LOG("%s: Loading llama2c model from %s\n", __func__, params.fn_llama2c_model);
|
||||||
if (!file) { printf("Unable to open the checkpoint file %s!\n", params.fn_llama2c_model); return 1; }
|
FILE * file = fopen(params.fn_llama2c_model, "rb");
|
||||||
|
if (!file) {
|
||||||
|
LOG("%s: Unable to open the checkpoint file %s!\n", __func__, params.fn_llama2c_model);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
// read in the config header
|
// read in the config header
|
||||||
if(fread(&config, sizeof(Config), 1, file) != 1) { return 1; }
|
if (fread(&config, sizeof(Config), 1, file) != 1) {
|
||||||
|
LOG("%s: Unable to read llama2c config from %s!\n",__func__,params.fn_llama2c_model);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
auto shared_weights = config.vocab_size > 0;
|
auto shared_weights = config.vocab_size > 0;
|
||||||
config.vocab_size = abs(config.vocab_size);
|
config.vocab_size = abs(config.vocab_size);
|
||||||
|
|
||||||
// read in the Transformer weights
|
// read in the Transformer weights
|
||||||
malloc_weights(&weights, &config, shared_weights);
|
alloc_weights(&weights, &config, shared_weights);
|
||||||
if(checkpoint_init_weights(&weights, &config, file, shared_weights)) { return 1; }
|
if (checkpoint_init_weights(&weights, &config, file, shared_weights)) {
|
||||||
|
LOG("%s: Unable to initialize transformer weights from %s!",__func__,params.fn_llama2c_model);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
fclose(file);
|
fclose(file);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -936,15 +906,18 @@ int main(int argc, char ** argv) {
|
|||||||
load_vocab(params.fn_vocab_model, &config, &vocab);
|
load_vocab(params.fn_vocab_model, &config, &vocab);
|
||||||
|
|
||||||
struct my_llama_model model;
|
struct my_llama_model model;
|
||||||
model.hparams.n_vocab = config.vocab_size; //llama_n_vocab(lctx);
|
model.hparams.n_vocab = config.vocab_size; //llama_n_vocab(lctx);
|
||||||
model.hparams.n_ctx = params.n_ctx;
|
model.hparams.n_ctx = params.n_ctx;
|
||||||
model.hparams.n_embd = config.dim; //params.n_embd;
|
model.hparams.n_embd = config.dim; //params.n_embd;
|
||||||
model.hparams.n_ff = config.hidden_dim;
|
model.hparams.n_ff = config.hidden_dim;
|
||||||
model.hparams.n_mult = 32;//params.n_mult;
|
model.hparams.n_mult = 32;//params.n_mult;
|
||||||
model.hparams.n_head = config.n_heads; //params.n_head;
|
model.hparams.n_head = config.n_heads; //params.n_head;
|
||||||
model.hparams.n_layer = config.n_layers; //params.n_layer;
|
model.hparams.n_head_kv = config.n_kv_heads;
|
||||||
model.hparams.n_rot = std::min((uint32_t)params.n_rotmax, model.hparams.n_embd / model.hparams.n_head);
|
model.hparams.n_layer = config.n_layers; //params.n_layer;
|
||||||
|
model.hparams.n_rot = std::min((uint32_t)params.n_rotmax, model.hparams.n_embd / model.hparams.n_head);
|
||||||
|
|
||||||
print_params(&model.hparams);
|
print_params(&model.hparams);
|
||||||
|
|
||||||
struct ggml_init_params lcparams;
|
struct ggml_init_params lcparams;
|
||||||
lcparams.mem_size = 1024ll*1024ll*1024ll*((size_t) params.mem_model_gb);
|
lcparams.mem_size = 1024ll*1024ll*1024ll*((size_t) params.mem_model_gb);
|
||||||
lcparams.mem_buffer = NULL;
|
lcparams.mem_buffer = NULL;
|
||||||
@ -956,7 +929,7 @@ int main(int argc, char ** argv) {
|
|||||||
model.name = basename(params.fn_llama2c_model);
|
model.name = basename(params.fn_llama2c_model);
|
||||||
save_as_llama_model(&vocab, &model, &weights, params.fn_llama2c_output_model);
|
save_as_llama_model(&vocab, &model, &weights, params.fn_llama2c_output_model);
|
||||||
|
|
||||||
printf("Saving llama.c model file %s in ggml format at %s\n", params.fn_llama2c_model, params.fn_llama2c_output_model);
|
LOG("%s: Saving llama.c model file %s in ggml format at %s\n", __func__, params.fn_llama2c_model, params.fn_llama2c_output_model);
|
||||||
|
|
||||||
ggml_free(model.ctx);
|
ggml_free(model.ctx);
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -61,6 +61,8 @@ int main(int argc, char ** argv) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
params.embedding = true;
|
params.embedding = true;
|
||||||
|
// For non-causal models, batch size must be equal to ubatch size
|
||||||
|
params.n_ubatch = params.n_batch;
|
||||||
|
|
||||||
print_build_info();
|
print_build_info();
|
||||||
|
|
||||||
@ -114,7 +116,9 @@ int main(int argc, char ** argv) {
|
|||||||
for (const auto & prompt : prompts) {
|
for (const auto & prompt : prompts) {
|
||||||
auto inp = ::llama_tokenize(ctx, prompt, true, false);
|
auto inp = ::llama_tokenize(ctx, prompt, true, false);
|
||||||
if (inp.size() > n_batch) {
|
if (inp.size() > n_batch) {
|
||||||
inp.resize(n_batch);
|
fprintf(stderr, "%s: error: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
|
||||||
|
__func__, (long long int) inp.size(), (long long int) n_batch);
|
||||||
|
return 1;
|
||||||
}
|
}
|
||||||
inputs.push_back(inp);
|
inputs.push_back(inp);
|
||||||
}
|
}
|
||||||
|
@ -1,32 +1,31 @@
|
|||||||
#include "llama.h"
|
#include "llama.h"
|
||||||
#include "ggml.h"
|
|
||||||
#include "common.h"
|
#include "common.h"
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
#include <cstdint>
|
|
||||||
#include <cstdlib>
|
#include <cstdlib>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <ios>
|
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
#include <fcntl.h>
|
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
|
#include <climits>
|
||||||
|
#include <stdexcept>
|
||||||
|
|
||||||
|
#if defined(_WIN32)
|
||||||
|
#include <windows.h>
|
||||||
|
#ifndef PATH_MAX
|
||||||
|
#define PATH_MAX MAX_PATH
|
||||||
|
#endif
|
||||||
|
#include <io.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
enum split_operation : uint8_t {
|
enum split_operation : uint8_t {
|
||||||
SPLIT_OP_SPLIT,
|
SPLIT_OP_SPLIT,
|
||||||
SPLIT_OP_MERGE,
|
SPLIT_OP_MERGE,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const char * const LLM_KV_GENERAL_SPLIT_I_SPLIT = "general.split";
|
|
||||||
static const char * const LLM_KV_GENERAL_SPLIT_N_SPLIT = "general.split_count";
|
|
||||||
|
|
||||||
static const int SPLIT_FILENAME_MAX = 256;
|
|
||||||
|
|
||||||
static const char * const SPLIT_FILENAME_FORMAT = "%s-%05d-of-%05d.gguf";
|
|
||||||
|
|
||||||
struct split_params {
|
struct split_params {
|
||||||
split_operation operation = SPLIT_OP_SPLIT;
|
split_operation operation = SPLIT_OP_SPLIT;
|
||||||
int n_split_tensors = 128;
|
int n_split_tensors = 128;
|
||||||
@ -116,13 +115,13 @@ static bool split_params_parse(int argc, const char ** argv, split_params & para
|
|||||||
try {
|
try {
|
||||||
if (!split_params_parse_ex(argc, argv, params)) {
|
if (!split_params_parse_ex(argc, argv, params)) {
|
||||||
split_print_usage(argv[0]);
|
split_print_usage(argv[0]);
|
||||||
exit(1);
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
catch (const std::invalid_argument & ex) {
|
catch (const std::invalid_argument & ex) {
|
||||||
fprintf(stderr, "%s\n", ex.what());
|
fprintf(stderr, "%s\n", ex.what());
|
||||||
split_print_usage(argv[0]);
|
split_print_usage(argv[0]);
|
||||||
exit(1);
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
@ -134,12 +133,6 @@ static void zeros(std::ofstream & file, size_t n) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static std::string split_file_name(const std::string & path, int i_split, int n_split) {
|
|
||||||
char f_split[SPLIT_FILENAME_MAX] = {0};
|
|
||||||
snprintf(f_split, sizeof(f_split), SPLIT_FILENAME_FORMAT, path.c_str(), i_split + 1, n_split);
|
|
||||||
return std::string(f_split);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct split_strategy {
|
struct split_strategy {
|
||||||
const split_params params;
|
const split_params params;
|
||||||
std::ifstream & f_input;
|
std::ifstream & f_input;
|
||||||
@ -180,8 +173,9 @@ struct split_strategy {
|
|||||||
if (i_split == 0) {
|
if (i_split == 0) {
|
||||||
gguf_set_kv(ctx_out, ctx_gguf);
|
gguf_set_kv(ctx_out, ctx_gguf);
|
||||||
}
|
}
|
||||||
gguf_set_val_u8(ctx_out, LLM_KV_GENERAL_SPLIT_I_SPLIT, i_split);
|
gguf_set_val_u16(ctx_out, LLM_KV_SPLIT_NO, i_split);
|
||||||
gguf_set_val_u8(ctx_out, LLM_KV_GENERAL_SPLIT_N_SPLIT, n_split);
|
gguf_set_val_u16(ctx_out, LLM_KV_SPLIT_COUNT, n_split);
|
||||||
|
gguf_set_val_i32(ctx_out, LLM_KV_SPLIT_TENSORS_COUNT, n_tensors);
|
||||||
|
|
||||||
// populate the original tensors, so we get an initial metadata
|
// populate the original tensors, so we get an initial metadata
|
||||||
for (int i = i_split * params.n_split_tensors; i < n_tensors && i < (i_split + 1) * params.n_split_tensors; ++i) {
|
for (int i = i_split * params.n_split_tensors; i < n_tensors && i < (i_split + 1) * params.n_split_tensors; ++i) {
|
||||||
@ -189,10 +183,11 @@ struct split_strategy {
|
|||||||
gguf_add_tensor(ctx_out, meta);
|
gguf_add_tensor(ctx_out, meta);
|
||||||
}
|
}
|
||||||
|
|
||||||
auto split_name = split_file_name(params.output, i_split, n_split);
|
char split_path[PATH_MAX] = {0};
|
||||||
|
llama_split_path(split_path, sizeof(split_path), params.output.c_str(), i_split, n_split);
|
||||||
|
|
||||||
fprintf(stderr, "%s: %s ...", __func__, split_name.c_str());
|
fprintf(stderr, "%s: %s ...", __func__, split_path);
|
||||||
fout = std::ofstream(split_name, std::ios::binary);
|
fout = std::ofstream(split_path, std::ios::binary);
|
||||||
fout.exceptions(std::ofstream::failbit); // fail fast on write errors
|
fout.exceptions(std::ofstream::failbit); // fail fast on write errors
|
||||||
|
|
||||||
auto meta_size = gguf_get_meta_size(ctx_out);
|
auto meta_size = gguf_get_meta_size(ctx_out);
|
||||||
@ -250,19 +245,23 @@ static void gguf_split(const split_params & split_params) {
|
|||||||
std::ifstream f_input(split_params.input.c_str(), std::ios::binary);
|
std::ifstream f_input(split_params.input.c_str(), std::ios::binary);
|
||||||
if (!f_input.is_open()) {
|
if (!f_input.is_open()) {
|
||||||
fprintf(stderr, "%s: failed to open input GGUF from %s\n", __func__, split_params.input.c_str());
|
fprintf(stderr, "%s: failed to open input GGUF from %s\n", __func__, split_params.input.c_str());
|
||||||
exit(1);
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
|
|
||||||
auto * ctx_gguf = gguf_init_from_file(split_params.input.c_str(), params);
|
auto * ctx_gguf = gguf_init_from_file(split_params.input.c_str(), params);
|
||||||
if (!ctx_gguf) {
|
if (!ctx_gguf) {
|
||||||
fprintf(stderr, "%s: failed to load input GGUF from %s\n", __func__, split_params.input.c_str());
|
fprintf(stderr, "%s: failed to load input GGUF from %s\n", __func__, split_params.input.c_str());
|
||||||
exit(1);
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
|
|
||||||
split_strategy strategy(split_params, f_input, ctx_gguf, ctx_meta);
|
split_strategy strategy(split_params, f_input, ctx_gguf, ctx_meta);
|
||||||
|
|
||||||
|
char first_split_path[PATH_MAX] = {0};
|
||||||
|
llama_split_path(first_split_path, sizeof(first_split_path),
|
||||||
|
split_params.output.c_str(), strategy.i_split, strategy.n_split);
|
||||||
fprintf(stderr, "%s: %s -> %s (%d tensors per file)\n",
|
fprintf(stderr, "%s: %s -> %s (%d tensors per file)\n",
|
||||||
__func__, split_params.input.c_str(),
|
__func__, split_params.input.c_str(),
|
||||||
split_file_name(split_params.output, strategy.i_split, strategy.n_split).c_str(),
|
first_split_path,
|
||||||
split_params.n_split_tensors);
|
split_params.n_split_tensors);
|
||||||
|
|
||||||
strategy.split_start();
|
strategy.split_start();
|
||||||
@ -298,7 +297,9 @@ static void gguf_merge(const split_params & split_params) {
|
|||||||
std::vector<ggml_context *> ctx_metas;
|
std::vector<ggml_context *> ctx_metas;
|
||||||
std::vector<gguf_context *> ctx_ggufs;
|
std::vector<gguf_context *> ctx_ggufs;
|
||||||
|
|
||||||
std::string split_prefix;
|
char split_path[PATH_MAX] = {0};
|
||||||
|
strncpy(split_path, split_params.input.c_str(), sizeof(split_path) - 1);
|
||||||
|
char split_prefix[PATH_MAX] = {0};
|
||||||
|
|
||||||
// First pass to find KV and tensors metadata
|
// First pass to find KV and tensors metadata
|
||||||
for (int i_split = 0; i_split < n_split; i_split++) {
|
for (int i_split = 0; i_split < n_split; i_split++) {
|
||||||
@ -309,89 +310,66 @@ static void gguf_merge(const split_params & split_params) {
|
|||||||
/*.ctx = */ &ctx_meta,
|
/*.ctx = */ &ctx_meta,
|
||||||
};
|
};
|
||||||
|
|
||||||
auto split_name = split_params.input;
|
|
||||||
if (i_split > 0) {
|
if (i_split > 0) {
|
||||||
split_name = split_file_name(split_prefix, i_split, n_split);
|
llama_split_path(split_path, sizeof(split_path), split_prefix, i_split, n_split);
|
||||||
}
|
}
|
||||||
fprintf(stderr, "%s: reading metadata %s ...", __func__, split_name.c_str());
|
fprintf(stderr, "%s: reading metadata %s ...", __func__, split_path);
|
||||||
|
|
||||||
auto * ctx_gguf = gguf_init_from_file(split_name.c_str(), params);
|
auto * ctx_gguf = gguf_init_from_file(split_path, params);
|
||||||
if (!ctx_gguf) {
|
if (!ctx_gguf) {
|
||||||
fprintf(stderr, "\n%s: failed to load input GGUF from %s\n", __func__, split_params.input.c_str());
|
fprintf(stderr, "\n%s: failed to load input GGUF from %s\n", __func__, split_params.input.c_str());
|
||||||
exit(1);
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
ctx_ggufs.push_back(ctx_gguf);
|
ctx_ggufs.push_back(ctx_gguf);
|
||||||
ctx_metas.push_back(ctx_meta);
|
ctx_metas.push_back(ctx_meta);
|
||||||
|
|
||||||
if (i_split == 0) {
|
if (i_split == 0) {
|
||||||
auto key_n_split = gguf_find_key(ctx_gguf, LLM_KV_GENERAL_SPLIT_N_SPLIT);
|
auto key_n_split = gguf_find_key(ctx_gguf, LLM_KV_SPLIT_COUNT);
|
||||||
if (key_n_split < 0) {
|
if (key_n_split < 0) {
|
||||||
fprintf(stderr,
|
fprintf(stderr,
|
||||||
"\n%s: input file does not contain %s metadata\n",
|
"\n%s: input file does not contain %s metadata\n",
|
||||||
__func__,
|
__func__,
|
||||||
LLM_KV_GENERAL_SPLIT_N_SPLIT);
|
LLM_KV_SPLIT_COUNT);
|
||||||
gguf_free(ctx_gguf);
|
gguf_free(ctx_gguf);
|
||||||
|
ggml_free(ctx_meta);
|
||||||
gguf_free(ctx_out);
|
gguf_free(ctx_out);
|
||||||
fout.close();
|
fout.close();
|
||||||
exit(1);
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
|
|
||||||
n_split = gguf_get_val_u8(ctx_gguf, key_n_split);
|
n_split = gguf_get_val_u16(ctx_gguf, key_n_split);
|
||||||
if (n_split < 1) {
|
if (n_split < 1) {
|
||||||
fprintf(stderr,
|
fprintf(stderr,
|
||||||
"\n%s: input file does not contain a valid split count %d\n",
|
"\n%s: input file does not contain a valid split count %d\n",
|
||||||
__func__,
|
__func__,
|
||||||
n_split);
|
n_split);
|
||||||
gguf_free(ctx_gguf);
|
gguf_free(ctx_gguf);
|
||||||
|
ggml_free(ctx_meta);
|
||||||
gguf_free(ctx_out);
|
gguf_free(ctx_out);
|
||||||
fout.close();
|
fout.close();
|
||||||
exit(1);
|
exit(EXIT_FAILURE);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify the file naming and extract split_prefix
|
||||||
|
if (!llama_split_prefix(split_prefix, sizeof (split_prefix), split_path, i_split, n_split)) {
|
||||||
|
fprintf(stderr, "\n%s: unexpected input file name: %s"
|
||||||
|
" i_split=%d"
|
||||||
|
" n_split=%d\n", __func__,
|
||||||
|
split_path, i_split, n_split);
|
||||||
|
gguf_free(ctx_gguf);
|
||||||
|
ggml_free(ctx_meta);
|
||||||
|
gguf_free(ctx_out);
|
||||||
|
fout.close();
|
||||||
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Do not trigger merge if we try to merge again the output
|
// Do not trigger merge if we try to merge again the output
|
||||||
gguf_set_val_u8(ctx_out, LLM_KV_GENERAL_SPLIT_N_SPLIT, 0);
|
gguf_set_val_u16(ctx_gguf, LLM_KV_SPLIT_COUNT, 0);
|
||||||
|
|
||||||
// Set metadata from the first split
|
// Set metadata from the first split
|
||||||
gguf_set_kv(ctx_out, ctx_gguf);
|
gguf_set_kv(ctx_out, ctx_gguf);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify the file naming
|
|
||||||
{
|
|
||||||
int i_split_file = 0;
|
|
||||||
int n_split_file = 0;
|
|
||||||
const char * i_split_format = "-00000-of-00000.gguf";
|
|
||||||
|
|
||||||
if (split_name.size() < strlen(i_split_format)) {
|
|
||||||
fprintf(stderr, "\n%s: unexpected input file name: %s\n", __func__, split_params.input.c_str());
|
|
||||||
for (auto * _ctx_gguf : ctx_ggufs) {
|
|
||||||
gguf_free(_ctx_gguf);
|
|
||||||
}
|
|
||||||
gguf_free(ctx_out);
|
|
||||||
fout.close();
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
split_prefix = split_name.substr(0, split_name.size() - strlen(i_split_format));
|
|
||||||
|
|
||||||
const char * split_name_c_str = split_name.c_str();
|
|
||||||
int n_part = sscanf(&split_name_c_str[0] + split_prefix.size(), "-%d-of-%d", &i_split_file, &n_split_file);
|
|
||||||
|
|
||||||
if (n_part != 2 || i_split_file - 1 != i_split || n_split_file != n_split) {
|
|
||||||
fprintf(stderr, "\n%s: unexpected input file name: %s"
|
|
||||||
" i_split=%d i_split_file=%d"
|
|
||||||
" n_split=%d n_split_file=%d\n", __func__,
|
|
||||||
split_params.input.c_str(),
|
|
||||||
i_split, i_split_file,
|
|
||||||
n_split, n_split_file);
|
|
||||||
for (auto * _ctx_gguf : ctx_ggufs) {
|
|
||||||
gguf_free(_ctx_gguf);
|
|
||||||
}
|
|
||||||
gguf_free(ctx_out);
|
|
||||||
fout.close();
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
auto n_tensors = gguf_get_n_tensors(ctx_gguf);
|
auto n_tensors = gguf_get_n_tensors(ctx_gguf);
|
||||||
for (int i_tensor = 0; i_tensor < n_tensors; i_tensor++) {
|
for (int i_tensor = 0; i_tensor < n_tensors; i_tensor++) {
|
||||||
const char * t_name = gguf_get_tensor_name(ctx_gguf, i_tensor);
|
const char * t_name = gguf_get_tensor_name(ctx_gguf, i_tensor);
|
||||||
@ -411,18 +389,19 @@ static void gguf_merge(const split_params & split_params) {
|
|||||||
|
|
||||||
// Write tensors data
|
// Write tensors data
|
||||||
for (int i_split = 0; i_split < n_split; i_split++) {
|
for (int i_split = 0; i_split < n_split; i_split++) {
|
||||||
auto split_name = split_file_name(split_prefix, i_split, n_split);
|
llama_split_path(split_path, sizeof(split_path), split_prefix, i_split, n_split);
|
||||||
std::ifstream f_input(split_name.c_str(), std::ios::binary);
|
std::ifstream f_input(split_path, std::ios::binary);
|
||||||
if (!f_input.is_open()) {
|
if (!f_input.is_open()) {
|
||||||
fprintf(stderr, "%s: failed to open input GGUF from %s\n", __func__, split_name.c_str());
|
fprintf(stderr, "%s: failed to open input GGUF from %s\n", __func__, split_path);
|
||||||
for (auto * _ctx_gguf : ctx_ggufs) {
|
for (uint32_t i = 0; i < ctx_ggufs.size(); i++) {
|
||||||
gguf_free(_ctx_gguf);
|
gguf_free(ctx_ggufs[i]);
|
||||||
|
ggml_free(ctx_metas[i]);
|
||||||
}
|
}
|
||||||
gguf_free(ctx_out);
|
gguf_free(ctx_out);
|
||||||
fout.close();
|
fout.close();
|
||||||
exit(1);
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
fprintf(stderr, "%s: writing tensors %s ...", __func__, split_name.c_str());
|
fprintf(stderr, "%s: writing tensors %s ...", __func__, split_path);
|
||||||
|
|
||||||
auto * ctx_gguf = ctx_ggufs[i_split];
|
auto * ctx_gguf = ctx_ggufs[i_split];
|
||||||
auto * ctx_meta = ctx_metas[i_split];
|
auto * ctx_meta = ctx_metas[i_split];
|
||||||
@ -481,8 +460,8 @@ int main(int argc, const char ** argv) {
|
|||||||
break;
|
break;
|
||||||
case SPLIT_OP_MERGE: gguf_merge(params);
|
case SPLIT_OP_MERGE: gguf_merge(params);
|
||||||
break;
|
break;
|
||||||
default:split_print_usage(argv[0]);
|
default: split_print_usage(argv[0]);
|
||||||
exit(1);
|
exit(EXIT_FAILURE);
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -22,7 +22,7 @@ For faster computation, make sure to use GPU offloading via the `-ngl` argument
|
|||||||
## Example
|
## Example
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
LLAMA_CUBLAS=1 make -j
|
LLAMA_CUDA=1 make -j
|
||||||
|
|
||||||
# generate importance matrix (imatrix.dat)
|
# generate importance matrix (imatrix.dat)
|
||||||
./imatrix -m ggml-model-f16.gguf -f train-data.txt -ngl 99
|
./imatrix -m ggml-model-f16.gguf -f train-data.txt -ngl 99
|
||||||
|
@ -50,29 +50,31 @@ private:
|
|||||||
void keep_imatrix(int ncall) const;
|
void keep_imatrix(int ncall) const;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// remove any prefix and suffixes from the name
|
||||||
|
// CUDA0#blk.0.attn_k.weight#0 => blk.0.attn_k.weight
|
||||||
|
static std::string filter_tensor_name(const char * name) {
|
||||||
|
std::string wname;
|
||||||
|
const char * p = strchr(name, '#');
|
||||||
|
if (p != NULL) {
|
||||||
|
p = p + 1;
|
||||||
|
const char * q = strchr(p, '#');
|
||||||
|
if (q != NULL) {
|
||||||
|
wname = std::string(p, q - p);
|
||||||
|
} else {
|
||||||
|
wname = p;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
wname = name;
|
||||||
|
}
|
||||||
|
return wname;
|
||||||
|
}
|
||||||
|
|
||||||
bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) {
|
bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) {
|
||||||
GGML_UNUSED(user_data);
|
GGML_UNUSED(user_data);
|
||||||
|
|
||||||
const struct ggml_tensor * src0 = t->src[0];
|
const struct ggml_tensor * src0 = t->src[0];
|
||||||
const struct ggml_tensor * src1 = t->src[1];
|
const struct ggml_tensor * src1 = t->src[1];
|
||||||
|
std::string wname = filter_tensor_name(src0->name);
|
||||||
std::string wname;
|
|
||||||
{
|
|
||||||
// remove any prefix and suffixes from the name
|
|
||||||
// CUDA0#blk.0.attn_k.weight#0 => blk.0.attn_k.weight
|
|
||||||
const char * p = strchr(src0->name, '#');
|
|
||||||
if (p != NULL) {
|
|
||||||
p = p + 1;
|
|
||||||
const char * q = strchr(p, '#');
|
|
||||||
if (q != NULL) {
|
|
||||||
wname = std::string(p, q - p);
|
|
||||||
} else {
|
|
||||||
wname = p;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
wname = src0->name;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// when ask is true, the scheduler wants to know if we are interested in data from this tensor
|
// when ask is true, the scheduler wants to know if we are interested in data from this tensor
|
||||||
// if we return true, a follow-up call will be made with ask=false in which we can do the actual collection
|
// if we return true, a follow-up call will be made with ask=false in which we can do the actual collection
|
||||||
@ -112,6 +114,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
|
|||||||
// this is necessary to guarantee equal number of "ncall" for each tensor
|
// this is necessary to guarantee equal number of "ncall" for each tensor
|
||||||
for (int ex = 0; ex < n_as; ++ex) {
|
for (int ex = 0; ex < n_as; ++ex) {
|
||||||
src0 = t->src[2 + ex];
|
src0 = t->src[2 + ex];
|
||||||
|
wname = filter_tensor_name(src0->name);
|
||||||
auto& e = m_stats[wname];
|
auto& e = m_stats[wname];
|
||||||
if (e.values.empty()) {
|
if (e.values.empty()) {
|
||||||
e.values.resize(src1->ne[0], 0);
|
e.values.resize(src1->ne[0], 0);
|
||||||
@ -421,6 +424,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params, bool
|
|||||||
tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
|
tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: use batch.logits to save computations instead of relying on logits_all == true
|
||||||
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
|
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
|
||||||
fprintf(stderr, "%s : failed to eval\n", __func__);
|
fprintf(stderr, "%s : failed to eval\n", __func__);
|
||||||
return false;
|
return false;
|
||||||
|
@ -113,7 +113,7 @@ static std::string get_cpu_info() {
|
|||||||
|
|
||||||
static std::string get_gpu_info() {
|
static std::string get_gpu_info() {
|
||||||
std::string id;
|
std::string id;
|
||||||
#ifdef GGML_USE_CUBLAS
|
#ifdef GGML_USE_CUDA
|
||||||
int count = ggml_backend_cuda_get_device_count();
|
int count = ggml_backend_cuda_get_device_count();
|
||||||
for (int i = 0; i < count; i++) {
|
for (int i = 0; i < count; i++) {
|
||||||
char buf[128];
|
char buf[128];
|
||||||
@ -808,7 +808,7 @@ struct test {
|
|||||||
|
|
||||||
const std::string test::build_commit = LLAMA_COMMIT;
|
const std::string test::build_commit = LLAMA_COMMIT;
|
||||||
const int test::build_number = LLAMA_BUILD_NUMBER;
|
const int test::build_number = LLAMA_BUILD_NUMBER;
|
||||||
const bool test::cuda = !!ggml_cpu_has_cublas();
|
const bool test::cuda = !!ggml_cpu_has_cuda();
|
||||||
const bool test::opencl = !!ggml_cpu_has_clblast();
|
const bool test::opencl = !!ggml_cpu_has_clblast();
|
||||||
const bool test::vulkan = !!ggml_cpu_has_vulkan();
|
const bool test::vulkan = !!ggml_cpu_has_vulkan();
|
||||||
const bool test::kompute = !!ggml_cpu_has_kompute();
|
const bool test::kompute = !!ggml_cpu_has_kompute();
|
||||||
|
@ -124,7 +124,7 @@ llama_print_timings: total time = 34570.79 ms
|
|||||||
## Orin compile and run
|
## Orin compile and run
|
||||||
### compile
|
### compile
|
||||||
```sh
|
```sh
|
||||||
make LLAMA_CUBLAS=1 CUDA_DOCKER_ARCH=sm_87 LLAMA_CUDA_F16=1 -j 32
|
make LLAMA_CUDA=1 CUDA_DOCKER_ARCH=sm_87 LLAMA_CUDA_F16=1 -j 32
|
||||||
```
|
```
|
||||||
|
|
||||||
### run on Orin
|
### run on Orin
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
#include "ggml-alloc.h"
|
#include "ggml-alloc.h"
|
||||||
#include "ggml-backend.h"
|
#include "ggml-backend.h"
|
||||||
|
|
||||||
#ifdef GGML_USE_CUBLAS
|
#ifdef GGML_USE_CUDA
|
||||||
#include "ggml-cuda.h"
|
#include "ggml-cuda.h"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -968,7 +968,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef GGML_USE_CUBLAS
|
#ifdef GGML_USE_CUDA
|
||||||
new_clip->backend = ggml_backend_cuda_init(0);
|
new_clip->backend = ggml_backend_cuda_init(0);
|
||||||
printf("%s: CLIP using CUDA backend\n", __func__);
|
printf("%s: CLIP using CUDA backend\n", __func__);
|
||||||
#endif
|
#endif
|
||||||
|
@ -3,3 +3,21 @@ add_executable(${TARGET} lookup.cpp)
|
|||||||
install(TARGETS ${TARGET} RUNTIME)
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||||
|
|
||||||
|
set(TARGET lookup-create)
|
||||||
|
add_executable(${TARGET} lookup-create.cpp)
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
|
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||||
|
|
||||||
|
set(TARGET lookup-merge)
|
||||||
|
add_executable(${TARGET} lookup-merge.cpp)
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
|
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||||
|
|
||||||
|
set(TARGET lookup-stats)
|
||||||
|
add_executable(${TARGET} lookup-stats.cpp)
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
|
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||||
|
43
examples/lookup/lookup-create.cpp
Normal file
43
examples/lookup/lookup-create.cpp
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
#include "ggml.h"
|
||||||
|
#include "llama.h"
|
||||||
|
#include "common.h"
|
||||||
|
#include "ngram-cache.h"
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
#include <fstream>
|
||||||
|
#include <iostream>
|
||||||
|
#include <string>
|
||||||
|
#include <unordered_map>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
int main(int argc, char ** argv){
|
||||||
|
gpt_params params;
|
||||||
|
|
||||||
|
if (!gpt_params_parse(argc, argv, params)) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
// init llama.cpp
|
||||||
|
llama_backend_init();
|
||||||
|
llama_numa_init(params.numa);
|
||||||
|
|
||||||
|
llama_model * model = NULL;
|
||||||
|
llama_context * ctx = NULL;
|
||||||
|
|
||||||
|
// load the model
|
||||||
|
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
||||||
|
GGML_ASSERT(model != nullptr);
|
||||||
|
|
||||||
|
// tokenize the prompt
|
||||||
|
const bool add_bos = llama_should_add_bos_token(model);
|
||||||
|
|
||||||
|
std::vector<llama_token> inp;
|
||||||
|
inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
|
||||||
|
fprintf(stderr, "%s: tokenization done\n", __func__);
|
||||||
|
|
||||||
|
|
||||||
|
llama_ngram_cache ngram_cache;
|
||||||
|
llama_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, inp.size(), true);
|
||||||
|
fprintf(stderr, "%s: hashing done, writing file to %s\n", __func__, params.lookup_cache_static.c_str());
|
||||||
|
|
||||||
|
llama_ngram_cache_save(ngram_cache, params.lookup_cache_static);
|
||||||
|
}
|
47
examples/lookup/lookup-merge.cpp
Normal file
47
examples/lookup/lookup-merge.cpp
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
#include "ggml.h"
|
||||||
|
#include "llama.h"
|
||||||
|
#include "common.h"
|
||||||
|
#include "ngram-cache.h"
|
||||||
|
|
||||||
|
#include <cstdint>
|
||||||
|
#include <cstdio>
|
||||||
|
#include <fstream>
|
||||||
|
#include <iostream>
|
||||||
|
#include <string>
|
||||||
|
#include <unordered_map>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
static void print_usage() {
|
||||||
|
fprintf(stderr, "Merges multiple lookup cache files into a single one.\n");
|
||||||
|
fprintf(stderr, "Usage: lookup-merge [--help] lookup_part_1.bin lookup_part_2.bin ... lookup_merged.bin\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char ** argv){
|
||||||
|
if (argc < 3) {
|
||||||
|
print_usage();
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<std::string> args;
|
||||||
|
args.resize(argc-1);
|
||||||
|
for (int i = 0; i < argc-1; ++i) {
|
||||||
|
args[i] = argv[i+1];
|
||||||
|
if (args[i] == "-h" || args[i] == "--help") {
|
||||||
|
print_usage();
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fprintf(stderr, "lookup-merge: loading file %s\n", args[0].c_str());
|
||||||
|
llama_ngram_cache ngram_cache_merged = llama_ngram_cache_load(args[0]);
|
||||||
|
|
||||||
|
for (size_t i = 1; i < args.size()-1; ++i) {
|
||||||
|
fprintf(stderr, "lookup-merge: loading file %s\n", args[i].c_str());
|
||||||
|
llama_ngram_cache ngram_cache = llama_ngram_cache_load(args[i]);
|
||||||
|
|
||||||
|
llama_ngram_cache_merge(ngram_cache_merged, ngram_cache);
|
||||||
|
}
|
||||||
|
|
||||||
|
fprintf(stderr, "lookup-merge: saving file %s\n", args.back().c_str());
|
||||||
|
llama_ngram_cache_save(ngram_cache_merged, args.back());
|
||||||
|
}
|
163
examples/lookup/lookup-stats.cpp
Normal file
163
examples/lookup/lookup-stats.cpp
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
#include "ggml.h"
|
||||||
|
#include "common.h"
|
||||||
|
#include "llama.h"
|
||||||
|
#include "log.h"
|
||||||
|
#include "ngram-cache.h"
|
||||||
|
|
||||||
|
#include <cmath>
|
||||||
|
#include <cstdint>
|
||||||
|
#include <cstdio>
|
||||||
|
#include <fstream>
|
||||||
|
#include <string>
|
||||||
|
#include <vector>
|
||||||
|
#include <unordered_map>
|
||||||
|
|
||||||
|
int main(int argc, char ** argv){
|
||||||
|
gpt_params params;
|
||||||
|
|
||||||
|
if (!gpt_params_parse(argc, argv, params)) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
const int n_draft = params.n_draft;
|
||||||
|
|
||||||
|
// init llama.cpp
|
||||||
|
llama_backend_init();
|
||||||
|
llama_numa_init(params.numa);
|
||||||
|
|
||||||
|
llama_model * model = NULL;
|
||||||
|
llama_context * ctx = NULL;
|
||||||
|
|
||||||
|
// load the model
|
||||||
|
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
||||||
|
llama_set_rng_seed(ctx, params.seed);
|
||||||
|
GGML_ASSERT(llama_n_vocab(model) < (1 << 16));
|
||||||
|
|
||||||
|
// tokenize the prompt
|
||||||
|
const bool add_bos = llama_should_add_bos_token(model);
|
||||||
|
LOG("add_bos tgt: %d\n", add_bos);
|
||||||
|
|
||||||
|
std::vector<llama_token> inp;
|
||||||
|
inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
|
||||||
|
|
||||||
|
llama_ngram_cache ngram_cache_context;
|
||||||
|
llama_ngram_cache ngram_cache_dynamic;
|
||||||
|
llama_ngram_cache ngram_cache_static;
|
||||||
|
int64_t t_draft_flat_us = 0;
|
||||||
|
int64_t t_draft_us = 0;
|
||||||
|
|
||||||
|
{
|
||||||
|
const int64_t t_start_draft_us = ggml_time_us();
|
||||||
|
|
||||||
|
if (!params.lookup_cache_static.empty()) {
|
||||||
|
try {
|
||||||
|
ngram_cache_static = llama_ngram_cache_load(params.lookup_cache_static);
|
||||||
|
} catch (std::ifstream::failure const &) {
|
||||||
|
fprintf(stderr, "error: failed to open static lookup cache: %s", params.lookup_cache_static.c_str());
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!params.lookup_cache_dynamic.empty()) {
|
||||||
|
try {
|
||||||
|
ngram_cache_dynamic = llama_ngram_cache_load(params.lookup_cache_dynamic);
|
||||||
|
} catch (std::ifstream::failure const &) {} // if the file does not exist it will simply be created at the end of the program
|
||||||
|
}
|
||||||
|
|
||||||
|
t_draft_flat_us += ggml_time_us() - t_start_draft_us;
|
||||||
|
}
|
||||||
|
|
||||||
|
const int n_input = inp.size();
|
||||||
|
const int n_ctx = params.n_ctx;
|
||||||
|
|
||||||
|
int n_drafted = 0;
|
||||||
|
int n_accept = 0;
|
||||||
|
|
||||||
|
const int64_t t_start_ms = ggml_time_ms();
|
||||||
|
|
||||||
|
// Iterate over input tokens in chunks of size n_ctx.
|
||||||
|
// Each chunk is treated as if a sequential generation but with pre-determined tokens to ensure reproducibility.
|
||||||
|
for (int i_start = 0; i_start + n_ctx < n_input; i_start += n_ctx) {
|
||||||
|
const std::vector<llama_token> inp_slice(inp.begin() + i_start, inp.begin() + i_start + n_ctx);
|
||||||
|
std::vector<llama_token> pseudo_output;
|
||||||
|
pseudo_output.push_back(inp_slice[0]);
|
||||||
|
|
||||||
|
while ((int) pseudo_output.size() < n_ctx) {
|
||||||
|
// Simulate drafting and decoding from draft:
|
||||||
|
std::vector<llama_token> draft;
|
||||||
|
draft.push_back(pseudo_output.back());
|
||||||
|
|
||||||
|
{
|
||||||
|
const int64_t t_start_draft_us = ggml_time_us();
|
||||||
|
llama_ngram_cache_draft(pseudo_output, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
|
||||||
|
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||||
|
}
|
||||||
|
|
||||||
|
n_drafted += draft.size() - 1;
|
||||||
|
|
||||||
|
for (size_t j = 1; j < draft.size() && (int) pseudo_output.size() < n_ctx; ++j) {
|
||||||
|
const llama_token ground_truth = inp_slice[pseudo_output.size()];
|
||||||
|
const llama_token drafted = draft[j];
|
||||||
|
|
||||||
|
if (ground_truth != drafted) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
++n_accept;
|
||||||
|
pseudo_output.push_back(ground_truth);
|
||||||
|
|
||||||
|
{
|
||||||
|
const int64_t t_start_draft_us = ggml_time_us();
|
||||||
|
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
|
||||||
|
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// After each simulated batch decoding simulate the sampling of a single token:
|
||||||
|
if ((int) pseudo_output.size() < n_ctx) {
|
||||||
|
pseudo_output.push_back(inp_slice[pseudo_output.size()]);
|
||||||
|
{
|
||||||
|
const int64_t t_start_draft_us = ggml_time_us();
|
||||||
|
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
|
||||||
|
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
draft.erase(draft.begin());
|
||||||
|
|
||||||
|
}
|
||||||
|
if (i_start > 0 && i_start / 100000 != (i_start - n_ctx) / 100000) {
|
||||||
|
const int64_t t_now_ms = ggml_time_ms();
|
||||||
|
const int64_t eta_ms = (n_input - i_start) * (t_now_ms - t_start_ms) / i_start;
|
||||||
|
const int64_t eta_min = eta_ms / (60*1000);
|
||||||
|
const int64_t eta_s = (eta_ms - 60*1000*eta_min) / 1000;
|
||||||
|
|
||||||
|
LOG_TEE("lookup-stats: %d/%d done, ETA: %02" PRId64 ":%02" PRId64 "\n", i_start, n_input, eta_min, eta_s);
|
||||||
|
}
|
||||||
|
|
||||||
|
// After each chunk, update the dynamic ngram cache with the context ngram cache:
|
||||||
|
llama_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
|
||||||
|
ngram_cache_context.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_TEE("\n");
|
||||||
|
|
||||||
|
LOG_TEE("\n");
|
||||||
|
LOG_TEE("n_draft = %d\n", n_draft);
|
||||||
|
LOG_TEE("n_predict = %d\n", n_input - n_input % n_ctx);
|
||||||
|
LOG_TEE("n_drafted = %d\n", n_drafted);
|
||||||
|
LOG_TEE("t_draft_flat = %.2f ms\n", t_draft_flat_us*1e-3);
|
||||||
|
LOG_TEE("t_draft = %.2f ms, %.2f us per token, %.2f tokens per second\n",
|
||||||
|
t_draft_us*1e-3, 1.0f*t_draft_us/n_drafted, n_drafted/(1e-6*t_draft_us));
|
||||||
|
LOG_TEE("n_accept = %d\n", n_accept);
|
||||||
|
LOG_TEE("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);
|
||||||
|
|
||||||
|
llama_free(ctx);
|
||||||
|
llama_free_model(model);
|
||||||
|
|
||||||
|
llama_backend_free();
|
||||||
|
|
||||||
|
fprintf(stderr, "\n\n");
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
@ -1,12 +1,15 @@
|
|||||||
#include "common.h"
|
|
||||||
#include "ggml.h"
|
#include "ggml.h"
|
||||||
#include "llama.h"
|
#include "llama.h"
|
||||||
|
#include "common.h"
|
||||||
|
#include "ngram-cache.h"
|
||||||
|
|
||||||
#include <cmath>
|
#include <cmath>
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
#include <cstdio>
|
#include <cstdio>
|
||||||
|
#include <fstream>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
#include <unordered_map>
|
||||||
|
|
||||||
int main(int argc, char ** argv){
|
int main(int argc, char ** argv){
|
||||||
gpt_params params;
|
gpt_params params;
|
||||||
@ -15,11 +18,7 @@ int main(int argc, char ** argv){
|
|||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// max/min n-grams size to search for in prompt
|
// max. number of additional tokens to draft if match is found
|
||||||
const int ngram_max = 4;
|
|
||||||
const int ngram_min = 1;
|
|
||||||
|
|
||||||
// length of the candidate / draft sequence, if match is found
|
|
||||||
const int n_draft = params.n_draft;
|
const int n_draft = params.n_draft;
|
||||||
|
|
||||||
const bool dump_kv_cache = params.dump_kv_cache;
|
const bool dump_kv_cache = params.dump_kv_cache;
|
||||||
@ -39,6 +38,8 @@ int main(int argc, char ** argv){
|
|||||||
|
|
||||||
// load the model
|
// load the model
|
||||||
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
||||||
|
llama_set_rng_seed(ctx, params.seed);
|
||||||
|
GGML_ASSERT(llama_n_vocab(model) < (1 << 16));
|
||||||
|
|
||||||
// tokenize the prompt
|
// tokenize the prompt
|
||||||
const bool add_bos = llama_should_add_bos_token(model);
|
const bool add_bos = llama_should_add_bos_token(model);
|
||||||
@ -47,6 +48,35 @@ int main(int argc, char ** argv){
|
|||||||
std::vector<llama_token> inp;
|
std::vector<llama_token> inp;
|
||||||
inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
|
inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
|
||||||
|
|
||||||
|
llama_ngram_cache ngram_cache_context;
|
||||||
|
llama_ngram_cache ngram_cache_dynamic;
|
||||||
|
llama_ngram_cache ngram_cache_static;
|
||||||
|
int64_t t_draft_flat_us = 0;
|
||||||
|
int64_t t_draft_us = 0;
|
||||||
|
|
||||||
|
{
|
||||||
|
// Fill up context ngram cache with tokens from user input:
|
||||||
|
const int64_t t_start_draft_us = ggml_time_us();
|
||||||
|
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, inp.size(), false);
|
||||||
|
|
||||||
|
if (!params.lookup_cache_static.empty()) {
|
||||||
|
try {
|
||||||
|
ngram_cache_static = llama_ngram_cache_load(params.lookup_cache_static);
|
||||||
|
} catch (std::ifstream::failure const &) {
|
||||||
|
fprintf(stderr, "error: failed to open static lookup cache: %s", params.lookup_cache_static.c_str());
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!params.lookup_cache_dynamic.empty()) {
|
||||||
|
try {
|
||||||
|
ngram_cache_dynamic = llama_ngram_cache_load(params.lookup_cache_dynamic);
|
||||||
|
} catch (std::ifstream::failure const &) {} // if the file does not exist it will simply be created at the end of the program
|
||||||
|
}
|
||||||
|
|
||||||
|
t_draft_flat_us += ggml_time_us() - t_start_draft_us;
|
||||||
|
}
|
||||||
|
|
||||||
const int max_context_size = llama_n_ctx(ctx);
|
const int max_context_size = llama_n_ctx(ctx);
|
||||||
const int max_tokens_list_size = max_context_size - 4;
|
const int max_tokens_list_size = max_context_size - 4;
|
||||||
|
|
||||||
@ -76,8 +106,6 @@ int main(int argc, char ** argv){
|
|||||||
int n_drafted = 0;
|
int n_drafted = 0;
|
||||||
int n_accept = 0;
|
int n_accept = 0;
|
||||||
|
|
||||||
int64_t t_draft_us = 0;
|
|
||||||
|
|
||||||
int n_past = inp.size();
|
int n_past = inp.size();
|
||||||
|
|
||||||
bool has_eos = false;
|
bool has_eos = false;
|
||||||
@ -129,6 +157,12 @@ int main(int argc, char ** argv){
|
|||||||
++n_past;
|
++n_past;
|
||||||
++i_dft;
|
++i_dft;
|
||||||
inp.push_back(id);
|
inp.push_back(id);
|
||||||
|
{
|
||||||
|
// Update context ngram cache with the newly accepted token:
|
||||||
|
const int64_t t_start_draft_us = ggml_time_us();
|
||||||
|
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);
|
||||||
|
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||||
|
}
|
||||||
|
|
||||||
if (params.use_color) {
|
if (params.use_color) {
|
||||||
// color accepted draft token
|
// color accepted draft token
|
||||||
@ -149,6 +183,12 @@ int main(int argc, char ** argv){
|
|||||||
draft.clear();
|
draft.clear();
|
||||||
draft.push_back(id);
|
draft.push_back(id);
|
||||||
inp.push_back(id);
|
inp.push_back(id);
|
||||||
|
{
|
||||||
|
// Update context ngram cache with the newly accepted token:
|
||||||
|
const int64_t t_start_draft_us = ggml_time_us();
|
||||||
|
llama_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);
|
||||||
|
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -163,44 +203,19 @@ int main(int argc, char ** argv){
|
|||||||
llama_batch_clear(batch_tgt);
|
llama_batch_clear(batch_tgt);
|
||||||
llama_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);
|
llama_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);
|
||||||
|
|
||||||
// generate n_pred tokens through prompt lookup
|
// Draft already contains a single token sampled from the model:
|
||||||
auto prompt_lookup = [&]() -> void {
|
GGML_ASSERT(draft.size() == 1);
|
||||||
const int inp_size = inp.size();
|
GGML_ASSERT(draft[0] == inp.back());
|
||||||
for (int ngram_size = ngram_max ; ngram_size > ngram_min; --ngram_size){
|
|
||||||
const llama_token * ngram = &inp[inp_size - ngram_size];
|
|
||||||
|
|
||||||
for (int i = 0; i <= (int) inp_size - (ngram_size * 2); ++i) {
|
|
||||||
bool match = true;
|
|
||||||
for (int j = 0; j < ngram_size; ++j) {
|
|
||||||
if (inp[i + j] != ngram[j]) {
|
|
||||||
match = false;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (match) {
|
|
||||||
const int startIdx = i + ngram_size;
|
|
||||||
const int endIdx = startIdx + n_draft;
|
|
||||||
if (endIdx < inp_size) {
|
|
||||||
for (int j = startIdx; j < endIdx; ++j) {
|
|
||||||
LOG(" - draft candidate %d: %d\n", j, inp[j]);
|
|
||||||
draft.push_back(inp[j]);
|
|
||||||
llama_batch_add(batch_tgt, inp[j], n_past + (j - startIdx) + 1, { 0 }, true);
|
|
||||||
++n_drafted;
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
const int64_t t_start_draft_us = ggml_time_us();
|
const int64_t t_start_draft_us = ggml_time_us();
|
||||||
|
|
||||||
prompt_lookup();
|
llama_ngram_cache_draft(inp, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
|
||||||
|
|
||||||
|
for (size_t i = 1; i < draft.size(); ++i) {
|
||||||
|
llama_batch_add(batch_tgt, draft[i], n_past + i, { 0 }, true);
|
||||||
|
}
|
||||||
|
|
||||||
t_draft_us += ggml_time_us() - t_start_draft_us;
|
t_draft_us += ggml_time_us() - t_start_draft_us;
|
||||||
|
n_drafted += draft.size() - 1;
|
||||||
|
|
||||||
llama_decode(ctx, batch_tgt);
|
llama_decode(ctx, batch_tgt);
|
||||||
++n_past;
|
++n_past;
|
||||||
@ -210,19 +225,24 @@ int main(int argc, char ** argv){
|
|||||||
|
|
||||||
auto t_dec_end = ggml_time_us();
|
auto t_dec_end = ggml_time_us();
|
||||||
|
|
||||||
|
// Update dynamic ngram cache with context ngram cache and save it to disk:
|
||||||
|
llama_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
|
||||||
|
llama_ngram_cache_save(ngram_cache_dynamic, params.lookup_cache_dynamic);
|
||||||
|
|
||||||
LOG_TEE("\n\n");
|
LOG_TEE("\n\n");
|
||||||
|
|
||||||
LOG_TEE("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
|
LOG_TEE("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
|
||||||
LOG_TEE("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f));
|
LOG_TEE("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f));
|
||||||
|
|
||||||
LOG_TEE("\n");
|
LOG_TEE("\n");
|
||||||
LOG_TEE("n_draft = %d\n", n_draft);
|
LOG_TEE("n_draft = %d\n", n_draft);
|
||||||
LOG_TEE("n_predict = %d\n", n_predict);
|
LOG_TEE("n_predict = %d\n", n_predict);
|
||||||
LOG_TEE("n_drafted = %d\n", n_drafted);
|
LOG_TEE("n_drafted = %d\n", n_drafted);
|
||||||
LOG_TEE("t_draft = %.2f ms, %.2f us per token, %.2f tokens per second\n",
|
LOG_TEE("t_draft_flat = %.2f ms\n", t_draft_flat_us*1e-3);
|
||||||
|
LOG_TEE("t_draft = %.2f ms, %.2f us per token, %.2f tokens per second\n",
|
||||||
t_draft_us*1e-3, 1.0f*t_draft_us/n_drafted, n_drafted/(1e-6*t_draft_us));
|
t_draft_us*1e-3, 1.0f*t_draft_us/n_drafted, n_drafted/(1e-6*t_draft_us));
|
||||||
LOG_TEE("n_accept = %d\n", n_accept);
|
LOG_TEE("n_accept = %d\n", n_accept);
|
||||||
LOG_TEE("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);
|
LOG_TEE("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);
|
||||||
|
|
||||||
LOG_TEE("\ntarget:\n");
|
LOG_TEE("\ntarget:\n");
|
||||||
llama_print_timings(ctx);
|
llama_print_timings(ctx);
|
||||||
|
@ -8,7 +8,7 @@ Because this example is "outside of the source tree", it is important to first b
|
|||||||
|
|
||||||
### Considerations
|
### Considerations
|
||||||
|
|
||||||
When hardware acceleration libraries are used (e.g. CUBlas, Metal, CLBlast, etc.), CMake must be able to locate the associated CMake package. In the example below, when building _main-cmake-pkg_ notice the `CMAKE_PREFIX_PATH` includes the Llama CMake package location _in addition to_ the CLBlast package—which was used when compiling _llama.cpp_.
|
When hardware acceleration libraries are used (e.g. CUDA, Metal, CLBlast, etc.), CMake must be able to locate the associated CMake package. In the example below, when building _main-cmake-pkg_ notice the `CMAKE_PREFIX_PATH` includes the Llama CMake package location _in addition to_ the CLBlast package—which was used when compiling _llama.cpp_.
|
||||||
|
|
||||||
### Build llama.cpp and install to C:\LlamaCPP directory
|
### Build llama.cpp and install to C:\LlamaCPP directory
|
||||||
|
|
||||||
|
@ -316,8 +316,8 @@ These options provide extra functionality and customization when running the LLa
|
|||||||
|
|
||||||
- `-h, --help`: Display a help message showing all available options and their default values. This is particularly useful for checking the latest options and default values, as they can change frequently, and the information in this document may become outdated.
|
- `-h, --help`: Display a help message showing all available options and their default values. This is particularly useful for checking the latest options and default values, as they can change frequently, and the information in this document may become outdated.
|
||||||
- `--verbose-prompt`: Print the prompt before generating text.
|
- `--verbose-prompt`: Print the prompt before generating text.
|
||||||
- `-ngl N, --n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
|
- `-ngl N, --n-gpu-layers N`: When compiled with GPU support, this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
|
||||||
- `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used. Requires cuBLAS.
|
- `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used.
|
||||||
- `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance. Requires cuBLAS.
|
- `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance.
|
||||||
- `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
|
- `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
|
||||||
- `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
|
- `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
|
||||||
|
@ -132,7 +132,6 @@ int main(int argc, char ** argv) {
|
|||||||
llama_context * ctx = NULL;
|
llama_context * ctx = NULL;
|
||||||
|
|
||||||
// load the target model
|
// load the target model
|
||||||
params.logits_all = true;
|
|
||||||
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
||||||
|
|
||||||
// load the prompts from an external file if there are any
|
// load the prompts from an external file if there are any
|
||||||
|
@ -380,6 +380,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
|
|||||||
const int batch_size = std::min(end - batch_start, n_batch);
|
const int batch_size = std::min(end - batch_start, n_batch);
|
||||||
|
|
||||||
//fprintf(stderr, " Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch);
|
//fprintf(stderr, " Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch);
|
||||||
|
// TODO: use llama_batch.logits instead of relying on logits_all == true
|
||||||
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
|
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
|
||||||
//fprintf(stderr, "%s : failed to eval\n", __func__);
|
//fprintf(stderr, "%s : failed to eval\n", __func__);
|
||||||
return {tokens, -1, logit_history, prob_history};
|
return {tokens, -1, logit_history, prob_history};
|
||||||
@ -552,6 +553,8 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
|||||||
const int batch_start = start + j * n_batch;
|
const int batch_start = start + j * n_batch;
|
||||||
const int batch_size = std::min(end - batch_start, n_batch);
|
const int batch_size = std::min(end - batch_start, n_batch);
|
||||||
|
|
||||||
|
int n_outputs = 0;
|
||||||
|
|
||||||
batch.n_tokens = 0;
|
batch.n_tokens = 0;
|
||||||
for (int seq = 0; seq < n_seq_batch; seq++) {
|
for (int seq = 0; seq < n_seq_batch; seq++) {
|
||||||
int seq_start = batch_start + seq*n_ctx;
|
int seq_start = batch_start + seq*n_ctx;
|
||||||
@ -566,11 +569,13 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
|||||||
|
|
||||||
for (int k = 0; k < batch_size; ++k) {
|
for (int k = 0; k < batch_size; ++k) {
|
||||||
const int idx = seq*n_ctx + k;
|
const int idx = seq*n_ctx + k;
|
||||||
batch.token[idx] = tokens[seq_start + k];
|
batch.token [idx] = tokens[seq_start + k];
|
||||||
batch.pos[idx] = j*n_batch + k;
|
batch.pos [idx] = j*n_batch + k;
|
||||||
batch.n_seq_id[idx] = 1;
|
batch.n_seq_id[idx] = 1;
|
||||||
batch.seq_id[idx][0] = seq;
|
batch.seq_id [idx][0] = seq;
|
||||||
batch.logits[idx] = batch.pos[idx] >= first ? 1 : 0;
|
batch.logits [idx] = batch.pos[idx] >= first ? 1 : 0;
|
||||||
|
|
||||||
|
n_outputs += batch.logits[idx] != 0;
|
||||||
}
|
}
|
||||||
batch.n_tokens += batch_size;
|
batch.n_tokens += batch_size;
|
||||||
|
|
||||||
@ -583,9 +588,9 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
|||||||
return {tokens, -1, logit_history, prob_history};
|
return {tokens, -1, logit_history, prob_history};
|
||||||
}
|
}
|
||||||
|
|
||||||
if (num_batches > 1) {
|
if (num_batches > 1 && n_outputs > 0) {
|
||||||
const auto * batch_logits = llama_get_logits(ctx);
|
const auto * batch_logits = llama_get_logits(ctx);
|
||||||
logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
|
logits.insert(logits.end(), batch_logits, batch_logits + n_outputs * n_vocab);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -604,14 +609,15 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (int seq = 0; seq < n_seq_batch; seq++) {
|
for (int seq = 0; seq < n_seq_batch; seq++) {
|
||||||
const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits_ith(ctx, seq*n_ctx);
|
const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits_ith(ctx, seq*n_ctx + first);
|
||||||
|
|
||||||
llama_token * tokens_data = tokens.data() + start + seq*n_ctx + first;
|
llama_token * tokens_data = tokens.data() + start + seq*n_ctx + first;
|
||||||
if (!params.logits_file.empty()) {
|
if (!params.logits_file.empty()) {
|
||||||
process_logits(logits_stream, n_vocab, all_logits + first*n_vocab,
|
process_logits(logits_stream, n_vocab, all_logits,
|
||||||
tokens_data, n_ctx - 1 - first,
|
tokens_data, n_ctx - 1 - first,
|
||||||
workers, log_probs, nll, nll2);
|
workers, log_probs, nll, nll2);
|
||||||
} else {
|
} else {
|
||||||
process_logits(n_vocab, all_logits + first*n_vocab,
|
process_logits(n_vocab, all_logits,
|
||||||
tokens_data, n_ctx - 1 - first,
|
tokens_data, n_ctx - 1 - first,
|
||||||
workers, nll, nll2,
|
workers, nll, nll2,
|
||||||
logit_history.data() + start + seq*n_ctx + first,
|
logit_history.data() + start + seq*n_ctx + first,
|
||||||
@ -652,6 +658,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
|||||||
}
|
}
|
||||||
|
|
||||||
static bool decode_helper(llama_context * ctx, llama_batch & batch, std::vector<float> & batch_logits, int32_t n_batch, int32_t n_vocab) {
|
static bool decode_helper(llama_context * ctx, llama_batch & batch, std::vector<float> & batch_logits, int32_t n_batch, int32_t n_vocab) {
|
||||||
|
int prev_outputs = 0;
|
||||||
for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
|
for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) {
|
||||||
const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
|
const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i));
|
||||||
|
|
||||||
@ -672,7 +679,14 @@ static bool decode_helper(llama_context * ctx, llama_batch & batch, std::vector<
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(batch_logits.data() + i*n_vocab, llama_get_logits(ctx), n_tokens*n_vocab*sizeof(float));
|
int n_outputs = 0;
|
||||||
|
for (int i = 0; i < n_tokens; ++i) {
|
||||||
|
n_outputs += batch_view.logits[i] != 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
memcpy(batch_logits.data() + prev_outputs*n_vocab, llama_get_logits(ctx), n_outputs*n_vocab*sizeof(float));
|
||||||
|
|
||||||
|
prev_outputs += n_outputs;
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
@ -779,7 +793,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
size_t ending_logprob_count[4];
|
size_t ending_logprob_count[4];
|
||||||
double ending_logprob[4];
|
double ending_logprob[4];
|
||||||
|
|
||||||
size_t i_batch; // starting index in the llama_batch
|
size_t i_logits; // starting index of logits in the llama_batch
|
||||||
size_t common_prefix; // max number of initial tokens that are the same in all sentences
|
size_t common_prefix; // max number of initial tokens that are the same in all sentences
|
||||||
size_t required_tokens; // needed number of tokens to evaluate all 4 endings
|
size_t required_tokens; // needed number of tokens to evaluate all 4 endings
|
||||||
std::vector<llama_token> seq_tokens[4];
|
std::vector<llama_token> seq_tokens[4];
|
||||||
@ -844,9 +858,10 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
const int max_tasks_per_batch = 32;
|
const int max_tasks_per_batch = 32;
|
||||||
const int max_seq = std::min(4*max_tasks_per_batch, (int) llama_n_seq_max(ctx));
|
const int max_seq = std::min(4*max_tasks_per_batch, (int) llama_n_seq_max(ctx));
|
||||||
|
|
||||||
llama_batch batch = llama_batch_init(n_ctx, 0, max_seq);
|
llama_batch batch = llama_batch_init(n_ctx, 0, 4);
|
||||||
|
|
||||||
std::vector<float> tok_logits(n_vocab);
|
std::vector<float> tok_logits(n_vocab);
|
||||||
|
// TODO: this could be made smaller; it's currently the worst-case size
|
||||||
std::vector<float> batch_logits(n_vocab*n_ctx);
|
std::vector<float> batch_logits(n_vocab*n_ctx);
|
||||||
|
|
||||||
std::vector<std::pair<size_t, llama_token>> eval_pairs;
|
std::vector<std::pair<size_t, llama_token>> eval_pairs;
|
||||||
@ -857,16 +872,17 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
int n_cur = 0;
|
int n_cur = 0;
|
||||||
|
|
||||||
size_t i1 = i0;
|
size_t i1 = i0;
|
||||||
size_t i_batch = 0; // this tells us where in `llama_batch` we are currently
|
size_t i_logits = 0; // this tells us how many logits were needed before this point in the batch
|
||||||
|
|
||||||
llama_batch_clear(batch);
|
llama_batch_clear(batch);
|
||||||
|
|
||||||
// batch as much tasks as possible into the available context
|
// batch as much tasks as possible into the available context
|
||||||
// each task has 4 unique seuqnce ids - one for each ending
|
// each task has 4 unique sequence ids - one for each ending
|
||||||
// the common prefix is shared among the 4 sequences to save tokens
|
// the common prefix is shared among the 4 sequences to save tokens
|
||||||
// we extract logits only from the last common token and from all ending tokens of each sequence
|
// we extract logits only from the last common token and from all ending tokens of each sequence
|
||||||
while (n_cur + (int) hs_data[i1].required_tokens <= n_ctx) {
|
while (n_cur + (int) hs_data[i1].required_tokens <= n_ctx) {
|
||||||
auto & hs_cur = hs_data[i1];
|
auto & hs_cur = hs_data[i1];
|
||||||
|
int n_logits = 0;
|
||||||
|
|
||||||
const int s0 = 4*(i1 - i0);
|
const int s0 = 4*(i1 - i0);
|
||||||
if (s0 + 4 > max_seq) {
|
if (s0 + 4 > max_seq) {
|
||||||
@ -874,18 +890,23 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (size_t i = 0; i < hs_cur.common_prefix; ++i) {
|
for (size_t i = 0; i < hs_cur.common_prefix; ++i) {
|
||||||
llama_batch_add(batch, hs_cur.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3}, false);
|
llama_batch_add(batch, hs_cur.seq_tokens[0][i], i, { s0 + 0, s0 + 1, s0 + 2, s0 + 3 }, false);
|
||||||
}
|
}
|
||||||
batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
|
batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
|
||||||
|
n_logits += 1;
|
||||||
|
|
||||||
for (int s = 0; s < 4; ++s) {
|
for (int s = 0; s < 4; ++s) {
|
||||||
for (size_t i = hs_cur.common_prefix; i < hs_cur.seq_tokens[s].size(); ++i) {
|
const size_t seq_tokens_size = hs_cur.seq_tokens[s].size();
|
||||||
llama_batch_add(batch, hs_cur.seq_tokens[s][i], i, { s0 + s }, true);
|
// TODO: don't evaluate the last token of each sequence
|
||||||
|
for (size_t i = hs_cur.common_prefix; i < seq_tokens_size; ++i) {
|
||||||
|
const bool needs_logits = i < seq_tokens_size - 1;
|
||||||
|
llama_batch_add(batch, hs_cur.seq_tokens[s][i], i, { s0 + s }, needs_logits);
|
||||||
|
n_logits += needs_logits;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
hs_cur.i_batch = i_batch;
|
hs_cur.i_logits = i_logits;
|
||||||
i_batch += hs_cur.required_tokens;
|
i_logits += n_logits;
|
||||||
|
|
||||||
n_cur += hs_data[i1].required_tokens;
|
n_cur += hs_data[i1].required_tokens;
|
||||||
if (++i1 == hs_task_count) {
|
if (++i1 == hs_task_count) {
|
||||||
@ -911,12 +932,11 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
eval_pairs.clear();
|
eval_pairs.clear();
|
||||||
for (size_t i = i0; i < i1; ++i) {
|
for (size_t i = i0; i < i1; ++i) {
|
||||||
auto & hs_cur = hs_data[i];
|
auto & hs_cur = hs_data[i];
|
||||||
size_t li = hs_cur.common_prefix;
|
size_t li = 1; // skip the last logit of the common prefix (computed separately below)
|
||||||
for (int s = 0; s < 4; ++s) {
|
for (int s = 0; s < 4; ++s) {
|
||||||
for (size_t j = hs_cur.common_prefix; j < hs_cur.seq_tokens[s].size() - 1; j++) {
|
for (size_t j = hs_cur.common_prefix; j < hs_cur.seq_tokens[s].size() - 1; j++) {
|
||||||
eval_pairs.emplace_back(hs_cur.i_batch + li++, hs_cur.seq_tokens[s][j + 1]);
|
eval_pairs.emplace_back(hs_cur.i_logits + li++, hs_cur.seq_tokens[s][j + 1]);
|
||||||
}
|
}
|
||||||
++li;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Then we do the actual calculation
|
// Then we do the actual calculation
|
||||||
@ -928,7 +948,8 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
for (size_t i = i0; i < i1; ++i) {
|
for (size_t i = i0; i < i1; ++i) {
|
||||||
auto & hs_cur = hs_data[i];
|
auto & hs_cur = hs_data[i];
|
||||||
|
|
||||||
std::memcpy(tok_logits.data(), batch_logits.data() + n_vocab*(hs_cur.i_batch + hs_cur.common_prefix - 1), n_vocab*sizeof(float));
|
// get the logits of the last token of the common prefix
|
||||||
|
std::memcpy(tok_logits.data(), batch_logits.data() + n_vocab*hs_cur.i_logits, n_vocab*sizeof(float));
|
||||||
|
|
||||||
const auto first_probs = softmax(tok_logits);
|
const auto first_probs = softmax(tok_logits);
|
||||||
|
|
||||||
@ -978,7 +999,7 @@ struct winogrande_entry {
|
|||||||
std::array<std::string, 2> choices;
|
std::array<std::string, 2> choices;
|
||||||
int answer;
|
int answer;
|
||||||
|
|
||||||
size_t i_batch;
|
size_t i_logits;
|
||||||
size_t common_prefix;
|
size_t common_prefix;
|
||||||
size_t required_tokens;
|
size_t required_tokens;
|
||||||
size_t n_base1; // number of tokens for context + choice 1
|
size_t n_base1; // number of tokens for context + choice 1
|
||||||
@ -1104,6 +1125,7 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
task.common_prefix++;
|
task.common_prefix++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: the last token of each of the sequences don't need to be evaluated
|
||||||
task.required_tokens = task.common_prefix +
|
task.required_tokens = task.common_prefix +
|
||||||
task.seq_tokens[0].size() - task.common_prefix +
|
task.seq_tokens[0].size() - task.common_prefix +
|
||||||
task.seq_tokens[1].size() - task.common_prefix;
|
task.seq_tokens[1].size() - task.common_prefix;
|
||||||
@ -1121,9 +1143,10 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
const int max_tasks_per_batch = 128;
|
const int max_tasks_per_batch = 128;
|
||||||
const int max_seq = std::min(2*max_tasks_per_batch, (int) llama_n_seq_max(ctx));
|
const int max_seq = std::min(2*max_tasks_per_batch, (int) llama_n_seq_max(ctx));
|
||||||
|
|
||||||
llama_batch batch = llama_batch_init(n_ctx, 0, max_seq);
|
llama_batch batch = llama_batch_init(n_ctx, 0, 2);
|
||||||
|
|
||||||
std::vector<float> tok_logits(n_vocab);
|
std::vector<float> tok_logits(n_vocab);
|
||||||
|
// TODO: this could be made smaller; it's currently the worst-case size
|
||||||
std::vector<float> batch_logits(n_vocab*n_ctx);
|
std::vector<float> batch_logits(n_vocab*n_ctx);
|
||||||
|
|
||||||
std::vector<std::pair<size_t, llama_token>> eval_pairs;
|
std::vector<std::pair<size_t, llama_token>> eval_pairs;
|
||||||
@ -1137,29 +1160,33 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
int n_cur = 0;
|
int n_cur = 0;
|
||||||
|
|
||||||
size_t i1 = i0;
|
size_t i1 = i0;
|
||||||
size_t i_batch = 0;
|
size_t i_logits = 0;
|
||||||
|
|
||||||
llama_batch_clear(batch);
|
llama_batch_clear(batch);
|
||||||
|
|
||||||
while (n_cur + (int) data[i1].required_tokens <= n_ctx) {
|
while (n_cur + (int) data[i1].required_tokens <= n_ctx) {
|
||||||
|
int n_logits = 0;
|
||||||
const int s0 = 2*(i1 - i0);
|
const int s0 = 2*(i1 - i0);
|
||||||
if (s0 + 2 > max_seq) {
|
if (s0 + 2 > max_seq) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (size_t i = 0; i < data[i1].common_prefix; ++i) {
|
for (size_t i = 0; i < data[i1].common_prefix; ++i) {
|
||||||
llama_batch_add(batch, data[i1].seq_tokens[0][i], i, { s0 + 0, s0 + 1}, false);
|
llama_batch_add(batch, data[i1].seq_tokens[0][i], i, { s0 + 0, s0 + 1 }, false);
|
||||||
}
|
}
|
||||||
batch.logits[batch.n_tokens - 1] = true;
|
batch.logits[batch.n_tokens - 1] = true;
|
||||||
|
n_logits += 1;
|
||||||
|
|
||||||
for (int s = 0; s < 2; ++s) {
|
for (int s = 0; s < 2; ++s) {
|
||||||
|
// TODO: end before the last token, no need to predict past the end of the sequences
|
||||||
for (size_t i = data[i1].common_prefix; i < data[i1].seq_tokens[s].size(); ++i) {
|
for (size_t i = data[i1].common_prefix; i < data[i1].seq_tokens[s].size(); ++i) {
|
||||||
llama_batch_add(batch, data[i1].seq_tokens[s][i], i, { s0 + s }, true);
|
llama_batch_add(batch, data[i1].seq_tokens[s][i], i, { s0 + s }, true);
|
||||||
|
n_logits += 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
data[i1].i_batch = i_batch;
|
data[i1].i_logits = i_logits;
|
||||||
i_batch += data[i1].required_tokens;
|
i_logits += n_logits;
|
||||||
|
|
||||||
n_cur += data[i1].required_tokens;
|
n_cur += data[i1].required_tokens;
|
||||||
if (++i1 == data.size()) {
|
if (++i1 == data.size()) {
|
||||||
@ -1190,15 +1217,16 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
|
|||||||
|
|
||||||
const auto& n_base1 = skip_choice ? task.n_base1 : task.common_prefix;
|
const auto& n_base1 = skip_choice ? task.n_base1 : task.common_prefix;
|
||||||
const int last_1st = task.seq_tokens[0].size() - n_base1 > 1 ? 1 : 0;
|
const int last_1st = task.seq_tokens[0].size() - n_base1 > 1 ? 1 : 0;
|
||||||
size_t li = n_base1 - 1;
|
size_t li = n_base1 - task.common_prefix;
|
||||||
for (size_t j = n_base1-1; j < task.seq_tokens[0].size()-1-last_1st; ++j) {
|
for (size_t j = n_base1-1; j < task.seq_tokens[0].size()-1-last_1st; ++j) {
|
||||||
eval_pairs.emplace_back(task.i_batch + li++, task.seq_tokens[0][j+1]);
|
eval_pairs.emplace_back(task.i_logits + li++, task.seq_tokens[0][j+1]);
|
||||||
}
|
}
|
||||||
const auto& n_base2 = skip_choice ? task.n_base2 : task.common_prefix;
|
const auto& n_base2 = skip_choice ? task.n_base2 : task.common_prefix;
|
||||||
const int last_2nd = task.seq_tokens[1].size() - n_base2 > 1 ? 1 : 0;
|
const int last_2nd = task.seq_tokens[1].size() - n_base2 > 1 ? 1 : 0;
|
||||||
li = task.seq_tokens[0].size() - task.common_prefix + n_base2 - 1;
|
// FIXME: this uses the wrong first logits when not skipping the choice word
|
||||||
|
li = task.seq_tokens[0].size() - task.common_prefix + n_base2 - task.common_prefix;
|
||||||
for (size_t j = n_base2-1; j < task.seq_tokens[1].size()-1-last_2nd; ++j) {
|
for (size_t j = n_base2-1; j < task.seq_tokens[1].size()-1-last_2nd; ++j) {
|
||||||
eval_pairs.emplace_back(task.i_batch + li++, task.seq_tokens[1][j+1]);
|
eval_pairs.emplace_back(task.i_logits + li++, task.seq_tokens[1][j+1]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
compute_logprobs(batch_logits.data(), n_vocab, workers, eval_pairs, eval_results);
|
compute_logprobs(batch_logits.data(), n_vocab, workers, eval_pairs, eval_results);
|
||||||
@ -1287,7 +1315,7 @@ struct multiple_choice_task {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// For evaluation
|
// For evaluation
|
||||||
size_t i_batch; // starting index in the llama_batch
|
size_t i_logits; // starting index of logits in the llama_batch
|
||||||
size_t common_prefix; // max number of initial tokens that are the same in all sentences
|
size_t common_prefix; // max number of initial tokens that are the same in all sentences
|
||||||
size_t required_tokens; // needed number of tokens to evaluate all answers
|
size_t required_tokens; // needed number of tokens to evaluate all answers
|
||||||
std::vector<std::vector<llama_token>> seq_tokens;
|
std::vector<std::vector<llama_token>> seq_tokens;
|
||||||
@ -1366,7 +1394,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
|||||||
std::vector<uint32_t> task_pos(n_task);
|
std::vector<uint32_t> task_pos(n_task);
|
||||||
strstream.read((char *)task_pos.data(), task_pos.size()*sizeof(uint32_t));
|
strstream.read((char *)task_pos.data(), task_pos.size()*sizeof(uint32_t));
|
||||||
if (strstream.fail()) {
|
if (strstream.fail()) {
|
||||||
printf("%s: failed to raad task positions from prompt\n", __func__);
|
printf("%s: failed to read task positions from prompt\n", __func__);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1447,7 +1475,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
int n_dot = n_task/100;
|
int n_dot = std::max((int) n_task/100, 1);
|
||||||
int i_task = 0;
|
int i_task = 0;
|
||||||
for (auto& task : tasks) {
|
for (auto& task : tasks) {
|
||||||
++i_task;
|
++i_task;
|
||||||
@ -1491,17 +1519,18 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
|||||||
int n_cur = 0;
|
int n_cur = 0;
|
||||||
|
|
||||||
size_t i1 = i0;
|
size_t i1 = i0;
|
||||||
size_t i_batch = 0; // this tells us where in `llama_batch` we are currently
|
size_t i_logits = 0; // this tells us how many logits were needed before this point in the batch
|
||||||
|
|
||||||
llama_batch_clear(batch);
|
llama_batch_clear(batch);
|
||||||
|
|
||||||
// batch as much tasks as possible into the available context
|
// batch as much tasks as possible into the available context
|
||||||
// each task has 4 unique seuqnce ids - one for each ending
|
// each task has 4 unique sequence ids - one for each ending
|
||||||
// the common prefix is shared among the 4 sequences to save tokens
|
// the common prefix is shared among the 4 sequences to save tokens
|
||||||
// we extract logits only from the last common token and from all ending tokens of each sequence
|
// we extract logits only from the last common token and from all ending tokens of each sequence
|
||||||
int s0 = 0;
|
int s0 = 0;
|
||||||
while (n_cur + (int) tasks[i1].required_tokens <= n_ctx) {
|
while (n_cur + (int) tasks[i1].required_tokens <= n_ctx) {
|
||||||
auto& cur_task = tasks[i1];
|
auto& cur_task = tasks[i1];
|
||||||
|
int n_logits = 0;
|
||||||
|
|
||||||
int num_answers = cur_task.seq_tokens.size();
|
int num_answers = cur_task.seq_tokens.size();
|
||||||
if (s0 + num_answers > max_seq) {
|
if (s0 + num_answers > max_seq) {
|
||||||
@ -1518,17 +1547,22 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
|||||||
llama_batch_add(batch, cur_task.seq_tokens[0][i], i, batch_indeces, false);
|
llama_batch_add(batch, cur_task.seq_tokens[0][i], i, batch_indeces, false);
|
||||||
}
|
}
|
||||||
batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
|
batch.logits[batch.n_tokens - 1] = true; // we need logits for the last token of the common prefix
|
||||||
|
n_logits += 1;
|
||||||
|
|
||||||
for (int s = 0; s < int(cur_task.seq_tokens.size()); ++s) {
|
for (int s = 0; s < int(cur_task.seq_tokens.size()); ++s) {
|
||||||
for (size_t i = cur_task.common_prefix; i < cur_task.seq_tokens[s].size(); ++i) {
|
const size_t seq_tokens_size = cur_task.seq_tokens[s].size();
|
||||||
llama_batch_add(batch, cur_task.seq_tokens[s][i], i, { s0 + s }, true);
|
// TODO: don't evaluate the last token of each sequence
|
||||||
|
for (size_t i = cur_task.common_prefix; i < seq_tokens_size; ++i) {
|
||||||
|
const bool needs_logits = i < seq_tokens_size - 1;
|
||||||
|
llama_batch_add(batch, cur_task.seq_tokens[s][i], i, { s0 + s }, needs_logits);
|
||||||
|
n_logits += needs_logits;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
s0 += num_answers;
|
s0 += num_answers;
|
||||||
|
|
||||||
cur_task.i_batch = i_batch;
|
cur_task.i_logits = i_logits;
|
||||||
i_batch += cur_task.required_tokens;
|
i_logits += n_logits;
|
||||||
|
|
||||||
n_cur += cur_task.required_tokens;
|
n_cur += cur_task.required_tokens;
|
||||||
if (++i1 == tasks.size()) {
|
if (++i1 == tasks.size()) {
|
||||||
@ -1554,12 +1588,11 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
|||||||
eval_pairs.clear();
|
eval_pairs.clear();
|
||||||
for (size_t i = i0; i < i1; ++i) {
|
for (size_t i = i0; i < i1; ++i) {
|
||||||
auto& cur_task = tasks[i];
|
auto& cur_task = tasks[i];
|
||||||
size_t li = cur_task.common_prefix;
|
size_t li = 1; // skip the last logit of the common prefix (computed separately below)
|
||||||
for (int s = 0; s < int(cur_task.seq_tokens.size()); ++s) {
|
for (int s = 0; s < int(cur_task.seq_tokens.size()); ++s) {
|
||||||
for (size_t j = cur_task.common_prefix; j < cur_task.seq_tokens[s].size() - 1; j++) {
|
for (size_t j = cur_task.common_prefix; j < cur_task.seq_tokens[s].size() - 1; j++) {
|
||||||
eval_pairs.emplace_back(cur_task.i_batch + li++, cur_task.seq_tokens[s][j + 1]);
|
eval_pairs.emplace_back(cur_task.i_logits + li++, cur_task.seq_tokens[s][j + 1]);
|
||||||
}
|
}
|
||||||
++li;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Then we do the actual calculation
|
// Then we do the actual calculation
|
||||||
@ -1578,7 +1611,8 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
|
|||||||
//}
|
//}
|
||||||
//printf("\n common_prefix: %zu\n", cur_task.common_prefix);
|
//printf("\n common_prefix: %zu\n", cur_task.common_prefix);
|
||||||
|
|
||||||
std::memcpy(tok_logits.data(), batch_logits.data() + n_vocab*(cur_task.i_batch + cur_task.common_prefix - 1), n_vocab*sizeof(float));
|
// get the logits of the last token of the common prefix
|
||||||
|
std::memcpy(tok_logits.data(), batch_logits.data() + n_vocab*cur_task.i_logits, n_vocab*sizeof(float));
|
||||||
|
|
||||||
const auto first_probs = softmax(tok_logits);
|
const auto first_probs = softmax(tok_logits);
|
||||||
|
|
||||||
@ -1730,6 +1764,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
|
|||||||
tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
|
tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: use llama_batch.logits instead of relying on logits_all == true
|
||||||
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
|
if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
|
||||||
fprintf(stderr, "%s : failed to eval\n", __func__);
|
fprintf(stderr, "%s : failed to eval\n", __func__);
|
||||||
return;
|
return;
|
||||||
|
@ -26,6 +26,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
|
|||||||
{ "IQ2_S", LLAMA_FTYPE_MOSTLY_IQ2_S, " 2.5 bpw quantization", },
|
{ "IQ2_S", LLAMA_FTYPE_MOSTLY_IQ2_S, " 2.5 bpw quantization", },
|
||||||
{ "IQ2_M", LLAMA_FTYPE_MOSTLY_IQ2_M, " 2.7 bpw quantization", },
|
{ "IQ2_M", LLAMA_FTYPE_MOSTLY_IQ2_M, " 2.7 bpw quantization", },
|
||||||
{ "IQ1_S", LLAMA_FTYPE_MOSTLY_IQ1_S, " 1.56 bpw quantization", },
|
{ "IQ1_S", LLAMA_FTYPE_MOSTLY_IQ1_S, " 1.56 bpw quantization", },
|
||||||
|
{ "IQ1_M", LLAMA_FTYPE_MOSTLY_IQ1_M, " 1.75 bpw quantization", },
|
||||||
{ "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.63G, +0.6717 ppl @ LLaMA-v1-7B", },
|
{ "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.63G, +0.6717 ppl @ LLaMA-v1-7B", },
|
||||||
{ "Q2_K_S", LLAMA_FTYPE_MOSTLY_Q2_K_S, " 2.16G, +9.0634 ppl @ LLaMA-v1-7B", },
|
{ "Q2_K_S", LLAMA_FTYPE_MOSTLY_Q2_K_S, " 2.16G, +9.0634 ppl @ LLaMA-v1-7B", },
|
||||||
{ "IQ3_XXS",LLAMA_FTYPE_MOSTLY_IQ3_XXS," 3.06 bpw quantization", },
|
{ "IQ3_XXS",LLAMA_FTYPE_MOSTLY_IQ3_XXS," 3.06 bpw quantization", },
|
||||||
@ -87,13 +88,17 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
|
|||||||
//
|
//
|
||||||
[[noreturn]]
|
[[noreturn]]
|
||||||
static void usage(const char * executable) {
|
static void usage(const char * executable) {
|
||||||
printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
|
printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
|
||||||
printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
|
printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
|
||||||
printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
|
printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
|
||||||
printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
|
printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
|
||||||
printf(" --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
|
printf(" --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
|
||||||
printf(" --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
|
printf(" --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
|
||||||
printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
|
printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
|
||||||
|
printf(" --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
|
||||||
|
printf(" --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
|
||||||
|
printf(" --override-kv KEY=TYPE:VALUE\n");
|
||||||
|
printf(" Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
|
||||||
printf("Note: --include-weights and --exclude-weights cannot be used together\n");
|
printf("Note: --include-weights and --exclude-weights cannot be used together\n");
|
||||||
printf("\nAllowed quantization types:\n");
|
printf("\nAllowed quantization types:\n");
|
||||||
for (auto & it : QUANT_OPTIONS) {
|
for (auto & it : QUANT_OPTIONS) {
|
||||||
@ -107,14 +112,14 @@ static void usage(const char * executable) {
|
|||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void load_imatrix(const std::string& imatrix_file, std::unordered_map<std::string, std::vector<float>>& imatrix_data) {
|
static void load_imatrix(const std::string & imatrix_file, std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
|
||||||
std::ifstream in(imatrix_file.c_str(), std::ios::binary);
|
std::ifstream in(imatrix_file.c_str(), std::ios::binary);
|
||||||
if (!in) {
|
if (!in) {
|
||||||
printf("%s: failed to open %s\n",__func__,imatrix_file.c_str());
|
printf("%s: failed to open %s\n",__func__, imatrix_file.c_str());
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
int n_entries;
|
int n_entries;
|
||||||
in.read((char*)&n_entries, sizeof(n_entries));
|
in.read((char *)&n_entries, sizeof(n_entries));
|
||||||
if (in.fail() || n_entries < 1) {
|
if (in.fail() || n_entries < 1) {
|
||||||
printf("%s: no data in file %s\n", __func__, imatrix_file.c_str());
|
printf("%s: no data in file %s\n", __func__, imatrix_file.c_str());
|
||||||
return;
|
return;
|
||||||
@ -124,25 +129,25 @@ static void load_imatrix(const std::string& imatrix_file, std::unordered_map<std
|
|||||||
std::vector<char> name_as_vec(len+1);
|
std::vector<char> name_as_vec(len+1);
|
||||||
in.read((char *)name_as_vec.data(), len);
|
in.read((char *)name_as_vec.data(), len);
|
||||||
if (in.fail()) {
|
if (in.fail()) {
|
||||||
printf("%s: failed reading name for entry %d from %s\n",__func__,i+1,imatrix_file.c_str());
|
printf("%s: failed reading name for entry %d from %s\n", __func__, i+1, imatrix_file.c_str());
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
name_as_vec[len] = 0;
|
name_as_vec[len] = 0;
|
||||||
std::string name{name_as_vec.data()};
|
std::string name{name_as_vec.data()};
|
||||||
auto& e = imatrix_data[std::move(name)];
|
auto & e = imatrix_data[std::move(name)];
|
||||||
int ncall;
|
int ncall;
|
||||||
in.read((char*)&ncall, sizeof(ncall));
|
in.read((char *)&ncall, sizeof(ncall));
|
||||||
int nval;
|
int nval;
|
||||||
in.read((char *)&nval, sizeof(nval));
|
in.read((char *)&nval, sizeof(nval));
|
||||||
if (in.fail() || nval < 1) {
|
if (in.fail() || nval < 1) {
|
||||||
printf("%s: failed reading number of values for entry %d\n",__func__,i);
|
printf("%s: failed reading number of values for entry %d\n", __func__, i);
|
||||||
imatrix_data = {};
|
imatrix_data = {};
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
e.resize(nval);
|
e.resize(nval);
|
||||||
in.read((char*)e.data(), nval*sizeof(float));
|
in.read((char *)e.data(), nval*sizeof(float));
|
||||||
if (in.fail()) {
|
if (in.fail()) {
|
||||||
printf("%s: failed reading data for entry %d\n",__func__,i);
|
printf("%s: failed reading data for entry %d\n", __func__, i);
|
||||||
imatrix_data = {};
|
imatrix_data = {};
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -150,13 +155,13 @@ static void load_imatrix(const std::string& imatrix_file, std::unordered_map<std
|
|||||||
for (auto& v : e) v /= ncall;
|
for (auto& v : e) v /= ncall;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
printf("%s: loaded %d importance matrix entries from %s\n",__func__,int(imatrix_data.size()),imatrix_file.c_str());
|
printf("%s: loaded %d importance matrix entries from %s\n", __func__, int(imatrix_data.size()), imatrix_file.c_str());
|
||||||
}
|
}
|
||||||
|
|
||||||
static void prepare_imatrix(const std::string& imatrix_file,
|
static void prepare_imatrix(const std::string & imatrix_file,
|
||||||
const std::vector<std::string>& included_weights,
|
const std::vector<std::string> & included_weights,
|
||||||
const std::vector<std::string>& excluded_weights,
|
const std::vector<std::string> & excluded_weights,
|
||||||
std::unordered_map<std::string, std::vector<float>>& imatrix_data) {
|
std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
|
||||||
if (!imatrix_file.empty()) {
|
if (!imatrix_file.empty()) {
|
||||||
load_imatrix(imatrix_file, imatrix_data);
|
load_imatrix(imatrix_file, imatrix_data);
|
||||||
}
|
}
|
||||||
@ -189,6 +194,55 @@ static void prepare_imatrix(const std::string& imatrix_file,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static ggml_type parse_ggml_type(const char * arg) {
|
||||||
|
ggml_type result = GGML_TYPE_COUNT;
|
||||||
|
for (int j = 0; j < GGML_TYPE_COUNT; ++j) {
|
||||||
|
auto type = ggml_type(j);
|
||||||
|
const auto * name = ggml_type_name(type);
|
||||||
|
if (name && strcmp(arg, name) == 0) {
|
||||||
|
result = type; break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) {
|
||||||
|
const char* sep = strchr(data, '=');
|
||||||
|
if (sep == nullptr || sep - data >= 128) {
|
||||||
|
fprintf(stderr, "%s: malformed KV override '%s'\n", __func__, data);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
llama_model_kv_override kvo;
|
||||||
|
std::strncpy(kvo.key, data, sep - data);
|
||||||
|
kvo.key[sep - data] = 0;
|
||||||
|
sep++;
|
||||||
|
if (strncmp(sep, "int:", 4) == 0) {
|
||||||
|
sep += 4;
|
||||||
|
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
|
||||||
|
kvo.int_value = std::atol(sep);
|
||||||
|
} else if (strncmp(sep, "float:", 6) == 0) {
|
||||||
|
sep += 6;
|
||||||
|
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
|
||||||
|
kvo.float_value = std::atof(sep);
|
||||||
|
} else if (strncmp(sep, "bool:", 5) == 0) {
|
||||||
|
sep += 5;
|
||||||
|
kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
|
||||||
|
if (std::strcmp(sep, "true") == 0) {
|
||||||
|
kvo.bool_value = true;
|
||||||
|
} else if (std::strcmp(sep, "false") == 0) {
|
||||||
|
kvo.bool_value = false;
|
||||||
|
} else {
|
||||||
|
fprintf(stderr, "%s: invalid boolean value for KV override '%s'\n", __func__, data);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
fprintf(stderr, "%s: invalid type for KV override '%s'\n", __func__, data);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
overrides.emplace_back(std::move(kvo));
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
int main(int argc, char ** argv) {
|
int main(int argc, char ** argv) {
|
||||||
if (argc < 3) {
|
if (argc < 3) {
|
||||||
usage(argv[0]);
|
usage(argv[0]);
|
||||||
@ -199,10 +253,27 @@ int main(int argc, char ** argv) {
|
|||||||
int arg_idx = 1;
|
int arg_idx = 1;
|
||||||
std::string imatrix_file;
|
std::string imatrix_file;
|
||||||
std::vector<std::string> included_weights, excluded_weights;
|
std::vector<std::string> included_weights, excluded_weights;
|
||||||
|
std::vector<llama_model_kv_override> kv_overrides;
|
||||||
|
|
||||||
for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
|
for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
|
||||||
if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
|
if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
|
||||||
params.quantize_output_tensor = false;
|
params.quantize_output_tensor = false;
|
||||||
|
} else if (strcmp(argv[arg_idx], "--output-tensor-type") == 0) {
|
||||||
|
if (arg_idx < argc-1) {
|
||||||
|
params.output_tensor_type = parse_ggml_type(argv[++arg_idx]);
|
||||||
|
} else {
|
||||||
|
usage(argv[0]);
|
||||||
|
}
|
||||||
|
} else if (strcmp(argv[arg_idx], "--token-embedding-type") == 0) {
|
||||||
|
if (arg_idx < argc-1) {
|
||||||
|
params.token_embedding_type = parse_ggml_type(argv[++arg_idx]);
|
||||||
|
} else {
|
||||||
|
usage(argv[0]);
|
||||||
|
}
|
||||||
|
} else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
|
||||||
|
if (arg_idx == argc-1 || !parse_kv_override(argv[++arg_idx], kv_overrides)) {
|
||||||
|
usage(argv[0]);
|
||||||
|
}
|
||||||
} else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) {
|
} else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) {
|
||||||
params.allow_requantize = true;
|
params.allow_requantize = true;
|
||||||
} else if (strcmp(argv[arg_idx], "--pure") == 0) {
|
} else if (strcmp(argv[arg_idx], "--pure") == 0) {
|
||||||
@ -243,6 +314,11 @@ int main(int argc, char ** argv) {
|
|||||||
if (!imatrix_data.empty()) {
|
if (!imatrix_data.empty()) {
|
||||||
params.imatrix = &imatrix_data;
|
params.imatrix = &imatrix_data;
|
||||||
}
|
}
|
||||||
|
if (!kv_overrides.empty()) {
|
||||||
|
kv_overrides.emplace_back();
|
||||||
|
kv_overrides.back().key[0] = 0;
|
||||||
|
params.kv_overrides = &kv_overrides;
|
||||||
|
}
|
||||||
|
|
||||||
llama_backend_init();
|
llama_backend_init();
|
||||||
|
|
||||||
@ -264,8 +340,7 @@ int main(int argc, char ** argv) {
|
|||||||
if (ftype_str == "COPY") {
|
if (ftype_str == "COPY") {
|
||||||
params.only_copy = true;
|
params.only_copy = true;
|
||||||
}
|
}
|
||||||
}
|
} else {
|
||||||
else {
|
|
||||||
fname_out = argv[arg_idx];
|
fname_out = argv[arg_idx];
|
||||||
arg_idx++;
|
arg_idx++;
|
||||||
|
|
||||||
@ -296,10 +371,12 @@ int main(int argc, char ** argv) {
|
|||||||
|
|
||||||
if ((params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS ||
|
if ((params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS ||
|
||||||
params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S ||
|
params.ftype == LLAMA_FTYPE_MOSTLY_IQ2_S ||
|
||||||
params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S || params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) && imatrix_data.empty()) {
|
params.ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S ||
|
||||||
fprintf(stderr, "\n===============================================================================================\n");
|
params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
|
||||||
fprintf(stderr, "Please do not use IQ1_S, IQ2_XXS, IQ2_XS or Q2_K_S quantization without an importance matrix\n");
|
params.ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) && imatrix_data.empty()) {
|
||||||
fprintf(stderr, "===============================================================================================\n\n\n");
|
fprintf(stderr, "\n==========================================================================================================\n");
|
||||||
|
fprintf(stderr, "Please do not use IQ1_S, IQ1_M, IQ2_S, IQ2_XXS, IQ2_XS or Q2_K_S quantization without an importance matrix\n");
|
||||||
|
fprintf(stderr, "==========================================================================================================\n\n\n");
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
5
examples/retrieval/CMakeLists.txt
Normal file
5
examples/retrieval/CMakeLists.txt
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
set(TARGET retrieval)
|
||||||
|
add_executable(${TARGET} retrieval.cpp)
|
||||||
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
|
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||||
|
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
69
examples/retrieval/README.md
Normal file
69
examples/retrieval/README.md
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
# llama.cpp/examples/retrieval
|
||||||
|
|
||||||
|
Demonstration of simple retrieval technique based on cosine similarity
|
||||||
|
|
||||||
|
More info:
|
||||||
|
https://github.com/ggerganov/llama.cpp/pull/6193
|
||||||
|
|
||||||
|
### How to use
|
||||||
|
|
||||||
|
`retieval.cpp` has parameters of its own:
|
||||||
|
- `--context-file`: file to be embedded - state this option multiple times to embed multiple files
|
||||||
|
- `--chunk-size`: minimum size of each text chunk to be embedded
|
||||||
|
- `--chunk-separator`: STRING to divide chunks by. newline by default
|
||||||
|
|
||||||
|
`retrieval` example can be tested as follows:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make -j && ./retrieval --model ./models/bge-base-en-v1.5-f16.gguf --top-k 3 --context-file README.md --context-file License --chunk-size 100 --chunk-separator .
|
||||||
|
```
|
||||||
|
|
||||||
|
This chunks and embeds all given files and starts a loop requesting query inputs:
|
||||||
|
|
||||||
|
```
|
||||||
|
Enter query:
|
||||||
|
```
|
||||||
|
|
||||||
|
On each query input, top k chunks are shown along with file name, chunk position within file and original text:
|
||||||
|
|
||||||
|
```
|
||||||
|
Enter query: describe the mit license
|
||||||
|
batch_decode: n_tokens = 6, n_seq = 1
|
||||||
|
Top 3 similar chunks:
|
||||||
|
filename: README.md
|
||||||
|
filepos: 119
|
||||||
|
similarity: 0.762334
|
||||||
|
textdata:
|
||||||
|
png)
|
||||||
|
|
||||||
|
[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
|
||||||
|
|
||||||
|
[Roadmap](https://github.
|
||||||
|
--------------------
|
||||||
|
filename: License
|
||||||
|
filepos: 0
|
||||||
|
similarity: 0.725146
|
||||||
|
textdata:
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2023 Georgi Gerganov
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
--------------------
|
||||||
|
filename: README.md
|
||||||
|
filepos: 9178
|
||||||
|
similarity: 0.621722
|
||||||
|
textdata:
|
||||||
|
com/cztomsik/ava) (MIT)
|
||||||
|
- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal)
|
||||||
|
- [pythops/tenere](https://github.
|
||||||
|
--------------------
|
||||||
|
```
|
350
examples/retrieval/retrieval.cpp
Normal file
350
examples/retrieval/retrieval.cpp
Normal file
@ -0,0 +1,350 @@
|
|||||||
|
#include "common.h"
|
||||||
|
#include "llama.h"
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <fstream>
|
||||||
|
|
||||||
|
struct retrieval_params {
|
||||||
|
std::vector<std::string> context_files; // context files to embed
|
||||||
|
int32_t chunk_size = 64; // chunk size for context embedding
|
||||||
|
std::string chunk_separator = "\n"; // chunk separator for context embedding
|
||||||
|
};
|
||||||
|
|
||||||
|
static void retrieval_params_print_usage(int argc, char ** argv, gpt_params & gpt_params, retrieval_params & params) {
|
||||||
|
gpt_print_usage(argc, argv, gpt_params);
|
||||||
|
printf("retrieval options:\n");
|
||||||
|
printf(" --context-file FNAME file containing context to embed.\n");
|
||||||
|
printf(" specify multiple files by providing --context-file option multiple times.\n");
|
||||||
|
printf(" --chunk-size N minimum length of embedded text chunk (default:%d)\n", params.chunk_size);
|
||||||
|
printf(" --chunk-separator STRING\n");
|
||||||
|
printf(" string to separate chunks (default: \"\\n\")\n");
|
||||||
|
printf("\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
static void retrieval_params_parse(int argc, char ** argv, gpt_params & gpt_params, retrieval_params & retrieval_params) {
|
||||||
|
int i = 1;
|
||||||
|
std::string arg;
|
||||||
|
while (i < argc) {
|
||||||
|
arg = argv[i];
|
||||||
|
bool invalid_gpt_param = false;
|
||||||
|
if(gpt_params_find_arg(argc, argv, argv[i], gpt_params, i, invalid_gpt_param)) {
|
||||||
|
if (invalid_gpt_param) {
|
||||||
|
fprintf(stderr, "error: invalid argument: %s\n", arg.c_str());
|
||||||
|
retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
// option was parsed by gpt_params_find_arg
|
||||||
|
} else if (arg == "--context-file") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
fprintf(stderr, "error: missing argument for --context-file\n");
|
||||||
|
retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
std::ifstream file(argv[i]);
|
||||||
|
if (!file) {
|
||||||
|
fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
|
||||||
|
retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
// store the external file name in params
|
||||||
|
retrieval_params.context_files.push_back(argv[i]);
|
||||||
|
} else if (arg == "--chunk-size") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
fprintf(stderr, "error: missing argument for --chunk-size\n");
|
||||||
|
retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
retrieval_params.chunk_size = std::stoi(argv[i]);
|
||||||
|
} else if (arg == "--chunk-separator") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
fprintf(stderr, "error: missing argument for --chunk-separator\n");
|
||||||
|
retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
retrieval_params.chunk_separator = argv[i];
|
||||||
|
} else {
|
||||||
|
// unknown argument
|
||||||
|
fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
|
||||||
|
retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
|
||||||
|
exit(1);
|
||||||
|
}
|
||||||
|
i++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct chunk {
|
||||||
|
// filename
|
||||||
|
std::string filename;
|
||||||
|
// original file position
|
||||||
|
size_t filepos;
|
||||||
|
// original text data
|
||||||
|
std::string textdata = "";
|
||||||
|
// tokenized text data
|
||||||
|
std::vector<llama_token> tokens;
|
||||||
|
// embedding
|
||||||
|
std::vector<float> embedding;
|
||||||
|
};
|
||||||
|
|
||||||
|
// chunk file data to chunks of size >= chunk_size
|
||||||
|
// chunk_separator is the separator between chunks
|
||||||
|
static std::vector<chunk> chunk_file(const std::string & filename, int chunk_size, const std::string & chunk_separator) {
|
||||||
|
std::vector<chunk> chunks;
|
||||||
|
std::ifstream f(filename.c_str());
|
||||||
|
|
||||||
|
if (!f.is_open()) {
|
||||||
|
fprintf(stderr, "Error: could not open file %s\n", filename.c_str());
|
||||||
|
return chunks;
|
||||||
|
}
|
||||||
|
|
||||||
|
chunk current_chunk;
|
||||||
|
char buffer[1024];
|
||||||
|
int64_t filepos = 0;
|
||||||
|
std::string current = "";
|
||||||
|
while (f.read(buffer, 1024)) {
|
||||||
|
current += std::string(buffer, f.gcount());
|
||||||
|
size_t pos;
|
||||||
|
while ((pos = current.find(chunk_separator)) != std::string::npos) {
|
||||||
|
current_chunk.textdata += current.substr(0, pos + chunk_separator.size());
|
||||||
|
if ((int) current_chunk.textdata.size() > chunk_size) {
|
||||||
|
// save chunk
|
||||||
|
current_chunk.filepos = filepos;
|
||||||
|
current_chunk.filename = filename;
|
||||||
|
chunks.push_back(current_chunk);
|
||||||
|
// update filepos
|
||||||
|
filepos += (int) current_chunk.textdata.size();
|
||||||
|
// reset current_chunk
|
||||||
|
current_chunk = chunk();
|
||||||
|
}
|
||||||
|
current = current.substr(pos + chunk_separator.size());
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
// add leftover data to last chunk
|
||||||
|
if (current_chunk.textdata.size() > 0) {
|
||||||
|
if (chunks.empty()) {
|
||||||
|
current_chunk.filepos = filepos;
|
||||||
|
current_chunk.filename = filename;
|
||||||
|
chunks.push_back(current_chunk);
|
||||||
|
} else {
|
||||||
|
chunks.back().textdata += current_chunk.textdata;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.close();
|
||||||
|
return chunks;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, int seq_id) {
|
||||||
|
for (size_t i = 0; i < tokens.size(); i++) {
|
||||||
|
llama_batch_add(batch, tokens[i], i, { seq_id }, i == tokens.size() - 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd) {
|
||||||
|
// clear previous kv_cache values (irrelevant for embeddings)
|
||||||
|
llama_kv_cache_clear(ctx);
|
||||||
|
|
||||||
|
// run model
|
||||||
|
fprintf(stderr, "%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
|
||||||
|
if (llama_decode(ctx, batch) < 0) {
|
||||||
|
fprintf(stderr, "%s : failed to decode\n", __func__);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = 0; i < batch.n_tokens; i++) {
|
||||||
|
if (!batch.logits[i]) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// try to get sequence embeddings - supported only when pooling_type is not NONE
|
||||||
|
const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
|
||||||
|
if (embd == NULL) {
|
||||||
|
embd = llama_get_embeddings_ith(ctx, i);
|
||||||
|
if (embd == NULL) {
|
||||||
|
fprintf(stderr, "%s: failed to get embeddings for token %d\n", __func__, i);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
float * out = output + batch.seq_id[i][0] * n_embd;
|
||||||
|
llama_embd_normalize(embd, out, n_embd);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char ** argv) {
|
||||||
|
gpt_params params;
|
||||||
|
retrieval_params retrieval_params;
|
||||||
|
|
||||||
|
retrieval_params_parse(argc, argv, params, retrieval_params);
|
||||||
|
|
||||||
|
// For BERT models, batch size must be equal to ubatch size
|
||||||
|
params.n_ubatch = params.n_batch;
|
||||||
|
|
||||||
|
if (retrieval_params.chunk_size <= 0) {
|
||||||
|
fprintf(stderr, "chunk_size must be positive\n");
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
if (retrieval_params.context_files.empty()) {
|
||||||
|
fprintf(stderr, "context_files must be specified\n");
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
params.embedding = true;
|
||||||
|
|
||||||
|
print_build_info();
|
||||||
|
|
||||||
|
printf("processing files:\n");
|
||||||
|
for (auto & context_file : retrieval_params.context_files) {
|
||||||
|
printf("%s\n", context_file.c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<chunk> chunks;
|
||||||
|
for (auto & context_file : retrieval_params.context_files) {
|
||||||
|
std::vector<chunk> file_chunk = chunk_file(context_file, retrieval_params.chunk_size, retrieval_params.chunk_separator);
|
||||||
|
chunks.insert(chunks.end(), file_chunk.begin(), file_chunk.end());
|
||||||
|
}
|
||||||
|
printf("Number of chunks: %ld\n", chunks.size());
|
||||||
|
|
||||||
|
llama_backend_init();
|
||||||
|
llama_numa_init(params.numa);
|
||||||
|
|
||||||
|
llama_model * model;
|
||||||
|
llama_context * ctx;
|
||||||
|
|
||||||
|
// load the model
|
||||||
|
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
||||||
|
if (model == NULL) {
|
||||||
|
fprintf(stderr, "%s: error: unable to load model\n", __func__);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
const int n_ctx_train = llama_n_ctx_train(model);
|
||||||
|
const int n_ctx = llama_n_ctx(ctx);
|
||||||
|
|
||||||
|
if (n_ctx > n_ctx_train) {
|
||||||
|
fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
|
||||||
|
__func__, n_ctx_train, n_ctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
// print system information
|
||||||
|
{
|
||||||
|
fprintf(stderr, "\n");
|
||||||
|
fprintf(stderr, "%s\n", get_system_info(params).c_str());
|
||||||
|
}
|
||||||
|
|
||||||
|
// max batch size
|
||||||
|
const uint64_t n_batch = params.n_batch;
|
||||||
|
GGML_ASSERT(params.n_batch >= params.n_ctx);
|
||||||
|
|
||||||
|
// tokenize the prompts and trim
|
||||||
|
for (auto & chunk : chunks) {
|
||||||
|
auto inp = ::llama_tokenize(ctx, chunk.textdata, true, false);
|
||||||
|
if (inp.size() > n_batch) {
|
||||||
|
fprintf(stderr, "%s: error: chunk size (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
|
||||||
|
__func__, (long long int) inp.size(), (long long int) n_batch);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
// add eos if not present
|
||||||
|
if (inp.empty() || inp.back() != llama_token_eos(model)) {
|
||||||
|
inp.push_back(llama_token_eos(model));
|
||||||
|
}
|
||||||
|
chunk.tokens = inp;
|
||||||
|
}
|
||||||
|
|
||||||
|
// tokenization stats
|
||||||
|
if (params.verbose_prompt) {
|
||||||
|
for (int i = 0; i < (int) chunks.size(); i++) {
|
||||||
|
fprintf(stderr, "%s: prompt %d: '%s'\n", __func__, i, chunks[i].textdata.c_str());
|
||||||
|
fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, chunks[i].tokens.size());
|
||||||
|
for (int j = 0; j < (int) chunks[i].tokens.size(); j++) {
|
||||||
|
fprintf(stderr, "%6d -> '%s'\n", chunks[i].tokens[j], llama_token_to_piece(ctx, chunks[i].tokens[j]).c_str());
|
||||||
|
}
|
||||||
|
fprintf(stderr, "\n\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// initialize batch
|
||||||
|
const int n_chunks = chunks.size();
|
||||||
|
struct llama_batch batch = llama_batch_init(n_batch, 0, 1);
|
||||||
|
|
||||||
|
// allocate output
|
||||||
|
const int n_embd = llama_n_embd(model);
|
||||||
|
std::vector<float> embeddings(n_chunks * n_embd, 0);
|
||||||
|
float * emb = embeddings.data();
|
||||||
|
|
||||||
|
// break into batches
|
||||||
|
int p = 0; // number of prompts processed already
|
||||||
|
int s = 0; // number of prompts in current batch
|
||||||
|
for (int k = 0; k < n_chunks; k++) {
|
||||||
|
// clamp to n_batch tokens
|
||||||
|
auto & inp = chunks[k].tokens;
|
||||||
|
|
||||||
|
const uint64_t n_toks = inp.size();
|
||||||
|
|
||||||
|
// encode if at capacity
|
||||||
|
if (batch.n_tokens + n_toks > n_batch) {
|
||||||
|
float * out = emb + p * n_embd;
|
||||||
|
batch_decode(ctx, batch, out, s, n_embd);
|
||||||
|
llama_batch_clear(batch);
|
||||||
|
p += s;
|
||||||
|
s = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// add to batch
|
||||||
|
batch_add_seq(batch, inp, s);
|
||||||
|
s += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// final batch
|
||||||
|
float * out = emb + p * n_embd;
|
||||||
|
batch_decode(ctx, batch, out, s, n_embd);
|
||||||
|
|
||||||
|
// save embeddings to chunks
|
||||||
|
for (int i = 0; i < n_chunks; i++) {
|
||||||
|
chunks[i].embedding = std::vector<float>(emb + i * n_embd, emb + (i + 1) * n_embd);
|
||||||
|
// clear tokens as they are no longer needed
|
||||||
|
chunks[i].tokens.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
// start loop, receive query and return top k similar chunks based on cosine similarity
|
||||||
|
std::string query;
|
||||||
|
while (true) {
|
||||||
|
printf("Enter query: ");
|
||||||
|
std::getline(std::cin, query);
|
||||||
|
std::vector<int32_t> query_tokens = llama_tokenize(ctx, query, true);
|
||||||
|
|
||||||
|
struct llama_batch query_batch = llama_batch_init(n_batch, 0, 1);
|
||||||
|
batch_add_seq(query_batch, query_tokens, 0);
|
||||||
|
|
||||||
|
std::vector<float> query_emb(n_embd, 0);
|
||||||
|
batch_decode(ctx, query_batch, query_emb.data(), 1, n_embd);
|
||||||
|
|
||||||
|
llama_batch_clear(query_batch);
|
||||||
|
|
||||||
|
// compute cosine similarities
|
||||||
|
{
|
||||||
|
std::vector<std::pair<int, float>> similarities;
|
||||||
|
for (int i = 0; i < n_chunks; i++) {
|
||||||
|
float sim = llama_embd_similarity_cos(chunks[i].embedding.data(), query_emb.data(), n_embd);
|
||||||
|
similarities.push_back(std::make_pair(i, sim));
|
||||||
|
}
|
||||||
|
|
||||||
|
// sort similarities
|
||||||
|
std::sort(similarities.begin(), similarities.end(), [](const std::pair<int, float> & a, const std::pair<int, float> & b) {
|
||||||
|
return a.second > b.second;
|
||||||
|
});
|
||||||
|
|
||||||
|
printf("Top %d similar chunks:\n", params.sparams.top_k);
|
||||||
|
for (int i = 0; i < std::min(params.sparams.top_k, (int) chunks.size()); i++) {
|
||||||
|
printf("filename: %s\n", chunks[similarities[i].first].filename.c_str());
|
||||||
|
printf("filepos: %lld\n", (long long int) chunks[similarities[i].first].filepos);
|
||||||
|
printf("similarity: %f\n", similarities[i].second);
|
||||||
|
printf("textdata:\n%s\n", chunks[similarities[i].first].textdata.c_str());
|
||||||
|
printf("--------------------\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// clean up
|
||||||
|
llama_print_timings(ctx);
|
||||||
|
llama_free(ctx);
|
||||||
|
llama_free_model(model);
|
||||||
|
llama_backend_free();
|
||||||
|
}
|
@ -16,17 +16,20 @@ The project is under active development, and we are [looking for feedback and co
|
|||||||
|
|
||||||
**Command line options:**
|
**Command line options:**
|
||||||
|
|
||||||
- `--threads N`, `-t N`: Set the number of threads to use during generation.
|
- `--threads N`, `-t N`: Set the number of threads to use during generation. Not used if model layers are offloaded to GPU. The server is using batching, this parameter is used only if one token is to be processed on CPU backend.
|
||||||
- `-tb N, --threads-batch N`: Set the number of threads to use during batch and prompt processing. If not specified, the number of threads will be set to the number of threads used for generation.
|
- `-tb N, --threads-batch N`: Set the number of threads to use during batch and prompt processing. If not specified, the number of threads will be set to the number of threads used for generation. Not used if model layers are offloaded to GPU.
|
||||||
- `--threads-http N`: number of threads in the http server pool to process requests (default: `max(std::thread::hardware_concurrency() - 1, --parallel N + 2)`)
|
- `--threads-http N`: number of threads in the http server pool to process requests (default: `max(std::thread::hardware_concurrency() - 1, --parallel N + 2)`)
|
||||||
- `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`).
|
- `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`).
|
||||||
- `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (e.g https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf).
|
- `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (default: unused).
|
||||||
|
- `-hfr REPO, --hf-repo REPO`: Hugging Face model repository (default: unused).
|
||||||
|
- `-hff FILE, --hf-file FILE`: Hugging Face model file (default: unused).
|
||||||
- `-a ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
|
- `-a ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
|
||||||
- `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096.
|
- `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096.
|
||||||
- `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
|
- `-ngl N`, `--n-gpu-layers N`: When compiled with GPU support, this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
|
||||||
- `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used. Requires cuBLAS.
|
- `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used.
|
||||||
- `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance. Requires cuBLAS.
|
- `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance.
|
||||||
- `-b N`, `--batch-size N`: Set the batch size for prompt processing. Default: `512`.
|
- `-b N`, `--batch-size N`: Set the batch size for prompt processing. Default: `2048`.
|
||||||
|
- `-ub N`, `--ubatch-size N`: physical maximum batch size. Default: `512`.
|
||||||
- `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
|
- `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
|
||||||
- `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
|
- `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
|
||||||
- `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
|
- `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
|
||||||
@ -57,7 +60,7 @@ see https://github.com/ggerganov/llama.cpp/issues/1437
|
|||||||
- `--slots-endpoint-disable`: To disable slots state monitoring endpoint. Slots state may contain user data, prompts included.
|
- `--slots-endpoint-disable`: To disable slots state monitoring endpoint. Slots state may contain user data, prompts included.
|
||||||
- `--metrics`: enable prometheus `/metrics` compatible endpoint (default: disabled)
|
- `--metrics`: enable prometheus `/metrics` compatible endpoint (default: disabled)
|
||||||
- `--chat-template JINJA_TEMPLATE`: Set custom jinja chat template. This parameter accepts a string, not a file name (default: template taken from model's metadata). We only support [some pre-defined templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template)
|
- `--chat-template JINJA_TEMPLATE`: Set custom jinja chat template. This parameter accepts a string, not a file name (default: template taken from model's metadata). We only support [some pre-defined templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template)
|
||||||
- `--log-disable`: Output logs to stdout only, default: enabled.
|
- `--log-disable`: Output logs to stdout only, not to `llama.log`. default: enabled.
|
||||||
- `--log-format FORMAT`: Define the log output to FORMAT: json or text (default: json)
|
- `--log-format FORMAT`: Define the log output to FORMAT: json or text (default: json)
|
||||||
|
|
||||||
**If compiled with `LLAMA_SERVER_SSL=ON`**
|
**If compiled with `LLAMA_SERVER_SSL=ON`**
|
||||||
@ -357,7 +360,7 @@ Notice that each `probs` is an array of length `n_probs`.
|
|||||||
- `default_generation_settings` - the default generation settings for the `/completion` endpoint, has the same fields as the `generation_settings` response object from the `/completion` endpoint.
|
- `default_generation_settings` - the default generation settings for the `/completion` endpoint, has the same fields as the `generation_settings` response object from the `/completion` endpoint.
|
||||||
- `total_slots` - the total number of slots for process requests (defined by `--parallel` option)
|
- `total_slots` - the total number of slots for process requests (defined by `--parallel` option)
|
||||||
|
|
||||||
- **POST** `/v1/chat/completions`: OpenAI-compatible Chat Completions API. Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only ChatML-tuned models, such as Dolphin, OpenOrca, OpenHermes, OpenChat-3.5, etc can be used with this endpoint.
|
- **POST** `/v1/chat/completions`: OpenAI-compatible Chat Completions API. Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only model with [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, ChatML template will be used.
|
||||||
|
|
||||||
*Options:*
|
*Options:*
|
||||||
|
|
||||||
|
@ -43,444 +43,454 @@ unsigned char completion_js[] = {
|
|||||||
0x7d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x63,
|
0x7d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x63,
|
||||||
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x3d, 0x20,
|
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x3d, 0x20,
|
||||||
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
|
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x72,
|
||||||
0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x69, 0x66,
|
0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x3b, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e,
|
||||||
0x20, 0x28, 0x21, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
|
0x73, 0x74, 0x20, 0x61, 0x70, 0x69, 0x5f, 0x75, 0x72, 0x6c, 0x20, 0x3d,
|
||||||
0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e,
|
0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x70, 0x69, 0x5f,
|
||||||
0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x6e, 0x65,
|
0x75, 0x72, 0x6c, 0x20, 0x7c, 0x7c, 0x20, 0x22, 0x22, 0x3b, 0x0a, 0x0a,
|
||||||
0x77, 0x20, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72,
|
0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x63, 0x6f, 0x6e, 0x74, 0x72,
|
||||||
0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d,
|
0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
0x0a, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f,
|
0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20,
|
||||||
0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61,
|
0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x43,
|
||||||
0x6d, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61,
|
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x28, 0x29, 0x3b,
|
||||||
0x72, 0x61, 0x6d, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2c,
|
0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73,
|
||||||
0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20,
|
0x74, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20,
|
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d, 0x20, 0x7b, 0x20, 0x2e,
|
||||||
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x73, 0x70, 0x6f,
|
0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x44, 0x65, 0x66, 0x61, 0x75,
|
||||||
0x6e, 0x73, 0x65, 0x20, 0x3d, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20,
|
0x6c, 0x74, 0x73, 0x2c, 0x20, 0x2e, 0x2e, 0x2e, 0x70, 0x61, 0x72, 0x61,
|
||||||
0x66, 0x65, 0x74, 0x63, 0x68, 0x28, 0x22, 0x2f, 0x63, 0x6f, 0x6d, 0x70,
|
0x6d, 0x73, 0x2c, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x20, 0x7d,
|
||||||
0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2c, 0x20, 0x7b, 0x0a, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x3a, 0x20, 0x27,
|
|
||||||
0x50, 0x4f, 0x53, 0x54, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x62,
|
|
||||||
0x6f, 0x64, 0x79, 0x3a, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e, 0x73, 0x74,
|
|
||||||
0x72, 0x69, 0x6e, 0x67, 0x69, 0x66, 0x79, 0x28, 0x63, 0x6f, 0x6d, 0x70,
|
|
||||||
0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
|
|
||||||
0x29, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x68, 0x65, 0x61, 0x64, 0x65,
|
|
||||||
0x72, 0x73, 0x3a, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x27, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x27,
|
|
||||||
0x3a, 0x20, 0x27, 0x6b, 0x65, 0x65, 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76,
|
|
||||||
0x65, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x27, 0x43,
|
|
||||||
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x54, 0x79, 0x70, 0x65, 0x27,
|
|
||||||
0x3a, 0x20, 0x27, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
|
|
||||||
0x6f, 0x6e, 0x2f, 0x6a, 0x73, 0x6f, 0x6e, 0x27, 0x2c, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x27, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x27,
|
|
||||||
0x3a, 0x20, 0x27, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x65, 0x76, 0x65, 0x6e,
|
|
||||||
0x74, 0x2d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x27, 0x2c, 0x0a, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x28, 0x70, 0x61, 0x72,
|
|
||||||
0x61, 0x6d, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x20,
|
|
||||||
0x3f, 0x20, 0x7b, 0x27, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
|
|
||||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x27, 0x3a, 0x20, 0x60, 0x42, 0x65, 0x61,
|
|
||||||
0x72, 0x65, 0x72, 0x20, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
|
|
||||||
0x2e, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x60, 0x7d, 0x20,
|
|
||||||
0x3a, 0x20, 0x7b, 0x7d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x2c,
|
|
||||||
0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x3a,
|
|
||||||
0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e,
|
|
||||||
0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x2c, 0x0a, 0x20, 0x20, 0x7d, 0x29,
|
|
||||||
0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72,
|
0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72,
|
||||||
0x65, 0x61, 0x64, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x72, 0x65, 0x73, 0x70,
|
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x3d, 0x20, 0x61, 0x77,
|
||||||
0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x67, 0x65,
|
0x61, 0x69, 0x74, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, 0x28, 0x60, 0x24,
|
||||||
0x74, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a, 0x20,
|
0x7b, 0x61, 0x70, 0x69, 0x5f, 0x75, 0x72, 0x6c, 0x7d, 0x2f, 0x63, 0x6f,
|
||||||
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x64, 0x65, 0x63, 0x6f, 0x64,
|
0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x60, 0x2c, 0x20, 0x7b,
|
||||||
0x65, 0x72, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x54, 0x65, 0x78,
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x3a,
|
||||||
0x74, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x28, 0x29, 0x3b, 0x0a,
|
0x20, 0x27, 0x50, 0x4f, 0x53, 0x54, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20,
|
||||||
0x0a, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65,
|
0x20, 0x62, 0x6f, 0x64, 0x79, 0x3a, 0x20, 0x4a, 0x53, 0x4f, 0x4e, 0x2e,
|
||||||
0x6e, 0x74, 0x20, 0x3d, 0x20, 0x22, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x6c,
|
0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x69, 0x66, 0x79, 0x28, 0x63, 0x6f,
|
||||||
0x65, 0x74, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x20,
|
0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61,
|
||||||
0x3d, 0x20, 0x22, 0x22, 0x3b, 0x20, 0x2f, 0x2f, 0x20, 0x42, 0x75, 0x66,
|
0x6d, 0x73, 0x29, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x68, 0x65, 0x61,
|
||||||
0x66, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x61, 0x72, 0x74,
|
0x64, 0x65, 0x72, 0x73, 0x3a, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x69, 0x61, 0x6c, 0x6c, 0x79, 0x20, 0x72, 0x65, 0x61, 0x64, 0x20, 0x6c,
|
0x20, 0x20, 0x27, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
|
||||||
0x69, 0x6e, 0x65, 0x73, 0x0a, 0x0a, 0x20, 0x20, 0x74, 0x72, 0x79, 0x20,
|
0x6e, 0x27, 0x3a, 0x20, 0x27, 0x6b, 0x65, 0x65, 0x70, 0x2d, 0x61, 0x6c,
|
||||||
0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x63, 0x6f,
|
0x69, 0x76, 0x65, 0x27, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x6e, 0x74, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b, 0x0a, 0x0a,
|
0x27, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x54, 0x79, 0x70,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, 0x28, 0x63,
|
0x65, 0x27, 0x3a, 0x20, 0x27, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61,
|
||||||
0x6f, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6a, 0x73, 0x6f, 0x6e, 0x27, 0x2c, 0x0a,
|
||||||
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x27, 0x41, 0x63, 0x63, 0x65, 0x70,
|
||||||
0x74, 0x20, 0x3d, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x72, 0x65,
|
0x74, 0x27, 0x3a, 0x20, 0x27, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x65, 0x76,
|
||||||
0x61, 0x64, 0x65, 0x72, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x28, 0x29, 0x3b,
|
0x65, 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x27, 0x2c,
|
||||||
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72,
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x2e, 0x2e, 0x28, 0x70,
|
||||||
0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x6f, 0x6e, 0x65, 0x29, 0x20,
|
0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65,
|
||||||
0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x72,
|
0x79, 0x20, 0x3f, 0x20, 0x7b, 0x27, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72,
|
||||||
0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d,
|
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x27, 0x3a, 0x20, 0x60, 0x42,
|
||||||
0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x41,
|
0x65, 0x61, 0x72, 0x65, 0x72, 0x20, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61,
|
||||||
0x64, 0x64, 0x20, 0x61, 0x6e, 0x79, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f,
|
0x6d, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x60,
|
||||||
0x76, 0x65, 0x72, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x74, 0x6f, 0x20,
|
0x7d, 0x20, 0x3a, 0x20, 0x7b, 0x7d, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20,
|
0x7d, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61,
|
||||||
0x63, 0x68, 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x64, 0x61, 0x74,
|
0x6c, 0x3a, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
|
||||||
0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73,
|
0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x2c, 0x0a, 0x20, 0x20,
|
||||||
0x74, 0x20, 0x74, 0x65, 0x78, 0x74, 0x20, 0x3d, 0x20, 0x6c, 0x65, 0x66,
|
0x7d, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74,
|
||||||
0x74, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x2b, 0x20, 0x64, 0x65, 0x63, 0x6f,
|
0x20, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x72, 0x65,
|
||||||
0x64, 0x65, 0x72, 0x2e, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x28, 0x72,
|
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x62, 0x6f, 0x64, 0x79, 0x2e,
|
||||||
0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x29,
|
0x67, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x28, 0x29, 0x3b,
|
||||||
0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20,
|
0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x64, 0x65, 0x63,
|
||||||
0x43, 0x68, 0x65, 0x63, 0x6b, 0x20, 0x69, 0x66, 0x20, 0x74, 0x68, 0x65,
|
0x6f, 0x64, 0x65, 0x72, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x54,
|
||||||
0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63,
|
0x65, 0x78, 0x74, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x72, 0x28, 0x29,
|
||||||
0x74, 0x65, 0x72, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x6c, 0x69, 0x6e,
|
0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x63, 0x6f, 0x6e,
|
||||||
0x65, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
0x74, 0x65, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x22, 0x22, 0x3b, 0x0a, 0x20,
|
||||||
0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x65, 0x6e, 0x64, 0x73,
|
0x20, 0x6c, 0x65, 0x74, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65,
|
||||||
0x57, 0x69, 0x74, 0x68, 0x4c, 0x69, 0x6e, 0x65, 0x42, 0x72, 0x65, 0x61,
|
0x72, 0x20, 0x3d, 0x20, 0x22, 0x22, 0x3b, 0x20, 0x2f, 0x2f, 0x20, 0x42,
|
||||||
0x6b, 0x20, 0x3d, 0x20, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x65, 0x6e, 0x64,
|
0x75, 0x66, 0x66, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x61,
|
||||||
0x73, 0x57, 0x69, 0x74, 0x68, 0x28, 0x27, 0x5c, 0x6e, 0x27, 0x29, 0x3b,
|
0x72, 0x74, 0x69, 0x61, 0x6c, 0x6c, 0x79, 0x20, 0x72, 0x65, 0x61, 0x64,
|
||||||
0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x53,
|
0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x0a, 0x0a, 0x20, 0x20, 0x74, 0x72,
|
||||||
0x70, 0x6c, 0x69, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x65, 0x78,
|
0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20,
|
||||||
0x74, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73,
|
0x63, 0x6f, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x74, 0x72, 0x75, 0x65, 0x3b,
|
||||||
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x6c,
|
0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20,
|
||||||
0x69, 0x6e, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x74, 0x65, 0x78, 0x74, 0x2e,
|
0x28, 0x63, 0x6f, 0x6e, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
0x73, 0x70, 0x6c, 0x69, 0x74, 0x28, 0x27, 0x5c, 0x6e, 0x27, 0x29, 0x3b,
|
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x72, 0x65, 0x73,
|
||||||
0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x49,
|
0x75, 0x6c, 0x74, 0x20, 0x3d, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20,
|
||||||
0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x65, 0x78, 0x74, 0x20, 0x64,
|
0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x28,
|
||||||
0x6f, 0x65, 0x73, 0x6e, 0x27, 0x74, 0x20, 0x65, 0x6e, 0x64, 0x20, 0x77,
|
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20,
|
||||||
0x69, 0x74, 0x68, 0x20, 0x61, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x20, 0x62,
|
0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x6f, 0x6e, 0x65,
|
||||||
0x72, 0x65, 0x61, 0x6b, 0x2c, 0x20, 0x74, 0x68, 0x65, 0x6e, 0x20, 0x74,
|
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x68, 0x65, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x6c, 0x69, 0x6e, 0x65,
|
0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x69, 0x73, 0x20, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65,
|
0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f,
|
||||||
0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20,
|
0x20, 0x41, 0x64, 0x64, 0x20, 0x61, 0x6e, 0x79, 0x20, 0x6c, 0x65, 0x66,
|
||||||
0x53, 0x74, 0x6f, 0x72, 0x65, 0x20, 0x69, 0x74, 0x20, 0x69, 0x6e, 0x20,
|
0x74, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x74,
|
||||||
0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x74, 0x6f, 0x20,
|
0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
|
||||||
0x62, 0x65, 0x20, 0x61, 0x64, 0x64, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20,
|
0x74, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x64,
|
||||||
0x74, 0x68, 0x65, 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x63, 0x68, 0x75,
|
0x61, 0x74, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f,
|
||||||
0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20,
|
0x6e, 0x73, 0x74, 0x20, 0x74, 0x65, 0x78, 0x74, 0x20, 0x3d, 0x20, 0x6c,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x65, 0x6e,
|
0x65, 0x66, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x2b, 0x20, 0x64, 0x65,
|
||||||
|
0x63, 0x6f, 0x64, 0x65, 0x72, 0x2e, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65,
|
||||||
|
0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x61, 0x6c, 0x75,
|
||||||
|
0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f,
|
||||||
|
0x2f, 0x20, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x20, 0x69, 0x66, 0x20, 0x74,
|
||||||
|
0x68, 0x65, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x63, 0x68, 0x61, 0x72,
|
||||||
|
0x61, 0x63, 0x74, 0x65, 0x72, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x6c,
|
||||||
|
0x69, 0x6e, 0x65, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x0a, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x65, 0x6e,
|
||||||
0x64, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4c, 0x69, 0x6e, 0x65, 0x42, 0x72,
|
0x64, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4c, 0x69, 0x6e, 0x65, 0x42, 0x72,
|
||||||
0x65, 0x61, 0x6b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x65, 0x61, 0x6b, 0x20, 0x3d, 0x20, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x65,
|
||||||
0x20, 0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x20,
|
0x6e, 0x64, 0x73, 0x57, 0x69, 0x74, 0x68, 0x28, 0x27, 0x5c, 0x6e, 0x27,
|
||||||
0x3d, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x70, 0x6f, 0x70, 0x28,
|
0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f,
|
||||||
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x65,
|
0x20, 0x53, 0x70, 0x6c, 0x69, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74,
|
||||||
0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x65, 0x78, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x6c, 0x69, 0x6e,
|
||||||
0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x3d,
|
0x65, 0x73, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74,
|
||||||
0x20, 0x22, 0x22, 0x3b, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65, 0x73, 0x65,
|
0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x3d, 0x20, 0x74, 0x65, 0x78,
|
||||||
0x74, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x69,
|
0x74, 0x2e, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x28, 0x27, 0x5c, 0x6e, 0x27,
|
||||||
0x66, 0x20, 0x77, 0x65, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x61, 0x20,
|
0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f,
|
||||||
0x6c, 0x69, 0x6e, 0x65, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x20, 0x61,
|
0x20, 0x49, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x65, 0x78, 0x74,
|
||||||
0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x64, 0x0a, 0x20, 0x20,
|
0x20, 0x64, 0x6f, 0x65, 0x73, 0x6e, 0x27, 0x74, 0x20, 0x65, 0x6e, 0x64,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x61, 0x20, 0x6c, 0x69, 0x6e, 0x65,
|
||||||
0x20, 0x2f, 0x2f, 0x20, 0x50, 0x61, 0x72, 0x73, 0x65, 0x20, 0x61, 0x6c,
|
0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x2c, 0x20, 0x74, 0x68, 0x65, 0x6e,
|
||||||
0x6c, 0x20, 0x73, 0x73, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73,
|
0x20, 0x74, 0x68, 0x65, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x6c, 0x69,
|
||||||
0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x64, 0x64, 0x20, 0x74, 0x68, 0x65,
|
0x6e, 0x65, 0x20, 0x69, 0x73, 0x20, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x70,
|
||||||
0x6d, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x0a,
|
0x6c, 0x65, 0x74, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20,
|
0x2f, 0x20, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x20, 0x69, 0x74, 0x20, 0x69,
|
||||||
0x72, 0x65, 0x67, 0x65, 0x78, 0x20, 0x3d, 0x20, 0x2f, 0x5e, 0x28, 0x5c,
|
0x6e, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x74,
|
||||||
0x53, 0x2b, 0x29, 0x3a, 0x5c, 0x73, 0x28, 0x2e, 0x2a, 0x29, 0x24, 0x2f,
|
0x6f, 0x20, 0x62, 0x65, 0x20, 0x61, 0x64, 0x64, 0x65, 0x64, 0x20, 0x74,
|
||||||
0x67, 0x6d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f,
|
0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x63,
|
||||||
0x72, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x69, 0x6e,
|
0x68, 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x64, 0x61, 0x74, 0x61,
|
||||||
0x65, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x29, 0x20,
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21,
|
||||||
0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f,
|
0x65, 0x6e, 0x64, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4c, 0x69, 0x6e, 0x65,
|
||||||
0x6e, 0x73, 0x74, 0x20, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x20, 0x3d, 0x20,
|
0x42, 0x72, 0x65, 0x61, 0x6b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x28, 0x6c,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65,
|
||||||
0x69, 0x6e, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x72, 0x20, 0x3d, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x70, 0x6f,
|
||||||
0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x29,
|
0x70, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d,
|
||||||
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5b, 0x6d, 0x61, 0x74, 0x63,
|
0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65, 0x72,
|
||||||
0x68, 0x5b, 0x31, 0x5d, 0x5d, 0x20, 0x3d, 0x20, 0x6d, 0x61, 0x74, 0x63,
|
0x20, 0x3d, 0x20, 0x22, 0x22, 0x3b, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x65,
|
||||||
0x68, 0x5b, 0x32, 0x5d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x73, 0x65, 0x74, 0x20, 0x6c, 0x65, 0x66, 0x74, 0x6f, 0x76, 0x65, 0x72,
|
||||||
0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x20,
|
0x20, 0x69, 0x66, 0x20, 0x77, 0x65, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20,
|
||||||
0x77, 0x65, 0x20, 0x6b, 0x6e, 0x6f, 0x77, 0x20, 0x74, 0x68, 0x69, 0x73,
|
0x61, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b,
|
||||||
0x20, 0x69, 0x73, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70,
|
0x20, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x6e, 0x64, 0x0a,
|
||||||
0x70, 0x2c, 0x20, 0x6c, 0x65, 0x74, 0x27, 0x73, 0x20, 0x6a, 0x75, 0x73,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20,
|
||||||
0x74, 0x20, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x20, 0x74, 0x68, 0x65,
|
0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x50, 0x61, 0x72, 0x73, 0x65, 0x20,
|
||||||
0x20, 0x6a, 0x73, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x64, 0x61, 0x74,
|
0x61, 0x6c, 0x6c, 0x20, 0x73, 0x73, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e,
|
||||||
0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x74, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x64, 0x64, 0x20, 0x74,
|
||||||
0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64,
|
0x68, 0x65, 0x6d, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c,
|
||||||
0x61, 0x74, 0x61, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c,
|
0x74, 0x20, 0x72, 0x65, 0x67, 0x65, 0x78, 0x20, 0x3d, 0x20, 0x2f, 0x5e,
|
||||||
0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x4a, 0x53, 0x4f,
|
0x28, 0x5c, 0x53, 0x2b, 0x29, 0x3a, 0x5c, 0x73, 0x28, 0x2e, 0x2a, 0x29,
|
||||||
0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, 0x72, 0x65, 0x73, 0x75,
|
0x24, 0x2f, 0x67, 0x6d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
0x66, 0x6f, 0x72, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f,
|
0x69, 0x6e, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73,
|
||||||
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x2b, 0x3d, 0x20, 0x72, 0x65, 0x73,
|
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x63, 0x6f, 0x6e,
|
0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x20,
|
||||||
0x74, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x3d, 0x20, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x65, 0x78, 0x65, 0x63,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x79, 0x69,
|
0x28, 0x6c, 0x69, 0x6e, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x65, 0x6c, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6d, 0x61, 0x74, 0x63,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x79, 0x69, 0x65, 0x6c, 0x64, 0x20, 0x72, 0x65,
|
0x68, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5b, 0x6d, 0x61,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x69, 0x66,
|
0x74, 0x63, 0x68, 0x5b, 0x31, 0x5d, 0x5d, 0x20, 0x3d, 0x20, 0x6d, 0x61,
|
||||||
0x20, 0x77, 0x65, 0x20, 0x67, 0x6f, 0x74, 0x20, 0x61, 0x20, 0x73, 0x74,
|
0x74, 0x63, 0x68, 0x5b, 0x32, 0x5d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x6f, 0x70, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x20, 0x66, 0x72, 0x6f,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x69, 0x6e, 0x63,
|
||||||
0x6d, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2c, 0x20, 0x77, 0x65,
|
0x65, 0x20, 0x77, 0x65, 0x20, 0x6b, 0x6e, 0x6f, 0x77, 0x20, 0x74, 0x68,
|
||||||
0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x20,
|
0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e,
|
||||||
0x68, 0x65, 0x72, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x63, 0x70, 0x70, 0x2c, 0x20, 0x6c, 0x65, 0x74, 0x27, 0x73, 0x20, 0x6a,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73,
|
0x75, 0x73, 0x74, 0x20, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x20, 0x74,
|
||||||
0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x73, 0x74, 0x6f,
|
0x68, 0x65, 0x20, 0x6a, 0x73, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x64,
|
||||||
0x70, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x61, 0x74, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
|
||||||
|
0x2e, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73,
|
||||||
|
0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x20, 0x3d, 0x20, 0x4a,
|
||||||
|
0x53, 0x4f, 0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, 0x72, 0x65,
|
||||||
|
0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x29, 0x3b, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x2b, 0x3d, 0x20, 0x72,
|
||||||
|
0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x63,
|
||||||
|
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20,
|
||||||
|
0x79, 0x69, 0x65, 0x6c, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x79, 0x69, 0x65, 0x6c, 0x64, 0x20,
|
||||||
|
0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20,
|
||||||
|
0x69, 0x66, 0x20, 0x77, 0x65, 0x20, 0x67, 0x6f, 0x74, 0x20, 0x61, 0x20,
|
||||||
|
0x73, 0x74, 0x6f, 0x70, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x20, 0x66,
|
||||||
|
0x72, 0x6f, 0x6d, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2c, 0x20,
|
||||||
|
0x77, 0x65, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x72, 0x65, 0x61,
|
||||||
|
0x6b, 0x20, 0x68, 0x65, 0x72, 0x65, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72,
|
||||||
0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x67,
|
0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x73,
|
||||||
|
0x74, 0x6f, 0x70, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20,
|
||||||
|
0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61,
|
||||||
|
0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
|
||||||
|
0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
|
||||||
|
0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20,
|
||||||
|
0x3d, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74,
|
||||||
|
0x61, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x20, 0x3d, 0x20,
|
||||||
|
0x66, 0x61, 0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x72, 0x65,
|
||||||
|
0x61, 0x6b, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75,
|
||||||
|
0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x20, 0x7b, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x74, 0x72, 0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x75,
|
||||||
|
0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x3d, 0x20, 0x4a,
|
||||||
|
0x53, 0x4f, 0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, 0x72, 0x65,
|
||||||
|
0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x3b,
|
||||||
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75, 0x6c,
|
||||||
|
0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x6d, 0x65, 0x73, 0x73,
|
||||||
|
0x61, 0x67, 0x65, 0x2e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x73,
|
||||||
|
0x28, 0x27, 0x73, 0x6c, 0x6f, 0x74, 0x20, 0x75, 0x6e, 0x61, 0x76, 0x61,
|
||||||
|
0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x27, 0x29, 0x29, 0x20, 0x7b, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x54, 0x68, 0x72, 0x6f, 0x77,
|
||||||
|
0x20, 0x61, 0x6e, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x74, 0x6f,
|
||||||
|
0x20, 0x62, 0x65, 0x20, 0x63, 0x61, 0x75, 0x67, 0x68, 0x74, 0x20, 0x62,
|
||||||
|
0x79, 0x20, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x20, 0x63,
|
||||||
|
0x61, 0x6c, 0x6c, 0x65, 0x72, 0x73, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74,
|
||||||
|
0x68, 0x72, 0x6f, 0x77, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x45, 0x72, 0x72,
|
||||||
|
0x6f, 0x72, 0x28, 0x27, 0x73, 0x6c, 0x6f, 0x74, 0x20, 0x75, 0x6e, 0x61,
|
||||||
|
0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x27, 0x29, 0x3b, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x65,
|
||||||
|
0x72, 0x72, 0x6f, 0x72, 0x28, 0x60, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e,
|
||||||
|
0x63, 0x70, 0x70, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x5b, 0x24,
|
||||||
|
0x7b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f,
|
||||||
|
0x72, 0x2e, 0x63, 0x6f, 0x64, 0x65, 0x7d, 0x20, 0x2d, 0x20, 0x24, 0x7b,
|
||||||
|
0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72,
|
||||||
|
0x2e, 0x74, 0x79, 0x70, 0x65, 0x7d, 0x5d, 0x3a, 0x20, 0x24, 0x7b, 0x72,
|
||||||
|
0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
|
||||||
|
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x60, 0x29, 0x3b, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, 0x68, 0x28,
|
||||||
|
0x65, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f,
|
||||||
|
0x6c, 0x65, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x60, 0x6c, 0x6c,
|
||||||
|
0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x20, 0x65, 0x72, 0x72, 0x6f,
|
||||||
|
0x72, 0x20, 0x24, 0x7b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65,
|
||||||
|
0x72, 0x72, 0x6f, 0x72, 0x7d, 0x60, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20,
|
||||||
|
0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, 0x68, 0x20, 0x28, 0x65, 0x29, 0x20,
|
||||||
|
0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x65, 0x2e,
|
||||||
|
0x6e, 0x61, 0x6d, 0x65, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x27, 0x41, 0x62,
|
||||||
|
0x6f, 0x72, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x27, 0x29, 0x20, 0x7b,
|
||||||
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f,
|
||||||
|
0x6c, 0x65, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x22, 0x6c, 0x6c,
|
||||||
|
0x61, 0x6d, 0x61, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x3a, 0x20, 0x22,
|
||||||
|
0x2c, 0x20, 0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x65, 0x3b,
|
||||||
|
0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x66, 0x69, 0x6e, 0x61, 0x6c,
|
||||||
|
0x6c, 0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e,
|
||||||
|
0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x61, 0x62, 0x6f, 0x72,
|
||||||
|
0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20,
|
||||||
|
0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65,
|
||||||
|
0x6e, 0x74, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x43, 0x61,
|
||||||
|
0x6c, 0x6c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2c, 0x20, 0x72, 0x65,
|
||||||
|
0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x76, 0x65, 0x6e,
|
||||||
|
0x74, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x74, 0x68, 0x61,
|
||||||
|
0x74, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x73, 0x75,
|
||||||
|
0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x20, 0x74, 0x6f, 0x0a, 0x2f,
|
||||||
|
0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
|
||||||
|
0x3a, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x69,
|
||||||
|
0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x7b, 0x20, 0x6c, 0x6c, 0x61, 0x6d,
|
||||||
|
0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
|
||||||
|
0x20, 0x7d, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x27, 0x2f, 0x63, 0x6f,
|
||||||
|
0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6a, 0x73, 0x27,
|
||||||
|
0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f,
|
||||||
|
0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x20, 0x3d, 0x20, 0x6c,
|
||||||
|
0x6c, 0x61, 0x6d, 0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72,
|
||||||
|
0x67, 0x65, 0x74, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x29, 0x0a,
|
||||||
|
0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x61,
|
||||||
|
0x64, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65,
|
||||||
|
0x6e, 0x65, 0x72, 0x28, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||||
|
0x22, 0x2c, 0x20, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x29, 0x20, 0x3d,
|
||||||
|
0x3e, 0x20, 0x7b, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x72, 0x69,
|
||||||
|
0x74, 0x65, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x65, 0x74,
|
||||||
|
0x61, 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29,
|
||||||
|
0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x2f, 0x2f,
|
||||||
|
0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73,
|
||||||
|
0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x45, 0x76, 0x65, 0x6e, 0x74,
|
||||||
|
0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72,
|
||||||
|
0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
|
||||||
|
0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69,
|
||||||
|
0x67, 0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b,
|
||||||
|
0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x65, 0x76, 0x65,
|
||||||
|
0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x6e,
|
||||||
|
0x65, 0x77, 0x20, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67,
|
||||||
|
0x65, 0x74, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x28, 0x61, 0x73, 0x79,
|
||||||
|
0x6e, 0x63, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65,
|
||||||
|
0x6e, 0x74, 0x20, 0x3d, 0x20, 0x22, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x28,
|
||||||
|
0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x20,
|
||||||
|
0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f,
|
||||||
|
0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c,
|
||||||
|
0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x29, 0x29, 0x20, 0x7b, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x68,
|
||||||
|
0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x7b, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74,
|
||||||
|
0x65, 0x6e, 0x74, 0x20, 0x2b, 0x3d, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b,
|
||||||
|
0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
|
||||||
|
0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65,
|
||||||
|
0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64,
|
||||||
|
0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74,
|
||||||
|
0x28, 0x6e, 0x65, 0x77, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45,
|
||||||
|
0x76, 0x65, 0x6e, 0x74, 0x28, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||||
|
0x65, 0x22, 0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
|
||||||
|
0x3a, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61,
|
||||||
|
0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28,
|
||||||
|
0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x67,
|
||||||
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65,
|
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65,
|
||||||
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54,
|
||||||
0x20, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74,
|
||||||
0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20,
|
0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77, 0x20,
|
||||||
0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
|
0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28,
|
||||||
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73,
|
0x22, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
|
||||||
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x20,
|
0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x2c, 0x20, 0x7b,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d,
|
0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20, 0x63, 0x68, 0x75,
|
||||||
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x67, 0x65, 0x6e, 0x65,
|
||||||
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x66, 0x61,
|
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69,
|
||||||
0x6c, 0x73, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x6e, 0x67, 0x73, 0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b,
|
0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69,
|
||||||
0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x66, 0x20, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74,
|
||||||
0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x61, 0x2e, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b,
|
||||||
0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
|
|
||||||
0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72,
|
|
||||||
0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
|
|
||||||
0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x3d, 0x20, 0x4a, 0x53, 0x4f,
|
|
||||||
0x4e, 0x2e, 0x70, 0x61, 0x72, 0x73, 0x65, 0x28, 0x72, 0x65, 0x73, 0x75,
|
|
||||||
0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x3b, 0x0a, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e,
|
|
||||||
0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
|
||||||
0x65, 0x2e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x73, 0x28, 0x27,
|
|
||||||
0x73, 0x6c, 0x6f, 0x74, 0x20, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c,
|
|
||||||
0x61, 0x62, 0x6c, 0x65, 0x27, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x2f, 0x2f, 0x20, 0x54, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x61,
|
|
||||||
0x6e, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x62,
|
|
||||||
0x65, 0x20, 0x63, 0x61, 0x75, 0x67, 0x68, 0x74, 0x20, 0x62, 0x79, 0x20,
|
|
||||||
0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x20, 0x63, 0x61, 0x6c,
|
|
||||||
0x6c, 0x65, 0x72, 0x73, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x68, 0x72,
|
|
||||||
0x6f, 0x77, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72,
|
|
||||||
0x28, 0x27, 0x73, 0x6c, 0x6f, 0x74, 0x20, 0x75, 0x6e, 0x61, 0x76, 0x61,
|
|
||||||
0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x7d, 0x20, 0x65, 0x6c, 0x73, 0x65, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65, 0x2e, 0x65, 0x72, 0x72,
|
|
||||||
0x6f, 0x72, 0x28, 0x60, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70,
|
|
||||||
0x70, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x5b, 0x24, 0x7b, 0x72,
|
|
||||||
0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
|
|
||||||
0x63, 0x6f, 0x64, 0x65, 0x7d, 0x20, 0x2d, 0x20, 0x24, 0x7b, 0x72, 0x65,
|
|
||||||
0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x74,
|
|
||||||
0x79, 0x70, 0x65, 0x7d, 0x5d, 0x3a, 0x20, 0x24, 0x7b, 0x72, 0x65, 0x73,
|
|
||||||
0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x6d, 0x65,
|
|
||||||
0x73, 0x73, 0x61, 0x67, 0x65, 0x7d, 0x60, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74, 0x63, 0x68, 0x28, 0x65, 0x29,
|
|
||||||
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65,
|
|
||||||
0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x60, 0x6c, 0x6c, 0x61, 0x6d,
|
|
||||||
0x61, 0x2e, 0x63, 0x70, 0x70, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20,
|
|
||||||
0x24, 0x7b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x65, 0x72, 0x72,
|
|
||||||
0x6f, 0x72, 0x7d, 0x60, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
|
||||||
0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x20,
|
|
||||||
0x63, 0x61, 0x74, 0x63, 0x68, 0x20, 0x28, 0x65, 0x29, 0x20, 0x7b, 0x0a,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x65, 0x2e, 0x6e, 0x61,
|
|
||||||
0x6d, 0x65, 0x20, 0x21, 0x3d, 0x3d, 0x20, 0x27, 0x41, 0x62, 0x6f, 0x72,
|
|
||||||
0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x27, 0x29, 0x20, 0x7b, 0x0a, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x65,
|
|
||||||
0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x22, 0x6c, 0x6c, 0x61, 0x6d,
|
|
||||||
0x61, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x3a, 0x20, 0x22, 0x2c, 0x20,
|
|
||||||
0x65, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x77, 0x20, 0x65, 0x3b, 0x0a, 0x20,
|
|
||||||
0x20, 0x7d, 0x0a, 0x20, 0x20, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x6c, 0x79,
|
|
||||||
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72,
|
|
||||||
0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2e, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x28,
|
|
||||||
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x72, 0x65,
|
|
||||||
0x74, 0x75, 0x72, 0x6e, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
|
||||||
0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x43, 0x61, 0x6c, 0x6c,
|
|
||||||
0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2c, 0x20, 0x72, 0x65, 0x74, 0x75,
|
|
||||||
0x72, 0x6e, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20,
|
|
||||||
0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20,
|
|
||||||
0x79, 0x6f, 0x75, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x73, 0x75, 0x62, 0x73,
|
|
||||||
0x63, 0x72, 0x69, 0x62, 0x65, 0x20, 0x74, 0x6f, 0x0a, 0x2f, 0x2f, 0x0a,
|
|
||||||
0x2f, 0x2f, 0x20, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x3a, 0x0a,
|
|
||||||
0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x69, 0x6d, 0x70,
|
|
||||||
0x6f, 0x72, 0x74, 0x20, 0x7b, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x45,
|
|
||||||
0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x7d,
|
|
||||||
0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x27, 0x2f, 0x63, 0x6f, 0x6d, 0x70,
|
|
||||||
0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x6a, 0x73, 0x27, 0x0a, 0x2f,
|
|
||||||
0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73,
|
|
||||||
0x74, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x20, 0x3d, 0x20, 0x6c, 0x6c, 0x61,
|
|
||||||
0x6d, 0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65,
|
|
||||||
0x74, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x29, 0x0a, 0x2f, 0x2f,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x61, 0x64, 0x64,
|
|
||||||
0x45, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65,
|
|
||||||
0x72, 0x28, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2c,
|
|
||||||
0x20, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x29, 0x20, 0x3d, 0x3e, 0x20,
|
|
||||||
0x7b, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x6f,
|
|
||||||
0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65,
|
|
||||||
0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x65, 0x74, 0x61, 0x69,
|
|
||||||
0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x0a, 0x2f,
|
|
||||||
0x2f, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a, 0x2f, 0x2f, 0x0a, 0x65,
|
|
||||||
0x78, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20,
|
|
||||||
0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61,
|
|
||||||
0x72, 0x67, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f, 0x6d,
|
|
||||||
0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x3d,
|
|
||||||
0x20, 0x7b, 0x7d, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20,
|
|
||||||
0x3d, 0x20, 0x7b, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20,
|
|
||||||
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74,
|
|
||||||
0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x3d, 0x20, 0x6e, 0x65, 0x77,
|
|
||||||
0x20, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
|
|
||||||
0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x28, 0x61, 0x73, 0x79, 0x6e, 0x63,
|
|
||||||
0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
|
||||||
0x20, 0x6c, 0x65, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
|
||||||
0x20, 0x3d, 0x20, 0x22, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66,
|
|
||||||
0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x28, 0x63, 0x6f,
|
|
||||||
0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66,
|
|
||||||
0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70,
|
|
||||||
0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63,
|
|
||||||
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x68, 0x75, 0x6e,
|
|
||||||
0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
|
|
||||||
0x74, 0x20, 0x2b, 0x3d, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64,
|
|
||||||
0x61, 0x74, 0x61, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3b,
|
|
||||||
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x65,
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x65,
|
||||||
0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64, 0x69, 0x73,
|
0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64, 0x69, 0x73,
|
||||||
0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e,
|
0x70, 0x61, 0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e,
|
||||||
0x65, 0x77, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65,
|
0x65, 0x77, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65,
|
||||||
0x6e, 0x74, 0x28, 0x22, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22,
|
0x6e, 0x74, 0x28, 0x22, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x22,
|
||||||
0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20,
|
0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20,
|
||||||
0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x20, 0x7d,
|
0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x74,
|
||||||
0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a,
|
0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x63, 0x68,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x67, 0x65, 0x6e,
|
0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54,
|
||||||
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74,
|
0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74,
|
||||||
0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77, 0x20,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72,
|
0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28,
|
||||||
0x67, 0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68,
|
0x22, 0x64, 0x6f, 0x6e, 0x65, 0x22, 0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65,
|
||||||
0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77, 0x20, 0x43, 0x75,
|
0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20, 0x7b, 0x20, 0x63, 0x6f, 0x6e, 0x74,
|
||||||
0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x22, 0x67,
|
0x65, 0x6e, 0x74, 0x20, 0x7d, 0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20,
|
||||||
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65,
|
0x20, 0x7d, 0x29, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74,
|
||||||
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x2c, 0x20, 0x7b, 0x20, 0x64,
|
0x75, 0x72, 0x6e, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72,
|
||||||
0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b,
|
0x67, 0x65, 0x74, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x43,
|
||||||
0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
|
0x61, 0x6c, 0x6c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2c, 0x20, 0x72,
|
||||||
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
|
0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x20, 0x70, 0x72, 0x6f, 0x6d,
|
||||||
0x73, 0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x69, 0x73, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, 0x65, 0x73,
|
||||||
0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20,
|
0x6f, 0x6c, 0x76, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65,
|
||||||
0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e,
|
0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x20, 0x74,
|
||||||
0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20,
|
0x65, 0x78, 0x74, 0x2e, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x64, 0x6f,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74,
|
0x65, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x73, 0x75, 0x70, 0x70, 0x6f,
|
||||||
0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61,
|
0x72, 0x74, 0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
|
||||||
0x74, 0x63, 0x68, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77,
|
0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x45, 0x78, 0x61, 0x6d, 0x70,
|
||||||
0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74,
|
0x6c, 0x65, 0x3a, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20,
|
||||||
0x28, 0x22, 0x74, 0x69, 0x6d, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x2c, 0x20,
|
0x20, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x6d, 0x69,
|
||||||
0x7b, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x3a, 0x20, 0x63, 0x68,
|
0x73, 0x65, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x29, 0x2e, 0x74,
|
||||||
0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x74, 0x69, 0x6d,
|
0x68, 0x65, 0x6e, 0x28, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
||||||
0x69, 0x6e, 0x67, 0x73, 0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a,
|
0x20, 0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72,
|
0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65,
|
||||||
0x67, 0x65, 0x74, 0x2e, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68,
|
0x6e, 0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d,
|
||||||
0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x6e, 0x65, 0x77, 0x20, 0x43, 0x75,
|
0x29, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x73, 0x74, 0x6f, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x28, 0x22, 0x64,
|
0x6f, 0x72, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x6f, 0x6e, 0x65, 0x22, 0x2c, 0x20, 0x7b, 0x20, 0x64, 0x65, 0x74, 0x61,
|
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65,
|
||||||
0x69, 0x6c, 0x3a, 0x20, 0x7b, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
|
0x6e, 0x74, 0x20, 0x3d, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x6c,
|
||||||
0x74, 0x20, 0x7d, 0x20, 0x7d, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d,
|
0x6c, 0x61, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x28,
|
||||||
0x29, 0x28, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72,
|
0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x20, 0x20,
|
||||||
0x6e, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65,
|
0x20, 0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e,
|
||||||
0x74, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x43, 0x61, 0x6c,
|
0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
|
||||||
0x6c, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2c, 0x20, 0x72, 0x65, 0x74,
|
0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74,
|
||||||
0x75, 0x72, 0x6e, 0x20, 0x61, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x69, 0x73,
|
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61,
|
||||||
0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x6c,
|
0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x70,
|
||||||
0x76, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63,
|
0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d,
|
||||||
0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x20, 0x74, 0x65, 0x78,
|
0x73, 0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66,
|
||||||
0x74, 0x2e, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x64, 0x6f, 0x65, 0x73,
|
0x69, 0x67, 0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20,
|
||||||
0x20, 0x6e, 0x6f, 0x74, 0x20, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
|
0x7b, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e,
|
||||||
0x20, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x0a, 0x2f,
|
0x65, 0x77, 0x20, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x28, 0x61,
|
||||||
0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
|
0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76,
|
||||||
0x3a, 0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x65, 0x2c, 0x20, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x29, 0x20, 0x3d,
|
||||||
0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65,
|
0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20,
|
||||||
0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x29, 0x2e, 0x74, 0x68, 0x65,
|
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x22, 0x22,
|
||||||
0x6e, 0x28, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29, 0x20,
|
0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x79, 0x20, 0x7b, 0x0a,
|
||||||
0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77,
|
||||||
0x20, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x77,
|
0x61, 0x69, 0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63,
|
||||||
0x72, 0x69, 0x74, 0x65, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
0x68, 0x75, 0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d,
|
||||||
0x29, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x29, 0x0a,
|
0x61, 0x28, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61,
|
||||||
0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6f, 0x72,
|
0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
|
||||||
0x0a, 0x2f, 0x2f, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63,
|
0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x2b, 0x3d, 0x20,
|
||||||
0x20, 0x3d, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x6c, 0x6c, 0x61,
|
0x63, 0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x63,
|
||||||
0x6d, 0x61, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x28, 0x70, 0x72,
|
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
0x6f, 0x6d, 0x70, 0x74, 0x29, 0x0a, 0x2f, 0x2f, 0x20, 0x20, 0x20, 0x20,
|
0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65,
|
||||||
0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x77, 0x72,
|
0x73, 0x6f, 0x6c, 0x76, 0x65, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
|
||||||
0x69, 0x74, 0x65, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29,
|
0x74, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61,
|
||||||
0x0a, 0x2f, 0x2f, 0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63,
|
0x74, 0x63, 0x68, 0x20, 0x28, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x20,
|
||||||
0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x50, 0x72,
|
0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6a, 0x65,
|
||||||
0x6f, 0x6d, 0x69, 0x73, 0x65, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x72, 0x6f,
|
0x63, 0x74, 0x28, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x3b, 0x0a, 0x20,
|
||||||
0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20,
|
0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x7d,
|
||||||
0x3d, 0x20, 0x7b, 0x7d, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
|
0x3b, 0x0a, 0x0a, 0x2f, 0x2a, 0x2a, 0x0a, 0x20, 0x2a, 0x20, 0x28, 0x64,
|
||||||
0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a,
|
0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x29, 0x0a, 0x20,
|
||||||
0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x6e, 0x65, 0x77,
|
0x2a, 0x2f, 0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f,
|
||||||
0x20, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x28, 0x61, 0x73, 0x79,
|
0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x43, 0x6f, 0x6d,
|
||||||
0x6e, 0x63, 0x20, 0x28, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x2c,
|
0x70, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e,
|
||||||
0x20, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x29, 0x20, 0x3d, 0x3e, 0x20,
|
0x63, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63,
|
||||||
0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x74, 0x20, 0x63, 0x6f,
|
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2c, 0x20, 0x63,
|
||||||
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x3d, 0x20, 0x22, 0x22, 0x3b, 0x0a,
|
0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x29, 0x20, 0x3d, 0x3e, 0x20,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x74, 0x72, 0x79, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
0x7b, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, 0x69,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, 0x69,
|
|
||||||
0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x75,
|
0x74, 0x20, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x75,
|
||||||
0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28,
|
0x6e, 0x6b, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28,
|
||||||
0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61,
|
0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x70,
|
||||||
0x6d, 0x73, 0x2c, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x29, 0x29,
|
0x74, 0x2c, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x7b,
|
||||||
0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x63,
|
0x20, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20,
|
||||||
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x20, 0x2b, 0x3d, 0x20, 0x63, 0x68,
|
0x7d, 0x29, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61,
|
||||||
0x75, 0x6e, 0x6b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x63, 0x6f, 0x6e,
|
0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b,
|
||||||
0x74, 0x65, 0x6e, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f,
|
||||||
0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x73, 0x6f,
|
0x20, 0x47, 0x65, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x6f, 0x64,
|
||||||
0x6c, 0x76, 0x65, 0x28, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x29,
|
0x65, 0x6c, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x20, 0x66, 0x72, 0x6f, 0x6d,
|
||||||
0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x20, 0x63, 0x61, 0x74, 0x63,
|
0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e,
|
||||||
0x68, 0x20, 0x28, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x20, 0x7b, 0x0a,
|
0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x75, 0x73, 0x65,
|
||||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74,
|
0x66, 0x75, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x67, 0x65, 0x74, 0x74,
|
||||||
0x28, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20,
|
0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74,
|
||||||
0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x29, 0x3b, 0x0a, 0x7d, 0x3b, 0x0a,
|
0x65, 0x78, 0x74, 0x20, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x20, 0x61,
|
||||||
0x0a, 0x2f, 0x2a, 0x2a, 0x0a, 0x20, 0x2a, 0x20, 0x28, 0x64, 0x65, 0x70,
|
0x6e, 0x64, 0x20, 0x73, 0x6f, 0x20, 0x6f, 0x6e, 0x2e, 0x0a, 0x65, 0x78,
|
||||||
0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x29, 0x0a, 0x20, 0x2a, 0x2f,
|
0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c,
|
||||||
0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73,
|
0x6c, 0x61, 0x6d, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x49, 0x6e, 0x66,
|
||||||
0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x6c,
|
0x6f, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x63,
|
||||||
0x65, 0x74, 0x65, 0x20, 0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20,
|
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x3d, 0x20, 0x7b, 0x7d, 0x29, 0x20,
|
||||||
0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x63, 0x6f, 0x6e,
|
0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21,
|
||||||
0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x2c, 0x20, 0x63, 0x61, 0x6c,
|
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73,
|
||||||
0x6c, 0x62, 0x61, 0x63, 0x6b, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a,
|
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20,
|
||||||
0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20,
|
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x61, 0x70, 0x69,
|
||||||
0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x63, 0x68, 0x75, 0x6e, 0x6b,
|
0x5f, 0x75, 0x72, 0x6c, 0x20, 0x3d, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69,
|
||||||
0x20, 0x6f, 0x66, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x28, 0x70, 0x61,
|
0x67, 0x2e, 0x61, 0x70, 0x69, 0x5f, 0x75, 0x72, 0x6c, 0x20, 0x7c, 0x7c,
|
||||||
0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x2c,
|
0x20, 0x22, 0x22, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e,
|
||||||
0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2c, 0x20, 0x7b, 0x20, 0x63,
|
0x73, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x20, 0x3d, 0x20, 0x61,
|
||||||
0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x7d, 0x29,
|
0x77, 0x61, 0x69, 0x74, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, 0x28, 0x60,
|
||||||
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x61, 0x6c, 0x6c,
|
0x24, 0x7b, 0x61, 0x70, 0x69, 0x5f, 0x75, 0x72, 0x6c, 0x7d, 0x2f, 0x70,
|
||||||
0x62, 0x61, 0x63, 0x6b, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x29, 0x3b,
|
0x72, 0x6f, 0x70, 0x73, 0x60, 0x29, 0x2e, 0x74, 0x68, 0x65, 0x6e, 0x28,
|
||||||
0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x47,
|
0x72, 0x20, 0x3d, 0x3e, 0x20, 0x72, 0x2e, 0x6a, 0x73, 0x6f, 0x6e, 0x28,
|
||||||
0x65, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x6f, 0x64, 0x65, 0x6c,
|
0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x67, 0x65, 0x6e, 0x65,
|
||||||
0x20, 0x69, 0x6e, 0x66, 0x6f, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74,
|
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69,
|
||||||
0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x20, 0x54,
|
0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e,
|
||||||
0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x75, 0x73, 0x65, 0x66, 0x75,
|
0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x67, 0x65, 0x6e, 0x65,
|
||||||
0x6c, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x67, 0x65, 0x74, 0x74, 0x69, 0x6e,
|
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69,
|
||||||
0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78,
|
0x6e, 0x67, 0x73, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x72,
|
||||||
0x74, 0x20, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x20, 0x61, 0x6e, 0x64,
|
0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
|
||||||
0x20, 0x73, 0x6f, 0x20, 0x6f, 0x6e, 0x2e, 0x0a, 0x65, 0x78, 0x70, 0x6f,
|
0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
|
||||||
0x72, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61,
|
0x73, 0x3b, 0x0a, 0x7d, 0x0a
|
||||||
0x6d, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x20,
|
|
||||||
0x3d, 0x20, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x29, 0x20, 0x3d,
|
|
||||||
0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x67,
|
|
||||||
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65,
|
|
||||||
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20,
|
|
||||||
0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x70,
|
|
||||||
0x73, 0x20, 0x3d, 0x20, 0x61, 0x77, 0x61, 0x69, 0x74, 0x20, 0x66, 0x65,
|
|
||||||
0x74, 0x63, 0x68, 0x28, 0x22, 0x2f, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x22,
|
|
||||||
0x29, 0x2e, 0x74, 0x68, 0x65, 0x6e, 0x28, 0x72, 0x20, 0x3d, 0x3e, 0x20,
|
|
||||||
0x72, 0x2e, 0x6a, 0x73, 0x6f, 0x6e, 0x28, 0x29, 0x29, 0x3b, 0x0a, 0x20,
|
|
||||||
0x20, 0x20, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
|
|
||||||
0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d,
|
|
||||||
0x20, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x2e, 0x64, 0x65, 0x66, 0x61, 0x75,
|
|
||||||
0x6c, 0x74, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
|
|
||||||
0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a,
|
|
||||||
0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e,
|
|
||||||
0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
|
|
||||||
0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, 0x7d, 0x0a
|
|
||||||
};
|
};
|
||||||
size_t completion_js_len = 5796;
|
unsigned int completion_js_len = 5909;
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -1928,4 +1928,4 @@ unsigned char index_js[] = {
|
|||||||
0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x2c, 0x50, 0x74, 0x20, 0x61, 0x73,
|
0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x2c, 0x50, 0x74, 0x20, 0x61, 0x73,
|
||||||
0x20, 0x75, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x7d, 0x3b, 0x0a
|
0x20, 0x75, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x7d, 0x3b, 0x0a
|
||||||
};
|
};
|
||||||
size_t index_js_len = 23136;
|
unsigned int index_js_len = 23136;
|
||||||
|
@ -21,6 +21,7 @@ let generation_settings = null;
|
|||||||
//
|
//
|
||||||
export async function* llama(prompt, params = {}, config = {}) {
|
export async function* llama(prompt, params = {}, config = {}) {
|
||||||
let controller = config.controller;
|
let controller = config.controller;
|
||||||
|
const api_url = config.api_url || "";
|
||||||
|
|
||||||
if (!controller) {
|
if (!controller) {
|
||||||
controller = new AbortController();
|
controller = new AbortController();
|
||||||
@ -28,7 +29,7 @@ export async function* llama(prompt, params = {}, config = {}) {
|
|||||||
|
|
||||||
const completionParams = { ...paramDefaults, ...params, prompt };
|
const completionParams = { ...paramDefaults, ...params, prompt };
|
||||||
|
|
||||||
const response = await fetch("/completion", {
|
const response = await fetch(`${api_url}/completion`, {
|
||||||
method: 'POST',
|
method: 'POST',
|
||||||
body: JSON.stringify(completionParams),
|
body: JSON.stringify(completionParams),
|
||||||
headers: {
|
headers: {
|
||||||
@ -193,9 +194,10 @@ export const llamaComplete = async (params, controller, callback) => {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get the model info from the server. This is useful for getting the context window and so on.
|
// Get the model info from the server. This is useful for getting the context window and so on.
|
||||||
export const llamaModelInfo = async () => {
|
export const llamaModelInfo = async (config = {}) => {
|
||||||
if (!generation_settings) {
|
if (!generation_settings) {
|
||||||
const props = await fetch("/props").then(r => r.json());
|
const api_url = config.api_url || "";
|
||||||
|
const props = await fetch(`${api_url}/props`).then(r => r.json());
|
||||||
generation_settings = props.default_generation_settings;
|
generation_settings = props.default_generation_settings;
|
||||||
}
|
}
|
||||||
return generation_settings;
|
return generation_settings;
|
||||||
|
@ -199,10 +199,10 @@
|
|||||||
<script type="module">
|
<script type="module">
|
||||||
import {
|
import {
|
||||||
html, h, signal, effect, computed, render, useSignal, useEffect, useRef, Component
|
html, h, signal, effect, computed, render, useSignal, useEffect, useRef, Component
|
||||||
} from '/index.js';
|
} from './index.js';
|
||||||
|
|
||||||
import { llama } from '/completion.js';
|
import { llama } from './completion.js';
|
||||||
import { SchemaConverter } from '/json-schema-to-grammar.mjs';
|
import { SchemaConverter } from './json-schema-to-grammar.mjs';
|
||||||
let selected_image = false;
|
let selected_image = false;
|
||||||
var slot_id = -1;
|
var slot_id = -1;
|
||||||
|
|
||||||
@ -405,7 +405,7 @@
|
|||||||
throw new Error("already running");
|
throw new Error("already running");
|
||||||
}
|
}
|
||||||
controller.value = new AbortController();
|
controller.value = new AbortController();
|
||||||
for await (const chunk of llama(prompt, llamaParams, { controller: controller.value })) {
|
for await (const chunk of llama(prompt, llamaParams, { controller: controller.value, api_url: document.baseURI.replace(/\/+$/, '') })) {
|
||||||
const data = chunk.data;
|
const data = chunk.data;
|
||||||
|
|
||||||
if (data.stop) {
|
if (data.stop) {
|
||||||
|
@ -99,6 +99,7 @@ struct slot_params {
|
|||||||
|
|
||||||
uint32_t seed = -1; // RNG seed
|
uint32_t seed = -1; // RNG seed
|
||||||
int32_t n_keep = 0; // number of tokens to keep from initial prompt
|
int32_t n_keep = 0; // number of tokens to keep from initial prompt
|
||||||
|
int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
|
||||||
int32_t n_predict = -1; // new tokens to predict
|
int32_t n_predict = -1; // new tokens to predict
|
||||||
|
|
||||||
std::vector<std::string> antiprompt;
|
std::vector<std::string> antiprompt;
|
||||||
@ -746,7 +747,8 @@ struct server_context {
|
|||||||
{
|
{
|
||||||
const int32_t n_batch = llama_n_batch(ctx);
|
const int32_t n_batch = llama_n_batch(ctx);
|
||||||
|
|
||||||
batch = llama_batch_init(n_batch, 0, params.n_parallel);
|
// only a single seq_id per token is needed
|
||||||
|
batch = llama_batch_init(n_batch, 0, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
metrics.init();
|
metrics.init();
|
||||||
@ -846,10 +848,18 @@ struct server_context {
|
|||||||
slot.sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
|
slot.sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
|
||||||
slot.sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
|
slot.sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
|
||||||
slot.params.n_keep = json_value(data, "n_keep", slot.params.n_keep);
|
slot.params.n_keep = json_value(data, "n_keep", slot.params.n_keep);
|
||||||
|
slot.params.n_discard = json_value(data, "n_discard", default_params.n_discard);
|
||||||
slot.params.seed = json_value(data, "seed", default_params.seed);
|
slot.params.seed = json_value(data, "seed", default_params.seed);
|
||||||
if (data.contains("json_schema") && !data.contains("grammar")) {
|
slot.sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
|
||||||
|
slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
|
||||||
|
|
||||||
|
// process "json_schema" and "grammar"
|
||||||
|
if (data.contains("json_schema") && data.contains("grammar")) {
|
||||||
|
send_error(task, "Either \"json_schema\" or \"grammar\" can be specified, but not both", ERROR_TYPE_INVALID_REQUEST);
|
||||||
|
return false;
|
||||||
|
} else if (data.contains("json_schema") && !data.contains("grammar")) {
|
||||||
try {
|
try {
|
||||||
auto schema = json_value(data, "json_schema", json::object());
|
auto schema = json_value(data, "json_schema", json::object());
|
||||||
slot.sparams.grammar = json_schema_to_grammar(schema);
|
slot.sparams.grammar = json_schema_to_grammar(schema);
|
||||||
} catch (const std::exception & e) {
|
} catch (const std::exception & e) {
|
||||||
send_error(task, std::string("\"json_schema\": ") + e.what(), ERROR_TYPE_INVALID_REQUEST);
|
send_error(task, std::string("\"json_schema\": ") + e.what(), ERROR_TYPE_INVALID_REQUEST);
|
||||||
@ -858,8 +868,6 @@ struct server_context {
|
|||||||
} else {
|
} else {
|
||||||
slot.sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
|
slot.sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
|
||||||
}
|
}
|
||||||
slot.sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
|
|
||||||
slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
|
|
||||||
|
|
||||||
if (slot.params.cache_prompt && slot.ga_n != 1) {
|
if (slot.params.cache_prompt && slot.ga_n != 1) {
|
||||||
LOG_WARNING("cache_prompt is not supported with group-attention", {});
|
LOG_WARNING("cache_prompt is not supported with group-attention", {});
|
||||||
@ -1248,6 +1256,7 @@ struct server_context {
|
|||||||
{"stop", slot.params.antiprompt},
|
{"stop", slot.params.antiprompt},
|
||||||
{"n_predict", slot.params.n_predict}, // TODO: fix duplicate key n_predict
|
{"n_predict", slot.params.n_predict}, // TODO: fix duplicate key n_predict
|
||||||
{"n_keep", slot.params.n_keep},
|
{"n_keep", slot.params.n_keep},
|
||||||
|
{"n_discard", slot.params.n_discard},
|
||||||
{"ignore_eos", ignore_eos},
|
{"ignore_eos", ignore_eos},
|
||||||
{"stream", slot.params.stream},
|
{"stream", slot.params.stream},
|
||||||
{"logit_bias", slot.sparams.logit_bias},
|
{"logit_bias", slot.sparams.logit_bias},
|
||||||
@ -1691,7 +1700,7 @@ struct server_context {
|
|||||||
// Shift context
|
// Shift context
|
||||||
const int n_keep = slot.params.n_keep + add_bos_token;
|
const int n_keep = slot.params.n_keep + add_bos_token;
|
||||||
const int n_left = (int) system_tokens.size() + slot.n_past - n_keep;
|
const int n_left = (int) system_tokens.size() + slot.n_past - n_keep;
|
||||||
const int n_discard = n_left / 2;
|
const int n_discard = slot.params.n_discard ? slot.params.n_discard : (n_left / 2);
|
||||||
|
|
||||||
LOG_INFO("slot context shift", {
|
LOG_INFO("slot context shift", {
|
||||||
{"id_slot", slot.id},
|
{"id_slot", slot.id},
|
||||||
@ -2208,7 +2217,11 @@ static void server_print_usage(const char * argv0, const gpt_params & params, co
|
|||||||
printf(" -m FNAME, --model FNAME\n");
|
printf(" -m FNAME, --model FNAME\n");
|
||||||
printf(" model path (default: %s)\n", params.model.c_str());
|
printf(" model path (default: %s)\n", params.model.c_str());
|
||||||
printf(" -mu MODEL_URL, --model-url MODEL_URL\n");
|
printf(" -mu MODEL_URL, --model-url MODEL_URL\n");
|
||||||
printf(" model download url (default: %s)\n", params.model_url.c_str());
|
printf(" model download url (default: unused)\n");
|
||||||
|
printf(" -hfr REPO, --hf-repo REPO\n");
|
||||||
|
printf(" Hugging Face model repository (default: unused)\n");
|
||||||
|
printf(" -hff FILE, --hf-file FILE\n");
|
||||||
|
printf(" Hugging Face model file (default: unused)\n");
|
||||||
printf(" -a ALIAS, --alias ALIAS\n");
|
printf(" -a ALIAS, --alias ALIAS\n");
|
||||||
printf(" set an alias for the model, will be added as `model` field in completion response\n");
|
printf(" set an alias for the model, will be added as `model` field in completion response\n");
|
||||||
printf(" --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
|
printf(" --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
|
||||||
@ -2337,6 +2350,18 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
params.model_url = argv[i];
|
params.model_url = argv[i];
|
||||||
|
} else if (arg == "-hfr" || arg == "--hf-repo") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
params.hf_repo = argv[i];
|
||||||
|
} else if (arg == "-hff" || arg == "--hf-file") {
|
||||||
|
if (++i >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
params.hf_file = argv[i];
|
||||||
} else if (arg == "-a" || arg == "--alias") {
|
} else if (arg == "-a" || arg == "--alias") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
@ -2489,15 +2514,15 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
|
|||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#ifndef GGML_USE_CUBLAS
|
#ifndef GGML_USE_CUDA
|
||||||
fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the split mode has no effect.\n");
|
fprintf(stderr, "warning: llama.cpp was compiled without CUDA. Setting the split mode has no effect.\n");
|
||||||
#endif // GGML_USE_CUBLAS
|
#endif // GGML_USE_CUDA
|
||||||
} else if (arg == "--tensor-split" || arg == "-ts") {
|
} else if (arg == "--tensor-split" || arg == "-ts") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
|
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
|
||||||
std::string arg_next = argv[i];
|
std::string arg_next = argv[i];
|
||||||
|
|
||||||
// split string by , and /
|
// split string by , and /
|
||||||
@ -2514,17 +2539,17 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
|
LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a tensor split.\n", {});
|
||||||
#endif // GGML_USE_CUBLAS
|
#endif // GGML_USE_CUDA
|
||||||
} else if (arg == "--main-gpu" || arg == "-mg") {
|
} else if (arg == "--main-gpu" || arg == "-mg") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
|
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
|
||||||
params.main_gpu = std::stoi(argv[i]);
|
params.main_gpu = std::stoi(argv[i]);
|
||||||
#else
|
#else
|
||||||
LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
|
LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a main GPU.", {});
|
||||||
#endif
|
#endif
|
||||||
} else if (arg == "--lora") {
|
} else if (arg == "--lora") {
|
||||||
if (++i >= argc) {
|
if (++i >= argc) {
|
||||||
|
@ -4,7 +4,8 @@ Feature: Parallel
|
|||||||
|
|
||||||
Background: Server startup
|
Background: Server startup
|
||||||
Given a server listening on localhost:8080
|
Given a server listening on localhost:8080
|
||||||
And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
|
And a model file tinyllamas/split/stories15M-00001-of-00003.gguf from HF repo ggml-org/models
|
||||||
|
And a model file test-model-00001-of-00003.gguf
|
||||||
And 42 as server seed
|
And 42 as server seed
|
||||||
And 128 as batch size
|
And 128 as batch size
|
||||||
And 256 KV cache size
|
And 256 KV cache size
|
||||||
|
@ -4,8 +4,8 @@ Feature: llama.cpp server
|
|||||||
|
|
||||||
Background: Server startup
|
Background: Server startup
|
||||||
Given a server listening on localhost:8080
|
Given a server listening on localhost:8080
|
||||||
And a model url https://huggingface.co/ggml-org/models/resolve/main/tinyllamas/stories260K.gguf
|
And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
|
||||||
And a model file stories260K.gguf
|
And a model file test-model.gguf
|
||||||
And a model alias tinyllama-2
|
And a model alias tinyllama-2
|
||||||
And 42 as server seed
|
And 42 as server seed
|
||||||
# KV Cache corresponds to the total amount of tokens
|
# KV Cache corresponds to the total amount of tokens
|
||||||
|
@ -16,7 +16,6 @@ import numpy as np
|
|||||||
import openai
|
import openai
|
||||||
from behave import step
|
from behave import step
|
||||||
from behave.api.async_step import async_run_until_complete
|
from behave.api.async_step import async_run_until_complete
|
||||||
from huggingface_hub import hf_hub_download
|
|
||||||
from prometheus_client import parser
|
from prometheus_client import parser
|
||||||
|
|
||||||
|
|
||||||
@ -39,6 +38,8 @@ def step_server_config(context, server_fqdn, server_port):
|
|||||||
|
|
||||||
context.model_alias = None
|
context.model_alias = None
|
||||||
context.model_file = None
|
context.model_file = None
|
||||||
|
context.model_hf_repo = None
|
||||||
|
context.model_hf_file = None
|
||||||
context.model_url = None
|
context.model_url = None
|
||||||
context.n_batch = None
|
context.n_batch = None
|
||||||
context.n_ubatch = None
|
context.n_ubatch = None
|
||||||
@ -68,9 +69,9 @@ def step_server_config(context, server_fqdn, server_port):
|
|||||||
|
|
||||||
@step('a model file {hf_file} from HF repo {hf_repo}')
|
@step('a model file {hf_file} from HF repo {hf_repo}')
|
||||||
def step_download_hf_model(context, hf_file, hf_repo):
|
def step_download_hf_model(context, hf_file, hf_repo):
|
||||||
context.model_file = hf_hub_download(repo_id=hf_repo, filename=hf_file)
|
context.model_hf_repo = hf_repo
|
||||||
if context.debug:
|
context.model_hf_file = hf_file
|
||||||
print(f"model file: {context.model_file}")
|
context.model_file = os.path.basename(hf_file)
|
||||||
|
|
||||||
|
|
||||||
@step('a model file {model_file}')
|
@step('a model file {model_file}')
|
||||||
@ -1079,6 +1080,10 @@ def start_server_background(context):
|
|||||||
server_args.extend(['--model', context.model_file])
|
server_args.extend(['--model', context.model_file])
|
||||||
if context.model_url:
|
if context.model_url:
|
||||||
server_args.extend(['--model-url', context.model_url])
|
server_args.extend(['--model-url', context.model_url])
|
||||||
|
if context.model_hf_repo:
|
||||||
|
server_args.extend(['--hf-repo', context.model_hf_repo])
|
||||||
|
if context.model_hf_file:
|
||||||
|
server_args.extend(['--hf-file', context.model_hf_file])
|
||||||
if context.n_batch:
|
if context.n_batch:
|
||||||
server_args.extend(['--batch-size', context.n_batch])
|
server_args.extend(['--batch-size', context.n_batch])
|
||||||
if context.n_ubatch:
|
if context.n_ubatch:
|
||||||
|
@ -95,8 +95,8 @@ static inline void server_log(const char *level, const char *function, int line,
|
|||||||
|
|
||||||
const std::string str = ss.str();
|
const std::string str = ss.str();
|
||||||
printf("%.*s\n", (int)str.size(), str.data());
|
printf("%.*s\n", (int)str.size(), str.data());
|
||||||
fflush(stdout);
|
|
||||||
}
|
}
|
||||||
|
fflush(stdout);
|
||||||
}
|
}
|
||||||
|
|
||||||
//
|
//
|
||||||
@ -352,51 +352,71 @@ static json oaicompat_completion_params_parse(
|
|||||||
// https://platform.openai.com/docs/api-reference/chat/create
|
// https://platform.openai.com/docs/api-reference/chat/create
|
||||||
llama_sampling_params default_sparams;
|
llama_sampling_params default_sparams;
|
||||||
llama_params["model"] = json_value(body, "model", std::string("unknown"));
|
llama_params["model"] = json_value(body, "model", std::string("unknown"));
|
||||||
llama_params["prompt"] = format_chat(model, chat_template, body["messages"]);
|
|
||||||
llama_params["cache_prompt"] = json_value(body, "cache_prompt", false);
|
|
||||||
llama_params["temperature"] = json_value(body, "temperature", 0.0);
|
|
||||||
llama_params["top_k"] = json_value(body, "top_k", default_sparams.top_k);
|
|
||||||
llama_params["top_p"] = json_value(body, "top_p", 1.0);
|
|
||||||
llama_params["n_predict"] = json_value(body, "max_tokens", -1);
|
|
||||||
llama_params["logit_bias"] = json_value(body, "logit_bias", json::object());
|
|
||||||
llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
|
llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
|
||||||
|
llama_params["logit_bias"] = json_value(body, "logit_bias", json::object());
|
||||||
|
llama_params["n_predict"] = json_value(body, "max_tokens", -1);
|
||||||
llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0);
|
llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0);
|
||||||
llama_params["seed"] = json_value(body, "seed", LLAMA_DEFAULT_SEED);
|
llama_params["seed"] = json_value(body, "seed", LLAMA_DEFAULT_SEED);
|
||||||
llama_params["stream"] = json_value(body, "stream", false);
|
llama_params["stream"] = json_value(body, "stream", false);
|
||||||
llama_params["mirostat"] = json_value(body, "mirostat", default_sparams.mirostat);
|
llama_params["temperature"] = json_value(body, "temperature", 0.0);
|
||||||
llama_params["mirostat_tau"] = json_value(body, "mirostat_tau", default_sparams.mirostat_tau);
|
llama_params["top_p"] = json_value(body, "top_p", 1.0);
|
||||||
llama_params["mirostat_eta"] = json_value(body, "mirostat_eta", default_sparams.mirostat_eta);
|
|
||||||
llama_params["penalize_nl"] = json_value(body, "penalize_nl", default_sparams.penalize_nl);
|
|
||||||
llama_params["typical_p"] = json_value(body, "typical_p", default_sparams.typical_p);
|
|
||||||
llama_params["repeat_last_n"] = json_value(body, "repeat_last_n", default_sparams.penalty_last_n);
|
|
||||||
llama_params["ignore_eos"] = json_value(body, "ignore_eos", false);
|
|
||||||
llama_params["tfs_z"] = json_value(body, "tfs_z", default_sparams.tfs_z);
|
|
||||||
llama_params["n_keep"] = json_value(body, "n_keep", 0);
|
|
||||||
|
|
||||||
if (body.contains("grammar")) {
|
// Apply chat template to the list of messages
|
||||||
llama_params["grammar"] = json_value(body, "grammar", json::object());
|
llama_params["prompt"] = format_chat(model, chat_template, body["messages"]);
|
||||||
}
|
|
||||||
|
|
||||||
if (body.contains("response_format")) {
|
// Handle "stop" field
|
||||||
auto response_format = json_value(body, "response_format", json::object());
|
|
||||||
if (response_format.contains("type")) {
|
|
||||||
if (response_format["type"] == "json_object") {
|
|
||||||
llama_params["json_schema"] = json_value(response_format, "schema", json::object());
|
|
||||||
} else {
|
|
||||||
throw std::runtime_error("response_format type not supported: " + response_format["type"].dump());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle 'stop' field
|
|
||||||
if (body.contains("stop") && body["stop"].is_string()) {
|
if (body.contains("stop") && body["stop"].is_string()) {
|
||||||
llama_params["stop"] = json::array({body["stop"].get<std::string>()});
|
llama_params["stop"] = json::array({body["stop"].get<std::string>()});
|
||||||
} else {
|
} else {
|
||||||
llama_params["stop"] = json_value(body, "stop", json::array());
|
llama_params["stop"] = json_value(body, "stop", json::array());
|
||||||
}
|
}
|
||||||
|
// Some chat templates don't use EOS token to stop generation
|
||||||
|
// We must add their end sequences to list of stop words
|
||||||
|
llama_params["stop"].push_back("<|im_end|>"); // chatml
|
||||||
|
llama_params["stop"].push_back("<end_of_turn>"); // gemma
|
||||||
|
|
||||||
// Ensure there is ChatML-specific end sequence among stop words
|
// Handle "response_format" field
|
||||||
llama_params["stop"].push_back("<|im_end|>");
|
if (body.contains("response_format")) {
|
||||||
|
json response_format = json_value(body, "response_format", json::object());
|
||||||
|
std::string response_type = json_value(response_format, "type", std::string());
|
||||||
|
if (response_type == "json_object") {
|
||||||
|
llama_params["json_schema"] = json_value(response_format, "schema", json::object());
|
||||||
|
} else if (!response_type.empty() && response_type != "text") {
|
||||||
|
throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle "n" field
|
||||||
|
int n_choices = json_value(body, "n", 1);
|
||||||
|
if (n_choices != 1) {
|
||||||
|
throw std::runtime_error("Only one completion choice is allowed");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle "logprobs" field
|
||||||
|
// TODO: The response format of this option is not yet OAI-compatible, but seems like no one really using it; We may need to fix it in the future
|
||||||
|
if (body.contains("logprobs")) {
|
||||||
|
llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
|
||||||
|
} else if (body.contains("top_logprobs")) {
|
||||||
|
throw std::runtime_error("top_logprobs requires logprobs to be set to true");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Params supported by OAI but unsupported by llama.cpp
|
||||||
|
static const std::vector<std::string> unsupported_params { "tools", "tool_choice" };
|
||||||
|
for (auto & param : unsupported_params) {
|
||||||
|
if (body.contains(param)) {
|
||||||
|
throw std::runtime_error("Unsupported param: " + param);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy remaining properties to llama_params
|
||||||
|
// This allows user to use llama.cpp-specific params like "mirostat", "tfs_z",... via OAI endpoint.
|
||||||
|
// See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
|
||||||
|
for (const auto & item : body.items()) {
|
||||||
|
// Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
|
||||||
|
if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
|
||||||
|
llama_params[item.key()] = item.value();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return llama_params;
|
return llama_params;
|
||||||
}
|
}
|
||||||
|
@ -65,7 +65,6 @@ int main(int argc, char ** argv) {
|
|||||||
llama_context * ctx_dft = NULL;
|
llama_context * ctx_dft = NULL;
|
||||||
|
|
||||||
// load the target model
|
// load the target model
|
||||||
params.logits_all = true;
|
|
||||||
std::tie(model_tgt, ctx_tgt) = llama_init_from_gpt_params(params);
|
std::tie(model_tgt, ctx_tgt) = llama_init_from_gpt_params(params);
|
||||||
|
|
||||||
// load the draft model
|
// load the draft model
|
||||||
@ -219,7 +218,8 @@ int main(int argc, char ** argv) {
|
|||||||
if (params.sparams.temp > 0) {
|
if (params.sparams.temp > 0) {
|
||||||
// stochastic verification
|
// stochastic verification
|
||||||
|
|
||||||
llama_token_data_array dist_tgt = llama_sampling_probability_distribution(ctx_sampling, ctx_tgt, NULL, drafts[s_keep].i_batch_tgt[i_dft]);
|
llama_token_data_array dist_tgt = llama_sampling_prepare(ctx_sampling, ctx_tgt, NULL, drafts[s_keep].i_batch_tgt[i_dft], true, NULL);
|
||||||
|
llama_sample_softmax(ctx_tgt, &dist_tgt);
|
||||||
float p_tgt = 0, p_dft = 0;
|
float p_tgt = 0, p_dft = 0;
|
||||||
|
|
||||||
// GGML_ASSERT(dist_tgt.size() == dist_dft.size());
|
// GGML_ASSERT(dist_tgt.size() == dist_dft.size());
|
||||||
|
@ -3,9 +3,13 @@
|
|||||||
:: Copyright (C) 2024 Intel Corporation
|
:: Copyright (C) 2024 Intel Corporation
|
||||||
:: SPDX-License-Identifier: MIT
|
:: SPDX-License-Identifier: MIT
|
||||||
|
|
||||||
mkdir -p build
|
|
||||||
|
IF not exist build (mkdir build)
|
||||||
cd build
|
cd build
|
||||||
|
if %errorlevel% neq 0 goto ERROR
|
||||||
|
|
||||||
@call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
|
@call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
|
||||||
|
if %errorlevel% neq 0 goto ERROR
|
||||||
|
|
||||||
:: for FP16
|
:: for FP16
|
||||||
:: faster for long-prompt inference
|
:: faster for long-prompt inference
|
||||||
@ -13,11 +17,18 @@ cd build
|
|||||||
|
|
||||||
:: for FP32
|
:: for FP32
|
||||||
cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release
|
cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release
|
||||||
|
if %errorlevel% neq 0 goto ERROR
|
||||||
|
|
||||||
:: build example/main only
|
:: build example/main only
|
||||||
:: make main
|
:: make main
|
||||||
|
|
||||||
:: build all binary
|
:: build all binary
|
||||||
make -j
|
make -j
|
||||||
|
if %errorlevel% neq 0 goto ERROR
|
||||||
|
|
||||||
cd ..
|
cd ..
|
||||||
|
exit /B 0
|
||||||
|
|
||||||
|
:ERROR
|
||||||
|
echo comomand error: %errorlevel%
|
||||||
|
exit /B %errorlevel%
|
||||||
|
|
||||||
|
6
flake.lock
generated
6
flake.lock
generated
@ -20,11 +20,11 @@
|
|||||||
},
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1710451336,
|
"lastModified": 1711163522,
|
||||||
"narHash": "sha256-pP86Pcfu3BrAvRO7R64x7hs+GaQrjFes+mEPowCfkxY=",
|
"narHash": "sha256-YN/Ciidm+A0fmJPWlHBGvVkcarYWSC+s3NTPk/P+q3c=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "d691274a972b3165335d261cc4671335f5c67de9",
|
"rev": "44d0940ea560dee511026a53f0e2e2cde489b4d4",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -420,7 +420,7 @@ GGML_CALL static void ggml_backend_registry_init(void) {
|
|||||||
ggml_backend_register("CPU", ggml_backend_reg_cpu_init, ggml_backend_cpu_buffer_type(), NULL);
|
ggml_backend_register("CPU", ggml_backend_reg_cpu_init, ggml_backend_cpu_buffer_type(), NULL);
|
||||||
|
|
||||||
// add forward decls here to avoid including the backend headers
|
// add forward decls here to avoid including the backend headers
|
||||||
#ifdef GGML_USE_CUBLAS
|
#ifdef GGML_USE_CUDA
|
||||||
extern GGML_CALL void ggml_backend_cuda_reg_devices(void);
|
extern GGML_CALL void ggml_backend_cuda_reg_devices(void);
|
||||||
ggml_backend_cuda_reg_devices();
|
ggml_backend_cuda_reg_devices();
|
||||||
#endif
|
#endif
|
||||||
|
@ -377,6 +377,27 @@ typedef struct {
|
|||||||
} block_iq1_s;
|
} block_iq1_s;
|
||||||
static_assert(sizeof(block_iq1_s) == sizeof(ggml_half) + QK_K/8 + QK_K/16, "wrong iq1_s block size/padding");
|
static_assert(sizeof(block_iq1_s) == sizeof(ggml_half) + QK_K/8 + QK_K/16, "wrong iq1_s block size/padding");
|
||||||
|
|
||||||
|
// 1.75 bpw
|
||||||
|
typedef struct {
|
||||||
|
uint8_t qs[QK_K/8]; // grid index, low 8 bits
|
||||||
|
uint8_t qh[QK_K/16]; // grid index, high 3 bits + grid shift bit (for two groups of 8)
|
||||||
|
#if QK_K == 64
|
||||||
|
ggml_half d;
|
||||||
|
#endif
|
||||||
|
uint8_t scales[QK_K/32]; // 3-bit block scales (4-bit if QK_K == 64)
|
||||||
|
} block_iq1_m;
|
||||||
|
#if QK_K == 64
|
||||||
|
static_assert(sizeof(block_iq1_m) == QK_K/8 + QK_K/16 + QK_K/32 + sizeof(ggml_half), "wrong iq1_m block size/padding");
|
||||||
|
#else
|
||||||
|
static_assert(sizeof(block_iq1_m) == QK_K/8 + QK_K/16 + QK_K/32, "wrong iq1_m block size/padding");
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Used by IQ1_M quants
|
||||||
|
typedef union {
|
||||||
|
ggml_half f16;
|
||||||
|
uint16_t u16;
|
||||||
|
} iq1m_scale_t;
|
||||||
|
|
||||||
// Non-linear quants
|
// Non-linear quants
|
||||||
#define QK4_NL 32
|
#define QK4_NL 32
|
||||||
typedef struct {
|
typedef struct {
|
||||||
@ -1050,6 +1071,7 @@ GGML_TABLE_END()
|
|||||||
|
|
||||||
#define NGRID_IQ1S 2048
|
#define NGRID_IQ1S 2048
|
||||||
#define IQ1S_DELTA 0.125f
|
#define IQ1S_DELTA 0.125f
|
||||||
|
#define IQ1M_DELTA 0.125f
|
||||||
#if defined(GGML_COMMON_IMPL_C)
|
#if defined(GGML_COMMON_IMPL_C)
|
||||||
GGML_TABLE_BEGIN(uint64_t, iq1s_grid, NGRID_IQ1S)
|
GGML_TABLE_BEGIN(uint64_t, iq1s_grid, NGRID_IQ1S)
|
||||||
0xffffffffffffffff, 0xffffffffffffff01, 0xffffffffffff0000, 0xffffffffffff01ff,
|
0xffffffffffffffff, 0xffffffffffffff01, 0xffffffffffff0000, 0xffffffffffff01ff,
|
||||||
|
9852
ggml-cuda.cu
9852
ggml-cuda.cu
File diff suppressed because it is too large
Load Diff
47
ggml-cuda/acc.cu
Normal file
47
ggml-cuda/acc.cu
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
#include "acc.cuh"
|
||||||
|
|
||||||
|
static __global__ void acc_f32(const float * x, const float * y, float * dst, const int ne,
|
||||||
|
const int ne10, const int ne11, const int ne12,
|
||||||
|
const int nb1, const int nb2, int offset) {
|
||||||
|
const int i = blockDim.x * blockIdx.x + threadIdx.x;
|
||||||
|
if (i >= ne) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
int src1_idx = i - offset;
|
||||||
|
int oz = src1_idx / nb2;
|
||||||
|
int oy = (src1_idx - (oz * nb2)) / nb1;
|
||||||
|
int ox = src1_idx % nb1;
|
||||||
|
if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) {
|
||||||
|
dst[i] = x[i] + y[ox + oy * ne10 + oz * ne10 * ne11];
|
||||||
|
} else {
|
||||||
|
dst[i] = x[i];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void acc_f32_cuda(const float * x, const float * y, float * dst, const int n_elements,
|
||||||
|
const int ne10, const int ne11, const int ne12,
|
||||||
|
const int nb1, const int nb2, const int offset, cudaStream_t stream) {
|
||||||
|
int num_blocks = (n_elements + CUDA_ACC_BLOCK_SIZE - 1) / CUDA_ACC_BLOCK_SIZE;
|
||||||
|
acc_f32<<<num_blocks, CUDA_ACC_BLOCK_SIZE, 0, stream>>>(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
const ggml_tensor * src0 = dst->src[0];
|
||||||
|
const ggml_tensor * src1 = dst->src[1];
|
||||||
|
const float * src0_d = (const float *)src0->data;
|
||||||
|
const float * src1_d = (const float *)src1->data;
|
||||||
|
float * dst_d = (float *)dst->data;
|
||||||
|
cudaStream_t stream = ctx.stream();
|
||||||
|
|
||||||
|
GGML_ASSERT(src0->type == GGML_TYPE_F32);
|
||||||
|
GGML_ASSERT(src1->type == GGML_TYPE_F32);
|
||||||
|
GGML_ASSERT( dst->type == GGML_TYPE_F32);
|
||||||
|
GGML_ASSERT(dst->ne[3] == 1); // just 3D tensors supported
|
||||||
|
|
||||||
|
int nb1 = dst->op_params[0] / 4; // 4 bytes of float32
|
||||||
|
int nb2 = dst->op_params[1] / 4; // 4 bytes of float32
|
||||||
|
// int nb3 = dst->op_params[2] / 4; // 4 bytes of float32 - unused
|
||||||
|
int offset = dst->op_params[3] / 4; // offset in bytes
|
||||||
|
|
||||||
|
acc_f32_cuda(src0_d, src1_d, dst_d, ggml_nelements(dst), src1->ne[0], src1->ne[1], src1->ne[2], nb1, nb2, offset, stream);
|
||||||
|
}
|
5
ggml-cuda/acc.cuh
Normal file
5
ggml-cuda/acc.cuh
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
#define CUDA_ACC_BLOCK_SIZE 256
|
||||||
|
|
||||||
|
void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
63
ggml-cuda/alibi.cu
Normal file
63
ggml-cuda/alibi.cu
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
#include "alibi.cuh"
|
||||||
|
|
||||||
|
static __global__ void alibi_f32(const float * x, float * dst, const int ncols, const int k_rows,
|
||||||
|
const int n_heads_log2_floor, const float m0, const float m1) {
|
||||||
|
const int col = blockDim.x*blockIdx.x + threadIdx.x;
|
||||||
|
|
||||||
|
if (col >= ncols) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const int row = blockDim.y*blockIdx.y + threadIdx.y;
|
||||||
|
const int i = row*ncols + col;
|
||||||
|
|
||||||
|
const int k = row/k_rows;
|
||||||
|
|
||||||
|
float m_k;
|
||||||
|
if (k < n_heads_log2_floor) {
|
||||||
|
m_k = powf(m0, k + 1);
|
||||||
|
} else {
|
||||||
|
m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
dst[i] = col * m_k + x[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows,
|
||||||
|
const int k_rows, const int n_heads_log2_floor, const float m0,
|
||||||
|
const float m1, cudaStream_t stream) {
|
||||||
|
const dim3 block_dims(CUDA_ALIBI_BLOCK_SIZE, 1, 1);
|
||||||
|
const int num_blocks_x = (ncols + CUDA_ALIBI_BLOCK_SIZE - 1) / (CUDA_ALIBI_BLOCK_SIZE);
|
||||||
|
const dim3 block_nums(num_blocks_x, nrows, 1);
|
||||||
|
alibi_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, k_rows, n_heads_log2_floor, m0, m1);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_op_alibi(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
const ggml_tensor * src0 = dst->src[0];
|
||||||
|
const float * src0_d = (const float *)src0->data;
|
||||||
|
float * dst_d = (float *)dst->data;
|
||||||
|
cudaStream_t stream = ctx.stream();
|
||||||
|
|
||||||
|
GGML_ASSERT(src0->type == GGML_TYPE_F32);
|
||||||
|
GGML_ASSERT( dst->type == GGML_TYPE_F32);
|
||||||
|
|
||||||
|
const int64_t ne00 = src0->ne[0];
|
||||||
|
const int64_t ne01 = src0->ne[1];
|
||||||
|
const int64_t ne02 = src0->ne[2];
|
||||||
|
const int64_t nrows = ggml_nrows(src0);
|
||||||
|
|
||||||
|
//const int n_past = ((int32_t *) dst->op_params)[0];
|
||||||
|
const int n_head = ((int32_t *) dst->op_params)[1];
|
||||||
|
float max_bias;
|
||||||
|
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
|
||||||
|
|
||||||
|
//GGML_ASSERT(ne01 + n_past == ne00);
|
||||||
|
GGML_ASSERT(n_head == ne02);
|
||||||
|
|
||||||
|
const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
|
||||||
|
|
||||||
|
const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
|
||||||
|
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
|
||||||
|
|
||||||
|
alibi_f32_cuda(src0_d, dst_d, ne00, nrows, ne01, n_heads_log2_floor, m0, m1, stream);
|
||||||
|
}
|
5
ggml-cuda/alibi.cuh
Normal file
5
ggml-cuda/alibi.cuh
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
#define CUDA_ALIBI_BLOCK_SIZE 32
|
||||||
|
|
||||||
|
void ggml_cuda_op_alibi(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
34
ggml-cuda/arange.cu
Normal file
34
ggml-cuda/arange.cu
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
#include "arange.cuh"
|
||||||
|
|
||||||
|
static __global__ void arange_f32(float * dst, const int ne0, const float start, const float step) {
|
||||||
|
// blockIDx.x: idx of ne0 / BLOCK_SIZE
|
||||||
|
int nidx = threadIdx.x + blockIdx.x * blockDim.x;
|
||||||
|
if (nidx >= ne0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
dst[nidx] = start + step * nidx;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void arange_f32_cuda(float * dst, const int ne0, const float start, const float step, cudaStream_t stream) {
|
||||||
|
int num_blocks = (ne0 + CUDA_ARANGE_BLOCK_SIZE - 1) / CUDA_ARANGE_BLOCK_SIZE;
|
||||||
|
arange_f32<<<num_blocks, CUDA_ARANGE_BLOCK_SIZE, 0, stream>>>(dst, ne0, start, step);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
float * dst_d = (float *)dst->data;
|
||||||
|
cudaStream_t stream = ctx.stream();
|
||||||
|
|
||||||
|
GGML_ASSERT(dst->type == GGML_TYPE_F32);
|
||||||
|
|
||||||
|
float start;
|
||||||
|
float stop;
|
||||||
|
float step;
|
||||||
|
memcpy(&start, (float *)dst->op_params + 0, sizeof(float));
|
||||||
|
memcpy(&stop, (float *)dst->op_params + 1, sizeof(float));
|
||||||
|
memcpy(&step, (float *)dst->op_params + 2, sizeof(float));
|
||||||
|
|
||||||
|
int64_t steps = (int64_t)ceil((stop - start) / step);
|
||||||
|
GGML_ASSERT(ggml_nelements(dst) == steps);
|
||||||
|
|
||||||
|
arange_f32_cuda(dst_d, dst->ne[0], start, step, stream);
|
||||||
|
}
|
5
ggml-cuda/arange.cuh
Normal file
5
ggml-cuda/arange.cuh
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
#define CUDA_ARANGE_BLOCK_SIZE 256
|
||||||
|
|
||||||
|
void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
77
ggml-cuda/argsort.cu
Normal file
77
ggml-cuda/argsort.cu
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
#include "argsort.cuh"
|
||||||
|
|
||||||
|
template<typename T>
|
||||||
|
static inline __device__ void ggml_cuda_swap(T & a, T & b) {
|
||||||
|
T tmp = a;
|
||||||
|
a = b;
|
||||||
|
b = tmp;
|
||||||
|
}
|
||||||
|
|
||||||
|
template<ggml_sort_order order>
|
||||||
|
static __global__ void k_argsort_f32_i32(const float * x, int * dst, const int ncols) {
|
||||||
|
// bitonic sort
|
||||||
|
int col = threadIdx.x;
|
||||||
|
int row = blockIdx.y;
|
||||||
|
|
||||||
|
if (col >= ncols) return;
|
||||||
|
|
||||||
|
const float * x_row = x + row * ncols;
|
||||||
|
int * dst_row = dst + row * ncols;
|
||||||
|
|
||||||
|
// initialize indices
|
||||||
|
if (col < ncols) {
|
||||||
|
dst_row[col] = col;
|
||||||
|
}
|
||||||
|
__syncthreads();
|
||||||
|
|
||||||
|
for (int k = 2; k <= ncols; k *= 2) {
|
||||||
|
for (int j = k / 2; j > 0; j /= 2) {
|
||||||
|
int ixj = col ^ j;
|
||||||
|
if (ixj > col) {
|
||||||
|
if ((col & k) == 0) {
|
||||||
|
if (order == GGML_SORT_ORDER_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]]) {
|
||||||
|
ggml_cuda_swap(dst_row[col], dst_row[ixj]);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (order == GGML_SORT_ORDER_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]]) {
|
||||||
|
ggml_cuda_swap(dst_row[col], dst_row[ixj]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
__syncthreads();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void argsort_f32_i32_cuda(const float * x, int * dst, const int ncols, const int nrows, ggml_sort_order order, cudaStream_t stream) {
|
||||||
|
// bitonic sort requires ncols to be power of 2
|
||||||
|
GGML_ASSERT((ncols & (ncols - 1)) == 0);
|
||||||
|
|
||||||
|
const dim3 block_dims(ncols, 1, 1);
|
||||||
|
const dim3 block_nums(1, nrows, 1);
|
||||||
|
if (order == GGML_SORT_ORDER_ASC) {
|
||||||
|
k_argsort_f32_i32<GGML_SORT_ORDER_ASC><<<block_nums, block_dims, 0, stream>>>(x, dst, ncols);
|
||||||
|
} else if (order == GGML_SORT_ORDER_DESC) {
|
||||||
|
k_argsort_f32_i32<GGML_SORT_ORDER_DESC><<<block_nums, block_dims, 0, stream>>>(x, dst, ncols);
|
||||||
|
} else {
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
const ggml_tensor * src0 = dst->src[0];
|
||||||
|
const float * src0_d = (const float *)src0->data;
|
||||||
|
float * dst_d = (float *)dst->data;
|
||||||
|
cudaStream_t stream = ctx.stream();
|
||||||
|
|
||||||
|
GGML_ASSERT(src0->type == GGML_TYPE_F32);
|
||||||
|
GGML_ASSERT( dst->type == GGML_TYPE_I32);
|
||||||
|
GGML_ASSERT(ggml_is_contiguous(src0));
|
||||||
|
|
||||||
|
const int64_t ncols = src0->ne[0];
|
||||||
|
const int64_t nrows = ggml_nrows(src0);
|
||||||
|
|
||||||
|
enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];
|
||||||
|
|
||||||
|
argsort_f32_i32_cuda(src0_d, (int *)dst_d, ncols, nrows, order, stream);
|
||||||
|
}
|
3
ggml-cuda/argsort.cuh
Normal file
3
ggml-cuda/argsort.cuh
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
236
ggml-cuda/binbcast.cu
Normal file
236
ggml-cuda/binbcast.cu
Normal file
@ -0,0 +1,236 @@
|
|||||||
|
#include "binbcast.cuh"
|
||||||
|
|
||||||
|
static __device__ __forceinline__ float op_repeat(const float a, const float b) {
|
||||||
|
return b;
|
||||||
|
GGML_UNUSED(a);
|
||||||
|
}
|
||||||
|
|
||||||
|
static __device__ __forceinline__ float op_add(const float a, const float b) {
|
||||||
|
return a + b;
|
||||||
|
}
|
||||||
|
|
||||||
|
static __device__ __forceinline__ float op_mul(const float a, const float b) {
|
||||||
|
return a * b;
|
||||||
|
}
|
||||||
|
|
||||||
|
static __device__ __forceinline__ float op_div(const float a, const float b) {
|
||||||
|
return a / b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
|
||||||
|
static __global__ void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst,
|
||||||
|
int ne0, int ne1, int ne2, int ne3,
|
||||||
|
int ne10, int ne11, int ne12, int ne13,
|
||||||
|
/*int s0, */ int s1, int s2, int s3,
|
||||||
|
/*int s10,*/ int s11, int s12, int s13) {
|
||||||
|
const int i0s = blockDim.x*blockIdx.x + threadIdx.x;
|
||||||
|
const int i1 = (blockDim.y*blockIdx.y + threadIdx.y);
|
||||||
|
const int i2 = (blockDim.z*blockIdx.z + threadIdx.z) / ne3;
|
||||||
|
const int i3 = (blockDim.z*blockIdx.z + threadIdx.z) % ne3;
|
||||||
|
|
||||||
|
if (i0s >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const int i11 = i1 % ne11;
|
||||||
|
const int i12 = i2 % ne12;
|
||||||
|
const int i13 = i3 % ne13;
|
||||||
|
|
||||||
|
const size_t i_src0 = i3*s3 + i2*s2 + i1*s1;
|
||||||
|
const size_t i_src1 = i13*s13 + i12*s12 + i11*s11;
|
||||||
|
const size_t i_dst = i_src0;
|
||||||
|
|
||||||
|
const src0_t * src0_row = src0 + i_src0;
|
||||||
|
const src1_t * src1_row = src1 + i_src1;
|
||||||
|
dst_t * dst_row = dst + i_dst;
|
||||||
|
|
||||||
|
for (int i0 = i0s; i0 < ne0; i0 += blockDim.x*gridDim.x) {
|
||||||
|
const int i10 = i0 % ne10;
|
||||||
|
dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
|
||||||
|
static __global__ void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t * dst,
|
||||||
|
int ne0, int ne1, int ne2, int ne3,
|
||||||
|
int ne10, int ne11, int ne12, int ne13,
|
||||||
|
/*int s0, */ int s1, int s2, int s3,
|
||||||
|
/*int s10,*/ int s11, int s12, int s13) {
|
||||||
|
|
||||||
|
const int i = blockDim.x*blockIdx.x + threadIdx.x;
|
||||||
|
|
||||||
|
const int i3 = i/(ne2*ne1*ne0);
|
||||||
|
const int i2 = (i/(ne1*ne0)) % ne2;
|
||||||
|
const int i1 = (i/ne0) % ne1;
|
||||||
|
const int i0 = i % ne0;
|
||||||
|
|
||||||
|
if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const int i11 = i1 % ne11;
|
||||||
|
const int i12 = i2 % ne12;
|
||||||
|
const int i13 = i3 % ne13;
|
||||||
|
|
||||||
|
const size_t i_src0 = i3*s3 + i2*s2 + i1*s1;
|
||||||
|
const size_t i_src1 = i13*s13 + i12*s12 + i11*s11;
|
||||||
|
const size_t i_dst = i_src0;
|
||||||
|
|
||||||
|
const src0_t * src0_row = src0 + i_src0;
|
||||||
|
const src1_t * src1_row = src1 + i_src1;
|
||||||
|
dst_t * dst_row = dst + i_dst;
|
||||||
|
|
||||||
|
const int i10 = i0 % ne10;
|
||||||
|
dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]);
|
||||||
|
}
|
||||||
|
|
||||||
|
template<float (*bin_op)(const float, const float)>
|
||||||
|
struct bin_bcast_cuda {
|
||||||
|
template<typename src0_t, typename src1_t, typename dst_t>
|
||||||
|
void operator()(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst,
|
||||||
|
const src0_t * src0_dd, const src1_t * src1_dd, dst_t * dst_dd,
|
||||||
|
cudaStream_t stream) {
|
||||||
|
|
||||||
|
GGML_TENSOR_BINARY_OP_LOCALS
|
||||||
|
|
||||||
|
int nr0 = ne10/ne0;
|
||||||
|
int nr1 = ne11/ne1;
|
||||||
|
int nr2 = ne12/ne2;
|
||||||
|
int nr3 = ne13/ne3;
|
||||||
|
|
||||||
|
int nr[4] = { nr0, nr1, nr2, nr3 };
|
||||||
|
|
||||||
|
// collapse dimensions until first broadcast dimension
|
||||||
|
int64_t cne0[] = {ne0, ne1, ne2, ne3};
|
||||||
|
int64_t cne1[] = {ne10, ne11, ne12, ne13};
|
||||||
|
size_t cnb0[] = {nb0, nb1, nb2, nb3};
|
||||||
|
size_t cnb1[] = {nb10, nb11, nb12, nb13};
|
||||||
|
auto collapse = [](int64_t cne[]) {
|
||||||
|
cne[0] *= cne[1];
|
||||||
|
cne[1] = cne[2];
|
||||||
|
cne[2] = cne[3];
|
||||||
|
cne[3] = 1;
|
||||||
|
};
|
||||||
|
|
||||||
|
auto collapse_nb = [](size_t cnb[], const int64_t cne[]) {
|
||||||
|
cnb[1] *= cne[1];
|
||||||
|
cnb[2] *= cne[2];
|
||||||
|
cnb[3] *= cne[3];
|
||||||
|
};
|
||||||
|
|
||||||
|
for (int i = 0; i < 4; i++) {
|
||||||
|
if (nr[i] != 1) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (i > 0) {
|
||||||
|
collapse_nb(cnb0, cne0);
|
||||||
|
collapse_nb(cnb1, cne1);
|
||||||
|
collapse(cne0);
|
||||||
|
collapse(cne1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{
|
||||||
|
int64_t ne0 = cne0[0];
|
||||||
|
int64_t ne1 = cne0[1];
|
||||||
|
int64_t ne2 = cne0[2];
|
||||||
|
int64_t ne3 = cne0[3];
|
||||||
|
|
||||||
|
int64_t ne10 = cne1[0];
|
||||||
|
int64_t ne11 = cne1[1];
|
||||||
|
int64_t ne12 = cne1[2];
|
||||||
|
int64_t ne13 = cne1[3];
|
||||||
|
|
||||||
|
size_t nb0 = cnb0[0];
|
||||||
|
size_t nb1 = cnb0[1];
|
||||||
|
size_t nb2 = cnb0[2];
|
||||||
|
size_t nb3 = cnb0[3];
|
||||||
|
|
||||||
|
size_t nb10 = cnb1[0];
|
||||||
|
size_t nb11 = cnb1[1];
|
||||||
|
size_t nb12 = cnb1[2];
|
||||||
|
size_t nb13 = cnb1[3];
|
||||||
|
|
||||||
|
size_t s0 = nb0 / sizeof(dst_t);
|
||||||
|
size_t s1 = nb1 / sizeof(dst_t);
|
||||||
|
size_t s2 = nb2 / sizeof(dst_t);
|
||||||
|
size_t s3 = nb3 / sizeof(dst_t);
|
||||||
|
|
||||||
|
size_t s10 = nb10 / sizeof(src1_t);
|
||||||
|
size_t s11 = nb11 / sizeof(src1_t);
|
||||||
|
size_t s12 = nb12 / sizeof(src1_t);
|
||||||
|
size_t s13 = nb13 / sizeof(src1_t);
|
||||||
|
|
||||||
|
GGML_ASSERT(s0 == 1);
|
||||||
|
GGML_ASSERT(s10 == 1);
|
||||||
|
|
||||||
|
const int block_size = 128;
|
||||||
|
|
||||||
|
int64_t hne0 = std::max(ne0/2LL, 1LL);
|
||||||
|
|
||||||
|
dim3 block_dims;
|
||||||
|
block_dims.x = std::min<unsigned int>(hne0, block_size);
|
||||||
|
block_dims.y = std::min<unsigned int>(ne1, block_size / block_dims.x);
|
||||||
|
block_dims.z = std::min(std::min<unsigned int>(ne2*ne3, block_size / block_dims.x / block_dims.y), 64U);
|
||||||
|
|
||||||
|
dim3 block_nums(
|
||||||
|
(hne0 + block_dims.x - 1) / block_dims.x,
|
||||||
|
(ne1 + block_dims.y - 1) / block_dims.y,
|
||||||
|
(ne2*ne3 + block_dims.z - 1) / block_dims.z
|
||||||
|
);
|
||||||
|
|
||||||
|
if (block_nums.z > 65535) {
|
||||||
|
// this is the maximum number of blocks in z direction, fallback to 1D grid kernel
|
||||||
|
int block_num = (ne0*ne1*ne2*ne3 + block_size - 1) / block_size;
|
||||||
|
k_bin_bcast_unravel<bin_op><<<block_num, block_size, 0, stream>>>(
|
||||||
|
src0_dd, src1_dd, dst_dd,
|
||||||
|
ne0, ne1, ne2, ne3,
|
||||||
|
ne10, ne11, ne12, ne13,
|
||||||
|
/* s0, */ s1, s2, s3,
|
||||||
|
/* s10, */ s11, s12, s13);
|
||||||
|
} else {
|
||||||
|
k_bin_bcast<bin_op><<<block_nums, block_dims, 0, stream>>>(
|
||||||
|
src0_dd, src1_dd, dst_dd,
|
||||||
|
ne0, ne1, ne2, ne3,
|
||||||
|
ne10, ne11, ne12, ne13,
|
||||||
|
/* s0, */ s1, s2, s3,
|
||||||
|
/* s10, */ s11, s12, s13);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template<class op>
|
||||||
|
static void ggml_cuda_op_bin_bcast(
|
||||||
|
const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
|
||||||
|
const void * src0_dd, const void * src1_dd, void * dst_dd, cudaStream_t stream) {
|
||||||
|
|
||||||
|
GGML_ASSERT(src1->type == GGML_TYPE_F32);
|
||||||
|
|
||||||
|
if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
|
||||||
|
op()(src0, src1, dst, (const float *)src0_dd, (const float *)src1_dd, (float *)dst_dd, stream);
|
||||||
|
} else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
|
||||||
|
op()(src0, src1, dst, (const half *) src0_dd, (const float *)src1_dd, (half *) dst_dd, stream);
|
||||||
|
} else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
|
||||||
|
op()(src0, src1, dst, (const half *) src0_dd, (const float *)src1_dd, (float *)dst_dd, stream);
|
||||||
|
} else {
|
||||||
|
fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__,
|
||||||
|
ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type));
|
||||||
|
GGML_ASSERT(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_repeat>>(dst, dst->src[0], dst, nullptr, dst->src[0]->data, dst->data, ctx.stream());
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_add>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_mul>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_div>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
|
||||||
|
}
|
6
ggml-cuda/binbcast.cuh
Normal file
6
ggml-cuda/binbcast.cuh
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
||||||
|
void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
||||||
|
void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
||||||
|
void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
35
ggml-cuda/clamp.cu
Normal file
35
ggml-cuda/clamp.cu
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
#include "clamp.cuh"
|
||||||
|
|
||||||
|
static __global__ void clamp_f32(const float * x, float * dst, const float min, const float max, const int k) {
|
||||||
|
const int i = blockDim.x*blockIdx.x + threadIdx.x;
|
||||||
|
|
||||||
|
if (i >= k) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void clamp_f32_cuda(const float * x, float * dst, const float min, const float max, const int k, cudaStream_t stream) {
|
||||||
|
const int num_blocks = (k + CUDA_CLAMP_BLOCK_SIZE - 1) / CUDA_CLAMP_BLOCK_SIZE;
|
||||||
|
clamp_f32<<<num_blocks, CUDA_CLAMP_BLOCK_SIZE, 0, stream>>>(x, dst, min, max, k);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
const ggml_tensor * src0 = dst->src[0];
|
||||||
|
const float * src0_d = (const float *)src0->data;
|
||||||
|
float * dst_d = (float *)dst->data;
|
||||||
|
cudaStream_t stream = ctx.stream();
|
||||||
|
|
||||||
|
GGML_ASSERT(src0->type == GGML_TYPE_F32);
|
||||||
|
GGML_ASSERT( dst->type == GGML_TYPE_F32);
|
||||||
|
|
||||||
|
float min;
|
||||||
|
float max;
|
||||||
|
memcpy(&min, dst->op_params, sizeof(float));
|
||||||
|
memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
|
||||||
|
|
||||||
|
clamp_f32_cuda(src0_d, dst_d, min, max, ggml_nelements(src0), stream);
|
||||||
|
CUDA_CHECK(cudaGetLastError());
|
||||||
|
}
|
5
ggml-cuda/clamp.cuh
Normal file
5
ggml-cuda/clamp.cuh
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
#define CUDA_CLAMP_BLOCK_SIZE 256
|
||||||
|
|
||||||
|
void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
550
ggml-cuda/common.cuh
Normal file
550
ggml-cuda/common.cuh
Normal file
@ -0,0 +1,550 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "../ggml.h"
|
||||||
|
#include "../ggml-cuda.h"
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#if defined(GGML_USE_HIPBLAS)
|
||||||
|
#define GGML_COMMON_DECL_HIP
|
||||||
|
#define GGML_COMMON_IMPL_HIP
|
||||||
|
#else
|
||||||
|
#define GGML_COMMON_DECL_CUDA
|
||||||
|
#define GGML_COMMON_IMPL_CUDA
|
||||||
|
#endif
|
||||||
|
#include "../ggml-common.h"
|
||||||
|
|
||||||
|
#include <cstdio>
|
||||||
|
#include <array>
|
||||||
|
#include <cassert>
|
||||||
|
#include <cfloat>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#if defined(GGML_USE_HIPBLAS)
|
||||||
|
#include <hip/hip_runtime.h>
|
||||||
|
#include <hipblas/hipblas.h>
|
||||||
|
#include <hip/hip_fp16.h>
|
||||||
|
#ifdef __HIP_PLATFORM_AMD__
|
||||||
|
// for rocblas_initialize()
|
||||||
|
#include "rocblas/rocblas.h"
|
||||||
|
#endif // __HIP_PLATFORM_AMD__
|
||||||
|
#define CUBLAS_COMPUTE_16F HIPBLAS_R_16F
|
||||||
|
#define CUBLAS_COMPUTE_32F HIPBLAS_R_32F
|
||||||
|
#define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F
|
||||||
|
#define CUBLAS_GEMM_DEFAULT HIPBLAS_GEMM_DEFAULT
|
||||||
|
#define CUBLAS_GEMM_DEFAULT_TENSOR_OP HIPBLAS_GEMM_DEFAULT
|
||||||
|
#define CUBLAS_OP_N HIPBLAS_OP_N
|
||||||
|
#define CUBLAS_OP_T HIPBLAS_OP_T
|
||||||
|
#define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS
|
||||||
|
#define CUBLAS_TF32_TENSOR_OP_MATH 0
|
||||||
|
#define CUDA_R_16F HIPBLAS_R_16F
|
||||||
|
#define CUDA_R_32F HIPBLAS_R_32F
|
||||||
|
#define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width)
|
||||||
|
#define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6
|
||||||
|
#define cublasCreate hipblasCreate
|
||||||
|
#define cublasDestroy hipblasDestroy
|
||||||
|
#define cublasGemmEx hipblasGemmEx
|
||||||
|
#define cublasGemmBatchedEx hipblasGemmBatchedEx
|
||||||
|
#define cublasGemmStridedBatchedEx hipblasGemmStridedBatchedEx
|
||||||
|
#define cublasHandle_t hipblasHandle_t
|
||||||
|
#define cublasSetMathMode(handle, mode) CUBLAS_STATUS_SUCCESS
|
||||||
|
#define cublasSetStream hipblasSetStream
|
||||||
|
#define cublasSgemm hipblasSgemm
|
||||||
|
#define cublasStatus_t hipblasStatus_t
|
||||||
|
#define cudaDataType_t hipblasDatatype_t //deprecated, new hipblasDatatype not in 5.6
|
||||||
|
#define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer
|
||||||
|
#define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess
|
||||||
|
#define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess
|
||||||
|
#define cudaDeviceProp hipDeviceProp_t
|
||||||
|
#define cudaDeviceSynchronize hipDeviceSynchronize
|
||||||
|
#define cudaError_t hipError_t
|
||||||
|
#define cudaErrorPeerAccessAlreadyEnabled hipErrorPeerAccessAlreadyEnabled
|
||||||
|
#define cudaErrorPeerAccessNotEnabled hipErrorPeerAccessNotEnabled
|
||||||
|
#define cudaEventCreateWithFlags hipEventCreateWithFlags
|
||||||
|
#define cudaEventDisableTiming hipEventDisableTiming
|
||||||
|
#define cudaEventRecord hipEventRecord
|
||||||
|
#define cudaEventSynchronize hipEventSynchronize
|
||||||
|
#define cudaEvent_t hipEvent_t
|
||||||
|
#define cudaEventDestroy hipEventDestroy
|
||||||
|
#define cudaFree hipFree
|
||||||
|
#define cudaFreeHost hipHostFree
|
||||||
|
#define cudaGetDevice hipGetDevice
|
||||||
|
#define cudaGetDeviceCount hipGetDeviceCount
|
||||||
|
#define cudaGetDeviceProperties hipGetDeviceProperties
|
||||||
|
#define cudaGetErrorString hipGetErrorString
|
||||||
|
#define cudaGetLastError hipGetLastError
|
||||||
|
#define cudaHostRegister hipHostRegister
|
||||||
|
#define cudaHostRegisterPortable hipHostRegisterPortable
|
||||||
|
#define cudaHostRegisterReadOnly hipHostRegisterReadOnly
|
||||||
|
#define cudaHostUnregister hipHostUnregister
|
||||||
|
#define cudaLaunchHostFunc hipLaunchHostFunc
|
||||||
|
#ifdef GGML_HIP_UMA
|
||||||
|
#define cudaMalloc hipMallocManaged
|
||||||
|
#define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size)
|
||||||
|
#else
|
||||||
|
#define cudaMalloc hipMalloc
|
||||||
|
#define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault)
|
||||||
|
#endif
|
||||||
|
#define cudaMemcpy hipMemcpy
|
||||||
|
#define cudaMemcpyAsync hipMemcpyAsync
|
||||||
|
#define cudaMemcpyPeerAsync hipMemcpyPeerAsync
|
||||||
|
#define cudaMemcpy2DAsync hipMemcpy2DAsync
|
||||||
|
#define cudaMemcpyDeviceToDevice hipMemcpyDeviceToDevice
|
||||||
|
#define cudaMemcpyDeviceToHost hipMemcpyDeviceToHost
|
||||||
|
#define cudaMemcpyHostToDevice hipMemcpyHostToDevice
|
||||||
|
#define cudaMemcpyKind hipMemcpyKind
|
||||||
|
#define cudaMemset hipMemset
|
||||||
|
#define cudaMemsetAsync hipMemsetAsync
|
||||||
|
#define cudaMemGetInfo hipMemGetInfo
|
||||||
|
#define cudaOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize
|
||||||
|
#define cudaSetDevice hipSetDevice
|
||||||
|
#define cudaStreamCreateWithFlags hipStreamCreateWithFlags
|
||||||
|
#define cudaStreamDestroy hipStreamDestroy
|
||||||
|
#define cudaStreamFireAndForget hipStreamFireAndForget
|
||||||
|
#define cudaStreamNonBlocking hipStreamNonBlocking
|
||||||
|
#define cudaStreamPerThread hipStreamPerThread
|
||||||
|
#define cudaStreamSynchronize hipStreamSynchronize
|
||||||
|
#define cudaStreamWaitEvent(stream, event, flags) hipStreamWaitEvent(stream, event, flags)
|
||||||
|
#define cudaStream_t hipStream_t
|
||||||
|
#define cudaSuccess hipSuccess
|
||||||
|
#define __trap abort
|
||||||
|
#define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS
|
||||||
|
#define CUBLAS_STATUS_NOT_INITIALIZED HIPBLAS_STATUS_NOT_INITIALIZED
|
||||||
|
#define CUBLAS_STATUS_ALLOC_FAILED HIPBLAS_STATUS_ALLOC_FAILED
|
||||||
|
#define CUBLAS_STATUS_INVALID_VALUE HIPBLAS_STATUS_INVALID_VALUE
|
||||||
|
#define CUBLAS_STATUS_ARCH_MISMATCH HIPBLAS_STATUS_ARCH_MISMATCH
|
||||||
|
#define CUBLAS_STATUS_MAPPING_ERROR HIPBLAS_STATUS_MAPPING_ERROR
|
||||||
|
#define CUBLAS_STATUS_EXECUTION_FAILED HIPBLAS_STATUS_EXECUTION_FAILED
|
||||||
|
#define CUBLAS_STATUS_INTERNAL_ERROR HIPBLAS_STATUS_INTERNAL_ERROR
|
||||||
|
#define CUBLAS_STATUS_NOT_SUPPORTED HIPBLAS_STATUS_NOT_SUPPORTED
|
||||||
|
#else
|
||||||
|
#include <cuda_runtime.h>
|
||||||
|
#include <cuda.h>
|
||||||
|
#include <cublas_v2.h>
|
||||||
|
#include <cuda_fp16.h>
|
||||||
|
|
||||||
|
#if CUDART_VERSION < 11020
|
||||||
|
#define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED
|
||||||
|
#define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH
|
||||||
|
#define CUBLAS_COMPUTE_16F CUDA_R_16F
|
||||||
|
#define CUBLAS_COMPUTE_32F CUDA_R_32F
|
||||||
|
#define cublasComputeType_t cudaDataType_t
|
||||||
|
#endif // CUDART_VERSION < 11020
|
||||||
|
|
||||||
|
#endif // defined(GGML_USE_HIPBLAS)
|
||||||
|
|
||||||
|
#define STRINGIZE_IMPL(...) #__VA_ARGS__
|
||||||
|
#define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)
|
||||||
|
|
||||||
|
#define WARP_SIZE 32
|
||||||
|
#define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
|
||||||
|
|
||||||
|
#define CC_PASCAL 600
|
||||||
|
#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
|
||||||
|
#define CC_VOLTA 700
|
||||||
|
#define CC_OFFSET_AMD 1000000
|
||||||
|
#define CC_RDNA1 (CC_OFFSET_AMD + 1010)
|
||||||
|
#define CC_RDNA2 (CC_OFFSET_AMD + 1030)
|
||||||
|
#define CC_RDNA3 (CC_OFFSET_AMD + 1100)
|
||||||
|
|
||||||
|
// define this if you want to always fallback to MMQ kernels and not use cuBLAS for matrix multiplication
|
||||||
|
// on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant
|
||||||
|
// for large computational tasks. the drawback is that this requires some extra amount of VRAM:
|
||||||
|
// - 7B quantum model: +100-200 MB
|
||||||
|
// - 13B quantum model: +200-400 MB
|
||||||
|
//
|
||||||
|
//#define GGML_CUDA_FORCE_MMQ
|
||||||
|
|
||||||
|
// TODO: improve this to be correct for more hardware
|
||||||
|
// for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores
|
||||||
|
#if !defined(GGML_CUDA_FORCE_MMQ)
|
||||||
|
#define CUDA_USE_TENSOR_CORES
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define MMVQ_MAX_BATCH_SIZE 8 // max batch size to use MMVQ kernels
|
||||||
|
#define MMQ_MAX_BATCH_SIZE 32 // max batch size to use MMQ kernels when tensor cores are available
|
||||||
|
|
||||||
|
#define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses
|
||||||
|
|
||||||
|
#if defined(_MSC_VER)
|
||||||
|
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define GGML_CUDA_MAX_STREAMS 8
|
||||||
|
|
||||||
|
[[noreturn]]
|
||||||
|
void ggml_cuda_error(const char * stmt, const char * func, const char * file, int line, const char * msg);
|
||||||
|
|
||||||
|
#define CUDA_CHECK_GEN(err, success, error_fn) \
|
||||||
|
do { \
|
||||||
|
auto err_ = (err); \
|
||||||
|
if (err_ != (success)) { \
|
||||||
|
ggml_cuda_error(#err, __func__, __FILE__, __LINE__, error_fn(err_)); \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define CUDA_CHECK(err) CUDA_CHECK_GEN(err, cudaSuccess, cudaGetErrorString)
|
||||||
|
|
||||||
|
#if CUDART_VERSION >= 12000
// Translate a cuBLAS status code into a human-readable string.
// CUDA 12+ ships an official helper for this.
static const char * cublas_get_error_str(const cublasStatus_t err) {
    return cublasGetStatusString(err);
}
#else
// Fallback for CUDA < 12: map each known cuBLAS status code to its name by hand.
static const char * cublas_get_error_str(const cublasStatus_t err) {
    switch (err) {
        case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS";
        case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED";
        case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED";
        case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE";
        case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH";
        case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR";
        case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED";
        case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR";
        case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED";
        default: return "unknown error";
    }
}
#endif // CUDART_VERSION >= 12000
|
||||||
|
|
||||||
|
#define CUBLAS_CHECK(err) CUDA_CHECK_GEN(err, CUBLAS_STATUS_SUCCESS, cublas_get_error_str)
|
||||||
|
|
||||||
|
#if !defined(GGML_USE_HIPBLAS)
// Translate a CUDA driver API error code (CUresult) into a human-readable string.
// NOTE(review): the return status of cuGetErrorString is not checked — for an
// unknown code err_str may be left unset; confirm callers only pass real codes.
static const char * cu_get_error_str(CUresult err) {
    const char * err_str;
    cuGetErrorString(err, &err_str);
    return err_str;
}
// Driver-API variant of CUDA_CHECK.
#define CU_CHECK(err) CUDA_CHECK_GEN(err, CUDA_SUCCESS, cu_get_error_str)
#endif
|
||||||
|
|
||||||
|
#if CUDART_VERSION >= 11100
|
||||||
|
#define GGML_CUDA_ASSUME(x) __builtin_assume(x)
|
||||||
|
#else
|
||||||
|
#define GGML_CUDA_ASSUME(x)
|
||||||
|
#endif // CUDART_VERSION >= 11100
|
||||||
|
|
||||||
|
#ifdef GGML_CUDA_F16
|
||||||
|
typedef half dfloat; // dequantize float
|
||||||
|
typedef half2 dfloat2;
|
||||||
|
#else
|
||||||
|
typedef float dfloat; // dequantize float
|
||||||
|
typedef float2 dfloat2;
|
||||||
|
#endif //GGML_CUDA_F16
|
||||||
|
|
||||||
|
// Called from device code paths that have no implementation for the current
// GPU architecture: prints a diagnostic and traps (aborts the kernel).
[[noreturn]]
static __device__ void no_device_code(
    const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) {

#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
    // HIP builds have no arch list to report.
    printf("%s:%d: ERROR: HIP kernel %s has no device code compatible with HIP arch %d.\n",
           file_name, line, function_name, arch);
    GGML_UNUSED(arch_list);
#else
    printf("%s:%d: ERROR: CUDA kernel %s has no device code compatible with CUDA arch %d. ggml-cuda.cu was compiled for: %s\n",
           file_name, line, function_name, arch, arch_list);
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
    __trap();

    GGML_UNUSED(no_device_code); // suppress unused function warning
}
|
||||||
|
|
||||||
|
#ifdef __CUDA_ARCH__
|
||||||
|
#define NO_DEVICE_CODE no_device_code(__FILE__, __LINE__, __FUNCTION__, __CUDA_ARCH__, STRINGIZE(__CUDA_ARCH_LIST__))
|
||||||
|
#else
|
||||||
|
#define NO_DEVICE_CODE //GGML_ASSERT(false && "NO_DEVICE_CODE not valid in host code.")
|
||||||
|
#endif // __CUDA_ARCH__
|
||||||
|
|
||||||
|
// Butterfly (XOR-shuffle) reduction over the 32 lanes of a warp.
// Every lane ends up holding the sum of x across the whole warp.
static __device__ __forceinline__ float warp_reduce_sum(float x) {
#pragma unroll
    for (int offset = WARP_SIZE/2; offset > 0; offset >>= 1) {
        x += __shfl_xor_sync(0xffffffff, x, offset, WARP_SIZE);
    }
    return x;
}
|
||||||
|
|
||||||
|
// Component-wise warp-wide sum of a float2 via XOR-shuffle butterfly;
// every lane ends up with the warp totals in a.x and a.y.
static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) {
#pragma unroll
    for (int offset = WARP_SIZE/2; offset > 0; offset >>= 1) {
        a.x += __shfl_xor_sync(0xffffffff, a.x, offset, WARP_SIZE);
        a.y += __shfl_xor_sync(0xffffffff, a.y, offset, WARP_SIZE);
    }
    return a;
}
|
||||||
|
|
||||||
|
#ifdef GGML_CUDA_F16
// Warp-wide sum of a half2 (component-wise) via XOR-shuffle butterfly.
// Uses __hadd2, so it requires NVIDIA Pascal or newer; on AMD/HIP or older
// architectures it traps via NO_DEVICE_CODE.
static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) {
#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, mask, 32));
    }
    return a;
#else
    GGML_UNUSED(a);
    NO_DEVICE_CODE;
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
}
#endif // GGML_CUDA_F16
|
||||||
|
|
||||||
|
// Warp-wide maximum via XOR-shuffle butterfly; every lane ends up with the
// maximum of x across all 32 lanes.
static __device__ __forceinline__ float warp_reduce_max(float x) {
#pragma unroll
    for (int offset = WARP_SIZE/2; offset > 0; offset >>= 1) {
        x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, offset, WARP_SIZE));
    }
    return x;
}
|
||||||
|
|
||||||
|
//static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
|
||||||
|
//#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
|
||||||
|
//#pragma unroll
|
||||||
|
// for (int mask = 16; mask > 0; mask >>= 1) {
|
||||||
|
// x = __hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
|
||||||
|
// }
|
||||||
|
// return x;
|
||||||
|
//#else
|
||||||
|
// GGML_UNUSED(x);
|
||||||
|
// NO_DEVICE_CODE;
|
||||||
|
//#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL && CUDART_VERSION >= CUDART_HMAX
|
||||||
|
//}
|
||||||
|
|
||||||
|
|
||||||
|
#if defined(GGML_USE_HIPBLAS)
|
||||||
|
#define __CUDA_ARCH__ 1300
|
||||||
|
|
||||||
|
#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) || defined(__gfx1103__) || \
|
||||||
|
defined(__gfx1150__) || defined(__gfx1151__)
|
||||||
|
#define RDNA3
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(__gfx1030__) || defined(__gfx1031__) || defined(__gfx1032__) || defined(__gfx1033__) || \
|
||||||
|
defined(__gfx1034__) || defined(__gfx1035__) || defined(__gfx1036__) || defined(__gfx1037__)
|
||||||
|
#define RDNA2
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef __has_builtin
|
||||||
|
#define __has_builtin(x) 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
typedef int8_t int8x4_t __attribute__((ext_vector_type(4)));
|
||||||
|
typedef uint8_t uint8x4_t __attribute__((ext_vector_type(4)));
|
||||||
|
// HIP stand-in for CUDA's __vsubss4: per-byte subtraction of two packed
// 4x int8 values with signed saturation to [-128, 127].
static __device__ __forceinline__ int __vsubss4(const int a, const int b) {
    const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
    const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
#if __has_builtin(__builtin_elementwise_sub_sat)
    // Compiler provides a vectorized saturating subtract.
    const int8x4_t c = __builtin_elementwise_sub_sat(va, vb);
    return reinterpret_cast<const int &>(c);
#else
    // Scalar fallback: widen to 16 bits, subtract, clamp to int8 range.
    int8x4_t c;
#pragma unroll
    for (int k = 0; k < 4; k++) {
        const int16_t diff = va[k] - vb[k];
        const int16_t lo   = std::numeric_limits<int8_t>::min();
        const int16_t hi   = std::numeric_limits<int8_t>::max();
        c[k] = diff < lo ? lo : (diff > hi ? hi : diff);
    }
    return reinterpret_cast<int &>(c);
#endif // __has_builtin(__builtin_elementwise_sub_sat)
}
|
||||||
|
|
||||||
|
// HIP stand-in for CUDA's __vsub4 (per-byte subtraction of packed 4x int8).
// NOTE(review): this forwards to the *saturating* __vsubss4, whereas CUDA's
// __vsub4 wraps on overflow — presumably acceptable for the value ranges used
// by the callers; confirm against the call sites.
static __device__ __forceinline__ int __vsub4(const int a, const int b) {
    return __vsubss4(a, b);
}
|
||||||
|
|
||||||
|
// HIP stand-in for CUDA's __vcmpeq4: byte-wise equality compare of two packed
// 4x8-bit values. Each result byte is 0xff where the inputs match, 0x00 otherwise.
static __device__ __forceinline__ unsigned int __vcmpeq4(unsigned int a, unsigned int b) {
    const uint8x4_t & lhs = reinterpret_cast<const uint8x4_t&>(a);
    const uint8x4_t & rhs = reinterpret_cast<const uint8x4_t&>(b);

    unsigned int result;
    uint8x4_t & res_bytes = reinterpret_cast<uint8x4_t&>(result);
#pragma unroll
    for (int k = 0; k < 4; ++k) {
        res_bytes[k] = (lhs[k] == rhs[k]) ? 0xff : 0x00;
    }
    return result;
}
|
||||||
|
|
||||||
|
// HIP stand-in for CUDA's __dp4a: dot product of two packed 4x int8 values
// accumulated into c, dispatched per AMD GPU family.
static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
#if defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx1030__)
    // CDNA / gfx906-era chips expose a native signed dot4 instruction.
    c = __builtin_amdgcn_sdot4(a, b, c, false);
#elif defined(RDNA3)
    c = __builtin_amdgcn_sudot4( true, a, true, b, c, false);
#elif defined(__gfx1010__) || defined(__gfx900__)
    // No dot4 instruction: emulate with byte-wise sign-extended multiplies
    // (SDWA byte selects) and adds via inline assembly.
    int tmp1;
    int tmp2;
    asm("\n \
        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 \n \
        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1 \n \
        v_add3_u32 %0, %1, %2, %0 \n \
        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2 \n \
        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3 \n \
        v_add3_u32 %0, %1, %2, %0 \n \
        "
        : "+v"(c), "=&v"(tmp1), "=&v"(tmp2)
        : "v"(a), "v"(b)
    );
#else
    // Portable C fallback: unpack bytes and accumulate the four products.
    const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
    const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
    c += va[0] * vb[0] + va[1] * vb[1] + va[2] * vb[2] + va[3] * vb[3];
#endif
    return c;
}
|
||||||
|
#endif // defined(GGML_USE_HIPBLAS)
|
||||||
|
|
||||||
|
// TODO: move to ggml-common.h
|
||||||
|
static const __device__ int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
|
||||||
|
|
||||||
|
typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v);
|
||||||
|
|
||||||
|
|
||||||
|
//////////////////////
|
||||||
|
|
||||||
|
// Cached information about the CUDA devices visible to this process.
struct ggml_cuda_device_info {
    int device_count;   // number of usable devices

    struct cuda_device_info {
        int     cc;                 // compute capability
        size_t  smpb;               // max. shared memory per block
        bool    vmm;                // virtual memory support
        size_t  vmm_granularity;    // granularity of virtual memory
        size_t  total_vram;         // total device memory in bytes
    };

    cuda_device_info devices[GGML_CUDA_MAX_DEVICES] = {};

    // Default per-device weighting used when splitting tensors across GPUs.
    // NOTE(review): presumably proportional to each device's VRAM — confirm
    // where it is filled in.
    std::array<float, GGML_CUDA_MAX_DEVICES> default_tensor_split = {};
};
|
||||||
|
|
||||||
|
const ggml_cuda_device_info & ggml_cuda_info();
|
||||||
|
|
||||||
|
void ggml_cuda_set_device(int device);
|
||||||
|
int ggml_cuda_get_device();
|
||||||
|
|
||||||
|
// Abstract interface for a device-memory pool; concrete implementations decide
// how buffers are cached and reused.
struct ggml_cuda_pool {
    virtual ~ggml_cuda_pool() = default;

    // Returns a device pointer of at least `size` bytes; the size actually
    // reserved is reported through *actual_size (may exceed the request).
    virtual void * alloc(size_t size, size_t * actual_size) = 0;
    // Returns a buffer previously obtained from alloc() back to the pool;
    // `size` must be the actual_size reported at allocation time.
    virtual void free(void * ptr, size_t size) = 0;
};
|
||||||
|
|
||||||
|
// RAII helper for a typed allocation from a ggml_cuda_pool: frees the buffer
// back to the pool on destruction. Non-copyable and non-movable.
template<typename T>
struct ggml_cuda_pool_alloc {
    ggml_cuda_pool * pool = nullptr;  // owning pool; must outlive this object
    T * ptr = nullptr;                // device pointer, or nullptr if not allocated
    size_t actual_size = 0;           // bytes actually reserved by the pool

    ggml_cuda_pool_alloc() = default;

    // Bind to a pool without allocating yet; call alloc(size) later.
    explicit ggml_cuda_pool_alloc(ggml_cuda_pool & pool) : pool(&pool) {
    }

    // Bind to a pool and allocate `size` elements immediately.
    ggml_cuda_pool_alloc(ggml_cuda_pool & pool, size_t size) : pool(&pool) {
        alloc(size);
    }

    ~ggml_cuda_pool_alloc() {
        if (ptr != nullptr) {
            pool->free(ptr, actual_size);
        }
    }

    // size is in number of elements
    T * alloc(size_t size) {
        GGML_ASSERT(pool != nullptr);
        GGML_ASSERT(ptr == nullptr);  // double allocation is a programming error
        ptr = (T *) pool->alloc(size * sizeof(T), &this->actual_size);
        return ptr;
    }

    // Late-bind to a pool, then allocate `size` elements.
    T * alloc(ggml_cuda_pool & pool, size_t size) {
        this->pool = &pool;
        return alloc(size);
    }

    T * get() {
        return ptr;
    }

    ggml_cuda_pool_alloc(const ggml_cuda_pool_alloc &) = delete;
    ggml_cuda_pool_alloc(ggml_cuda_pool_alloc &&) = delete;
    ggml_cuda_pool_alloc& operator=(const ggml_cuda_pool_alloc &) = delete;
    ggml_cuda_pool_alloc& operator=(ggml_cuda_pool_alloc &&) = delete;
};
|
||||||
|
|
||||||
|
|
||||||
|
// backend interface
|
||||||
|
|
||||||
|
// Per-tensor GPU bookkeeping attached to a ggml_tensor when it is split
// across multiple devices.
struct ggml_tensor_extra_gpu {
    void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors
    cudaEvent_t events[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS]; // events for synchronizing multiple GPUs
};
|
||||||
|
|
||||||
|
// Per-backend CUDA state: owns streams, cuBLAS handles, and memory pools for
// all devices. Streams, handles, and pools are created lazily on first use and
// released in the destructor.
struct ggml_backend_cuda_context {
    int device;                         // primary device of this context
    std::string name;                   // e.g. "CUDA0"
    cudaEvent_t copy_event = nullptr;   // event used for async copies; created elsewhere on demand

    cudaStream_t streams[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { { nullptr } };
    cublasHandle_t cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};

    explicit ggml_backend_cuda_context(int device) :
        device(device),
        name(GGML_CUDA_NAME + std::to_string(device)) {
    }

    // Destroys every stream and cuBLAS handle that was lazily created.
    ~ggml_backend_cuda_context() {
        if (copy_event != nullptr) {
            CUDA_CHECK(cudaEventDestroy(copy_event));
        }
        for (int i = 0; i < GGML_CUDA_MAX_DEVICES; ++i) {
            for (int j = 0; j < GGML_CUDA_MAX_STREAMS; ++j) {
                if (streams[i][j] != nullptr) {
                    CUDA_CHECK(cudaStreamDestroy(streams[i][j]));
                }
            }
            if (cublas_handles[i] != nullptr) {
                CUBLAS_CHECK(cublasDestroy(cublas_handles[i]));
            }
        }
    }

    // Returns the given stream for `device`, creating it (non-blocking w.r.t.
    // the default stream) on first use. Switches the current device as a side
    // effect when it has to create the stream.
    cudaStream_t stream(int device, int stream) {
        if (streams[device][stream] == nullptr) {
            ggml_cuda_set_device(device);
            CUDA_CHECK(cudaStreamCreateWithFlags(&streams[device][stream], cudaStreamNonBlocking));
        }
        return streams[device][stream];
    }

    // Stream 0 of this context's primary device.
    cudaStream_t stream() {
        return stream(device, 0);
    }

    // Returns the cuBLAS handle for `device`, creating it on first use with
    // TF32 tensor-op math enabled.
    cublasHandle_t cublas_handle(int device) {
        if (cublas_handles[device] == nullptr) {
            ggml_cuda_set_device(device);
            CUBLAS_CHECK(cublasCreate(&cublas_handles[device]));
            CUBLAS_CHECK(cublasSetMathMode(cublas_handles[device], CUBLAS_TF32_TENSOR_OP_MATH));
        }
        return cublas_handles[device];
    }

    // cuBLAS handle of this context's primary device.
    cublasHandle_t cublas_handle() {
        return cublas_handle(device);
    }

    // pool
    std::unique_ptr<ggml_cuda_pool> pools[GGML_CUDA_MAX_DEVICES];

    static std::unique_ptr<ggml_cuda_pool> new_pool_for_device(int device);

    // Returns the memory pool for `device`, creating it on first use.
    ggml_cuda_pool & pool(int device) {
        if (pools[device] == nullptr) {
            pools[device] = new_pool_for_device(device);
        }
        return *pools[device];
    }

    // Memory pool of this context's primary device.
    ggml_cuda_pool & pool() {
        return pool(device);
    }
};
|
49
ggml-cuda/concat.cu
Normal file
49
ggml-cuda/concat.cu
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
#include "concat.cuh"
|
||||||
|
|
||||||
|
// Concatenate x (ne02 planes) and y along dimension 2 into dst.
// Grid layout: blockIdx.y indexes dim 1, blockIdx.z indexes dim 2 of dst;
// threads cover dim 0 (length ne0) with a bounds guard.
static __global__ void concat_f32(const float * x,const float * y, float * dst, const int ne0, const int ne02) {
    const int nidx = threadIdx.x + blockIdx.x * blockDim.x;
    if (nidx >= ne0) {
        return;
    }

    const int offset_dst =
        nidx +
        blockIdx.y * ne0 +
        blockIdx.z * ne0 * gridDim.y;

    if (blockIdx.z < ne02) {
        // src0 fills the first ne02 planes; its element layout matches dst here,
        // so the source offset equals the destination offset.
        dst[offset_dst] = x[offset_dst];
    } else {
        // src1 planes start after src0's, so shift the plane index back by ne02.
        const int offset_src =
            nidx +
            blockIdx.y * ne0 +
            (blockIdx.z - ne02) * ne0 * gridDim.y;
        dst[offset_dst] = y[offset_src];
    }
}
|
||||||
|
|
||||||
|
// Host launcher for concat_f32: one grid dimension per tensor dimension,
// with dim 0 tiled into CUDA_CONCAT_BLOCK_SIZE-wide blocks (ceil division).
static void concat_f32_cuda(const float * x, const float * y, float * dst, const int ne0, int ne1, int ne2, int ne02, cudaStream_t stream) {
    const int num_blocks = (ne0 + CUDA_CONCAT_BLOCK_SIZE - 1) / CUDA_CONCAT_BLOCK_SIZE;
    const dim3 grid_dims(num_blocks, ne1, ne2);
    concat_f32<<<grid_dims, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(x, y, dst, ne0, ne02);
}
|
||||||
|
|
||||||
|
// CUDA implementation of GGML_OP_CONCAT: concatenates src0 and src1 into dst
// along dimension 2 (src0's ne[2] planes first, then src1's). F32 only.
// NOTE(review): the kernel's flat indexing presumes contiguous rows within
// each dim-3 slice — confirm against the op's supports_op checks.
void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];
    const float * src0_d = (const float *)src0->data;
    const float * src1_d = (const float *)src1->data;
    float * dst_d = (float *)dst->data;
    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);

    // One launch per dim-3 slice; nb[3] is a byte stride, so divide by 4
    // (sizeof(float)) to advance the float pointers.
    for (int i3 = 0; i3 < dst->ne[3]; i3++) {
        concat_f32_cuda(src0_d + i3 * (src0->nb[3] / 4), src1_d + i3 * (src1->nb[3] / 4), dst_d + i3 * (dst->nb[3] / 4), dst->ne[0], dst->ne[1], dst->ne[2], src0->ne[2], stream);
    }
}
|
5
ggml-cuda/concat.cuh
Normal file
5
ggml-cuda/concat.cuh
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
#define CUDA_CONCAT_BLOCK_SIZE 256
|
||||||
|
|
||||||
|
void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
824
ggml-cuda/convert.cu
Normal file
824
ggml-cuda/convert.cu
Normal file
@ -0,0 +1,824 @@
|
|||||||
|
#include "convert.cuh"
|
||||||
|
#include "dequantize.cuh"
|
||||||
|
|
||||||
|
#define CUDA_Q8_0_NE_ALIGN 2048
|
||||||
|
|
||||||
|
// Generic dequantization kernel: each thread decodes 2 values of a k-element
// quantized tensor into y. qk = elements per quant block; qr controls where
// the second decoded value lands (adjacent for qr==1, half a block away
// otherwise); dequantize_kernel = per-type decoder writing a dfloat2.
template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) {
    const int i = 2*(blockDim.x*blockIdx.x + threadIdx.x);  // first of this thread's 2 outputs

    if (i >= k) {
        return;
    }

    const int ib = i/qk; // block index
    const int iqs = (i%qk)/qr; // quant index
    const int iybs = i - i%qk; // y block start index
    const int y_offset = qr == 1 ? 1 : qk/2;

    // dequantize
    dfloat2 v;
    dequantize_kernel(vx, ib, iqs, v);

    y[iybs + iqs + 0] = v.x;
    y[iybs + iqs + y_offset] = v.y;
}
|
||||||
|
|
||||||
|
// Fast q8_0 -> f16 dequantization: each CUDA block stages CUDA_Q8_0_NE_ALIGN
// elements' worth of raw q8_0 data into shared memory as ints, then decodes
// pairs of int8 quants with half2 math. need_check enables tail-bounds guards
// when k is not a multiple of CUDA_Q8_0_NE_ALIGN. Requires SM60+ (__hmul2).
// NOTE(review): there is no __syncthreads() between the shared-memory write
// and read loops — presumably the kernel is launched with a single warp
// (WARP_SIZE threads); confirm at the launch site.
template <bool need_check>
static __global__ void dequantize_block_q8_0_f16(const void * __restrict__ vx, half * __restrict__ y, const int k) {
#if __CUDA_ARCH__ >= CC_PASCAL
    constexpr int nint = CUDA_Q8_0_NE_ALIGN/sizeof(int) + WARP_SIZE;

    const int i0 = CUDA_Q8_0_NE_ALIGN*blockIdx.x;  // first output element of this block
    const int * x0 = ((int *) vx) + blockIdx.x * nint;
    half2 * y2 = (half2 *) (y + i0);

    __shared__ int vals[nint];

    // Stage the raw block data into shared memory, WARP_SIZE ints at a time.
#pragma unroll
    for (int ix0 = 0; ix0 < nint; ix0 += WARP_SIZE) {
        if (need_check && i0*sizeof(block_q8_0)/QK8_0 + sizeof(int)*(ix0 + threadIdx.x) >= k*sizeof(block_q8_0)/QK8_0) {
            break;
        }

        const int ix = ix0 + threadIdx.x;
        vals[ix] = x0[ix];
    }

    // Decode: each iteration converts 2 quants per thread via half2 multiply.
#pragma unroll
    for (int iy = 0; iy < CUDA_Q8_0_NE_ALIGN; iy += 2*WARP_SIZE) {
        if (need_check && i0 + iy + 2*threadIdx.x >= k) {
            return;
        }

        // b0 points at the scale (d) of the q8_0 block containing these quants.
        const half * b0 = ((const half *) vals) + (sizeof(block_q8_0)/sizeof(half)) * ((iy + 2*threadIdx.x)/QK8_0);
        const half d = *b0;
        const char2 qs = ((const char2 *) (b0 + 1))[threadIdx.x % (QK8_0/2)];

        y2[iy/2 + threadIdx.x] = __hmul2(make_half2(qs.x, qs.y), __half2half2(d));
    }
#else
    GGML_UNUSED(vx);
    GGML_UNUSED(y);
    GGML_UNUSED(k);
    NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= CC_PASCAL
}
|
||||||
|
|
||||||
|
// Dequantize q4_0: each CUDA block of 32 threads expands 8 quant blocks
// (256 values); every thread writes 4 low-nibble and 4 high-nibble values.
template<typename dst_t>
static __global__ void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) {

    const int i = blockIdx.x;

    // assume 32 threads
    const int tid = threadIdx.x;
    const int il = tid/8;     // 4-value segment within a quant block
    const int ir = tid%8;     // which of the 8 quant blocks handled by this CUDA block
    const int ib = 8*i + ir;  // global quant-block index
    if (ib >= nb32) {
        return;
    }

    dst_t * y = yy + 256*i + 32*ir + 4*il;

    const block_q4_0 * x = (const block_q4_0 *)vx + ib;
    const float d = __half2float(x->d);
    const float dm = -8*d;    // q4_0 stores unsigned nibbles with an implicit -8 offset

    const uint8_t * q = x->qs + 4*il;

    for (int l = 0; l < 4; ++l) {
        y[l+ 0] = d * (q[l] & 0xF) + dm;  // low nibble
        y[l+16] = d * (q[l] >> 4) + dm;   // high nibble, 16 values later
    }
}
|
||||||
|
|
||||||
|
// Dequantize q4_1: same thread layout as q4_0, but each quant block carries an
// explicit scale/min pair (dm) instead of the implicit -8 offset.
template<typename dst_t>
static __global__ void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) {

    const int i = blockIdx.x;

    // assume 32 threads
    const int tid = threadIdx.x;
    const int il = tid/8;     // 4-value segment within a quant block
    const int ir = tid%8;     // which of the 8 quant blocks handled by this CUDA block
    const int ib = 8*i + ir;  // global quant-block index
    if (ib >= nb32) {
        return;
    }

    dst_t * y = yy + 256*i + 32*ir + 4*il;

    const block_q4_1 * x = (const block_q4_1 *)vx + ib;
    const float2 d = __half22float2(x->dm);  // d.x = scale, d.y = min (from usage below)

    const uint8_t * q = x->qs + 4*il;

    for (int l = 0; l < 4; ++l) {
        y[l+ 0] = d.x * (q[l] & 0xF) + d.y;  // low nibble
        y[l+16] = d.x * (q[l] >> 4) + d.y;   // high nibble, 16 values later
    }
}
|
||||||
|
|
||||||
|
//================================== k-quants
|
||||||
|
|
||||||
|
// Dequantize q2_K (2-bit k-quant): one CUDA block per quant block (blockIdx.x).
// Each value is decoded as scale * q2 - min, with per-sub-block 4-bit scales
// and mins packed into x[i].scales and the super-block scale/min pair in dm.
template<typename dst_t>
static __global__ void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    const block_q2_K * x = (const block_q2_K *) vx;

    const int tid = threadIdx.x;
#if QK_K == 256
    const int n   = tid/32;         // 128-value half of the super-block
    const int l   = tid - 32*n;     // position within that half
    const int is  = 8*n + l/16;     // sub-block scale index

    const uint8_t q = x[i].qs[32*n + l];
    dst_t * y = yy + i*QK_K + 128*n;

    float dall = __low2half(x[i].dm);   // super-block scale
    float dmin = __high2half(x[i].dm);  // super-block min
    // Four 2-bit quants are packed per byte; each pair of bits uses its own
    // sub-block scale (low nibble) and min (high nibble).
    y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
    y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4);
    y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4);
    y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4);
#else
    // Small-block (QK_K == 64) variant.
    const int is = tid/16;  // 0 or 1
    const int il = tid%16;  // 0...15
    const uint8_t q = x[i].qs[il] >> (2*is);
    dst_t * y = yy + i*QK_K + 16*is + il;
    float dall = __low2half(x[i].dm);
    float dmin = __high2half(x[i].dm);
    y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
    y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4);
#endif

}
|
||||||
|
|
||||||
|
// Dequantize q3_K (3-bit k-quant): one CUDA block per quant block. Each value
// combines a 2-bit quant from qs with one extra high bit from the hmask
// bitmask; the 6-bit sub-block scales are reassembled from a packed layout.
template<typename dst_t>
static __global__ void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    const block_q3_K * x = (const block_q3_K *) vx;

#if QK_K == 256
    const int r = threadIdx.x/4;
    const int tid = r/2;
    const int is0 = r%2;
    const int l0 = 16*is0 + 4*(threadIdx.x%4);  // first of this thread's 4 values
    const int n = tid / 4;                      // 128-value half of the super-block
    const int j = tid - 4*n;                    // 32-value group within that half

    uint8_t m = 1 << (4*n + j);                 // hmask bit selecting the high bit
    int is = 8*n + 2*j + is0;                   // sub-block scale index
    int shift = 2*j;                            // bit position of the 2-bit quant

    // Reassemble the 6-bit scale: low 4 bits and high 2 bits are stored in
    // different bytes of the packed scales array depending on the index.
    int8_t us = is <  4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
                is <  8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) :
                is < 12 ? (x[i].scales[is-8] >>  4) | (((x[i].scales[is+0] >> 4) & 3) << 4) :
                          (x[i].scales[is-8] >>  4) | (((x[i].scales[is-4] >> 6) & 3) << 4);
    float d_all = x[i].d;
    float dl = d_all * (us - 32);  // scales carry a +32 bias

    dst_t * y = yy + i*QK_K + 128*n + 32*j;
    const uint8_t * q = x[i].qs + 32*n;
    const uint8_t * hm = x[i].hmask;

    // High bit clear means subtract 4 (i.e. the quant's implicit -4 offset).
    for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
#else
    // Small-block (QK_K == 64) variant.
    const int tid = threadIdx.x;
    const int is  = tid/16;  // 0 or 1
    const int il  = tid%16;  // 0...15
    const int im  = il/8;    // 0...1
    const int in  = il%8;    // 0...7

    dst_t * y = yy + i*QK_K + 16*is + il;

    const uint8_t q = x[i].qs[il] >> (2*is);
    const uint8_t h = x[i].hmask[in] >> (2*is + im);
    const float   d = (float)x[i].d;

    if (is == 0) {
        y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
        y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
    } else {
        y[ 0] = d * ((x[i].scales[0] >>  4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
        y[32] = d * ((x[i].scales[1] >>  4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
    }
#endif

}
|
||||||
|
|
||||||
|
#if QK_K == 256
// Unpack the 6-bit scale (d) and 6-bit min (m) for sub-block j (0..7) from the
// packed 12-byte k-quant scales array q.
static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
    if (j < 4) {
        // first 4 sub-blocks: scale/min are the low 6 bits of q[j] / q[j+4]
        d = q[j] & 63; m = q[j + 4] & 63;
    } else {
        // last 4 sub-blocks: low nibble of q[j+4] plus the top 2 bits of q[j-4]
        // (scale) or q[j] (min) as the high bits
        d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
        m = (q[j+4] >>  4) | ((q[j-0] >> 6) << 4);
    }
}
#endif
|
||||||
|
|
||||||
|
// Dequantize one q4_K super-block (QK_K values) per CUDA block into yy.
// Grid: one block per super-block; block size is 32 threads for QK_K == 256.
template<typename dst_t>
static __global__ void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
    const block_q4_K * x = (const block_q4_K *) vx;

    const int i = blockIdx.x;

#if QK_K == 256
    // assume 32 threads
    const int tid = threadIdx.x;
    const int il  = tid/8;
    const int ir  = tid%8;
    const int is  = 2*il;
    const int n   = 4;  // values handled per thread per nibble half

    dst_t * y = yy + i*QK_K + 64*il + n*ir;

    // dm packs the super-block scale (low half) and min (high half)
    const float dall = __low2half(x[i].dm);
    const float dmin = __high2half(x[i].dm);

    const uint8_t * q = x[i].qs + 32*il + n*ir;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, sc, m);
    const float d1 = dall * sc; const float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[i].scales, sc, m);
    const float d2 = dall * sc; const float m2 = dmin * m;
    // low nibble -> first 32 values of the sub-block pair, high nibble -> next 32
    for (int l = 0; l < n; ++l) {
        y[l + 0] = d1 * (q[l] & 0xF) - m1;
        y[l +32] = d2 * (q[l] >>  4) - m2;
    }
#else
    // QK_K == 64 layout: scale/min nibbles stored directly in scales[0..1]
    const int tid = threadIdx.x;
    const uint8_t * q = x[i].qs;
    dst_t * y = yy + i*QK_K;
    const float d = (float)x[i].dm[0];
    const float m = (float)x[i].dm[1];
    y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4);
    y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >>  4) - m * (x[i].scales[1] >> 4);
#endif
}
|
||||||
|
|
||||||
|
// Dequantize one q5_K super-block per CUDA block: 4-bit quants in qs plus a
// 5th bit per value in the hmask-style qh array.
template<typename dst_t>
static __global__ void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
    const block_q5_K * x = (const block_q5_K *) vx;

    const int i = blockIdx.x;

#if QK_K == 256
    // assume 64 threads - this is very slightly better than the one below
    const int tid = threadIdx.x;
    const int il  = tid/16;   // il is in 0...3
    const int ir  = tid%16;   // ir is in 0...15
    const int is  = 2*il;     // is is in 0...6

    dst_t * y = yy + i*QK_K + 64*il + 2*ir;

    const float dall = __low2half(x[i].dm);
    const float dmin = __high2half(x[i].dm);

    const uint8_t * ql = x[i].qs + 32*il + 2*ir;
    const uint8_t * qh = x[i].qh + 2*ir;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, sc, m);
    const float d1 = dall * sc; const float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[i].scales, sc, m);
    const float d2 = dall * sc; const float m2 = dmin * m;

    // hm selects the 5th bit for the low-nibble pair; shifted once for the
    // high-nibble pair 32 elements further on
    uint8_t hm = 1 << (2*il);
    y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1;
    y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1;
    hm <<= 1;
    y[32] = d2 * ((ql[ 0] >>  4) + (qh[ 0] & hm ? 16 : 0)) - m2;
    y[33] = d2 * ((ql[ 1] >>  4) + (qh[ 1] & hm ? 16 : 0)) - m2;
#else
    const int tid = threadIdx.x;
    const uint8_t q = x[i].qs[tid];
    const int im = tid/8;  // 0...3
    const int in = tid%8;  // 0...7
    const int is = tid/16; // 0 or 1
    const uint8_t h = x[i].qh[in] >> im;
    const float   d = x[i].d;
    dst_t * y = yy + i*QK_K + tid;
    y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16));
    y[32] = d * x[i].scales[is+2] * ((q >>  4) - ((h >> 4) & 1 ? 0 : 16));
#endif
}
|
||||||
|
|
||||||
|
// Dequantize one q6_K super-block per CUDA block: 6-bit values assembled from
// 4 low bits (ql) and 2 high bits (qh), with signed 8-bit sub-block scales.
template<typename dst_t>
static __global__ void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
    const block_q6_K * x = (const block_q6_K *) vx;

    const int i = blockIdx.x;
#if QK_K == 256

    // assume 64 threads - this is very slightly better than the one below
    const int tid = threadIdx.x;
    const int ip  = tid/32;      // ip is 0 or 1
    const int il  = tid - 32*ip; // 0...32
    const int is  = 8*ip + il/16;

    dst_t * y = yy + i*QK_K + 128*ip + il;

    const float d = x[i].d;

    const uint8_t * ql = x[i].ql + 64*ip + il;
    const uint8_t   qh = x[i].qh[32*ip + il];
    const int8_t  * sc = x[i].scales + is;

    // each thread writes 4 outputs, 32 apart; values are re-centered by -32
    y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
    y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
    y[64] = d * sc[4] * ((int8_t)((ql[ 0]  >> 4) | (((qh >> 4) & 3) << 4)) - 32);
    y[96] = d * sc[6] * ((int8_t)((ql[32]  >> 4) | (((qh >> 6) & 3) << 4)) - 32);
#else

    // assume 32 threads
    const int tid = threadIdx.x;
    const int ip  = tid/16;      // 0 or 1
    const int il  = tid - 16*ip; // 0...15

    dst_t * y = yy + i*QK_K + 16*ip + il;

    const float d = x[i].d;

    const uint8_t   ql = x[i].ql[16*ip + il];
    const uint8_t   qh = x[i].qh[il] >> (2*ip);
    const int8_t  * sc = x[i].scales;

    y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
    y[32] = d * sc[ip+2] * ((int8_t)((ql  >> 4) | (((qh >> 4) & 3) << 4)) - 32);
#endif
}
|
||||||
|
|
||||||
|
// Dequantize one iq2_xxs super-block per CUDA block (32 threads, QK_K == 256):
// 8 values per thread looked up from the iq2xxs codebook with per-group signs.
template<typename dst_t>
static __global__ void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    const block_iq2_xxs * x = (const block_iq2_xxs *) vx;

    const int tid = threadIdx.x;
#if QK_K == 256
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint16_t * q2 = x[i].qs + 4*ib;
    const uint8_t  * aux8 = (const uint8_t *)q2;
    // codebook index for this group of 8 values
    const uint8_t  * grid = (const uint8_t *)(iq2xxs_grid + aux8[il]);
    // aux32: 7-bit sign indices for the 4 groups plus a 4-bit scale in the top bits
    const uint32_t aux32 = q2[2] | (q2[3] << 16);
    const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.25f;
    const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127];
    for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
#else
    NO_DEVICE_CODE;
#endif

}
|
||||||
|
|
||||||
|
// Dequantize one iq2_xs super-block per CUDA block (32 threads, QK_K == 256):
// each 16-bit q entry holds a 9-bit codebook index and a 7-bit sign index.
template<typename dst_t>
static __global__ void dequantize_block_iq2_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    const block_iq2_xs * x = (const block_iq2_xs *) vx;

    const int tid = threadIdx.x;
#if QK_K == 256
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint16_t * q2 = x[i].qs + 4*ib;
    const uint8_t  * grid = (const uint8_t *)(iq2xs_grid + (q2[il] & 511));
    // 4-bit sub-block scale packed two per byte in scales[]
    const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f;
    const uint8_t signs = ksigns_iq2xs[q2[il] >> 9];
    for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
#else
    NO_DEVICE_CODE;
#endif

}
|
||||||
|
|
||||||
|
// Dequantize one iq2_s super-block per CUDA block (32 threads, QK_K == 256):
// 10-bit codebook index split across qs (low 8) and qh (high 2); explicit
// per-group sign bytes stored after the indices in qs.
template<typename dst_t>
static __global__ void dequantize_block_iq2_s(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    const block_iq2_s * x = (const block_iq2_s *) vx;

    const int tid = threadIdx.x;
#if QK_K == 256
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint8_t * grid = (const uint8_t *)(iq2s_grid + (x[i].qs[4*ib+il] | ((x[i].qh[ib] << (8-2*il)) & 0x300)));
    const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f;
    const uint8_t signs = x[i].qs[QK_K/8+4*ib+il];
    for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
#else
    NO_DEVICE_CODE;
#endif

}
|
||||||
|
|
||||||
|
// Dequantize one iq3_xxs super-block per CUDA block (32 threads, QK_K == 256):
// two 4-value codebook lookups per thread; scale and sign indices live in the
// trailing part of qs (after QK_K/4 bytes of grid indices).
template<typename dst_t>
static __global__ void dequantize_block_iq3_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    const block_iq3_xxs * x = (const block_iq3_xxs *) vx;

    const int tid = threadIdx.x;
#if QK_K == 256
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint8_t  * q3  = x[i].qs + 8*ib;
    const uint16_t * gas = (const uint16_t *)(x[i].qs + QK_K/4) + 2*ib;
    const uint8_t  * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*il+0]);
    const uint8_t  * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*il+1]);
    // aux32: 7-bit sign indices per group plus a 4-bit scale in the top bits
    const uint32_t aux32 = gas[0] | (gas[1] << 16);
    const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.5f;
    const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127];
    for (int j = 0; j < 4; ++j) {
        y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
        y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
    }
#else
    NO_DEVICE_CODE;
#endif

}
|
||||||
|
|
||||||
|
// Dequantize one iq3_s super-block per CUDA block (32 threads, QK_K == 256):
// 9-bit codebook indices (low 8 in qs, 9th bit from qh) and explicit signs.
template<typename dst_t>
static __global__ void dequantize_block_iq3_s(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    const block_iq3_s * x = (const block_iq3_s *) vx;

    const int tid = threadIdx.x;
#if QK_K == 256
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint8_t * qs = x[i].qs + 8*ib;
    const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*il+0] | ((x[i].qh[ib] << (8-2*il)) & 256)));
    const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*il+1] | ((x[i].qh[ib] << (7-2*il)) & 256)));
    // odd scale 2*s+1 from the 4-bit packed sub-block scale
    const float d = (float)x[i].d * (1 + 2*((x[i].scales[ib/2] >> 4*(ib%2)) & 0xf));
    const uint8_t signs = x[i].signs[4*ib + il];
    for (int j = 0; j < 4; ++j) {
        y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
        y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
    }
#else
    NO_DEVICE_CODE;
#endif

}
|
||||||
|
|
||||||
|
// Dequantize one iq1_s super-block per CUDA block (32 threads, QK_K == 256):
// 1.5625-bpw codebook lookups; qh carries the grid high bits, a 3-bit scale
// and the per-group shift sign.
template<typename dst_t>
static __global__ void dequantize_block_iq1_s(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    const block_iq1_s * x = (const block_iq1_s *) vx;

    const int tid = threadIdx.x;
#if QK_K == 256
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    // top bit of qh selects the sign of the IQ1S_DELTA shift
    const float delta = x[i].qh[ib] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA;
    // odd scale 2*s+1 from the 3-bit scale in qh bits 12..14
    const float d = (float)x[i].d * (2*((x[i].qh[ib] >> 12) & 7) + 1);
    // expand the packed 4-bit grid values into 8 signed bytes
    uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32;
    grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[ib] >> 3*il) & 7) << 8)];
    grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f;
    grid32[0] &= 0x0f0f0f0f;
    for (int j = 0; j < 8; ++j) {
        y[j] = d * (q[j] + delta);
    }
#else
    NO_DEVICE_CODE;
#endif

}
|
||||||
|
|
||||||
|
// Dequantize one iq1_m super-block per CUDA block (32 threads, QK_K == 256):
// like iq1_s but with a super-block fp16 scale reassembled from the top bits
// of the scales words and per-16 3-bit sub-scales.
template<typename dst_t>
static __global__ void dequantize_block_iq1_m(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    const block_iq1_m * x = (const block_iq1_m *) vx;

    const int tid = threadIdx.x;
#if QK_K == 256
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 8*il;
    const uint16_t * sc = (const uint16_t *)x[i].scales;
    // the fp16 super-block scale is spread over the top nibbles of sc[0..3]
    iq1m_scale_t scale;
    scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
    const int ib16 = 2*ib + il/2; // sc[ib16/4] >> 3*(ib16%4) -> sc[ib/2] >> 3*((2*ib+il/2)%4);
    const float d = (float)scale.f16 * (2*((sc[ib16/4] >> 3*(ib16%4)) & 0x7) + 1);
    // qh nibble bit 3 selects the sign of the IQ1M_DELTA shift
    const float delta = x[i].qh[2*ib+il/2] & (0x08 << 4*(il%2)) ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA;
    // expand the packed 4-bit grid values into 8 signed bytes
    uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32;
    grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[2*ib+il/2] >> 4*(il%2)) & 7) << 8)];
    grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f;
    grid32[0] &= 0x0f0f0f0f;
    for (int j = 0; j < 8; ++j) {
        y[j] = d * (q[j] + delta);
    }
#else
    NO_DEVICE_CODE;
#endif

}
|
||||||
|
|
||||||
|
|
||||||
|
// Dequantize QK_K values per CUDA block from iq4_nl blocks (QK4_NL values
// each) via the non-linear 4-bit lookup table kvalues_iq4nl. 32 threads.
template<typename dst_t>
static __global__ void dequantize_block_iq4_nl(const void * __restrict__ vx, dst_t * __restrict__ yy) {

    const int i = blockIdx.x;
    // advance to the first iq4_nl block covered by this CUDA block
    const block_iq4_nl * x = (const block_iq4_nl *) vx + i*(QK_K/QK4_NL);

    const int tid = threadIdx.x;
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 4*il;
    const uint8_t  * q4 = x[ib].qs + 4*il;
    const float d = (float)x[ib].d;
    // low nibbles fill y[0..3], high nibbles fill y[16..19]
    for (int j = 0; j < 4; ++j) {
        y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf];
        y[j+16] = d * kvalues_iq4nl[q4[j] >>  4];
    }

}
|
||||||
|
|
||||||
|
#if QK_K != 64
// Dequantize one iq4_xs super-block per CUDA block (32 threads): same lookup
// table as iq4_nl but with per-sub-block 6-bit scales split across
// scales_l (low 4 bits) and scales_h (high 2 bits).
template<typename dst_t>
static __global__ void dequantize_block_iq4_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) {
    const int i = blockIdx.x;
    const block_iq4_xs * x = (const block_iq4_xs *)vx;

    const int tid = threadIdx.x;
    const int il = tid/8; // 0...3
    const int ib = tid%8; // 0...7
    dst_t * y = yy + i*QK_K + 32*ib + 4*il;
    const uint8_t  * q4 = x[i].qs + 16*ib + 4*il;
    // 6-bit scale re-centered by -32
    const float d = (float)x[i].d * ((((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4)) - 32);
    for (int j = 0; j < 4; ++j) {
        y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf];
        y[j+16] = d * kvalues_iq4nl[q4[j] >>  4];
    }
}
#endif
|
||||||
|
|
||||||
|
// Host-side launcher for the generic dequantize_block kernel: each thread
// processes 2 elements, so the grid is sized by k / (2 * block size).
template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static void dequantize_block_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int k, cudaStream_t stream) {
    constexpr int elems_per_block = 2*CUDA_DEQUANTIZE_BLOCK_SIZE;
    const int nblocks = (k + elems_per_block - 1) / elems_per_block; // ceil-div
    dequantize_block<qk, qr, dequantize_kernel><<<nblocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
|
||||||
|
|
||||||
|
// Launches the q8_0 -> fp16 kernel. The bounds-check variant is selected at
// compile time via the template parameter, so the branch duplicates the launch
// with need_check = false (k aligned) or true (partial tail block).
static void dequantize_block_q8_0_f16_cuda(const void * __restrict__ vx, half * __restrict__ y, const int k, cudaStream_t stream) {
    const int num_blocks = (k + CUDA_Q8_0_NE_ALIGN - 1) / CUDA_Q8_0_NE_ALIGN;
    if (k % CUDA_Q8_0_NE_ALIGN == 0) {
        const bool need_check = false;
        dequantize_block_q8_0_f16<need_check><<<num_blocks, WARP_SIZE, 0, stream>>>(vx, y, k);
    } else {
        const bool need_check = true;
        dequantize_block_q8_0_f16<need_check><<<num_blocks, WARP_SIZE, 0, stream>>>(vx, y, k);
    }
}
|
||||||
|
|
||||||
|
// Dequantize k q2_K-packed values into y; one CUDA block per super-block.
template<typename dst_t>
static void dequantize_row_q2_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
#if QK_K == 256
    dequantize_block_q2_K<<<nblocks, 64, 0, stream>>>(vx, y);
#else
    dequantize_block_q2_K<<<nblocks, 32, 0, stream>>>(vx, y);
#endif
}
|
||||||
|
|
||||||
|
// Dequantize k q3_K-packed values into y; one CUDA block per super-block.
template<typename dst_t>
static void dequantize_row_q3_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
#if QK_K == 256
    dequantize_block_q3_K<<<nblocks, 64, 0, stream>>>(vx, y);
#else
    dequantize_block_q3_K<<<nblocks, 32, 0, stream>>>(vx, y);
#endif
}
|
||||||
|
|
||||||
|
// Dequantize k q4_0-packed values into y. The kernel consumes 256 values per
// CUDA block, so the grid is a ceil-div over 256 with nb32 32-value blocks.
template<typename dst_t>
static void dequantize_row_q4_0_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nb32    = k / 32;
    const int nblocks = (k + 255) / 256;
    dequantize_block_q4_0<<<nblocks, 32, 0, stream>>>(vx, y, nb32);
}
|
||||||
|
|
||||||
|
// Dequantize k q4_1-packed values into y. The kernel consumes 256 values per
// CUDA block, so the grid is a ceil-div over 256 with nb32 32-value blocks.
template<typename dst_t>
static void dequantize_row_q4_1_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nb32    = k / 32;
    const int nblocks = (k + 255) / 256;
    dequantize_block_q4_1<<<nblocks, 32, 0, stream>>>(vx, y, nb32);
}
|
||||||
|
|
||||||
|
// Dequantize k q4_K-packed values into y; one 32-thread CUDA block per super-block.
template<typename dst_t>
static void dequantize_row_q4_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
    dequantize_block_q4_K<<<nblocks, 32, 0, stream>>>(vx, y);
}
|
||||||
|
|
||||||
|
// Dequantize k q5_K-packed values into y; one CUDA block per super-block.
template<typename dst_t>
static void dequantize_row_q5_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
#if QK_K == 256
    dequantize_block_q5_K<<<nblocks, 64, 0, stream>>>(vx, y);
#else
    dequantize_block_q5_K<<<nblocks, 32, 0, stream>>>(vx, y);
#endif
}
|
||||||
|
|
||||||
|
// Dequantize k q6_K-packed values into y; one CUDA block per super-block.
template<typename dst_t>
static void dequantize_row_q6_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
#if QK_K == 256
    dequantize_block_q6_K<<<nblocks, 64, 0, stream>>>(vx, y);
#else
    dequantize_block_q6_K<<<nblocks, 32, 0, stream>>>(vx, y);
#endif
}
|
||||||
|
|
||||||
|
// Dequantize k iq2_xxs-packed values into y; one 32-thread block per super-block.
template<typename dst_t>
static void dequantize_row_iq2_xxs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
    dequantize_block_iq2_xxs<<<nblocks, 32, 0, stream>>>(vx, y);
}
|
||||||
|
|
||||||
|
// Dequantize k iq2_xs-packed values into y; one 32-thread block per super-block.
template<typename dst_t>
static void dequantize_row_iq2_xs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
    dequantize_block_iq2_xs<<<nblocks, 32, 0, stream>>>(vx, y);
}
|
||||||
|
|
||||||
|
// Dequantize k iq2_s-packed values into y; one 32-thread block per super-block.
template<typename dst_t>
static void dequantize_row_iq2_s_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
    dequantize_block_iq2_s<<<nblocks, 32, 0, stream>>>(vx, y);
}
|
||||||
|
|
||||||
|
// Dequantize k iq3_xxs-packed values into y; one 32-thread block per super-block.
template<typename dst_t>
static void dequantize_row_iq3_xxs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
    dequantize_block_iq3_xxs<<<nblocks, 32, 0, stream>>>(vx, y);
}
|
||||||
|
|
||||||
|
// Dequantize k iq3_s-packed values into y; one 32-thread block per super-block.
template<typename dst_t>
static void dequantize_row_iq3_s_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
    dequantize_block_iq3_s<<<nblocks, 32, 0, stream>>>(vx, y);
}
|
||||||
|
|
||||||
|
// Dequantize k iq1_s-packed values into y; one 32-thread block per super-block.
template<typename dst_t>
static void dequantize_row_iq1_s_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
    dequantize_block_iq1_s<<<nblocks, 32, 0, stream>>>(vx, y);
}
|
||||||
|
|
||||||
|
// Dequantize k iq4_nl-packed values into y; ceil-div grid since iq4_nl rows
// need not be a multiple of QK_K.
template<typename dst_t>
static void dequantize_row_iq4_nl_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = (k + QK_K - 1) / QK_K;
    dequantize_block_iq4_nl<<<nblocks, 32, 0, stream>>>(vx, y);
}
|
||||||
|
|
||||||
|
// Dequantize k iq1_m-packed values into y; one 32-thread block per super-block.
template<typename dst_t>
static void dequantize_row_iq1_m_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = k / QK_K;
    dequantize_block_iq1_m<<<nblocks, 32, 0, stream>>>(vx, y);
}
|
||||||
|
|
||||||
|
// Dequantize k iq4_xs-packed values into y; with QK_K == 64 the iq4_nl kernel
// is reused since iq4_xs is not defined for that block size.
template<typename dst_t>
static void dequantize_row_iq4_xs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
    const int nblocks = (k + QK_K - 1) / QK_K;
#if QK_K == 64
    dequantize_block_iq4_nl<<<nblocks, 32, 0, stream>>>(vx, y);
#else
    dequantize_block_iq4_xs<<<nblocks, 32, 0, stream>>>(vx, y);
#endif
}
|
||||||
|
|
||||||
|
// Element-wise type conversion kernel: y[i] = (dst_t) x[i] for i in [0, k).
// One thread per element; launch with ceil(k / blockDim.x) blocks.
template <typename src_t, typename dst_t>
static __global__ void convert_unary(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) {
    // compute the index in 64 bits: blockDim.x*blockIdx.x + threadIdx.x can
    // exceed INT_MAX for the tail grid when k approaches INT_MAX
    const int64_t i = (int64_t)blockDim.x*blockIdx.x + threadIdx.x;

    if (i >= k) {
        return;
    }

    const src_t * x = (src_t *) vx;

    y[i] = x[i];
}
|
||||||
|
|
||||||
|
// Host-side launcher for convert_unary: one thread per element, ceil-div grid.
template <typename src_t, typename dst_t>
static void convert_unary_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int k, cudaStream_t stream) {
    const int nblocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
    convert_unary<src_t><<<nblocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
|
||||||
|
|
||||||
|
// Returns the dequantize/convert function producing fp16 output for the given
// ggml tensor type, or nullptr if the type is unsupported.
to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
    int id;
    switch (type) {
        case GGML_TYPE_Q4_0:
            return dequantize_row_q4_0_cuda;
        case GGML_TYPE_Q4_1:
            return dequantize_row_q4_1_cuda;
        case GGML_TYPE_Q5_0:
            return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>;
        case GGML_TYPE_Q5_1:
            return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
        case GGML_TYPE_Q8_0:
            // use the specialized q8_0 -> fp16 kernel on Pascal or newer;
            // fall back to the generic path on older devices
            CUDA_CHECK(cudaGetDevice(&id));
            if (ggml_cuda_info().devices[id].cc >= CC_PASCAL) {
                return dequantize_block_q8_0_f16_cuda;
            }
            return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
        case GGML_TYPE_Q2_K:
            return dequantize_row_q2_K_cuda;
        case GGML_TYPE_Q3_K:
            return dequantize_row_q3_K_cuda;
        case GGML_TYPE_Q4_K:
            return dequantize_row_q4_K_cuda;
        case GGML_TYPE_Q5_K:
            return dequantize_row_q5_K_cuda;
        case GGML_TYPE_Q6_K:
            return dequantize_row_q6_K_cuda;
        case GGML_TYPE_IQ2_XXS:
            return dequantize_row_iq2_xxs_cuda;
        case GGML_TYPE_IQ2_XS:
            return dequantize_row_iq2_xs_cuda;
        case GGML_TYPE_IQ2_S:
            return dequantize_row_iq2_s_cuda;
        case GGML_TYPE_IQ3_XXS:
            return dequantize_row_iq3_xxs_cuda;
        case GGML_TYPE_IQ1_S:
            return dequantize_row_iq1_s_cuda;
        case GGML_TYPE_IQ1_M:
            return dequantize_row_iq1_m_cuda;
        case GGML_TYPE_IQ4_NL:
            return dequantize_row_iq4_nl_cuda;
        case GGML_TYPE_IQ4_XS:
            return dequantize_row_iq4_xs_cuda;
        case GGML_TYPE_IQ3_S:
            return dequantize_row_iq3_s_cuda;
        case GGML_TYPE_F32:
            // plain element-wise float -> half conversion
            return convert_unary_cuda<float>;
        default:
            return nullptr;
    }
}
|
||||||
|
|
||||||
|
// Returns the dequantize/convert function producing fp32 output for the given
// ggml tensor type, or nullptr if the type is unsupported.
to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
            return dequantize_row_q4_0_cuda;
        case GGML_TYPE_Q4_1:
            return dequantize_row_q4_1_cuda;
        case GGML_TYPE_Q5_0:
            return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>;
        case GGML_TYPE_Q5_1:
            return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
        case GGML_TYPE_Q8_0:
            return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
        case GGML_TYPE_Q2_K:
            return dequantize_row_q2_K_cuda;
        case GGML_TYPE_Q3_K:
            return dequantize_row_q3_K_cuda;
        case GGML_TYPE_Q4_K:
            return dequantize_row_q4_K_cuda;
        case GGML_TYPE_Q5_K:
            return dequantize_row_q5_K_cuda;
        case GGML_TYPE_Q6_K:
            return dequantize_row_q6_K_cuda;
        case GGML_TYPE_IQ2_XXS:
            return dequantize_row_iq2_xxs_cuda;
        case GGML_TYPE_IQ2_XS:
            return dequantize_row_iq2_xs_cuda;
        case GGML_TYPE_IQ2_S:
            return dequantize_row_iq2_s_cuda;
        case GGML_TYPE_IQ3_XXS:
            return dequantize_row_iq3_xxs_cuda;
        case GGML_TYPE_IQ1_S:
            return dequantize_row_iq1_s_cuda;
        case GGML_TYPE_IQ1_M:
            return dequantize_row_iq1_m_cuda;
        case GGML_TYPE_IQ4_NL:
            return dequantize_row_iq4_nl_cuda;
        case GGML_TYPE_IQ4_XS:
            return dequantize_row_iq4_xs_cuda;
        case GGML_TYPE_IQ3_S:
            return dequantize_row_iq3_s_cuda;
        case GGML_TYPE_F16:
            // plain element-wise half -> float conversion
            return convert_unary_cuda<half>;
        default:
            return nullptr;
    }
}
|
13
ggml-cuda/convert.cuh
Normal file
13
ggml-cuda/convert.cuh
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
|
||||||
|
|
||||||
|
template<typename T>
|
||||||
|
using to_t_cuda_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int k, cudaStream_t stream);
|
||||||
|
|
||||||
|
typedef to_t_cuda_t<float> to_fp32_cuda_t;
|
||||||
|
typedef to_t_cuda_t<half> to_fp16_cuda_t;
|
||||||
|
|
||||||
|
to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type);
|
||||||
|
|
||||||
|
to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type);
|
461
ggml-cuda/cpy.cu
Normal file
461
ggml-cuda/cpy.cu
Normal file
@ -0,0 +1,461 @@
|
|||||||
|
#include "cpy.cuh"
|
||||||
|
|
||||||
|
typedef void (*cpy_kernel_t)(const char * cx, char * cdst);
|
||||||
|
|
||||||
|
// Copy a single float from the raw source byte pointer to the destination.
static __device__ void cpy_1_f32_f32(const char * cxi, char * cdsti) {
    *(float *) cdsti = *(const float *) cxi;
}
|
||||||
|
|
||||||
|
// Convert a single float to half and store it at the destination byte pointer.
static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) {
    *(half *) cdsti = __float2half(*(const float *) cxi);
}
|
||||||
|
|
||||||
|
// Copy a single half from the raw source byte pointer to the destination.
static __device__ void cpy_1_f16_f16(const char * cxi, char * cdsti) {
    *(half *) cdsti = *(const half *) cxi;
}
|
||||||
|
|
||||||
|
// Widen a single half to float and store it at the destination byte pointer.
static __device__ void cpy_1_f16_f32(const char * cxi, char * cdsti) {
    *(float *) cdsti = *(const half *) cxi;
}
|
||||||
|
|
||||||
|
// Element-wise copy between two (possibly differently strided) 4-D tensors.
// ne is the total element count; ne0x/nb0x describe the source extents and
// byte strides, ne1x/nb1x the destination's. One thread per element.
template <cpy_kernel_t cpy_1>
static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne,
                                   const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
                                   const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
                                   const int nb12, const int nb13) {
    const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;

    if (i >= ne) {
        return;
    }

    // determine indices i03/i13, i02/i12, i01/i11, i00/i10 as a function of index i of flattened tensor
    // then combine those indices with the corresponding byte offsets to get the total offsets
    const int64_t i03 = i/(ne00 * ne01 * ne02);
    const int64_t i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01);
    const int64_t i01 = (i - i03*ne00*ne01*ne02  -  i02*ne01*ne00) / ne00;
    const int64_t i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00;
    const int64_t x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03 * nb03;

    const int64_t i13 = i/(ne10 * ne11 * ne12);
    const int64_t i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11);
    const int64_t i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10;
    const int64_t i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10;
    const int64_t dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13 * nb13;

    // delegate the actual element conversion/copy to the template parameter
    cpy_1(cx + x_offset, cdst + dst_offset);
}
|
||||||
|
|
||||||
|
// Quantize one block of QK8_0 floats at cxi into a q8_0 block at cdsti.
static __device__ void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    block_q8_0 * dsti = (block_q8_0 *) cdsti;

    float amax = 0.0f; // absolute max

    for (int j = 0; j < QK8_0; j++) {
        const float v = xi[j];
        amax = fmaxf(amax, fabsf(v));
    }

    // scale so the largest magnitude maps to 127; id is its (guarded) inverse
    const float d = amax / ((1 << 7) - 1);
    const float id = d ? 1.0f/d : 0.0f;

    dsti->d = d;

    for (int j = 0; j < QK8_0; ++j) {
        const float x0 = xi[j]*id;

        dsti->qs[j] = roundf(x0);
    }
}
|
||||||
|
|
||||||
|
// Quantize one block of QK4_0 floats at cxi into a q4_0 block at cdsti.
static __device__ void cpy_blck_f32_q4_0(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    block_q4_0 * dsti = (block_q4_0 *) cdsti;

    float amax = 0.0f;
    float vmax = 0.0f;

    // track the signed value with the largest magnitude
    for (int j = 0; j < QK4_0; ++j) {
        const float v = xi[j];
        if (amax < fabsf(v)) {
            amax = fabsf(v);
            vmax = v;
        }
    }

    // map the extreme value to -8 so quantized values fit in [0, 15] after +8.5 shift
    const float d  = vmax / -8;
    const float id = d ? 1.0f/d : 0.0f;

    dsti->d = d;

    // pack two 4-bit values per byte: low nibble from the first half of the
    // block, high nibble from the second half
    for (int j = 0; j < QK4_0/2; ++j) {
        const float x0 = xi[0       + j]*id;
        const float x1 = xi[QK4_0/2 + j]*id;

        const uint8_t xi0 = min(15, (int8_t)(x0 + 8.5f));
        const uint8_t xi1 = min(15, (int8_t)(x1 + 8.5f));

        dsti->qs[j]  = xi0;
        dsti->qs[j] |= xi1 << 4;
    }
}
|
||||||
|
|
||||||
|
// Quantize one block of QK4_1 floats at cxi into a q4_1 block (scale + min) at cdsti.
static __device__ void cpy_blck_f32_q4_1(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    block_q4_1 * dsti = (block_q4_1 *) cdsti;

    float vmin = FLT_MAX;
    float vmax = -FLT_MAX;

    for (int j = 0; j < QK4_1; ++j) {
        const float v = xi[j];

        if (v < vmin) vmin = v;
        if (v > vmax) vmax = v;
    }

    // affine quantization: value = d * q + vmin, with q in [0, 15]
    const float d  = (vmax - vmin) / ((1 << 4) - 1);
    const float id = d ? 1.0f/d : 0.0f;

    dsti->dm.x = d;
    dsti->dm.y = vmin;

    // pack two 4-bit values per byte: low nibble from the first half of the
    // block, high nibble from the second half
    for (int j = 0; j < QK4_1/2; ++j) {
        const float x0 = (xi[0       + j] - vmin)*id;
        const float x1 = (xi[QK4_1/2 + j] - vmin)*id;

        const uint8_t xi0 = min(15, (int8_t)(x0 + 0.5f));
        const uint8_t xi1 = min(15, (int8_t)(x1 + 0.5f));

        dsti->qs[j]  = xi0;
        dsti->qs[j] |= xi1 << 4;
    }
}
|
||||||
|
|
||||||
|
// Quantize one block of QK5_0 floats into a q5_0 block: 5 bits per value
// (low 4 bits packed in qs, 5th bits gathered into the qh bitfield) with a
// single shared scale.
static __device__ void cpy_blck_f32_q5_0(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    block_q5_0 * dsti = (block_q5_0 *) cdsti;

    float amax = 0.0f; // largest magnitude in the block
    float vmax = 0.0f; // signed value that has that magnitude

    for (int j = 0; j < QK5_0; ++j) {
        const float v = xi[j];
        if (amax < fabsf(v)) {
            amax = fabsf(v);
            vmax = v;
        }
    }

    // scale chosen so vmax maps to -16, the most negative 5-bit code
    const float d  = vmax / -16;
    const float id = d ? 1.0f/d : 0.0f; // avoid div-by-zero for an all-zero block

    dsti->d = d;

    uint32_t qh = 0; // collects the 5th bit of each of the QK5_0 values
    for (int j = 0; j < QK5_0/2; ++j) {
        const float x0 = xi[0       + j]*id;
        const float x1 = xi[QK5_0/2 + j]*id;

        // +16.5f shifts the signed value into [0,32) with rounding; clamp to 31
        const uint8_t xi0 = min(31, (int8_t)(x0 + 16.5f));
        const uint8_t xi1 = min(31, (int8_t)(x1 + 16.5f));

        dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
        // high (5th) bits: first half in qh bits [0, QK5_0/2), second half above
        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
        qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
    }
    memcpy(dsti->qh, &qh, sizeof(qh));
}
|
||||||
|
|
||||||
|
// Quantize one block of QK5_1 floats into a q5_1 block: 5 bits per value
// (low 4 bits in qs, 5th bits in qh) with a per-block scale (dm.x) and
// minimum (dm.y).
static __device__ void cpy_blck_f32_q5_1(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    block_q5_1 * dsti = (block_q5_1 *) cdsti;

    // NOTE: these locals shadow the min/max functions within this scope
    float min = xi[0];
    float max = xi[0];

    for (int j = 1; j < QK5_1; ++j) {
        const float v = xi[j];
        min = v < min ? v : min;
        max = v > max ? v : max;
    }

    // scale spreads [min, max] over the 32 available codes
    const float d  = (max - min) / 31;
    const float id = d ? 1.0f/d : 0.0f; // avoid div-by-zero for a constant block

    dsti->dm.x = d;
    dsti->dm.y = min;

    uint32_t qh = 0; // collects the 5th bit of each of the QK5_1 values
    for (int j = 0; j < QK5_1/2; ++j) {
        const float x0 = (xi[0       + j] - min)*id;
        const float x1 = (xi[QK5_1/2 + j] - min)*id;

        // round to nearest code; values are already in [0, 31] by construction
        const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
        const uint8_t xi1 = (uint8_t)(x1 + 0.5f);

        dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
        // high (5th) bits: first half in qh bits [0, QK5_1/2), second half above
        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
        qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
    }
    memcpy(dsti->qh, &qh, sizeof(qh));
}
|
||||||
|
|
||||||
|
|
||||||
|
// Return the index of the entry in val[0..n-1] (sorted ascending) that is
// closest to x, via binary search followed by a nearest-neighbor pick.
static __device__ __forceinline__ int best_index_int8(int n, const int8_t * val, float x) {
    if (x <= val[0]) return 0;
    if (x >= val[n-1]) return n-1;
    // invariant: val[ml] <= x < val[mu]
    int ml = 0, mu = n-1;
    while (mu-ml > 1) {
        int mav = (ml+mu)/2;
        if (x < val[mav]) mu = mav; else ml = mav;
    }
    // choose whichever of the two bracketing entries is nearer to x
    return x - val[mu-1] < val[mu] - x ? mu-1 : mu;
}
|
||||||
|
|
||||||
|
// Quantize one block of QK4_NL floats into an iq4_nl block: each value is a
// 4-bit index into the non-linear codebook kvalues_iq4nl; the scale is
// refined with a weighted least-squares fit after quantization.
static __device__ void cpy_blck_f32_iq4_nl(const char * cxi, char * cdsti) {
    const float * xi = (const float *) cxi;
    block_iq4_nl * dsti = (block_iq4_nl *) cdsti;

    float amax = 0.0f; // largest magnitude in the block
    float vmax = 0.0f; // signed value that has that magnitude

    for (int j = 0; j < QK4_NL; ++j) {
        const float v = xi[j];
        if (amax < fabsf(v)) {
            amax = fabsf(v);
            vmax = v;
        }
    }

    // initial scale maps vmax onto the first (most negative) codebook entry
    float d = vmax / kvalues_iq4nl[0];
    const float id = d ? 1.0f/d : 0.0f; // avoid div-by-zero for an all-zero block

    // quantize, while accumulating the sums needed to refit the scale
    float sumqx = 0, sumq2 = 0;
    for (int j = 0; j < QK4_NL/2; ++j) {
        const float x0 = xi[0        + j]*id;
        const float x1 = xi[QK4_NL/2 + j]*id;
        const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl, x0);
        const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl, x1);
        // first half in the low nibbles, second half in the high nibbles
        dsti->qs[j] = xi0 | (xi1 << 4);
        const float v0 = kvalues_iq4nl[xi0];
        const float v1 = kvalues_iq4nl[xi1];
        // weights are the squared source values (larger values matter more)
        const float w0 = xi[0        + j]*xi[0        + j];
        const float w1 = xi[QK4_NL/2 + j]*xi[QK4_NL/2 + j];
        sumqx += w0*v0*xi[j] + w1*v1*xi[QK4_NL/2 + j];
        sumq2 += w0*v0*v0 + w1*v1*v1;
    }

    // weighted least-squares scale; fall back to the initial estimate when
    // the denominator is zero (all-zero block)
    dsti->d = sumq2 > 0 ? sumqx/sumq2 : d;
}
|
||||||
|
|
||||||
|
// Copy-with-quantization kernel: each thread converts one quantization block
// of qk source floats into one destination block via cpy_blck. ne is the
// total element count; ne0x/nb0x are the source extents (elements) and
// strides (bytes), ne1x/nb1x the destination ones. Launched with one thread
// per qk elements; threads past the end return early.
template <cpy_kernel_t cpy_blck, int qk>
static __global__ void cpy_f32_q(const char * cx, char * cdst, const int ne,
                                 const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
                                 const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
                                 const int nb12, const int nb13) {
    // linear index of the first element handled by this thread
    const int i = (blockDim.x*blockIdx.x + threadIdx.x)*qk;

    if (i >= ne) {
        return;
    }

    // de-linearize i into 4D source coordinates and a source byte offset
    const int i03 = i/(ne00 * ne01 * ne02);
    const int i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01);
    const int i01 = (i - i03*ne00*ne01*ne02  -  i02*ne01*ne00) / ne00;
    const int i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00;
    const int x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03 * nb03;

    // de-linearize i into 4D destination coordinates; the innermost index is
    // divided by qk because the destination is stored in quantized blocks
    const int i13 = i/(ne10 * ne11 * ne12);
    const int i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11);
    const int i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10;
    const int i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10;
    const int dst_offset = (i10/qk)*nb10 + i11*nb11 + i12*nb12 + i13*nb13;

    cpy_blck(cx + x_offset, cdst + dst_offset);
}
|
||||||
|
|
||||||
|
// Launch the element-wise f16 -> f32 copy kernel on the given stream.
static void ggml_cpy_f16_f32_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // one thread per element, grid rounded up to full blocks
    const int n_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
    cpy_f32_f16<cpy_1_f16_f32><<<n_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Launch the element-wise f32 -> f32 copy kernel on the given stream.
static void ggml_cpy_f32_f32_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // one thread per element, grid rounded up to full blocks
    const int n_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
    cpy_f32_f16<cpy_1_f32_f32><<<n_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Launch the element-wise f32 -> f16 copy kernel on the given stream.
static void ggml_cpy_f32_f16_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // one thread per element, grid rounded up to full blocks
    const int n_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
    cpy_f32_f16<cpy_1_f32_f16><<<n_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Launch the f32 -> q8_0 quantizing copy; one thread per q8_0 block.
static void ggml_cpy_f32_q8_0_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // the element count must be a whole number of quantization blocks
    GGML_ASSERT(ne % QK8_0 == 0);
    const int n_blocks = ne / QK8_0;
    cpy_f32_q<cpy_blck_f32_q8_0, QK8_0><<<n_blocks, 1, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Launch the f32 -> q4_0 quantizing copy; one thread per q4_0 block.
static void ggml_cpy_f32_q4_0_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // the element count must be a whole number of quantization blocks
    GGML_ASSERT(ne % QK4_0 == 0);
    const int n_blocks = ne / QK4_0;
    cpy_f32_q<cpy_blck_f32_q4_0, QK4_0><<<n_blocks, 1, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Launch the f32 -> q4_1 quantizing copy; one thread per q4_1 block.
static void ggml_cpy_f32_q4_1_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // the element count must be a whole number of quantization blocks
    GGML_ASSERT(ne % QK4_1 == 0);
    const int n_blocks = ne / QK4_1;
    cpy_f32_q<cpy_blck_f32_q4_1, QK4_1><<<n_blocks, 1, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Launch the f32 -> q5_0 quantizing copy; one thread per q5_0 block.
static void ggml_cpy_f32_q5_0_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // the element count must be a whole number of quantization blocks
    GGML_ASSERT(ne % QK5_0 == 0);
    const int n_blocks = ne / QK5_0;
    cpy_f32_q<cpy_blck_f32_q5_0, QK5_0><<<n_blocks, 1, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Launch the f32 -> q5_1 quantizing copy; one thread per q5_1 block.
static void ggml_cpy_f32_q5_1_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // the element count must be a whole number of quantization blocks
    GGML_ASSERT(ne % QK5_1 == 0);
    const int n_blocks = ne / QK5_1;
    cpy_f32_q<cpy_blck_f32_q5_1, QK5_1><<<n_blocks, 1, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Launch the f32 -> iq4_nl quantizing copy; one thread per iq4_nl block.
static void ggml_cpy_f32_iq4_nl_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // the element count must be a whole number of quantization blocks
    GGML_ASSERT(ne % QK4_NL == 0);
    const int n_blocks = ne / QK4_NL;
    cpy_f32_q<cpy_blck_f32_iq4_nl, QK4_NL><<<n_blocks, 1, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Launch the element-wise f16 -> f16 copy kernel on the given stream.
static void ggml_cpy_f16_f16_cuda(
    const char * cx, char * cdst, const int ne,
    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream) {

    // one thread per element, grid rounded up to full blocks
    const int n_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
    cpy_f32_f16<cpy_1_f16_f16><<<n_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>>(
        cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
}
|
||||||
|
|
||||||
|
// Copy src0 into src1, converting between the supported (src, dst) type
// pairs; aborts on an unsupported combination. Both tensors must hold the
// same number of elements; strides are passed in bytes so non-contiguous
// layouts are handled by the kernels.
void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1) {
    const int64_t ne = ggml_nelements(src0);
    GGML_ASSERT(ne == ggml_nelements(src1));

    // the copy kernels index with 32-bit ints
    GGML_ASSERT(ggml_nbytes(src0) <= INT_MAX);
    GGML_ASSERT(ggml_nbytes(src1) <= INT_MAX);

    // source extents (elements) ...
    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1];
    const int64_t ne02 = src0->ne[2];

    //GGML_ASSERT(src0->ne[3] == 1);

    // ... and strides (bytes)
    const int64_t nb00 = src0->nb[0];
    const int64_t nb01 = src0->nb[1];
    const int64_t nb02 = src0->nb[2];
    const int64_t nb03 = src0->nb[3];

    // destination extents (elements) ...
    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    const int64_t ne12 = src1->ne[2];

    //GGML_ASSERT(src1->ne[3] == 1);

    // ... and strides (bytes)
    const int64_t nb10 = src1->nb[0];
    const int64_t nb11 = src1->nb[1];
    const int64_t nb12 = src1->nb[2];
    const int64_t nb13 = src1->nb[3];

    cudaStream_t main_stream = ctx.stream();

    char * src0_ddc = (char *) src0->data;
    char * src1_ddc = (char *) src1->data;

    // dispatch on the (source type, destination type) pair
    if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
        ggml_cpy_f32_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
        ggml_cpy_f32_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) {
        ggml_cpy_f32_q8_0_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) {
        ggml_cpy_f32_q4_0_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) {
        ggml_cpy_f32_q4_1_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_0) {
        ggml_cpy_f32_q5_0_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_IQ4_NL) {
        ggml_cpy_f32_iq4_nl_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_1) {
        ggml_cpy_f32_q5_1_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) {
        ggml_cpy_f16_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) {
        ggml_cpy_f16_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream);
    } else {
        fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__,
                ggml_type_name(src0->type), ggml_type_name(src1->type));
        GGML_ASSERT(false);
    }
}
|
||||||
|
|
||||||
|
// GGML_OP_DUP is implemented as a typed copy of the first source into dst.
void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    ggml_cuda_cpy(ctx, dst->src[0], dst);
}
|
7
ggml-cuda/cpy.cuh
Normal file
7
ggml-cuda/cpy.cuh
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
#define CUDA_CPY_BLOCK_SIZE 32
|
||||||
|
|
||||||
|
void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1);
|
||||||
|
|
||||||
|
void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
103
ggml-cuda/dequantize.cuh
Normal file
103
ggml-cuda/dequantize.cuh
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
// Dequantize two q4_0 values packed in byte iqs of block ib: low nibble ->
// v.x, high nibble -> v.y, both shifted by -8 and scaled by the block scale.
static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
    const block_q4_0 * x = (const block_q4_0 *) vx;

    const dfloat d = x[ib].d;

    const int vui = x[ib].qs[iqs];

    v.x = vui & 0xF;
    v.y = vui >> 4;

#ifdef GGML_CUDA_F16
    // half2 path: process both lanes at once
    v = __hsub2(v, {8.0f, 8.0f});
    v = __hmul2(v, {d, d});
#else
    v.x = (v.x - 8.0f) * d;
    v.y = (v.y - 8.0f) * d;
#endif // GGML_CUDA_F16
}
|
||||||
|
|
||||||
|
// Dequantize two q4_1 values packed in byte iqs of block ib: low nibble ->
// v.x, high nibble -> v.y, scaled by dm.x and offset by the block minimum dm.y.
static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
    const block_q4_1 * x = (const block_q4_1 *) vx;

    const dfloat d = __low2half(x[ib].dm);  // scale
    const dfloat m = __high2half(x[ib].dm); // minimum

    const int vui = x[ib].qs[iqs];

    v.x = vui & 0xF;
    v.y = vui >> 4;

#ifdef GGML_CUDA_F16
    // half2 path: process both lanes at once
    v = __hmul2(v, {d, d});
    v = __hadd2(v, {m, m});
#else
    v.x = (v.x * d) + m;
    v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
}
|
||||||
|
|
||||||
|
// Dequantize two q5_0 values: 4 low bits come from byte iqs of qs, the 5th
// bits from the qh bitfield; the results are shifted by -16 and scaled.
static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
    const block_q5_0 * x = (const block_q5_0 *) vx;

    const dfloat d = x[ib].d;

    uint32_t qh;
    memcpy(&qh, x[ib].qh, sizeof(qh));

    // extract the 5th bit for the low-nibble (xh_0) and high-nibble (xh_1) value
    const int xh_0 = ((qh >> (iqs +  0)) << 4) & 0x10;
    const int xh_1 = ((qh >> (iqs + 12))     ) & 0x10;

    v.x = ((x[ib].qs[iqs] & 0xf) | xh_0);
    v.y = ((x[ib].qs[iqs] >>  4) | xh_1);

#ifdef GGML_CUDA_F16
    // half2 path: process both lanes at once
    v = __hsub2(v, {16.0f, 16.0f});
    v = __hmul2(v, {d, d});
#else
    v.x = (v.x - 16.0f) * d;
    v.y = (v.y - 16.0f) * d;
#endif // GGML_CUDA_F16
}
|
||||||
|
|
||||||
|
// Dequantize two q5_1 values: 4 low bits come from byte iqs of qs, the 5th
// bits from the qh bitfield; scaled by dm.x and offset by the minimum dm.y.
static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
    const block_q5_1 * x = (const block_q5_1 *) vx;

    const dfloat d = __low2half(x[ib].dm);  // scale
    const dfloat m = __high2half(x[ib].dm); // minimum

    uint32_t qh;
    memcpy(&qh, x[ib].qh, sizeof(qh));

    // extract the 5th bit for the low-nibble (xh_0) and high-nibble (xh_1) value
    const int xh_0 = ((qh >> (iqs +  0)) << 4) & 0x10;
    const int xh_1 = ((qh >> (iqs + 12))     ) & 0x10;

    v.x = ((x[ib].qs[iqs] & 0xf) | xh_0);
    v.y = ((x[ib].qs[iqs] >>  4) | xh_1);

#ifdef GGML_CUDA_F16
    // half2 path: process both lanes at once
    v = __hmul2(v, {d, d});
    v = __hadd2(v, {m, m});
#else
    v.x = (v.x * d) + m;
    v.y = (v.y * d) + m;
#endif // GGML_CUDA_F16
}
|
||||||
|
|
||||||
|
// Dequantize two adjacent q8_0 values (qs[iqs] and qs[iqs+1]) of block ib by
// multiplying the signed 8-bit codes with the block scale.
static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
    const block_q8_0 * x = (const block_q8_0 *) vx;

    const dfloat d = x[ib].d;

    v.x = x[ib].qs[iqs + 0];
    v.y = x[ib].qs[iqs + 1];

#ifdef GGML_CUDA_F16
    // half2 path: process both lanes at once
    v = __hmul2(v, {d, d});
#else
    v.x *= d;
    v.y *= d;
#endif // GGML_CUDA_F16
}
|
40
ggml-cuda/diagmask.cu
Normal file
40
ggml-cuda/diagmask.cu
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
#include "diagmask.cuh"
|
||||||
|
|
||||||
|
// Causal diagonal mask: for each element, positions beyond the diagonal
// (col > n_past + row-within-channel) are pushed towards -inf.
// Launch layout: blockIdx.x/threadIdx.x index rows, the y dimension tiles
// columns; out-of-range columns return early (rows are launched exactly).
static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past) {
    const int col = blockDim.y*blockIdx.y + threadIdx.y;
    const int row = blockDim.x*blockIdx.x + threadIdx.x;

    if (col >= ncols) {
        return;
    }

    const int i = row*ncols + col;
    //dst[i] = col > (n_past + row % rows_per_channel) ? -INFINITY : x[i];
    //dst[i] = x[i] - (col > n_past + row % rows_per_channel) * INT_MAX; // equivalent within rounding error but slightly faster on GPU
    // branchless form: subtract FLT_MAX from the masked positions only
    dst[i] = x[i] - (col > n_past + row % rows_per_channel) * FLT_MAX;
}
|
||||||
|
|
||||||
|
// Launch diag_mask_inf_f32: one grid row per matrix row, columns tiled along
// the y dimension in chunks of CUDA_DIAG_MASK_INF_BLOCK_SIZE.
static void diag_mask_inf_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, const int rows_per_channel, const int n_past, cudaStream_t stream) {
    const dim3 threads(1, CUDA_DIAG_MASK_INF_BLOCK_SIZE, 1);
    const int  n_col_blocks = (ncols_x + CUDA_DIAG_MASK_INF_BLOCK_SIZE - 1) / CUDA_DIAG_MASK_INF_BLOCK_SIZE;
    const dim3 blocks(nrows_x, n_col_blocks, 1);
    diag_mask_inf_f32<<<blocks, threads, 0, stream>>>(x, dst, ncols_x, rows_per_channel, n_past);
}
|
||||||
|
|
||||||
|
// Backend entry point for GGML_OP_DIAG_MASK_INF: applies the causal mask to
// src0 and writes the result to dst. Both tensors must be F32. The number of
// past tokens is read from the op parameters.
void ggml_cuda_op_diag_mask_inf(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const float * src0_d = (const float *)src0->data;
    float * dst_d = (float *)dst->data;
    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    const int64_t ne00 = src0->ne[0];
    const int64_t ne01 = src0->ne[1]; // rows per channel for the diagonal offset
    const int nrows0 = ggml_nrows(src0);

    const int n_past = ((int32_t *) dst->op_params)[0];

    diag_mask_inf_f32_cuda(src0_d, dst_d, ne00, nrows0, ne01, n_past, stream);
}
|
5
ggml-cuda/diagmask.cuh
Normal file
5
ggml-cuda/diagmask.cuh
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
#define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32
|
||||||
|
|
||||||
|
void ggml_cuda_op_diag_mask_inf(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
821
ggml-cuda/dmmv.cu
Normal file
821
ggml-cuda/dmmv.cu
Normal file
@ -0,0 +1,821 @@
|
|||||||
|
#include "dmmv.cuh"
|
||||||
|
#include "dequantize.cuh"
|
||||||
|
#include "convert.cuh"
|
||||||
|
|
||||||
|
// dmmv = dequantize_mul_mat_vec
|
||||||
|
#ifndef GGML_CUDA_DMMV_X
|
||||||
|
#define GGML_CUDA_DMMV_X 32
|
||||||
|
#endif
|
||||||
|
#ifndef GGML_CUDA_MMV_Y
|
||||||
|
#define GGML_CUDA_MMV_Y 1
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef K_QUANTS_PER_ITERATION
|
||||||
|
#define K_QUANTS_PER_ITERATION 2
|
||||||
|
#else
|
||||||
|
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Dequantize-and-multiply q2_K matrix rows with the f32 vector yy, writing
// one dot product per row into dst. Launch layout: blockIdx.x*blockDim.y +
// threadIdx.y selects the row, the 32 threads along x each accumulate a
// partial sum that is combined with a warp reduction. ncols must be a
// multiple of QK_K.
static __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");

    const int row = blockIdx.x*blockDim.y + threadIdx.y;
    // bounds check: the grid is rounded up, so row can reach or exceed nrows;
    // the previous `row > nrows` let row == nrows through (out-of-bounds)
    if (row >= nrows) return;

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q2_K * x = (const block_q2_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

#if QK_K == 256
    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...15
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int step = 16/K_QUANTS_PER_ITERATION;

    const int im = tid/step;                             // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im;                        // 0...15 or 0...7

    const int l0 = K_QUANTS_PER_ITERATION*in;            // 0...15 or 0...14 in steps of 2
    const int q_offset = 32*im + l0;
    const int s_offset = 8*im;
    const int y_offset = 128*im + l0;

    // unpacked 4-bit scales (d) and minima (m), 8 of each per half-block
    uint32_t aux[4];
    const uint8_t * d = (const uint8_t *)aux;
    const uint8_t * m = (const uint8_t *)(aux + 2);

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + y_offset;
        const uint8_t * q = x[i].qs + q_offset;

        const float dall = __low2half(x[i].dm);   // super-block scale
        const float dmin = __high2half(x[i].dm);  // super-block minimum scale

        // split packed scale bytes into low (scale) and high (min) nibbles
        const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset);
        aux[0] =  a[0]       & 0x0f0f0f0f;
        aux[1] =  a[1]       & 0x0f0f0f0f;
        aux[2] = (a[0] >> 4) & 0x0f0f0f0f;
        aux[3] = (a[1] >> 4) & 0x0f0f0f0f;

        float sum1 = 0, sum2 = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            // each qs byte holds four 2-bit values for columns 32 apart
            sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3)
                  + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3)
                  + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3)
                  + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3)
                  + y[l+16] * d[1] * ((q[l+16] >> 0) & 3)
                  + y[l+48] * d[3] * ((q[l+16] >> 2) & 3)
                  + y[l+80] * d[5] * ((q[l+16] >> 4) & 3)
                  +y[l+112] * d[7] * ((q[l+16] >> 6) & 3);
            sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6]
                  + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7];

        }
        tmp += dall * sum1 - dmin * sum2;

    }
#else
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION);  // 0...15 or 0...7
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION);  // 0....1 or 0...3
    const int offset = tid * K_QUANTS_PER_ITERATION;

    uint32_t uaux[2];
    const uint8_t * d = (const uint8_t *)uaux;

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + offset;
        const uint8_t * q = x[i].qs + offset;
        const uint32_t * s = (const uint32_t *)x[i].scales;

        uaux[0] =  s[0]       & 0x0f0f0f0f;
        uaux[1] = (s[0] >> 4) & 0x0f0f0f0f;

        const float2 dall = __half22float2(x[i].dm);

        float sum1 = 0, sum2 = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            const uint8_t ql = q[l];
            sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3)
                  + y[l+16] * d[1] * ((ql >> 2) & 3)
                  + y[l+32] * d[2] * ((ql >> 4) & 3)
                  + y[l+48] * d[3] * ((ql >> 6) & 3);
            sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7];
        }
        tmp += dall.x * sum1 - dall.y * sum2;
    }
#endif

    // sum up partial sums and write back result
    tmp = warp_reduce_sum(tmp);

    if (threadIdx.x == 0) {
        dst[row] = tmp;
    }
}
|
||||||
|
|
||||||
|
// Dequantize-and-multiply q3_K matrix rows with the f32 vector yy, writing
// one dot product per row into dst. Launch layout: blockIdx.x*blockDim.y +
// threadIdx.y selects the row, the 32 threads along x each accumulate a
// partial sum that is combined with a warp reduction. ncols must be a
// multiple of QK_K.
static __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    const int row = blockIdx.x*blockDim.y + threadIdx.y;
    // bounds check: the grid is rounded up, so row can reach or exceed nrows;
    // the previous `row > nrows` let row == nrows through (out-of-bounds)
    if (row >= nrows) return;

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q3_K * x = (const block_q3_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

#if QK_K == 256

    const uint16_t kmask1 = 0x0303;
    const uint16_t kmask2 = 0x0f0f;

    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int n  = K_QUANTS_PER_ITERATION;               // iterations in the inner loop
    const int step = 16/K_QUANTS_PER_ITERATION;
    const int im = tid/step;                             // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im;                        // 0....15 or 0...7

    const uint8_t m = 1 << (4*im);                       // hmask bit for this half of the super-block

    const int l0 = n*in;                                 // 0...15 or 0...14 in steps of 2
    const int q_offset =  32*im + l0;
    const int y_offset = 128*im + l0;

    // unpacked 6-bit signed scales, reassembled from the packed scale words
    uint16_t utmp[4];
    const int8_t * s = (const int8_t *)utmp;

    const uint16_t s_shift = 4*im;

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + y_offset;
        const uint8_t * q = x[i].qs + q_offset;
        const uint8_t * h = x[i].hmask + l0;

        const uint16_t * a = (const uint16_t *)x[i].scales;
        utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4);
        utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4);
        utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4);
        utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4);

        const float d = x[i].d;

        float sum = 0;
        for (int l = 0; l < n; ++l) {
            // 2-bit quants extended to 3 bits by the hmask; missing high bit means -4
            sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4))
                 + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4))
                 + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4))
                 + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4));
            sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4))
                 + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4))
                 + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4))
                + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4));
        }
        tmp += d * sum;

    }
#else

    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION);  // 0...15 or 0...7
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION);  // 0....1 or 0...3
    const int offset = tid * K_QUANTS_PER_ITERATION;         // 0...15 or 0...14
    const int in = offset/8;                                 // 0 or 1
    const int im = offset%8;                                 // 0...7

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {

        const float   * y = yy + i * QK_K + offset;
        const uint8_t * q = x[i].qs + offset;
        const uint8_t * s = x[i].scales;

        const float dall = (float)x[i].d;

        float sum = 0;
        for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
            const uint8_t hl = x[i].hmask[im+l] >> in;
            const uint8_t ql = q[l];
            // 2-bit quants extended to 3 bits by the hmask; missing high bit means -4
            sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4))
                 + y[l+16] * dall * ((s[0] >>  4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4))
                 + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 0 : 4))
                 + y[l+48] * dall * ((s[1] >>  4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 0 : 4));
        }
        tmp += sum;
    }
#endif

    // sum up partial sums and write back result
    tmp = warp_reduce_sum(tmp);

    if (threadIdx.x == 0) {
        dst[row] = tmp;
    }
}
|
||||||
|
|
||||||
|
// Matrix-vector product for Q4_K-quantized weights: dst[row] = x_row · yy.
// One output row per (blockIdx.x, threadIdx.y) pair; the 32 lanes of the warp
// split the row's QK_K super-blocks and their partial sums are warp-reduced.
// Preconditions: ncols % QK_K == 0; launched with blockDim.x == 32 (WARP_SIZE).
static __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    const int row = blockIdx.x*blockDim.y + threadIdx.y;
    if (row >= nrows) return; // was 'row > nrows': row == nrows slipped through the guard and read one row past the end of vx

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q4_K * x = (const block_q4_K *)vx + ib0;

#if QK_K == 256
    // masks used to unpack the 6-bit scales/mins packed into x[i].scales
    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION;  // 0 or 0,1

    const int step = 8/K_QUANTS_PER_ITERATION;           // 8 or 4

    const int il  = tid/step;                            // 0...3
    const int ir  = tid - step*il;                       // 0...7 or 0...3
    const int n   = 2 * K_QUANTS_PER_ITERATION;          // 2 or 4

    const int im = il/2;  // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    uint16_t aux[4];
    const uint8_t * sc = (const uint8_t *)aux;

#if K_QUANTS_PER_ITERATION == 2
    uint32_t q32[4];
    const uint8_t * q4 = (const uint8_t *)q32;
#else
    uint16_t q16[4];
    const uint8_t * q4 = (const uint8_t *)q16;
#endif

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y1 = yy + i*QK_K + y_offset;
        const float   * y2 = y1 + 128;

        const float dall = __low2half(x[i].dm);
        const float dmin = __high2half(x[i].dm);

        // unpack the four (scale, min) pairs used by this thread
        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux[0] = a[im+0] & kmask1;
        aux[1] = a[im+2] & kmask1;
        aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
        aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);

#if K_QUANTS_PER_ITERATION == 2
        const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset);
        const uint32_t * q2 = q1 + 16;

        // low and high nibbles of the packed 4-bit quants
        q32[0] = q1[0] & 0x0f0f0f0f;
        q32[1] = q1[0] & 0xf0f0f0f0;
        q32[2] = q2[0] & 0x0f0f0f0f;
        q32[3] = q2[0] & 0xf0f0f0f0;

        float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < 4; ++l) {
            s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+ 4];
            s.z += y2[l] * q4[l+8]; s.w += y2[l+32] * q4[l+12];
            smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
        }
        // 1.f/16.f compensates for the high nibbles not having been shifted down
        tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
#else
        const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset);
        const uint16_t * q2 = q1 + 32;

        q16[0] = q1[0] & 0x0f0f;
        q16[1] = q1[0] & 0xf0f0;
        q16[2] = q2[0] & 0x0f0f;
        q16[3] = q2[0] & 0xf0f0;

        float4 s = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        for (int l = 0; l < 2; ++l) {
            s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2];
            s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6];
            smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
        }
        tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
#endif

    }
#else
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION);  // 0...15
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION);

    const int step = tid * K_QUANTS_PER_ITERATION;

    uint16_t aux16[2];
    const uint8_t * s = (const uint8_t *)aux16;

    float tmp = 0;

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
        const uint8_t * q = x[i].qs + step;
        const float   * y = yy + i*QK_K + step;
        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux16[0] = a[0] & 0x0f0f;
        aux16[1] = (a[0] >> 4) & 0x0f0f;
        const float d = (float)x[i].dm[0];
        const float m = (float)x[i].dm[1];
        float sum = 0.f;
        for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
            sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2])
                 + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2])
                 + y[j+32] * (d * s[1] * (q[j+ 0] >>  4) - m * s[3])
                 + y[j+48] * (d * s[1] * (q[j+16] >>  4) - m * s[3]);
        }
        tmp += sum;
    }

#endif

    // sum up partial sums and write back result
    tmp = warp_reduce_sum(tmp);

    if (tid == 0) {
        dst[row] = tmp;
    }
}
|
||||||
|
|
||||||
|
// Matrix-vector product for Q5_K-quantized weights: dst[row] = x_row · yy.
// Launched with exactly one block per row (row = blockIdx.x), so no row bounds
// check is needed; the 32 lanes split the row's super-blocks and the partial
// sums are warp-reduced at the end. Precondition: ncols % QK_K == 0.
static __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols) {

    const int row = blockIdx.x;
    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q5_K * x = (const block_q5_K *)vx + ib0;

    float tmp = 0; // partial sum for thread in warp

#if QK_K == 256
    // masks used to unpack the 6-bit scales/mins packed into x[i].scales
    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int tid = threadIdx.x/2;  // 0...15
    const int ix  = threadIdx.x%2;

    const int il  = tid/4;     // 0...3
    const int ir  = tid - 4*il;// 0...3
    const int n   = 2;

    const int im = il/2;  // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
    const int in = il%2;

    const int l0 = n*(2*ir + in);
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    // masks selecting this thread's bits in qh (quants there gain +16 when set)
    const uint8_t hm1  = 1 << (2*im);
    const uint8_t hm2  = hm1 << 4;

    uint16_t aux[4];
    const uint8_t * sc = (const uint8_t *)aux;

    uint16_t q16[8];
    const uint8_t * q4 = (const uint8_t *)q16;

    for (int i = ix; i < num_blocks_per_row; i += 2) {

        const uint8_t * ql1 = x[i].qs + q_offset;
        const uint8_t * qh  = x[i].qh + l0;
        const float   * y1  = yy + i*QK_K + y_offset;
        const float   * y2  = y1 + 128;

        const float dall = __low2half(x[i].dm);
        const float dmin = __high2half(x[i].dm);

        // unpack the four (scale, min) pairs used by this thread
        const uint16_t * a = (const uint16_t *)x[i].scales;
        aux[0] = a[im+0] & kmask1;
        aux[1] = a[im+2] & kmask1;
        aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
        aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);

        float4 sum = {0.f, 0.f, 0.f, 0.f};
        float smin = 0;
        // split the packed 4-bit quants into low/high nibbles for 8 sub-blocks
        const uint16_t * q1 = (const uint16_t *)ql1;
        const uint16_t * q2 = q1 + 32;
        q16[0] = q1[0] & 0x0f0f;
        q16[1] = q1[8] & 0x0f0f;
        q16[2] = (q1[0] >> 4) & 0x0f0f;
        q16[3] = (q1[8] >> 4) & 0x0f0f;
        q16[4] = q2[0] & 0x0f0f;
        q16[5] = q2[8] & 0x0f0f;
        q16[6] = (q2[0] >> 4) & 0x0f0f;
        q16[7] = (q2[8] >> 4) & 0x0f0f;
        for (int l = 0; l < n; ++l) {
            sum.x += y1[l+ 0] * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0))
                   + y1[l+16] * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0));
            sum.y += y1[l+32] * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0))
                   + y1[l+48] * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0));
            sum.z += y2[l+ 0] * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0))
                   + y2[l+16] * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0));
            sum.w += y2[l+32] * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0))
                   + y2[l+48] * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 16 : 0));
            smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3]
                  + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7];
        }
        tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin;
    }

#else
    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION);
    const int step = tid * K_QUANTS_PER_ITERATION;
    const int im = step/8;
    const int in = step%8;

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
        const uint8_t * q = x[i].qs + step;
        const int8_t  * s = x[i].scales;
        const float   * y = yy + i*QK_K + step;
        const float     d = x[i].d;
        float sum = 0.f;
        for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
            const uint8_t h = x[i].qh[in+j] >> im; // high (5th) bits for this group of quants
            sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16))
                 + y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16))
                 + y[j+32] * d * s[2] * ((q[j+ 0] >>  4) - ((h >> 4) & 1 ? 0 : 16))
                 + y[j+48] * d * s[3] * ((q[j+16] >>  4) - ((h >> 6) & 1 ? 0 : 16));
        }
        tmp += sum;
    }
#endif

    // sum up partial sums and write back result
    tmp = warp_reduce_sum(tmp);

    if (threadIdx.x == 0) {
        dst[row] = tmp;
    }
}
|
||||||
|
|
||||||
|
// Matrix-vector product for Q6_K-quantized weights: dst[row] = x_row · yy.
// One output row per (blockIdx.x, threadIdx.y); the warp's 32 lanes split the
// row's super-blocks and the partial sums are warp-reduced at the end.
// Preconditions: ncols % QK_K == 0; launched with blockDim.x == 32 (WARP_SIZE).
static __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) {

    static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");

    const int row = blockIdx.x*blockDim.y + threadIdx.y;
    if (row >= nrows) return; // was 'row > nrows': row == nrows escaped the guard and read one row past the end of vx

    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row;

    const block_q6_K * x = (const block_q6_K *)vx + ib0;

#if QK_K == 256

    const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16
    const int ix  = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1

    const int step = 16/K_QUANTS_PER_ITERATION;         // 16 or 8

    const int im = tid/step;                            // 0 or 1. 0 computes 0..., 1 computes 128...
    const int in = tid - step*im;                       // 0...15 or 0...7

#if K_QUANTS_PER_ITERATION == 1
    const int l0 = K_QUANTS_PER_ITERATION*in;           // 0...15
    const int is = 0;
#else
    const int l0 = 4 * in;                              // 0, 4, 8, ..., 28
    const int is = in / 4;
#endif
    const int ql_offset = 64*im + l0;
    const int qh_offset = 32*im + l0;
    const int s_offset  =  8*im + is;
    const int y_offset  = 128*im + l0;

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {

        const float   * y  = yy + i * QK_K + y_offset;
        const uint8_t * ql = x[i].ql + ql_offset;
        const uint8_t * qh = x[i].qh + qh_offset;
        const int8_t  * s  = x[i].scales + s_offset;

        const float d = x[i].d;

#if K_QUANTS_PER_ITERATION == 1
        // reassemble each 6-bit quant from its low 4 bits (ql) and high 2 bits (qh), recenter by -32
        float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
                  + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
                  + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
                  + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32)
                  + y[64] * s[4] * d * ((int8_t)((ql[ 0]  >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32)
                  + y[80] * s[5] * d * ((int8_t)((ql[16]  >> 4) | ((qh[16] & 0x30) >> 0)) - 32)
                  + y[96] * s[6] * d * ((int8_t)((ql[32]  >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
                  +y[112] * s[7] * d * ((int8_t)((ql[48]  >> 4) | ((qh[16] & 0xc0) >> 2)) - 32);
        tmp += sum;
#else
        float sum = 0;
        for (int l = 0; l < 4; ++l) {
            sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
                 + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32)
                 + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0]  >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32)
                 + y[l+96] * s[6] * d * ((int8_t)((ql[l+32]  >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32);
        }
        tmp += sum;
#endif

    }

#else

    const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...7
    const int ix  = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0...3

    const int step = tid * K_QUANTS_PER_ITERATION;

    float tmp = 0; // partial sum for thread in warp

    for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {

        const float   * y  = yy + i * QK_K + step;
        const uint8_t * ql = x[i].ql + step;
        const uint8_t * qh = x[i].qh + step;
        const int8_t  * s  = x[i].scales;

        const float d = x[i+0].d;

        float sum = 0;
        for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
            sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32)
                 + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32)
                 + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0]  >> 4) | ((qh[j] & 0x30) >> 0)) - 32)
                 + y[j+48] * s[3] * d * ((int8_t)((ql[j+16]  >> 4) | ((qh[j] & 0xc0) >> 2)) - 32);
        }
        tmp += sum;

    }

#endif

    // sum up partial sums and write back result
    tmp = warp_reduce_sum(tmp);

    if (tid == 0) {
        dst[row] = tmp;
    }
}
|
||||||
|
|
||||||
|
// Loads two consecutive f16 values starting at element ib + iqs of vx into v.
// When dfloat == float the half -> float conversion happens automatically on assignment.
static __device__ void convert_f16(const void * vx, const int ib, const int iqs, dfloat2 & v){
    const half * vals = (const half *) vx;

    v.x = vals[ib + iqs + 0];
    v.y = vals[ib + iqs + 1];
}
|
||||||
|
|
||||||
|
// Generic dequantize + matrix-vector kernel: dst[row] = dequantize(vx_row) · y.
// One output row per (blockIdx.x, threadIdx.y); dequantize_kernel yields two
// dfloat values per call. Precondition: ncols % (2*GGML_CUDA_DMMV_X) == 0 so
// every lane processes the same number of values per iteration.
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __global__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) {
    // qk = quantized weights per x block
    // qr = number of quantized weights per data value in x block
    const int row = blockIdx.x*blockDim.y + threadIdx.y;

    if (row >= nrows) {
        return;
    }

    const int tid = threadIdx.x;

    const int iter_stride = 2*GGML_CUDA_DMMV_X;
    const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
    const int y_offset = qr == 1 ? 1 : qk/2;

// partial sum for each thread
#ifdef GGML_CUDA_F16
    half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics
#else
    float tmp = 0.0f;
#endif // GGML_CUDA_F16

    for (int i = 0; i < ncols; i += iter_stride) {
        const int col = i + vals_per_iter*tid;
        const int ib = (row*ncols + col)/qk; // x block index
        const int iqs = (col%qk)/qr; // x quant index
        const int iybs = col - col%qk; // y block start index

// processing >2 values per i iter is faster for fast GPUs
#pragma unroll
        for (int j = 0; j < vals_per_iter; j += 2) {
            // process 2 vals per j iter

            // dequantize
            // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
            dfloat2 v;
            dequantize_kernel(vx, ib, iqs + j/qr, v);

            // matrix multiplication
            // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
#ifdef GGML_CUDA_F16
            tmp += __hmul2(v, {
                y[iybs + iqs + j/qr + 0],
                y[iybs + iqs + j/qr + y_offset]
            });
#else
            tmp += v.x * y[iybs + iqs + j/qr + 0];
            tmp += v.y * y[iybs + iqs + j/qr + y_offset];
#endif // GGML_CUDA_F16
        }
    }

    // sum up partial sums and write back result
    tmp = warp_reduce_sum(tmp);

    if (tid == 0) {
#ifdef GGML_CUDA_F16
        dst[row] = tmp.x + tmp.y; // fold the two half sums into the final float
#else
        dst[row] = tmp;
#endif // GGML_CUDA_F16
    }
}
|
||||||
|
|
||||||
|
// Launches the generic DMMV kernel specialized for Q4_0 on the given stream.
static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead
    const int nblocks = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div: one block per GGML_CUDA_MMV_Y rows
    const dim3 grid_dims(nblocks, 1, 1);
    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0><<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Launches the generic DMMV kernel specialized for Q4_1 on the given stream.
static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int nblocks = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid_dims(nblocks, 1, 1);
    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1><<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Launches the generic DMMV kernel specialized for Q5_0 on the given stream.
static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int nblocks = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid_dims(nblocks, 1, 1);
    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0><<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Launches the generic DMMV kernel specialized for Q5_1 on the given stream.
static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int nblocks = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid_dims(nblocks, 1, 1);
    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1><<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Launches the generic DMMV kernel specialized for Q8_0 on the given stream.
static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int nblocks = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid_dims(nblocks, 1, 1);
    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0><<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Launches the Q2_K dot-product kernel: ny rows per block along grid x.
static void dequantize_mul_mat_vec_q2_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2
    const dim3 block_dims(32, ny, 1);
    const dim3 grid_dims((nrows + ny - 1) / ny, 1, 1); // ceil-div over rows
    dequantize_mul_mat_vec_q2_k<<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Launches the Q3_K dot-product kernel: ny rows per block along grid x.
static void dequantize_mul_mat_vec_q3_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2 / K_QUANTS_PER_ITERATION; // 2 or 1 rows per block
    const dim3 block_dims(32, ny, 1);
    const dim3 grid_dims((nrows + ny - 1) / ny, 1, 1); // ceil-div over rows
    dequantize_mul_mat_vec_q3_k<<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Launches the Q4_K dot-product kernel: ny rows per block along grid x.
static void dequantize_mul_mat_vec_q4_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2 / K_QUANTS_PER_ITERATION; // 2 or 1 rows per block
    const dim3 block_dims(32, ny, 1);
    const dim3 grid_dims((nrows + ny - 1) / ny, 1, 1); // ceil-div over rows
    dequantize_mul_mat_vec_q4_k<<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Launches the Q5_K dot-product kernel: exactly one warp-sized block per row.
static void dequantize_mul_mat_vec_q5_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const dim3 warp_dims(32, 1, 1);
    dequantize_mul_mat_vec_q5_k<<<nrows, warp_dims, 0, stream>>>(vx, y, dst, ncols);
}
|
||||||
|
|
||||||
|
// Launches the Q6_K dot-product kernel: ny rows per block along grid x.
static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % QK_K == 0);
    const int ny = 2 / K_QUANTS_PER_ITERATION; // 2 or 1 rows per block
    const dim3 block_dims(32, ny, 1);
    const dim3 grid_dims((nrows + ny - 1) / ny, 1, 1); // ceil-div over rows
    dequantize_mul_mat_vec_q6_k<<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Launches the generic DMMV kernel for unquantized f16 data (qk = qr = 1).
static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
    const int nblocks = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y; // ceil-div over rows
    const dim3 grid_dims(nblocks, 1, 1);
    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
    dequantize_mul_mat_vec<1, 1, convert_f16><<<grid_dims, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
|
||||||
|
|
||||||
|
// Dispatches the dequantize + mat-vec kernel matching src0->type for the rows
// [row_low, row_high) on the given stream. src1 must be F32; with GGML_CUDA_F16
// it is first converted to half for the non-K quant types and F16.
void ggml_cuda_op_dequantize_mul_mat_vec(
    ggml_backend_cuda_context & ctx,
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
    const int64_t src1_padded_row_size, cudaStream_t stream) {
    GGML_UNUSED(ctx); // only used when GGML_CUDA_F16 is defined (pool allocation below)
    const int64_t ne00 = src0->ne[0];
    const int64_t row_diff = row_high - row_low;

    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
#ifdef GGML_CUDA_F16
    ggml_cuda_pool_alloc<half> src1_dfloat_a(ctx.pool());
    half * src1_dfloat = nullptr; // dfloat == half

    bool src1_convert_f16 =
        src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 ||
        src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 ||
        src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16;

    if (src1_convert_f16) {
        src1_dfloat = src1_dfloat_a.alloc(ne00);
        const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type);
        GGML_ASSERT(to_fp16_cuda != nullptr);
        to_fp16_cuda(src1_ddf_i, src1_dfloat, ne00, stream);
    }
#else
    const dfloat * src1_dfloat = (const dfloat *) src1_ddf_i; // dfloat == float, no conversion
#endif // GGML_CUDA_F16

    // non-K types take the (possibly converted) src1_dfloat;
    // the K-quant kernels always take the original f32 src1_ddf_i
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            dequantize_mul_mat_vec_q4_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q4_1:
            dequantize_mul_mat_vec_q4_1_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q5_0:
            dequantize_mul_mat_vec_q5_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q5_1:
            dequantize_mul_mat_vec_q5_1_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q8_0:
            dequantize_mul_mat_vec_q8_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q2_K:
            dequantize_mul_mat_vec_q2_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q3_K:
            dequantize_mul_mat_vec_q3_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q4_K:
            dequantize_mul_mat_vec_q4_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q5_K:
            dequantize_mul_mat_vec_q5_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_Q6_K:
            dequantize_mul_mat_vec_q6_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
            break;
        case GGML_TYPE_F16:
            convert_mul_mat_vec_f16_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
            break;
        default:
            GGML_ASSERT(false);
            break;
    }

    GGML_UNUSED(src1);
    GGML_UNUSED(dst);
    GGML_UNUSED(src1_ddq_i);
    GGML_UNUSED(src1_ncols);
    GGML_UNUSED(src1_padded_row_size);
}
|
7
ggml-cuda/dmmv.cuh
Normal file
7
ggml-cuda/dmmv.cuh
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
#include "common.cuh"

// Host entry point for the dequantize + matrix-vector-multiply CUDA operator:
// computes dst rows [row_low, row_high) from quantized src0 and f32 src1 on `stream`.
void ggml_cuda_op_dequantize_mul_mat_vec(
    ggml_backend_cuda_context & ctx,
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
    const int64_t src1_padded_row_size, cudaStream_t stream);
|
557
ggml-cuda/fattn.cu
Normal file
557
ggml-cuda/fattn.cu
Normal file
@ -0,0 +1,557 @@
|
|||||||
|
#include "fattn.cuh"
|
||||||
|
|
||||||
|
#if __CUDA_ARCH__ >= CC_VOLTA
|
||||||
|
typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, 16, 16, 16, half, nvcuda::wmma::row_major> half16x16_a;
|
||||||
|
typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, 16, 16, 16, half, nvcuda::wmma::row_major> half16x16_b;
|
||||||
|
typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, 16, 16, 16, half, nvcuda::wmma::col_major> half16x16_bT;
|
||||||
|
typedef nvcuda::wmma::fragment<nvcuda::wmma::accumulator, 16, 16, 16, half> half16x16_acc;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Flash-attention over an F16 KV cache using tensor cores (nvcuda::wmma 16x16x16
// fragments) — based on the Metal version of this kernel.
//
// One thread block handles Q query rows for one (head, batch) pair
// (blockIdx = {query-block, ne02 index, ne03 index}). Each warp processes C cache
// columns per iteration with an online (streaming) softmax; the per-warp partial
// outputs are merged sequentially by warp 0 at the end.
//
// Template parameters:
//   D - head size            (code assumes a multiple of 16: D16 = D/16)
//   Q - queries per block    (assumes a multiple of 16)
//   C - cache items per warp (assumes a multiple of 16)
//
// Dynamic shared memory layout, in half, per query row of length T:
//   [ D query values | num_warps * (C + Q) scratch ]
//
// Requires __CUDA_ARCH__ >= CC_VOLTA; otherwise compiles to NO_DEVICE_CODE.
template<int D, int Q, int C> // D head size, Q queries per block, C cache items per block
static __global__ void flash_attn_ext_f16(
        const char* __restrict__ q,    // Q tensor data (F32, byte strides nb0x)
        const char* __restrict__ k,    // K tensor data (F16, byte strides nb1x)
        const char* __restrict__ v,    // V tensor data (F16; strides assumed equal to K's, see below)
        const char* __restrict__ mask, // optional additive attention mask (F16), may be nullptr
        float* __restrict__ dst,       // output (F32)
        float scale,                   // scale applied to Q*K^T before softmax
        int ne00,  // Q dims
        int ne01,
        int ne02,
        int ne03,
        int ne10,  // K dims
        int ne11,
        int ne12,
        int ne13,
        int ne31,  // mask ne[1] (0 when no mask)
        int nb31,  // mask row stride in bytes
        int nb01,  // Q byte strides
        int nb02,
        int nb03,
        int nb11,  // K byte strides
        int nb12,
        int nb13,
        int ne0,   // dst dims
        int ne1,
        int ne2,
        int ne3) {
#if __CUDA_ARCH__ >= CC_VOLTA
    const int warp_id = threadIdx.y;
    const int lane_id = threadIdx.x;

    const int num_warps = blockDim.y; // number of warps
    const int iq3 = blockIdx.z;       // outermost batch index
    const int iq2 = blockIdx.y;       // head index
    const int iq1 = blockIdx.x * Q;   // first query row handled by this block

    // tile counts (16x16 WMMA tiles)
    const int D16 = D/16;
    const int Q16 = Q/16;
    const int C16 = C/16;

    const int NW = WARP_SIZE;
    const int SH = (C + Q); // shared memory per simdgroup in (half)

    const int T  = D + num_warps*SH; // shared memory size per query in (half)
    const int T2 = T/2;              // shared memory size per query in (half2)
    const int C2 = C/2;
    const int D2 = D/2;

    extern __shared__ half __flash_attn_f16_shmem[];
    // pq
    half  * sq  = (half  *) (__flash_attn_f16_shmem +              0*D); // holds the query data
    half2 * sq2 = (half2 *) (__flash_attn_f16_shmem +              0*D); // same as above but in half2
    half  * ss  = (half  *) (__flash_attn_f16_shmem + warp_id*SH + 1*D); // scratch buffer for attention and diagonal matrix
    half2 * ss2 = (half2 *) (__flash_attn_f16_shmem + warp_id*SH + 1*D); // same as above but in half2

    half16x16_acc zr;           // all-zero accumulator, reused as the "no bias" operand in mma_sync
    half16x16_acc lo[Q16][D16]; // running output accumulator O (per-warp)

    // load heads from Q to shared memory (converting F32 -> F16)
    #pragma unroll
    for (int j0 = 0; j0 < Q; j0 += num_warps) {
        const int j = j0 + warp_id;
        if (j >= Q) {
            break;
        }

        const float2 * q2 = (const float2 *) (q + ((iq1 + j)*nb01 + iq2*nb02 + iq3*nb03));

        #pragma unroll
        for (int i0 = 0; i0 < D2; i0 += NW) {
            const int i = i0 + lane_id;
            if (i >= D2) {
                break;
            }

            // rows past the end of Q are zero-padded so the 16x16 tiles stay valid
            if (iq1 + j < ne01) {
                sq2[j*T2 + i] = __float22half2_rn(q2[i]);
            } else {
                sq2[j*T2 + i] = make_half2(0.0, 0.0);
            }
        }
    }

    nvcuda::wmma::fill_fragment(zr, 0.0);

    // zero out lo
    for (int j = 0; j < Q16; ++j) {
        for (int i = 0; i < D16; ++i) {
            nvcuda::wmma::fill_fragment(lo[j][i], 0.0);
        }
    }

    // zero out shared memory SH
    for (int j = 0; j < Q; ++j) {
        for (int i0 = 0; i0 < SH; i0 += NW) {
            const int i = i0 + lane_id;
            if (i >= SH) {
                break;
            }

            ss[j*T + i] = 0.0;
        }
    }

    __syncthreads();

    {
        // online softmax state: S = running denominator, M[j] = running row maximum
        half S = __float2half(0.0f);
        half M[Q];

        for (int i = 0; i < Q; ++i) {
            M[i] = CUDART_MIN_DENORM_FP16; // tiny but finite, so hexp(m - M) below is well-defined
        }

        // assume K and V are same shape
        const int ne22 = ne12;
        const int ne23 = ne13;

        const int nb21 = nb11;
        const int nb22 = nb12;
        const int nb23 = nb13;

        // broadcast factors for grouped-query attention (multiple Q heads per KV head)
        const int rk2 = ne02/ne12;
        const int rk3 = ne03/ne13;

        const int rv2 = ne02/ne22;
        const int rv3 = ne03/ne23;

        // k indices
        const int ik2 = iq2 / rk2;
        const int ik3 = iq3 / rk3;

        // v indices
        const int iv2 = iq2 / rv2;
        const int iv3 = iq3 / rv3;

        // load the queries from shared memory into local memory
        half16x16_a mq[Q16][D16];
        for (int j = 0; j < Q16; ++j) {
            for (int i = 0; i < D16; ++i) {
                nvcuda::wmma::load_matrix_sync(mq[j][i], sq + 16*j*T + i*16, T);
            }
        }

        // pointer to the mask
        const half * mp = mask ? (const half *) (mask + iq1*nb31) : nullptr;

        // prepare diagonal scale matrix (diag(scale) as a matrix_b fragment);
        // all lanes write the same values, which is redundant but harmless
        half16x16_b mscale;
        for (int i = 0; i < 16; ++i) {
            ss[i*T + i] = __float2half(scale);
        }
        nvcuda::wmma::load_matrix_sync(mscale, ss, T);

        // loop over the KV cache
        // each simdgroup handles blocks of Q rows and C columns
        for (int ic0 = 0; ic0 < ne11; ic0 += C*num_warps) {
            const int ic = ic0 + warp_id*C;
            if (ic >= ne11) {
                break;
            }

            // Q*K^T
            {
                #pragma unroll
                for (int cc = 0; cc < C16; ++cc) {
                    half16x16_acc mqk[Q16];
                    for (int j = 0; j < Q16; ++j) {
                        nvcuda::wmma::fill_fragment(mqk[j], 0);
                    }

                    const half * pk = (const half *) ((const char *) k + ((ic + 16*cc)*nb11 + ik2*nb12 + ik3*nb13));

                    for (int i = 0; i < D16; ++i) {
                        half16x16_bT mk; // transposed key (col_major load = implicit transpose)
                        nvcuda::wmma::load_matrix_sync(mk, pk + i*16, nb11/sizeof(half));

                        for (int j = 0; j < Q16; ++j) {
                            nvcuda::wmma::mma_sync(mqk[j], mq[j][i], mk, mqk[j]);
                        }
                    }

                    // mqk = mqk*scale + mask
                    for (int j = 0; j < Q16; ++j) {
                        half16x16_a   mqka;
                        half16x16_acc mm;

                        if (mp) {
                            nvcuda::wmma::load_matrix_sync(mm, mp + 16*j*(nb31/sizeof(half)) + ic + 16*cc, nb31/sizeof(half), nvcuda::wmma::mem_row_major);
                        }

                        // convert accumulator to matrix_a (round-trip through shared memory)
                        nvcuda::wmma::store_matrix_sync(       ss + 16*j*T + 16*cc, mqk[j], T, nvcuda::wmma::mem_row_major);
                        nvcuda::wmma::load_matrix_sync (mqka,  ss + 16*j*T + 16*cc, T);

                        nvcuda::wmma::mma_sync(mqk[j], mqka, mscale, mp ? mm : zr);
                        nvcuda::wmma::store_matrix_sync(ss + 16*j*T + 16*cc, mqk[j], T, nvcuda::wmma::mem_row_major);
                    }
                }
            }

            // used to detect blocks full of -INF
            half2 smax = make_half2(-INFINITY, -INFINITY);

            // online softmax
            for (int j = 0; j < Q; ++j) {
                const half m = M[j];

                // pass 1: update the running row maximum
                for (int p0 = 0; p0 < C2; p0 += NW) {
                    const int p = p0 + lane_id;

                    const half2 s = ss2[j*T2 + p];

                    smax = __hmax2(smax, s);
                    M[j] = __hmax(M[j], __hmax(s.x, s.y));
                }

                M[j] = warp_reduce_max(M[j]);

                // local sum
                half2 ls = make_half2(0.0f, 0.0f);
                half2 M2 = make_half2(M[j], M[j]);

                // pass 2: exponentiate in place and accumulate the row sum
                for (int p0 = 0; p0 < C2; p0 += NW) {
                    const int p = p0 + lane_id;

                    const half2 s = ss2[j*T2 + p];

                    const half2 vs = h2exp(s - M2);

                    ls += vs;

                    // the P matrix from the paper (Q rows, C columns)
                    ss2[j*T2 + p] = vs;
                }

                ls = warp_reduce_sum(ls);

                // rescale factor for the previously accumulated output/denominator
                const half ms = hexp(m - M[j]);

                // create a QxQ diagonal matrix for rescaling the output
                if (lane_id == j) {
                    ss[j*T + C + j] = ms;

                    S = S*ms + ls.x + ls.y;
                }
            }

            smax = warp_reduce_max(smax);

            // skip -INF blocks (fully masked: contributes nothing to the output)
            if (__hisinf(smax.x) == -1 && __hisinf(smax.y) == -1) {
                continue;
            }

            // O = diag(ms)*O
            for (int j = 0; j < Q16; ++j) {
                half16x16_a mm;
                half16x16_b lob;

                nvcuda::wmma::load_matrix_sync(mm, ss + 16*j*T + C + 16*j, T);

                for (int i = 0; i < D16; ++i) {
                    // convert accumulator to matrix_b
                    nvcuda::wmma::store_matrix_sync(     ss + 16*j*T + C + 16*j, lo[j][i], T, nvcuda::wmma::mem_row_major);
                    nvcuda::wmma::load_matrix_sync (lob, ss + 16*j*T + C + 16*j, T);

                    nvcuda::wmma::mma_sync(lo[j][i], mm, lob, zr);
                }
            }

            // restore zeros (the staging area above overwrote the diagonal region)
            for (int j = 0; j < Q16; ++j) {
                nvcuda::wmma::store_matrix_sync(ss + 16*j*T + C + 16*j, zr, T, nvcuda::wmma::mem_row_major);
            }

            // O = O + (Q*K^T)*V
            {
                for (int cc = 0; cc < C16; ++cc) {
                    const half * pv = (const half *) ((const char *) v + ((ic + 16*cc)*nb21 + iv2*nb22 + iv3*nb23));

                    half16x16_b mv[D16];
                    for (int i = 0; i < D16; ++i) {
                        nvcuda::wmma::load_matrix_sync(mv[i], pv + i*16, nb21/sizeof(half));
                    }

                    half16x16_a ms[Q16];
                    for (int j = 0; j < Q16; ++j) {
                        nvcuda::wmma::load_matrix_sync(ms[j], ss + 16*j*T + 16*cc, T);
                    }

                    for (int j = 0; j < Q16; ++j) {
                        for (int i = 0; i < D16; ++i) {
                            nvcuda::wmma::mma_sync(lo[j][i], ms[j], mv[i], lo[j][i]);
                        }
                    }
                }
            }
        }

        // these are needed for reducing the results from the simdgroups (reuse the ss buffer)
        if (lane_id < Q) {
            ss[lane_id*T + 0] = S;
            ss[lane_id*T + 1] = M[lane_id];
        }
    }

    // reduce the warps sequentially
    for (int sg = 1; sg < num_warps; ++sg) {
        __syncthreads();

        // each simdgroup stores its output to shared memory, reusing sq
        if (warp_id == sg) {
            for (int j = 0; j < Q16; ++j) {
                for (int i = 0; i < D16; ++i) {
                    nvcuda::wmma::store_matrix_sync(sq + 16*j*T + i*16, lo[j][i], T, nvcuda::wmma::mem_row_major);
                }
            }
        }

        __syncthreads();

        // the first simdgroup accumulates the results from the other simdgroups
        if (warp_id == 0) {
            // merge the (S, M) softmax state of warp 0 and warp sg (log-sum-exp combine)
            for (int j = lane_id; j < Q; j += NW) {
                const half S0 = ss[j*T +         0];
                const half S1 = ss[j*T + sg*SH + 0];

                const half M0 = ss[j*T +         1];
                const half M1 = ss[j*T + sg*SH + 1];

                const half M = __hmax(M0, M1);

                const half ms0 = hexp(M0 - M);
                const half ms1 = hexp(M1 - M);

                const half S = S0*ms0 + S1*ms1;

                ss[j*T + 0] = S;
                ss[j*T + 1] = M;

                ss[j*T + C + j        ] = ms0;
                ss[j*T + C + j + sg*SH] = ms1;
            }

            // O_0 = diag(ms0)*O_0 + diag(ms1)*O_1
            for (int j = 0; j < Q16; ++j) {
                half16x16_a   ms0;
                half16x16_a   ms1;
                half16x16_b   t;
                half16x16_acc t2;

                nvcuda::wmma::load_matrix_sync(ms0, ss + 16*j*T + C + 16*j,         T);
                nvcuda::wmma::load_matrix_sync(ms1, ss + 16*j*T + C + 16*j + sg*SH, T);

                for (int i = 0; i < D16; ++i) {
                    // t2 = diag(ms1) * O_sg (O_sg was staged in sq above)
                    nvcuda::wmma::load_matrix_sync(t, sq + 16*j*T + i*16, T);
                    nvcuda::wmma::mma_sync(t2, ms1, t, zr);

                    // convert accumulator to matrix_b
                    nvcuda::wmma::store_matrix_sync(   sq + 16*j*T + i*16, lo[j][i], T, nvcuda::wmma::mem_row_major);
                    nvcuda::wmma::load_matrix_sync (t, sq + 16*j*T + i*16, T);

                    nvcuda::wmma::mma_sync(lo[j][i], ms0, t, t2);
                }
            }
        }
    }

    // store result to shared memory (reuse sq)
    if (warp_id == 0) {
        for (int j = 0; j < Q16; ++j) {
            for (int i = 0; i < D16; ++i) {
                nvcuda::wmma::store_matrix_sync(sq + 16*j*T + i*16, lo[j][i], T, nvcuda::wmma::mem_row_major);
            }
        }
    }

    // final rescale with 1/S and store to global memory
    if (warp_id == 0) {
        for (int j = 0; j < Q && iq1 + j < ne01; ++j) {
            const half S = ss[j*T + 0];

            for (int i0 = 0; i0 < D; i0 += NW) {
                const int i = i0 + lane_id;
                if (i >= D) {
                    break;
                }

                // NOTE(review): dst layout interleaves the head index (iq2) between batch and
                // row — presumably matches the permuted KQV layout expected by the caller
                dst[(iq3*ne2*ne1 + iq2 + (iq1 + j)*ne1)*D + i] = __half2float(sq[j*T + i] / S);
            }
        }
    }
#else
    NO_DEVICE_CODE;
#endif
}
|
||||||
|
|
||||||
|
#define NQPB 16
#define NCPW 128

// Instantiates and launches flash_attn_ext_f16 for the compile-time head size D.
// Factors out the launch-argument marshalling that was previously duplicated
// verbatim for every supported head size in the switch below.
template<int D>
static void flash_attn_ext_f16_launch(
        const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V,
        const ggml_tensor * mask, ggml_tensor * KQV, float scale,
        const dim3 & blocks_num, const dim3 & block_dim, size_t shmem, cudaStream_t main_stream) {
    flash_attn_ext_f16<D, NQPB, NCPW>
        <<<blocks_num, block_dim, shmem, main_stream>>> (
                (const char *) Q->data, // Query
                (const char *) K->data, // Key
                (const char *) V->data, // Value
                mask ? (const char *) mask->data : nullptr, // Mask
                (float *) KQV->data, // dst
                scale,
                Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3],
                K->ne[0], K->ne[1], K->ne[2], K->ne[3],
                mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0,
                Q->nb[1], Q->nb[2], Q->nb[3],
                K->nb[1], K->nb[2], K->nb[3],
                KQV->ne[0], KQV->ne[1], KQV->ne[2], KQV->ne[3]);
}

// Computes KQV = softmax(scale * Q*K^T + mask) * V on the GPU using the F16
// tensor-core flash-attention kernel.
//
// Preconditions (asserted): Q and KQV are F32; K, V and the optional mask are F16;
// the mask, when present, must be padded to a multiple of 16 rows and be at least
// n_queries tall. The softmax scale is read from KQV->op_params.
// The launch is asynchronous on ctx.stream(); only the launch status is checked here.
void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V, const ggml_tensor * mask, ggml_tensor * KQV) {
    GGML_ASSERT(Q->type == GGML_TYPE_F32);
    GGML_ASSERT(K->type == GGML_TYPE_F16);
    GGML_ASSERT(V->type == GGML_TYPE_F16);
    GGML_ASSERT(KQV->type == GGML_TYPE_F32);

    GGML_ASSERT(!mask || mask->type == GGML_TYPE_F16);
    // parentheses made explicit: the string literal is &&-ed with the condition so it
    // shows up in the assertion message (same semantics as before, clearer precedence)
    GGML_ASSERT(!mask || (mask->ne[1] >= GGML_PAD(Q->ne[1], 16) &&
                "the Flash-Attention CUDA kernel requires the mask to be padded to 16 and at least n_queries big"));

    ggml_cuda_set_device(ctx.device);

    const cudaStream_t main_stream = ctx.stream();

    float scale;
    memcpy(&scale, KQV->op_params, sizeof(float));

    const int nqpb = NQPB; // queries per block
    const int ncpw = NCPW; // cache values per warp (does not work for other values)

    GGML_ASSERT(NQPB <= 32);

    const int nwarps_max = 8; // TODO: we don't want to launch too much warps. how much is too much?
    // TODO: produces wrong results for nwarps > 8 (RTX 2060) - not sure why
    const int nwarps = Q->ne[1] <= nqpb ? std::max(2, std::min((int) K->ne[1]/ncpw, nwarps_max)) : 1;

    dim3 blocks_num((Q->ne[1] + nqpb - 1) / nqpb, Q->ne[2], Q->ne[3]);
    dim3 block_dim(32, nwarps, 1);

    // dynamic shared memory: nqpb query rows of (head size + per-warp scratch), in half
    const size_t shmem = nqpb*(Q->ne[0] + nwarps*(ncpw + nqpb))*(sizeof(float)/2);

    // increase shared memory limit to 96KB
    //const size_t shmem_max = 96*1024;
    //cudaFuncSetAttribute(flash_attn_ext_f16<128, NQPB, NCPW>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem_max);

    switch (Q->ne[0]) {
        case  64: flash_attn_ext_f16_launch< 64>(Q, K, V, mask, KQV, scale, blocks_num, block_dim, shmem, main_stream); break;
        case  80: flash_attn_ext_f16_launch< 80>(Q, K, V, mask, KQV, scale, blocks_num, block_dim, shmem, main_stream); break;
        case  96: flash_attn_ext_f16_launch< 96>(Q, K, V, mask, KQV, scale, blocks_num, block_dim, shmem, main_stream); break;
        case 112: flash_attn_ext_f16_launch<112>(Q, K, V, mask, KQV, scale, blocks_num, block_dim, shmem, main_stream); break;
        case 128: flash_attn_ext_f16_launch<128>(Q, K, V, mask, KQV, scale, blocks_num, block_dim, shmem, main_stream); break;
        case 256: flash_attn_ext_f16_launch<256>(Q, K, V, mask, KQV, scale, blocks_num, block_dim, shmem, main_stream); break;
        default:
            // unsupported head size: no kernel is launched (preserves previous behavior);
            // callers are expected to only request the sizes listed above
            break;
    }

    CUDA_CHECK(cudaGetLastError());
}
|
6
ggml-cuda/fattn.cuh
Normal file
6
ggml-cuda/fattn.cuh
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
#include "common.cuh"

// F16 tensor-core flash attention: computes KQV = softmax(scale * Q*K^T + mask) * V.
// Q and KQV are F32; K, V and the optional mask (may be nullptr) are F16.
// The softmax scale is read from KQV->op_params by the implementation, and the
// kernel is launched asynchronously on ctx.stream().
void ggml_cuda_flash_attn_ext(
    ggml_backend_cuda_context & ctx,
    const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V,
    const ggml_tensor * mask, ggml_tensor * KQV);
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user