From 67984921a70a7e680a24494aeb7575a66e90685d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 7 Jan 2024 08:45:26 +0200 Subject: [PATCH 01/10] server : fix n_predict check (#4798) --- examples/server/server.cpp | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index d1469fb08..6c7fcd176 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -447,8 +447,14 @@ struct llama_client_slot } bool has_budget(gpt_params &global_params) { + if (params.n_predict == -1 && global_params.n_predict == -1) + { + return true; // limitless + } + n_remaining = -1; - if(params.n_predict != -1) + + if (params.n_predict != -1) { n_remaining = params.n_predict - n_decoded; } @@ -456,7 +462,8 @@ struct llama_client_slot { n_remaining = global_params.n_predict - n_decoded; } - return n_remaining > 0 || n_remaining == -1; // no budget || limitless + + return n_remaining > 0; // no budget } bool available() const { @@ -1102,7 +1109,7 @@ struct llama_server_context } // check the limits - if (slot.n_decoded > 2 && slot.has_next_token && !slot.has_budget(params)) + if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params)) { slot.stopped_limit = true; slot.has_next_token = false; @@ -1703,7 +1710,6 @@ struct llama_server_context llama_batch_add(batch, slot.sampled, system_tokens.size() + slot.n_past, { slot.id }, true); - slot.n_decoded += 1; slot.n_past += 1; } @@ -1921,6 +1927,7 @@ struct llama_server_context llama_sampling_accept(slot.ctx_sampling, ctx, id, true); + slot.n_decoded += 1; if (slot.n_decoded == 1) { slot.t_start_genereration = ggml_time_us(); From 63ee677efd92060b14894b984597c62e3742b8da Mon Sep 17 00:00:00 2001 From: Konstantin Zhuravlyov Date: Sun, 7 Jan 2024 01:52:42 -0500 Subject: [PATCH 02/10] ggml : use __builtin_amdgcn_sudot4 in __dp4a for gfx11 (#4787) --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 10c21615e..54b266be4 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -183,7 +183,7 @@ static __device__ __forceinline__ int __vsubss4(const int a, const int b) { static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) { #if defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx1030__) c = __builtin_amdgcn_sdot4(a, b, c, false); -#elif defined(__gfx1100__) +#elif defined(RDNA3) c = __builtin_amdgcn_sudot4( true, a, true, b, c, false); #elif defined(__gfx1010__) || defined(__gfx900__) int tmp1; From 3418c03ecc149fd657527ebb06776239b60a3f3b Mon Sep 17 00:00:00 2001 From: Alex Azarov Date: Sun, 7 Jan 2024 08:46:55 +0100 Subject: [PATCH 03/10] llama.swiftui : add visionOS target (#4805) --- .../llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj b/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj index 14b93f26c..9b1a9787b 100644 --- a/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj +++ b/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj @@ -420,11 +420,13 @@ MARKETING_VERSION = 1.0; PRODUCT_BUNDLE_IDENTIFIER = "com.bachittle.llama-swift"; PRODUCT_NAME = "$(TARGET_NAME)"; + SUPPORTED_PLATFORMS = "iphoneos iphonesimulator xros xrsimulator"; + SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = NO; SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_OBJC_BRIDGING_HEADER = 
"llama.cpp.swift/bridging-header.h"; SWIFT_OPTIMIZATION_LEVEL = "-Onone"; SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; + TARGETED_DEVICE_FAMILY = "1,2,7"; }; name = Debug; }; @@ -453,10 +455,12 @@ MARKETING_VERSION = 1.0; PRODUCT_BUNDLE_IDENTIFIER = "com.bachittle.llama-swift"; PRODUCT_NAME = "$(TARGET_NAME)"; + SUPPORTED_PLATFORMS = "iphoneos iphonesimulator xros xrsimulator"; + SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = NO; SWIFT_EMIT_LOC_STRINGS = YES; SWIFT_OBJC_BRIDGING_HEADER = "llama.cpp.swift/bridging-header.h"; SWIFT_VERSION = 5.0; - TARGETED_DEVICE_FAMILY = "1,2"; + TARGETED_DEVICE_FAMILY = "1,2,7"; }; name = Release; }; From d117d4dc5dadb46831036bfa4d6e5e8c86babaf1 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 7 Jan 2024 09:50:31 +0200 Subject: [PATCH 04/10] llama : print tensor meta for debugging --- llama.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index 3bb056dba..06db40303 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2180,7 +2180,11 @@ struct llama_model_loader { type_max = type; } - // LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str()); + // TODO: make runtime configurable +#if 0 + struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i)); + LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, ggml_get_name(meta), ggml_type_name(type), llama_format_tensor_shape(meta).c_str()); +#endif } switch (type_max) { From 72d8407b3696dd1293bd233b6db392be108bc377 Mon Sep 17 00:00:00 2001 From: Alex Azarov Date: Sun, 7 Jan 2024 09:20:50 +0100 Subject: [PATCH 05/10] llama.swiftui : use llama.cpp as SPM package (#4804) --- .../llama.cpp.swift/LibLlama.swift | 5 +- .../llama.cpp.swift/bridging-header.h | 5 -- .../llama.swiftui.xcodeproj/project.pbxproj | 80 +++---------------- .../AccentColor.colorset/Contents.json | 11 --- .../Preview Assets.xcassets/Contents.json | 6 -- 5 files changed, 13 insertions(+), 94 deletions(-) delete mode 100644 examples/llama.swiftui/llama.cpp.swift/bridging-header.h delete mode 100644 examples/llama.swiftui/llama.swiftui/Assets.xcassets/AccentColor.colorset/Contents.json delete mode 100644 examples/llama.swiftui/llama.swiftui/Preview Content/Preview Assets.xcassets/Contents.json diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift index 8696b493c..fc79fd346 100644 --- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift +++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift @@ -1,8 +1,5 @@ import Foundation - -// To use this in your own project, add llama.cpp as a swift package dependency -// and uncomment this import line. -// import llama +import llama enum LlamaError: Error { case couldNotInitializeContext diff --git a/examples/llama.swiftui/llama.cpp.swift/bridging-header.h b/examples/llama.swiftui/llama.cpp.swift/bridging-header.h deleted file mode 100644 index 6cd72c979..000000000 --- a/examples/llama.swiftui/llama.cpp.swift/bridging-header.h +++ /dev/null @@ -1,5 +0,0 @@ -// -// Use this file to import your target's public headers that you would like to expose to Swift. 
-// - -#import "llama.h" diff --git a/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj b/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj index 9b1a9787b..a8848a49f 100644 --- a/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj +++ b/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj @@ -7,52 +7,31 @@ objects = { /* Begin PBXBuildFile section */ - 542376082B0D9BFB008E6A1C /* ggml-quants.c in Sources */ = {isa = PBXBuildFile; fileRef = 542376072B0D9BFB008E6A1C /* ggml-quants.c */; settings = {COMPILER_FLAGS = "-O3"; }; }; - 5423760B2B0D9C4B008E6A1C /* ggml-backend.c in Sources */ = {isa = PBXBuildFile; fileRef = 5423760A2B0D9C4B008E6A1C /* ggml-backend.c */; settings = {COMPILER_FLAGS = "-O3"; }; }; - 542EA09D2AC8723900A8AEE9 /* ggml.c in Sources */ = {isa = PBXBuildFile; fileRef = 542EA09B2AC8723900A8AEE9 /* ggml.c */; settings = {COMPILER_FLAGS = "-DGGML_USE_ACCELERATE -DGGML_USE_METAL -DGGML_USE_K_QUANTS -O3"; }; }; - 542EA0A02AC8725700A8AEE9 /* ggml-alloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */; settings = {COMPILER_FLAGS = "-O3"; }; }; - 542EA0A32AC8729100A8AEE9 /* llama.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 542EA0A12AC8729100A8AEE9 /* llama.cpp */; settings = {COMPILER_FLAGS = "-DGGML_USE_K_QUANTS -DGGML_USE_METAL -O3"; }; }; 549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 549479CA2AC9E16000E0F78B /* Metal.framework */; }; - 549479CD2AC9E42A00E0F78B /* ggml-metal.m in Sources */ = {isa = PBXBuildFile; fileRef = 549479C52AC9E0F200E0F78B /* ggml-metal.m */; settings = {COMPILER_FLAGS = "-fno-objc-arc -DGGML_SWIFT -DGGML_USE_METAL -O3"; }; }; 7FA3D2B32B2EA2F600543F92 /* DownloadButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */; }; 8A1C83772AC328BD0096AF73 /* llama_swiftuiApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */; }; 8A1C83792AC328BD0096AF73 /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A1C83782AC328BD0096AF73 /* ContentView.swift */; }; 8A1C837B2AC328BE0096AF73 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8A1C837A2AC328BE0096AF73 /* Assets.xcassets */; }; - 8A1C837E2AC328BE0096AF73 /* Preview Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */; }; 8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8A39BE092AC7601000BFEB40 /* Accelerate.framework */; }; 8A3F84242AC4C891005E2EE8 /* models in Resources */ = {isa = PBXBuildFile; fileRef = 8A3F84232AC4C891005E2EE8 /* models */; }; 8A907F332AC7138A006146EA /* LibLlama.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A907F322AC7134E006146EA /* LibLlama.swift */; }; 8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */; }; - F1FE20DC2B465C4500B45541 /* ggml-metal.metal in Resources */ = {isa = PBXBuildFile; fileRef = 549479C82AC9E10B00E0F78B /* ggml-metal.metal */; }; + DF810E132B4A5BA200301144 /* llama in Frameworks */ = {isa = PBXBuildFile; productRef = DF810E122B4A5BA200301144 /* llama */; }; F1FE20E22B465ECA00B45541 /* LoadCustomButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = F1FE20E12B465EC900B45541 /* LoadCustomButton.swift */; }; /* End PBXBuildFile section */ /* Begin PBXFileReference 
section */
-		542376062B0D9BEA008E6A1C /* ggml-quants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-quants.h"; path = "../../ggml-quants.h"; sourceTree = "<group>"; };
-		542376072B0D9BFB008E6A1C /* ggml-quants.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-quants.c"; path = "../../ggml-quants.c"; sourceTree = "<group>"; };
-		542376092B0D9C40008E6A1C /* ggml-backend.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "ggml-backend.h"; path = "../../ggml-backend.h"; sourceTree = "<group>"; };
-		5423760A2B0D9C4B008E6A1C /* ggml-backend.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-backend.c"; path = "../../ggml-backend.c"; sourceTree = "<group>"; };
-		542EA09B2AC8723900A8AEE9 /* ggml.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = ggml.c; path = ../../ggml.c; sourceTree = "<group>"; };
-		542EA09C2AC8723900A8AEE9 /* ggml.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ggml.h; path = ../../ggml.h; sourceTree = "<group>"; };
-		542EA09E2AC8725700A8AEE9 /* ggml-alloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-alloc.h"; path = "../../ggml-alloc.h"; sourceTree = "<group>"; };
-		542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-alloc.c"; path = "../../ggml-alloc.c"; sourceTree = "<group>"; };
-		542EA0A12AC8729100A8AEE9 /* llama.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = llama.cpp; path = ../../llama.cpp; sourceTree = "<group>"; };
-		542EA0A22AC8729100A8AEE9 /* llama.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = llama.h; path = ../../llama.h; sourceTree = "<group>"; };
-		549479C52AC9E0F200E0F78B /* ggml-metal.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "ggml-metal.m"; path = "../../ggml-metal.m"; sourceTree = "<group>"; };
-		549479C62AC9E0F200E0F78B /* ggml-metal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-metal.h"; path = "../../ggml-metal.h"; sourceTree = "<group>"; };
-		549479C82AC9E10B00E0F78B /* ggml-metal.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; name = "ggml-metal.metal"; path = "../../ggml-metal.metal"; sourceTree = "<group>"; };
 		549479CA2AC9E16000E0F78B /* Metal.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Metal.framework; path = System/Library/Frameworks/Metal.framework; sourceTree = SDKROOT; };
 		7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DownloadButton.swift; sourceTree = "<group>"; };
-		8A08D20A2AC73B1500FE6CD4 /* bridging-header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "bridging-header.h"; sourceTree = "<group>"; };
 		8A1C83732AC328BD0096AF73 /* llama.swiftui.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = llama.swiftui.app; sourceTree = BUILT_PRODUCTS_DIR; };
 		8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = llama_swiftuiApp.swift; sourceTree = "<group>"; };
 		8A1C83782AC328BD0096AF73 /* ContentView.swift */ = {isa = PBXFileReference; 
lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = "<group>"; };
 		8A1C837A2AC328BE0096AF73 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; };
-		8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = "Preview Assets.xcassets"; sourceTree = "<group>"; };
 		8A39BE092AC7601000BFEB40 /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = System/Library/Frameworks/Accelerate.framework; sourceTree = SDKROOT; };
 		8A3F84232AC4C891005E2EE8 /* models */ = {isa = PBXFileReference; lastKnownFileType = folder; name = models; path = llama.swiftui/Resources/models; sourceTree = "<group>"; };
 		8A907F322AC7134E006146EA /* LibLlama.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LibLlama.swift; sourceTree = "<group>"; };
 		8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LlamaState.swift; sourceTree = "<group>"; };
+		DF2D2FE72B4A59BE00FCB72D /* llama.cpp */ = {isa = PBXFileReference; lastKnownFileType = wrapper; name = llama.cpp; path = ../..; sourceTree = "<group>"; };
 		F1FE20E12B465EC900B45541 /* LoadCustomButton.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LoadCustomButton.swift; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
@@ -61,6 +40,7 @@
 			isa = PBXFrameworksBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
+				DF810E132B4A5BA200301144 /* llama in Frameworks */,
 				549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */,
 				8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */,
 			);
@@ -69,30 +49,10 @@
 /* End PBXFrameworksBuildPhase section */
 
 /* Begin PBXGroup section */
-		8A08D1F62AC7383900FE6CD4 /* llama.cpp */ = {
-			isa = PBXGroup;
-			children = (
-				5423760A2B0D9C4B008E6A1C /* ggml-backend.c */,
-				542376092B0D9C40008E6A1C /* ggml-backend.h */,
-				542376062B0D9BEA008E6A1C /* ggml-quants.h */,
-				542376072B0D9BFB008E6A1C /* ggml-quants.c */,
-				549479C82AC9E10B00E0F78B /* ggml-metal.metal */,
-				549479C62AC9E0F200E0F78B /* ggml-metal.h */,
-				549479C52AC9E0F200E0F78B /* ggml-metal.m */,
-				542EA09B2AC8723900A8AEE9 /* ggml.c */,
-				542EA09C2AC8723900A8AEE9 /* ggml.h */,
-				542EA09F2AC8725700A8AEE9 /* ggml-alloc.c */,
-				542EA09E2AC8725700A8AEE9 /* ggml-alloc.h */,
-				542EA0A12AC8729100A8AEE9 /* llama.cpp */,
-				542EA0A22AC8729100A8AEE9 /* llama.h */,
-			);
-			name = llama.cpp;
-			sourceTree = "<group>";
-		};
 		8A1C836A2AC328BD0096AF73 = {
 			isa = PBXGroup;
 			children = (
-				8A08D1F62AC7383900FE6CD4 /* llama.cpp */,
+				DF2D2FE72B4A59BE00FCB72D /* llama.cpp */,
 				8A907F312AC7134E006146EA /* llama.cpp.swift */,
 				8A3F84232AC4C891005E2EE8 /* models */,
 				8A1C83752AC328BD0096AF73 /* llama.swiftui */,
@@ -117,19 +77,10 @@
 				8A9F7C4A2AC332BF008AE1EA /* UI */,
 				8A1C83762AC328BD0096AF73 /* llama_swiftuiApp.swift */,
 				8A1C837A2AC328BE0096AF73 /* Assets.xcassets */,
-				8A1C837C2AC328BE0096AF73 /* Preview Content */,
 			);
 			path = llama.swiftui;
 			sourceTree = "<group>";
 		};
-		8A1C837C2AC328BE0096AF73 /* Preview Content */ = {
-			isa = PBXGroup;
-			children = (
-				8A1C837D2AC328BE0096AF73 /* Preview Assets.xcassets */,
-			);
-			path = "Preview Content";
-			sourceTree = "<group>";
-		};
 		8A39BE082AC7601000BFEB40 /* Frameworks */ = {
 			isa = PBXGroup;
 			children = (
@@ -157,7 +108,6 @@
 		8A907F312AC7134E006146EA /* llama.cpp.swift */ = {
 			isa = PBXGroup;
 			children = (
-				8A08D20A2AC73B1500FE6CD4 /* bridging-header.h */, 
8A907F322AC7134E006146EA /* LibLlama.swift */, ); path = llama.cpp.swift; @@ -198,6 +148,7 @@ ); name = llama.swiftui; packageProductDependencies = ( + DF810E122B4A5BA200301144 /* llama */, ); productName = llama.swiftui; productReference = 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */; @@ -244,9 +195,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; files = ( - F1FE20DC2B465C4500B45541 /* ggml-metal.metal in Resources */, 8A3F84242AC4C891005E2EE8 /* models in Resources */, - 8A1C837E2AC328BE0096AF73 /* Preview Assets.xcassets in Resources */, 8A1C837B2AC328BE0096AF73 /* Assets.xcassets in Resources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -258,18 +207,12 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - 542376082B0D9BFB008E6A1C /* ggml-quants.c in Sources */, - 549479CD2AC9E42A00E0F78B /* ggml-metal.m in Sources */, F1FE20E22B465ECA00B45541 /* LoadCustomButton.swift in Sources */, - 542EA09D2AC8723900A8AEE9 /* ggml.c in Sources */, 8A907F332AC7138A006146EA /* LibLlama.swift in Sources */, - 542EA0A32AC8729100A8AEE9 /* llama.cpp in Sources */, 8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */, 8A1C83792AC328BD0096AF73 /* ContentView.swift in Sources */, 8A1C83772AC328BD0096AF73 /* llama_swiftuiApp.swift in Sources */, 7FA3D2B32B2EA2F600543F92 /* DownloadButton.swift in Sources */, - 542EA0A02AC8725700A8AEE9 /* ggml-alloc.c in Sources */, - 5423760B2B0D9C4B008E6A1C /* ggml-backend.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -399,11 +342,9 @@ isa = XCBuildConfiguration; buildSettings = { ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; CLANG_ENABLE_MODULES = YES; CODE_SIGN_STYLE = Automatic; CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"llama.swiftui/Preview Content\""; DEVELOPMENT_TEAM = STLSG3FG8Q; ENABLE_PREVIEWS = YES; GENERATE_INFOPLIST_FILE = YES; @@ -423,7 +364,6 @@ SUPPORTED_PLATFORMS = "iphoneos iphonesimulator xros xrsimulator"; SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = NO; SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_OBJC_BRIDGING_HEADER = "llama.cpp.swift/bridging-header.h"; SWIFT_OPTIMIZATION_LEVEL = "-Onone"; SWIFT_VERSION = 5.0; TARGETED_DEVICE_FAMILY = "1,2,7"; @@ -434,11 +374,9 @@ isa = XCBuildConfiguration; buildSettings = { ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; - ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; CLANG_ENABLE_MODULES = YES; CODE_SIGN_STYLE = Automatic; CURRENT_PROJECT_VERSION = 1; - DEVELOPMENT_ASSET_PATHS = "\"llama.swiftui/Preview Content\""; DEVELOPMENT_TEAM = STLSG3FG8Q; ENABLE_PREVIEWS = YES; GENERATE_INFOPLIST_FILE = YES; @@ -458,7 +396,6 @@ SUPPORTED_PLATFORMS = "iphoneos iphonesimulator xros xrsimulator"; SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = NO; SWIFT_EMIT_LOC_STRINGS = YES; - SWIFT_OBJC_BRIDGING_HEADER = "llama.cpp.swift/bridging-header.h"; SWIFT_VERSION = 5.0; TARGETED_DEVICE_FAMILY = "1,2,7"; }; @@ -486,6 +423,13 @@ defaultConfigurationName = Release; }; /* End XCConfigurationList section */ + +/* Begin XCSwiftPackageProductDependency section */ + DF810E122B4A5BA200301144 /* llama */ = { + isa = XCSwiftPackageProductDependency; + productName = llama; + }; +/* End XCSwiftPackageProductDependency section */ }; rootObject = 8A1C836B2AC328BD0096AF73 /* Project object */; } diff --git a/examples/llama.swiftui/llama.swiftui/Assets.xcassets/AccentColor.colorset/Contents.json b/examples/llama.swiftui/llama.swiftui/Assets.xcassets/AccentColor.colorset/Contents.json deleted file mode 100644 
index eb8789700..000000000 --- a/examples/llama.swiftui/llama.swiftui/Assets.xcassets/AccentColor.colorset/Contents.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "colors" : [ - { - "idiom" : "universal" - } - ], - "info" : { - "author" : "xcode", - "version" : 1 - } -} diff --git a/examples/llama.swiftui/llama.swiftui/Preview Content/Preview Assets.xcassets/Contents.json b/examples/llama.swiftui/llama.swiftui/Preview Content/Preview Assets.xcassets/Contents.json deleted file mode 100644 index 73c00596a..000000000 --- a/examples/llama.swiftui/llama.swiftui/Preview Content/Preview Assets.xcassets/Contents.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "info" : { - "author" : "xcode", - "version" : 1 - } -} From 3c36213df850a2353e95572b3636797c79b7c815 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 7 Jan 2024 11:21:53 +0200 Subject: [PATCH 06/10] llama : remove redundant GQA check (#4796) --- llama.cpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/llama.cpp b/llama.cpp index 06db40303..021e79a8f 100644 --- a/llama.cpp +++ b/llama.cpp @@ -4776,7 +4776,6 @@ struct llm_build_context { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_gqa == n_embd); struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -4900,7 +4899,6 @@ struct llm_build_context { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_gqa == n_embd); struct ggml_tensor * cur; struct ggml_tensor * pos; @@ -5001,7 +4999,6 @@ struct llm_build_context { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_gqa == n_embd); const int64_t n_rot = n_embd_head_k / 2; @@ -5215,7 +5212,6 @@ struct llm_build_context { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_gqa == n_embd); struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -5308,7 +5304,6 @@ struct llm_build_context { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_gqa == n_embd); struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -5404,7 +5399,6 @@ struct llm_build_context { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_gqa == n_embd); struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -5731,7 +5725,6 @@ struct llm_build_context { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_gqa == n_embd); struct ggml_tensor * cur; struct ggml_tensor * attn_norm_output; @@ -5955,7 +5948,6 @@ struct llm_build_context { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_gqa == n_embd); struct ggml_tensor * cur; struct ggml_tensor * pos; From 9dede37d812604897496dd9d276ae9fbe13d1042 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 7 Jan 2024 14:29:36 +0200 Subject: [PATCH 07/10] llama : 
remove unused vars (#4796)

---
 llama.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 021e79a8f..91aa3f8e7 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4997,7 +4997,6 @@ struct llm_build_context {
         struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
-        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
 
         const int64_t n_rot = n_embd_head_k / 2;
@@ -5210,7 +5209,6 @@ struct llm_build_context {
         struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
-        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
 
         struct ggml_tensor * cur;

From d5a410e8556191672465f7ff58682ea2474038b0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?=
Date: Sun, 7 Jan 2024 17:24:08 +0100
Subject: [PATCH 08/10] CUDA: fixed redundant value dequantization (#4809)

---
 ggml-cuda.cu | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 54b266be4..2df64b111 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -1872,14 +1872,6 @@ static __device__ void convert_f16(const void * vx, const int ib, const int iqs,
     v.y = x[ib + iqs + 1];
 }
 
-static __device__ void convert_f32(const void * vx, const int ib, const int iqs, dfloat2 & v){
-    const float * x = (const float *) vx;
-
-    // automatic half -> float type cast if dfloat == float
-    v.x = x[ib + iqs + 0];
-    v.y = x[ib + iqs + 1];
-}
-
 static __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) {
     const int ix = blockDim.x*blockIdx.x + threadIdx.x;
 
@@ -1983,7 +1975,7 @@ static __global__ void k_get_rows_float(
 
 template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
 static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) {
-    const int i = blockDim.x*blockIdx.x + 2*threadIdx.x;
+    const int i = 2*(blockDim.x*blockIdx.x + threadIdx.x);
 
     if (i >= k) {
         return;
@@ -2002,6 +1994,19 @@ static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __
     y[iybs + iqs + y_offset] = v.y;
 }
 
+template <typename src_t, typename dst_t>
+static __global__ void convert_unary(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) {
+    const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+    if (i >= k) {
+        return;
+    }
+
+    const src_t * x = (src_t *) vx;
+
+    y[i] = x[i];
+}
+
 // VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called
 // MMVQ = mul_mat_vec_q, MMQ = mul_mat_q
 
@@ -5609,7 +5614,7 @@ static void quantize_row_q8_1_cuda(const float * x, void * vy, const int kx, con
 
 template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
 static void dequantize_block_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int k, cudaStream_t stream) {
-    const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
+    const int num_blocks = (k + 2*CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / (2*CUDA_DEQUANTIZE_BLOCK_SIZE);
     dequantize_block<qk, qr, dequantize_kernel><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
 }
 
@@ -5659,6 +5664,12 @@ static void dequantize_row_q6_K_cuda(const void * vx, dst_t * y, const int k, cu
 #endif
 }
 
+template <typename src_t, typename dst_t>
+static void convert_unary_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int k, cudaStream_t stream) {
+    const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
+    convert_unary<src_t><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
+}
+
 static to_fp16_cuda_t 
ggml_get_to_fp16_cuda(ggml_type type) {
     switch (type) {
         case GGML_TYPE_Q4_0:
@@ -5682,7 +5693,7 @@ static to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
         case GGML_TYPE_Q6_K:
             return dequantize_row_q6_K_cuda;
         case GGML_TYPE_F32:
-            return dequantize_block_cuda<1, 1, convert_f32>;
+            return convert_unary_cuda<float>;
         default:
             return nullptr;
     }
@@ -5711,7 +5722,7 @@ static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
         case GGML_TYPE_Q6_K:
             return dequantize_row_q6_K_cuda;
         case GGML_TYPE_F16:
-            return dequantize_block_cuda<1, 1, convert_f16>;
+            return convert_unary_cuda<half>;
         default:
             return nullptr;
     }

From 226460cc0d5b185bc6685fb76f418fd9418d7add Mon Sep 17 00:00:00 2001
From: slaren
Date: Sun, 7 Jan 2024 17:59:01 +0100
Subject: [PATCH 09/10] llama-bench : add no-kv-offload parameter (#4812)

---
 examples/llama-bench/llama-bench.cpp | 34 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 31 insertions(+), 3 deletions(-)

diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 6617c050d..7f7186cde 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -138,6 +138,7 @@ struct cmd_params {
     std::vector<int> n_threads;
     std::vector<int> n_gpu_layers;
     std::vector<int> main_gpu;
+    std::vector<bool> no_kv_offload;
     std::vector<bool> mul_mat_q;
     std::vector<std::array<float, LLAMA_MAX_DEVICES>> tensor_split;
     int reps;
@@ -155,6 +156,7 @@ static const cmd_params cmd_params_defaults = {
    /* n_threads     */ {get_num_physical_cores()},
    /* n_gpu_layers  */ {99},
    /* main_gpu      */ {0},
+   /* no_kv_offload */ {false},
    /* mul_mat_q     */ {true},
    /* tensor_split  */ {{}},
    /* reps          */ 5,
@@ -176,6 +178,7 @@ static void print_usage(int /* argc */, char ** argv) {
     printf("  -t, --threads <n>                 (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
     printf("  -ngl, --n-gpu-layers <n>          (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
     printf("  -mg, --main-gpu <i>               (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
+    printf("  -nkvo, --no-kv-offload <0|1>      (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
     printf("  -mmq, --mul-mat-q <0|1>           (default: %s)\n", join(cmd_params_defaults.mul_mat_q, ",").c_str());
     printf("  -ts, --tensor_split <ts0/ts1/..>  \n");
     printf("  -r, --repetitions <n>             (default: %d)\n", cmd_params_defaults.reps);
@@ -309,6 +312,13 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
                 break;
             }
             params.main_gpu = split<int>(argv[i], split_delim);
+        } else if (arg == "-nkvo" || arg == "--no-kv-offload") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            auto p = split<bool>(argv[i], split_delim);
+            params.no_kv_offload.insert(params.no_kv_offload.end(), p.begin(), p.end());
         } else if (arg == "-mmq" || arg == "--mul-mat-q") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -383,6 +393,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
     if (params.type_v.empty())      { params.type_v = cmd_params_defaults.type_v; }
     if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; }
     if (params.main_gpu.empty())    { params.main_gpu = cmd_params_defaults.main_gpu; }
+    if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
     if (params.mul_mat_q.empty())   { params.mul_mat_q = cmd_params_defaults.mul_mat_q; }
     if (params.tensor_split.empty()) { params.tensor_split = cmd_params_defaults.tensor_split; }
     if (params.n_threads.empty())   { params.n_threads = cmd_params_defaults.n_threads; }
@@ -400,6 +411,7 @@ struct cmd_params_instance {
     int n_threads;
     int n_gpu_layers;
     int main_gpu;
+    bool no_kv_offload;
     bool 
mul_mat_q;
     std::array<float, LLAMA_MAX_DEVICES> tensor_split;
 
@@ -428,6 +440,7 @@ struct cmd_params_instance {
         cparams.type_k = type_k;
         cparams.type_v = type_v;
         cparams.mul_mat_q = mul_mat_q;
+        cparams.offload_kqv = !no_kv_offload;
 
         return cparams;
     }
@@ -444,6 +457,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances_int(const cmd_p
     for (const auto & tk : params.type_k)
     for (const auto & tv : params.type_v)
     for (const auto & mmq : params.mul_mat_q)
+    for (const auto & nkvo : params.no_kv_offload)
     for (const auto & nt : params.n_threads) {
         cmd_params_instance instance = {
             /* .model        = */ m,
@@ -455,6 +469,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances_int(const cmd_p
             /* .n_threads    = */ nt,
             /* .n_gpu_layers = */ nl,
             /* .main_gpu     = */ mg,
+            /* .no_kv_offload= */ nkvo,
             /* .mul_mat_q    = */ mmq,
             /* .tensor_split = */ ts,
         };
@@ -476,6 +491,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
     for (const auto & tk : params.type_k)
     for (const auto & tv : params.type_v)
     for (const auto & mmq : params.mul_mat_q)
+    for (const auto & nkvo : params.no_kv_offload)
     for (const auto & nt : params.n_threads) {
         for (const auto & n_prompt : params.n_prompt) {
             if (n_prompt == 0) {
@@ -491,6 +507,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .n_threads    = */ nt,
                 /* .n_gpu_layers = */ nl,
                 /* .main_gpu     = */ mg,
+                /* .no_kv_offload= */ nkvo,
                 /* .mul_mat_q    = */ mmq,
                 /* .tensor_split = */ ts,
             };
@@ -511,6 +528,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .n_threads    = */ nt,
                 /* .n_gpu_layers = */ nl,
                 /* .main_gpu     = */ mg,
+                /* .no_kv_offload= */ nkvo,
                 /* .mul_mat_q    = */ mmq,
                 /* .tensor_split = */ ts,
             };
@@ -559,6 +577,7 @@ struct test {
     ggml_type type_v;
     int n_gpu_layers;
     int main_gpu;
+    bool no_kv_offload;
     bool mul_mat_q;
     std::array<float, LLAMA_MAX_DEVICES> tensor_split;
     int n_prompt;
@@ -579,6 +598,7 @@ struct test {
         type_v = inst.type_v;
         n_gpu_layers = inst.n_gpu_layers;
        main_gpu = inst.main_gpu;
+        no_kv_offload = inst.no_kv_offload;
        mul_mat_q = inst.mul_mat_q;
        tensor_split = inst.tensor_split;
        n_prompt = inst.n_prompt;
@@ -640,7 +660,8 @@ struct test {
            "cpu_info", "gpu_info",
            "model_filename", "model_type", "model_size", "model_n_params",
            "n_batch", "n_threads", "type_k", "type_v",
-            "n_gpu_layers", "main_gpu", "mul_mat_q", "tensor_split",
+            "n_gpu_layers", "main_gpu", "no_kv_offload",
+            "mul_mat_q", "tensor_split",
            "n_prompt", "n_gen", "test_time",
            "avg_ns", "stddev_ns",
            "avg_ts", "stddev_ts"
@@ -659,7 +680,7 @@ struct test {
            return INT;
        }
        if (field == "cuda" || field == "opencl" || field == "metal" || field == "gpu_blas" || field == "blas" ||
-            field == "f16_kv" || field == "mul_mat_q") {
+            field == "f16_kv" || field == "no_kv_offload" || field == "mul_mat_q") {
            return BOOL;
        }
        if (field == "avg_ts" || field == "stddev_ts") {
@@ -690,7 +711,8 @@ struct test {
            cpu_info, gpu_info,
            model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
            std::to_string(n_batch), std::to_string(n_threads), ggml_type_name(type_k), ggml_type_name(type_v),
-            std::to_string(n_gpu_layers), std::to_string(main_gpu), std::to_string(mul_mat_q), tensor_split_str,
+            std::to_string(n_gpu_layers), std::to_string(main_gpu), std::to_string(no_kv_offload),
+            std::to_string(mul_mat_q), tensor_split_str,
            std::to_string(n_prompt), std::to_string(n_gen), test_time,
            std::to_string(avg_ns()), std::to_string(stdev_ns()),
            std::to_string(avg_ts()), std::to_string(stdev_ts())
@@ -851,6 +873,9 @@ struct markdown_printer : public printer {
         if (field == "mul_mat_q") {
             return "mmq";
         }
+        if (field == "no_kv_offload") {
+            return 
"nkvo"; + } if (field == "tensor_split") { return "ts"; } @@ -885,6 +910,9 @@ struct markdown_printer : public printer { if (params.mul_mat_q.size() > 1 || params.mul_mat_q != cmd_params_defaults.mul_mat_q) { fields.push_back("mul_mat_q"); } + if (params.no_kv_offload.size() > 1 || params.no_kv_offload != cmd_params_defaults.no_kv_offload) { + fields.push_back("no_kv_offload"); + } if (params.tensor_split.size() > 1 || params.tensor_split != cmd_params_defaults.tensor_split) { fields.push_back("tensor_split"); } From b7e7982953f80a656e03feb5cfb17a17a173eb26 Mon Sep 17 00:00:00 2001 From: Lars Grammel Date: Sun, 7 Jan 2024 21:24:11 +0100 Subject: [PATCH 10/10] readme : add lgrammel/modelfusion JS/TS client for llama.cpp (#4814) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index ca6d14e17..2f6e6ffee 100644 --- a/README.md +++ b/README.md @@ -118,6 +118,7 @@ as the main playground for developing new features for the [ggml](https://github - Python: [abetlen/llama-cpp-python](https://github.com/abetlen/llama-cpp-python) - Go: [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp) - Node.js: [withcatai/node-llama-cpp](https://github.com/withcatai/node-llama-cpp) +- JS/TS (llama.cpp server client): [lgrammel/modelfusion](https://modelfusion.dev/integration/model-provider/llamacpp) - Ruby: [yoshoku/llama_cpp.rb](https://github.com/yoshoku/llama_cpp.rb) - Rust: [mdrokz/rust-llama.cpp](https://github.com/mdrokz/rust-llama.cpp) - C#/.NET: [SciSharp/LLamaSharp](https://github.com/SciSharp/LLamaSharp)