mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2025-01-27 20:43:07 +01:00
d08c20edde
* use warp_size macro for all sycl kernels * fix mask of permute_sub_group_by_xor * fix rms_norm with correct warp number * fix rms_norm_f32/group_norm_f32 * move norm to norm.cpp file * fix quantize bug * fix mmvq's batch size
36 lines
1.1 KiB
C++
36 lines
1.1 KiB
C++
//
// MIT license
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: MIT
//

//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#ifndef GGML_SYCL_NORM_HPP
#define GGML_SYCL_NORM_HPP

#include "common.hpp"

// Declarations for the SYCL normalization operators (definitions live in
// norm.cpp). All three share the same calling convention:
//   ctx               - SYCL backend context holding device/queue state
//   src0, src1        - input tensor descriptors (src1 may be unused by the op;
//                       NOTE(review): confirm against the definitions in norm.cpp)
//   dst               - output tensor descriptor
//   src0_dd, src1_dd  - device pointers to the f32 input data
//   dst_dd            - device pointer to the f32 output data
//   main_stream       - SYCL queue the kernels are submitted to

// Layer/standard normalization over the input tensor.
void ggml_sycl_op_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
                       const ggml_tensor* src1, ggml_tensor* dst,
                       const float* src0_dd, const float* src1_dd,
                       float* dst_dd, const queue_ptr& main_stream);

// RMS normalization (normalizes by root-mean-square, no mean subtraction).
void ggml_sycl_op_rms_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
                           const ggml_tensor* src1, ggml_tensor* dst,
                           const float* src0_dd, const float* src1_dd,
                           float* dst_dd, const queue_ptr& main_stream);

// Group normalization (normalizes within channel groups).
void ggml_sycl_op_group_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0,
                             const ggml_tensor* src1, ggml_tensor* dst,
                             const float* src0_dd, const float* src1_dd,
                             float* dst_dd, const queue_ptr& main_stream);

#endif // GGML_SYCL_NORM_HPP