Mirror of https://github.com/ggerganov/llama.cpp.git
SYCL: SOFTMAX F16 mask support and other fixes (#11261)

Implemented ggml_sycl_op_soft_max() F16 src1 (mask) support, for which a pragma deprecation warning was added during #5021. To do this, the op had to be decoupled from ggml_sycl_op_flatten, which always treats src1 as fp32 (many OP functions depend on that assumption).

* SYCL: SOFTMAX F16 mask support and other fixes
* test-backend-ops: Add F16 mask test cases
commit 6e84b0ab8e (parent 2b8525d5c8)
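In essence, the change templates the SYCL softmax kernel on the mask element type and selects the instantiation from the mask tensor's ggml type at the op level (see the kernel hunks below). The following is a minimal CPU-only C++ sketch of that pattern, for illustration only: it is not the SYCL kernel, soft_max_row is a hypothetical helper, and half_t is a placeholder alias (the real F16 path uses sycl::half).

#include <cmath>
#include <cstdio>

// Placeholder alias so the sketch is self-contained; the real F16 mask path uses sycl::half.
using half_t = float;

// Row-wise softmax with an optional additive mask, templated on the mask element type.
// As in the diff, the mask value is converted to float before it is applied.
template <typename T>
static void soft_max_row(const float * x, const T * mask, float * dst,
                         int ncols, float scale, float slope) {
    float max_val = -INFINITY;
    for (int c = 0; c < ncols; ++c) {
        const float val = x[c] * scale + (mask ? slope * static_cast<float>(mask[c]) : 0.0f);
        dst[c] = val;
        max_val = std::fmax(max_val, val);
    }
    float sum = 0.0f;
    for (int c = 0; c < ncols; ++c) {
        dst[c] = std::exp(dst[c] - max_val);
        sum += dst[c];
    }
    for (int c = 0; c < ncols; ++c) {
        dst[c] /= sum;
    }
}

int main() {
    const float  x[4] = {0.5f, 1.0f, -0.5f, 2.0f};
    const half_t m[4] = {0.0f, 0.0f, -INFINITY, 0.0f}; // -inf masks the third position out
    float out[4];

    // Instantiate once per mask precision, mirroring the two dispatch paths in the op.
    soft_max_row<half_t>(x, m, out, 4, 1.0f, 1.0f);
    std::printf("masked:   %f %f %f %f\n", out[0], out[1], out[2], out[3]);

    soft_max_row<float>(x, static_cast<const float *>(nullptr), out, 4, 1.0f, 1.0f);
    std::printf("unmasked: %f %f %f %f\n", out[0], out[1], out[2], out[3]);
    return 0;
}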
@@ -3878,10 +3878,6 @@ static void ggml_sycl_diag_mask_inf(ggml_backend_sycl_context & ctx, ggml_tensor
     ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_diag_mask_inf);
 }
 
-static void ggml_sycl_soft_max(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
-    ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_soft_max);
-}
-
 static void ggml_sycl_rope(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
     GGML_ASSERT(ggml_is_contiguous(dst->src[0])); // TODO: this restriction is temporary until non-cont support is implemented
     ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_rope);
@@ -4090,7 +4086,7 @@ bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tens
             ggml_sycl_diag_mask_inf(ctx, dst);
             break;
         case GGML_OP_SOFT_MAX:
-            ggml_sycl_soft_max(ctx, dst);
+            ggml_sycl_op_soft_max(ctx, dst);
            break;
         case GGML_OP_ROPE:
             ggml_sycl_rope(ctx, dst);
@@ -1,7 +1,7 @@
-#include "norm.hpp"
+#include "softmax.hpp"
 
-template <bool vals_smem, int ncols_template, int block_size_template>
-static void soft_max_f32(const float * x, const float * mask, float * dst, const int ncols_par,
+template <bool vals_smem, int ncols_template, int block_size_template, typename T>
+static void soft_max_f32(const float * x, const T * mask, float * dst, const int ncols_par,
                          const int nrows_y, const float scale, const float max_bias, const float m0,
                          const float m1, uint32_t n_head_log2, const sycl::nd_item<3> &item_ct1, float *buf) {
     const int ncols = ncols_template == 0 ? ncols_par : ncols_template;
@@ -29,7 +29,7 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const
         slope = sycl::pow(base, float(exp));
     }
 
-    float *vals = vals_smem ? buf + std::max(nwarps, WARP_SIZE) : dst + rowx * ncols;
+    float *vals = vals_smem ? buf + sycl::max(nwarps, WARP_SIZE) : dst + rowx * ncols;
     float max_val = -INFINITY;
 
     for (int col0 = 0; col0 < ncols; col0 += block_size) {
@@ -42,7 +42,7 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const
         const int ix = rowx*ncols + col;
         const int iy = rowy*ncols + col;
 
-        const float val = x[ix]*scale + (mask ? slope*mask[iy] : 0.0f);
+        const float val = x[ix]*scale + (mask ? slope*static_cast<float>(mask[iy]) : 0.0f);
 
         vals[col] = val;
         max_val = sycl::max(max_val, val);
@@ -65,7 +65,7 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const
         item_ct1.barrier(sycl::access::fence_space::local_space);
         max_val = buf[lane_id];
         for (size_t i = 1; i < nreduce; i += 1) {
-            max_val = std::max(max_val, buf[lane_id + i * WARP_SIZE]);
+            max_val = sycl::max(max_val, buf[lane_id + i * WARP_SIZE]);
         }
         max_val = warp_reduce_max(max_val, item_ct1);
     }
@@ -122,8 +122,8 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const
     }
 }
 
-template <bool vals_smem, int ncols_template, int block_size_template>
-static void soft_max_f32_submitter(const float * x, const float * mask, float * dst, const int ncols_par,
+template <bool vals_smem, int ncols_template, int block_size_template, typename T>
+static void soft_max_f32_submitter(const float * x, const T * mask, float * dst, const int ncols_par,
                                    const int nrows_y, const float scale, const float max_bias, const float m0,
                                    const float m1, uint32_t n_head_log2, sycl::range<3> block_nums, sycl::range<3> block_dims,
                                    const size_t n_local_scratch, queue_ptr stream) {
@@ -141,7 +141,8 @@ static void soft_max_f32_submitter(const float * x, const float * mask, float *
         });
 }
 
-static void soft_max_f32_sycl(const float * x, const float * mask,
+template<typename T>
+static void soft_max_f32_sycl(const float * x, const T * mask,
                               float * dst, const int ncols_x, const int nrows_x,
                               const int nrows_y, const float scale, const float max_bias,
                               queue_ptr stream, int device) {
@@ -223,22 +224,16 @@ static void soft_max_f32_sycl(const float * x, const float * mask,
     }
 }
 
-void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
-                           const ggml_tensor *src1, ggml_tensor *dst,
-                           const float *src0_dd, const float *src1_dd,
-                           float *dst_dd,
-                           const queue_ptr &main_stream) {
+void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
 
-    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
-#pragma message("TODO: add ggml_sycl_op_soft_max() F16 src1 support")
-#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021")
-    GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional
+    GGML_ASSERT(!dst->src[1] || dst->src[1]->type == GGML_TYPE_F16 || dst->src[1]->type == GGML_TYPE_F32); // src1 contains mask and it is optional
 
-    const int64_t ne00 = src0->ne[0];
-    const int64_t nrows_x = ggml_nrows(src0);
-    const int64_t nrows_y = src0->ne[1];
+    const int64_t ne00 = dst->src[0]->ne[0];
+    const int64_t nrows_x = ggml_nrows(dst->src[0]);
+    const int64_t nrows_y = dst->src[0]->ne[1];
 
     float scale = 1.0f;
     float max_bias = 0.0f;
@@ -246,6 +241,21 @@ void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, const ggml_tensor *s
     memcpy(&scale, dst->op_params + 0, sizeof(float));
     memcpy(&max_bias, dst->op_params + 1, sizeof(float));
 
-    soft_max_f32_sycl(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00,
-                      nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device);
+    const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+    float * dst_dd = static_cast<float *>(dst->data);
+
+    ggml_sycl_set_device(ctx.device);
+    dpct::queue_ptr main_stream = ctx.stream();
+
+    if (dst->src[1] && dst->src[1]->type == GGML_TYPE_F16) {
+        const sycl::half * src1_dd = static_cast<sycl::half *>(dst->src[1]->data);
+        soft_max_f32_sycl<sycl::half>(src0_dd, src1_dd, dst_dd, ne00, nrows_x, nrows_y, scale, max_bias,
+                                      main_stream, ctx.device);
+    } else if (dst->src[1] && dst->src[1]->type == GGML_TYPE_F32) {
+        const float * src1_dd = static_cast<const float *>(dst->src[1]->data);
+        soft_max_f32_sycl<float>(src0_dd, src1_dd, dst_dd, ne00, nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device);
+    } else {
+        /* mask unavailable */
+        soft_max_f32_sycl<float>(src0_dd, nullptr, dst_dd, ne00, nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device);
+    }
 }
@@ -15,10 +15,6 @@
 
 #include "common.hpp"
 
-void ggml_sycl_op_soft_max(ggml_backend_sycl_context &ctx, const ggml_tensor *src0,
-                           const ggml_tensor *src1, ggml_tensor *dst,
-                           const float *src0_dd, const float *src1_dd,
-                           float *dst_dd,
-                           const queue_ptr &main_stream);
+void ggml_sycl_op_soft_max(ggml_backend_sycl_context &ctx, ggml_tensor *dst);
 
 #endif // GGML_SYCL_SOFTMAX_HPP
@@ -2347,11 +2347,12 @@ struct test_soft_max : public test_case {
     const ggml_type type;
     const std::array<int64_t, 4> ne;
     const bool mask;
+    const ggml_type m_prec;
     const float scale;
     const float max_bias;
 
     std::string vars() override {
-        return VARS_TO_STR5(type, ne, mask, scale, max_bias);
+        return VARS_TO_STR6(type, ne, mask, m_prec, scale, max_bias);
     }
 
     // the 1024 test with bias occasionally fails:
@@ -2363,9 +2364,10 @@ struct test_soft_max : public test_case {
     test_soft_max(ggml_type type = GGML_TYPE_F32,
            std::array<int64_t, 4> ne = {10, 5, 4, 3},
            bool mask = false,
+           ggml_type m_prec = GGML_TYPE_F32,
            float scale = 1.0f,
            float max_bias = 0.0f)
-        : type(type), ne(ne), mask(mask), scale(scale), max_bias(max_bias) {}
+        : type(type), ne(ne), mask(mask), m_prec(m_prec), scale(scale), max_bias(max_bias) {}
 
     ggml_tensor * build_graph(ggml_context * ctx) override {
         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
@@ -2374,7 +2376,7 @@ struct test_soft_max : public test_case {
 
         ggml_tensor * mask = nullptr;
         if (this->mask) {
-            mask = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne[0], ne[1]);
+            mask = ggml_new_tensor_2d(ctx, m_prec, ne[0], ne[1]);
             ggml_set_name(mask, "mask");
         }
 
@@ -4150,17 +4152,28 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
             for (float scale : {1.0f, 0.1f}) {
                 for (int64_t ne0 : {16, 1024}) {
                     for (int64_t ne1 : {16, 1024}) {
-                        test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, scale, max_bias));
-                        test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, scale, max_bias));
+                        if (mask) {
+                            for (ggml_type m_prec : {GGML_TYPE_F32, GGML_TYPE_F16}) {
+                                test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, m_prec, scale, max_bias));
+                                test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, m_prec, scale, max_bias));
+                            }
+                        } else {
+                            /* The precision of mask here doesn't matter as boolean mask is false */
+                            test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, GGML_TYPE_F32, scale, max_bias));
+                            test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, GGML_TYPE_F32, scale, max_bias));
+                        }
                     }
                 }
             }
         }
     }
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, 0.1f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, 0.1f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 8.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, GGML_TYPE_F32, 0.1f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, GGML_TYPE_F16, 0.1f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, GGML_TYPE_F32, 0.1f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F32, 0.1f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F16, 0.1f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F32, 0.1f, 8.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F16, 0.1f, 8.0f));
 
     for (float max_bias : {0.0f, 8.0f}) {
         for (float scale : {1.0f, 0.1f}) {
@@ -4296,13 +4309,13 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() {
     test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {8192, 512, 2, 1}, {0, 2, 1, 3}));
     test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {3072, 512, 2, 1}, {0, 2, 1, 3}));
 
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {4096, 4096, 5, 1}, false, 1.0f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 4096, 5, 1}, false, 1.0f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {1024, 1024, 10, 1}, false, 1.0f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 1024, 10, 1}, false, 1.0f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {256, 256, 20, 1}, false, 1.0f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {64, 64, 20, 1}, false, 1.0f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 64, 20, 1}, false, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {4096, 4096, 5, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 4096, 5, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {1024, 1024, 10, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 1024, 10, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {256, 256, 20, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {64, 64, 20, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 64, 20, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f));
 
     test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32, 10, 1, 1}));
     test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1}));