Mirror of https://github.com/ggerganov/llama.cpp.git
vulkan: Throttle the number of shader compiles during the build step. (#10222)
Fixes #9582

Spawning too many concurrent copies of glslc leads to "Failed to create pipes" errors on Linux. This change applies the same throttling we use for multithreaded pipeline creation.
parent b0cefea58a
commit 54ef9cfc72
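The fix is a small counting throttle built from a mutex and a condition variable: before a new glslc compile is launched via std::async, the launcher waits until fewer than 16 compiles are in flight, and every compile decrements the counter and notifies waiters when it finishes. Below is a minimal, standalone sketch of that pattern, assuming nothing beyond the C++ standard library; the helper name run_throttled and the constant kMaxInFlight are illustrative and not part of the patch.

    // Sketch of the throttling pattern applied by this commit: a shared counter
    // guarded by a mutex and condition_variable caps the number of std::async
    // jobs running at once. Identifiers here are illustrative, not from the patch.
    #include <condition_variable>
    #include <cstdint>
    #include <cstdio>
    #include <future>
    #include <mutex>
    #include <vector>

    static const uint32_t          kMaxInFlight = 16; // arbitrary cap, as in the patch
    static uint32_t                in_flight    = 0;
    static std::mutex              in_flight_mutex;
    static std::condition_variable in_flight_cond;

    template <typename F>
    std::future<void> run_throttled(F f) {
        {
            // Block the caller until a slot frees up, then claim it.
            std::unique_lock<std::mutex> lock(in_flight_mutex);
            in_flight_cond.wait(lock, [] { return in_flight < kMaxInFlight; });
            in_flight++;
        }
        return std::async(std::launch::async, [f] {
            f();
            {
                std::lock_guard<std::mutex> lock(in_flight_mutex);
                in_flight--;
            }
            in_flight_cond.notify_all(); // wake launchers waiting for a slot
        });
    }

    int main() {
        std::vector<std::future<void>> jobs;
        for (int i = 0; i < 100; i++) {
            jobs.push_back(run_throttled([i] { std::printf("job %d\n", i); }));
        }
        for (auto& j : jobs) {
            j.wait();
        }
    }

In the actual change, string_to_spv() does the waiting and bookkeeping and queues string_to_spv_func() on a static compiles vector, which process_shaders() drains at the end, so main() no longer manages a task list itself.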
@@ -16,6 +16,7 @@
 #include <cstdio>
 #include <cstring>
 #include <cstdlib>
+#include <cassert>
 #include <sys/stat.h>
 #include <sys/types.h>
 
@@ -92,11 +93,11 @@ void execute_command(const std::string& command, std::string& stdout_str, std::s
     std::array<char, 128> buffer;
     DWORD bytes_read;
 
-    while (ReadFile(stdout_read, buffer.data(), buffer.size(), &bytes_read, NULL) && bytes_read > 0) {
+    while (ReadFile(stdout_read, buffer.data(), (DWORD)buffer.size(), &bytes_read, NULL) && bytes_read > 0) {
         stdout_str.append(buffer.data(), bytes_read);
     }
 
-    while (ReadFile(stderr_read, buffer.data(), buffer.size(), &bytes_read, NULL) && bytes_read > 0) {
+    while (ReadFile(stderr_read, buffer.data(), (DWORD)buffer.size(), &bytes_read, NULL) && bytes_read > 0) {
         stderr_str.append(buffer.data(), bytes_read);
     }
 
@@ -190,7 +191,12 @@ std::string basename(const std::string &path) {
     return path.substr(path.find_last_of("/\\") + 1);
 }
 
-void string_to_spv(const std::string& _name, const std::string& in_fname, const std::map<std::string, std::string>& defines, bool fp16 = true) {
+// variables to track number of compiles in progress
+static uint32_t compile_count = 0;
+static std::mutex compile_count_mutex;
+static std::condition_variable compile_count_cond;
+
+void string_to_spv_func(const std::string& _name, const std::string& in_fname, const std::map<std::string, std::string>& defines, bool fp16 = true) {
     std::string name = _name + (fp16 ? "" : "_fp32");
     std::string out_fname = join_paths(output_dir, name + ".spv");
     std::string in_path = join_paths(input_dir, in_fname);
@@ -233,6 +239,12 @@ void string_to_spv(const std::string& _name, const std::string& in_fname, const
     } catch (const std::exception& e) {
         std::cerr << "Error executing command for " << name << ": " << e.what() << std::endl;
     }
+    {
+        std::lock_guard<std::mutex> guard(compile_count_mutex);
+        assert(compile_count > 0);
+        compile_count--;
+    }
+    compile_count_cond.notify_all();
 }
 
 std::map<std::string, std::string> merge_maps(const std::map<std::string, std::string>& a, const std::map<std::string, std::string>& b) {
@@ -241,7 +253,22 @@ std::map<std::string, std::string> merge_maps(const std::map<std::string, std::s
     return result;
 }
 
-void matmul_shaders(std::vector<std::future<void>>& tasks, bool fp16, bool matmul_id) {
+static std::vector<std::future<void>> compiles;
+void string_to_spv(const std::string& _name, const std::string& in_fname, const std::map<std::string, std::string>& defines, bool fp16 = true) {
+    {
+        // wait until fewer than N compiles are in progress.
+        // 16 is an arbitrary limit, the goal is to avoid "failed to create pipe" errors.
+        uint32_t N = 16;
+        std::unique_lock<std::mutex> guard(compile_count_mutex);
+        while (compile_count >= N) {
+            compile_count_cond.wait(guard);
+        }
+        compile_count++;
+    }
+    compiles.push_back(std::async(string_to_spv_func, _name, in_fname, defines, fp16));
+}
+
+void matmul_shaders(bool fp16, bool matmul_id) {
     std::string load_vec = fp16 ? "8" : "4";
     std::string aligned_b_type_f32 = fp16 ? "mat2x4" : "vec4";
     std::string aligned_b_type_f16 = fp16 ? "f16mat2x4" : "f16vec4";
@@ -259,19 +286,11 @@ void matmul_shaders(std::vector<std::future<void>>& tasks, bool fp16, bool matmu
     }
 
     // Shaders with f16 B_TYPE
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv(shader_name + "_f32_f16", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16);
-    }));
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv(shader_name + "_f32_f16_aligned", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}}), fp16);
-    }));
+    string_to_spv(shader_name + "_f32_f16", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16);
+    string_to_spv(shader_name + "_f32_f16_aligned", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}}), fp16);
 
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv(shader_name + "_f16", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16);
-    }));
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv(shader_name + "_f16_aligned", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}}), fp16);
-    }));
+    string_to_spv(shader_name + "_f16", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16);
+    string_to_spv(shader_name + "_f16_aligned", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}}), fp16);
 
     for (const auto& tname : type_names) {
         std::string data_a_key = "DATA_A_" + to_uppercase(tname);
@@ -279,22 +298,18 @@ void matmul_shaders(std::vector<std::future<void>>& tasks, bool fp16, bool matmu
         std::string load_vec_a_unaligned = (tname == "f32" || tname == "f16") ? "1" : "2";
         // For aligned matmul loads
        std::string load_vec_a = (tname == "f32" || tname == "f16") ? load_vec : "2";
-        tasks.push_back(std::async(std::launch::async, [=] {
-            string_to_spv(shader_name + "_" + tname + "_f32", "mul_mm.comp", merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}), fp16);
-        }));
-        tasks.push_back(std::async(std::launch::async, [=] {
-            string_to_spv(shader_name + "_" + tname + "_f32_aligned", "mul_mm.comp", merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f32}, {"D_TYPE", "float"}}), fp16);
-        }));
+        string_to_spv(shader_name + "_" + tname + "_f32", "mul_mm.comp", merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}), fp16);
+        string_to_spv(shader_name + "_" + tname + "_f32_aligned", "mul_mm.comp", merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f32}, {"D_TYPE", "float"}}), fp16);
     }
 }
 
-void process_shaders(std::vector<std::future<void>>& tasks) {
+void process_shaders() {
     std::cout << "ggml_vulkan: Generating and compiling shaders to SPIR-V" << std::endl;
     std::map<std::string, std::string> base_dict = {{"FLOAT_TYPE", "float"}};
 
     for (const auto& fp16 : {false, true}) {
-        matmul_shaders(tasks, fp16, false);
-        matmul_shaders(tasks, fp16, true);
+        matmul_shaders(fp16, false);
+        matmul_shaders(fp16, true);
     }
 
     for (const auto& tname : type_names) {
@@ -302,201 +317,103 @@ void process_shaders(std::vector<std::future<void>>& tasks) {
         std::string data_a_key = "DATA_A_" + to_uppercase(tname);
         std::string shader = (string_ends_with(tname, "_k")) ? "mul_mat_vec_" + tname + ".comp" : "mul_mat_vec.comp";
 
-        tasks.push_back(std::async(std::launch::async, [=] {
-            string_to_spv("mul_mat_vec_" + tname + "_f32_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}));
-        }));
-        tasks.push_back(std::async(std::launch::async, [=] {
-            string_to_spv("mul_mat_vec_" + tname + "_f16_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}));
-        }));
+        string_to_spv("mul_mat_vec_" + tname + "_f32_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}));
+        string_to_spv("mul_mat_vec_" + tname + "_f16_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}));
 
-        tasks.push_back(std::async(std::launch::async, [=] {
-            string_to_spv("mul_mat_vec_id_" + tname + "_f32", shader, merge_maps(base_dict, {{"MUL_MAT_ID", "1"}, {data_a_key, "1"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}));
-        }));
+        string_to_spv("mul_mat_vec_id_" + tname + "_f32", shader, merge_maps(base_dict, {{"MUL_MAT_ID", "1"}, {data_a_key, "1"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}));
 
         // Dequant shaders
         if (tname != "f16") {
-            tasks.push_back(std::async(std::launch::async, [=] {
-                string_to_spv("dequant_" + tname, "dequant_" + tname + ".comp", merge_maps(base_dict, {{data_a_key, "1"}, {"D_TYPE", "float16_t"}}));
-            }));
+            string_to_spv("dequant_" + tname, "dequant_" + tname + ".comp", merge_maps(base_dict, {{data_a_key, "1"}, {"D_TYPE", "float16_t"}}));
        }
 
         if (!string_ends_with(tname, "_k")) {
             shader = (tname == "f32" || tname == "f16") ? "get_rows.comp" : "get_rows_quant.comp";
 
             if (tname == "f16") {
-                tasks.push_back(std::async(std::launch::async, [=] {
-                    string_to_spv("get_rows_" + tname, shader, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
-                }));
+                string_to_spv("get_rows_" + tname, shader, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
             } else {
-                tasks.push_back(std::async(std::launch::async, [=] {
-                    string_to_spv("get_rows_" + tname, shader, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}});
-                }));
+                string_to_spv("get_rows_" + tname, shader, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}});
             }
-            tasks.push_back(std::async(std::launch::async, [=] {
-                string_to_spv("get_rows_" + tname + "_f32", shader, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float"}});
-            }));
+            string_to_spv("get_rows_" + tname + "_f32", shader, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float"}});
         }
     }
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("mul_mat_vec_p021_f16_f32", "mul_mat_vec_p021.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("mul_mat_vec_nc_f16_f32", "mul_mat_vec_nc.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
+    string_to_spv("mul_mat_vec_p021_f16_f32", "mul_mat_vec_p021.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("mul_mat_vec_nc_f16_f32", "mul_mat_vec_nc.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
 
     // Norms
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("norm_f32", "norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
-    }));
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("group_norm_f32", "group_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
-    }));
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("rms_norm_f32", "rms_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
-    }));
+    string_to_spv("norm_f32", "norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+    string_to_spv("group_norm_f32", "group_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+    string_to_spv("rms_norm_f32", "rms_norm.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("cpy_f32_f32", "copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("cpy_f32_f16", "copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("cpy_f16_f16", "copy.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
-    }));
+    string_to_spv("cpy_f32_f32", "copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("cpy_f32_f16", "copy.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}});
+    string_to_spv("cpy_f16_f16", "copy.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("add_f32", "add.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("add_f16_f32_f16", "add.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"FLOAT_TYPE", "float"}});
-    }));
+    string_to_spv("add_f32", "add.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
+    string_to_spv("add_f16_f32_f16", "add.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"FLOAT_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("acc_f32", "acc.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
-    }));
+    string_to_spv("acc_f32", "acc.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("split_k_reduce", "mul_mat_split_k_reduce.comp", {});
-    }));
+    string_to_spv("split_k_reduce", "mul_mat_split_k_reduce.comp", {});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("mul_f32", "mul.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
-    }));
+    string_to_spv("mul_f32", "mul.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("div_f32", "div.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
-    }));
+    string_to_spv("div_f32", "div.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("repeat_f32", "repeat.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
+    string_to_spv("repeat_f32", "repeat.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("scale_f32", "scale.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
-    }));
+    string_to_spv("scale_f32", "scale.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("sqr_f32", "square.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
-    }));
+    string_to_spv("sqr_f32", "square.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("sin_f32", "sin.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
-    }));
+    string_to_spv("sin_f32", "sin.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("cos_f32", "cos.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
-    }));
+    string_to_spv("cos_f32", "cos.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("clamp_f32", "clamp.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
-    }));
+    string_to_spv("clamp_f32", "clamp.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}, {"FLOAT_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("pad_f32", "pad.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
+    string_to_spv("pad_f32", "pad.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("concat_f32", "concat.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("concat_f16", "concat.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("concat_i32", "concat.comp", {{"A_TYPE", "int"}, {"B_TYPE", "int"}, {"D_TYPE", "int"}});
-    }));
+    string_to_spv("concat_f32", "concat.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("concat_f16", "concat.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}});
+    string_to_spv("concat_i32", "concat.comp", {{"A_TYPE", "int"}, {"B_TYPE", "int"}, {"D_TYPE", "int"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("upscale_f32", "upscale.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
+    string_to_spv("upscale_f32", "upscale.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("gelu_f32", "gelu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("gelu_quick_f32", "gelu_quick.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("silu_f32", "silu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("relu_f32", "relu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("leaky_relu_f32", "leaky_relu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("tanh_f32", "tanh.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
+    string_to_spv("gelu_f32", "gelu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("gelu_quick_f32", "gelu_quick.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("silu_f32", "silu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("relu_f32", "relu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("leaky_relu_f32", "leaky_relu.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("tanh_f32", "tanh.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("diag_mask_inf_f32", "diag_mask_inf.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
+    string_to_spv("diag_mask_inf_f32", "diag_mask_inf.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("soft_max_f32", "soft_max.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}));
-    }));
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("soft_max_f32_f16", "soft_max.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}));
-    }));
+    string_to_spv("soft_max_f32", "soft_max.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}));
+    string_to_spv("soft_max_f32_f16", "soft_max.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}));
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("rope_norm_f32", "rope_norm.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("rope_norm_f16", "rope_norm.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
-    }));
+    string_to_spv("rope_norm_f32", "rope_norm.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("rope_norm_f16", "rope_norm.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("rope_neox_f32", "rope_neox.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
-    }));
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("rope_neox_f16", "rope_neox.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
-    }));
+    string_to_spv("rope_neox_f32", "rope_neox.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});
+    string_to_spv("rope_neox_f16", "rope_neox.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}});
 
-    tasks.push_back(std::async(std::launch::async, [] {
-        string_to_spv("argsort_f32", "argsort.comp", {{"A_TYPE", "float"}});
-    }));
+    string_to_spv("argsort_f32", "argsort.comp", {{"A_TYPE", "float"}});
 
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("sum_rows_f32", "sum_rows.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
-    }));
+    string_to_spv("sum_rows_f32", "sum_rows.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
 
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("im2col_f32", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
-    }));
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("im2col_f32_f16", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}}));
-    }));
+    string_to_spv("im2col_f32", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+    string_to_spv("im2col_f32_f16", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}}));
 
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("timestep_embedding_f32", "timestep_embedding.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
-    }));
+    string_to_spv("timestep_embedding_f32", "timestep_embedding.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
 
-    tasks.push_back(std::async(std::launch::async, [=] {
-        string_to_spv("pool2d_f32", "pool2d.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
-    }));
+    string_to_spv("pool2d_f32", "pool2d.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}}));
+    for (auto &c : compiles) {
+        c.wait();
+    }
 }
 
 void write_output_files() {
@@ -591,12 +508,7 @@ int main(int argc, char** argv) {
         }
     }
 
-    std::vector<std::future<void>> tasks;
-    process_shaders(tasks);
-
-    for (auto& task : tasks) {
-        task.get();
-    }
+    process_shaders();
 
     write_output_files();
 