#include "llama-model.h"

#include "llama-impl.h"

#include <algorithm>
#include <cassert>
#include <stdexcept>

const char * llm_type_name(llm_type type) {
    switch (type) {
        case MODEL_14M: return "14M";
        case MODEL_17M: return "17M";
        case MODEL_22M: return "22M";
        case MODEL_33M: return "33M";
        case MODEL_60M: return "60M";
        case MODEL_70M: return "70M";
        case MODEL_80M: return "80M";
        case MODEL_109M: return "109M";
        case MODEL_137M: return "137M";
        case MODEL_160M: return "160M";
        case MODEL_220M: return "220M";
        case MODEL_250M: return "250M";
        case MODEL_270M: return "270M";
        case MODEL_335M: return "335M";
        case MODEL_410M: return "410M";
        case MODEL_450M: return "450M";
        case MODEL_770M: return "770M";
        case MODEL_780M: return "780M";
        case MODEL_0_5B: return "0.5B";
        case MODEL_1B: return "1B";
        case MODEL_1_3B: return "1.3B";
        case MODEL_1_4B: return "1.4B";
        case MODEL_1_5B: return "1.5B";
        case MODEL_1_6B: return "1.6B";
        case MODEL_2B: return "2B";
        case MODEL_2_8B: return "2.8B";
        case MODEL_3B: return "3B";
        case MODEL_4B: return "4B";
        case MODEL_6B: return "6B";
        case MODEL_6_9B: return "6.9B";
        case MODEL_7B: return "7B";
        case MODEL_8B: return "8B";
        case MODEL_9B: return "9B";
        case MODEL_11B: return "11B";
        case MODEL_12B: return "12B";
        case MODEL_13B: return "13B";
        case MODEL_14B: return "14B";
        case MODEL_15B: return "15B";
        case MODEL_16B: return "16B";
        case MODEL_20B: return "20B";
        case MODEL_30B: return "30B";
        case MODEL_32B: return "32B";
        case MODEL_34B: return "34B";
        case MODEL_35B: return "35B";
        case MODEL_40B: return "40B";
        case MODEL_65B: return "65B";
        case MODEL_70B: return "70B";
        case MODEL_236B: return "236B";
        case MODEL_314B: return "314B";
        case MODEL_SMALL: return "0.1B";
        case MODEL_MEDIUM: return "0.4B";
        case MODEL_LARGE: return "0.8B";
        case MODEL_XL: return "1.5B";
        case MODEL_A1_7B: return "A1.7B";
        case MODEL_A2_7B: return "A2.7B";
        case MODEL_8x7B: return "8x7B";
        case MODEL_8x22B: return "8x22B";
        case MODEL_16x12B: return "16x12B";
        case MODEL_10B_128x3_66B: return "10B+128x3.66B";
        case MODEL_57B_A14B: return "57B.A14B";
        case MODEL_27B: return "27B";
        default: return "?B";
    }
}

static std::string llama_model_ftype_name(llama_ftype ftype) {
    if (ftype & LLAMA_FTYPE_GUESSED) {
        return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
    }

    switch (ftype) {
        case LLAMA_FTYPE_ALL_F32: return "all F32";
        case LLAMA_FTYPE_MOSTLY_F16: return "F16";
        case LLAMA_FTYPE_MOSTLY_BF16: return "BF16";
        case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
        case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
        case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
        case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
        case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";
        case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large";
        case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small";
        case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium";
        case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
        case LLAMA_FTYPE_MOSTLY_TQ1_0: return "TQ1_0 - 1.69 bpw ternary";
        case LLAMA_FTYPE_MOSTLY_TQ2_0: return "TQ2_0 - 2.06 bpw ternary";
        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw";
        case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw";

        default: return "unknown, may not work";
    }
}

std::string llama_model_arch_name (const llama_model & model) {
    return llm_arch_name(model.arch);
}

std::string llama_model_type_name (const llama_model & model) {
    return llm_type_name(model.type);
}

std::string llama_model_ftype_name(const llama_model & model) {
    return llama_model_ftype_name(model.ftype);
}

template<typename F>
static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) {
    ggml_init_params params = {
        /*.mem_size   =*/ ggml_tensor_overhead()*8,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };

    ggml_context_ptr ctx { ggml_init(params) };
    if (!ctx) {
        throw std::runtime_error(format("failed to create ggml context"));
    }

    // build the op with fn() and attach a dummy (zero-size) buffer of the candidate
    // type to its sources, so the backend device can judge whether it supports the op
    ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) };
    ggml_tensor * op_tensor = fn(ctx.get());
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        if (op_tensor->src[i] != nullptr) {
            assert(op_tensor->src[i]->buffer == nullptr);
            op_tensor->src[i]->buffer = buf.get();
        }
    }

    bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);

    return op_supported;
}

template<typename F>
static ggml_backend_buffer_type_t select_buft(const llama_model::buft_list_t & buft_list, const F & fn) {
    for (const auto & cur : buft_list) {
        ggml_backend_dev_t cur_dev = cur.first;
        ggml_backend_buffer_type_t cur_buft = cur.second;
        if (buft_supported(cur_buft, cur_dev, fn)) {
            return cur_buft;
        }
    }

    throw std::runtime_error(format("no suitable buffer type found"));
}

ggml_backend_buffer_type_t llama_model_select_buft(const llama_model & model, int il) {
    return select_buft(
            *model.dev_layer.at(il).buft_list,
            [&](ggml_context * ctx) {
                ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
                ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
                return ggml_add(ctx, cur, layer_dir);
            });
}

struct ggml_tensor * llama_model_get_tensor(const struct llama_model & model, const char * name) {
    auto it = std::find_if(model.tensors_by_name.begin(), model.tensors_by_name.end(),
            [name](const std::pair<std::string, struct ggml_tensor *> & it) {
                return it.first == name;
            });
    if (it == model.tensors_by_name.end()) {
        return nullptr;
    }

    return it->second;
}

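// Illustrative usage (sketch, not part of the library itself): look up a tensor by its
// GGUF name and handle the case where it is absent. The name shown here is only an
// example of the naming scheme - actual tensor names depend on the model architecture.
//
//     const struct ggml_tensor * tok_embd = llama_model_get_tensor(model, "token_embd.weight");
//     if (tok_embd == nullptr) {
//         // tensor not present in this model
//     }
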
size_t llama_model_max_nodes(const llama_model & model) {
    return std::max<size_t>(8192, model.tensors_by_name.size()*5);
}

//
// interface implementation
//

struct llama_model_params llama_model_default_params() {
    struct llama_model_params result = {
        /*.devices                     =*/ nullptr,
        /*.n_gpu_layers                =*/ 0,
        /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
        /*.main_gpu                    =*/ 0,
        /*.tensor_split                =*/ nullptr,
        /*.rpc_servers                 =*/ nullptr,
        /*.progress_callback           =*/ nullptr,
        /*.progress_callback_user_data =*/ nullptr,
        /*.kv_overrides                =*/ nullptr,
        /*.vocab_only                  =*/ false,
        /*.use_mmap                    =*/ true,
        /*.use_mlock                   =*/ false,
        /*.check_tensors               =*/ false,
    };

#ifdef GGML_USE_METAL
    // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
    result.n_gpu_layers = 999;
#endif

    return result;
}

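// Illustrative usage (sketch, assuming the loading API declared in llama.h): start from the
// defaults, override only the fields of interest, then pass the struct to the loader. The
// n_gpu_layers value below is a hypothetical example.
//
//     struct llama_model_params mparams = llama_model_default_params();
//     mparams.n_gpu_layers = 32;
//
//     struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
//     if (model == nullptr) {
//         // handle load failure
//     }
//     ...
//     llama_free_model(model);
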
void llama_free_model(struct llama_model * model) {
    delete model;
}

enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
    return model->vocab.type;
}

int32_t llama_n_vocab(const struct llama_model * model) {
    return model->hparams.n_vocab;
}

int32_t llama_n_ctx_train(const struct llama_model * model) {
    return model->hparams.n_ctx_train;
}

int32_t llama_n_embd(const struct llama_model * model) {
    return model->hparams.n_embd;
}

int32_t llama_n_layer(const struct llama_model * model) {
    return model->hparams.n_layer;
}

int32_t llama_n_head(const struct llama_model * model) {
    return model->hparams.n_head();
}

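// Illustrative usage (sketch): the getters above expose the training-time hyperparameters
// of a loaded model, e.g. for logging or for choosing a context size.
//
//     printf("n_vocab = %d, n_ctx_train = %d, n_embd = %d, n_layer = %d, n_head = %d\n",
//             llama_n_vocab(model), llama_n_ctx_train(model), llama_n_embd(model),
//             llama_n_layer(model), llama_n_head(model));
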
enum llama_rope_type llama_rope_type(const struct llama_model * model) {
    switch (model->arch) {
        // these models do not use RoPE
        case LLM_ARCH_GPT2:
        case LLM_ARCH_GPTJ:
        case LLM_ARCH_MPT:
        case LLM_ARCH_REFACT:
        case LLM_ARCH_BLOOM:
        case LLM_ARCH_MAMBA:
        case LLM_ARCH_JINA_BERT_V2:
        case LLM_ARCH_T5:
        case LLM_ARCH_T5ENCODER:
        case LLM_ARCH_JAIS:
        case LLM_ARCH_RWKV6:
        case LLM_ARCH_WAVTOKENIZER_DEC:
            return LLAMA_ROPE_TYPE_NONE;

        // use what we call a normal RoPE, operating on pairs of consecutive head values
        case LLM_ARCH_LLAMA:
        case LLM_ARCH_DECI:
        case LLM_ARCH_BAICHUAN:
        case LLM_ARCH_STARCODER:
        case LLM_ARCH_PLAMO:
        case LLM_ARCH_ORION:
        case LLM_ARCH_INTERNLM2:
        case LLM_ARCH_MINICPM:
        case LLM_ARCH_XVERSE:
        case LLM_ARCH_COMMAND_R:
        case LLM_ARCH_OLMO:
        case LLM_ARCH_ARCTIC:
        case LLM_ARCH_DEEPSEEK:
        case LLM_ARCH_DEEPSEEK2:
        case LLM_ARCH_CHATGLM:
        case LLM_ARCH_GRANITE:
        case LLM_ARCH_GRANITE_MOE:
        case LLM_ARCH_CHAMELEON:
            return LLAMA_ROPE_TYPE_NORM;

        // the pairs of head values are offset by n_rot/2
        case LLM_ARCH_FALCON:
        case LLM_ARCH_GROK:
        case LLM_ARCH_DBRX:
        case LLM_ARCH_BERT:
        case LLM_ARCH_NOMIC_BERT:
        case LLM_ARCH_STABLELM:
        case LLM_ARCH_BITNET:
        case LLM_ARCH_QWEN:
        case LLM_ARCH_QWEN2:
        case LLM_ARCH_QWEN2MOE:
        case LLM_ARCH_OLMO2:
        case LLM_ARCH_OLMOE:
        case LLM_ARCH_PHI2:
        case LLM_ARCH_PHI3:
        case LLM_ARCH_GEMMA:
        case LLM_ARCH_GEMMA2:
        case LLM_ARCH_STARCODER2:
        case LLM_ARCH_OPENELM:
        case LLM_ARCH_GPTNEOX:
        case LLM_ARCH_CODESHELL:
        case LLM_ARCH_NEMOTRON:
        case LLM_ARCH_EXAONE:
        case LLM_ARCH_MINICPM3:
            return LLAMA_ROPE_TYPE_NEOX;

        case LLM_ARCH_QWEN2VL:
            return LLAMA_ROPE_TYPE_MROPE;

        // all model arches should be listed explicitly here
        case LLM_ARCH_UNKNOWN:
            GGML_ABORT("unknown architecture");
    }

    return LLAMA_ROPE_TYPE_NONE;
}

float llama_rope_freq_scale_train(const struct llama_model * model) {
    return model->hparams.rope_freq_scale_train;
}

int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
    const auto & it = model->gguf_kv.find(key);
    if (it == model->gguf_kv.end()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}

int32_t llama_model_meta_count(const struct llama_model * model) {
    return (int)model->gguf_kv.size();
}

int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->first.c_str());
}

int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
    if (i < 0 || i >= (int)model->gguf_kv.size()) {
        if (buf_size > 0) {
            buf[0] = '\0';
        }
        return -1;
    }
    auto it = model->gguf_kv.begin();
    std::advance(it, i);
    return snprintf(buf, buf_size, "%s", it->second.c_str());
}

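// Illustrative usage (sketch): enumerate all GGUF metadata of a loaded model. The functions
// above follow the snprintf convention (they return the untruncated length and write at most
// buf_size bytes), so the fixed-size buffers below are used purely for brevity.
//
//     const int32_t n_kv = llama_model_meta_count(model);
//     for (int32_t i = 0; i < n_kv; i++) {
//         char key[256];
//         char val[256];
//         llama_model_meta_key_by_index    (model, i, key, sizeof(key));
//         llama_model_meta_val_str_by_index(model, i, val, sizeof(val));
//         printf("%s = %s\n", key, val);
//     }
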
int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
    return snprintf(buf, buf_size, "%s %s %s",
            llama_model_arch_name (*model).c_str(),
            llama_model_type_name (*model).c_str(),
            llama_model_ftype_name(*model).c_str());
}

uint64_t llama_model_size(const struct llama_model * model) {
    return model->n_bytes;
}

uint64_t llama_model_n_params(const struct llama_model * model) {
    return model->n_elements;
}

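// Illustrative usage (sketch): print a one-line summary of a loaded model, similar to what
// the example programs log at startup.
//
//     char desc[128];
//     llama_model_desc(model, desc, sizeof(desc));
//     printf("%s | size = %.2f GiB | params = %.2f B\n", desc,
//             llama_model_size(model)     / 1024.0 / 1024.0 / 1024.0,
//             llama_model_n_params(model) / 1e9);
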
bool llama_model_has_encoder(const struct llama_model * model) {
    switch (model->arch) {
        case LLM_ARCH_T5: return true;
        case LLM_ARCH_T5ENCODER: return true;
        default: return false;
    }
}

bool llama_model_has_decoder(const struct llama_model * model) {
    switch (model->arch) {
        case LLM_ARCH_T5ENCODER: return false;
        default: return true;
    }
}

llama_token llama_model_decoder_start_token(const struct llama_model * model) {
    return model->hparams.dec_start_token_id;
}

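// Illustrative usage (sketch, assuming the llama_encode/llama_decode API from llama.h):
// encoder-decoder models such as T5 are run in two phases - the prompt goes through the
// encoder first, and generation then starts from the decoder start token.
//
//     if (llama_model_has_encoder(model)) {
//         llama_encode(ctx, batch);  // run the encoder on the prompt
//         llama_token start = llama_model_decoder_start_token(model);
//         // begin decoding with `start` (callers typically fall back to BOS if it is negative)
//     }
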
bool llama_model_is_recurrent(const struct llama_model * model) {
    switch (model->arch) {
        case LLM_ARCH_MAMBA: return true;
        case LLM_ARCH_RWKV6: return true;
        default: return false;
    }
}
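
// Illustrative note (sketch): callers can use this to special-case recurrent architectures
// (Mamba, RWKV), whose per-sequence state does not grow with context length the way a
// regular attention KV cache does.
//
//     if (llama_model_is_recurrent(model)) {
//         // e.g. skip context-shifting logic that only applies to attention KV caches
//     }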