Mirror of https://github.com/ggerganov/llama.cpp.git
llama : fix integer overflow during quantization (#6063)
commit 4755afd1cb (parent 6e0438da3c)
@@ -11977,7 +11977,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     return new_type;
 }
 
-static int32_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
+static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
     std::mutex mutex;
     int counter = 0;
     size_t new_size = 0;
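
Why the change matters: inside llama_tensor_quantize_internal the per-row quantized sizes are accumulated into new_size, which is a size_t, but the old int32_t return type truncated that total whenever a tensor's quantized data exceeded INT32_MAX (about 2 GiB). Below is a minimal C++ sketch of the failure mode; the tensor dimensions are hypothetical and not taken from the commit.

    // Minimal sketch, not from the commit: the shape below is hypothetical,
    // chosen so the quantized output is exactly 4 GiB and overflows int32_t.
    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t nrows    = 524288;            // hypothetical row count
        const int64_t row_size = 8192;              // hypothetical bytes per quantized row
        const int64_t total    = nrows * row_size;  // 4294967296 bytes (4 GiB)

        const int32_t as_int32  = (int32_t) total;  // truncates to 0: the overflow being fixed
        const size_t  as_size_t = (size_t)  total;  // 4294967296 on 64-bit targets: correct

        printf("int32_t result: %d\n",  as_int32);
        printf("size_t  result: %zu\n", as_size_t);
        return 0;
    }

Returning size_t matches the type of new_size itself, so the accumulated byte count survives the return unmodified on 64-bit builds.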