// embd-input.h — C-style interface for driving a llama context with embedding input.
#ifndef _EMBD_INPUT_H_
#define _EMBD_INPUT_H_ 1
#include "common.h"
#include "llama.h"
extern "C" {
typedef struct MyModel {
|
|
|
|
llama_context* ctx;
|
|
|
|
gpt_params params;
|
|
|
|
int n_past = 0;
|
|
|
|
} MyModel;
struct MyModel* create_mymodel(int argc, char ** argv);
|
|
|
|
|
|
|
|
bool eval_float(void* model, float* input, int N);
|
|
|
|
bool eval_tokens(void* model, std::vector<llama_token> tokens);
|
|
|
|
bool eval_id(struct MyModel* mymodel, int id);
|
|
|
|
bool eval_string(struct MyModel* mymodel, const char* str);
|
2023-07-01 17:46:00 +02:00
|
|
|
const char * sampling(struct MyModel* mymodel);
|
2023-06-28 17:53:37 +02:00
|
|
|
llama_token sampling_id(struct MyModel* mymodel);
|
|
|
|
void free_mymodel(struct MyModel* mymodel);
}
#endif