Mirror of https://github.com/oobabooga/text-generation-webui.git, synced 2024-12-24 13:28:59 +01:00
Add more info messages for truncation / instruction template
parent 883701bc40
commit a85ce5f055
@@ -1,4 +1,5 @@
 from modules import shared
+from modules.logging_colors import logger
 from modules.models import load_model, unload_model
 from modules.models_settings import get_model_metadata, update_model_parameters
 from modules.utils import get_available_models
@@ -62,3 +63,7 @@ def _load_model(data):
     for k in settings:
         if k in shared.settings:
             shared.settings[k] = settings[k]
+            if k == 'truncation_length':
+                logger.info(f"TRUNCATION LENGTH (UPDATED): {shared.settings['truncation_length']}")
+            elif k == 'instruction_template':
+                logger.info(f"INSTRUCTION TEMPLATE (UPDATED): {shared.settings['instruction_template']}")
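For context, settings here comes from the body of a /v1/internal/model/load request, so the new "(UPDATED)" messages fire whenever a load request overrides those keys. A minimal sketch of such a request follows; the payload shape is illustrative, and the model name, values, host and port are placeholders rather than anything taken from this diff.

import requests

# Hypothetical payload; the field names mirror how _load_model(data) reads
# "settings" above, but the exact request schema is an assumption here.
payload = {
    "model_name": "some-model",           # placeholder model name
    "args": {},                           # no loader overrides in this example
    "settings": {
        "truncation_length": 8192,        # would log "TRUNCATION LENGTH (UPDATED): 8192"
        "instruction_template": "Alpaca"  # would log "INSTRUCTION TEMPLATE (UPDATED): Alpaca"
    },
}

# Assumed local host/port; point this at wherever the API is actually listening.
resp = requests.post("http://127.0.0.1:5000/v1/internal/model/load", json=payload)
print(resp.status_code)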
@@ -287,7 +287,7 @@ async def handle_load_model(request_data: LoadModelRequest):
 
 
 @app.post("/v1/internal/model/unload")
-async def handle_load_model():
+async def handle_unload_model():
     unload_model()
     return JSONResponse(content="OK")
 
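The rename only touches the Python function name; the route itself stays /v1/internal/model/unload, and it also stops two handlers in the same module from sharing the name handle_load_model. A hedged usage sketch, assuming the API is reachable on localhost at its usual port:

import requests

# Unload the currently loaded model through the internal API.
# Host and port are assumptions; adjust to your own setup.
resp = requests.post("http://127.0.0.1:5000/v1/internal/model/unload")
print(resp.json())  # the handler above responds with "OK"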
@@ -102,7 +102,7 @@ def load_model(model_name, loader=None):
     elif loader in ['llama.cpp', 'llamacpp_HF', 'ctransformers']:
         shared.settings['truncation_length'] = shared.args.n_ctx
 
-    logger.info(f"CONTEXT LENGTH: {shared.settings['truncation_length']}")
+    logger.info(f"TRUNCATION LENGTH: {shared.settings['truncation_length']}")
     logger.info(f"INSTRUCTION TEMPLATE: {shared.settings['instruction_template']}")
     logger.info(f"Loaded the model in {(time.time()-t0):.2f} seconds.")
     return model, tokenizer
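To make the pattern concrete, here is a small, self-contained sketch of what the hunk above does: derive the truncation length from a llama.cpp-style context size, then report it alongside the instruction template. Plain dicts and the standard logging module stand in for the webui's shared.args / shared.settings and its colored logger, and the values (n_ctx, template name) are illustrative.

import logging
import time

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")
logger = logging.getLogger("load_model_sketch")

# Stand-ins for shared.args and shared.settings (assumed values, not webui defaults).
args = {"n_ctx": 4096}
settings = {"truncation_length": 2048, "instruction_template": "Alpaca"}

t0 = time.time()
loader = "llama.cpp"
if loader in ["llama.cpp", "llamacpp_HF", "ctransformers"]:
    # Mirror the context window so truncation matches what the loader can actually hold.
    settings["truncation_length"] = args["n_ctx"]

logger.info(f"TRUNCATION LENGTH: {settings['truncation_length']}")
logger.info(f"INSTRUCTION TEMPLATE: {settings['instruction_template']}")
logger.info(f"Loaded the model in {(time.time()-t0):.2f} seconds.")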