{
    "File name": "File name",
    "File folder": "File folder",
    "For reference. Unchangeable.": "For reference. Unchangeable.",
    "File contents": "File contents",
    "Cancel": "Cancel",
    "Save": "Save",
    "Delete": "Delete",
    "The character will be saved to your characters/ folder with this base filename.": "The character will be saved to your characters/ folder with this base filename.",
    "Confirm the character deletion?": "Confirm the character deletion?",
    "The preset will be saved to your presets/ folder with this base filename.": "The preset will be saved to your presets/ folder with this base filename.",
    "Textbox": "Textbox",
    "{": "{",
    "internal": "internal",
    ":": ":",
    "[": "[",
    "\"": "\"",
    "<|BEGIN-VISIBLE-CHAT|>": "<|BEGIN-VISIBLE-CHAT|>",
    ",": ",",
    "How can I help you today?": "How can I help you today?",
    "]": "]",
    "visible": "visible",
    "}": "}",
    "Chat": "Chat",
    "Default": "Default",
    "Notebook": "Notebook",
    "Parameters": "Parameters",
    "Model": "Model",
    "Training": "Training",
    "Session": "Session",
    "AI": "AI",
    "Show controls (Ctrl+S)": "Show controls (Ctrl+S)",
    "Send to notebook": "Send to notebook",
    "Send to default": "Send to default",
    "Send dummy reply": "Send dummy reply",
    "Send dummy message": "Send dummy message",
    "Impersonate": "Impersonate",
    "Ctrl + Shift + M": "Ctrl + Shift + M",
    "Copy last reply": "Copy last reply",
    "Ctrl + Shift + K": "Ctrl + Shift + K",
    "Replace last reply": "Replace last reply",
    "Ctrl + Shift + L": "Ctrl + Shift + L",
    "Remove last reply": "Remove last reply",
    "Ctrl + Shift + Backspace": "Ctrl + Shift + Backspace",
    "Continue": "Continue",
    "Alt + Enter": "Alt + Enter",
    "Regenerate": "Regenerate",
    "Ctrl + Enter": "Ctrl + Enter",
    "Stop": "Stop",
    "Generate": "Generate",
    "Rename": "Rename",
    "Confirm": "Confirm",
    "New chat": "New chat",
    "Rename to:": "Rename to:",
    "Past chats": "Past chats",
    "Start reply with": "Start reply with",
    "Mode": "Mode",
    "Defines how the chat prompt is generated. In instruct and chat-instruct modes, the instruction template Parameters > Instruction template is used.": "Defines how the chat prompt is generated. In instruct and chat-instruct modes, the instruction template Parameters > Instruction template is used.",
    "chat": "chat",
    "chat-instruct": "chat-instruct",
    "instruct": "instruct",
    "Chat style": "Chat style",
    "Command for chat-instruct mode": "Command for chat-instruct mode",
    "<|character|> and <|prompt|> get replaced with the bot name and the regular chat prompt respectively.": "<|character|> and <|prompt|> get replaced with the bot name and the regular chat prompt respectively.",
    "Input": "Input",
    "Prompt": "Prompt",
    "Raw": "Raw",
    "Markdown": "Markdown",
    "HTML": "HTML",
    "Logits": "Logits",
    "Tokens": "Tokens",
    "Output": "Output",
    "Render": "Render",
    "Get next token probabilities": "Get next token probabilities",
    "Use samplers": "Use samplers",
    "Previous output": "Previous output",
    "Get token IDs for the input": "Get token IDs for the input",
    "Undo": "Undo",
    "Generation": "Generation",
    "Instruction template": "Instruction template",
    "Preset": "Preset",
    "Filter by loader": "Filter by loader",
    "max_new_tokens": "max_new_tokens",
    "temperature": "temperature",
    "top_p": "top_p",
    "top_k": "top_k",
    "typical_p": "typical_p",
    "min_p": "min_p",
    "repetition_penalty": "repetition_penalty",
    "frequency_penalty": "frequency_penalty",
    "presence_penalty": "presence_penalty",
    "repetition_penalty_range": "repetition_penalty_range",
    "do_sample": "do_sample",
    "dry_multiplier": "dry_multiplier",
    "Set to greater than 0 to enable DRY. Recommended value: 0.8.": "Set to greater than 0 to enable DRY. Recommended value: 0.8.",
    "dry_allowed_length": "dry_allowed_length",
    "Longest sequence that can be repeated without being penalized.": "Longest sequence that can be repeated without being penalized.",
    "dry_base": "dry_base",
    "Controls how fast the penalty grows with increasing sequence length.": "Controls how fast the penalty grows with increasing sequence length.",
    "dry_sequence_breakers": "dry_sequence_breakers",
    "Tokens across which sequence matching is not continued. Specified as a comma-separated list of quoted strings.": "Tokens across which sequence matching is not continued. Specified as a comma-separated list of quoted strings.",
    "Learn more": "Learn more",
    "Expand max_new_tokens to the available context length.": "Expand max_new_tokens to the available context length.",
    "auto_max_new_tokens": "auto_max_new_tokens",
    "Forces the model to never end the generation prematurely.": "Forces the model to never end the generation prematurely.",
    "Ban the eos_token": "Ban the eos_token",
    "Disabling this can make the replies more creative.": "Disabling this can make the replies more creative.",
    "Add the bos_token to the beginning of prompts": "Add the bos_token to the beginning of prompts",
    "Custom stopping strings": "Custom stopping strings",
    "Written between \"\" and separated by commas.": "Written between \"\" and separated by commas.",
    "Token bans": "Token bans",
    "Token IDs to ban, separated by commas. The IDs can be found in the Default or Notebook tab.": "Token IDs to ban, separated by commas. The IDs can be found in the Default or Notebook tab.",
    "penalty_alpha": "penalty_alpha",
    "For Contrastive Search. do_sample must be unchecked.": "For Contrastive Search. do_sample must be unchecked.",
    "guidance_scale": "guidance_scale",
    "For CFG. 1.5 is a good value.": "For CFG. 1.5 is a good value.",
    "Negative prompt": "Negative prompt",
    "mirostat_mode": "mirostat_mode",
    "mode=1 is for llama.cpp only.": "mode=1 is for llama.cpp only.",
    "mirostat_tau": "mirostat_tau",
    "mirostat_eta": "mirostat_eta",
    "epsilon_cutoff": "epsilon_cutoff",
    "eta_cutoff": "eta_cutoff",
    "encoder_repetition_penalty": "encoder_repetition_penalty",
    "no_repeat_ngram_size": "no_repeat_ngram_size",
    "Load grammar from file (.gbnf)": "Load grammar from file (.gbnf)",
    "Grammar": "Grammar",
    "tfs": "tfs",
    "top_a": "top_a",
    "smoothing_factor": "smoothing_factor",
    "Activates Quadratic Sampling.": "Activates Quadratic Sampling.",
    "smoothing_curve": "smoothing_curve",
    "Adjusts the dropoff curve of Quadratic Sampling.": "Adjusts the dropoff curve of Quadratic Sampling.",
    "dynamic_temperature": "dynamic_temperature",
    "dynatemp_low": "dynatemp_low",
    "dynatemp_high": "dynatemp_high",
    "dynatemp_exponent": "dynatemp_exponent",
    "Moves temperature/dynamic temperature/quadratic sampling to the end of the sampler stack, ignoring their positions in \"Sampler priority\".": "Moves temperature/dynamic temperature/quadratic sampling to the end of the sampler stack, ignoring their positions in \"Sampler priority\".",
    "temperature_last": "temperature_last",
    "Sampler priority": "Sampler priority",
    "Parameter names separated by new lines or commas.": "Parameter names separated by new lines or commas.",
    "Truncate the prompt up to this length": "Truncate the prompt up to this length",
    "The leftmost tokens are removed if the prompt exceeds this length. Most models require this to be at most 2048.": "The leftmost tokens are removed if the prompt exceeds this length. Most models require this to be at most 2048.",
    "prompt_lookup_num_tokens": "prompt_lookup_num_tokens",
    "Activates Prompt Lookup Decoding.": "Activates Prompt Lookup Decoding.",
    "Maximum tokens/second": "Maximum tokens/second",
    "To make text readable in real time.": "To make text readable in real time.",
    "Maximum UI updates/second": "Maximum UI updates/second",
    "Set this if you experience lag in the UI during streaming.": "Set this if you experience lag in the UI during streaming.",
    "Seed (-1 for random)": "Seed (-1 for random)",
    "Some specific models need this unset.": "Some specific models need this unset.",
    "Skip special tokens": "Skip special tokens",
    "Activate text streaming": "Activate text streaming",
    "Character": "Character",
    "User": "User",
    "Chat history": "Chat history",
    "Upload character": "Upload character",
    "Used in chat and chat-instruct modes.": "Used in chat and chat-instruct modes.",
    "Character's name": "Character's name",
    "Context": "Context",
    "Greeting": "Greeting",
    "Name": "Name",
    "Description": "Description",
    "Here you can optionally write a description of yourself.": "Here you can optionally write a description of yourself.",
    "Save history": "Save history",
    "Upload History JSON": "Upload History JSON",
    "将文件拖放到此处": "将文件拖放到此处",
    "-": "-",
    "或": "或",
    "点击上传": "点击上传",
    "YAML or JSON": "YAML or JSON",
    "TavernAI PNG": "TavernAI PNG",
    "JSON or YAML File": "JSON or YAML File",
    "Profile Picture (optional)": "Profile Picture (optional)",
    "将图像拖放到此处": "将图像拖放到此处",
    "Submit": "Submit",
    "TavernAI PNG File": "TavernAI PNG File",
    "Character picture": "Character picture",
    "Your picture": "Your picture",
    "Saved instruction templates": "Saved instruction templates",
    "After selecting the template, click on \"Load\" to load and apply it.": "After selecting the template, click on \"Load\" to load and apply it.",
    "Load": "Load",
    "Custom system message": "Custom system message",
    "If not empty, will be used instead of the default one.": "If not empty, will be used instead of the default one.",
    "Change this according to the model/LoRA that you are using. Used in instruct and chat-instruct modes.": "Change this according to the model/LoRA that you are using. Used in instruct and chat-instruct modes.",
    "Send to negative prompt": "Send to negative prompt",
    "Chat template": "Chat template",
    "Unload": "Unload",
    "Save settings": "Save settings",
    "LoRA(s)": "LoRA(s)",
    "Apply LoRAs": "Apply LoRAs",
    "Model loader": "Model loader",
    "gpu-memory in MiB for device :0": "gpu-memory in MiB for device :0",
    "cpu-memory in MiB": "cpu-memory in MiB",
    "load-in-4bit params:": "load-in-4bit params:",
    "compute_dtype": "compute_dtype",
    "quant_type": "quant_type",
    "hqq_backend": "hqq_backend",
    "n-gpu-layers": "n-gpu-layers",
    "Must be set to more than 0 for your GPU to be used.": "Must be set to more than 0 for your GPU to be used.",
    "n_ctx": "n_ctx",
    "Context length. Try lowering this if you run out of memory while loading the model.": "Context length. Try lowering this if you run out of memory while loading the model.",
    "tensor_split": "tensor_split",
    "List of proportions to split the model across multiple GPUs. Example: 60,40": "List of proportions to split the model across multiple GPUs. Example: 60,40",
    "n_batch": "n_batch",
    "threads": "threads",
    "threads_batch": "threads_batch",
    "wbits": "wbits",
    "groupsize": "groupsize",
    "gpu-split": "gpu-split",
    "Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7": "Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7",
    "max_seq_len": "max_seq_len",
    "alpha_value": "alpha_value",
    "Positional embeddings alpha factor for NTK RoPE scaling. Recommended values (NTKv1): 1.75 for 1.5x context, 2.5 for 2x context. Use either this or compress_pos_emb, not both.": "Positional embeddings alpha factor for NTK RoPE scaling. Recommended values (NTKv1): 1.75 for 1.5x context, 2.5 for 2x context. Use either this or compress_pos_emb, not both.",
    "rope_freq_base": "rope_freq_base",
    "Positional embeddings frequency base for NTK RoPE scaling. Related to alpha_value by rope_freq_base = 10000 * alpha_value ^ (64 / 63). 0 = from model.": "Positional embeddings frequency base for NTK RoPE scaling. Related to alpha_value by rope_freq_base = 10000 * alpha_value ^ (64 / 63). 0 = from model.",
    "compress_pos_emb": "compress_pos_emb",
    "Positional embeddings compression factor. Should be set to (context length) / (model's original context length). Equal to 1/rope_freq_scale.": "Positional embeddings compression factor. Should be set to (context length) / (model's original context length). Equal to 1/rope_freq_scale.",
    "ExLlamav2_HF is recommended over AutoGPTQ for models derived from Llama.": "ExLlamav2_HF is recommended over AutoGPTQ for models derived from Llama.",
    "load-in-8bit": "load-in-8bit",
    "load-in-4bit": "load-in-4bit",
    "use_double_quant": "use_double_quant",
    "Set use_flash_attention_2=True while loading the model.": "Set use_flash_attention_2=True while loading the model.",
    "use_flash_attention_2": "use_flash_attention_2",
    "Set attn_implementation= eager while loading the model.": "Set attn_implementation= eager while loading the model.",
    "use_eager_attention": "use_eager_attention",
    "Use flash-attention.": "Use flash-attention.",
    "flash_attn": "flash_attn",
    "auto-devices": "auto-devices",
    "NVIDIA only: use llama-cpp-python compiled with tensor cores support. This may increase performance on newer cards.": "NVIDIA only: use llama-cpp-python compiled with tensor cores support. This may increase performance on newer cards.",
    "tensorcores": "tensorcores",
    "Use 8-bit cache to save VRAM.": "Use 8-bit cache to save VRAM.",
    "cache_8bit": "cache_8bit",
    "Use Q4 cache to save VRAM.": "Use Q4 cache to save VRAM.",
    "cache_4bit": "cache_4bit",
    "(experimental) Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.": "(experimental) Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.",
    "streaming_llm": "streaming_llm",
    "attention_sink_size": "attention_sink_size",
    "StreamingLLM: number of sink tokens. Only used if the trimmed prompt doesn't share a prefix with the old prompt.": "StreamingLLM: number of sink tokens. Only used if the trimmed prompt doesn't share a prefix with the old prompt.",
    "llama.cpp: Use llama-cpp-python compiled without GPU acceleration. Transformers: use PyTorch in CPU mode.": "llama.cpp: Use llama-cpp-python compiled without GPU acceleration. Transformers: use PyTorch in CPU mode.",
    "cpu": "cpu",
    "Split the model by rows across GPUs. This may improve multi-gpu performance.": "Split the model by rows across GPUs. This may improve multi-gpu performance.",
    "row_split": "row_split",
    "Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.": "Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.",
    "no_offload_kqv": "no_offload_kqv",
    "Disable the mulmat kernels.": "Disable the mulmat kernels.",
    "no_mul_mat_q": "no_mul_mat_q",
    "triton": "triton",
    "Affects Triton only. Disable fused MLP. Fused MLP improves performance but uses more VRAM. Disable if running low on VRAM.": "Affects Triton only. Disable fused MLP. Fused MLP improves performance but uses more VRAM. Disable if running low on VRAM.",
    "no_inject_fused_mlp": "no_inject_fused_mlp",
    "This can make models faster on some systems.": "This can make models faster on some systems.",
    "no_use_cuda_fp16": "no_use_cuda_fp16",
    "'desc_act', 'wbits', and 'groupsize' are used for old models without a quantize_config.json.": "'desc_act', 'wbits', and 'groupsize' are used for old models without a quantize_config.json.",
    "desc_act": "desc_act",
    "no-mmap": "no-mmap",
    "mlock": "mlock",
    "NUMA support can help on some systems with non-uniform memory access.": "NUMA support can help on some systems with non-uniform memory access.",
    "numa": "numa",
    "disk": "disk",
    "bf16": "bf16",
    "Automatically split the model tensors across the available GPUs.": "Automatically split the model tensors across the available GPUs.",
    "autosplit": "autosplit",
    "no_flash_attn": "no_flash_attn",
    "no_xformers": "no_xformers",
    "no_sdpa": "no_sdpa",
    "Necessary to use CFG with this loader.": "Necessary to use CFG with this loader.",
    "cfg-cache": "cfg-cache",
    "Enable inference with ModelRunnerCpp, which is faster than the default ModelRunner.": "Enable inference with ModelRunnerCpp, which is faster than the default ModelRunner.",
    "cpp-runner": "cpp-runner",
    "Number of experts per token": "Number of experts per token",
    "Only applies to MoE models like Mixtral.": "Only applies to MoE models like Mixtral.",
    "Set trust_remote_code=True while loading the tokenizer/model. To enable this option, start the web UI with the --trust-remote-code flag.": "Set trust_remote_code=True while loading the tokenizer/model. To enable this option, start the web UI with the --trust-remote-code flag.",
    "trust-remote-code": "trust-remote-code",
    "Set use_fast=False while loading the tokenizer.": "Set use_fast=False while loading the tokenizer.",
    "no_use_fast": "no_use_fast",
    "Needs to be set for perplexity evaluation to work with this loader. Otherwise, ignore it, as it makes prompt processing slower.": "Needs to be set for perplexity evaluation to work with this loader. Otherwise, ignore it, as it makes prompt processing slower.",
    "logits_all": "logits_all",
    "Disable ExLlama kernel for GPTQ models.": "Disable ExLlama kernel for GPTQ models.",
    "disable_exllama": "disable_exllama",
    "Disable ExLlamav2 kernel for GPTQ models.": "Disable ExLlamav2 kernel for GPTQ models.",
    "disable_exllamav2": "disable_exllamav2",
    "ExLlamav2_HF is recommended over ExLlamav2 for better integration with extensions and more consistent sampling behavior across loaders.": "ExLlamav2_HF is recommended over ExLlamav2 for better integration with extensions and more consistent sampling behavior across loaders.",
    "llamacpp_HF loads llama.cpp as a Transformers model. To use it, you need to place your GGUF in a subfolder of models/ with the necessary tokenizer files.": "llamacpp_HF loads llama.cpp as a Transformers model. To use it, you need to place your GGUF in a subfolder of models/ with the necessary tokenizer files.",
    "You can use the \"llamacpp_HF creator\" menu to do that automatically.": "You can use the \"llamacpp_HF creator\" menu to do that automatically.",
    "TensorRT-LLM has to be installed manually in a separate Python 3.10 environment at the moment. For a guide, consult the description of": "TensorRT-LLM has to be installed manually in a separate Python 3.10 environment at the moment. For a guide, consult the description of",
    "this PR": "this PR",
    "is only used when": "is only used when",
    "is checked.": "is checked.",
    "cpp_runner": "cpp_runner",
    "does not support streaming at the moment.": "does not support streaming at the moment.",
    "Whether to load the model as soon as it is selected in the Model dropdown.": "Whether to load the model as soon as it is selected in the Model dropdown.",
    "Autoload the model": "Autoload the model",
    "Download": "Download",
    "llamacpp_HF creator": "llamacpp_HF creator",
    "Customize instruction template": "Customize instruction template",
    "Download model or LoRA": "Download model or LoRA",
    "Enter the Hugging Face username/model path, for instance: facebook/galactica-125m. To specify a branch, add it at the end after a \":\" character like this: facebook/galactica-125m:main. To download a single file, enter its name in the second box.": "Enter the Hugging Face username/model path, for instance: facebook/galactica-125m. To specify a branch, add it at the end after a \":\" character like this: facebook/galactica-125m:main. To download a single file, enter its name in the second box.",
    "Get file list": "Get file list",
    "Choose your GGUF": "Choose your GGUF",
    "Enter the URL for the original (unquantized) model": "Enter the URL for the original (unquantized) model",
    "Example: https://huggingface.co/lmsys/vicuna-13b-v1.5": "Example: https://huggingface.co/lmsys/vicuna-13b-v1.5",
    "This will move your gguf file into a subfolder of": "This will move your gguf file into a subfolder of",
    "models": "models",
    "along with the necessary tokenizer files.": "along with the necessary tokenizer files.",
    "Select the desired instruction template": "Select the desired instruction template",
    "This allows you to set a customized template for the model currently selected in the \"Model loader\" menu. Whenever the model gets loaded, this template will be used in place of the template specified in the model's medatada, which sometimes is wrong.": "This allows you to set a customized template for the model currently selected in the \"Model loader\" menu. Whenever the model gets loaded, this template will be used in place of the template specified in the model's medatada, which sometimes is wrong.",
    "No model is loaded": "No model is loaded",
    "Train LoRA": "Train LoRA",
    "Perplexity evaluation": "Perplexity evaluation",
    "Tutorial": "Tutorial",
    "Copy parameters from": "Copy parameters from",
    "The name of your new LoRA file": "The name of your new LoRA file",
    "If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).": "If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).",
    "Override Existing Files": "Override Existing Files",
    "Target Modules": "Target Modules",
    "▼": "▼",
    "Selects which modules to target in training. Targeting more modules is closer to a full fine-tune at the cost of increased VRAM requirements and adapter size.\nNOTE: Only works for model_id='llama', other types will retain default training behavior and not use these settings.": "Selects which modules to target in training. Targeting more modules is closer to a full fine-tune at the cost of increased VRAM requirements and adapter size.\nNOTE: Only works for model_id='llama', other types will retain default training behavior and not use these settings.",
    "Enable q_proj": "Enable q_proj",
    "Enable v_proj": "Enable v_proj",
    "Enable k_proj": "Enable k_proj",
    "Enable o_proj": "Enable o_proj",
    "Enable gate_proj": "Enable gate_proj",
    "Enable down_proj": "Enable down_proj",
    "Enable up_proj": "Enable up_proj",
    "LoRA Rank": "LoRA Rank",
    "Also called dimension count. Higher values = larger file, more content control. Smaller values = smaller file, less control. Use 4 or 8 for style, 128 or 256 to teach, 1024+ for fine-detail on big data. More VRAM is needed for higher ranks.": "Also called dimension count. Higher values = larger file, more content control. Smaller values = smaller file, less control. Use 4 or 8 for style, 128 or 256 to teach, 1024+ for fine-detail on big data. More VRAM is needed for higher ranks.",
    "LoRA Alpha": "LoRA Alpha",
    "This divided by the rank becomes the scaling of the LoRA. Higher means stronger. A good standard value is twice your Rank.": "This divided by the rank becomes the scaling of the LoRA. Higher means stronger. A good standard value is twice your Rank.",
    "Batch Size": "Batch Size",
    "Global batch size. The two batch sizes together determine gradient accumulation (gradientAccum = batch / microBatch). Higher gradient accum values lead to better quality training.": "Global batch size. The two batch sizes together determine gradient accumulation (gradientAccum = batch / microBatch). Higher gradient accum values lead to better quality training.",
    "Micro Batch Size": "Micro Batch Size",
    "Per-device batch size (NOTE: multiple devices not yet implemented). Increasing this will increase VRAM usage.": "Per-device batch size (NOTE: multiple devices not yet implemented). Increasing this will increase VRAM usage.",
    "Cutoff Length": "Cutoff Length",
    "Cutoff length for text input. Essentially, how long of a line of text to feed in at a time. Higher values require drastically more VRAM.": "Cutoff length for text input. Essentially, how long of a line of text to feed in at a time. Higher values require drastically more VRAM.",
    "Save every n steps": "Save every n steps",
    "If above 0, a checkpoint of the LoRA will be saved every time this many steps pass.": "If above 0, a checkpoint of the LoRA will be saved every time this many steps pass.",
    "Epochs": "Epochs",
    "Number of times every entry in the dataset should be fed into training. So 1 means feed each item in once, 5 means feed it in five times, etc.": "Number of times every entry in the dataset should be fed into training. So 1 means feed each item in once, 5 means feed it in five times, etc.",
    "Learning Rate": "Learning Rate",
    "In scientific notation. 3e-4 is a good starting base point. 1e-2 is extremely high, 1e-6 is extremely low.": "In scientific notation. 3e-4 is a good starting base point. 1e-2 is extremely high, 1e-6 is extremely low.",
    "LR Scheduler": "LR Scheduler",
    "Learning rate scheduler - defines how the learning rate changes over time. \"Constant\" means never change, \"linear\" means to go in a straight line from the learning rate down to 0, cosine follows a curve, etc.": "Learning rate scheduler - defines how the learning rate changes over time. \"Constant\" means never change, \"linear\" means to go in a straight line from the learning rate down to 0, cosine follows a curve, etc.",
    "Advanced Options": "Advanced Options",
    "LoRA Dropout": "LoRA Dropout",
    "Percentage probability for dropout of LoRA layers. This can help reduce overfitting. Most users should leave at default.": "Percentage probability for dropout of LoRA layers. This can help reduce overfitting. Most users should leave at default.",
    "Stop at loss": "Stop at loss",
    "The process will automatically stop once the desired loss value is reached. (reasonable numbers are 1.5-1.8)": "The process will automatically stop once the desired loss value is reached. (reasonable numbers are 1.5-1.8)",
    "Optimizer": "Optimizer",
    "Different optimizer implementation options, for advanced users. Effects of different options are not well documented yet.": "Different optimizer implementation options, for advanced users. Effects of different options are not well documented yet.",
    "Warmup Steps": "Warmup Steps",
    "For this many steps at the start, the learning rate will be lower than normal. This helps the trainer prepare the model and precompute statistics to improve the quality of training after the start.": "For this many steps at the start, the learning rate will be lower than normal. This helps the trainer prepare the model and precompute statistics to improve the quality of training after the start.",
    "Train Only After": "Train Only After",
    "Only consider text *after* this string in any given chunk for training. For Alpaca datasets, use \"### Response:\" to only train the response and ignore the input.": "Only consider text *after* this string in any given chunk for training. For Alpaca datasets, use \"### Response:\" to only train the response and ignore the input.",
    "Adds EOS token for each dataset item. In case of raw text, the EOS will be added at the Hard Cut": "Adds EOS token for each dataset item. In case of raw text, the EOS will be added at the Hard Cut",
    "Add EOS token": "Add EOS token",
    "If checked, changes Rank/Alpha slider above to go much higher. This will not work without a datacenter-class GPU.": "If checked, changes Rank/Alpha slider above to go much higher. This will not work without a datacenter-class GPU.",
    "Enable higher ranks": "Enable higher ranks",
    "Save detailed logs with": "Save detailed logs with",
    "None": "None",
    "wandb": "wandb",
    "tensorboard": "tensorboard",
    "Formatted Dataset": "Formatted Dataset",
    "Raw text file": "Raw text file",
    "Data Format": "Data Format",
    "The format file used to decide how to format the dataset input.": "The format file used to decide how to format the dataset input.",
    "Dataset": "Dataset",
    "The dataset file to use for training.": "The dataset file to use for training.",
    "Evaluation Dataset": "Evaluation Dataset",
    "The (optional) dataset file used to evaluate the model after training.": "The (optional) dataset file used to evaluate the model after training.",
    "Evaluate every n steps": "Evaluate every n steps",
    "If an evaluation dataset is given, test it every time this many steps pass.": "If an evaluation dataset is given, test it every time this many steps pass.",
    "Text file": "Text file",
    "The raw text file to use for training.": "The raw text file to use for training.",
    "Overlap Length": "Overlap Length",
    "How many tokens from the prior chunk of text to include into the next chunk. (The chunks themselves will be of a size determined by Cutoff Length). Setting overlap to exactly half the cutoff length may be ideal.": "How many tokens from the prior chunk of text to include into the next chunk. (The chunks themselves will be of a size determined by Cutoff Length). Setting overlap to exactly half the cutoff length may be ideal.",
    "Prefer Newline Cut Length": "Prefer Newline Cut Length",
    "Length (in characters, not tokens) of the maximum distance to shift an overlap cut by to ensure chunks cut at newlines. If too low, cuts may occur in the middle of lines.": "Length (in characters, not tokens) of the maximum distance to shift an overlap cut by to ensure chunks cut at newlines. If too low, cuts may occur in the middle of lines.",
    "Hard Cut String": "Hard Cut String",
    "String that indicates a hard cut between text parts. Helps prevent unwanted overlap.": "String that indicates a hard cut between text parts. Helps prevent unwanted overlap.",
    "Ignore small blocks": "Ignore small blocks",
    "Ignore Hard Cut blocks that have less or equal characters than this number": "Ignore Hard Cut blocks that have less or equal characters than this number",
    "Start LoRA Training": "Start LoRA Training",
    "Interrupt": "Interrupt",
    "Ready": "Ready",
    "Models": "Models",
    "Input dataset": "Input dataset",
    "The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.": "The raw text file on which the model will be evaluated. The first options are automatically downloaded: wikitext, ptb, and ptb_new. The next options are your local text files under training/datasets.",
    "Stride": "Stride",
    "Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.": "Used to make the evaluation faster at the cost of accuracy. 1 = slowest but most accurate. 512 is a common value.",
    "max_length": "max_length",
    "The context for each evaluation. If set to 0, the maximum context length for the model will be used.": "The context for each evaluation. If set to 0, the maximum context length for the model will be used.",
    "Evaluate loaded model": "Evaluate loaded model",
    "Evaluate selected models": "Evaluate selected models",
    "LoRAs": "LoRAs",
    "Perplexity": "Perplexity",
    "stride": "stride",
    "Date": "Date",
    "Comment": "Comment",
    "新行": "新行",
    "新列": "新列",
    "Save comments": "Save comments",
    "Refresh the table": "Refresh the table",
    "Apply flags/extensions and restart": "Apply flags/extensions and restart",
    "Save UI defaults to settings.yaml": "Save UI defaults to settings.yaml",
    "Available extensions": "Available extensions",
    "Note that some of these extensions may require manually installing Python requirements through the command: pip install -r extensions/extension_name/requirements.txt": "Note that some of these extensions may require manually installing Python requirements through the command: pip install -r extensions/extension_name/requirements.txt",
    "character_bias": "character_bias",
    "coqui_tts": "coqui_tts",
    "example": "example",
    "gallery": "gallery",
    "google_translate": "google_translate",
    "long_replies": "long_replies",
    "multimodal": "multimodal",
    "ngrok": "ngrok",
    "openai": "openai",
    "perplexity_colors": "perplexity_colors",
    "sd_api_pictures": "sd_api_pictures",
    "send_pictures": "send_pictures",
    "silero_tts": "silero_tts",
    "superbooga": "superbooga",
    "superboogav2": "superboogav2",
    "Training_PRO": "Training_PRO",
    "whisper_stt": "whisper_stt",
    "Boolean command-line flags": "Boolean command-line flags",
    "api": "api",
    "auto_launch": "auto_launch",
    "chat_buttons": "chat_buttons",
    "deepspeed": "deepspeed",
    "force_safetensors": "force_safetensors",
    "listen": "listen",
    "model_menu": "model_menu",
    "monkey_patch": "monkey_patch",
    "multi_user": "multi_user",
    "no_cache": "no_cache",
    "no_inject_fused_attention": "no_inject_fused_attention",
    "nowebui": "nowebui",
    "public_api": "public_api",
    "share": "share",
    "verbose": "verbose",
    "Install or update an extension": "Install or update an extension",
    "Download localization template": "Download localization template",
    "通过 API 使用": "通过 API 使用",
    "·": "·",
    "使用 Gradio 构建": "使用 Gradio 构建",
    "Send a message": "Send a message",
    "New name": "New name",
    "Sure thing!": "Sure thing!",
    "\"\\n\", \"\\nYou:\"": "\"\\n\", \"\\nYou:\"",
    "{{user}}'s personality: ...": "{{user}}'s personality: ...",
    "File name (for GGUF models)": "File name (for GGUF models)"
}