# Dropdown to restrict the displayed parameters to those used by one loader.
shared.gradio['filter_by_loader'] = gr.Dropdown(
    label="Filter by loader",
    choices=["All"] + list(loaders.loaders_and_params.keys()),
    value="All",
    elem_classes='slim-dropdown'
)
For a technical description of the parameters, the [transformers documentation](https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig) is a good reference.
# Classifier-Free Guidance strength slider.
shared.gradio['guidance_scale'] = gr.Slider(
    -0.5, 2.5,
    step=0.05,
    value=generate_params['guidance_scale'],
    label='guidance_scale',
    info='For CFG. 1.5 is a good value.'
)
# Contrastive Search penalty slider (only effective when do_sample is off).
shared.gradio['penalty_alpha'] = gr.Slider(
    0, 5,
    value=generate_params['penalty_alpha'],
    label='penalty_alpha',
    info='For Contrastive Search. do_sample must be unchecked.'
)
# Beam Search width slider.
shared.gradio['num_beams'] = gr.Slider(
    1, 20,
    step=1,
    value=generate_params['num_beams'],
    label='num_beams',
    info='For Beam Search, along with length_penalty and early_stopping.'
)
# Prompt truncation length slider; bounds come from the app settings.
shared.gradio['truncation_length'] = gr.Slider(
    value=shared.settings['truncation_length'],
    minimum=shared.settings['truncation_length_min'],
    maximum=shared.settings['truncation_length_max'],
    step=256,
    label='Truncate the prompt up to this length',
    info='The leftmost tokens are removed if the prompt exceeds this length. Most models require this to be at most 2048.'
)
# Textbox for extra stop sequences. Bug fix: the pasted source had the fused
# token `orNone` (a NameError); it must be `... or None` so an empty settings
# value leaves the textbox blank instead of showing an empty string.
shared.gradio['custom_stopping_strings'] = gr.Textbox(
    lines=1,
    value=shared.settings["custom_stopping_strings"] or None,
    label='Custom stopping strings',
    info='In addition to the defaults. Written between "" and separated by commas. For instance: "\\nYour Assistant:", "\\nThe assistant:"'
)
# Column of generation-behavior toggles. Bug fixes from the mangled paste:
# `withgr.Column():` -> `with gr.Column():` and
# `value=notshared.args.no_stream` -> `value=not shared.args.no_stream`
# (streaming is enabled exactly when --no-stream was NOT passed).
with gr.Column():
    shared.gradio['auto_max_new_tokens'] = gr.Checkbox(
        value=shared.settings['auto_max_new_tokens'],
        label='auto_max_new_tokens',
        info='Expand max_new_tokens to the available context length.'
    )
    shared.gradio['ban_eos_token'] = gr.Checkbox(
        value=shared.settings['ban_eos_token'],
        label='Ban the eos_token',
        info='Forces the model to never end the generation prematurely.'
    )
    shared.gradio['add_bos_token'] = gr.Checkbox(
        value=shared.settings['add_bos_token'],
        label='Add the bos_token to the beginning of prompts',
        info='Disabling this can make the replies more creative.'
    )
    shared.gradio['skip_special_tokens'] = gr.Checkbox(
        value=shared.settings['skip_special_tokens'],
        label='Skip special tokens',
        info='Some specific models need this unset.'
    )
    shared.gradio['stream'] = gr.Checkbox(
        value=not shared.args.no_stream,
        label='Activate text streaming'
    )
    shared.gradio['chat_generation_attempts'] = gr.Slider(
        minimum=shared.settings['chat_generation_attempts_min'],
        maximum=shared.settings['chat_generation_attempts_max'],
        value=shared.settings['chat_generation_attempts'],
        step=1,
        label='Generation attempts (for longer replies)',
        info='New generations will be called until either this number is reached or no new content is generated between two iterations.'
    )
# Second column. Bug fix: `withgr.Column():` was a fused token (NameError);
# restored to `with gr.Column():` with the checkbox indented inside it.
with gr.Column():
    shared.gradio['stop_at_newline'] = gr.Checkbox(
        value=shared.settings['stop_at_newline'],
        label='Stop generating at new line character'
    )