/**
 * Default value for every user-configurable setting.
 *
 * Keys must stay in sync with SETTING_CONFIG_INFO below; values are the
 * factory defaults applied when a user has not overridden a setting.
 */
export const SETTING_CONFIG_DEFAULT: Record<string, string | number | boolean> = {
  // --- General UI / behavior settings ---
  apiKey: '',
  systemMessage: '',
  theme: 'system',
  showTokensPerSecond: false,
  showThoughtInProgress: false,
  disableReasoningFormat: false,
  keepStatsVisible: false,
  askForTitleConfirmation: false,
  pasteLongTextToFileLen: 2500,
  pdfAsImage: false,
  showModelInfo: false,
  renderUserContentAsMarkdown: false,

  // --- Sampling parameters ---
  // NOTE(review): names mirror llama.cpp-style sampler options and are
  // presumably forwarded to the inference server as-is — confirm against
  // the request-building code.
  samplers: 'top_k;typ_p;top_p;min_p;temperature',
  temperature: 0.8,
  dynatemp_range: 0.0,
  dynatemp_exponent: 1.0,
  top_k: 40,
  top_p: 0.95,
  min_p: 0.05,
  xtc_probability: 0.0,
  xtc_threshold: 0.1,
  typ_p: 1.0,
  repeat_last_n: 64,
  repeat_penalty: 1.0,
  presence_penalty: 0.0,
  frequency_penalty: 0.0,
  dry_multiplier: 0.0,
  dry_base: 1.75,
  dry_allowed_length: 2,
  dry_penalty_last_n: -1,
  max_tokens: -1, // -1 = no output-length limit
  custom: '', // free-form JSON merged into the API request

  // --- Experimental features ---
  pyInterpreterEnabled: false
};
| |
|
| | export const SETTING_CONFIG_INFO: Record<string, string> = { |
| | apiKey: 'Set the API Key if you are using --api-key option for the server.', |
| | systemMessage: 'The starting message that defines how model should behave.', |
| | theme: |
| | 'Choose the color theme for the interface. You can choose between System (follows your device settings), Light, or Dark.', |
| | pasteLongTextToFileLen: |
| | 'On pasting long text, it will be converted to a file. You can control the file length by setting the value of this parameter. Value 0 means disable.', |
| | samplers: |
| | 'The order at which samplers are applied, in simplified way. Default is "top_k;typ_p;top_p;min_p;temperature": top_k->typ_p->top_p->min_p->temperature', |
| | temperature: |
| | 'Controls the randomness of the generated text by affecting the probability distribution of the output tokens. Higher = more random, lower = more focused.', |
| | dynatemp_range: |
| | 'Addon for the temperature sampler. The added value to the range of dynamic temperature, which adjusts probabilities by entropy of tokens.', |
| | dynatemp_exponent: |
| | 'Addon for the temperature sampler. Smoothes out the probability redistribution based on the most probable token.', |
| | top_k: 'Keeps only k top tokens.', |
| | top_p: 'Limits tokens to those that together have a cumulative probability of at least p', |
| | min_p: |
| | 'Limits tokens based on the minimum probability for a token to be considered, relative to the probability of the most likely token.', |
| | xtc_probability: |
| | 'XTC sampler cuts out top tokens; this parameter controls the chance of cutting tokens at all. 0 disables XTC.', |
| | xtc_threshold: |
| | 'XTC sampler cuts out top tokens; this parameter controls the token probability that is required to cut that token.', |
| | typ_p: 'Sorts and limits tokens based on the difference between log-probability and entropy.', |
| | repeat_last_n: 'Last n tokens to consider for penalizing repetition', |
| | repeat_penalty: 'Controls the repetition of token sequences in the generated text', |
| | presence_penalty: 'Limits tokens based on whether they appear in the output or not.', |
| | frequency_penalty: 'Limits tokens based on how often they appear in the output.', |
| | dry_multiplier: |
| | 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling multiplier.', |
| | dry_base: |
| | 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling base value.', |
| | dry_allowed_length: |
| | 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the allowed length for DRY sampling.', |
| | dry_penalty_last_n: |
| | 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets DRY penalty for the last n tokens.', |
| | max_tokens: 'The maximum number of token per output. Use -1 for infinite (no limit).', |
| | custom: 'Custom JSON parameters to send to the API. Must be valid JSON format.', |
| | showTokensPerSecond: 'Display generation speed in tokens per second during streaming.', |
| | showThoughtInProgress: 'Expand thought process by default when generating messages.', |
| | disableReasoningFormat: |
| | 'Show raw LLM output without backend parsing and frontend Markdown rendering to inspect streaming across different models.', |
| | keepStatsVisible: 'Keep processing statistics visible after generation finishes.', |
| | askForTitleConfirmation: |
| | 'Ask for confirmation before automatically changing conversation title when editing the first message.', |
| | pdfAsImage: 'Parse PDF as image instead of text (requires vision-capable model).', |
| | showModelInfo: 'Display the model name used to generate each message below the message content.', |
| | renderUserContentAsMarkdown: 'Render user messages using markdown formatting in the chat.', |
| | pyInterpreterEnabled: |
| | 'Enable Python interpreter using Pyodide. Allows running Python code in markdown code blocks.' |
| | }; |
| |
|