Provider Configurations
config.yaml
# OpenAI chat-completions provider defaults.
model: "gpt-3.5-turbo"
default_params:
  temperature: 0.8  # number between 0 - 2
  top_p: 1
  max_tokens: 100
  n: 1
  stop:
    - ""
  frequency_penalty: 0
  presence_penalty: 0
  logit_bias: null
  user: null
  seed: null
  tools: []
  tool_choice: null
  response_format: null
# Provider defaults without an explicit model
# (presumably a deployment-scoped provider, e.g. Azure OpenAI — confirm against consumer).
default_params:
  temperature: 0.8  # number between 0 - 2
  top_p: 1
  max_tokens: 100
  n: 1
  stop:
    - ""
  frequency_penalty: 0
  presence_penalty: 0
  logit_bias: null
  user: null
  seed: null
  tools: []
  tool_choice: null
  response_format: null
# Cohere chat provider defaults.
model: "command-light"
default_params:
  temperature: 0.8  # number between 0 - 1
  preamble_override: ""
  chat_history: []
  conversation_id: ""
  prompt_truncation: ""
  connectors: []
  search_queries_only: false
  # NOTE(review): Cohere's API spells this "citation_quality" — key kept as-is;
  # confirm against the consuming code before renaming.
  citiation_quality: ""
# Mistral chat provider defaults.
model: "mistral-7b-instruct-fp16"
default_params:
  temperature: 1  # number between 0 - 2
  top_p: 1
  max_tokens: 100
  stop:
    - ""
  frequency_penalty: 0
  presence_penalty: 0
Was this page helpful?