perf: improve system concurrency and I/O stability; fix WebSocket disconnects
This commit is contained in:
@@ -444,6 +444,16 @@ def create_model(
|
||||
"""
|
||||
# Normalize the caller-supplied provider to its canonical spelling.
provider = canonicalize_model_provider(provider)

# DashScope routing: when DashScope credentials are present, prefer
# DashScopeChatModel for DeepSeek models. Two cases are redirected:
#   * provider explicitly set to DEEPSEEK, or
#   * provider left at the OPENAI default while the model name itself
#     looks like a DeepSeek model.
# NOTE: the env-var truthiness is checked first, and model_name is only
# lowered on the OPENAI branch, mirroring the original short-circuit order.
if os.getenv("DASHSCOPE_API_KEY") and (
    provider == "DEEPSEEK"
    or (provider == "OPENAI" and "deepseek" in model_name.lower())
):
    provider = "DASHSCOPE"

# Resolve the concrete chat-model class; unknown providers fail fast
# rather than falling through to a confusing AttributeError later.
model_class = PROVIDER_MODEL_MAP.get(provider)
if model_class is None:
    raise ValueError(f"Unsupported provider: {provider}")
||||
Reference in New Issue
Block a user