fix linter errors for config registry & cli (#2292)

Shuchang Zheng
2025-05-05 00:12:46 -07:00
committed by GitHub
parent c3072d7572
commit e2a82a75d9
4 changed files with 41 additions and 36 deletions

View File

@@ -7,9 +7,9 @@ import time
 import uuid
 from pathlib import Path
 from typing import Optional
-import requests
 from urllib.parse import urlparse

+import requests
 import typer
 import uvicorn
 from dotenv import load_dotenv, set_key
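The relocation of import requests matches isort-style grouping, which ruff enforces as rule I001: standard-library imports in one block, third-party packages in the next, straight imports before from-imports within each block. As a sketch, the import section presumably reads like this after the fix (reconstructed from the hunk above, so treat it as illustrative):

import time
import uuid
from pathlib import Path
from typing import Optional
from urllib.parse import urlparse

import requests
import typer
import uvicorn
from dotenv import load_dotenv, set_key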
@@ -490,7 +490,7 @@ def setup_browser_config() -> tuple[str, Optional[str], Optional[str]]:
if response.status_code == 200:
try:
browser_info = response.json()
print(f"Chrome is already running with remote debugging!")
print("Chrome is already running with remote debugging!")
if "Browser" in browser_info:
print(f"Browser: {browser_info['Browser']}")
if "webSocketDebuggerUrl" in browser_info:
@@ -514,7 +514,9 @@ def setup_browser_config() -> tuple[str, Optional[str], Optional[str]]:
print("Unsupported OS for Chrome configuration. Please set it up manually.")
# Ask user if they want to execute the command
execute_browser = input("\nWould you like to start Chrome with remote debugging now? (y/n) [y]: ").strip().lower()
execute_browser = (
input("\nWould you like to start Chrome with remote debugging now? (y/n) [y]: ").strip().lower()
)
if not execute_browser or execute_browser == "y":
print(f"Starting Chrome with remote debugging on port {default_port}...")
try:
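The long input(...).strip().lower() line is rewrapped in parentheses, the idiomatic way to split an expression across lines without backslashes, presumably to satisfy the line-length limit (88 columns is the Black/ruff default, an assumption here). The same pattern in isolation:

# Parenthesizing the expression lets the method chain wrap cleanly and
# keeps the assignment target readable; no backslash continuations needed.
answer = (
    input("Continue with the defaults? (y/n) [y]: ").strip().lower()
)
should_continue = not answer or answer == "y"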

View File

@@ -807,11 +807,11 @@ if settings.ENABLE_VERTEX_AI:
 if settings.ENABLE_OLLAMA:
     # Register Ollama model configured in settings
     if settings.OLLAMA_MODEL:
-        model_name = settings.OLLAMA_MODEL
+        ollama_model_name = settings.OLLAMA_MODEL
         LLMConfigRegistry.register_config(
             "OLLAMA",
             LLMConfig(
-                f"ollama/{model_name}",
+                f"ollama/{ollama_model_name}",
                 ["OLLAMA_SERVER_URL", "OLLAMA_MODEL"],
                 supports_vision=False,  # Ollama does not support vision yet
                 add_assistant_prefix=False,
@@ -820,7 +820,7 @@ if settings.ENABLE_OLLAMA:
                 api_base=settings.OLLAMA_SERVER_URL,
                 api_key=None,
                 api_version=None,
-                model_info={"model_name": f"ollama/{model_name}"},
+                model_info={"model_name": f"ollama/{ollama_model_name}"},
             ),
         ),
     )
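The rename from the shared model_name to ollama_model_name (mirrored below as openrouter_model_name, groq_model_name, and openai_compatible_model_name) stops successive provider blocks from redefining one module-level variable, a pattern linters flag because a block that forgets its own assignment silently inherits the previous provider's value. A minimal sketch of that failure mode, with illustrative names:

enable_a, enable_b = True, False

if enable_a:
    model_name = "provider-a/model-x"

if enable_b:
    model_name = "provider-b/model-y"  # skipped when enable_b is False

# Any later use that assumes provider B sees provider A's value instead.
print(model_name)  # prints "provider-a/model-x"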
@@ -828,11 +828,11 @@ if settings.ENABLE_OLLAMA:
 if settings.ENABLE_OPENROUTER:
     # Register OpenRouter model configured in settings
     if settings.OPENROUTER_MODEL:
-        model_name = settings.OPENROUTER_MODEL
+        openrouter_model_name = settings.OPENROUTER_MODEL
         LLMConfigRegistry.register_config(
             "OPENROUTER",
             LLMConfig(
-                f"openrouter/{model_name}",
+                f"openrouter/{openrouter_model_name}",
                 ["OPENROUTER_API_KEY", "OPENROUTER_MODEL"],
                 supports_vision=settings.LLM_CONFIG_SUPPORT_VISION,
                 add_assistant_prefix=False,
@@ -841,18 +841,18 @@ if settings.ENABLE_OPENROUTER:
                 api_key=settings.OPENROUTER_API_KEY,
                 api_base=settings.OPENROUTER_API_BASE,
                 api_version=None,
-                model_info={"model_name": f"openrouter/{model_name}"},
+                model_info={"model_name": f"openrouter/{openrouter_model_name}"},
             ),
         ),
     )

 if settings.ENABLE_GROQ:
     # Register Groq model configured in settings
     if settings.GROQ_MODEL:
-        model_name = settings.GROQ_MODEL
+        groq_model_name = settings.GROQ_MODEL
         LLMConfigRegistry.register_config(
             "GROQ",
             LLMConfig(
-                f"groq/{model_name}",
+                f"groq/{groq_model_name}",
                 ["GROQ_API_KEY", "GROQ_MODEL"],
                 supports_vision=settings.LLM_CONFIG_SUPPORT_VISION,
                 add_assistant_prefix=False,
@@ -861,7 +861,7 @@ if settings.ENABLE_GROQ:
                 api_key=settings.GROQ_API_KEY,
                 api_version=None,
                 api_base=settings.GROQ_API_BASE,
-                model_info={"model_name": f"groq/{model_name}"},
+                model_info={"model_name": f"groq/{groq_model_name}"},
             ),
         ),
     )
@@ -870,10 +870,10 @@ if settings.ENABLE_GROQ:
 # See documentation: https://docs.litellm.ai/docs/providers/openai_compatible
 if settings.ENABLE_OPENAI_COMPATIBLE:
     # Check for required model name
-    model_key = settings.OPENAI_COMPATIBLE_MODEL_KEY
-    model_name = settings.OPENAI_COMPATIBLE_MODEL_NAME
+    openai_compatible_model_key = settings.OPENAI_COMPATIBLE_MODEL_KEY
+    openai_compatible_model_name = settings.OPENAI_COMPATIBLE_MODEL_NAME

-    if not model_name:
+    if not openai_compatible_model_name:
         raise InvalidLLMConfigError(
             "OPENAI_COMPATIBLE_MODEL_NAME is required but not set. OpenAI-compatible model will not be registered."
         )
@@ -886,14 +886,14 @@ if settings.ENABLE_OPENAI_COMPATIBLE:
             api_key=settings.OPENAI_COMPATIBLE_API_KEY,
             api_base=settings.OPENAI_COMPATIBLE_API_BASE,
             api_version=settings.OPENAI_COMPATIBLE_API_VERSION,
-            model_info={"model_name": f"openai/{model_name}"},
+            model_info={"model_name": f"openai/{openai_compatible_model_name}"},
         )

     # Configure LLMConfig
     LLMConfigRegistry.register_config(
-        model_key,
+        openai_compatible_model_key,
         LLMConfig(
-            f"openai/{model_name}",  # Add openai/ prefix for liteLLM
+            f"openai/{openai_compatible_model_name}",  # Add openai/ prefix for liteLLM
             required_env_vars,
             supports_vision=settings.OPENAI_COMPATIBLE_SUPPORTS_VISION,
             add_assistant_prefix=settings.OPENAI_COMPATIBLE_ADD_ASSISTANT_PREFIX,
@@ -905,4 +905,7 @@ if settings.ENABLE_OPENAI_COMPATIBLE:
             reasoning_effort=settings.OPENAI_COMPATIBLE_REASONING_EFFORT,
         ),
     )
-    LOG.info(f"Registered OpenAI-compatible model with key {model_key}", model_name=model_name)
+    LOG.info(
+        f"Registered OpenAI-compatible model with key {openai_compatible_model_key}",
+        model_name=openai_compatible_model_name,
+    )
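The reflowed LOG.info call keeps the message and the model_name keyword field separate, which suggests a structlog-style logger behind LOG (an assumption, not confirmed by the diff): keyword arguments become structured fields that log processors can filter on. A minimal sketch under that assumption:

import structlog

LOG = structlog.get_logger()

# Hypothetical values standing in for the settings-derived ones above.
registered_key = "OPENAI_COMPATIBLE_EXAMPLE"
registered_model = "openai/example-model"

# The event text carries the key; model_name rides along as a
# structured field rather than being baked into the string.
LOG.info(
    f"Registered OpenAI-compatible model with key {registered_key}",
    model_name=registered_model,
)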

View File

@@ -473,7 +473,7 @@ async def _create_cdp_connection_browser(
             "--remote-debugging-address=0.0.0.0",
         ],
         stdout=subprocess.PIPE,
-        stderr=subprocess.PIPE
+        stderr=subprocess.PIPE,
     )
     # Add small delay to allow browser to start
     time.sleep(2)
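The only change in this hunk is a trailing comma after stderr=subprocess.PIPE, the "magic trailing comma" that Black and ruff's formatter settle on for multi-line calls: every argument line ends with a comma, so adding or reordering arguments later produces one-line diffs. The shape in isolation, with an illustrative command:

import subprocess

# Every argument in the exploded call ends with a comma, including
# the last one, so future additions touch a single line.
proc = subprocess.Popen(
    ["echo", "ready"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
stdout_data, _ = proc.communicate()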