Strix LLM Documentation and Config Changes (#315)
* feat: add new keys to the README
* feat: shout out Strix models in the docs
* fix: mypy error
* fix: base API
* docs: update quickstart and models
* fix: make `api_key` variable naming uniform across the docs
* test: git commit hook
* nevermind, it was nothing
* docs: Update default model to claude-sonnet-4.6 and improve Strix Router docs
  - Replace gpt-5 and opus-4.6 defaults with claude-sonnet-4.6 across all docs and code
  - Rewrite the Strix Router (models.mdx) page with clearer structure and messaging
  - Add Strix Router as the recommended option in overview.mdx and the quickstart prerequisites
  - Update stale Claude 4.5 references to 4.6 in anthropic.mdx, openrouter.mdx, and bug_report.md
  - Fix install.sh links to point to models.strix.ai and correct the docs URLs
  - Update error message examples in main.py to use claude-sonnet-4-6

---------

Co-authored-by: 0xallam <ahmed39652003@gmail.com>
This commit is contained in:
@@ -5,6 +5,9 @@ from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
|
||||
STRIX_API_BASE = "https://models.strix.ai/api/v1"
|
||||
|
||||
|
||||
class Config:
|
||||
"""Configuration Manager for Strix."""
|
||||
|
||||
@@ -177,3 +180,30 @@ def apply_saved_config(force: bool = False) -> dict[str, str]:
|
||||
|
||||
def save_current_config() -> bool:
    """Persist the active configuration through the Config manager.

    Thin module-level convenience wrapper around ``Config.save_current``.

    Returns:
        bool: whatever ``Config.save_current()`` reports.
    """
    return Config.save_current()
|
||||
|
||||
|
||||
def resolve_llm_config() -> tuple[str | None, str | None, str | None]:
    """Resolve LLM model, api_key, and api_base based on STRIX_LLM prefix.

    Models namespaced ``strix/...`` are served by the Strix Router, which
    speaks the OpenAI-compatible protocol: the namespace is rewritten to
    ``openai/`` and the api_base is pinned to ``STRIX_API_BASE``.  Any other
    model keeps its name and falls back to the first configured base URL
    (llm_api_base, openai_api_base, litellm_base_url, ollama_api_base),
    if any.

    Returns:
        tuple: (model_name, api_key, api_base) — all three are ``None``
        when no model is configured.
    """
    model = Config.get("strix_llm")
    if not model:
        # Nothing configured: callers treat an all-None tuple as "unset".
        return None, None, None

    api_key = Config.get("llm_api_key")

    if model.startswith("strix/"):
        # removeprefix instead of model[6:] — avoids a magic slice index
        # silently coupled to len("strix/").
        model_name = "openai/" + model.removeprefix("strix/")
        api_base: str | None = STRIX_API_BASE
    else:
        model_name = model
        # First non-empty base URL wins; order reflects provider precedence.
        api_base = (
            Config.get("llm_api_base")
            or Config.get("openai_api_base")
            or Config.get("litellm_base_url")
            or Config.get("ollama_api_base")
        )

    return model_name, api_key, api_base
|
||||
|
||||
@@ -51,10 +51,13 @@ def validate_environment() -> None: # noqa: PLR0912, PLR0915
|
||||
missing_required_vars = []
|
||||
missing_optional_vars = []
|
||||
|
||||
if not Config.get("strix_llm"):
|
||||
strix_llm = Config.get("strix_llm")
|
||||
uses_strix_models = strix_llm and strix_llm.startswith("strix/")
|
||||
|
||||
if not strix_llm:
|
||||
missing_required_vars.append("STRIX_LLM")
|
||||
|
||||
has_base_url = any(
|
||||
has_base_url = uses_strix_models or any(
|
||||
[
|
||||
Config.get("llm_api_base"),
|
||||
Config.get("openai_api_base"),
|
||||
@@ -96,7 +99,7 @@ def validate_environment() -> None: # noqa: PLR0912, PLR0915
|
||||
error_text.append("• ", style="white")
|
||||
error_text.append("STRIX_LLM", style="bold cyan")
|
||||
error_text.append(
|
||||
" - Model name to use with litellm (e.g., 'openai/gpt-5')\n",
|
||||
" - Model name to use with litellm (e.g., 'anthropic/claude-sonnet-4-6')\n",
|
||||
style="white",
|
||||
)
|
||||
|
||||
@@ -135,7 +138,10 @@ def validate_environment() -> None: # noqa: PLR0912, PLR0915
|
||||
)
|
||||
|
||||
error_text.append("\nExample setup:\n", style="white")
|
||||
error_text.append("export STRIX_LLM='openai/gpt-5'\n", style="dim white")
|
||||
if uses_strix_models:
|
||||
error_text.append("export STRIX_LLM='strix/claude-sonnet-4.6'\n", style="dim white")
|
||||
else:
|
||||
error_text.append("export STRIX_LLM='anthropic/claude-sonnet-4-6'\n", style="dim white")
|
||||
|
||||
if missing_optional_vars:
|
||||
for var in missing_optional_vars:
|
||||
@@ -198,17 +204,12 @@ def check_docker_installed() -> None:
|
||||
|
||||
|
||||
async def warm_up_llm() -> None:
|
||||
from strix.config.config import resolve_llm_config
|
||||
|
||||
console = Console()
|
||||
|
||||
try:
|
||||
model_name = Config.get("strix_llm")
|
||||
api_key = Config.get("llm_api_key")
|
||||
api_base = (
|
||||
Config.get("llm_api_base")
|
||||
or Config.get("openai_api_base")
|
||||
or Config.get("litellm_base_url")
|
||||
or Config.get("ollama_api_base")
|
||||
)
|
||||
model_name, api_key, api_base = resolve_llm_config()
|
||||
|
||||
test_messages = [
|
||||
{"role": "system", "content": "You are a helpful assistant."},
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from strix.config import Config
|
||||
from strix.config.config import resolve_llm_config
|
||||
|
||||
|
||||
class LLMConfig:
|
||||
@@ -10,7 +11,8 @@ class LLMConfig:
|
||||
timeout: int | None = None,
|
||||
scan_mode: str = "deep",
|
||||
):
|
||||
self.model_name = model_name or Config.get("strix_llm")
|
||||
resolved_model, self.api_key, self.api_base = resolve_llm_config()
|
||||
self.model_name = model_name or resolved_model
|
||||
|
||||
if not self.model_name:
|
||||
raise ValueError("STRIX_LLM environment variable must be set and not empty")
|
||||
|
||||
@@ -5,7 +5,7 @@ from typing import Any
|
||||
|
||||
import litellm
|
||||
|
||||
from strix.config import Config
|
||||
from strix.config.config import resolve_llm_config
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -155,14 +155,7 @@ def check_duplicate(
|
||||
|
||||
comparison_data = {"candidate": candidate_cleaned, "existing_reports": existing_cleaned}
|
||||
|
||||
model_name = Config.get("strix_llm")
|
||||
api_key = Config.get("llm_api_key")
|
||||
api_base = (
|
||||
Config.get("llm_api_base")
|
||||
or Config.get("openai_api_base")
|
||||
or Config.get("litellm_base_url")
|
||||
or Config.get("ollama_api_base")
|
||||
)
|
||||
model_name, api_key, api_base = resolve_llm_config()
|
||||
|
||||
messages = [
|
||||
{"role": "system", "content": DEDUPE_SYSTEM_PROMPT},
|
||||
|
||||
@@ -200,15 +200,10 @@ class LLM:
|
||||
"stream_options": {"include_usage": True},
|
||||
}
|
||||
|
||||
if api_key := Config.get("llm_api_key"):
|
||||
args["api_key"] = api_key
|
||||
if api_base := (
|
||||
Config.get("llm_api_base")
|
||||
or Config.get("openai_api_base")
|
||||
or Config.get("litellm_base_url")
|
||||
or Config.get("ollama_api_base")
|
||||
):
|
||||
args["api_base"] = api_base
|
||||
if self.config.api_key:
|
||||
args["api_key"] = self.config.api_key
|
||||
if self.config.api_base:
|
||||
args["api_base"] = self.config.api_base
|
||||
if self._supports_reasoning():
|
||||
args["reasoning_effort"] = self._reasoning_effort
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ from typing import Any
|
||||
|
||||
import litellm
|
||||
|
||||
from strix.config import Config
|
||||
from strix.config.config import Config, resolve_llm_config
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -104,13 +104,7 @@ def _summarize_messages(
|
||||
conversation = "\n".join(formatted)
|
||||
prompt = SUMMARY_PROMPT_TEMPLATE.format(conversation=conversation)
|
||||
|
||||
api_key = Config.get("llm_api_key")
|
||||
api_base = (
|
||||
Config.get("llm_api_base")
|
||||
or Config.get("openai_api_base")
|
||||
or Config.get("litellm_base_url")
|
||||
or Config.get("ollama_api_base")
|
||||
)
|
||||
_, api_key, api_base = resolve_llm_config()
|
||||
|
||||
try:
|
||||
completion_args: dict[str, Any] = {
|
||||
|
||||
Reference in New Issue
Block a user