diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d6b9a64..f0e8ac5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -30,7 +30,7 @@ Thank you for your interest in contributing to Strix! This guide will help you g 3. **Configure your LLM provider** ```bash - export STRIX_LLM="openai/gpt-5" + export STRIX_LLM="openai/gpt-5.4" export LLM_API_KEY="your-api-key" ``` diff --git a/README.md b/README.md index 8dbfa9b..8f5997c 100644 --- a/README.md +++ b/README.md @@ -73,9 +73,7 @@ Strix are autonomous AI agents that act just like real hackers - they run your c **Prerequisites:** - Docker (running) -- An LLM API key: - - Any [supported provider](https://docs.strix.ai/llm-providers/overview) (OpenAI, Anthropic, Google, etc.) - - Or [Strix Router](https://models.strix.ai) — single API key for multiple providers +- An LLM API key from any [supported provider](https://docs.strix.ai/llm-providers/overview) (OpenAI, Anthropic, Google, etc.) ### Installation & First Scan @@ -84,7 +82,7 @@ Strix are autonomous AI agents that act just like real hackers - they run your c curl -sSL https://strix.ai/install | bash # Configure your AI provider -export STRIX_LLM="openai/gpt-5" # or "strix/gpt-5" via Strix Router (https://models.strix.ai) +export STRIX_LLM="openai/gpt-5.4" export LLM_API_KEY="your-api-key" # Run your first security assessment @@ -215,7 +213,7 @@ jobs: ### Configuration ```bash -export STRIX_LLM="openai/gpt-5" +export STRIX_LLM="openai/gpt-5.4" export LLM_API_KEY="your-api-key" # Optional @@ -229,7 +227,7 @@ export STRIX_REASONING_EFFORT="high" # control thinking effort (default: high, **Recommended models for best results:** -- [OpenAI GPT-5](https://openai.com/api/) — `openai/gpt-5` +- [OpenAI GPT-5.4](https://openai.com/api/) — `openai/gpt-5.4` - [Anthropic Claude Sonnet 4.6](https://claude.com/platform/api) — `anthropic/claude-sonnet-4-6` - [Google Gemini 3 Pro Preview](https://cloud.google.com/vertex-ai) — `vertex_ai/gemini-3-pro-preview` diff --git 
a/docs/advanced/configuration.mdx b/docs/advanced/configuration.mdx index cf8eb93..18ff897 100644 --- a/docs/advanced/configuration.mdx +++ b/docs/advanced/configuration.mdx @@ -8,7 +8,7 @@ Configure Strix using environment variables or a config file. ## LLM Configuration - Model name in LiteLLM format (e.g., `openai/gpt-5`, `anthropic/claude-sonnet-4-6`). + Model name in LiteLLM format (e.g., `openai/gpt-5.4`, `anthropic/claude-sonnet-4-6`). @@ -114,7 +114,7 @@ strix --target ./app --config /path/to/config.json ```json { "env": { - "STRIX_LLM": "openai/gpt-5", + "STRIX_LLM": "openai/gpt-5.4", "LLM_API_KEY": "sk-...", "STRIX_REASONING_EFFORT": "high" } @@ -125,7 +125,7 @@ strix --target ./app --config /path/to/config.json ```bash # Required -export STRIX_LLM="openai/gpt-5" +export STRIX_LLM="openai/gpt-5.4" export LLM_API_KEY="sk-..." # Optional: Enable web search diff --git a/docs/contributing.mdx b/docs/contributing.mdx index b2e50a0..50964cc 100644 --- a/docs/contributing.mdx +++ b/docs/contributing.mdx @@ -32,7 +32,7 @@ description: "Contribute to Strix development" ```bash - export STRIX_LLM="openai/gpt-5" + export STRIX_LLM="openai/gpt-5.4" export LLM_API_KEY="your-api-key" ``` diff --git a/docs/docs.json b/docs/docs.json index 27ee5dc..e15b496 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -32,7 +32,6 @@ "group": "LLM Providers", "pages": [ "llm-providers/overview", - "llm-providers/models", "llm-providers/openai", "llm-providers/anthropic", "llm-providers/openrouter", diff --git a/docs/index.mdx b/docs/index.mdx index ef5ab9a..2d40148 100644 --- a/docs/index.mdx +++ b/docs/index.mdx @@ -78,7 +78,7 @@ Strix uses a graph of specialized agents for comprehensive security testing: curl -sSL https://strix.ai/install | bash # Configure -export STRIX_LLM="openai/gpt-5" +export STRIX_LLM="openai/gpt-5.4" export LLM_API_KEY="your-api-key" # Scan diff --git a/docs/integrations/github-actions.mdx b/docs/integrations/github-actions.mdx index 827dce0..6399144 100644 
--- a/docs/integrations/github-actions.mdx +++ b/docs/integrations/github-actions.mdx @@ -35,7 +35,7 @@ Add these secrets to your repository: | Secret | Description | |--------|-------------| -| `STRIX_LLM` | Model name (e.g., `openai/gpt-5`) | +| `STRIX_LLM` | Model name (e.g., `openai/gpt-5.4`) | | `LLM_API_KEY` | API key for your LLM provider | ## Exit Codes diff --git a/docs/llm-providers/anthropic.mdx b/docs/llm-providers/anthropic.mdx index 47a94be..8328872 100644 --- a/docs/llm-providers/anthropic.mdx +++ b/docs/llm-providers/anthropic.mdx @@ -6,7 +6,7 @@ description: "Configure Strix with Claude models" ## Setup ```bash -export STRIX_LLM="openai/gpt-5" +export STRIX_LLM="anthropic/claude-sonnet-4-6" export LLM_API_KEY="sk-ant-..." ``` diff --git a/docs/llm-providers/azure.mdx b/docs/llm-providers/azure.mdx index 629516d..1a9be00 100644 --- a/docs/llm-providers/azure.mdx +++ b/docs/llm-providers/azure.mdx @@ -24,7 +24,7 @@ export AZURE_API_VERSION="2025-11-01-preview" ## Example ```bash -export STRIX_LLM="azure/gpt-5-deployment" +export STRIX_LLM="azure/gpt-5.4-deployment" export AZURE_API_KEY="abc123..." export AZURE_API_BASE="https://mycompany.openai.azure.com" export AZURE_API_VERSION="2025-11-01-preview" @@ -33,5 +33,5 @@ export AZURE_API_VERSION="2025-11-01-preview" ## Prerequisites 1. Create an Azure OpenAI resource -2. Deploy a model (e.g., GPT-5.4) +2. Deploy a model (e.g., GPT-5.4) 3. Get the endpoint URL and API key from the Azure portal diff --git a/docs/llm-providers/models.mdx b/docs/llm-providers/models.mdx deleted file mode 100644 index 758679b..0000000 --- a/docs/llm-providers/models.mdx +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "Strix Router" -description: "Access top LLMs through a single API with high rate limits and zero data retention" ---- - -Strix Router gives you access to the best LLMs through a single API key. - - -Strix Router is currently in **beta**. 
It's completely optional — Strix works with any [LiteLLM-compatible provider](/llm-providers/overview) using your own API keys, or with [local models](/llm-providers/local). Strix Router is just the setup we test and optimize for. - - -## Why Use Strix Router? - -- **High rate limits** — No throttling during long-running scans -- **Zero data retention** — Routes to providers with zero data retention policies enabled -- **Failover & load balancing** — Automatic fallback across providers for reliability -- **Simple setup** — One API key, one environment variable, no provider accounts needed -- **No markup** — Same token pricing as the underlying providers, no extra fees - -## Quick Start - -1. Get your API key at [models.strix.ai](https://models.strix.ai) -2. Set your environment: - -```bash -export LLM_API_KEY='your-strix-api-key' -export STRIX_LLM='strix/gpt-5' -``` - -3. Run a scan: - -```bash -strix --target ./your-app -``` - -## Available Models - -### Anthropic - -| Model | ID | -|-------|-----| -| Claude Sonnet 4.6 | `strix/claude-sonnet-4.6` | -| Claude Opus 4.6 | `strix/claude-opus-4.6` | - -### OpenAI - -| Model | ID | -|-------|-----| -| GPT-5.2 | `strix/gpt-5.2` | -| GPT-5.1 | `strix/gpt-5.1` | -| GPT-5 | `strix/gpt-5` | - -### Google - -| Model | ID | -|-------|-----| -| Gemini 3 Pro | `strix/gemini-3-pro-preview` | -| Gemini 3 Flash | `strix/gemini-3-flash-preview` | - -### Other - -| Model | ID | -|-------|-----| -| GLM-5 | `strix/glm-5` | -| GLM-4.7 | `strix/glm-4.7` | - -## Configuration Reference - - - Your Strix API key from [models.strix.ai](https://models.strix.ai). - - - - Model ID from the tables above. Must be prefixed with `strix/`. 
- diff --git a/docs/llm-providers/openai.mdx b/docs/llm-providers/openai.mdx index 77c8ea8..c8a4867 100644 --- a/docs/llm-providers/openai.mdx +++ b/docs/llm-providers/openai.mdx @@ -6,7 +6,7 @@ description: "Configure Strix with OpenAI models" ## Setup ```bash -export STRIX_LLM="openai/gpt-5" +export STRIX_LLM="openai/gpt-5.4" export LLM_API_KEY="sk-..." ``` @@ -25,7 +25,7 @@ See [OpenAI Models Documentation](https://platform.openai.com/docs/models) for t For OpenAI-compatible APIs: ```bash -export STRIX_LLM="openai/gpt-5" +export STRIX_LLM="openai/gpt-5.4" export LLM_API_KEY="your-key" export LLM_API_BASE="https://your-proxy.com/v1" ``` diff --git a/docs/llm-providers/openrouter.mdx b/docs/llm-providers/openrouter.mdx index d4d36bf..2b816e9 100644 --- a/docs/llm-providers/openrouter.mdx +++ b/docs/llm-providers/openrouter.mdx @@ -8,7 +8,7 @@ description: "Configure Strix with models via OpenRouter" ## Setup ```bash -export STRIX_LLM="openrouter/openai/gpt-5" +export STRIX_LLM="openrouter/openai/gpt-5.4" export LLM_API_KEY="sk-or-..." ``` @@ -18,7 +18,7 @@ Access any model on OpenRouter using the format `openrouter//`: | Model | Configuration | |-------|---------------| -| GPT-5 | `openrouter/openai/gpt-5` | +| GPT-5.4 | `openrouter/openai/gpt-5.4` | | Claude Sonnet 4.6 | `openrouter/anthropic/claude-sonnet-4.6` | | Gemini 3 Pro | `openrouter/google/gemini-3-pro-preview` | | GLM-4.7 | `openrouter/z-ai/glm-4.7` | diff --git a/docs/llm-providers/overview.mdx b/docs/llm-providers/overview.mdx index 153ad0c..8c0d500 100644 --- a/docs/llm-providers/overview.mdx +++ b/docs/llm-providers/overview.mdx @@ -5,29 +5,18 @@ description: "Configure your AI model for Strix" Strix uses [LiteLLM](https://docs.litellm.ai/docs/providers) for model compatibility, supporting 100+ LLM providers. -## Strix Router (Recommended) +## Configuration -The fastest way to get started. 
[Strix Router](/llm-providers/models) gives you access to tested models with the highest rate limits and zero data retention. - -```bash -export STRIX_LLM="strix/gpt-5" -export LLM_API_KEY="your-strix-api-key" -``` - -Get your API key at [models.strix.ai](https://models.strix.ai). - -## Bring Your Own Key - -You can also use any LiteLLM-compatible provider with your own API keys: +Set your model and API key: | Model | Provider | Configuration | | ----------------- | ------------- | -------------------------------- | -| GPT-5 | OpenAI | `openai/gpt-5` | +| GPT-5.4 | OpenAI | `openai/gpt-5.4` | | Claude Sonnet 4.6 | Anthropic | `anthropic/claude-sonnet-4-6` | | Gemini 3 Pro | Google Vertex | `vertex_ai/gemini-3-pro-preview` | ```bash -export STRIX_LLM="openai/gpt-5" +export STRIX_LLM="openai/gpt-5.4" export LLM_API_KEY="your-api-key" ``` @@ -45,11 +34,8 @@ See the [Local Models guide](/llm-providers/local) for setup instructions and re ## Provider Guides - - Recommended models router with high rate limits. - - GPT-5 models. + GPT-5.4 models. Claude Opus, Sonnet, and Haiku. @@ -64,7 +50,7 @@ See the [Local Models guide](/llm-providers/local) for setup instructions and re Claude and Titan models via AWS. - GPT-5 via Azure. + GPT-5.4 via Azure. Llama 4, Mistral, and self-hosted models. 
@@ -76,7 +62,7 @@ See the [Local Models guide](/llm-providers/local) for setup instructions and re Use LiteLLM's `provider/model-name` format: ``` -openai/gpt-5 +openai/gpt-5.4 anthropic/claude-sonnet-4-6 vertex_ai/gemini-3-pro-preview bedrock/anthropic.claude-4-5-sonnet-20251022-v1:0 diff --git a/docs/quickstart.mdx b/docs/quickstart.mdx index bd7a8d9..681bf02 100644 --- a/docs/quickstart.mdx +++ b/docs/quickstart.mdx @@ -6,7 +6,7 @@ description: "Install Strix and run your first security scan" ## Prerequisites - Docker (running) -- An LLM API key — use [Strix Router](/llm-providers/models) for the easiest setup, or bring your own key from any [supported provider](/llm-providers/overview) +- An LLM API key from any [supported provider](/llm-providers/overview) (OpenAI, Anthropic, Google, etc.) ## Installation @@ -27,23 +27,13 @@ description: "Install Strix and run your first security scan" Set your LLM provider: - - - ```bash - export STRIX_LLM="strix/gpt-5" - export LLM_API_KEY="your-strix-api-key" - ``` - - - ```bash - export STRIX_LLM="openai/gpt-5" - export LLM_API_KEY="your-api-key" - ``` - - +```bash +export STRIX_LLM="openai/gpt-5.4" +export LLM_API_KEY="your-api-key" +``` -For best results, use `strix/gpt-5`, `strix/claude-opus-4.6`, or `strix/gpt-5.2`. +For best results, use `openai/gpt-5.4`, `anthropic/claude-opus-4-6`, or `openai/gpt-5.2`. 
## Run Your First Scan diff --git a/scripts/install.sh b/scripts/install.sh index 868a95e..65a2e5d 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -335,14 +335,11 @@ echo -e "${MUTED} AI Penetration Testing Agent${NC}" echo "" echo -e "${MUTED}To get started:${NC}" echo "" -echo -e " ${CYAN}1.${NC} Get your Strix API key:" -echo -e " ${MUTED}https://models.strix.ai${NC}" -echo "" -echo -e " ${CYAN}2.${NC} Set your environment:" +echo -e " ${CYAN}1.${NC} Set your environment:" echo -e " ${MUTED}export LLM_API_KEY='your-api-key'${NC}" -echo -e " ${MUTED}export STRIX_LLM='strix/gpt-5'${NC}" +echo -e " ${MUTED}export STRIX_LLM='openai/gpt-5.4'${NC}" echo "" -echo -e " ${CYAN}3.${NC} Run a penetration test:" +echo -e " ${CYAN}2.${NC} Run a penetration test:" echo -e " ${MUTED}strix --target https://example.com${NC}" echo "" echo -e "${MUTED}For more information visit ${NC}https://strix.ai" diff --git a/strix/interface/main.py b/strix/interface/main.py index 7d340df..d4c91a6 100644 --- a/strix/interface/main.py +++ b/strix/interface/main.py @@ -101,7 +101,7 @@ def validate_environment() -> None: # noqa: PLR0912, PLR0915 error_text.append("• ", style="white") error_text.append("STRIX_LLM", style="bold cyan") error_text.append( - " - Model name to use with litellm (e.g., 'openai/gpt-5')\n", + " - Model name to use with litellm (e.g., 'openai/gpt-5.4')\n", style="white", ) @@ -140,10 +140,7 @@ def validate_environment() -> None: # noqa: PLR0912, PLR0915 ) error_text.append("\nExample setup:\n", style="white") - if uses_strix_models: - error_text.append("export STRIX_LLM='strix/gpt-5'\n", style="dim white") - else: - error_text.append("export STRIX_LLM='openai/gpt-5'\n", style="dim white") + error_text.append("export STRIX_LLM='openai/gpt-5.4'\n", style="dim white") if missing_optional_vars: for var in missing_optional_vars: diff --git a/strix/llm/utils.py b/strix/llm/utils.py index cb61a81..9771854 100644 --- a/strix/llm/utils.py +++ b/strix/llm/utils.py @@ 
-36,7 +36,8 @@ STRIX_MODEL_MAP: dict[str, str] = { "claude-opus-4.6": "anthropic/claude-opus-4-6", + "gpt-5.4": "openai/gpt-5.4", "gpt-5.2": "openai/gpt-5.2", "gpt-5.1": "openai/gpt-5.1", "gpt-5": "openai/gpt-5", "gemini-3-pro-preview": "gemini/gemini-3-pro-preview", "gemini-3-flash-preview": "gemini/gemini-3-flash-preview", "glm-5": "openrouter/z-ai/glm-5", diff --git a/tests/llm/test_llm_otel.py b/tests/llm/test_llm_otel.py index 58ee89e..a11ffa5 100644 --- a/tests/llm/test_llm_otel.py +++ b/tests/llm/test_llm_otel.py @@ -1,15 +1,16 @@ import litellm +import pytest from strix.llm.config import LLMConfig from strix.llm.llm import LLM -def test_llm_does_not_modify_litellm_callbacks(monkeypatch) -> None: +def test_llm_does_not_modify_litellm_callbacks(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setenv("STRIX_TELEMETRY", "1") monkeypatch.setenv("STRIX_OTEL_TELEMETRY", "1") monkeypatch.setattr(litellm, "callbacks", ["custom-callback"]) - llm = LLM(LLMConfig(model_name="openai/gpt-5"), agent_name=None) + llm = LLM(LLMConfig(model_name="openai/gpt-5.4"), agent_name=None) assert llm is not None assert litellm.callbacks == ["custom-callback"]