Use gpt-5 as default

This commit is contained in:
Ahmed Allam
2025-08-11 18:00:24 -07:00
parent 81ac98e8b9
commit 139faed8ed
13 changed files with 724 additions and 706 deletions

View File

@@ -19,16 +19,6 @@
---
## 🚨 The AI Security Crisis
Everyone's shipping code faster than ever. Cursor, Windsurf, and Claude have made coding easy — but QA and security testing are now the real bottlenecks.
> **The number of security vulnerabilities has doubled post-AI.**
Traditional security tools weren't designed for this. SAST was a temporary fix when manual pentesting cost $10k+ and took weeks. Now, Strix delivers real security testing rapidly.
**The solution:** Enable developers to use AI coding at full speed, without compromising on security.
## 🦉 Strix Overview
Strix are autonomous AI agents that act just like real hackers - they run your code dynamically, find vulnerabilities, and validate them through actual exploitation. Built for developers and security teams who need fast, accurate security testing without the overhead of manual pentesting or the false positives of static analysis tools.
@@ -40,7 +30,7 @@ Strix are autonomous AI agents that act just like real hackers - they run your c
pipx install strix-agent
# Configure AI provider
export STRIX_LLM="anthropic/claude-sonnet-4-20250514"
export STRIX_LLM="openai/gpt-5"
export LLM_API_KEY="your-api-key"
# Run security assessment
@@ -103,7 +93,7 @@ strix --target api.your-app.com --instruction "Prioritize authentication and aut
```bash
# Required
export STRIX_LLM="anthropic/claude-sonnet-4-20250514"
export STRIX_LLM="openai/gpt-5"
export LLM_API_KEY="your-api-key"
# Recommended

1351
poetry.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
[tool.poetry]
name = "strix-agent"
version = "0.1.4"
version = "0.1.6"
description = "Open-source AI Hackers for your apps"
authors = ["Strix <hi@usestrix.com>"]
readme = "README.md"
@@ -45,7 +45,7 @@ strix = "strix.cli.main:main"
python = "^3.12"
fastapi = "*"
uvicorn = "*"
litellm = {extras = ["proxy"], version = "^1.72.1"}
litellm = {extras = ["proxy"], version = "^1.75.5.post1"}
tenacity = "^9.0.0"
numpydoc = "^1.8.0"
pydantic = {extras = ["email"], version = "^2.11.3"}

View File

@@ -12,6 +12,7 @@ import sys
from pathlib import Path
from typing import Any
from urllib.parse import urlparse
import shutil
import docker
import litellm
@@ -73,7 +74,7 @@ def validate_environment() -> None:
error_text.append("", style="white")
error_text.append("STRIX_LLM", style="bold cyan")
error_text.append(
" - Model name to use with litellm (e.g., 'anthropic/claude-sonnet-4-20250514')\n",
" - Model name to use with litellm (e.g., 'openai/gpt-5')\n",
style="white",
)
error_text.append("", style="white")
@@ -91,7 +92,7 @@ def validate_environment() -> None:
error_text.append("\nExample setup:\n", style="white")
error_text.append(
"export STRIX_LLM='anthropic/claude-sonnet-4-20250514'\n", style="dim white"
"export STRIX_LLM='openai/gpt-5'\n", style="dim white"
)
error_text.append("export LLM_API_KEY='your-api-key-here'\n", style="dim white")
if missing_optional_vars:
@@ -118,11 +119,32 @@ def _validate_llm_response(response: Any) -> None:
raise RuntimeError("Invalid response from LLM")
def check_docker_installed() -> None:
if shutil.which("docker") is None:
console = Console()
error_text = Text()
error_text.append("", style="bold red")
error_text.append("DOCKER NOT INSTALLED", style="bold red")
error_text.append("\n\n", style="white")
error_text.append("The 'docker' CLI was not found in your PATH.\n", style="white")
error_text.append("Please install Docker and ensure the 'docker' command is available.\n\n", style="white")
panel = Panel(
error_text,
title="[bold red]🛡️ STRIX STARTUP ERROR",
title_align="center",
border_style="red",
padding=(1, 2),
)
console.print("\n", panel, "\n")
sys.exit(1)
async def warm_up_llm() -> None:
console = Console()
try:
model_name = os.getenv("STRIX_LLM", "anthropic/claude-sonnet-4-20250514")
model_name = os.getenv("STRIX_LLM", "openai/gpt-5")
api_key = os.getenv("LLM_API_KEY")
if api_key:
@@ -136,7 +158,6 @@ async def warm_up_llm() -> None:
response = litellm.completion(
model=model_name,
messages=test_messages,
max_tokens=10,
)
_validate_llm_response(response)
@@ -523,6 +544,7 @@ def main() -> None:
if sys.platform == "win32":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
check_docker_installed()
pull_docker_image()
validate_environment()

View File

@@ -84,16 +84,10 @@ class AgentFinishRenderer(BaseToolRenderer):
)
if result_summary:
summary_display = (
result_summary[:400] + "..." if len(result_summary) > 400 else result_summary
)
content_parts = [f"{header}\n [bold]{cls.escape_markup(summary_display)}[/]"]
content_parts = [f"{header}\n [bold]{cls.escape_markup(result_summary)}[/]"]
if findings and isinstance(findings, list):
finding_lines = [f"{finding}" for finding in findings[:3]]
if len(findings) > 3:
finding_lines.append(f"• ... +{len(findings) - 3} more findings")
finding_lines = [f"{finding}" for finding in findings]
content_parts.append(
f" [dim]{chr(10).join([cls.escape_markup(line) for line in finding_lines])}[/]"
)

View File

@@ -23,8 +23,7 @@ class FinishScanRenderer(BaseToolRenderer):
)
if content:
content_display = content[:600] + "..." if len(content) > 600 else content
content_text = f"{header}\n [bold]{cls.escape_markup(content_display)}[/]"
content_text = f"{header}\n [bold]{cls.escape_markup(content)}[/]"
else:
content_text = f"{header}\n [dim]Generating final report...[/]"

View File

@@ -31,8 +31,7 @@ class CreateVulnerabilityReportRenderer(BaseToolRenderer):
)
if content:
content_preview = content[:100] + "..." if len(content) > 100 else content
content_parts.append(f" [dim]{cls.escape_markup(content_preview)}[/]")
content_parts.append(f" [dim]{cls.escape_markup(content)}[/]")
content_text = "\n".join(content_parts)
else:

View File

@@ -51,8 +51,7 @@ class SubagentStartInfoRenderer(BaseToolRenderer):
content = f"🤖 Spawned subagent [bold #22c55e]{name}[/bold #22c55e]"
if task:
display_task = task[:80] + "..." if len(task) > 80 else task
content += f"\n Task: [dim]{display_task}[/dim]"
content += f"\n Task: [dim]{task}[/dim]"
css_classes = cls.get_css_classes(status)
return Static(content, classes=css_classes)

View File

@@ -54,8 +54,7 @@ class Tracer:
def get_run_dir(self) -> Path:
if self._run_dir is None:
workspace_root = Path(__file__).parent.parent.parent
runs_dir = workspace_root / "agent_runs"
runs_dir = Path.cwd() / "agent_runs"
runs_dir.mkdir(exist_ok=True)
run_dir_name = self.run_name if self.run_name else self.run_id

View File

@@ -9,7 +9,7 @@ class LLMConfig:
enable_prompt_caching: bool = True,
prompt_modules: list[str] | None = None,
):
self.model_name = model_name or os.getenv("STRIX_LLM", "anthropic/claude-sonnet-4-20250514")
self.model_name = model_name or os.getenv("STRIX_LLM", "openai/gpt-5")
if not self.model_name:
raise ValueError("STRIX_LLM environment variable must be set and not empty")

View File

@@ -248,7 +248,6 @@ class LLM:
"model": self.config.model_name,
"messages": messages,
"temperature": self.config.temperature,
"stop": ["</function>"],
}
queue = get_global_queue()

View File

@@ -145,7 +145,7 @@ class MemoryCompressor:
model_name: str | None = None,
):
self.max_images = max_images
self.model_name = model_name or os.getenv("STRIX_LLM", "anthropic/claude-sonnet-4-20250514")
self.model_name = model_name or os.getenv("STRIX_LLM", "openai/gpt-5")
if not self.model_name:
raise ValueError("STRIX_LLM environment variable must be set and not empty")