Files
strix/strix/interface/tool_components/thinking_renderer.py
0xallam a6dcb7756e feat(tui): add real-time streaming LLM output with full content display
- Convert LiteLLM requests to streaming mode with stream_request()
- Add streaming parser to handle live LLM output segments
- Update TUI for real-time streaming content rendering
- Add tracer methods for streaming content tracking
- Clean function tags from streamed content to prevent display
- Remove all truncation from tool renderers for full content visibility
2026-01-06 16:44:22 -08:00

32 lines
905 B
Python

from typing import Any, ClassVar
from rich.text import Text
from textual.widgets import Static
from .base_renderer import BaseToolRenderer
from .registry import register_tool_renderer
@register_tool_renderer
class ThinkRenderer(BaseToolRenderer):
    """Render the ``think`` tool call as a purple "Thinking" entry in the TUI.

    Displays the agent's thought text (italic/dim) beneath a brain-emoji
    header, falling back to a placeholder when no thought was provided.
    """

    tool_name: ClassVar[str] = "think"
    css_classes: ClassVar[list[str]] = ["tool-call", "thinking-tool"]

    @classmethod
    def render(cls, tool_data: dict[str, Any]) -> Static:
        """Build a ``Static`` widget for one ``think`` tool invocation.

        ``tool_data`` is expected to carry the tool arguments under the
        ``"args"`` key; the thought text lives at ``args["thought"]``.
        """
        content = Text()
        content.append("🧠 ")
        content.append("Thinking", style="bold #a855f7")
        content.append("\n ")

        thought = tool_data.get("args", {}).get("thought", "")
        # Empty/missing thought gets a placeholder so the entry is never blank.
        content.append(thought if thought else "Thinking...", style="italic dim")

        return Static(content, classes=cls.get_css_classes("completed"))