From 9b1e04f12885ce555cdbfb8bf622562cdda94d9c Mon Sep 17 00:00:00 2001 From: Advait Paliwal Date: Mon, 23 Mar 2026 18:31:37 -0700 Subject: [PATCH] Add system resource detection, Docker execution skill, and environment-aware recommendations - TUI header now shows CPU cores, RAM, GPU, and Docker availability - System prompt uses resource info to recommend execution environments - Docker skill for running experiment code in isolated containers - Renamed docker-sandbox skill to docker (Feynman stays on host, code runs in containers) - Updated README and website to cite Docker alongside Agent Computer Co-Authored-By: Claude Opus 4.6 (1M context) --- .feynman/SYSTEM.md | 1 + README.md | 9 +- extensions/research-tools/header.ts | 49 ++++++++++- prompts/autoresearch.md | 1 + prompts/replicate.md | 1 + skills-lock.json | 10 +++ skills/docker/SKILL.md | 84 +++++++++++++++++++ website/.astro/data-store.json | 2 +- .../src/content/docs/workflows/replication.md | 1 + website/src/pages/index.astro | 6 +- 10 files changed, 156 insertions(+), 8 deletions(-) create mode 100644 skills-lock.json create mode 100644 skills/docker/SKILL.md diff --git a/.feynman/SYSTEM.md b/.feynman/SYSTEM.md index 80e8f72..b4ec2f0 100644 --- a/.feynman/SYSTEM.md +++ b/.feynman/SYSTEM.md @@ -30,6 +30,7 @@ Operating rules: - For long-running local work such as experiments, crawls, or log-following, use the process package instead of blocking the main thread unnecessarily. Prefer detached/background execution when the user does not need to steer every intermediate step. - Prefer the smallest investigation or experiment that can materially reduce uncertainty before escalating to broader work. - When an experiment is warranted, write the code or scripts, run them, capture outputs, and save artifacts to disk. +- Before recommending an execution environment, consider the system resources shown in the header (CPU, RAM, GPU, Docker availability). 
If the workload exceeds local capacity, recommend Docker for isolation or Agent Computer for cloud GPU/compute. Do not suggest GPU workloads locally if no GPU is detected. - Treat polished scientific communication as part of the job: structure reports cleanly, use Markdown deliberately, and use LaTeX math when equations clarify the argument. - For any source-based answer, include an explicit Sources section with direct URLs, not just paper titles. - When citing papers from alpha-backed tools, prefer direct arXiv or alphaXiv links and include the arXiv ID. diff --git a/README.md b/README.md index 740b75a..21521ca 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,8 @@ Four bundled research agents, dispatched automatically or via subagent commands. ## Tools - **[AlphaXiv](https://www.alphaxiv.org/)** — paper search, Q&A, code reading, persistent annotations -- **[Agent Computer](https://agentcomputer.ai)** — secure cloud execution for experiments, replications, and long-running research +- **Docker** — isolated container execution for safe experiments on your machine +- **[Agent Computer](https://agentcomputer.ai)** — secure cloud execution for long-running research and GPU workloads - **Web search** — Gemini or Perplexity, zero-config default via signed-in Chromium - **Session search** — indexed recall across prior research sessions - **Preview** — browser and PDF export of generated artifacts @@ -82,9 +83,9 @@ feynman search status # web search config ## How it works -Built on [Pi](https://github.com/mariozechner/pi-coding-agent), [alphaXiv](https://www.alphaxiv.org/), and [Agent Computer](https://agentcomputer.ai). Pi provides the agent runtime. alphaXiv powers paper search, Q&A, code reading, and annotations. Agent Computer provides secure cloud machines for running experiments and replications. 
+Built on [Pi](https://github.com/mariozechner/pi-coding-agent) for the agent runtime, [alphaXiv](https://www.alphaxiv.org/) for paper search and analysis, [Docker](https://www.docker.com/) for isolated local execution, and [Agent Computer](https://agentcomputer.ai) for secure cloud workloads -Every output is source-grounded. Claims link to papers, docs, or repos with direct URLs. +Every output is source-grounded — claims link to papers, docs, or repos with direct URLs --- @@ -96,5 +97,3 @@ cd feynman && npm install && npm run start ``` [Docs](https://feynman.companion.ai/docs) · [MIT License](LICENSE) - -Built on [Pi](https://github.com/mariozechner/pi-coding-agent), [alphaXiv](https://www.alphaxiv.org/), and [Agent Computer](https://agentcomputer.ai). diff --git a/extensions/research-tools/header.ts b/extensions/research-tools/header.ts index 370d405..6591499 100644 --- a/extensions/research-tools/header.ts +++ b/extensions/research-tools/header.ts @@ -1,5 +1,6 @@ import { readdir } from "node:fs/promises"; -import { homedir } from "node:os"; +import { cpus, freemem, homedir, totalmem } from "node:os"; +import { execSync } from "node:child_process"; import { resolve as resolvePath } from "node:path"; import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent"; @@ -123,6 +124,44 @@ async function buildAgentCatalogSummary(): Promise<{ agents: string[]; chains: s return { agents, chains }; } +type SystemResources = { + cpu: string; + cores: number; + ramTotal: string; + ramFree: string; + gpu: string | null; + docker: boolean; +}; + +function detectSystemResources(): SystemResources { + const cores = cpus().length; + const cpu = cpus()[0]?.model?.trim() ?? 
"unknown"; + const totalBytes = totalmem(); + const freeBytes = freemem(); + const ramTotal = `${Math.round(totalBytes / (1024 ** 3))}GB`; + const ramFree = `${Math.round(freeBytes / (1024 ** 3))}GB`; + + let gpu: string | null = null; + try { + if (process.platform === "darwin") { + const out = execSync("system_profiler SPDisplaysDataType 2>/dev/null | grep 'Chipset Model\\|Chip Model'", { encoding: "utf8", timeout: 3000 }).trim(); + const match = out.match(/:\s*(.+)/); + if (match) gpu = match[1]!.trim(); + } else { + const out = execSync("nvidia-smi --query-gpu=name --format=csv,noheader 2>/dev/null", { encoding: "utf8", timeout: 3000 }).trim(); + if (out) gpu = out.split("\n")[0]!.trim(); + } + } catch {} + + let docker = false; + try { + execSync("docker info 2>/dev/null", { timeout: 3000 }); + docker = true; + } catch {} + + return { cpu, cores, ramTotal, ramFree, gpu, docker }; +} + type WorkflowInfo = { name: string; description: string }; function getResearchWorkflows(pi: ExtensionAPI): WorkflowInfo[] { @@ -150,6 +189,7 @@ export function installFeynmanHeader( cache.agentSummaryPromise ??= buildAgentCatalogSummary(); return cache.agentSummaryPromise.then((agentData) => { + const resources = detectSystemResources(); const workflows = getResearchWorkflows(pi); const toolCount = pi.getAllTools().length; const commandCount = pi.getCommands().length; @@ -228,6 +268,11 @@ export function installFeynmanHeader( pushLabeled("directory", dirLabel, "text"); pushLabeled("session", sessionId, "dim"); leftLines.push(""); + pushLabeled("cpu", `${resources.cores} cores`, "dim"); + pushLabeled("ram", `${resources.ramFree} free / ${resources.ramTotal}`, "dim"); + if (resources.gpu) pushLabeled("gpu", resources.gpu, "dim"); + pushLabeled("docker", resources.docker ? 
"available" : "not found", "dim"); + leftLines.push(""); leftLines.push(theme.fg("dim", `${toolCount} tools · ${agentCount} agents`)); const pushList = (heading: string, items: string[]) => { @@ -298,6 +343,8 @@ export function installFeynmanHeader( push(row(`${theme.fg("dim", "model".padEnd(10))} ${theme.fg("text", truncateVisible(modelLabel, narrowValW))}`)); push(row(`${theme.fg("dim", "directory".padEnd(10))} ${theme.fg("text", truncateVisible(dirLabel, narrowValW))}`)); push(row(`${theme.fg("dim", "session".padEnd(10))} ${theme.fg("dim", truncateVisible(sessionId, narrowValW))}`)); + const resourceLine = `${resources.cores} cores · ${resources.ramTotal} ram${resources.gpu ? ` · ${resources.gpu}` : ""}${resources.docker ? " · docker" : ""}`; + push(row(theme.fg("dim", truncateVisible(resourceLine, contentW)))); push(row(theme.fg("dim", truncateVisible(`${toolCount} tools · ${agentCount} agents · ${commandCount} commands`, contentW)))); push(emptyRow()); diff --git a/prompts/autoresearch.md b/prompts/autoresearch.md index 68d84d1..cc3604e 100644 --- a/prompts/autoresearch.md +++ b/prompts/autoresearch.md @@ -25,6 +25,7 @@ Ask the user where to run: - **Local** — run in the current working directory - **New git branch** — create a branch so main stays clean - **Virtual environment** — create an isolated venv/conda env first +- **Docker** — run experiment code inside an isolated Docker container - **Cloud** — delegate to a remote Agent Computer machine via `/delegate` Do not proceed without a clear answer. diff --git a/prompts/replicate.md b/prompts/replicate.md index e3fd3c9..946bedf 100644 --- a/prompts/replicate.md +++ b/prompts/replicate.md @@ -13,6 +13,7 @@ Design a replication plan for: $@ 3. 
**Environment** — Before running anything, ask the user where to execute: - **Local** — run in the current working directory - **Virtual environment** — create an isolated venv/conda env first + - **Docker** — run experiment code inside an isolated Docker container - **Cloud** — delegate to a remote Agent Computer machine via `/delegate` - **Plan only** — produce the replication plan without executing 4. **Execute** — If the user chose an execution environment, implement and run the replication steps there. Save notes, scripts, and results to disk in a reproducible layout. diff --git a/skills-lock.json b/skills-lock.json new file mode 100644 index 0000000..ce73768 --- /dev/null +++ b/skills-lock.json @@ -0,0 +1,10 @@ +{ + "version": 1, + "skills": { + "find-skills": { + "source": "vercel-labs/skills", + "sourceType": "github", + "computedHash": "d31e234f0c90694a670222cdd1dafa853e051d7066beda389f1097c22dadd461" + } + } +} diff --git a/skills/docker/SKILL.md b/skills/docker/SKILL.md new file mode 100644 index 0000000..0c81f40 --- /dev/null +++ b/skills/docker/SKILL.md @@ -0,0 +1,84 @@ +--- +name: docker +description: Execute research code inside isolated Docker containers for safe replication, experiments, and benchmarks. Use when the user selects Docker as the execution environment or asks to run code safely, in isolation, or in a sandbox. +allowed-tools: Bash(docker:*) +--- + +# Docker Sandbox + +Run research code inside Docker containers while Feynman stays on the host. The container gets the project files, runs the commands, and results sync back. + +## When to use + +- User selects "Docker" as the execution environment in `/replicate` or `/autoresearch` +- Running untrusted code from a paper's repository +- Experiments that install packages or modify system state +- Any time the user asks to run something "safely" or "isolated" + +## How it works + +1. Build or pull an appropriate base image for the research code +2. 
Mount the project directory into the container +3. Run experiment commands inside the container +4. Results write back to the mounted directory + +## Running commands in a container + +For Python research code (most common): + +```bash +docker run --rm -v "$(pwd)":/workspace -w /workspace python:3.11 bash -c " +  pip install -r requirements.txt && +  python train.py +" +``` + +For projects with a Dockerfile: + +```bash +docker build -t feynman-experiment . +docker run --rm -v "$(pwd)/results":/workspace/results feynman-experiment +``` + +For GPU workloads: + +```bash +docker run --rm --gpus all -v "$(pwd)":/workspace -w /workspace pytorch/pytorch:latest bash -c " +  pip install -r requirements.txt && +  python train.py +" +``` + +## Choosing the base image + +| Research type | Base image | +| --- | --- | +| Python ML/DL | `pytorch/pytorch:latest` or `tensorflow/tensorflow:latest-gpu` | +| Python general | `python:3.11` | +| Node.js | `node:20` | +| R / statistics | `rocker/r-ver:4` | +| Julia | `julia:1.10` | +| Multi-language | `ubuntu:24.04` with manual installs | + +## Persistent containers + +For iterative experiments (like `/autoresearch`), create a named container instead of `--rm`. Choose a descriptive name based on the experiment: + +```bash +docker create --name <container-name> -v "$(pwd)":/workspace -w /workspace python:3.11 tail -f /dev/null +docker start <container-name> +docker exec <container-name> bash -c "pip install -r requirements.txt" +docker exec <container-name> bash -c "python train.py" +``` + +This preserves installed packages across iterations. 
Clean up with: + +```bash +docker stop && docker rm +``` + +## Notes + +- The mounted workspace syncs results back to the host automatically +- Containers are network-enabled by default — add `--network none` for full isolation +- For GPU access, Docker must be configured with the NVIDIA Container Toolkit diff --git a/website/.astro/data-store.json b/website/.astro/data-store.json index 488a54c..c05519d 100644 --- a/website/.astro/data-store.json +++ b/website/.astro/data-store.json @@ -1 +1 @@ -[["Map",1,2,9,10],"meta::meta",["Map",3,4,5,6,7,8],"astro-version","5.18.1","content-config-digest","d2da5d7c4a062d75","astro-config-digest","{\"root\":{},\"srcDir\":{},\"publicDir\":{},\"outDir\":{},\"cacheDir\":{},\"site\":\"https://feynman.companion.ai\",\"compressHTML\":true,\"base\":\"/\",\"trailingSlash\":\"ignore\",\"output\":\"static\",\"scopedStyleStrategy\":\"attribute\",\"build\":{\"format\":\"directory\",\"client\":{},\"server\":{},\"assets\":\"_astro\",\"serverEntry\":\"entry.mjs\",\"redirects\":true,\"inlineStylesheets\":\"auto\",\"concurrency\":1},\"server\":{\"open\":false,\"host\":false,\"port\":3001,\"streaming\":true,\"allowedHosts\":[]},\"redirects\":{},\"image\":{\"endpoint\":{\"route\":\"/_image\"},\"service\":{\"entrypoint\":\"astro/assets/services/sharp\",\"config\":{}},\"domains\":[],\"remotePatterns\":[],\"responsiveStyles\":false},\"devToolbar\":{\"enabled\":true},\"markdown\":{\"syntaxHighlight\":{\"type\":\"shiki\",\"excludeLangs\":[\"math\"]},\"shikiConfig\":{\"langs\":[],\"langAlias\":{},\"theme\":\"github-dark\",\"themes\":{\"light\":\"github-light\",\"dark\":\"github-dark\"},\"wrap\":false,\"transformers\":[]},\"remarkPlugins\":[],\"rehypePlugins\":[],\"remarkRehype\":{},\"gfm\":true,\"smartypants\":true},\"security\":{\"checkOrigin\":true,\"allowedDomains\":[],\"actionBodySizeLimit\":1048576},\"env\":{\"schema\":{},\"validateSecrets\":false},\"experimental\":{\"clientPrerender\":false,\"contentIntellisense\":false,\"headingIdCompat\":false,\
"preserveScriptOrder\":false,\"liveContentCollections\":false,\"csp\":false,\"staticImportMetaEnv\":false,\"chromeDevtoolsWorkspace\":false,\"failOnPrerenderConflict\":false,\"svgo\":false},\"legacy\":{\"collections\":false}}","docs",["Map",11,12,58,59,89,90,132,133,168,169,197,198,233,234,266,267,295,296,312,313,338,339,368,369,391,392,416,417,445,446,477,478,503,504,525,526,546,547,570,571,592,593,616,617,637,638,661,662],"agents/reviewer",{"id":11,"data":13,"body":18,"filePath":19,"digest":20,"rendered":21,"legacyId":57},{"title":14,"description":15,"section":16,"order":17},"Reviewer","Simulate a tough but constructive AI research peer reviewer with inline annotations.","Agents",2,"## Source\n\nGenerated from `.feynman/agents/reviewer.md`. Edit that prompt file, not this docs page.\n\n## Role\n\nSimulate a tough but constructive AI research peer reviewer with inline annotations.\n\n## Default Output\n\n`review.md`\n\nYour job is to act like a skeptical but fair peer reviewer for AI/ML systems work.\n\n## Review checklist\n- Evaluate novelty, clarity, empirical rigor, reproducibility, and likely reviewer pushback.\n- Do not praise vaguely. Every positive claim should be tied to specific evidence.\n- Look for:\n - missing or weak baselines\n - missing ablations\n - evaluation mismatches\n - unclear claims of novelty\n - weak related-work positioning\n - insufficient statistical evidence\n - benchmark leakage or contamination risks\n - under-specified implementation details\n - claims that outrun the experiments\n- Distinguish between fatal issues, strong concerns, and polish issues.\n- Preserve uncertainty. 
If the draft might pass depending on venue norms, say so explicitly.\n\n## Output format\n\nProduce two sections: a structured review and inline annotations.\n\n### Part 1: Structured Review\n\n```markdown\n## Summary\n1-2 paragraph summary of the paper's contributions and approach.\n\n## Strengths\n- [S1] ...\n- [S2] ...\n\n## Weaknesses\n- [W1] **FATAL:** ...\n- [W2] **MAJOR:** ...\n- [W3] **MINOR:** ...\n\n## Questions for Authors\n- [Q1] ...\n\n## Verdict\nOverall assessment and confidence score. Would this pass at [venue]?\n\n## Revision Plan\nPrioritized, concrete steps to address each weakness.\n```\n\n### Part 2: Inline Annotations\n\nQuote specific passages from the paper and annotate them directly:\n\n```markdown\n## Inline Annotations\n\n> \"We achieve state-of-the-art results on all benchmarks\"\n**[W1] FATAL:** This claim is unsupported — Table 3 shows the method underperforms on 2 of 5 benchmarks. Revise to accurately reflect results.\n\n> \"Our approach is novel in combining X with Y\"\n**[W3] MINOR:** Z et al. (2024) combined X with Y in a different domain. Acknowledge this and clarify the distinction.\n\n> \"We use a learning rate of 1e-4\"\n**[Q1]:** Was this tuned? What range was searched? This matters for reproducibility.\n```\n\nReference the weakness/question IDs from Part 1 so annotations link back to the structured review.\n\n## Operating rules\n- Every weakness must reference a specific passage or section in the paper.\n- Inline annotations must quote the exact text being critiqued.\n- End with a `Sources` section containing direct URLs for anything additionally inspected during review.\n\n## Output contract\n- Save the main artifact to `review.md`.\n- The review must contain both the structured review AND inline annotations.","src/content/docs/agents/reviewer.md","115fe4b081dd8349",{"html":22,"metadata":23},"\u003Ch2 id=\"source\">Source\u003C/h2>\n\u003Cp>Generated from \u003Ccode>.feynman/agents/reviewer.md\u003C/code>. 
Edit that prompt file, not this docs page.\u003C/p>\n\u003Ch2 id=\"role\">Role\u003C/h2>\n\u003Cp>Simulate a tough but constructive AI research peer reviewer with inline annotations.\u003C/p>\n\u003Ch2 id=\"default-output\">Default Output\u003C/h2>\n\u003Cp>\u003Ccode>review.md\u003C/code>\u003C/p>\n\u003Cp>Your job is to act like a skeptical but fair peer reviewer for AI/ML systems work.\u003C/p>\n\u003Ch2 id=\"review-checklist\">Review checklist\u003C/h2>\n\u003Cul>\n\u003Cli>Evaluate novelty, clarity, empirical rigor, reproducibility, and likely reviewer pushback.\u003C/li>\n\u003Cli>Do not praise vaguely. Every positive claim should be tied to specific evidence.\u003C/li>\n\u003Cli>Look for:\n\u003Cul>\n\u003Cli>missing or weak baselines\u003C/li>\n\u003Cli>missing ablations\u003C/li>\n\u003Cli>evaluation mismatches\u003C/li>\n\u003Cli>unclear claims of novelty\u003C/li>\n\u003Cli>weak related-work positioning\u003C/li>\n\u003Cli>insufficient statistical evidence\u003C/li>\n\u003Cli>benchmark leakage or contamination risks\u003C/li>\n\u003Cli>under-specified implementation details\u003C/li>\n\u003Cli>claims that outrun the experiments\u003C/li>\n\u003C/ul>\n\u003C/li>\n\u003Cli>Distinguish between fatal issues, strong concerns, and polish issues.\u003C/li>\n\u003Cli>Preserve uncertainty. 
If the draft might pass depending on venue norms, say so explicitly.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-format\">Output format\u003C/h2>\n\u003Cp>Produce two sections: a structured review and inline annotations.\u003C/p>\n\u003Ch3 id=\"part-1-structured-review\">Part 1: Structured Review\u003C/h3>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"markdown\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Summary\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">1-2 paragraph summary of the paper's contributions and approach.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Strengths\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">S1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">S2\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan 
style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Weaknesses\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] \u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**FATAL:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W2\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] \u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**MAJOR:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W3\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] \u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**MINOR:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> ...\u003C/span>\u003C/span>\n\u003Cspan 
class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Questions for Authors\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">Q1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Verdict\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">Overall assessment and confidence score. Would this pass at [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">venue\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">]?\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Revision Plan\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">Prioritized, concrete steps to address each weakness.\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch3 id=\"part-2-inline-annotations\">Part 2: Inline Annotations\u003C/h3>\n\u003Cp>Quote specific passages from the paper and annotate them directly:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" 
tabindex=\"0\" data-language=\"markdown\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Inline Annotations\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#22863A;--shiki-dark:#85E89D\">> \"We achieve state-of-the-art results on all benchmarks\"\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**[\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">] FATAL:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> This claim is unsupported — Table 3 shows the method underperforms on 2 of 5 benchmarks. Revise to accurately reflect results.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#22863A;--shiki-dark:#85E89D\">> \"Our approach is novel in combining X with Y\"\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**[\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W3\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">] MINOR:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> Z et al. (2024) combined X with Y in a different domain. 
Acknowledge this and clarify the distinction.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#22863A;--shiki-dark:#85E89D\">> \"We use a learning rate of 1e-4\"\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**[\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">Q1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">]:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> Was this tuned? What range was searched? This matters for reproducibility.\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Reference the weakness/question IDs from Part 1 so annotations link back to the structured review.\u003C/p>\n\u003Ch2 id=\"operating-rules\">Operating rules\u003C/h2>\n\u003Cul>\n\u003Cli>Every weakness must reference a specific passage or section in the paper.\u003C/li>\n\u003Cli>Inline annotations must quote the exact text being critiqued.\u003C/li>\n\u003Cli>End with a \u003Ccode>Sources\u003C/code> section containing direct URLs for anything additionally inspected during review.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-contract\">Output contract\u003C/h2>\n\u003Cul>\n\u003Cli>Save the main artifact to \u003Ccode>review.md\u003C/code>.\u003C/li>\n\u003Cli>The review must contain both the structured review AND inline annotations.\u003C/li>\n\u003C/ul>",{"headings":24,"localImagePaths":53,"remoteImagePaths":54,"frontmatter":55,"imagePaths":56},[25,28,31,34,37,40,44,47,50],{"depth":17,"slug":26,"text":27},"source","Source",{"depth":17,"slug":29,"text":30},"role","Role",{"depth":17,"slug":32,"text":33},"default-output","Default Output",{"depth":17,"slug":35,"text":36},"review-checklist","Review 
checklist",{"depth":17,"slug":38,"text":39},"output-format","Output format",{"depth":41,"slug":42,"text":43},3,"part-1-structured-review","Part 1: Structured Review",{"depth":41,"slug":45,"text":46},"part-2-inline-annotations","Part 2: Inline Annotations",{"depth":17,"slug":48,"text":49},"operating-rules","Operating rules",{"depth":17,"slug":51,"text":52},"output-contract","Output contract",[],[],{"title":14,"description":15,"section":16,"order":17},[],"agents/reviewer.md","agents/writer",{"id":58,"data":60,"body":63,"filePath":64,"digest":65,"rendered":66,"legacyId":88},{"title":61,"description":62,"section":16,"order":41},"Writer","Turn research notes into clear, structured briefs and drafts.","## Source\n\nGenerated from `.feynman/agents/writer.md`. Edit that prompt file, not this docs page.\n\n## Role\n\nTurn research notes into clear, structured briefs and drafts.\n\n## Tools\n\n`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`\n\n## Default Output\n\n`draft.md`\n\n## Integrity commandments\n1. **Write only from supplied evidence.** Do not introduce claims, tools, or sources that are not in the input research files.\n2. **Preserve caveats and disagreements.** Never smooth away uncertainty.\n3. 
**Be explicit about gaps.** If the research files have unresolved questions or conflicting evidence, surface them — do not paper over them.\n\n## Output structure\n\n```markdown\n# Title\n\n## Executive Summary\n2-3 paragraph overview of key findings.\n\n## Section 1: ...\nDetailed findings organized by theme or question.\n\n## Section N: ...\n...\n\n## Open Questions\nUnresolved issues, disagreements between sources, gaps in evidence.\n```\n\n## Operating rules\n- Use clean Markdown structure and add equations only when they materially help.\n- Keep the narrative readable, but never outrun the evidence.\n- Produce artifacts that are ready to review in a browser or PDF preview.\n- Do NOT add inline citations — the verifier agent handles that as a separate post-processing step.\n- Do NOT add a Sources section — the verifier agent builds that.\n\n## Output contract\n- Save the main artifact to the specified output path (default: `draft.md`).\n- Focus on clarity, structure, and evidence traceability.","src/content/docs/agents/writer.md","ef9e81fb8113db70",{"html":67,"metadata":68},"\u003Ch2 id=\"source\">Source\u003C/h2>\n\u003Cp>Generated from \u003Ccode>.feynman/agents/writer.md\u003C/code>. 
Edit that prompt file, not this docs page.\u003C/p>\n\u003Ch2 id=\"role\">Role\u003C/h2>\n\u003Cp>Turn research notes into clear, structured briefs and drafts.\u003C/p>\n\u003Ch2 id=\"tools\">Tools\u003C/h2>\n\u003Cp>\u003Ccode>read\u003C/code>, \u003Ccode>bash\u003C/code>, \u003Ccode>grep\u003C/code>, \u003Ccode>find\u003C/code>, \u003Ccode>ls\u003C/code>, \u003Ccode>write\u003C/code>, \u003Ccode>edit\u003C/code>\u003C/p>\n\u003Ch2 id=\"default-output\">Default Output\u003C/h2>\n\u003Cp>\u003Ccode>draft.md\u003C/code>\u003C/p>\n\u003Ch2 id=\"integrity-commandments\">Integrity commandments\u003C/h2>\n\u003Col>\n\u003Cli>\u003Cstrong>Write only from supplied evidence.\u003C/strong> Do not introduce claims, tools, or sources that are not in the input research files.\u003C/li>\n\u003Cli>\u003Cstrong>Preserve caveats and disagreements.\u003C/strong> Never smooth away uncertainty.\u003C/li>\n\u003Cli>\u003Cstrong>Be explicit about gaps.\u003C/strong> If the research files have unresolved questions or conflicting evidence, surface them — do not paper over them.\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"output-structure\">Output structure\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"markdown\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\"># Title\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Executive Summary\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">2-3 paragraph overview of key findings.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan 
class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Section 1: ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">Detailed findings organized by theme or question.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Section N: ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Open Questions\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">Unresolved issues, disagreements between sources, gaps in evidence.\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"operating-rules\">Operating rules\u003C/h2>\n\u003Cul>\n\u003Cli>Use clean Markdown structure and add equations only when they materially help.\u003C/li>\n\u003Cli>Keep the narrative readable, but never outrun the evidence.\u003C/li>\n\u003Cli>Produce artifacts that are ready to review in a browser or PDF preview.\u003C/li>\n\u003Cli>Do NOT add inline citations — the verifier agent handles that as a separate post-processing step.\u003C/li>\n\u003Cli>Do NOT add a Sources section — the verifier agent builds that.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-contract\">Output contract\u003C/h2>\n\u003Cul>\n\u003Cli>Save the main artifact to the specified output path (default: \u003Ccode>draft.md\u003C/code>).\u003C/li>\n\u003Cli>Focus on clarity, structure, and evidence 
traceability.\u003C/li>\n\u003C/ul>",{"headings":69,"localImagePaths":84,"remoteImagePaths":85,"frontmatter":86,"imagePaths":87},[70,71,72,75,76,79,82,83],{"depth":17,"slug":26,"text":27},{"depth":17,"slug":29,"text":30},{"depth":17,"slug":73,"text":74},"tools","Tools",{"depth":17,"slug":32,"text":33},{"depth":17,"slug":77,"text":78},"integrity-commandments","Integrity commandments",{"depth":17,"slug":80,"text":81},"output-structure","Output structure",{"depth":17,"slug":48,"text":49},{"depth":17,"slug":51,"text":52},[],[],{"title":61,"description":62,"section":16,"order":41},[],"agents/writer.md","agents/researcher",{"id":89,"data":91,"body":95,"filePath":96,"digest":97,"rendered":98,"legacyId":131},{"title":92,"description":93,"section":16,"order":94},"Researcher","Gather primary evidence across papers, web sources, repos, docs, and local artifacts.",1,"## Source\n\nGenerated from `.feynman/agents/researcher.md`. Edit that prompt file, not this docs page.\n\n## Role\n\nGather primary evidence across papers, web sources, repos, docs, and local artifacts.\n\n## Tools\n\n`read`, `bash`, `grep`, `find`, `ls`\n\n## Default Output\n\n`research.md`\n\n## Integrity commandments\n1. **Never fabricate a source.** Every named tool, project, paper, product, or dataset must have a verifiable URL. If you cannot find a URL, do not mention it.\n2. **Never claim a project exists without checking.** Before citing a GitHub repo, search for it. Before citing a paper, find it. If a search returns zero results, the thing does not exist — do not invent it.\n3. **Never extrapolate details you haven't read.** If you haven't fetched and inspected a source, you may note its existence but must not describe its contents, metrics, or claims.\n4. **URL or it didn't happen.** Every entry in your evidence table must include a direct, checkable URL. No URL = not included.\n\n## Search strategy\n1. **Start wide.** Begin with short, broad queries to map the landscape. 
Use the `queries` array in `web_search` with 2–4 varied-angle queries simultaneously — never one query at a time when exploring.\n2. **Evaluate availability.** After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.\n3. **Progressively narrow.** Drill into specifics using terminology and names discovered in initial results. Refine queries, don't repeat them.\n4. **Cross-source.** When the topic spans current reality and academic literature, always use both `web_search` and `alpha_search`.\n\nUse `recencyFilter` on `web_search` for fast-moving topics. Use `includeContent: true` on the most important results to get full page content rather than snippets.\n\n## Source quality\n- **Prefer:** academic papers, official documentation, primary datasets, verified benchmarks, government filings, reputable journalism, expert technical blogs, official vendor pages\n- **Accept with caveats:** well-cited secondary sources, established trade publications\n- **Deprioritize:** SEO-optimized listicles, undated blog posts, content aggregators, social media without primary links\n- **Reject:** sources with no author and no date, content that appears AI-generated with no primary backing\n\nWhen initial results skew toward low-quality sources, re-search with `domainFilter` targeting authoritative domains.\n\n## Output format\n\nAssign each source a stable numeric ID. Use these IDs consistently so downstream agents can trace claims to exact sources.\n\n### Evidence table\n\n| # | Source | URL | Key claim | Type | Confidence |\n|---|--------|-----|-----------|------|------------|\n| 1 | ... | ... | ... | primary / secondary / self-reported | high / medium / low |\n\n### Findings\n\nWrite findings using inline source references: `[1]`, `[2]`, etc. Every factual claim must cite at least one source by number.\n\n### Sources\n\nNumbered list matching the evidence table:\n1. Author/Title — URL\n2. 
Author/Title — URL\n\n## Context hygiene\n- Write findings to the output file progressively. Do not accumulate full page contents in your working memory — extract what you need, write it to file, move on.\n- When `includeContent: true` returns large pages, extract relevant quotes and discard the rest immediately.\n- If your search produces 10+ results, triage by title/snippet first. Only fetch full content for the top candidates.\n- Return a one-line summary to the parent, not full findings. The parent reads the output file.\n\n## Output contract\n- Save to the output file (default: `research.md`).\n- Minimum viable output: evidence table with ≥5 numbered entries, findings with inline references, and a numbered Sources section.\n- Write to the file and pass a lightweight reference back — do not dump full content into the parent context.","src/content/docs/agents/researcher.md","4d4d0e1b0fa38cd0",{"html":99,"metadata":100},"\u003Ch2 id=\"source\">Source\u003C/h2>\n\u003Cp>Generated from \u003Ccode>.feynman/agents/researcher.md\u003C/code>. Edit that prompt file, not this docs page.\u003C/p>\n\u003Ch2 id=\"role\">Role\u003C/h2>\n\u003Cp>Gather primary evidence across papers, web sources, repos, docs, and local artifacts.\u003C/p>\n\u003Ch2 id=\"tools\">Tools\u003C/h2>\n\u003Cp>\u003Ccode>read\u003C/code>, \u003Ccode>bash\u003C/code>, \u003Ccode>grep\u003C/code>, \u003Ccode>find\u003C/code>, \u003Ccode>ls\u003C/code>\u003C/p>\n\u003Ch2 id=\"default-output\">Default Output\u003C/h2>\n\u003Cp>\u003Ccode>research.md\u003C/code>\u003C/p>\n\u003Ch2 id=\"integrity-commandments\">Integrity commandments\u003C/h2>\n\u003Col>\n\u003Cli>\u003Cstrong>Never fabricate a source.\u003C/strong> Every named tool, project, paper, product, or dataset must have a verifiable URL. If you cannot find a URL, do not mention it.\u003C/li>\n\u003Cli>\u003Cstrong>Never claim a project exists without checking.\u003C/strong> Before citing a GitHub repo, search for it. 
Before citing a paper, find it. If a search returns zero results, the thing does not exist — do not invent it.\u003C/li>\n\u003Cli>\u003Cstrong>Never extrapolate details you haven’t read.\u003C/strong> If you haven’t fetched and inspected a source, you may note its existence but must not describe its contents, metrics, or claims.\u003C/li>\n\u003Cli>\u003Cstrong>URL or it didn’t happen.\u003C/strong> Every entry in your evidence table must include a direct, checkable URL. No URL = not included.\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"search-strategy\">Search strategy\u003C/h2>\n\u003Col>\n\u003Cli>\u003Cstrong>Start wide.\u003C/strong> Begin with short, broad queries to map the landscape. Use the \u003Ccode>queries\u003C/code> array in \u003Ccode>web_search\u003C/code> with 2–4 varied-angle queries simultaneously — never one query at a time when exploring.\u003C/li>\n\u003Cli>\u003Cstrong>Evaluate availability.\u003C/strong> After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.\u003C/li>\n\u003Cli>\u003Cstrong>Progressively narrow.\u003C/strong> Drill into specifics using terminology and names discovered in initial results. Refine queries, don’t repeat them.\u003C/li>\n\u003Cli>\u003Cstrong>Cross-source.\u003C/strong> When the topic spans current reality and academic literature, always use both \u003Ccode>web_search\u003C/code> and \u003Ccode>alpha_search\u003C/code>.\u003C/li>\n\u003C/ol>\n\u003Cp>Use \u003Ccode>recencyFilter\u003C/code> on \u003Ccode>web_search\u003C/code> for fast-moving topics. 
Use \u003Ccode>includeContent: true\u003C/code> on the most important results to get full page content rather than snippets.\u003C/p>\n\u003Ch2 id=\"source-quality\">Source quality\u003C/h2>\n\u003Cul>\n\u003Cli>\u003Cstrong>Prefer:\u003C/strong> academic papers, official documentation, primary datasets, verified benchmarks, government filings, reputable journalism, expert technical blogs, official vendor pages\u003C/li>\n\u003Cli>\u003Cstrong>Accept with caveats:\u003C/strong> well-cited secondary sources, established trade publications\u003C/li>\n\u003Cli>\u003Cstrong>Deprioritize:\u003C/strong> SEO-optimized listicles, undated blog posts, content aggregators, social media without primary links\u003C/li>\n\u003Cli>\u003Cstrong>Reject:\u003C/strong> sources with no author and no date, content that appears AI-generated with no primary backing\u003C/li>\n\u003C/ul>\n\u003Cp>When initial results skew toward low-quality sources, re-search with \u003Ccode>domainFilter\u003C/code> targeting authoritative domains.\u003C/p>\n\u003Ch2 id=\"output-format\">Output format\u003C/h2>\n\u003Cp>Assign each source a stable numeric ID. Use these IDs consistently so downstream agents can trace claims to exact sources.\u003C/p>\n\u003Ch3 id=\"evidence-table\">Evidence table\u003C/h3>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>#\u003C/th>\u003Cth>Source\u003C/th>\u003Cth>URL\u003C/th>\u003Cth>Key claim\u003C/th>\u003Cth>Type\u003C/th>\u003Cth>Confidence\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>1\u003C/td>\u003Ctd>…\u003C/td>\u003Ctd>…\u003C/td>\u003Ctd>…\u003C/td>\u003Ctd>primary / secondary / self-reported\u003C/td>\u003Ctd>high / medium / low\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch3 id=\"findings\">Findings\u003C/h3>\n\u003Cp>Write findings using inline source references: \u003Ccode>[1]\u003C/code>, \u003Ccode>[2]\u003C/code>, etc. 
Every factual claim must cite at least one source by number.\u003C/p>\n\u003Ch3 id=\"sources\">Sources\u003C/h3>\n\u003Cp>Numbered list matching the evidence table:\u003C/p>\n\u003Col>\n\u003Cli>Author/Title — URL\u003C/li>\n\u003Cli>Author/Title — URL\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"context-hygiene\">Context hygiene\u003C/h2>\n\u003Cul>\n\u003Cli>Write findings to the output file progressively. Do not accumulate full page contents in your working memory — extract what you need, write it to file, move on.\u003C/li>\n\u003Cli>When \u003Ccode>includeContent: true\u003C/code> returns large pages, extract relevant quotes and discard the rest immediately.\u003C/li>\n\u003Cli>If your search produces 10+ results, triage by title/snippet first. Only fetch full content for the top candidates.\u003C/li>\n\u003Cli>Return a one-line summary to the parent, not full findings. The parent reads the output file.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-contract\">Output contract\u003C/h2>\n\u003Cul>\n\u003Cli>Save to the output file (default: \u003Ccode>research.md\u003C/code>).\u003C/li>\n\u003Cli>Minimum viable output: evidence table with ≥5 numbered entries, findings with inline references, and a numbered Sources section.\u003C/li>\n\u003Cli>Write to the file and pass a lightweight reference back — do not dump full content into the parent context.\u003C/li>\n\u003C/ul>",{"headings":101,"localImagePaths":127,"remoteImagePaths":128,"frontmatter":129,"imagePaths":130},[102,103,104,105,106,107,110,113,114,117,120,123,126],{"depth":17,"slug":26,"text":27},{"depth":17,"slug":29,"text":30},{"depth":17,"slug":73,"text":74},{"depth":17,"slug":32,"text":33},{"depth":17,"slug":77,"text":78},{"depth":17,"slug":108,"text":109},"search-strategy","Search strategy",{"depth":17,"slug":111,"text":112},"source-quality","Source quality",{"depth":17,"slug":38,"text":39},{"depth":41,"slug":115,"text":116},"evidence-table","Evidence 
table",{"depth":41,"slug":118,"text":119},"findings","Findings",{"depth":41,"slug":121,"text":122},"sources","Sources",{"depth":17,"slug":124,"text":125},"context-hygiene","Context hygiene",{"depth":17,"slug":51,"text":52},[],[],{"title":92,"description":93,"section":16,"order":94},[],"agents/researcher.md","getting-started/setup",{"id":132,"data":134,"body":138,"filePath":139,"digest":140,"rendered":141,"legacyId":167},{"title":135,"description":136,"section":137,"order":41},"Setup","Detailed setup guide for Feynman","Getting Started","## Guided setup\n\n```bash\nfeynman setup\n```\n\nThis walks through four steps:\n\n### Model provider authentication\n\nFeynman uses Pi's OAuth system for model access. The setup wizard prompts you to log in to your preferred provider.\n\n```bash\nfeynman model login\n```\n\n### AlphaXiv login\n\nAlphaXiv powers Feynman's paper search and analysis tools. Sign in with:\n\n```bash\nfeynman alpha login\n```\n\nCheck status anytime:\n\n```bash\nfeynman alpha status\n```\n\n### Web search routing\n\nFeynman supports three web search backends:\n\n- **auto** — Prefer Perplexity when configured, fall back to Gemini\n- **perplexity** — Force Perplexity Sonar\n- **gemini** — Force Gemini (default, zero-config via signed-in Chromium)\n\nThe default path requires no API keys — it uses Gemini Browser via your signed-in Chromium profile.\n\n### Preview dependencies\n\nFor PDF and HTML export of generated artifacts, Feynman needs `pandoc`:\n\n```bash\nfeynman --setup-preview\n```\n\nThis installs pandoc automatically on macOS/Homebrew systems.\n\n## Diagnostics\n\nRun the doctor to check everything:\n\n```bash\nfeynman doctor\n```\n\nThis verifies model auth, alphaXiv credentials, preview dependencies, and the Pi runtime.","src/content/docs/getting-started/setup.md","49b3f67aa1ff128a",{"html":142,"metadata":143},"\u003Ch2 id=\"guided-setup\">Guided setup\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" 
style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> setup\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>This walks through four steps:\u003C/p>\n\u003Ch3 id=\"model-provider-authentication\">Model provider authentication\u003C/h3>\n\u003Cp>Feynman uses Pi’s OAuth system for model access. The setup wizard prompts you to log in to your preferred provider.\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> model\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> login\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch3 id=\"alphaxiv-login\">AlphaXiv login\u003C/h3>\n\u003Cp>AlphaXiv powers Feynman’s paper search and analysis tools. 
Sign in with:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> alpha\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> login\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Check status anytime:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> alpha\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> status\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch3 id=\"web-search-routing\">Web search routing\u003C/h3>\n\u003Cp>Feynman supports three web search backends:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>auto\u003C/strong> — Prefer Perplexity when configured, fall back to Gemini\u003C/li>\n\u003Cli>\u003Cstrong>perplexity\u003C/strong> — Force Perplexity Sonar\u003C/li>\n\u003Cli>\u003Cstrong>gemini\u003C/strong> — Force Gemini (default, zero-config via signed-in Chromium)\u003C/li>\n\u003C/ul>\n\u003Cp>The default path requires no API keys — it uses Gemini Browser via your signed-in Chromium profile.\u003C/p>\n\u003Ch3 id=\"preview-dependencies\">Preview dependencies\u003C/h3>\n\u003Cp>For PDF and HTML export of generated artifacts, Feynman needs \u003Ccode>pandoc\u003C/code>:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" 
style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --setup-preview\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>This installs pandoc automatically on macOS/Homebrew systems.\u003C/p>\n\u003Ch2 id=\"diagnostics\">Diagnostics\u003C/h2>\n\u003Cp>Run the doctor to check everything:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> doctor\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>This verifies model auth, alphaXiv credentials, preview dependencies, and the Pi runtime.\u003C/p>",{"headings":144,"localImagePaths":163,"remoteImagePaths":164,"frontmatter":165,"imagePaths":166},[145,148,151,154,157,160],{"depth":17,"slug":146,"text":147},"guided-setup","Guided setup",{"depth":41,"slug":149,"text":150},"model-provider-authentication","Model provider authentication",{"depth":41,"slug":152,"text":153},"alphaxiv-login","AlphaXiv login",{"depth":41,"slug":155,"text":156},"web-search-routing","Web search routing",{"depth":41,"slug":158,"text":159},"preview-dependencies","Preview dependencies",{"depth":17,"slug":161,"text":162},"diagnostics","Diagnostics",[],[],{"title":135,"description":136,"section":137,"order":41},[],"getting-started/setup.md","getting-started/quickstart",{"id":168,"data":170,"body":173,"filePath":174,"digest":175,"rendered":176,"legacyId":196},{"title":171,"description":172,"section":137,"order":17},"Quick Start","Get up and 
running with Feynman in 60 seconds","## First run\n\n```bash\nfeynman setup\nfeynman\n```\n\n`feynman setup` walks you through model authentication, alphaXiv login, web search configuration, and preview dependencies.\n\n## Ask naturally\n\nFeynman routes your questions into the right workflow automatically. You don't need slash commands to get started.\n\n```\n> What are the main approaches to RLHF alignment?\n```\n\nFeynman will search papers, gather web sources, and produce a structured answer with citations.\n\n## Use workflows directly\n\nFor explicit control, use slash commands inside the REPL:\n\n```\n> /deepresearch transformer scaling laws\n> /lit multimodal reasoning benchmarks\n> /review paper.pdf\n```\n\n## Output locations\n\nFeynman writes durable artifacts to canonical directories:\n\n- `outputs/` — Reviews, reading lists, summaries\n- `papers/` — Polished paper-style drafts\n- `experiments/` — Runnable code and result logs\n- `notes/` — Scratch notes and session logs","src/content/docs/getting-started/quickstart.md","0a22caade9f6c5a5",{"html":177,"metadata":178},"\u003Ch2 id=\"first-run\">First run\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> setup\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>\u003Ccode>feynman setup\u003C/code> walks you through model authentication, alphaXiv login, web search configuration, and preview dependencies.\u003C/p>\n\u003Ch2 id=\"ask-naturally\">Ask naturally\u003C/h2>\n\u003Cp>Feynman routes your questions into the right workflow automatically. 
You don’t need slash commands to get started.\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>> What are the main approaches to RLHF alignment?\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Feynman will search papers, gather web sources, and produce a structured answer with citations.\u003C/p>\n\u003Ch2 id=\"use-workflows-directly\">Use workflows directly\u003C/h2>\n\u003Cp>For explicit control, use slash commands inside the REPL:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>> /deepresearch transformer scaling laws\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan>> /lit multimodal reasoning benchmarks\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan>> /review paper.pdf\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output-locations\">Output locations\u003C/h2>\n\u003Cp>Feynman writes durable artifacts to canonical directories:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Ccode>outputs/\u003C/code> — Reviews, reading lists, summaries\u003C/li>\n\u003Cli>\u003Ccode>papers/\u003C/code> — Polished paper-style drafts\u003C/li>\n\u003Cli>\u003Ccode>experiments/\u003C/code> — Runnable code and result logs\u003C/li>\n\u003Cli>\u003Ccode>notes/\u003C/code> — Scratch notes and session logs\u003C/li>\n\u003C/ul>",{"headings":179,"localImagePaths":192,"remoteImagePaths":193,"frontmatter":194,"imagePaths":195},[180,183,186,189],{"depth":17,"slug":181,"text":182},"first-run","First run",{"depth":17,"slug":184,"text":185},"ask-naturally","Ask 
naturally",{"depth":17,"slug":187,"text":188},"use-workflows-directly","Use workflows directly",{"depth":17,"slug":190,"text":191},"output-locations","Output locations",[],[],{"title":171,"description":172,"section":137,"order":17},[],"getting-started/quickstart.md","getting-started/configuration",{"id":197,"data":199,"body":203,"filePath":204,"digest":205,"rendered":206,"legacyId":232},{"title":200,"description":201,"section":137,"order":202},"Configuration","Configure models, search, and runtime options",4,"## Model\n\nSet the default model:\n\n```bash\nfeynman model set \u003Cprovider:model>\n```\n\nOverride at runtime:\n\n```bash\nfeynman --model anthropic:claude-opus-4-6\n```\n\nList available models:\n\n```bash\nfeynman model list\n```\n\n## Thinking level\n\nControl the reasoning depth:\n\n```bash\nfeynman --thinking high\n```\n\nLevels: `off`, `minimal`, `low`, `medium`, `high`, `xhigh`.\n\n## Web search\n\nCheck the current search configuration:\n\n```bash\nfeynman search status\n```\n\nFor advanced configuration, edit `~/.feynman/web-search.json` directly to set Gemini API keys, Perplexity keys, or a different route.\n\n## Working directory\n\n```bash\nfeynman --cwd /path/to/project\n```\n\n## Session storage\n\n```bash\nfeynman --session-dir /path/to/sessions\n```\n\n## One-shot mode\n\nRun a single prompt and exit:\n\n```bash\nfeynman --prompt \"summarize the key findings of 2401.12345\"\n```","src/content/docs/getting-started/configuration.md","9d66eb82ad4b948a",{"html":207,"metadata":208},"\u003Ch2 id=\"model\">Model\u003C/h2>\n\u003Cp>Set the default model:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> 
model\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> set\u003C/span>\u003Cspan style=\"color:#D73A49;--shiki-dark:#F97583\"> <\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\">provider:mode\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">l\u003C/span>\u003Cspan style=\"color:#D73A49;--shiki-dark:#F97583\">>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Override at runtime:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --model\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> anthropic:claude-opus-4-6\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>List available models:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> model\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> list\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"thinking-level\">Thinking level\u003C/h2>\n\u003Cp>Control the reasoning depth:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan 
style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --thinking\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> high\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Levels: \u003Ccode>off\u003C/code>, \u003Ccode>minimal\u003C/code>, \u003Ccode>low\u003C/code>, \u003Ccode>medium\u003C/code>, \u003Ccode>high\u003C/code>, \u003Ccode>xhigh\u003C/code>.\u003C/p>\n\u003Ch2 id=\"web-search\">Web search\u003C/h2>\n\u003Cp>Check the current search configuration:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> search\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> status\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>For advanced configuration, edit \u003Ccode>~/.feynman/web-search.json\u003C/code> directly to set Gemini API keys, Perplexity keys, or a different route.\u003C/p>\n\u003Ch2 id=\"working-directory\">Working directory\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --cwd\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> /path/to/project\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"session-storage\">Session storage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" 
style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --session-dir\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> /path/to/sessions\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"one-shot-mode\">One-shot mode\u003C/h2>\n\u003Cp>Run a single prompt and exit:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --prompt\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> \"summarize the key findings of 2401.12345\"\u003C/span>\u003C/span>\u003C/code>\u003C/pre>",{"headings":209,"localImagePaths":228,"remoteImagePaths":229,"frontmatter":230,"imagePaths":231},[210,213,216,219,222,225],{"depth":17,"slug":211,"text":212},"model","Model",{"depth":17,"slug":214,"text":215},"thinking-level","Thinking level",{"depth":17,"slug":217,"text":218},"web-search","Web search",{"depth":17,"slug":220,"text":221},"working-directory","Working directory",{"depth":17,"slug":223,"text":224},"session-storage","Session storage",{"depth":17,"slug":226,"text":227},"one-shot-mode","One-shot mode",[],[],{"title":200,"description":201,"section":137,"order":202},[],"getting-started/configuration.md","reference/cli-commands",{"id":233,"data":235,"body":239,"filePath":240,"digest":241,"rendered":242,"legacyId":265},{"title":236,"description":237,"section":238,"order":94},"CLI Commands","Complete reference for Feynman CLI commands","Reference","This 
page covers the dedicated Feynman CLI commands and compatibility flags.\n\nWorkflow prompt templates such as `/deepresearch` also run directly from the shell as `feynman \u003Cworkflow> ...`. Those workflow entries live in the slash-command reference instead of being duplicated here.\n\n## Core\n\n| Command | Description |\n| --- | --- |\n| `feynman` | Launch the interactive REPL. |\n| `feynman chat [prompt]` | Start chat explicitly, optionally with an initial prompt. |\n| `feynman help` | Show CLI help. |\n| `feynman setup` | Run the guided setup wizard. |\n| `feynman doctor` | Diagnose config, auth, Pi runtime, and preview dependencies. |\n| `feynman status` | Show the current setup summary. |\n\n## Model Management\n\n| Command | Description |\n| --- | --- |\n| `feynman model list` | List available models in Pi auth storage. |\n| `feynman model login [id]` | Login to a Pi OAuth model provider. |\n| `feynman model logout [id]` | Logout from a Pi OAuth model provider. |\n| `feynman model set \u003Cprovider/model>` | Set the default model. |\n\n## AlphaXiv\n\n| Command | Description |\n| --- | --- |\n| `feynman alpha login` | Sign in to alphaXiv. |\n| `feynman alpha logout` | Clear alphaXiv auth. |\n| `feynman alpha status` | Check alphaXiv auth status. |\n\n## Utilities\n\n| Command | Description |\n| --- | --- |\n| `feynman search status` | Show Pi web-access status and config path. |\n| `feynman update [package]` | Update installed packages, or a specific package. |\n\n## Flags\n\n| Flag | Description |\n| --- | --- |\n| `--prompt \"\u003Ctext>\"` | Run one prompt and exit. |\n| `--alpha-login` | Sign in to alphaXiv and exit. |\n| `--alpha-logout` | Clear alphaXiv auth and exit. |\n| `--alpha-status` | Show alphaXiv auth status and exit. |\n| `--model \u003Cprovider:model>` | Force a specific model. |\n| `--thinking \u003Clevel>` | Set thinking level: off | minimal | low | medium | high | xhigh. |\n| `--cwd \u003Cpath>` | Set the working directory for tools. 
|\n| `--session-dir \u003Cpath>` | Set the session storage directory. |\n| `--new-session` | Start a new persisted session. |\n| `--doctor` | Alias for `feynman doctor`. |\n| `--setup-preview` | Alias for `feynman setup preview`. |","src/content/docs/reference/cli-commands.md","5ba10666ccf260a6",{"html":243,"metadata":244},"\u003Cp>This page covers the dedicated Feynman CLI commands and compatibility flags.\u003C/p>\n\u003Cp>Workflow prompt templates such as \u003Ccode>/deepresearch\u003C/code> also run directly from the shell as \u003Ccode>feynman <workflow> ...\u003C/code>. Those workflow entries live in the slash-command reference instead of being duplicated here.\u003C/p>\n\u003Ch2 id=\"core\">Core\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>feynman\u003C/code>\u003C/td>\u003Ctd>Launch the interactive REPL.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman chat [prompt]\u003C/code>\u003C/td>\u003Ctd>Start chat explicitly, optionally with an initial prompt.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman help\u003C/code>\u003C/td>\u003Ctd>Show CLI help.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman setup\u003C/code>\u003C/td>\u003Ctd>Run the guided setup wizard.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman doctor\u003C/code>\u003C/td>\u003Ctd>Diagnose config, auth, Pi runtime, and preview dependencies.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman status\u003C/code>\u003C/td>\u003Ctd>Show the current setup summary.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"model-management\">Model Management\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>feynman model 
list\u003C/code>\u003C/td>\u003Ctd>List available models in Pi auth storage.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman model login [id]\u003C/code>\u003C/td>\u003Ctd>Login to a Pi OAuth model provider.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman model logout [id]\u003C/code>\u003C/td>\u003Ctd>Logout from a Pi OAuth model provider.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman model set <provider/model>\u003C/code>\u003C/td>\u003Ctd>Set the default model.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"alphaxiv\">AlphaXiv\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>feynman alpha login\u003C/code>\u003C/td>\u003Ctd>Sign in to alphaXiv.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman alpha logout\u003C/code>\u003C/td>\u003Ctd>Clear alphaXiv auth.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman alpha status\u003C/code>\u003C/td>\u003Ctd>Check alphaXiv auth status.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"utilities\">Utilities\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>feynman search status\u003C/code>\u003C/td>\u003Ctd>Show Pi web-access status and config path.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman update [package]\u003C/code>\u003C/td>\u003Ctd>Update installed packages, or a specific package.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"flags\">Flags\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Flag\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>--prompt 
\"<text>\"\u003C/code>\u003C/td>\u003Ctd>Run one prompt and exit.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--alpha-login\u003C/code>\u003C/td>\u003Ctd>Sign in to alphaXiv and exit.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--alpha-logout\u003C/code>\u003C/td>\u003Ctd>Clear alphaXiv auth and exit.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--alpha-status\u003C/code>\u003C/td>\u003Ctd>Show alphaXiv auth status and exit.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--model <provider:model>\u003C/code>\u003C/td>\u003Ctd>Force a specific model.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--thinking <level>\u003C/code>\u003C/td>\u003Ctd>Set thinking level: off\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--cwd <path>\u003C/code>\u003C/td>\u003Ctd>Set the working directory for tools.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--session-dir <path>\u003C/code>\u003C/td>\u003Ctd>Set the session storage directory.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--new-session\u003C/code>\u003C/td>\u003Ctd>Start a new persisted session.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--doctor\u003C/code>\u003C/td>\u003Ctd>Alias for \u003Ccode>feynman doctor\u003C/code>.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--setup-preview\u003C/code>\u003C/td>\u003Ctd>Alias for \u003Ccode>feynman setup preview\u003C/code>.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>",{"headings":245,"localImagePaths":261,"remoteImagePaths":262,"frontmatter":263,"imagePaths":264},[246,249,252,255,258],{"depth":17,"slug":247,"text":248},"core","Core",{"depth":17,"slug":250,"text":251},"model-management","Model 
Management",{"depth":17,"slug":253,"text":254},"alphaxiv","AlphaXiv",{"depth":17,"slug":256,"text":257},"utilities","Utilities",{"depth":17,"slug":259,"text":260},"flags","Flags",[],[],{"title":236,"description":237,"section":238,"order":94},[],"reference/cli-commands.md","getting-started/installation",{"id":266,"data":268,"body":271,"filePath":272,"digest":273,"rendered":274,"legacyId":294},{"title":269,"description":270,"section":137,"order":94},"Installation","Install Feynman and get started","## Requirements\n\n- Node.js 20 or later\n- npm 9 or later\n\n## Install\n\n```bash\nnpm install -g @companion-ai/feynman\n```\n\n## Verify\n\n```bash\nfeynman --version\n```\n\n## Local Development\n\nFor contributing or local development:\n\n```bash\ngit clone https://github.com/getcompanion-ai/feynman.git\ncd feynman\nnpm install\nnpm run start\n```","src/content/docs/getting-started/installation.md","781ab0278b8c1673",{"html":275,"metadata":276},"\u003Ch2 id=\"requirements\">Requirements\u003C/h2>\n\u003Cul>\n\u003Cli>Node.js 20 or later\u003C/li>\n\u003Cli>npm 9 or later\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"install\">Install\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">npm\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> install\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> -g\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> @companion-ai/feynman\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"verify\">Verify\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" 
data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --version\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"local-development\">Local Development\u003C/h2>\n\u003Cp>For contributing or local development:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">git\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> clone\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> https://github.com/getcompanion-ai/feynman.git\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\">cd\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> feynman\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">npm\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> install\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">npm\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> run\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> start\u003C/span>\u003C/span>\u003C/code>\u003C/pre>",{"headings":277,"localImagePaths":290,"remoteImagePaths":291,"frontmatter":292,"imagePaths":293},[278,281,284,287],{"depth":17,"slug":279,"text":280},"requirements","Requirements",{"depth":17,"slug":282,"text":283},"install","Install",{"depth":17,"slug":285,"text":286},"verify","Verify",{"depth":17,"slug":288,"text":289},"local-development","Local 
Development",[],[],{"title":269,"description":270,"section":137,"order":94},[],"getting-started/installation.md","reference/package-stack",{"id":295,"data":297,"body":300,"filePath":301,"digest":302,"rendered":303,"legacyId":311},{"title":298,"description":299,"section":238,"order":41},"Package Stack","Curated Pi packages bundled with Feynman","Curated Pi packages bundled with Feynman. The runtime package list lives in `.feynman/settings.json`.\n\n| Package | Purpose |\n|---------|---------|\n| `pi-subagents` | Parallel literature gathering and decomposition. |\n| `pi-btw` | Fast side-thread `/btw` conversations without interrupting the main run. |\n| `pi-docparser` | PDFs, Office docs, spreadsheets, and images. |\n| `pi-web-access` | Web, GitHub, PDF, and media access. |\n| `pi-markdown-preview` | Polished Markdown and LaTeX-heavy research writeups. |\n| `@walterra/pi-charts` | Charts and quantitative visualizations. |\n| `pi-generative-ui` | Interactive HTML-style widgets. |\n| `pi-mermaid` | Diagrams in the TUI. |\n| `@aliou/pi-processes` | Long-running experiments and log tails. |\n| `pi-zotero` | Citation-library workflows. |\n| `@kaiserlich-dev/pi-session-search` | Indexed session recall and summarize/resume UI. |\n| `pi-schedule-prompt` | Recurring and deferred research jobs. |\n| `@samfp/pi-memory` | Automatic preference and correction memory across sessions. |\n| `@tmustier/pi-ralph-wiggum` | Long-running agent loops for iterative development. |","src/content/docs/reference/package-stack.md","f8845d3da2b66045",{"html":304,"metadata":305},"\u003Cp>Curated Pi packages bundled with Feynman. 
The runtime package list lives in \u003Ccode>.feynman/settings.json\u003C/code>.\u003C/p>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Package\u003C/th>\u003Cth>Purpose\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>pi-subagents\u003C/code>\u003C/td>\u003Ctd>Parallel literature gathering and decomposition.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-btw\u003C/code>\u003C/td>\u003Ctd>Fast side-thread \u003Ccode>/btw\u003C/code> conversations without interrupting the main run.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-docparser\u003C/code>\u003C/td>\u003Ctd>PDFs, Office docs, spreadsheets, and images.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-web-access\u003C/code>\u003C/td>\u003Ctd>Web, GitHub, PDF, and media access.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-markdown-preview\u003C/code>\u003C/td>\u003Ctd>Polished Markdown and LaTeX-heavy research writeups.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@walterra/pi-charts\u003C/code>\u003C/td>\u003Ctd>Charts and quantitative visualizations.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-generative-ui\u003C/code>\u003C/td>\u003Ctd>Interactive HTML-style widgets.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-mermaid\u003C/code>\u003C/td>\u003Ctd>Diagrams in the TUI.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@aliou/pi-processes\u003C/code>\u003C/td>\u003Ctd>Long-running experiments and log tails.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-zotero\u003C/code>\u003C/td>\u003Ctd>Citation-library workflows.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@kaiserlich-dev/pi-session-search\u003C/code>\u003C/td>\u003Ctd>Indexed session recall and summarize/resume UI.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-schedule-prompt\u003C/code>\u003C/td>\u003Ctd>Recurring and deferred research 
jobs.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@samfp/pi-memory\u003C/code>\u003C/td>\u003Ctd>Automatic preference and correction memory across sessions.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@tmustier/pi-ralph-wiggum\u003C/code>\u003C/td>\u003Ctd>Long-running agent loops for iterative development.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>",{"headings":306,"localImagePaths":307,"remoteImagePaths":308,"frontmatter":309,"imagePaths":310},[],[],[],{"title":298,"description":299,"section":238,"order":41},[],"reference/package-stack.md","tools/session-search",{"id":312,"data":314,"body":317,"filePath":318,"digest":319,"rendered":320,"legacyId":337},{"title":315,"description":316,"section":74,"order":41},"Session Search","Search prior Feynman session transcripts","## Overview\n\nThe `session_search` tool recovers prior Feynman work from stored session transcripts. Useful for picking up previous research threads or finding past findings.\n\n## Usage\n\nInside the REPL:\n\n```\n/search\n```\n\nOr use the tool directly — Feynman will invoke `session_search` automatically when you reference prior work.\n\n## What it searches\n\n- Full session transcripts\n- Tool outputs and agent results\n- Generated artifacts and their content","src/content/docs/tools/session-search.md","7091dddc6969e581",{"html":321,"metadata":322},"\u003Ch2 id=\"overview\">Overview\u003C/h2>\n\u003Cp>The \u003Ccode>session_search\u003C/code> tool recovers prior Feynman work from stored session transcripts. 
Useful for picking up previous research threads or finding past findings.\u003C/p>\n\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cp>Inside the REPL:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/search\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Or use the tool directly — Feynman will invoke \u003Ccode>session_search\u003C/code> automatically when you reference prior work.\u003C/p>\n\u003Ch2 id=\"what-it-searches\">What it searches\u003C/h2>\n\u003Cul>\n\u003Cli>Full session transcripts\u003C/li>\n\u003Cli>Tool outputs and agent results\u003C/li>\n\u003Cli>Generated artifacts and their content\u003C/li>\n\u003C/ul>",{"headings":323,"localImagePaths":333,"remoteImagePaths":334,"frontmatter":335,"imagePaths":336},[324,327,330],{"depth":17,"slug":325,"text":326},"overview","Overview",{"depth":17,"slug":328,"text":329},"usage","Usage",{"depth":17,"slug":331,"text":332},"what-it-searches","What it searches",[],[],{"title":315,"description":316,"section":74,"order":41},[],"tools/session-search.md","tools/alphaxiv",{"id":338,"data":340,"body":342,"filePath":343,"digest":344,"rendered":345,"legacyId":367},{"title":254,"description":341,"section":74,"order":94},"Paper search and analysis tools","## Overview\n\nAlphaXiv powers Feynman's academic paper workflows. 
All tools require an alphaXiv account — sign in with `feynman alpha login`.\n\n## Tools\n\n### alpha_search\n\nPaper discovery with three search modes:\n\n- **semantic** — Meaning-based search across paper content\n- **keyword** — Traditional keyword matching\n- **agentic** — AI-powered search that interprets your intent\n\n### alpha_get_paper\n\nFetch a paper's report (structured summary) or full raw text by arXiv ID.\n\n### alpha_ask_paper\n\nAsk a targeted question about a specific paper. Returns an answer grounded in the paper's content.\n\n### alpha_annotate_paper\n\nAdd persistent local notes to a paper. Annotations are stored locally and persist across sessions.\n\n### alpha_list_annotations\n\nRecall all annotations across papers and sessions.\n\n### alpha_read_code\n\nRead source code from a paper's linked GitHub repository. Useful for auditing or replication planning.","src/content/docs/tools/alphaxiv.md","a6eeb2c5a98d3096",{"html":346,"metadata":347},"\u003Ch2 id=\"overview\">Overview\u003C/h2>\n\u003Cp>AlphaXiv powers Feynman’s academic paper workflows. All tools require an alphaXiv account — sign in with \u003Ccode>feynman alpha login\u003C/code>.\u003C/p>\n\u003Ch2 id=\"tools\">Tools\u003C/h2>\n\u003Ch3 id=\"alpha_search\">alpha_search\u003C/h3>\n\u003Cp>Paper discovery with three search modes:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>semantic\u003C/strong> — Meaning-based search across paper content\u003C/li>\n\u003Cli>\u003Cstrong>keyword\u003C/strong> — Traditional keyword matching\u003C/li>\n\u003Cli>\u003Cstrong>agentic\u003C/strong> — AI-powered search that interprets your intent\u003C/li>\n\u003C/ul>\n\u003Ch3 id=\"alpha_get_paper\">alpha_get_paper\u003C/h3>\n\u003Cp>Fetch a paper’s report (structured summary) or full raw text by arXiv ID.\u003C/p>\n\u003Ch3 id=\"alpha_ask_paper\">alpha_ask_paper\u003C/h3>\n\u003Cp>Ask a targeted question about a specific paper. 
Returns an answer grounded in the paper’s content.\u003C/p>\n\u003Ch3 id=\"alpha_annotate_paper\">alpha_annotate_paper\u003C/h3>\n\u003Cp>Add persistent local notes to a paper. Annotations are stored locally and persist across sessions.\u003C/p>\n\u003Ch3 id=\"alpha_list_annotations\">alpha_list_annotations\u003C/h3>\n\u003Cp>Recall all annotations across papers and sessions.\u003C/p>\n\u003Ch3 id=\"alpha_read_code\">alpha_read_code\u003C/h3>\n\u003Cp>Read source code from a paper’s linked GitHub repository. Useful for auditing or replication planning.\u003C/p>",{"headings":348,"localImagePaths":363,"remoteImagePaths":364,"frontmatter":365,"imagePaths":366},[349,350,351,353,355,357,359,361],{"depth":17,"slug":325,"text":326},{"depth":17,"slug":73,"text":74},{"depth":41,"slug":352,"text":352},"alpha_search",{"depth":41,"slug":354,"text":354},"alpha_get_paper",{"depth":41,"slug":356,"text":356},"alpha_ask_paper",{"depth":41,"slug":358,"text":358},"alpha_annotate_paper",{"depth":41,"slug":360,"text":360},"alpha_list_annotations",{"depth":41,"slug":362,"text":362},"alpha_read_code",[],[],{"title":254,"description":341,"section":74,"order":94},[],"tools/alphaxiv.md","tools/preview",{"id":368,"data":370,"body":373,"filePath":374,"digest":375,"rendered":376,"legacyId":390},{"title":371,"description":372,"section":74,"order":202},"Preview","Preview generated artifacts in browser or PDF","## Overview\n\nThe `preview_file` tool opens generated artifacts in your browser or PDF viewer.\n\n## Usage\n\nInside the REPL:\n\n```\n/preview\n```\n\nOr Feynman will suggest previewing when you generate artifacts that benefit from rendered output (Markdown with LaTeX, HTML reports, etc.).\n\n## Requirements\n\nPreview requires `pandoc` for PDF/HTML rendering. 
Install it with:\n\n```bash\nfeynman --setup-preview\n```\n\n## Supported formats\n\n- Markdown (with LaTeX math rendering)\n- HTML\n- PDF","src/content/docs/tools/preview.md","b42137d5e0befd83",{"html":377,"metadata":378},"\u003Ch2 id=\"overview\">Overview\u003C/h2>\n\u003Cp>The \u003Ccode>preview_file\u003C/code> tool opens generated artifacts in your browser or PDF viewer.\u003C/p>\n\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cp>Inside the REPL:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/preview\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Or Feynman will suggest previewing when you generate artifacts that benefit from rendered output (Markdown with LaTeX, HTML reports, etc.).\u003C/p>\n\u003Ch2 id=\"requirements\">Requirements\u003C/h2>\n\u003Cp>Preview requires \u003Ccode>pandoc\u003C/code> for PDF/HTML rendering. 
Install it with:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --setup-preview\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"supported-formats\">Supported formats\u003C/h2>\n\u003Cul>\n\u003Cli>Markdown (with LaTeX math rendering)\u003C/li>\n\u003Cli>HTML\u003C/li>\n\u003Cli>PDF\u003C/li>\n\u003C/ul>",{"headings":379,"localImagePaths":386,"remoteImagePaths":387,"frontmatter":388,"imagePaths":389},[380,381,382,383],{"depth":17,"slug":325,"text":326},{"depth":17,"slug":328,"text":329},{"depth":17,"slug":279,"text":280},{"depth":17,"slug":384,"text":385},"supported-formats","Supported formats",[],[],{"title":371,"description":372,"section":74,"order":202},[],"tools/preview.md","reference/slash-commands",{"id":391,"data":393,"body":396,"filePath":397,"digest":398,"rendered":399,"legacyId":415},{"title":394,"description":395,"section":238,"order":17},"Slash Commands","Repo-owned REPL slash commands","This page documents the slash commands that Feynman owns in this repository: prompt templates from `prompts/` and extension commands from `extensions/research-tools/`.\n\nAdditional slash commands can appear at runtime from Pi core and bundled packages such as subagents, preview, session search, and scheduling. Use `/help` inside the REPL for the live command list instead of relying on a static copy of package-provided commands.\n\n## Research Workflows\n\n| Command | Description |\n| --- | --- |\n| `/deepresearch \u003Ctopic>` | Run a thorough, source-heavy investigation on a topic and produce a durable research brief with inline citations. 
|\n| `/lit \u003Ctopic>` | Run a literature review on a topic using paper search and primary-source synthesis. |\n| `/review \u003Cartifact>` | Simulate an AI research peer review with likely objections, severity, and a concrete revision plan. |\n| `/audit \u003Citem>` | Compare a paper's claims against its public codebase and identify mismatches, omissions, and reproducibility risks. |\n| `/replicate \u003Cpaper>` | Plan or execute a replication workflow for a paper, claim, or benchmark. |\n| `/compare \u003Ctopic>` | Compare multiple sources on a topic and produce a source-grounded matrix of agreements, disagreements, and confidence. |\n| `/draft \u003Ctopic>` | Turn research findings into a polished paper-style draft with equations, sections, and explicit claims. |\n| `/autoresearch \u003Cidea>` | Autonomous experiment loop — try ideas, measure results, keep what works, discard what doesn't, repeat. |\n| `/watch \u003Ctopic>` | Set up a recurring or deferred research watch on a topic, company, paper area, or product surface. |\n\n## Project & Session\n\n| Command | Description |\n| --- | --- |\n| `/log` | Write a durable session log with completed work, findings, open questions, and next steps. |\n| `/jobs` | Inspect active background research work, including running processes and scheduled follow-ups. |\n| `/help` | Show grouped Feynman commands and prefill the editor with a selected command. |\n| `/init` | Bootstrap AGENTS.md and session-log folders for a research project. |\n\n## Setup\n\n| Command | Description |\n| --- | --- |\n| `/alpha-login` | Sign in to alphaXiv from inside Feynman. |\n| `/alpha-status` | Show alphaXiv authentication status. |\n| `/alpha-logout` | Clear alphaXiv auth from inside Feynman. 
|","src/content/docs/reference/slash-commands.md","f548c25cfafb9aea",{"html":400,"metadata":401},"\u003Cp>This page documents the slash commands that Feynman owns in this repository: prompt templates from \u003Ccode>prompts/\u003C/code> and extension commands from \u003Ccode>extensions/research-tools/\u003C/code>.\u003C/p>\n\u003Cp>Additional slash commands can appear at runtime from Pi core and bundled packages such as subagents, preview, session search, and scheduling. Use \u003Ccode>/help\u003C/code> inside the REPL for the live command list instead of relying on a static copy of package-provided commands.\u003C/p>\n\u003Ch2 id=\"research-workflows\">Research Workflows\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>/deepresearch <topic>\u003C/code>\u003C/td>\u003Ctd>Run a thorough, source-heavy investigation on a topic and produce a durable research brief with inline citations.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/lit <topic>\u003C/code>\u003C/td>\u003Ctd>Run a literature review on a topic using paper search and primary-source synthesis.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/review <artifact>\u003C/code>\u003C/td>\u003Ctd>Simulate an AI research peer review with likely objections, severity, and a concrete revision plan.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/audit <item>\u003C/code>\u003C/td>\u003Ctd>Compare a paper’s claims against its public codebase and identify mismatches, omissions, and reproducibility risks.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/replicate <paper>\u003C/code>\u003C/td>\u003Ctd>Plan or execute a replication workflow for a paper, claim, or benchmark.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/compare <topic>\u003C/code>\u003C/td>\u003Ctd>Compare multiple sources on a topic and produce a 
source-grounded matrix of agreements, disagreements, and confidence.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/draft <topic>\u003C/code>\u003C/td>\u003Ctd>Turn research findings into a polished paper-style draft with equations, sections, and explicit claims.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/autoresearch <idea>\u003C/code>\u003C/td>\u003Ctd>Autonomous experiment loop — try ideas, measure results, keep what works, discard what doesn’t, repeat.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/watch <topic>\u003C/code>\u003C/td>\u003Ctd>Set up a recurring or deferred research watch on a topic, company, paper area, or product surface.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"project--session\">Project & Session\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>/log\u003C/code>\u003C/td>\u003Ctd>Write a durable session log with completed work, findings, open questions, and next steps.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/jobs\u003C/code>\u003C/td>\u003Ctd>Inspect active background research work, including running processes and scheduled follow-ups.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/help\u003C/code>\u003C/td>\u003Ctd>Show grouped Feynman commands and prefill the editor with a selected command.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/init\u003C/code>\u003C/td>\u003Ctd>Bootstrap AGENTS.md and session-log folders for a research project.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"setup\">Setup\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>/alpha-login\u003C/code>\u003C/td>\u003Ctd>Sign in to alphaXiv from inside 
Feynman.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/alpha-status\u003C/code>\u003C/td>\u003Ctd>Show alphaXiv authentication status.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/alpha-logout\u003C/code>\u003C/td>\u003Ctd>Clear alphaXiv auth from inside Feynman.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>",{"headings":402,"localImagePaths":411,"remoteImagePaths":412,"frontmatter":413,"imagePaths":414},[403,406,409],{"depth":17,"slug":404,"text":405},"research-workflows","Research Workflows",{"depth":17,"slug":407,"text":408},"project--session","Project & Session",{"depth":17,"slug":410,"text":135},"setup",[],[],{"title":394,"description":395,"section":238,"order":17},[],"reference/slash-commands.md","tools/web-search",{"id":416,"data":418,"body":421,"filePath":422,"digest":423,"rendered":424,"legacyId":444},{"title":419,"description":420,"section":74,"order":17},"Web Search","Web search routing and configuration","## Routing modes\n\nFeynman supports three web search backends:\n\n| Mode | Description |\n|------|-------------|\n| `auto` | Prefer Perplexity when configured, fall back to Gemini |\n| `perplexity` | Force Perplexity Sonar |\n| `gemini` | Force Gemini (default) |\n\n## Default behavior\n\nThe default path is zero-config Gemini Browser via a signed-in Chromium profile. 
No API keys required.\n\n## Check current config\n\n```bash\nfeynman search status\n```\n\n## Advanced configuration\n\nEdit `~/.feynman/web-search.json` directly to set:\n\n- Gemini API keys\n- Perplexity API keys\n- Custom routing preferences","src/content/docs/tools/web-search.md","b2963fe8f7ae5dce",{"html":425,"metadata":426},"\u003Ch2 id=\"routing-modes\">Routing modes\u003C/h2>\n\u003Cp>Feynman supports three web search backends:\u003C/p>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Mode\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>auto\u003C/code>\u003C/td>\u003Ctd>Prefer Perplexity when configured, fall back to Gemini\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>perplexity\u003C/code>\u003C/td>\u003Ctd>Force Perplexity Sonar\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>gemini\u003C/code>\u003C/td>\u003Ctd>Force Gemini (default)\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"default-behavior\">Default behavior\u003C/h2>\n\u003Cp>The default path is zero-config Gemini Browser via a signed-in Chromium profile. 
No API keys required.\u003C/p>\n\u003Ch2 id=\"check-current-config\">Check current config\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> search\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> status\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"advanced-configuration\">Advanced configuration\u003C/h2>\n\u003Cp>Edit \u003Ccode>~/.feynman/web-search.json\u003C/code> directly to set:\u003C/p>\n\u003Cul>\n\u003Cli>Gemini API keys\u003C/li>\n\u003Cli>Perplexity API keys\u003C/li>\n\u003Cli>Custom routing preferences\u003C/li>\n\u003C/ul>",{"headings":427,"localImagePaths":440,"remoteImagePaths":441,"frontmatter":442,"imagePaths":443},[428,431,434,437],{"depth":17,"slug":429,"text":430},"routing-modes","Routing modes",{"depth":17,"slug":432,"text":433},"default-behavior","Default behavior",{"depth":17,"slug":435,"text":436},"check-current-config","Check current config",{"depth":17,"slug":438,"text":439},"advanced-configuration","Advanced configuration",[],[],{"title":419,"description":420,"section":74,"order":17},[],"tools/web-search.md","workflows/autoresearch",{"id":445,"data":447,"body":452,"filePath":453,"digest":454,"rendered":455,"legacyId":476},{"title":448,"description":449,"section":450,"order":451},"Autoresearch","Autonomous experiment optimization loop","Workflows",8,"## Usage\n\n```\n/autoresearch \u003Cidea>\n```\n\n## What it does\n\nRuns an autonomous experiment loop:\n\n1. **Edit** — Modify code or configuration\n2. **Commit** — Save the change\n3. **Benchmark** — Run evaluation\n4. **Evaluate** — Compare against baseline\n5. 
**Keep or revert** — Persist improvements, roll back regressions\n6. **Repeat** — Continue until the target is hit\n\n## Tracking\n\nMetrics are tracked in:\n\n- `autoresearch.md` — Human-readable progress log\n- `autoresearch.jsonl` — Machine-readable metrics over time\n\n## Controls\n\n```\n/autoresearch \u003Cidea> # start or resume\n/autoresearch off # stop, keep data\n/autoresearch clear # delete all state, start fresh\n```\n\n## Example\n\n```\n/autoresearch optimize the learning rate schedule for better convergence\n```","src/content/docs/workflows/autoresearch.md","94559e14e60edcad",{"html":456,"metadata":457},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/autoresearch <idea>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Runs an autonomous experiment loop:\u003C/p>\n\u003Col>\n\u003Cli>\u003Cstrong>Edit\u003C/strong> — Modify code or configuration\u003C/li>\n\u003Cli>\u003Cstrong>Commit\u003C/strong> — Save the change\u003C/li>\n\u003Cli>\u003Cstrong>Benchmark\u003C/strong> — Run evaluation\u003C/li>\n\u003Cli>\u003Cstrong>Evaluate\u003C/strong> — Compare against baseline\u003C/li>\n\u003Cli>\u003Cstrong>Keep or revert\u003C/strong> — Persist improvements, roll back regressions\u003C/li>\n\u003Cli>\u003Cstrong>Repeat\u003C/strong> — Continue until the target is hit\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"tracking\">Tracking\u003C/h2>\n\u003Cp>Metrics are tracked in:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Ccode>autoresearch.md\u003C/code> — Human-readable progress log\u003C/li>\n\u003Cli>\u003Ccode>autoresearch.jsonl\u003C/code> — Machine-readable metrics over time\u003C/li>\n\u003C/ul>\n\u003Ch2 
id=\"controls\">Controls\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/autoresearch <idea> # start or resume\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan>/autoresearch off # stop, keep data\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan>/autoresearch clear # delete all state, start fresh\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/autoresearch optimize the learning rate schedule for better convergence\u003C/span>\u003C/span>\u003C/code>\u003C/pre>",{"headings":458,"localImagePaths":472,"remoteImagePaths":473,"frontmatter":474,"imagePaths":475},[459,460,463,466,469],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},"what-it-does","What it does",{"depth":17,"slug":464,"text":465},"tracking","Tracking",{"depth":17,"slug":467,"text":468},"controls","Controls",{"depth":17,"slug":470,"text":471},"example","Example",[],[],{"title":448,"description":449,"section":450,"order":451},[],"workflows/autoresearch.md","workflows/audit",{"id":477,"data":479,"body":482,"filePath":483,"digest":484,"rendered":485,"legacyId":502},{"title":480,"description":481,"section":450,"order":202},"Code Audit","Compare paper claims against public codebases","## Usage\n\n```\n/audit \u003Citem>\n```\n\n## What it does\n\nCompares claims made in a paper against its public codebase. 
Surfaces mismatches, missing experiments, and reproducibility risks.\n\n## What it checks\n\n- Do the reported hyperparameters match the code?\n- Are all claimed experiments present in the repository?\n- Does the training loop match the described methodology?\n- Are there undocumented preprocessing steps?\n- Do evaluation metrics match the paper's claims?\n\n## Example\n\n```\n/audit 2401.12345\n```\n\n## Output\n\nAn audit report with:\n\n- Claim-by-claim verification\n- Identified mismatches\n- Missing components\n- Reproducibility risk assessment","src/content/docs/workflows/audit.md","58f5516850bcd065",{"html":486,"metadata":487},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/audit <item>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Compares claims made in a paper against its public codebase. 
Surfaces mismatches, missing experiments, and reproducibility risks.\u003C/p>\n\u003Ch2 id=\"what-it-checks\">What it checks\u003C/h2>\n\u003Cul>\n\u003Cli>Do the reported hyperparameters match the code?\u003C/li>\n\u003Cli>Are all claimed experiments present in the repository?\u003C/li>\n\u003Cli>Does the training loop match the described methodology?\u003C/li>\n\u003Cli>Are there undocumented preprocessing steps?\u003C/li>\n\u003Cli>Do evaluation metrics match the paper’s claims?\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/audit 2401.12345\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>An audit report with:\u003C/p>\n\u003Cul>\n\u003Cli>Claim-by-claim verification\u003C/li>\n\u003Cli>Identified mismatches\u003C/li>\n\u003Cli>Missing components\u003C/li>\n\u003Cli>Reproducibility risk assessment\u003C/li>\n\u003C/ul>",{"headings":488,"localImagePaths":498,"remoteImagePaths":499,"frontmatter":500,"imagePaths":501},[489,490,491,494,495],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":492,"text":493},"what-it-checks","What it checks",{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},"output","Output",[],[],{"title":480,"description":481,"section":450,"order":202},[],"workflows/audit.md","workflows/compare",{"id":503,"data":505,"body":509,"filePath":510,"digest":511,"rendered":512,"legacyId":524},{"title":506,"description":507,"section":450,"order":508},"Source Comparison","Compare multiple sources with agreement/disagreement matrix",6,"## Usage\n\n```\n/compare \u003Ctopic>\n```\n\n## What it does\n\nCompares multiple sources on a topic. 
Builds an agreement/disagreement matrix showing where sources align and where they conflict.\n\n## Example\n\n```\n/compare approaches to constitutional AI training\n```\n\n## Output\n\n- Source-by-source breakdown\n- Agreement/disagreement matrix\n- Synthesis of key differences\n- Assessment of which positions have stronger evidence","src/content/docs/workflows/compare.md","669d1dce304b191f",{"html":513,"metadata":514},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/compare <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Compares multiple sources on a topic. Builds an agreement/disagreement matrix showing where sources align and where they conflict.\u003C/p>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/compare approaches to constitutional AI training\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cul>\n\u003Cli>Source-by-source breakdown\u003C/li>\n\u003Cli>Agreement/disagreement matrix\u003C/li>\n\u003Cli>Synthesis of key differences\u003C/li>\n\u003Cli>Assessment of which positions have stronger 
evidence\u003C/li>\n\u003C/ul>",{"headings":515,"localImagePaths":520,"remoteImagePaths":521,"frontmatter":522,"imagePaths":523},[516,517,518,519],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":506,"description":507,"section":450,"order":508},[],"workflows/compare.md","workflows/deep-research",{"id":525,"data":527,"body":530,"filePath":531,"digest":532,"rendered":533,"legacyId":545},{"title":528,"description":529,"section":450,"order":94},"Deep Research","Thorough source-heavy investigation with parallel agents","## Usage\n\n```\n/deepresearch \u003Ctopic>\n```\n\n## What it does\n\nDeep research runs a thorough, source-heavy investigation. It plans the research scope, delegates to parallel researcher agents, synthesizes findings, and adds inline citations.\n\nThe workflow follows these steps:\n\n1. **Plan** — Clarify the research question and identify search strategy\n2. **Delegate** — Spawn parallel researcher agents to gather evidence from different source types (papers, web, repos)\n3. **Synthesize** — Merge findings, resolve contradictions, identify gaps\n4. **Cite** — Add inline citations and verify all source URLs\n5. 
**Deliver** — Write a durable research brief to `outputs/`\n\n## Example\n\n```\n/deepresearch transformer scaling laws and their implications for compute-optimal training\n```\n\n## Output\n\nProduces a structured research brief with:\n\n- Executive summary\n- Key findings organized by theme\n- Evidence tables with source links\n- Open questions and suggested next steps\n- Numbered sources section with direct URLs","src/content/docs/workflows/deep-research.md","5a1ed5d3fd031659",{"html":534,"metadata":535},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/deepresearch <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Deep research runs a thorough, source-heavy investigation. 
It plans the research scope, delegates to parallel researcher agents, synthesizes findings, and adds inline citations.\u003C/p>\n\u003Cp>The workflow follows these steps:\u003C/p>\n\u003Col>\n\u003Cli>\u003Cstrong>Plan\u003C/strong> — Clarify the research question and identify search strategy\u003C/li>\n\u003Cli>\u003Cstrong>Delegate\u003C/strong> — Spawn parallel researcher agents to gather evidence from different source types (papers, web, repos)\u003C/li>\n\u003Cli>\u003Cstrong>Synthesize\u003C/strong> — Merge findings, resolve contradictions, identify gaps\u003C/li>\n\u003Cli>\u003Cstrong>Cite\u003C/strong> — Add inline citations and verify all source URLs\u003C/li>\n\u003Cli>\u003Cstrong>Deliver\u003C/strong> — Write a durable research brief to \u003Ccode>outputs/\u003C/code>\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/deepresearch transformer scaling laws and their implications for compute-optimal training\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>Produces a structured research brief with:\u003C/p>\n\u003Cul>\n\u003Cli>Executive summary\u003C/li>\n\u003Cli>Key findings organized by theme\u003C/li>\n\u003Cli>Evidence tables with source links\u003C/li>\n\u003Cli>Open questions and suggested next steps\u003C/li>\n\u003Cli>Numbered sources section with direct 
URLs\u003C/li>\n\u003C/ul>",{"headings":536,"localImagePaths":541,"remoteImagePaths":542,"frontmatter":543,"imagePaths":544},[537,538,539,540],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":528,"description":529,"section":450,"order":94},[],"workflows/deep-research.md","workflows/draft",{"id":546,"data":548,"body":552,"filePath":553,"digest":554,"rendered":555,"legacyId":569},{"title":549,"description":550,"section":450,"order":551},"Draft Writing","Paper-style draft generation from research findings",7,"## Usage\n\n```\n/draft \u003Ctopic>\n```\n\n## What it does\n\nProduces a paper-style draft with structured sections. Writes to `papers/`.\n\n## Structure\n\nThe generated draft includes:\n\n- Title\n- Abstract\n- Introduction / Background\n- Method or Approach\n- Evidence and Analysis\n- Limitations\n- Conclusion\n- Sources\n\n## Example\n\n```\n/draft survey of differentiable physics simulators\n```\n\nThe writer agent works only from supplied evidence — it never fabricates content. If evidence is insufficient, it explicitly notes the gaps.","src/content/docs/workflows/draft.md","5549e489883745ea",{"html":556,"metadata":557},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/draft <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Produces a paper-style draft with structured sections. 
Writes to \u003Ccode>papers/\u003C/code>.\u003C/p>\n\u003Ch2 id=\"structure\">Structure\u003C/h2>\n\u003Cp>The generated draft includes:\u003C/p>\n\u003Cul>\n\u003Cli>Title\u003C/li>\n\u003Cli>Abstract\u003C/li>\n\u003Cli>Introduction / Background\u003C/li>\n\u003Cli>Method or Approach\u003C/li>\n\u003Cli>Evidence and Analysis\u003C/li>\n\u003Cli>Limitations\u003C/li>\n\u003Cli>Conclusion\u003C/li>\n\u003Cli>Sources\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/draft survey of differentiable physics simulators\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>The writer agent works only from supplied evidence — it never fabricates content. If evidence is insufficient, it explicitly notes the gaps.\u003C/p>",{"headings":558,"localImagePaths":565,"remoteImagePaths":566,"frontmatter":567,"imagePaths":568},[559,560,561,564],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":562,"text":563},"structure","Structure",{"depth":17,"slug":470,"text":471},[],[],{"title":549,"description":550,"section":450,"order":551},[],"workflows/draft.md","workflows/replication",{"id":570,"data":572,"body":576,"filePath":577,"digest":578,"rendered":579,"legacyId":591},{"title":573,"description":574,"section":450,"order":575},"Replication","Plan replications of papers and claims",5,"## Usage\n\n```\n/replicate \u003Cpaper or claim>\n```\n\n## What it does\n\nExtracts key implementation details from a paper, identifies what's needed to replicate the results, and asks where to run before executing anything.\n\nBefore running code, Feynman asks you to choose an execution environment:\n\n- **Local** — run in the current working directory\n- **Virtual 
environment** — create an isolated venv/conda env first\n- **Cloud** — delegate to a remote Agent Computer machine\n- **Plan only** — produce the replication plan without executing\n\n## Example\n\n```\n/replicate \"chain-of-thought prompting improves math reasoning\"\n```\n\n## Output\n\nA replication plan covering:\n\n- Key claims to verify\n- Required resources (compute, data, models)\n- Implementation details extracted from the paper\n- Potential pitfalls and underspecified details\n- Step-by-step replication procedure\n- Success criteria\n\nIf an execution environment is selected, also produces runnable scripts and captured results.","src/content/docs/workflows/replication.md","462a792bf2682b87",{"html":580,"metadata":581},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/replicate <paper or claim>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Extracts key implementation details from a paper, identifies what’s needed to replicate the results, and asks where to run before executing anything.\u003C/p>\n\u003Cp>Before running code, Feynman asks you to choose an execution environment:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>Local\u003C/strong> — run in the current working directory\u003C/li>\n\u003Cli>\u003Cstrong>Virtual environment\u003C/strong> — create an isolated venv/conda env first\u003C/li>\n\u003Cli>\u003Cstrong>Cloud\u003C/strong> — delegate to a remote Agent Computer machine\u003C/li>\n\u003Cli>\u003Cstrong>Plan only\u003C/strong> — produce the replication plan without executing\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" 
style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/replicate \"chain-of-thought prompting improves math reasoning\"\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>A replication plan covering:\u003C/p>\n\u003Cul>\n\u003Cli>Key claims to verify\u003C/li>\n\u003Cli>Required resources (compute, data, models)\u003C/li>\n\u003Cli>Implementation details extracted from the paper\u003C/li>\n\u003Cli>Potential pitfalls and underspecified details\u003C/li>\n\u003Cli>Step-by-step replication procedure\u003C/li>\n\u003Cli>Success criteria\u003C/li>\n\u003C/ul>\n\u003Cp>If an execution environment is selected, also produces runnable scripts and captured results.\u003C/p>",{"headings":582,"localImagePaths":587,"remoteImagePaths":588,"frontmatter":589,"imagePaths":590},[583,584,585,586],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":573,"description":574,"section":450,"order":575},[],"workflows/replication.md","workflows/review",{"id":592,"data":594,"body":597,"filePath":598,"digest":599,"rendered":600,"legacyId":615},{"title":595,"description":596,"section":450,"order":41},"Peer Review","Simulated peer review with severity-graded feedback","## Usage\n\n```\n/review \u003Cartifact>\n```\n\n## What it does\n\nSimulates a tough-but-fair peer review for AI research artifacts. 
Evaluates novelty, empirical rigor, baselines, ablations, and reproducibility.\n\nThe reviewer agent identifies:\n\n- Weak baselines\n- Missing ablations\n- Evaluation mismatches\n- Benchmark leakage\n- Under-specified implementation details\n\n## Severity levels\n\nFeedback is graded by severity:\n\n- **FATAL** — Fundamental issues that invalidate the claims\n- **MAJOR** — Significant problems that need addressing\n- **MINOR** — Small improvements or clarifications\n\n## Example\n\n```\n/review outputs/scaling-laws-brief.md\n```\n\n## Output\n\nStructured review with:\n\n- Summary of the work\n- Strengths\n- Weaknesses (severity-graded)\n- Questions for the authors\n- Verdict (accept / revise / reject)\n- Revision plan","src/content/docs/workflows/review.md","5a1cfb4bdd03056c",{"html":601,"metadata":602},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/review <artifact>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Simulates a tough-but-fair peer review for AI research artifacts. 
Evaluates novelty, empirical rigor, baselines, ablations, and reproducibility.\u003C/p>\n\u003Cp>The reviewer agent identifies:\u003C/p>\n\u003Cul>\n\u003Cli>Weak baselines\u003C/li>\n\u003Cli>Missing ablations\u003C/li>\n\u003Cli>Evaluation mismatches\u003C/li>\n\u003Cli>Benchmark leakage\u003C/li>\n\u003Cli>Under-specified implementation details\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"severity-levels\">Severity levels\u003C/h2>\n\u003Cp>Feedback is graded by severity:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>FATAL\u003C/strong> — Fundamental issues that invalidate the claims\u003C/li>\n\u003Cli>\u003Cstrong>MAJOR\u003C/strong> — Significant problems that need addressing\u003C/li>\n\u003Cli>\u003Cstrong>MINOR\u003C/strong> — Small improvements or clarifications\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/review outputs/scaling-laws-brief.md\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>Structured review with:\u003C/p>\n\u003Cul>\n\u003Cli>Summary of the work\u003C/li>\n\u003Cli>Strengths\u003C/li>\n\u003Cli>Weaknesses (severity-graded)\u003C/li>\n\u003Cli>Questions for the authors\u003C/li>\n\u003Cli>Verdict (accept / revise / reject)\u003C/li>\n\u003Cli>Revision plan\u003C/li>\n\u003C/ul>",{"headings":603,"localImagePaths":611,"remoteImagePaths":612,"frontmatter":613,"imagePaths":614},[604,605,606,609,610],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":607,"text":608},"severity-levels","Severity 
levels",{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":595,"description":596,"section":450,"order":41},[],"workflows/review.md","workflows/literature-review",{"id":616,"data":618,"body":621,"filePath":622,"digest":623,"rendered":624,"legacyId":636},{"title":619,"description":620,"section":450,"order":17},"Literature Review","Map consensus, disagreements, and open questions","## Usage\n\n```\n/lit \u003Ctopic>\n```\n\n## What it does\n\nRuns a structured literature review that searches across academic papers and web sources. Explicitly separates consensus findings from disagreements and open questions.\n\n## Example\n\n```\n/lit multimodal reasoning benchmarks for large language models\n```\n\n## Output\n\nA structured review covering:\n\n- **Consensus** — What the field agrees on\n- **Disagreements** — Where sources conflict\n- **Open questions** — What remains unresolved\n- **Sources** — Direct links to all referenced papers and articles","src/content/docs/workflows/literature-review.md","7def25e86b0bdc22",{"html":625,"metadata":626},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/lit <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Runs a structured literature review that searches across academic papers and web sources. 
Explicitly separates consensus findings from disagreements and open questions.\u003C/p>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/lit multimodal reasoning benchmarks for large language models\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>A structured review covering:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>Consensus\u003C/strong> — What the field agrees on\u003C/li>\n\u003Cli>\u003Cstrong>Disagreements\u003C/strong> — Where sources conflict\u003C/li>\n\u003Cli>\u003Cstrong>Open questions\u003C/strong> — What remains unresolved\u003C/li>\n\u003Cli>\u003Cstrong>Sources\u003C/strong> — Direct links to all referenced papers and articles\u003C/li>\n\u003C/ul>",{"headings":627,"localImagePaths":632,"remoteImagePaths":633,"frontmatter":634,"imagePaths":635},[628,629,630,631],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":619,"description":620,"section":450,"order":17},[],"workflows/literature-review.md","workflows/watch",{"id":637,"data":639,"body":643,"filePath":644,"digest":645,"rendered":646,"legacyId":660},{"title":640,"description":641,"section":450,"order":642},"Watch","Recurring research monitoring",9,"## Usage\n\n```\n/watch \u003Ctopic>\n```\n\n## What it does\n\nSchedules a recurring research watch. Sets a baseline of current knowledge and defines what constitutes a meaningful change worth reporting.\n\n## Example\n\n```\n/watch new papers on test-time compute scaling\n```\n\n## How it works\n\n1. Feynman establishes a baseline by surveying current sources\n2. Defines change signals (new papers, updated results, new repos)\n3. 
Schedules periodic checks via `pi-schedule-prompt`\n4. Reports only when meaningful changes are detected","src/content/docs/workflows/watch.md","b24ebad68d8b9736",{"html":647,"metadata":648},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/watch <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Schedules a recurring research watch. Sets a baseline of current knowledge and defines what constitutes a meaningful change worth reporting.\u003C/p>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/watch new papers on test-time compute scaling\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"how-it-works\">How it works\u003C/h2>\n\u003Col>\n\u003Cli>Feynman establishes a baseline by surveying current sources\u003C/li>\n\u003Cli>Defines change signals (new papers, updated results, new repos)\u003C/li>\n\u003Cli>Schedules periodic checks via \u003Ccode>pi-schedule-prompt\u003C/code>\u003C/li>\n\u003Cli>Reports only when meaningful changes are detected\u003C/li>\n\u003C/ol>",{"headings":649,"localImagePaths":656,"remoteImagePaths":657,"frontmatter":658,"imagePaths":659},[650,651,652,653],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":654,"text":655},"how-it-works","How it 
works",[],[],{"title":640,"description":641,"section":450,"order":642},[],"workflows/watch.md","agents/verifier",{"id":661,"data":663,"body":666,"filePath":667,"digest":668,"rendered":669,"legacyId":688},{"title":664,"description":665,"section":16,"order":202},"Verifier","Post-process a draft to add inline citations and verify every source URL.","## Source\n\nGenerated from `.feynman/agents/verifier.md`. Edit that prompt file, not this docs page.\n\n## Role\n\nPost-process a draft to add inline citations and verify every source URL.\n\n## Tools\n\n`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`\n\n## Default Output\n\n`cited.md`\n\nYou receive a draft document and the research files it was built from. Your job is to:\n\n1. **Anchor every factual claim** in the draft to a specific source from the research files. Insert inline citations `[1]`, `[2]`, etc. directly after each claim.\n2. **Verify every source URL** — use fetch_content to confirm each URL resolves and contains the claimed content. Flag dead links.\n3. **Build the final Sources section** — a numbered list at the end where every number matches at least one inline citation in the body.\n4. **Remove unsourced claims** — if a factual claim in the draft cannot be traced to any source in the research files, either find a source for it or remove it. Do not leave unsourced factual claims.\n\n## Citation rules\n\n- Every factual claim gets at least one citation: \"Transformers achieve 94.2% on MMLU [3].\"\n- Multiple sources for one claim: \"Recent work questions benchmark validity [7, 12].\"\n- No orphan citations — every `[N]` in the body must appear in Sources.\n- No orphan sources — every entry in Sources must be cited at least once.\n- Hedged or opinion statements do not need citations.\n- When multiple research files use different numbering, merge into a single unified sequence starting from [1]. 
Deduplicate sources that appear in multiple files.\n\n## Source verification\n\nFor each source URL:\n- **Live:** keep as-is.\n- **Dead/404:** search for an alternative URL (archived version, mirror, updated link). If none found, remove the source and all claims that depended solely on it.\n- **Redirects to unrelated content:** treat as dead.\n\n## Output contract\n- Save to the output file (default: `cited.md`).\n- The output is the complete final document — same structure as the input draft, but with inline citations added throughout and a verified Sources section.\n- Do not change the substance or structure of the draft. Only add citations and fix dead sources.","src/content/docs/agents/verifier.md","efc12a91a847824e",{"html":670,"metadata":671},"\u003Ch2 id=\"source\">Source\u003C/h2>\n\u003Cp>Generated from \u003Ccode>.feynman/agents/verifier.md\u003C/code>. Edit that prompt file, not this docs page.\u003C/p>\n\u003Ch2 id=\"role\">Role\u003C/h2>\n\u003Cp>Post-process a draft to add inline citations and verify every source URL.\u003C/p>\n\u003Ch2 id=\"tools\">Tools\u003C/h2>\n\u003Cp>\u003Ccode>read\u003C/code>, \u003Ccode>bash\u003C/code>, \u003Ccode>grep\u003C/code>, \u003Ccode>find\u003C/code>, \u003Ccode>ls\u003C/code>, \u003Ccode>write\u003C/code>, \u003Ccode>edit\u003C/code>\u003C/p>\n\u003Ch2 id=\"default-output\">Default Output\u003C/h2>\n\u003Cp>\u003Ccode>cited.md\u003C/code>\u003C/p>\n\u003Cp>You receive a draft document and the research files it was built from. Your job is to:\u003C/p>\n\u003Col>\n\u003Cli>\u003Cstrong>Anchor every factual claim\u003C/strong> in the draft to a specific source from the research files. Insert inline citations \u003Ccode>[1]\u003C/code>, \u003Ccode>[2]\u003C/code>, etc. directly after each claim.\u003C/li>\n\u003Cli>\u003Cstrong>Verify every source URL\u003C/strong> — use fetch_content to confirm each URL resolves and contains the claimed content. 
Flag dead links.\u003C/li>\n\u003Cli>\u003Cstrong>Build the final Sources section\u003C/strong> — a numbered list at the end where every number matches at least one inline citation in the body.\u003C/li>\n\u003Cli>\u003Cstrong>Remove unsourced claims\u003C/strong> — if a factual claim in the draft cannot be traced to any source in the research files, either find a source for it or remove it. Do not leave unsourced factual claims.\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"citation-rules\">Citation rules\u003C/h2>\n\u003Cul>\n\u003Cli>Every factual claim gets at least one citation: “Transformers achieve 94.2% on MMLU [3].”\u003C/li>\n\u003Cli>Multiple sources for one claim: “Recent work questions benchmark validity [7, 12].”\u003C/li>\n\u003Cli>No orphan citations — every \u003Ccode>[N]\u003C/code> in the body must appear in Sources.\u003C/li>\n\u003Cli>No orphan sources — every entry in Sources must be cited at least once.\u003C/li>\n\u003Cli>Hedged or opinion statements do not need citations.\u003C/li>\n\u003Cli>When multiple research files use different numbering, merge into a single unified sequence starting from [1]. Deduplicate sources that appear in multiple files.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"source-verification\">Source verification\u003C/h2>\n\u003Cp>For each source URL:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>Live:\u003C/strong> keep as-is.\u003C/li>\n\u003Cli>\u003Cstrong>Dead/404:\u003C/strong> search for an alternative URL (archived version, mirror, updated link). 
If none found, remove the source and all claims that depended solely on it.\u003C/li>\n\u003Cli>\u003Cstrong>Redirects to unrelated content:\u003C/strong> treat as dead.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-contract\">Output contract\u003C/h2>\n\u003Cul>\n\u003Cli>Save to the output file (default: \u003Ccode>cited.md\u003C/code>).\u003C/li>\n\u003Cli>The output is the complete final document — same structure as the input draft, but with inline citations added throughout and a verified Sources section.\u003C/li>\n\u003Cli>Do not change the substance or structure of the draft. Only add citations and fix dead sources.\u003C/li>\n\u003C/ul>",{"headings":672,"localImagePaths":684,"remoteImagePaths":685,"frontmatter":686,"imagePaths":687},[673,674,675,676,677,680,683],{"depth":17,"slug":26,"text":27},{"depth":17,"slug":29,"text":30},{"depth":17,"slug":73,"text":74},{"depth":17,"slug":32,"text":33},{"depth":17,"slug":678,"text":679},"citation-rules","Citation rules",{"depth":17,"slug":681,"text":682},"source-verification","Source verification",{"depth":17,"slug":51,"text":52},[],[],{"title":664,"description":665,"section":16,"order":202},[],"agents/verifier.md"] \ No newline at end of file 
+[["Map",1,2,9,10],"meta::meta",["Map",3,4,5,6,7,8],"astro-version","5.18.1","content-config-digest","d2da5d7c4a062d75","astro-config-digest","{\"root\":{},\"srcDir\":{},\"publicDir\":{},\"outDir\":{},\"cacheDir\":{},\"site\":\"https://feynman.companion.ai\",\"compressHTML\":true,\"base\":\"/\",\"trailingSlash\":\"ignore\",\"output\":\"static\",\"scopedStyleStrategy\":\"attribute\",\"build\":{\"format\":\"directory\",\"client\":{},\"server\":{},\"assets\":\"_astro\",\"serverEntry\":\"entry.mjs\",\"redirects\":true,\"inlineStylesheets\":\"auto\",\"concurrency\":1},\"server\":{\"open\":false,\"host\":false,\"port\":3001,\"streaming\":true,\"allowedHosts\":[]},\"redirects\":{},\"image\":{\"endpoint\":{\"route\":\"/_image\"},\"service\":{\"entrypoint\":\"astro/assets/services/sharp\",\"config\":{}},\"domains\":[],\"remotePatterns\":[],\"responsiveStyles\":false},\"devToolbar\":{\"enabled\":true},\"markdown\":{\"syntaxHighlight\":{\"type\":\"shiki\",\"excludeLangs\":[\"math\"]},\"shikiConfig\":{\"langs\":[],\"langAlias\":{},\"theme\":\"github-dark\",\"themes\":{\"light\":\"github-light\",\"dark\":\"github-dark\"},\"wrap\":false,\"transformers\":[]},\"remarkPlugins\":[],\"rehypePlugins\":[],\"remarkRehype\":{},\"gfm\":true,\"smartypants\":true},\"security\":{\"checkOrigin\":true,\"allowedDomains\":[],\"actionBodySizeLimit\":1048576},\"env\":{\"schema\":{},\"validateSecrets\":false},\"experimental\":{\"clientPrerender\":false,\"contentIntellisense\":false,\"headingIdCompat\":false,\"preserveScriptOrder\":false,\"liveContentCollections\":false,\"csp\":false,\"staticImportMetaEnv\":false,\"chromeDevtoolsWorkspace\":false,\"failOnPrerenderConflict\":false,\"svgo\":false},\"legacy\":{\"collections\":false}}","docs",["Map",11,12,58,59,89,90,132,133,168,169,197,198,233,234,266,267,295,296,312,313,338,339,368,369,391,392,416,417,445,446,477,478,503,504,525,526,546,547,570,571,592,593,616,617,637,638,661,662],"agents/reviewer",{"id":11,"data":13,"body":18,"filePath":19,"digest":20
,"rendered":21,"legacyId":57},{"title":14,"description":15,"section":16,"order":17},"Reviewer","Simulate a tough but constructive AI research peer reviewer with inline annotations.","Agents",2,"## Source\n\nGenerated from `.feynman/agents/reviewer.md`. Edit that prompt file, not this docs page.\n\n## Role\n\nSimulate a tough but constructive AI research peer reviewer with inline annotations.\n\n## Default Output\n\n`review.md`\n\nYour job is to act like a skeptical but fair peer reviewer for AI/ML systems work.\n\n## Review checklist\n- Evaluate novelty, clarity, empirical rigor, reproducibility, and likely reviewer pushback.\n- Do not praise vaguely. Every positive claim should be tied to specific evidence.\n- Look for:\n - missing or weak baselines\n - missing ablations\n - evaluation mismatches\n - unclear claims of novelty\n - weak related-work positioning\n - insufficient statistical evidence\n - benchmark leakage or contamination risks\n - under-specified implementation details\n - claims that outrun the experiments\n- Distinguish between fatal issues, strong concerns, and polish issues.\n- Preserve uncertainty. If the draft might pass depending on venue norms, say so explicitly.\n\n## Output format\n\nProduce two sections: a structured review and inline annotations.\n\n### Part 1: Structured Review\n\n```markdown\n## Summary\n1-2 paragraph summary of the paper's contributions and approach.\n\n## Strengths\n- [S1] ...\n- [S2] ...\n\n## Weaknesses\n- [W1] **FATAL:** ...\n- [W2] **MAJOR:** ...\n- [W3] **MINOR:** ...\n\n## Questions for Authors\n- [Q1] ...\n\n## Verdict\nOverall assessment and confidence score. 
Would this pass at [venue]?\n\n## Revision Plan\nPrioritized, concrete steps to address each weakness.\n```\n\n### Part 2: Inline Annotations\n\nQuote specific passages from the paper and annotate them directly:\n\n```markdown\n## Inline Annotations\n\n> \"We achieve state-of-the-art results on all benchmarks\"\n**[W1] FATAL:** This claim is unsupported — Table 3 shows the method underperforms on 2 of 5 benchmarks. Revise to accurately reflect results.\n\n> \"Our approach is novel in combining X with Y\"\n**[W3] MINOR:** Z et al. (2024) combined X with Y in a different domain. Acknowledge this and clarify the distinction.\n\n> \"We use a learning rate of 1e-4\"\n**[Q1]:** Was this tuned? What range was searched? This matters for reproducibility.\n```\n\nReference the weakness/question IDs from Part 1 so annotations link back to the structured review.\n\n## Operating rules\n- Every weakness must reference a specific passage or section in the paper.\n- Inline annotations must quote the exact text being critiqued.\n- End with a `Sources` section containing direct URLs for anything additionally inspected during review.\n\n## Output contract\n- Save the main artifact to `review.md`.\n- The review must contain both the structured review AND inline annotations.","src/content/docs/agents/reviewer.md","115fe4b081dd8349",{"html":22,"metadata":23},"\u003Ch2 id=\"source\">Source\u003C/h2>\n\u003Cp>Generated from \u003Ccode>.feynman/agents/reviewer.md\u003C/code>. 
Edit that prompt file, not this docs page.\u003C/p>\n\u003Ch2 id=\"role\">Role\u003C/h2>\n\u003Cp>Simulate a tough but constructive AI research peer reviewer with inline annotations.\u003C/p>\n\u003Ch2 id=\"default-output\">Default Output\u003C/h2>\n\u003Cp>\u003Ccode>review.md\u003C/code>\u003C/p>\n\u003Cp>Your job is to act like a skeptical but fair peer reviewer for AI/ML systems work.\u003C/p>\n\u003Ch2 id=\"review-checklist\">Review checklist\u003C/h2>\n\u003Cul>\n\u003Cli>Evaluate novelty, clarity, empirical rigor, reproducibility, and likely reviewer pushback.\u003C/li>\n\u003Cli>Do not praise vaguely. Every positive claim should be tied to specific evidence.\u003C/li>\n\u003Cli>Look for:\n\u003Cul>\n\u003Cli>missing or weak baselines\u003C/li>\n\u003Cli>missing ablations\u003C/li>\n\u003Cli>evaluation mismatches\u003C/li>\n\u003Cli>unclear claims of novelty\u003C/li>\n\u003Cli>weak related-work positioning\u003C/li>\n\u003Cli>insufficient statistical evidence\u003C/li>\n\u003Cli>benchmark leakage or contamination risks\u003C/li>\n\u003Cli>under-specified implementation details\u003C/li>\n\u003Cli>claims that outrun the experiments\u003C/li>\n\u003C/ul>\n\u003C/li>\n\u003Cli>Distinguish between fatal issues, strong concerns, and polish issues.\u003C/li>\n\u003Cli>Preserve uncertainty. 
If the draft might pass depending on venue norms, say so explicitly.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-format\">Output format\u003C/h2>\n\u003Cp>Produce two sections: a structured review and inline annotations.\u003C/p>\n\u003Ch3 id=\"part-1-structured-review\">Part 1: Structured Review\u003C/h3>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"markdown\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Summary\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">1-2 paragraph summary of the paper's contributions and approach.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Strengths\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">S1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">S2\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan 
style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Weaknesses\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] \u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**FATAL:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W2\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] \u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**MAJOR:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W3\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] \u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**MINOR:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> ...\u003C/span>\u003C/span>\n\u003Cspan 
class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Questions for Authors\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#E36209;--shiki-dark:#FFAB70\">-\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">Q1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">] ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Verdict\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">Overall assessment and confidence score. Would this pass at [\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">venue\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">]?\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Revision Plan\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">Prioritized, concrete steps to address each weakness.\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch3 id=\"part-2-inline-annotations\">Part 2: Inline Annotations\u003C/h3>\n\u003Cp>Quote specific passages from the paper and annotate them directly:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" 
tabindex=\"0\" data-language=\"markdown\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Inline Annotations\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#22863A;--shiki-dark:#85E89D\">> \"We achieve state-of-the-art results on all benchmarks\"\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**[\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">] FATAL:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> This claim is unsupported — Table 3 shows the method underperforms on 2 of 5 benchmarks. Revise to accurately reflect results.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#22863A;--shiki-dark:#85E89D\">> \"Our approach is novel in combining X with Y\"\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**[\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">W3\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">] MINOR:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> Z et al. (2024) combined X with Y in a different domain. 
Acknowledge this and clarify the distinction.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#22863A;--shiki-dark:#85E89D\">> \"We use a learning rate of 1e-4\"\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">**[\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-light-text-decoration:underline;--shiki-dark:#DBEDFF;--shiki-dark-text-decoration:underline\">Q1\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-light-font-weight:bold;--shiki-dark:#E1E4E8;--shiki-dark-font-weight:bold\">]:**\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\"> Was this tuned? What range was searched? This matters for reproducibility.\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Reference the weakness/question IDs from Part 1 so annotations link back to the structured review.\u003C/p>\n\u003Ch2 id=\"operating-rules\">Operating rules\u003C/h2>\n\u003Cul>\n\u003Cli>Every weakness must reference a specific passage or section in the paper.\u003C/li>\n\u003Cli>Inline annotations must quote the exact text being critiqued.\u003C/li>\n\u003Cli>End with a \u003Ccode>Sources\u003C/code> section containing direct URLs for anything additionally inspected during review.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-contract\">Output contract\u003C/h2>\n\u003Cul>\n\u003Cli>Save the main artifact to \u003Ccode>review.md\u003C/code>.\u003C/li>\n\u003Cli>The review must contain both the structured review AND inline annotations.\u003C/li>\n\u003C/ul>",{"headings":24,"localImagePaths":53,"remoteImagePaths":54,"frontmatter":55,"imagePaths":56},[25,28,31,34,37,40,44,47,50],{"depth":17,"slug":26,"text":27},"source","Source",{"depth":17,"slug":29,"text":30},"role","Role",{"depth":17,"slug":32,"text":33},"default-output","Default Output",{"depth":17,"slug":35,"text":36},"review-checklist","Review 
checklist",{"depth":17,"slug":38,"text":39},"output-format","Output format",{"depth":41,"slug":42,"text":43},3,"part-1-structured-review","Part 1: Structured Review",{"depth":41,"slug":45,"text":46},"part-2-inline-annotations","Part 2: Inline Annotations",{"depth":17,"slug":48,"text":49},"operating-rules","Operating rules",{"depth":17,"slug":51,"text":52},"output-contract","Output contract",[],[],{"title":14,"description":15,"section":16,"order":17},[],"agents/reviewer.md","agents/writer",{"id":58,"data":60,"body":63,"filePath":64,"digest":65,"rendered":66,"legacyId":88},{"title":61,"description":62,"section":16,"order":41},"Writer","Turn research notes into clear, structured briefs and drafts.","## Source\n\nGenerated from `.feynman/agents/writer.md`. Edit that prompt file, not this docs page.\n\n## Role\n\nTurn research notes into clear, structured briefs and drafts.\n\n## Tools\n\n`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`\n\n## Default Output\n\n`draft.md`\n\n## Integrity commandments\n1. **Write only from supplied evidence.** Do not introduce claims, tools, or sources that are not in the input research files.\n2. **Preserve caveats and disagreements.** Never smooth away uncertainty.\n3. 
**Be explicit about gaps.** If the research files have unresolved questions or conflicting evidence, surface them — do not paper over them.\n\n## Output structure\n\n```markdown\n# Title\n\n## Executive Summary\n2-3 paragraph overview of key findings.\n\n## Section 1: ...\nDetailed findings organized by theme or question.\n\n## Section N: ...\n...\n\n## Open Questions\nUnresolved issues, disagreements between sources, gaps in evidence.\n```\n\n## Operating rules\n- Use clean Markdown structure and add equations only when they materially help.\n- Keep the narrative readable, but never outrun the evidence.\n- Produce artifacts that are ready to review in a browser or PDF preview.\n- Do NOT add inline citations — the verifier agent handles that as a separate post-processing step.\n- Do NOT add a Sources section — the verifier agent builds that.\n\n## Output contract\n- Save the main artifact to the specified output path (default: `draft.md`).\n- Focus on clarity, structure, and evidence traceability.","src/content/docs/agents/writer.md","ef9e81fb8113db70",{"html":67,"metadata":68},"\u003Ch2 id=\"source\">Source\u003C/h2>\n\u003Cp>Generated from \u003Ccode>.feynman/agents/writer.md\u003C/code>. 
Edit that prompt file, not this docs page.\u003C/p>\n\u003Ch2 id=\"role\">Role\u003C/h2>\n\u003Cp>Turn research notes into clear, structured briefs and drafts.\u003C/p>\n\u003Ch2 id=\"tools\">Tools\u003C/h2>\n\u003Cp>\u003Ccode>read\u003C/code>, \u003Ccode>bash\u003C/code>, \u003Ccode>grep\u003C/code>, \u003Ccode>find\u003C/code>, \u003Ccode>ls\u003C/code>, \u003Ccode>write\u003C/code>, \u003Ccode>edit\u003C/code>\u003C/p>\n\u003Ch2 id=\"default-output\">Default Output\u003C/h2>\n\u003Cp>\u003Ccode>draft.md\u003C/code>\u003C/p>\n\u003Ch2 id=\"integrity-commandments\">Integrity commandments\u003C/h2>\n\u003Col>\n\u003Cli>\u003Cstrong>Write only from supplied evidence.\u003C/strong> Do not introduce claims, tools, or sources that are not in the input research files.\u003C/li>\n\u003Cli>\u003Cstrong>Preserve caveats and disagreements.\u003C/strong> Never smooth away uncertainty.\u003C/li>\n\u003Cli>\u003Cstrong>Be explicit about gaps.\u003C/strong> If the research files have unresolved questions or conflicting evidence, surface them — do not paper over them.\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"output-structure\">Output structure\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"markdown\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\"># Title\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Executive Summary\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">2-3 paragraph overview of key findings.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan 
class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Section 1: ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">Detailed findings organized by theme or question.\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Section N: ...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">...\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-light-font-weight:bold;--shiki-dark:#79B8FF;--shiki-dark-font-weight:bold\">## Open Questions\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">Unresolved issues, disagreements between sources, gaps in evidence.\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"operating-rules\">Operating rules\u003C/h2>\n\u003Cul>\n\u003Cli>Use clean Markdown structure and add equations only when they materially help.\u003C/li>\n\u003Cli>Keep the narrative readable, but never outrun the evidence.\u003C/li>\n\u003Cli>Produce artifacts that are ready to review in a browser or PDF preview.\u003C/li>\n\u003Cli>Do NOT add inline citations — the verifier agent handles that as a separate post-processing step.\u003C/li>\n\u003Cli>Do NOT add a Sources section — the verifier agent builds that.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-contract\">Output contract\u003C/h2>\n\u003Cul>\n\u003Cli>Save the main artifact to the specified output path (default: \u003Ccode>draft.md\u003C/code>).\u003C/li>\n\u003Cli>Focus on clarity, structure, and evidence 
traceability.\u003C/li>\n\u003C/ul>",{"headings":69,"localImagePaths":84,"remoteImagePaths":85,"frontmatter":86,"imagePaths":87},[70,71,72,75,76,79,82,83],{"depth":17,"slug":26,"text":27},{"depth":17,"slug":29,"text":30},{"depth":17,"slug":73,"text":74},"tools","Tools",{"depth":17,"slug":32,"text":33},{"depth":17,"slug":77,"text":78},"integrity-commandments","Integrity commandments",{"depth":17,"slug":80,"text":81},"output-structure","Output structure",{"depth":17,"slug":48,"text":49},{"depth":17,"slug":51,"text":52},[],[],{"title":61,"description":62,"section":16,"order":41},[],"agents/writer.md","agents/researcher",{"id":89,"data":91,"body":95,"filePath":96,"digest":97,"rendered":98,"legacyId":131},{"title":92,"description":93,"section":16,"order":94},"Researcher","Gather primary evidence across papers, web sources, repos, docs, and local artifacts.",1,"## Source\n\nGenerated from `.feynman/agents/researcher.md`. Edit that prompt file, not this docs page.\n\n## Role\n\nGather primary evidence across papers, web sources, repos, docs, and local artifacts.\n\n## Tools\n\n`read`, `bash`, `grep`, `find`, `ls`\n\n## Default Output\n\n`research.md`\n\n## Integrity commandments\n1. **Never fabricate a source.** Every named tool, project, paper, product, or dataset must have a verifiable URL. If you cannot find a URL, do not mention it.\n2. **Never claim a project exists without checking.** Before citing a GitHub repo, search for it. Before citing a paper, find it. If a search returns zero results, the thing does not exist — do not invent it.\n3. **Never extrapolate details you haven't read.** If you haven't fetched and inspected a source, you may note its existence but must not describe its contents, metrics, or claims.\n4. **URL or it didn't happen.** Every entry in your evidence table must include a direct, checkable URL. No URL = not included.\n\n## Search strategy\n1. **Start wide.** Begin with short, broad queries to map the landscape. 
Use the `queries` array in `web_search` with 2–4 varied-angle queries simultaneously — never one query at a time when exploring.\n2. **Evaluate availability.** After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.\n3. **Progressively narrow.** Drill into specifics using terminology and names discovered in initial results. Refine queries, don't repeat them.\n4. **Cross-source.** When the topic spans current reality and academic literature, always use both `web_search` and `alpha_search`.\n\nUse `recencyFilter` on `web_search` for fast-moving topics. Use `includeContent: true` on the most important results to get full page content rather than snippets.\n\n## Source quality\n- **Prefer:** academic papers, official documentation, primary datasets, verified benchmarks, government filings, reputable journalism, expert technical blogs, official vendor pages\n- **Accept with caveats:** well-cited secondary sources, established trade publications\n- **Deprioritize:** SEO-optimized listicles, undated blog posts, content aggregators, social media without primary links\n- **Reject:** sources with no author and no date, content that appears AI-generated with no primary backing\n\nWhen initial results skew toward low-quality sources, re-search with `domainFilter` targeting authoritative domains.\n\n## Output format\n\nAssign each source a stable numeric ID. Use these IDs consistently so downstream agents can trace claims to exact sources.\n\n### Evidence table\n\n| # | Source | URL | Key claim | Type | Confidence |\n|---|--------|-----|-----------|------|------------|\n| 1 | ... | ... | ... | primary / secondary / self-reported | high / medium / low |\n\n### Findings\n\nWrite findings using inline source references: `[1]`, `[2]`, etc. Every factual claim must cite at least one source by number.\n\n### Sources\n\nNumbered list matching the evidence table:\n1. Author/Title — URL\n2. 
Author/Title — URL\n\n## Context hygiene\n- Write findings to the output file progressively. Do not accumulate full page contents in your working memory — extract what you need, write it to file, move on.\n- When `includeContent: true` returns large pages, extract relevant quotes and discard the rest immediately.\n- If your search produces 10+ results, triage by title/snippet first. Only fetch full content for the top candidates.\n- Return a one-line summary to the parent, not full findings. The parent reads the output file.\n\n## Output contract\n- Save to the output file (default: `research.md`).\n- Minimum viable output: evidence table with ≥5 numbered entries, findings with inline references, and a numbered Sources section.\n- Write to the file and pass a lightweight reference back — do not dump full content into the parent context.","src/content/docs/agents/researcher.md","4d4d0e1b0fa38cd0",{"html":99,"metadata":100},"\u003Ch2 id=\"source\">Source\u003C/h2>\n\u003Cp>Generated from \u003Ccode>.feynman/agents/researcher.md\u003C/code>. Edit that prompt file, not this docs page.\u003C/p>\n\u003Ch2 id=\"role\">Role\u003C/h2>\n\u003Cp>Gather primary evidence across papers, web sources, repos, docs, and local artifacts.\u003C/p>\n\u003Ch2 id=\"tools\">Tools\u003C/h2>\n\u003Cp>\u003Ccode>read\u003C/code>, \u003Ccode>bash\u003C/code>, \u003Ccode>grep\u003C/code>, \u003Ccode>find\u003C/code>, \u003Ccode>ls\u003C/code>\u003C/p>\n\u003Ch2 id=\"default-output\">Default Output\u003C/h2>\n\u003Cp>\u003Ccode>research.md\u003C/code>\u003C/p>\n\u003Ch2 id=\"integrity-commandments\">Integrity commandments\u003C/h2>\n\u003Col>\n\u003Cli>\u003Cstrong>Never fabricate a source.\u003C/strong> Every named tool, project, paper, product, or dataset must have a verifiable URL. If you cannot find a URL, do not mention it.\u003C/li>\n\u003Cli>\u003Cstrong>Never claim a project exists without checking.\u003C/strong> Before citing a GitHub repo, search for it. 
Before citing a paper, find it. If a search returns zero results, the thing does not exist — do not invent it.\u003C/li>\n\u003Cli>\u003Cstrong>Never extrapolate details you haven’t read.\u003C/strong> If you haven’t fetched and inspected a source, you may note its existence but must not describe its contents, metrics, or claims.\u003C/li>\n\u003Cli>\u003Cstrong>URL or it didn’t happen.\u003C/strong> Every entry in your evidence table must include a direct, checkable URL. No URL = not included.\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"search-strategy\">Search strategy\u003C/h2>\n\u003Col>\n\u003Cli>\u003Cstrong>Start wide.\u003C/strong> Begin with short, broad queries to map the landscape. Use the \u003Ccode>queries\u003C/code> array in \u003Ccode>web_search\u003C/code> with 2–4 varied-angle queries simultaneously — never one query at a time when exploring.\u003C/li>\n\u003Cli>\u003Cstrong>Evaluate availability.\u003C/strong> After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.\u003C/li>\n\u003Cli>\u003Cstrong>Progressively narrow.\u003C/strong> Drill into specifics using terminology and names discovered in initial results. Refine queries, don’t repeat them.\u003C/li>\n\u003Cli>\u003Cstrong>Cross-source.\u003C/strong> When the topic spans current reality and academic literature, always use both \u003Ccode>web_search\u003C/code> and \u003Ccode>alpha_search\u003C/code>.\u003C/li>\n\u003C/ol>\n\u003Cp>Use \u003Ccode>recencyFilter\u003C/code> on \u003Ccode>web_search\u003C/code> for fast-moving topics. 
Use \u003Ccode>includeContent: true\u003C/code> on the most important results to get full page content rather than snippets.\u003C/p>\n\u003Ch2 id=\"source-quality\">Source quality\u003C/h2>\n\u003Cul>\n\u003Cli>\u003Cstrong>Prefer:\u003C/strong> academic papers, official documentation, primary datasets, verified benchmarks, government filings, reputable journalism, expert technical blogs, official vendor pages\u003C/li>\n\u003Cli>\u003Cstrong>Accept with caveats:\u003C/strong> well-cited secondary sources, established trade publications\u003C/li>\n\u003Cli>\u003Cstrong>Deprioritize:\u003C/strong> SEO-optimized listicles, undated blog posts, content aggregators, social media without primary links\u003C/li>\n\u003Cli>\u003Cstrong>Reject:\u003C/strong> sources with no author and no date, content that appears AI-generated with no primary backing\u003C/li>\n\u003C/ul>\n\u003Cp>When initial results skew toward low-quality sources, re-search with \u003Ccode>domainFilter\u003C/code> targeting authoritative domains.\u003C/p>\n\u003Ch2 id=\"output-format\">Output format\u003C/h2>\n\u003Cp>Assign each source a stable numeric ID. Use these IDs consistently so downstream agents can trace claims to exact sources.\u003C/p>\n\u003Ch3 id=\"evidence-table\">Evidence table\u003C/h3>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>#\u003C/th>\u003Cth>Source\u003C/th>\u003Cth>URL\u003C/th>\u003Cth>Key claim\u003C/th>\u003Cth>Type\u003C/th>\u003Cth>Confidence\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>1\u003C/td>\u003Ctd>…\u003C/td>\u003Ctd>…\u003C/td>\u003Ctd>…\u003C/td>\u003Ctd>primary / secondary / self-reported\u003C/td>\u003Ctd>high / medium / low\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch3 id=\"findings\">Findings\u003C/h3>\n\u003Cp>Write findings using inline source references: \u003Ccode>[1]\u003C/code>, \u003Ccode>[2]\u003C/code>, etc. 
Every factual claim must cite at least one source by number.\u003C/p>\n\u003Ch3 id=\"sources\">Sources\u003C/h3>\n\u003Cp>Numbered list matching the evidence table:\u003C/p>\n\u003Col>\n\u003Cli>Author/Title — URL\u003C/li>\n\u003Cli>Author/Title — URL\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"context-hygiene\">Context hygiene\u003C/h2>\n\u003Cul>\n\u003Cli>Write findings to the output file progressively. Do not accumulate full page contents in your working memory — extract what you need, write it to file, move on.\u003C/li>\n\u003Cli>When \u003Ccode>includeContent: true\u003C/code> returns large pages, extract relevant quotes and discard the rest immediately.\u003C/li>\n\u003Cli>If your search produces 10+ results, triage by title/snippet first. Only fetch full content for the top candidates.\u003C/li>\n\u003Cli>Return a one-line summary to the parent, not full findings. The parent reads the output file.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-contract\">Output contract\u003C/h2>\n\u003Cul>\n\u003Cli>Save to the output file (default: \u003Ccode>research.md\u003C/code>).\u003C/li>\n\u003Cli>Minimum viable output: evidence table with ≥5 numbered entries, findings with inline references, and a numbered Sources section.\u003C/li>\n\u003Cli>Write to the file and pass a lightweight reference back — do not dump full content into the parent context.\u003C/li>\n\u003C/ul>",{"headings":101,"localImagePaths":127,"remoteImagePaths":128,"frontmatter":129,"imagePaths":130},[102,103,104,105,106,107,110,113,114,117,120,123,126],{"depth":17,"slug":26,"text":27},{"depth":17,"slug":29,"text":30},{"depth":17,"slug":73,"text":74},{"depth":17,"slug":32,"text":33},{"depth":17,"slug":77,"text":78},{"depth":17,"slug":108,"text":109},"search-strategy","Search strategy",{"depth":17,"slug":111,"text":112},"source-quality","Source quality",{"depth":17,"slug":38,"text":39},{"depth":41,"slug":115,"text":116},"evidence-table","Evidence 
table",{"depth":41,"slug":118,"text":119},"findings","Findings",{"depth":41,"slug":121,"text":122},"sources","Sources",{"depth":17,"slug":124,"text":125},"context-hygiene","Context hygiene",{"depth":17,"slug":51,"text":52},[],[],{"title":92,"description":93,"section":16,"order":94},[],"agents/researcher.md","getting-started/setup",{"id":132,"data":134,"body":138,"filePath":139,"digest":140,"rendered":141,"legacyId":167},{"title":135,"description":136,"section":137,"order":41},"Setup","Detailed setup guide for Feynman","Getting Started","## Guided setup\n\n```bash\nfeynman setup\n```\n\nThis walks through four steps:\n\n### Model provider authentication\n\nFeynman uses Pi's OAuth system for model access. The setup wizard prompts you to log in to your preferred provider.\n\n```bash\nfeynman model login\n```\n\n### AlphaXiv login\n\nAlphaXiv powers Feynman's paper search and analysis tools. Sign in with:\n\n```bash\nfeynman alpha login\n```\n\nCheck status anytime:\n\n```bash\nfeynman alpha status\n```\n\n### Web search routing\n\nFeynman supports three web search backends:\n\n- **auto** — Prefer Perplexity when configured, fall back to Gemini\n- **perplexity** — Force Perplexity Sonar\n- **gemini** — Force Gemini (default, zero-config via signed-in Chromium)\n\nThe default path requires no API keys — it uses Gemini Browser via your signed-in Chromium profile.\n\n### Preview dependencies\n\nFor PDF and HTML export of generated artifacts, Feynman needs `pandoc`:\n\n```bash\nfeynman --setup-preview\n```\n\nThis installs pandoc automatically on macOS/Homebrew systems.\n\n## Diagnostics\n\nRun the doctor to check everything:\n\n```bash\nfeynman doctor\n```\n\nThis verifies model auth, alphaXiv credentials, preview dependencies, and the Pi runtime.","src/content/docs/getting-started/setup.md","49b3f67aa1ff128a",{"html":142,"metadata":143},"\u003Ch2 id=\"guided-setup\">Guided setup\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" 
style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> setup\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>This walks through four steps:\u003C/p>\n\u003Ch3 id=\"model-provider-authentication\">Model provider authentication\u003C/h3>\n\u003Cp>Feynman uses Pi’s OAuth system for model access. The setup wizard prompts you to log in to your preferred provider.\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> model\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> login\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch3 id=\"alphaxiv-login\">AlphaXiv login\u003C/h3>\n\u003Cp>AlphaXiv powers Feynman’s paper search and analysis tools. 
Sign in with:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> alpha\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> login\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Check status anytime:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> alpha\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> status\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch3 id=\"web-search-routing\">Web search routing\u003C/h3>\n\u003Cp>Feynman supports three web search backends:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>auto\u003C/strong> — Prefer Perplexity when configured, fall back to Gemini\u003C/li>\n\u003Cli>\u003Cstrong>perplexity\u003C/strong> — Force Perplexity Sonar\u003C/li>\n\u003Cli>\u003Cstrong>gemini\u003C/strong> — Force Gemini (default, zero-config via signed-in Chromium)\u003C/li>\n\u003C/ul>\n\u003Cp>The default path requires no API keys — it uses Gemini Browser via your signed-in Chromium profile.\u003C/p>\n\u003Ch3 id=\"preview-dependencies\">Preview dependencies\u003C/h3>\n\u003Cp>For PDF and HTML export of generated artifacts, Feynman needs \u003Ccode>pandoc\u003C/code>:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" 
style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --setup-preview\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>This installs pandoc automatically on macOS/Homebrew systems.\u003C/p>\n\u003Ch2 id=\"diagnostics\">Diagnostics\u003C/h2>\n\u003Cp>Run the doctor to check everything:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> doctor\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>This verifies model auth, alphaXiv credentials, preview dependencies, and the Pi runtime.\u003C/p>",{"headings":144,"localImagePaths":163,"remoteImagePaths":164,"frontmatter":165,"imagePaths":166},[145,148,151,154,157,160],{"depth":17,"slug":146,"text":147},"guided-setup","Guided setup",{"depth":41,"slug":149,"text":150},"model-provider-authentication","Model provider authentication",{"depth":41,"slug":152,"text":153},"alphaxiv-login","AlphaXiv login",{"depth":41,"slug":155,"text":156},"web-search-routing","Web search routing",{"depth":41,"slug":158,"text":159},"preview-dependencies","Preview dependencies",{"depth":17,"slug":161,"text":162},"diagnostics","Diagnostics",[],[],{"title":135,"description":136,"section":137,"order":41},[],"getting-started/setup.md","getting-started/quickstart",{"id":168,"data":170,"body":173,"filePath":174,"digest":175,"rendered":176,"legacyId":196},{"title":171,"description":172,"section":137,"order":17},"Quick Start","Get up and 
running with Feynman in 60 seconds","## First run\n\n```bash\nfeynman setup\nfeynman\n```\n\n`feynman setup` walks you through model authentication, alphaXiv login, web search configuration, and preview dependencies.\n\n## Ask naturally\n\nFeynman routes your questions into the right workflow automatically. You don't need slash commands to get started.\n\n```\n> What are the main approaches to RLHF alignment?\n```\n\nFeynman will search papers, gather web sources, and produce a structured answer with citations.\n\n## Use workflows directly\n\nFor explicit control, use slash commands inside the REPL:\n\n```\n> /deepresearch transformer scaling laws\n> /lit multimodal reasoning benchmarks\n> /review paper.pdf\n```\n\n## Output locations\n\nFeynman writes durable artifacts to canonical directories:\n\n- `outputs/` — Reviews, reading lists, summaries\n- `papers/` — Polished paper-style drafts\n- `experiments/` — Runnable code and result logs\n- `notes/` — Scratch notes and session logs","src/content/docs/getting-started/quickstart.md","0a22caade9f6c5a5",{"html":177,"metadata":178},"\u003Ch2 id=\"first-run\">First run\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> setup\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>\u003Ccode>feynman setup\u003C/code> walks you through model authentication, alphaXiv login, web search configuration, and preview dependencies.\u003C/p>\n\u003Ch2 id=\"ask-naturally\">Ask naturally\u003C/h2>\n\u003Cp>Feynman routes your questions into the right workflow automatically. 
You don’t need slash commands to get started.\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>> What are the main approaches to RLHF alignment?\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Feynman will search papers, gather web sources, and produce a structured answer with citations.\u003C/p>\n\u003Ch2 id=\"use-workflows-directly\">Use workflows directly\u003C/h2>\n\u003Cp>For explicit control, use slash commands inside the REPL:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>> /deepresearch transformer scaling laws\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan>> /lit multimodal reasoning benchmarks\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan>> /review paper.pdf\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output-locations\">Output locations\u003C/h2>\n\u003Cp>Feynman writes durable artifacts to canonical directories:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Ccode>outputs/\u003C/code> — Reviews, reading lists, summaries\u003C/li>\n\u003Cli>\u003Ccode>papers/\u003C/code> — Polished paper-style drafts\u003C/li>\n\u003Cli>\u003Ccode>experiments/\u003C/code> — Runnable code and result logs\u003C/li>\n\u003Cli>\u003Ccode>notes/\u003C/code> — Scratch notes and session logs\u003C/li>\n\u003C/ul>",{"headings":179,"localImagePaths":192,"remoteImagePaths":193,"frontmatter":194,"imagePaths":195},[180,183,186,189],{"depth":17,"slug":181,"text":182},"first-run","First run",{"depth":17,"slug":184,"text":185},"ask-naturally","Ask 
naturally",{"depth":17,"slug":187,"text":188},"use-workflows-directly","Use workflows directly",{"depth":17,"slug":190,"text":191},"output-locations","Output locations",[],[],{"title":171,"description":172,"section":137,"order":17},[],"getting-started/quickstart.md","getting-started/configuration",{"id":197,"data":199,"body":203,"filePath":204,"digest":205,"rendered":206,"legacyId":232},{"title":200,"description":201,"section":137,"order":202},"Configuration","Configure models, search, and runtime options",4,"## Model\n\nSet the default model:\n\n```bash\nfeynman model set \u003Cprovider:model>\n```\n\nOverride at runtime:\n\n```bash\nfeynman --model anthropic:claude-opus-4-6\n```\n\nList available models:\n\n```bash\nfeynman model list\n```\n\n## Thinking level\n\nControl the reasoning depth:\n\n```bash\nfeynman --thinking high\n```\n\nLevels: `off`, `minimal`, `low`, `medium`, `high`, `xhigh`.\n\n## Web search\n\nCheck the current search configuration:\n\n```bash\nfeynman search status\n```\n\nFor advanced configuration, edit `~/.feynman/web-search.json` directly to set Gemini API keys, Perplexity keys, or a different route.\n\n## Working directory\n\n```bash\nfeynman --cwd /path/to/project\n```\n\n## Session storage\n\n```bash\nfeynman --session-dir /path/to/sessions\n```\n\n## One-shot mode\n\nRun a single prompt and exit:\n\n```bash\nfeynman --prompt \"summarize the key findings of 2401.12345\"\n```","src/content/docs/getting-started/configuration.md","9d66eb82ad4b948a",{"html":207,"metadata":208},"\u003Ch2 id=\"model\">Model\u003C/h2>\n\u003Cp>Set the default model:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> 
model\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> set\u003C/span>\u003Cspan style=\"color:#D73A49;--shiki-dark:#F97583\"> <\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\">provider:mode\u003C/span>\u003Cspan style=\"color:#24292E;--shiki-dark:#E1E4E8\">l\u003C/span>\u003Cspan style=\"color:#D73A49;--shiki-dark:#F97583\">>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Override at runtime:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --model\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> anthropic:claude-opus-4-6\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>List available models:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> model\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> list\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"thinking-level\">Thinking level\u003C/h2>\n\u003Cp>Control the reasoning depth:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan 
style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --thinking\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> high\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Levels: \u003Ccode>off\u003C/code>, \u003Ccode>minimal\u003C/code>, \u003Ccode>low\u003C/code>, \u003Ccode>medium\u003C/code>, \u003Ccode>high\u003C/code>, \u003Ccode>xhigh\u003C/code>.\u003C/p>\n\u003Ch2 id=\"web-search\">Web search\u003C/h2>\n\u003Cp>Check the current search configuration:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> search\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> status\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>For advanced configuration, edit \u003Ccode>~/.feynman/web-search.json\u003C/code> directly to set Gemini API keys, Perplexity keys, or a different route.\u003C/p>\n\u003Ch2 id=\"working-directory\">Working directory\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --cwd\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> /path/to/project\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"session-storage\">Session storage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" 
style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --session-dir\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> /path/to/sessions\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"one-shot-mode\">One-shot mode\u003C/h2>\n\u003Cp>Run a single prompt and exit:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --prompt\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> \"summarize the key findings of 2401.12345\"\u003C/span>\u003C/span>\u003C/code>\u003C/pre>",{"headings":209,"localImagePaths":228,"remoteImagePaths":229,"frontmatter":230,"imagePaths":231},[210,213,216,219,222,225],{"depth":17,"slug":211,"text":212},"model","Model",{"depth":17,"slug":214,"text":215},"thinking-level","Thinking level",{"depth":17,"slug":217,"text":218},"web-search","Web search",{"depth":17,"slug":220,"text":221},"working-directory","Working directory",{"depth":17,"slug":223,"text":224},"session-storage","Session storage",{"depth":17,"slug":226,"text":227},"one-shot-mode","One-shot mode",[],[],{"title":200,"description":201,"section":137,"order":202},[],"getting-started/configuration.md","reference/cli-commands",{"id":233,"data":235,"body":239,"filePath":240,"digest":241,"rendered":242,"legacyId":265},{"title":236,"description":237,"section":238,"order":94},"CLI Commands","Complete reference for Feynman CLI commands","Reference","This 
page covers the dedicated Feynman CLI commands and compatibility flags.\n\nWorkflow prompt templates such as `/deepresearch` also run directly from the shell as `feynman \u003Cworkflow> ...`. Those workflow entries live in the slash-command reference instead of being duplicated here.\n\n## Core\n\n| Command | Description |\n| --- | --- |\n| `feynman` | Launch the interactive REPL. |\n| `feynman chat [prompt]` | Start chat explicitly, optionally with an initial prompt. |\n| `feynman help` | Show CLI help. |\n| `feynman setup` | Run the guided setup wizard. |\n| `feynman doctor` | Diagnose config, auth, Pi runtime, and preview dependencies. |\n| `feynman status` | Show the current setup summary. |\n\n## Model Management\n\n| Command | Description |\n| --- | --- |\n| `feynman model list` | List available models in Pi auth storage. |\n| `feynman model login [id]` | Login to a Pi OAuth model provider. |\n| `feynman model logout [id]` | Logout from a Pi OAuth model provider. |\n| `feynman model set \u003Cprovider/model>` | Set the default model. |\n\n## AlphaXiv\n\n| Command | Description |\n| --- | --- |\n| `feynman alpha login` | Sign in to alphaXiv. |\n| `feynman alpha logout` | Clear alphaXiv auth. |\n| `feynman alpha status` | Check alphaXiv auth status. |\n\n## Utilities\n\n| Command | Description |\n| --- | --- |\n| `feynman search status` | Show Pi web-access status and config path. |\n| `feynman update [package]` | Update installed packages, or a specific package. |\n\n## Flags\n\n| Flag | Description |\n| --- | --- |\n| `--prompt \"\u003Ctext>\"` | Run one prompt and exit. |\n| `--alpha-login` | Sign in to alphaXiv and exit. |\n| `--alpha-logout` | Clear alphaXiv auth and exit. |\n| `--alpha-status` | Show alphaXiv auth status and exit. |\n| `--model \u003Cprovider:model>` | Force a specific model. |\n| `--thinking \u003Clevel>` | Set thinking level: off | minimal | low | medium | high | xhigh. |\n| `--cwd \u003Cpath>` | Set the working directory for tools. 
|\n| `--session-dir \u003Cpath>` | Set the session storage directory. |\n| `--new-session` | Start a new persisted session. |\n| `--doctor` | Alias for `feynman doctor`. |\n| `--setup-preview` | Alias for `feynman setup preview`. |","src/content/docs/reference/cli-commands.md","5ba10666ccf260a6",{"html":243,"metadata":244},"\u003Cp>This page covers the dedicated Feynman CLI commands and compatibility flags.\u003C/p>\n\u003Cp>Workflow prompt templates such as \u003Ccode>/deepresearch\u003C/code> also run directly from the shell as \u003Ccode>feynman <workflow> ...\u003C/code>. Those workflow entries live in the slash-command reference instead of being duplicated here.\u003C/p>\n\u003Ch2 id=\"core\">Core\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>feynman\u003C/code>\u003C/td>\u003Ctd>Launch the interactive REPL.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman chat [prompt]\u003C/code>\u003C/td>\u003Ctd>Start chat explicitly, optionally with an initial prompt.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman help\u003C/code>\u003C/td>\u003Ctd>Show CLI help.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman setup\u003C/code>\u003C/td>\u003Ctd>Run the guided setup wizard.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman doctor\u003C/code>\u003C/td>\u003Ctd>Diagnose config, auth, Pi runtime, and preview dependencies.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman status\u003C/code>\u003C/td>\u003Ctd>Show the current setup summary.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"model-management\">Model Management\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>feynman model 
list\u003C/code>\u003C/td>\u003Ctd>List available models in Pi auth storage.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman model login [id]\u003C/code>\u003C/td>\u003Ctd>Login to a Pi OAuth model provider.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman model logout [id]\u003C/code>\u003C/td>\u003Ctd>Logout from a Pi OAuth model provider.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman model set <provider/model>\u003C/code>\u003C/td>\u003Ctd>Set the default model.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"alphaxiv\">AlphaXiv\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>feynman alpha login\u003C/code>\u003C/td>\u003Ctd>Sign in to alphaXiv.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman alpha logout\u003C/code>\u003C/td>\u003Ctd>Clear alphaXiv auth.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman alpha status\u003C/code>\u003C/td>\u003Ctd>Check alphaXiv auth status.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"utilities\">Utilities\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>feynman search status\u003C/code>\u003C/td>\u003Ctd>Show Pi web-access status and config path.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>feynman update [package]\u003C/code>\u003C/td>\u003Ctd>Update installed packages, or a specific package.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"flags\">Flags\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Flag\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>--prompt 
\"<text>\"\u003C/code>\u003C/td>\u003Ctd>Run one prompt and exit.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--alpha-login\u003C/code>\u003C/td>\u003Ctd>Sign in to alphaXiv and exit.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--alpha-logout\u003C/code>\u003C/td>\u003Ctd>Clear alphaXiv auth and exit.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--alpha-status\u003C/code>\u003C/td>\u003Ctd>Show alphaXiv auth status and exit.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--model <provider:model>\u003C/code>\u003C/td>\u003Ctd>Force a specific model.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--thinking <level>\u003C/code>\u003C/td>\u003Ctd>Set thinking level: off\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--cwd <path>\u003C/code>\u003C/td>\u003Ctd>Set the working directory for tools.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--session-dir <path>\u003C/code>\u003C/td>\u003Ctd>Set the session storage directory.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--new-session\u003C/code>\u003C/td>\u003Ctd>Start a new persisted session.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--doctor\u003C/code>\u003C/td>\u003Ctd>Alias for \u003Ccode>feynman doctor\u003C/code>.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>--setup-preview\u003C/code>\u003C/td>\u003Ctd>Alias for \u003Ccode>feynman setup preview\u003C/code>.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>",{"headings":245,"localImagePaths":261,"remoteImagePaths":262,"frontmatter":263,"imagePaths":264},[246,249,252,255,258],{"depth":17,"slug":247,"text":248},"core","Core",{"depth":17,"slug":250,"text":251},"model-management","Model 
Management",{"depth":17,"slug":253,"text":254},"alphaxiv","AlphaXiv",{"depth":17,"slug":256,"text":257},"utilities","Utilities",{"depth":17,"slug":259,"text":260},"flags","Flags",[],[],{"title":236,"description":237,"section":238,"order":94},[],"reference/cli-commands.md","getting-started/installation",{"id":266,"data":268,"body":271,"filePath":272,"digest":273,"rendered":274,"legacyId":294},{"title":269,"description":270,"section":137,"order":94},"Installation","Install Feynman and get started","## Requirements\n\n- Node.js 20 or later\n- npm 9 or later\n\n## Install\n\n```bash\nnpm install -g @companion-ai/feynman\n```\n\n## Verify\n\n```bash\nfeynman --version\n```\n\n## Local Development\n\nFor contributing or local development:\n\n```bash\ngit clone https://github.com/getcompanion-ai/feynman.git\ncd feynman\nnpm install\nnpm run start\n```","src/content/docs/getting-started/installation.md","781ab0278b8c1673",{"html":275,"metadata":276},"\u003Ch2 id=\"requirements\">Requirements\u003C/h2>\n\u003Cul>\n\u003Cli>Node.js 20 or later\u003C/li>\n\u003Cli>npm 9 or later\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"install\">Install\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">npm\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> install\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> -g\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> @companion-ai/feynman\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"verify\">Verify\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" 
data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --version\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"local-development\">Local Development\u003C/h2>\n\u003Cp>For contributing or local development:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">git\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> clone\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> https://github.com/getcompanion-ai/feynman.git\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\">cd\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> feynman\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">npm\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> install\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">npm\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> run\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> start\u003C/span>\u003C/span>\u003C/code>\u003C/pre>",{"headings":277,"localImagePaths":290,"remoteImagePaths":291,"frontmatter":292,"imagePaths":293},[278,281,284,287],{"depth":17,"slug":279,"text":280},"requirements","Requirements",{"depth":17,"slug":282,"text":283},"install","Install",{"depth":17,"slug":285,"text":286},"verify","Verify",{"depth":17,"slug":288,"text":289},"local-development","Local 
Development",[],[],{"title":269,"description":270,"section":137,"order":94},[],"getting-started/installation.md","reference/package-stack",{"id":295,"data":297,"body":300,"filePath":301,"digest":302,"rendered":303,"legacyId":311},{"title":298,"description":299,"section":238,"order":41},"Package Stack","Curated Pi packages bundled with Feynman","Curated Pi packages bundled with Feynman. The runtime package list lives in `.feynman/settings.json`.\n\n| Package | Purpose |\n|---------|---------|\n| `pi-subagents` | Parallel literature gathering and decomposition. |\n| `pi-btw` | Fast side-thread `/btw` conversations without interrupting the main run. |\n| `pi-docparser` | PDFs, Office docs, spreadsheets, and images. |\n| `pi-web-access` | Web, GitHub, PDF, and media access. |\n| `pi-markdown-preview` | Polished Markdown and LaTeX-heavy research writeups. |\n| `@walterra/pi-charts` | Charts and quantitative visualizations. |\n| `pi-generative-ui` | Interactive HTML-style widgets. |\n| `pi-mermaid` | Diagrams in the TUI. |\n| `@aliou/pi-processes` | Long-running experiments and log tails. |\n| `pi-zotero` | Citation-library workflows. |\n| `@kaiserlich-dev/pi-session-search` | Indexed session recall and summarize/resume UI. |\n| `pi-schedule-prompt` | Recurring and deferred research jobs. |\n| `@samfp/pi-memory` | Automatic preference and correction memory across sessions. |\n| `@tmustier/pi-ralph-wiggum` | Long-running agent loops for iterative development. |","src/content/docs/reference/package-stack.md","f8845d3da2b66045",{"html":304,"metadata":305},"\u003Cp>Curated Pi packages bundled with Feynman. 
The runtime package list lives in \u003Ccode>.feynman/settings.json\u003C/code>.\u003C/p>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Package\u003C/th>\u003Cth>Purpose\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>pi-subagents\u003C/code>\u003C/td>\u003Ctd>Parallel literature gathering and decomposition.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-btw\u003C/code>\u003C/td>\u003Ctd>Fast side-thread \u003Ccode>/btw\u003C/code> conversations without interrupting the main run.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-docparser\u003C/code>\u003C/td>\u003Ctd>PDFs, Office docs, spreadsheets, and images.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-web-access\u003C/code>\u003C/td>\u003Ctd>Web, GitHub, PDF, and media access.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-markdown-preview\u003C/code>\u003C/td>\u003Ctd>Polished Markdown and LaTeX-heavy research writeups.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@walterra/pi-charts\u003C/code>\u003C/td>\u003Ctd>Charts and quantitative visualizations.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-generative-ui\u003C/code>\u003C/td>\u003Ctd>Interactive HTML-style widgets.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-mermaid\u003C/code>\u003C/td>\u003Ctd>Diagrams in the TUI.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@aliou/pi-processes\u003C/code>\u003C/td>\u003Ctd>Long-running experiments and log tails.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-zotero\u003C/code>\u003C/td>\u003Ctd>Citation-library workflows.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@kaiserlich-dev/pi-session-search\u003C/code>\u003C/td>\u003Ctd>Indexed session recall and summarize/resume UI.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>pi-schedule-prompt\u003C/code>\u003C/td>\u003Ctd>Recurring and deferred research 
jobs.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@samfp/pi-memory\u003C/code>\u003C/td>\u003Ctd>Automatic preference and correction memory across sessions.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>@tmustier/pi-ralph-wiggum\u003C/code>\u003C/td>\u003Ctd>Long-running agent loops for iterative development.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>",{"headings":306,"localImagePaths":307,"remoteImagePaths":308,"frontmatter":309,"imagePaths":310},[],[],[],{"title":298,"description":299,"section":238,"order":41},[],"reference/package-stack.md","tools/session-search",{"id":312,"data":314,"body":317,"filePath":318,"digest":319,"rendered":320,"legacyId":337},{"title":315,"description":316,"section":74,"order":41},"Session Search","Search prior Feynman session transcripts","## Overview\n\nThe `session_search` tool recovers prior Feynman work from stored session transcripts. Useful for picking up previous research threads or finding past findings.\n\n## Usage\n\nInside the REPL:\n\n```\n/search\n```\n\nOr use the tool directly — Feynman will invoke `session_search` automatically when you reference prior work.\n\n## What it searches\n\n- Full session transcripts\n- Tool outputs and agent results\n- Generated artifacts and their content","src/content/docs/tools/session-search.md","7091dddc6969e581",{"html":321,"metadata":322},"\u003Ch2 id=\"overview\">Overview\u003C/h2>\n\u003Cp>The \u003Ccode>session_search\u003C/code> tool recovers prior Feynman work from stored session transcripts. 
Useful for picking up previous research threads or finding past findings.\u003C/p>\n\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cp>Inside the REPL:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/search\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Or use the tool directly — Feynman will invoke \u003Ccode>session_search\u003C/code> automatically when you reference prior work.\u003C/p>\n\u003Ch2 id=\"what-it-searches\">What it searches\u003C/h2>\n\u003Cul>\n\u003Cli>Full session transcripts\u003C/li>\n\u003Cli>Tool outputs and agent results\u003C/li>\n\u003Cli>Generated artifacts and their content\u003C/li>\n\u003C/ul>",{"headings":323,"localImagePaths":333,"remoteImagePaths":334,"frontmatter":335,"imagePaths":336},[324,327,330],{"depth":17,"slug":325,"text":326},"overview","Overview",{"depth":17,"slug":328,"text":329},"usage","Usage",{"depth":17,"slug":331,"text":332},"what-it-searches","What it searches",[],[],{"title":315,"description":316,"section":74,"order":41},[],"tools/session-search.md","tools/alphaxiv",{"id":338,"data":340,"body":342,"filePath":343,"digest":344,"rendered":345,"legacyId":367},{"title":254,"description":341,"section":74,"order":94},"Paper search and analysis tools","## Overview\n\nAlphaXiv powers Feynman's academic paper workflows. 
All tools require an alphaXiv account — sign in with `feynman alpha login`.\n\n## Tools\n\n### alpha_search\n\nPaper discovery with three search modes:\n\n- **semantic** — Meaning-based search across paper content\n- **keyword** — Traditional keyword matching\n- **agentic** — AI-powered search that interprets your intent\n\n### alpha_get_paper\n\nFetch a paper's report (structured summary) or full raw text by arXiv ID.\n\n### alpha_ask_paper\n\nAsk a targeted question about a specific paper. Returns an answer grounded in the paper's content.\n\n### alpha_annotate_paper\n\nAdd persistent local notes to a paper. Annotations are stored locally and persist across sessions.\n\n### alpha_list_annotations\n\nRecall all annotations across papers and sessions.\n\n### alpha_read_code\n\nRead source code from a paper's linked GitHub repository. Useful for auditing or replication planning.","src/content/docs/tools/alphaxiv.md","a6eeb2c5a98d3096",{"html":346,"metadata":347},"\u003Ch2 id=\"overview\">Overview\u003C/h2>\n\u003Cp>AlphaXiv powers Feynman’s academic paper workflows. All tools require an alphaXiv account — sign in with \u003Ccode>feynman alpha login\u003C/code>.\u003C/p>\n\u003Ch2 id=\"tools\">Tools\u003C/h2>\n\u003Ch3 id=\"alpha_search\">alpha_search\u003C/h3>\n\u003Cp>Paper discovery with three search modes:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>semantic\u003C/strong> — Meaning-based search across paper content\u003C/li>\n\u003Cli>\u003Cstrong>keyword\u003C/strong> — Traditional keyword matching\u003C/li>\n\u003Cli>\u003Cstrong>agentic\u003C/strong> — AI-powered search that interprets your intent\u003C/li>\n\u003C/ul>\n\u003Ch3 id=\"alpha_get_paper\">alpha_get_paper\u003C/h3>\n\u003Cp>Fetch a paper’s report (structured summary) or full raw text by arXiv ID.\u003C/p>\n\u003Ch3 id=\"alpha_ask_paper\">alpha_ask_paper\u003C/h3>\n\u003Cp>Ask a targeted question about a specific paper. 
Returns an answer grounded in the paper’s content.\u003C/p>\n\u003Ch3 id=\"alpha_annotate_paper\">alpha_annotate_paper\u003C/h3>\n\u003Cp>Add persistent local notes to a paper. Annotations are stored locally and persist across sessions.\u003C/p>\n\u003Ch3 id=\"alpha_list_annotations\">alpha_list_annotations\u003C/h3>\n\u003Cp>Recall all annotations across papers and sessions.\u003C/p>\n\u003Ch3 id=\"alpha_read_code\">alpha_read_code\u003C/h3>\n\u003Cp>Read source code from a paper’s linked GitHub repository. Useful for auditing or replication planning.\u003C/p>",{"headings":348,"localImagePaths":363,"remoteImagePaths":364,"frontmatter":365,"imagePaths":366},[349,350,351,353,355,357,359,361],{"depth":17,"slug":325,"text":326},{"depth":17,"slug":73,"text":74},{"depth":41,"slug":352,"text":352},"alpha_search",{"depth":41,"slug":354,"text":354},"alpha_get_paper",{"depth":41,"slug":356,"text":356},"alpha_ask_paper",{"depth":41,"slug":358,"text":358},"alpha_annotate_paper",{"depth":41,"slug":360,"text":360},"alpha_list_annotations",{"depth":41,"slug":362,"text":362},"alpha_read_code",[],[],{"title":254,"description":341,"section":74,"order":94},[],"tools/alphaxiv.md","tools/preview",{"id":368,"data":370,"body":373,"filePath":374,"digest":375,"rendered":376,"legacyId":390},{"title":371,"description":372,"section":74,"order":202},"Preview","Preview generated artifacts in browser or PDF","## Overview\n\nThe `preview_file` tool opens generated artifacts in your browser or PDF viewer.\n\n## Usage\n\nInside the REPL:\n\n```\n/preview\n```\n\nOr Feynman will suggest previewing when you generate artifacts that benefit from rendered output (Markdown with LaTeX, HTML reports, etc.).\n\n## Requirements\n\nPreview requires `pandoc` for PDF/HTML rendering. 
Install it with:\n\n```bash\nfeynman --setup-preview\n```\n\n## Supported formats\n\n- Markdown (with LaTeX math rendering)\n- HTML\n- PDF","src/content/docs/tools/preview.md","b42137d5e0befd83",{"html":377,"metadata":378},"\u003Ch2 id=\"overview\">Overview\u003C/h2>\n\u003Cp>The \u003Ccode>preview_file\u003C/code> tool opens generated artifacts in your browser or PDF viewer.\u003C/p>\n\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cp>Inside the REPL:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/preview\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>Or Feynman will suggest previewing when you generate artifacts that benefit from rendered output (Markdown with LaTeX, HTML reports, etc.).\u003C/p>\n\u003Ch2 id=\"requirements\">Requirements\u003C/h2>\n\u003Cp>Preview requires \u003Ccode>pandoc\u003C/code> for PDF/HTML rendering. 
Install it with:\u003C/p>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#005CC5;--shiki-dark:#79B8FF\"> --setup-preview\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"supported-formats\">Supported formats\u003C/h2>\n\u003Cul>\n\u003Cli>Markdown (with LaTeX math rendering)\u003C/li>\n\u003Cli>HTML\u003C/li>\n\u003Cli>PDF\u003C/li>\n\u003C/ul>",{"headings":379,"localImagePaths":386,"remoteImagePaths":387,"frontmatter":388,"imagePaths":389},[380,381,382,383],{"depth":17,"slug":325,"text":326},{"depth":17,"slug":328,"text":329},{"depth":17,"slug":279,"text":280},{"depth":17,"slug":384,"text":385},"supported-formats","Supported formats",[],[],{"title":371,"description":372,"section":74,"order":202},[],"tools/preview.md","reference/slash-commands",{"id":391,"data":393,"body":396,"filePath":397,"digest":398,"rendered":399,"legacyId":415},{"title":394,"description":395,"section":238,"order":17},"Slash Commands","Repo-owned REPL slash commands","This page documents the slash commands that Feynman owns in this repository: prompt templates from `prompts/` and extension commands from `extensions/research-tools/`.\n\nAdditional slash commands can appear at runtime from Pi core and bundled packages such as subagents, preview, session search, and scheduling. Use `/help` inside the REPL for the live command list instead of relying on a static copy of package-provided commands.\n\n## Research Workflows\n\n| Command | Description |\n| --- | --- |\n| `/deepresearch \u003Ctopic>` | Run a thorough, source-heavy investigation on a topic and produce a durable research brief with inline citations. 
|\n| `/lit \u003Ctopic>` | Run a literature review on a topic using paper search and primary-source synthesis. |\n| `/review \u003Cartifact>` | Simulate an AI research peer review with likely objections, severity, and a concrete revision plan. |\n| `/audit \u003Citem>` | Compare a paper's claims against its public codebase and identify mismatches, omissions, and reproducibility risks. |\n| `/replicate \u003Cpaper>` | Plan or execute a replication workflow for a paper, claim, or benchmark. |\n| `/compare \u003Ctopic>` | Compare multiple sources on a topic and produce a source-grounded matrix of agreements, disagreements, and confidence. |\n| `/draft \u003Ctopic>` | Turn research findings into a polished paper-style draft with equations, sections, and explicit claims. |\n| `/autoresearch \u003Cidea>` | Autonomous experiment loop — try ideas, measure results, keep what works, discard what doesn't, repeat. |\n| `/watch \u003Ctopic>` | Set up a recurring or deferred research watch on a topic, company, paper area, or product surface. |\n\n## Project & Session\n\n| Command | Description |\n| --- | --- |\n| `/log` | Write a durable session log with completed work, findings, open questions, and next steps. |\n| `/jobs` | Inspect active background research work, including running processes and scheduled follow-ups. |\n| `/help` | Show grouped Feynman commands and prefill the editor with a selected command. |\n| `/init` | Bootstrap AGENTS.md and session-log folders for a research project. |\n\n## Setup\n\n| Command | Description |\n| --- | --- |\n| `/alpha-login` | Sign in to alphaXiv from inside Feynman. |\n| `/alpha-status` | Show alphaXiv authentication status. |\n| `/alpha-logout` | Clear alphaXiv auth from inside Feynman. 
|","src/content/docs/reference/slash-commands.md","f548c25cfafb9aea",{"html":400,"metadata":401},"\u003Cp>This page documents the slash commands that Feynman owns in this repository: prompt templates from \u003Ccode>prompts/\u003C/code> and extension commands from \u003Ccode>extensions/research-tools/\u003C/code>.\u003C/p>\n\u003Cp>Additional slash commands can appear at runtime from Pi core and bundled packages such as subagents, preview, session search, and scheduling. Use \u003Ccode>/help\u003C/code> inside the REPL for the live command list instead of relying on a static copy of package-provided commands.\u003C/p>\n\u003Ch2 id=\"research-workflows\">Research Workflows\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>/deepresearch <topic>\u003C/code>\u003C/td>\u003Ctd>Run a thorough, source-heavy investigation on a topic and produce a durable research brief with inline citations.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/lit <topic>\u003C/code>\u003C/td>\u003Ctd>Run a literature review on a topic using paper search and primary-source synthesis.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/review <artifact>\u003C/code>\u003C/td>\u003Ctd>Simulate an AI research peer review with likely objections, severity, and a concrete revision plan.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/audit <item>\u003C/code>\u003C/td>\u003Ctd>Compare a paper’s claims against its public codebase and identify mismatches, omissions, and reproducibility risks.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/replicate <paper>\u003C/code>\u003C/td>\u003Ctd>Plan or execute a replication workflow for a paper, claim, or benchmark.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/compare <topic>\u003C/code>\u003C/td>\u003Ctd>Compare multiple sources on a topic and produce a 
source-grounded matrix of agreements, disagreements, and confidence.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/draft <topic>\u003C/code>\u003C/td>\u003Ctd>Turn research findings into a polished paper-style draft with equations, sections, and explicit claims.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/autoresearch <idea>\u003C/code>\u003C/td>\u003Ctd>Autonomous experiment loop — try ideas, measure results, keep what works, discard what doesn’t, repeat.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/watch <topic>\u003C/code>\u003C/td>\u003Ctd>Set up a recurring or deferred research watch on a topic, company, paper area, or product surface.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"project--session\">Project & Session\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>/log\u003C/code>\u003C/td>\u003Ctd>Write a durable session log with completed work, findings, open questions, and next steps.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/jobs\u003C/code>\u003C/td>\u003Ctd>Inspect active background research work, including running processes and scheduled follow-ups.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/help\u003C/code>\u003C/td>\u003Ctd>Show grouped Feynman commands and prefill the editor with a selected command.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/init\u003C/code>\u003C/td>\u003Ctd>Bootstrap AGENTS.md and session-log folders for a research project.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"setup\">Setup\u003C/h2>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Command\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>/alpha-login\u003C/code>\u003C/td>\u003Ctd>Sign in to alphaXiv from inside 
Feynman.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/alpha-status\u003C/code>\u003C/td>\u003Ctd>Show alphaXiv authentication status.\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>/alpha-logout\u003C/code>\u003C/td>\u003Ctd>Clear alphaXiv auth from inside Feynman.\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>",{"headings":402,"localImagePaths":411,"remoteImagePaths":412,"frontmatter":413,"imagePaths":414},[403,406,409],{"depth":17,"slug":404,"text":405},"research-workflows","Research Workflows",{"depth":17,"slug":407,"text":408},"project--session","Project & Session",{"depth":17,"slug":410,"text":135},"setup",[],[],{"title":394,"description":395,"section":238,"order":17},[],"reference/slash-commands.md","tools/web-search",{"id":416,"data":418,"body":421,"filePath":422,"digest":423,"rendered":424,"legacyId":444},{"title":419,"description":420,"section":74,"order":17},"Web Search","Web search routing and configuration","## Routing modes\n\nFeynman supports three web search backends:\n\n| Mode | Description |\n|------|-------------|\n| `auto` | Prefer Perplexity when configured, fall back to Gemini |\n| `perplexity` | Force Perplexity Sonar |\n| `gemini` | Force Gemini (default) |\n\n## Default behavior\n\nThe default path is zero-config Gemini Browser via a signed-in Chromium profile. 
No API keys required.\n\n## Check current config\n\n```bash\nfeynman search status\n```\n\n## Advanced configuration\n\nEdit `~/.feynman/web-search.json` directly to set:\n\n- Gemini API keys\n- Perplexity API keys\n- Custom routing preferences","src/content/docs/tools/web-search.md","b2963fe8f7ae5dce",{"html":425,"metadata":426},"\u003Ch2 id=\"routing-modes\">Routing modes\u003C/h2>\n\u003Cp>Feynman supports three web search backends:\u003C/p>\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\u003Ctable>\u003Cthead>\u003Ctr>\u003Cth>Mode\u003C/th>\u003Cth>Description\u003C/th>\u003C/tr>\u003C/thead>\u003Ctbody>\u003Ctr>\u003Ctd>\u003Ccode>auto\u003C/code>\u003C/td>\u003Ctd>Prefer Perplexity when configured, fall back to Gemini\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>perplexity\u003C/code>\u003C/td>\u003Ctd>Force Perplexity Sonar\u003C/td>\u003C/tr>\u003Ctr>\u003Ctd>\u003Ccode>gemini\u003C/code>\u003C/td>\u003Ctd>Force Gemini (default)\u003C/td>\u003C/tr>\u003C/tbody>\u003C/table>\n\u003Ch2 id=\"default-behavior\">Default behavior\u003C/h2>\n\u003Cp>The default path is zero-config Gemini Browser via a signed-in Chromium profile. 
No API keys required.\u003C/p>\n\u003Ch2 id=\"check-current-config\">Check current config\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"bash\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan style=\"color:#6F42C1;--shiki-dark:#B392F0\">feynman\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> search\u003C/span>\u003Cspan style=\"color:#032F62;--shiki-dark:#9ECBFF\"> status\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"advanced-configuration\">Advanced configuration\u003C/h2>\n\u003Cp>Edit \u003Ccode>~/.feynman/web-search.json\u003C/code> directly to set:\u003C/p>\n\u003Cul>\n\u003Cli>Gemini API keys\u003C/li>\n\u003Cli>Perplexity API keys\u003C/li>\n\u003Cli>Custom routing preferences\u003C/li>\n\u003C/ul>",{"headings":427,"localImagePaths":440,"remoteImagePaths":441,"frontmatter":442,"imagePaths":443},[428,431,434,437],{"depth":17,"slug":429,"text":430},"routing-modes","Routing modes",{"depth":17,"slug":432,"text":433},"default-behavior","Default behavior",{"depth":17,"slug":435,"text":436},"check-current-config","Check current config",{"depth":17,"slug":438,"text":439},"advanced-configuration","Advanced configuration",[],[],{"title":419,"description":420,"section":74,"order":17},[],"tools/web-search.md","workflows/autoresearch",{"id":445,"data":447,"body":452,"filePath":453,"digest":454,"rendered":455,"legacyId":476},{"title":448,"description":449,"section":450,"order":451},"Autoresearch","Autonomous experiment optimization loop","Workflows",8,"## Usage\n\n```\n/autoresearch \u003Cidea>\n```\n\n## What it does\n\nRuns an autonomous experiment loop:\n\n1. **Edit** — Modify code or configuration\n2. **Commit** — Save the change\n3. **Benchmark** — Run evaluation\n4. **Evaluate** — Compare against baseline\n5. 
**Keep or revert** — Persist improvements, roll back regressions\n6. **Repeat** — Continue until the target is hit\n\n## Tracking\n\nMetrics are tracked in:\n\n- `autoresearch.md` — Human-readable progress log\n- `autoresearch.jsonl` — Machine-readable metrics over time\n\n## Controls\n\n```\n/autoresearch \u003Cidea> # start or resume\n/autoresearch off # stop, keep data\n/autoresearch clear # delete all state, start fresh\n```\n\n## Example\n\n```\n/autoresearch optimize the learning rate schedule for better convergence\n```","src/content/docs/workflows/autoresearch.md","94559e14e60edcad",{"html":456,"metadata":457},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/autoresearch <idea>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Runs an autonomous experiment loop:\u003C/p>\n\u003Col>\n\u003Cli>\u003Cstrong>Edit\u003C/strong> — Modify code or configuration\u003C/li>\n\u003Cli>\u003Cstrong>Commit\u003C/strong> — Save the change\u003C/li>\n\u003Cli>\u003Cstrong>Benchmark\u003C/strong> — Run evaluation\u003C/li>\n\u003Cli>\u003Cstrong>Evaluate\u003C/strong> — Compare against baseline\u003C/li>\n\u003Cli>\u003Cstrong>Keep or revert\u003C/strong> — Persist improvements, roll back regressions\u003C/li>\n\u003Cli>\u003Cstrong>Repeat\u003C/strong> — Continue until the target is hit\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"tracking\">Tracking\u003C/h2>\n\u003Cp>Metrics are tracked in:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Ccode>autoresearch.md\u003C/code> — Human-readable progress log\u003C/li>\n\u003Cli>\u003Ccode>autoresearch.jsonl\u003C/code> — Machine-readable metrics over time\u003C/li>\n\u003C/ul>\n\u003Ch2 
id=\"controls\">Controls\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/autoresearch <idea> # start or resume\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan>/autoresearch off # stop, keep data\u003C/span>\u003C/span>\n\u003Cspan class=\"line\">\u003Cspan>/autoresearch clear # delete all state, start fresh\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/autoresearch optimize the learning rate schedule for better convergence\u003C/span>\u003C/span>\u003C/code>\u003C/pre>",{"headings":458,"localImagePaths":472,"remoteImagePaths":473,"frontmatter":474,"imagePaths":475},[459,460,463,466,469],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},"what-it-does","What it does",{"depth":17,"slug":464,"text":465},"tracking","Tracking",{"depth":17,"slug":467,"text":468},"controls","Controls",{"depth":17,"slug":470,"text":471},"example","Example",[],[],{"title":448,"description":449,"section":450,"order":451},[],"workflows/autoresearch.md","workflows/audit",{"id":477,"data":479,"body":482,"filePath":483,"digest":484,"rendered":485,"legacyId":502},{"title":480,"description":481,"section":450,"order":202},"Code Audit","Compare paper claims against public codebases","## Usage\n\n```\n/audit \u003Citem>\n```\n\n## What it does\n\nCompares claims made in a paper against its public codebase. 
Surfaces mismatches, missing experiments, and reproducibility risks.\n\n## What it checks\n\n- Do the reported hyperparameters match the code?\n- Are all claimed experiments present in the repository?\n- Does the training loop match the described methodology?\n- Are there undocumented preprocessing steps?\n- Do evaluation metrics match the paper's claims?\n\n## Example\n\n```\n/audit 2401.12345\n```\n\n## Output\n\nAn audit report with:\n\n- Claim-by-claim verification\n- Identified mismatches\n- Missing components\n- Reproducibility risk assessment","src/content/docs/workflows/audit.md","58f5516850bcd065",{"html":486,"metadata":487},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/audit <item>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Compares claims made in a paper against its public codebase. 
Surfaces mismatches, missing experiments, and reproducibility risks.\u003C/p>\n\u003Ch2 id=\"what-it-checks\">What it checks\u003C/h2>\n\u003Cul>\n\u003Cli>Do the reported hyperparameters match the code?\u003C/li>\n\u003Cli>Are all claimed experiments present in the repository?\u003C/li>\n\u003Cli>Does the training loop match the described methodology?\u003C/li>\n\u003Cli>Are there undocumented preprocessing steps?\u003C/li>\n\u003Cli>Do evaluation metrics match the paper’s claims?\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/audit 2401.12345\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>An audit report with:\u003C/p>\n\u003Cul>\n\u003Cli>Claim-by-claim verification\u003C/li>\n\u003Cli>Identified mismatches\u003C/li>\n\u003Cli>Missing components\u003C/li>\n\u003Cli>Reproducibility risk assessment\u003C/li>\n\u003C/ul>",{"headings":488,"localImagePaths":498,"remoteImagePaths":499,"frontmatter":500,"imagePaths":501},[489,490,491,494,495],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":492,"text":493},"what-it-checks","What it checks",{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},"output","Output",[],[],{"title":480,"description":481,"section":450,"order":202},[],"workflows/audit.md","workflows/compare",{"id":503,"data":505,"body":509,"filePath":510,"digest":511,"rendered":512,"legacyId":524},{"title":506,"description":507,"section":450,"order":508},"Source Comparison","Compare multiple sources with agreement/disagreement matrix",6,"## Usage\n\n```\n/compare \u003Ctopic>\n```\n\n## What it does\n\nCompares multiple sources on a topic. 
Builds an agreement/disagreement matrix showing where sources align and where they conflict.\n\n## Example\n\n```\n/compare approaches to constitutional AI training\n```\n\n## Output\n\n- Source-by-source breakdown\n- Agreement/disagreement matrix\n- Synthesis of key differences\n- Assessment of which positions have stronger evidence","src/content/docs/workflows/compare.md","669d1dce304b191f",{"html":513,"metadata":514},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/compare <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Compares multiple sources on a topic. Builds an agreement/disagreement matrix showing where sources align and where they conflict.\u003C/p>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/compare approaches to constitutional AI training\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cul>\n\u003Cli>Source-by-source breakdown\u003C/li>\n\u003Cli>Agreement/disagreement matrix\u003C/li>\n\u003Cli>Synthesis of key differences\u003C/li>\n\u003Cli>Assessment of which positions have stronger 
evidence\u003C/li>\n\u003C/ul>",{"headings":515,"localImagePaths":520,"remoteImagePaths":521,"frontmatter":522,"imagePaths":523},[516,517,518,519],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":506,"description":507,"section":450,"order":508},[],"workflows/compare.md","workflows/deep-research",{"id":525,"data":527,"body":530,"filePath":531,"digest":532,"rendered":533,"legacyId":545},{"title":528,"description":529,"section":450,"order":94},"Deep Research","Thorough source-heavy investigation with parallel agents","## Usage\n\n```\n/deepresearch \u003Ctopic>\n```\n\n## What it does\n\nDeep research runs a thorough, source-heavy investigation. It plans the research scope, delegates to parallel researcher agents, synthesizes findings, and adds inline citations.\n\nThe workflow follows these steps:\n\n1. **Plan** — Clarify the research question and identify search strategy\n2. **Delegate** — Spawn parallel researcher agents to gather evidence from different source types (papers, web, repos)\n3. **Synthesize** — Merge findings, resolve contradictions, identify gaps\n4. **Cite** — Add inline citations and verify all source URLs\n5. 
**Deliver** — Write a durable research brief to `outputs/`\n\n## Example\n\n```\n/deepresearch transformer scaling laws and their implications for compute-optimal training\n```\n\n## Output\n\nProduces a structured research brief with:\n\n- Executive summary\n- Key findings organized by theme\n- Evidence tables with source links\n- Open questions and suggested next steps\n- Numbered sources section with direct URLs","src/content/docs/workflows/deep-research.md","5a1ed5d3fd031659",{"html":534,"metadata":535},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/deepresearch <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Deep research runs a thorough, source-heavy investigation. 
It plans the research scope, delegates to parallel researcher agents, synthesizes findings, and adds inline citations.\u003C/p>\n\u003Cp>The workflow follows these steps:\u003C/p>\n\u003Col>\n\u003Cli>\u003Cstrong>Plan\u003C/strong> — Clarify the research question and identify search strategy\u003C/li>\n\u003Cli>\u003Cstrong>Delegate\u003C/strong> — Spawn parallel researcher agents to gather evidence from different source types (papers, web, repos)\u003C/li>\n\u003Cli>\u003Cstrong>Synthesize\u003C/strong> — Merge findings, resolve contradictions, identify gaps\u003C/li>\n\u003Cli>\u003Cstrong>Cite\u003C/strong> — Add inline citations and verify all source URLs\u003C/li>\n\u003Cli>\u003Cstrong>Deliver\u003C/strong> — Write a durable research brief to \u003Ccode>outputs/\u003C/code>\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/deepresearch transformer scaling laws and their implications for compute-optimal training\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>Produces a structured research brief with:\u003C/p>\n\u003Cul>\n\u003Cli>Executive summary\u003C/li>\n\u003Cli>Key findings organized by theme\u003C/li>\n\u003Cli>Evidence tables with source links\u003C/li>\n\u003Cli>Open questions and suggested next steps\u003C/li>\n\u003Cli>Numbered sources section with direct 
URLs\u003C/li>\n\u003C/ul>",{"headings":536,"localImagePaths":541,"remoteImagePaths":542,"frontmatter":543,"imagePaths":544},[537,538,539,540],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":528,"description":529,"section":450,"order":94},[],"workflows/deep-research.md","workflows/draft",{"id":546,"data":548,"body":552,"filePath":553,"digest":554,"rendered":555,"legacyId":569},{"title":549,"description":550,"section":450,"order":551},"Draft Writing","Paper-style draft generation from research findings",7,"## Usage\n\n```\n/draft \u003Ctopic>\n```\n\n## What it does\n\nProduces a paper-style draft with structured sections. Writes to `papers/`.\n\n## Structure\n\nThe generated draft includes:\n\n- Title\n- Abstract\n- Introduction / Background\n- Method or Approach\n- Evidence and Analysis\n- Limitations\n- Conclusion\n- Sources\n\n## Example\n\n```\n/draft survey of differentiable physics simulators\n```\n\nThe writer agent works only from supplied evidence — it never fabricates content. If evidence is insufficient, it explicitly notes the gaps.","src/content/docs/workflows/draft.md","5549e489883745ea",{"html":556,"metadata":557},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/draft <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Produces a paper-style draft with structured sections. 
Writes to \u003Ccode>papers/\u003C/code>.\u003C/p>\n\u003Ch2 id=\"structure\">Structure\u003C/h2>\n\u003Cp>The generated draft includes:\u003C/p>\n\u003Cul>\n\u003Cli>Title\u003C/li>\n\u003Cli>Abstract\u003C/li>\n\u003Cli>Introduction / Background\u003C/li>\n\u003Cli>Method or Approach\u003C/li>\n\u003Cli>Evidence and Analysis\u003C/li>\n\u003Cli>Limitations\u003C/li>\n\u003Cli>Conclusion\u003C/li>\n\u003Cli>Sources\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/draft survey of differentiable physics simulators\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Cp>The writer agent works only from supplied evidence — it never fabricates content. If evidence is insufficient, it explicitly notes the gaps.\u003C/p>",{"headings":558,"localImagePaths":565,"remoteImagePaths":566,"frontmatter":567,"imagePaths":568},[559,560,561,564],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":562,"text":563},"structure","Structure",{"depth":17,"slug":470,"text":471},[],[],{"title":549,"description":550,"section":450,"order":551},[],"workflows/draft.md","workflows/replication",{"id":570,"data":572,"body":576,"filePath":577,"digest":578,"rendered":579,"legacyId":591},{"title":573,"description":574,"section":450,"order":575},"Replication","Plan replications of papers and claims",5,"## Usage\n\n```\n/replicate \u003Cpaper or claim>\n```\n\n## What it does\n\nExtracts key implementation details from a paper, identifies what's needed to replicate the results, and asks where to run before executing anything.\n\nBefore running code, Feynman asks you to choose an execution environment:\n\n- **Local** — run in the current working directory\n- **Virtual 
environment** — create an isolated venv/conda env first\n- **Docker** — run experiment code inside an isolated Docker container\n- **Cloud** — delegate to a remote Agent Computer machine\n- **Plan only** — produce the replication plan without executing\n\n## Example\n\n```\n/replicate \"chain-of-thought prompting improves math reasoning\"\n```\n\n## Output\n\nA replication plan covering:\n\n- Key claims to verify\n- Required resources (compute, data, models)\n- Implementation details extracted from the paper\n- Potential pitfalls and underspecified details\n- Step-by-step replication procedure\n- Success criteria\n\nIf an execution environment is selected, also produces runnable scripts and captured results.","src/content/docs/workflows/replication.md","838b8fa26ebbe08d",{"html":580,"metadata":581},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/replicate <paper or claim>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Extracts key implementation details from a paper, identifies what’s needed to replicate the results, and asks where to run before executing anything.\u003C/p>\n\u003Cp>Before running code, Feynman asks you to choose an execution environment:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>Local\u003C/strong> — run in the current working directory\u003C/li>\n\u003Cli>\u003Cstrong>Virtual environment\u003C/strong> — create an isolated venv/conda env first\u003C/li>\n\u003Cli>\u003Cstrong>Docker\u003C/strong> — run experiment code inside an isolated Docker container\u003C/li>\n\u003Cli>\u003Cstrong>Cloud\u003C/strong> — delegate to a remote Agent Computer machine\u003C/li>\n\u003Cli>\u003Cstrong>Plan only\u003C/strong> — produce the 
replication plan without executing\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/replicate \"chain-of-thought prompting improves math reasoning\"\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>A replication plan covering:\u003C/p>\n\u003Cul>\n\u003Cli>Key claims to verify\u003C/li>\n\u003Cli>Required resources (compute, data, models)\u003C/li>\n\u003Cli>Implementation details extracted from the paper\u003C/li>\n\u003Cli>Potential pitfalls and underspecified details\u003C/li>\n\u003Cli>Step-by-step replication procedure\u003C/li>\n\u003Cli>Success criteria\u003C/li>\n\u003C/ul>\n\u003Cp>If an execution environment is selected, also produces runnable scripts and captured results.\u003C/p>",{"headings":582,"localImagePaths":587,"remoteImagePaths":588,"frontmatter":589,"imagePaths":590},[583,584,585,586],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":573,"description":574,"section":450,"order":575},[],"workflows/replication.md","workflows/review",{"id":592,"data":594,"body":597,"filePath":598,"digest":599,"rendered":600,"legacyId":615},{"title":595,"description":596,"section":450,"order":41},"Peer Review","Simulated peer review with severity-graded feedback","## Usage\n\n```\n/review \u003Cartifact>\n```\n\n## What it does\n\nSimulates a tough-but-fair peer review for AI research artifacts. 
Evaluates novelty, empirical rigor, baselines, ablations, and reproducibility.\n\nThe reviewer agent identifies:\n\n- Weak baselines\n- Missing ablations\n- Evaluation mismatches\n- Benchmark leakage\n- Under-specified implementation details\n\n## Severity levels\n\nFeedback is graded by severity:\n\n- **FATAL** — Fundamental issues that invalidate the claims\n- **MAJOR** — Significant problems that need addressing\n- **MINOR** — Small improvements or clarifications\n\n## Example\n\n```\n/review outputs/scaling-laws-brief.md\n```\n\n## Output\n\nStructured review with:\n\n- Summary of the work\n- Strengths\n- Weaknesses (severity-graded)\n- Questions for the authors\n- Verdict (accept / revise / reject)\n- Revision plan","src/content/docs/workflows/review.md","5a1cfb4bdd03056c",{"html":601,"metadata":602},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/review <artifact>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Simulates a tough-but-fair peer review for AI research artifacts. 
Evaluates novelty, empirical rigor, baselines, ablations, and reproducibility.\u003C/p>\n\u003Cp>The reviewer agent identifies:\u003C/p>\n\u003Cul>\n\u003Cli>Weak baselines\u003C/li>\n\u003Cli>Missing ablations\u003C/li>\n\u003Cli>Evaluation mismatches\u003C/li>\n\u003Cli>Benchmark leakage\u003C/li>\n\u003Cli>Under-specified implementation details\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"severity-levels\">Severity levels\u003C/h2>\n\u003Cp>Feedback is graded by severity:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>FATAL\u003C/strong> — Fundamental issues that invalidate the claims\u003C/li>\n\u003Cli>\u003Cstrong>MAJOR\u003C/strong> — Significant problems that need addressing\u003C/li>\n\u003Cli>\u003Cstrong>MINOR\u003C/strong> — Small improvements or clarifications\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/review outputs/scaling-laws-brief.md\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>Structured review with:\u003C/p>\n\u003Cul>\n\u003Cli>Summary of the work\u003C/li>\n\u003Cli>Strengths\u003C/li>\n\u003Cli>Weaknesses (severity-graded)\u003C/li>\n\u003Cli>Questions for the authors\u003C/li>\n\u003Cli>Verdict (accept / revise / reject)\u003C/li>\n\u003Cli>Revision plan\u003C/li>\n\u003C/ul>",{"headings":603,"localImagePaths":611,"remoteImagePaths":612,"frontmatter":613,"imagePaths":614},[604,605,606,609,610],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":607,"text":608},"severity-levels","Severity 
levels",{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":595,"description":596,"section":450,"order":41},[],"workflows/review.md","workflows/literature-review",{"id":616,"data":618,"body":621,"filePath":622,"digest":623,"rendered":624,"legacyId":636},{"title":619,"description":620,"section":450,"order":17},"Literature Review","Map consensus, disagreements, and open questions","## Usage\n\n```\n/lit \u003Ctopic>\n```\n\n## What it does\n\nRuns a structured literature review that searches across academic papers and web sources. Explicitly separates consensus findings from disagreements and open questions.\n\n## Example\n\n```\n/lit multimodal reasoning benchmarks for large language models\n```\n\n## Output\n\nA structured review covering:\n\n- **Consensus** — What the field agrees on\n- **Disagreements** — Where sources conflict\n- **Open questions** — What remains unresolved\n- **Sources** — Direct links to all referenced papers and articles","src/content/docs/workflows/literature-review.md","7def25e86b0bdc22",{"html":625,"metadata":626},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/lit <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Runs a structured literature review that searches across academic papers and web sources. 
Explicitly separates consensus findings from disagreements and open questions.\u003C/p>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/lit multimodal reasoning benchmarks for large language models\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"output\">Output\u003C/h2>\n\u003Cp>A structured review covering:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>Consensus\u003C/strong> — What the field agrees on\u003C/li>\n\u003Cli>\u003Cstrong>Disagreements\u003C/strong> — Where sources conflict\u003C/li>\n\u003Cli>\u003Cstrong>Open questions\u003C/strong> — What remains unresolved\u003C/li>\n\u003Cli>\u003Cstrong>Sources\u003C/strong> — Direct links to all referenced papers and articles\u003C/li>\n\u003C/ul>",{"headings":627,"localImagePaths":632,"remoteImagePaths":633,"frontmatter":634,"imagePaths":635},[628,629,630,631],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":496,"text":497},[],[],{"title":619,"description":620,"section":450,"order":17},[],"workflows/literature-review.md","workflows/watch",{"id":637,"data":639,"body":643,"filePath":644,"digest":645,"rendered":646,"legacyId":660},{"title":640,"description":641,"section":450,"order":642},"Watch","Recurring research monitoring",9,"## Usage\n\n```\n/watch \u003Ctopic>\n```\n\n## What it does\n\nSchedules a recurring research watch. Sets a baseline of current knowledge and defines what constitutes a meaningful change worth reporting.\n\n## Example\n\n```\n/watch new papers on test-time compute scaling\n```\n\n## How it works\n\n1. Feynman establishes a baseline by surveying current sources\n2. Defines change signals (new papers, updated results, new repos)\n3. 
Schedules periodic checks via `pi-schedule-prompt`\n4. Reports only when meaningful changes are detected","src/content/docs/workflows/watch.md","b24ebad68d8b9736",{"html":647,"metadata":648},"\u003Ch2 id=\"usage\">Usage\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/watch <topic>\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"what-it-does\">What it does\u003C/h2>\n\u003Cp>Schedules a recurring research watch. Sets a baseline of current knowledge and defines what constitutes a meaningful change worth reporting.\u003C/p>\n\u003Ch2 id=\"example\">Example\u003C/h2>\n\u003Cpre class=\"astro-code astro-code-themes github-light github-dark\" style=\"background-color:#fff;--shiki-dark-bg:#24292e;color:#24292e;--shiki-dark:#e1e4e8; overflow-x: auto;\" tabindex=\"0\" data-language=\"plaintext\">\u003Ccode>\u003Cspan class=\"line\">\u003Cspan>/watch new papers on test-time compute scaling\u003C/span>\u003C/span>\u003C/code>\u003C/pre>\n\u003Ch2 id=\"how-it-works\">How it works\u003C/h2>\n\u003Col>\n\u003Cli>Feynman establishes a baseline by surveying current sources\u003C/li>\n\u003Cli>Defines change signals (new papers, updated results, new repos)\u003C/li>\n\u003Cli>Schedules periodic checks via \u003Ccode>pi-schedule-prompt\u003C/code>\u003C/li>\n\u003Cli>Reports only when meaningful changes are detected\u003C/li>\n\u003C/ol>",{"headings":649,"localImagePaths":656,"remoteImagePaths":657,"frontmatter":658,"imagePaths":659},[650,651,652,653],{"depth":17,"slug":328,"text":329},{"depth":17,"slug":461,"text":462},{"depth":17,"slug":470,"text":471},{"depth":17,"slug":654,"text":655},"how-it-works","How it 
works",[],[],{"title":640,"description":641,"section":450,"order":642},[],"workflows/watch.md","agents/verifier",{"id":661,"data":663,"body":666,"filePath":667,"digest":668,"rendered":669,"legacyId":688},{"title":664,"description":665,"section":16,"order":202},"Verifier","Post-process a draft to add inline citations and verify every source URL.","## Source\n\nGenerated from `.feynman/agents/verifier.md`. Edit that prompt file, not this docs page.\n\n## Role\n\nPost-process a draft to add inline citations and verify every source URL.\n\n## Tools\n\n`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`\n\n## Default Output\n\n`cited.md`\n\nYou receive a draft document and the research files it was built from. Your job is to:\n\n1. **Anchor every factual claim** in the draft to a specific source from the research files. Insert inline citations `[1]`, `[2]`, etc. directly after each claim.\n2. **Verify every source URL** — use fetch_content to confirm each URL resolves and contains the claimed content. Flag dead links.\n3. **Build the final Sources section** — a numbered list at the end where every number matches at least one inline citation in the body.\n4. **Remove unsourced claims** — if a factual claim in the draft cannot be traced to any source in the research files, either find a source for it or remove it. Do not leave unsourced factual claims.\n\n## Citation rules\n\n- Every factual claim gets at least one citation: \"Transformers achieve 94.2% on MMLU [3].\"\n- Multiple sources for one claim: \"Recent work questions benchmark validity [7, 12].\"\n- No orphan citations — every `[N]` in the body must appear in Sources.\n- No orphan sources — every entry in Sources must be cited at least once.\n- Hedged or opinion statements do not need citations.\n- When multiple research files use different numbering, merge into a single unified sequence starting from [1]. 
Deduplicate sources that appear in multiple files.\n\n## Source verification\n\nFor each source URL:\n- **Live:** keep as-is.\n- **Dead/404:** search for an alternative URL (archived version, mirror, updated link). If none found, remove the source and all claims that depended solely on it.\n- **Redirects to unrelated content:** treat as dead.\n\n## Output contract\n- Save to the output file (default: `cited.md`).\n- The output is the complete final document — same structure as the input draft, but with inline citations added throughout and a verified Sources section.\n- Do not change the substance or structure of the draft. Only add citations and fix dead sources.","src/content/docs/agents/verifier.md","efc12a91a847824e",{"html":670,"metadata":671},"\u003Ch2 id=\"source\">Source\u003C/h2>\n\u003Cp>Generated from \u003Ccode>.feynman/agents/verifier.md\u003C/code>. Edit that prompt file, not this docs page.\u003C/p>\n\u003Ch2 id=\"role\">Role\u003C/h2>\n\u003Cp>Post-process a draft to add inline citations and verify every source URL.\u003C/p>\n\u003Ch2 id=\"tools\">Tools\u003C/h2>\n\u003Cp>\u003Ccode>read\u003C/code>, \u003Ccode>bash\u003C/code>, \u003Ccode>grep\u003C/code>, \u003Ccode>find\u003C/code>, \u003Ccode>ls\u003C/code>, \u003Ccode>write\u003C/code>, \u003Ccode>edit\u003C/code>\u003C/p>\n\u003Ch2 id=\"default-output\">Default Output\u003C/h2>\n\u003Cp>\u003Ccode>cited.md\u003C/code>\u003C/p>\n\u003Cp>You receive a draft document and the research files it was built from. Your job is to:\u003C/p>\n\u003Col>\n\u003Cli>\u003Cstrong>Anchor every factual claim\u003C/strong> in the draft to a specific source from the research files. Insert inline citations \u003Ccode>[1]\u003C/code>, \u003Ccode>[2]\u003C/code>, etc. directly after each claim.\u003C/li>\n\u003Cli>\u003Cstrong>Verify every source URL\u003C/strong> — use fetch_content to confirm each URL resolves and contains the claimed content. 
Flag dead links.\u003C/li>\n\u003Cli>\u003Cstrong>Build the final Sources section\u003C/strong> — a numbered list at the end where every number matches at least one inline citation in the body.\u003C/li>\n\u003Cli>\u003Cstrong>Remove unsourced claims\u003C/strong> — if a factual claim in the draft cannot be traced to any source in the research files, either find a source for it or remove it. Do not leave unsourced factual claims.\u003C/li>\n\u003C/ol>\n\u003Ch2 id=\"citation-rules\">Citation rules\u003C/h2>\n\u003Cul>\n\u003Cli>Every factual claim gets at least one citation: “Transformers achieve 94.2% on MMLU [3].”\u003C/li>\n\u003Cli>Multiple sources for one claim: “Recent work questions benchmark validity [7, 12].”\u003C/li>\n\u003Cli>No orphan citations — every \u003Ccode>[N]\u003C/code> in the body must appear in Sources.\u003C/li>\n\u003Cli>No orphan sources — every entry in Sources must be cited at least once.\u003C/li>\n\u003Cli>Hedged or opinion statements do not need citations.\u003C/li>\n\u003Cli>When multiple research files use different numbering, merge into a single unified sequence starting from [1]. Deduplicate sources that appear in multiple files.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"source-verification\">Source verification\u003C/h2>\n\u003Cp>For each source URL:\u003C/p>\n\u003Cul>\n\u003Cli>\u003Cstrong>Live:\u003C/strong> keep as-is.\u003C/li>\n\u003Cli>\u003Cstrong>Dead/404:\u003C/strong> search for an alternative URL (archived version, mirror, updated link). 
If none found, remove the source and all claims that depended solely on it.\u003C/li>\n\u003Cli>\u003Cstrong>Redirects to unrelated content:\u003C/strong> treat as dead.\u003C/li>\n\u003C/ul>\n\u003Ch2 id=\"output-contract\">Output contract\u003C/h2>\n\u003Cul>\n\u003Cli>Save to the output file (default: \u003Ccode>cited.md\u003C/code>).\u003C/li>\n\u003Cli>The output is the complete final document — same structure as the input draft, but with inline citations added throughout and a verified Sources section.\u003C/li>\n\u003Cli>Do not change the substance or structure of the draft. Only add citations and fix dead sources.\u003C/li>\n\u003C/ul>",{"headings":672,"localImagePaths":684,"remoteImagePaths":685,"frontmatter":686,"imagePaths":687},[673,674,675,676,677,680,683],{"depth":17,"slug":26,"text":27},{"depth":17,"slug":29,"text":30},{"depth":17,"slug":73,"text":74},{"depth":17,"slug":32,"text":33},{"depth":17,"slug":678,"text":679},"citation-rules","Citation rules",{"depth":17,"slug":681,"text":682},"source-verification","Source verification",{"depth":17,"slug":51,"text":52},[],[],{"title":664,"description":665,"section":16,"order":202},[],"agents/verifier.md"] \ No newline at end of file diff --git a/website/src/content/docs/workflows/replication.md b/website/src/content/docs/workflows/replication.md index e932f11..eaa97d1 100644 --- a/website/src/content/docs/workflows/replication.md +++ b/website/src/content/docs/workflows/replication.md @@ -19,6 +19,7 @@ Before running code, Feynman asks you to choose an execution environment: - **Local** — run in the current working directory - **Virtual environment** — create an isolated venv/conda env first +- **Docker** — run experiment code inside an isolated Docker container - **Cloud** — delegate to a remote Agent Computer machine - **Plan only** — produce the replication plan without executing diff --git a/website/src/pages/index.astro b/website/src/pages/index.astro index 2b043f9..e04ac7a 100644 --- 
a/website/src/pages/index.astro +++ b/website/src/pages/index.astro @@ -116,9 +116,13 @@ import Base from '../layouts/Base.astro';

Paper search, Q&A, code reading, persistent annotations

+
+ +

Isolated container execution for safe local experiments

+
-

Secure cloud execution for experiments and replications

+

Secure cloud execution for GPU workloads and long-running research

Web search