Rename .pi to .feynman, rename citation agent to verifier, add website, skills, and docs

- Rename project config dir from .pi/ to .feynman/ (Pi supports this via piConfig.configDir)
- Rename citation agent to verifier across all prompts, agents, skills, and docs
- Add website with homepage and 24 doc pages (Astro + Tailwind)
- Add skills for all workflows (deep-research, lit, review, audit, replicate, compare, draft, autoresearch, watch, jobs, session-log, agentcomputer)
- Add Pi-native prompt frontmatter (args, section, topLevelCli) and read at runtime
- Remove sync-docs generation layer — docs are standalone
- Remove metadata/prompts.mjs and metadata/packages.mjs — not needed at runtime
- Rewrite README and homepage copy
- Add environment selection to /replicate before executing
- Add prompts/delegate.md and AGENTS.md

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Advait Paliwal
2026-03-23 17:35:35 -07:00
parent 406d50b3ff
commit f5570b4e5a
98 changed files with 9886 additions and 298 deletions

View File

@@ -0,0 +1,33 @@
{
"$ref": "#/definitions/docs",
"definitions": {
"docs": {
"type": "object",
"properties": {
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"section": {
"type": "string"
},
"order": {
"type": "number"
},
"$schema": {
"type": "string"
}
},
"required": [
"title",
"description",
"section",
"order"
],
"additionalProperties": false
}
},
"$schema": "http://json-schema.org/draft-07/schema#"
}

View File

@@ -0,0 +1 @@
// Presumably an Astro-generated content-layer data store (empty placeholder,
// populated at build time) — TODO confirm; edits here are overwritten on regeneration.
export default new Map();

View File

@@ -0,0 +1 @@
// Presumably an Astro-generated content-layer data store (empty placeholder,
// populated at build time) — TODO confirm; edits here are overwritten on regeneration.
export default new Map();

209
website/.astro/content.d.ts vendored Normal file
View File

@@ -0,0 +1,209 @@
// Ambient types for `astro:content`: the shapes produced when rendering a
// Markdown content entry. NOTE(review): this file sits in the vendored,
// generated `website/.astro/` directory — regenerate with `astro sync`
// rather than editing by hand; consider gitignoring `.astro/`.
declare module 'astro:content' {
// Result of `render()` on a content entry: the component plus extracted metadata.
export interface RenderResult {
Content: import('astro/runtime/server/index.js').AstroComponentFactory;
headings: import('astro').MarkdownHeading[];
remarkPluginFrontmatter: Record<string, any>;
}
// Maps a file extension to its (async) render result type.
interface Render {
'.md': Promise<RenderResult>;
}
// Pre-rendered HTML attached to entries loaded via the content layer.
export interface RenderedContent {
html: string;
metadata?: {
imagePaths: Array<string>;
[key: string]: unknown;
};
}
}
// Generated query API surface for `astro:content` (getCollection/getEntry/etc.)
// together with the entry maps inferred from `src/content/config.js`.
// NOTE(review): generated by Astro — do not hand-edit; run `astro sync` instead.
declare module 'astro:content' {
type Flatten<T> = T extends { [K: string]: infer U } ? U : never;
export type CollectionKey = keyof AnyEntryMap;
export type CollectionEntry<C extends CollectionKey> = Flatten<AnyEntryMap[C]>;
export type ContentCollectionKey = keyof ContentEntryMap;
export type DataCollectionKey = keyof DataEntryMap;
type AllValuesOf<T> = T extends any ? T[keyof T] : never;
type ValidContentEntrySlug<C extends keyof ContentEntryMap> = AllValuesOf<
ContentEntryMap[C]
>['slug'];
// Reference shapes accepted by getEntry()/getEntries() and produced by reference().
export type ReferenceDataEntry<
C extends CollectionKey,
E extends keyof DataEntryMap[C] = string,
> = {
collection: C;
id: E;
};
export type ReferenceContentEntry<
C extends keyof ContentEntryMap,
E extends ValidContentEntrySlug<C> | (string & {}) = string,
> = {
collection: C;
slug: E;
};
export type ReferenceLiveEntry<C extends keyof LiveContentConfig['collections']> = {
collection: C;
id: string;
};
/** @deprecated Use `getEntry` instead. */
export function getEntryBySlug<
C extends keyof ContentEntryMap,
E extends ValidContentEntrySlug<C> | (string & {}),
>(
collection: C,
// Note that this has to accept a regular string too, for SSR
entrySlug: E,
): E extends ValidContentEntrySlug<C>
? Promise<CollectionEntry<C>>
: Promise<CollectionEntry<C> | undefined>;
/** @deprecated Use `getEntry` instead. */
export function getDataEntryById<C extends keyof DataEntryMap, E extends keyof DataEntryMap[C]>(
collection: C,
entryId: E,
): Promise<CollectionEntry<C>>;
// getCollection: the type-predicate overload narrows entries via the filter.
export function getCollection<C extends keyof AnyEntryMap, E extends CollectionEntry<C>>(
collection: C,
filter?: (entry: CollectionEntry<C>) => entry is E,
): Promise<E[]>;
export function getCollection<C extends keyof AnyEntryMap>(
collection: C,
filter?: (entry: CollectionEntry<C>) => unknown,
): Promise<CollectionEntry<C>[]>;
export function getLiveCollection<C extends keyof LiveContentConfig['collections']>(
collection: C,
filter?: LiveLoaderCollectionFilterType<C>,
): Promise<
import('astro').LiveDataCollectionResult<LiveLoaderDataType<C>, LiveLoaderErrorType<C>>
>;
// getEntry overloads: by reference object or by (collection, slug/id) pair,
// for both content and data collections.
export function getEntry<
C extends keyof ContentEntryMap,
E extends ValidContentEntrySlug<C> | (string & {}),
>(
entry: ReferenceContentEntry<C, E>,
): E extends ValidContentEntrySlug<C>
? Promise<CollectionEntry<C>>
: Promise<CollectionEntry<C> | undefined>;
export function getEntry<
C extends keyof DataEntryMap,
E extends keyof DataEntryMap[C] | (string & {}),
>(
entry: ReferenceDataEntry<C, E>,
): E extends keyof DataEntryMap[C]
? Promise<DataEntryMap[C][E]>
: Promise<CollectionEntry<C> | undefined>;
export function getEntry<
C extends keyof ContentEntryMap,
E extends ValidContentEntrySlug<C> | (string & {}),
>(
collection: C,
slug: E,
): E extends ValidContentEntrySlug<C>
? Promise<CollectionEntry<C>>
: Promise<CollectionEntry<C> | undefined>;
export function getEntry<
C extends keyof DataEntryMap,
E extends keyof DataEntryMap[C] | (string & {}),
>(
collection: C,
id: E,
): E extends keyof DataEntryMap[C]
? string extends keyof DataEntryMap[C]
? Promise<DataEntryMap[C][E]> | undefined
: Promise<DataEntryMap[C][E]>
: Promise<CollectionEntry<C> | undefined>;
export function getLiveEntry<C extends keyof LiveContentConfig['collections']>(
collection: C,
filter: string | LiveLoaderEntryFilterType<C>,
): Promise<import('astro').LiveDataEntryResult<LiveLoaderDataType<C>, LiveLoaderErrorType<C>>>;
/** Resolve an array of entry references from the same collection */
export function getEntries<C extends keyof ContentEntryMap>(
entries: ReferenceContentEntry<C, ValidContentEntrySlug<C>>[],
): Promise<CollectionEntry<C>[]>;
export function getEntries<C extends keyof DataEntryMap>(
entries: ReferenceDataEntry<C, keyof DataEntryMap[C]>[],
): Promise<CollectionEntry<C>[]>;
export function render<C extends keyof AnyEntryMap>(
entry: AnyEntryMap[C][string],
): Promise<RenderResult>;
// reference(): Zod transform used in schemas to reference another collection's entry.
export function reference<C extends keyof AnyEntryMap>(
collection: C,
): import('astro/zod').ZodEffects<
import('astro/zod').ZodString,
C extends keyof ContentEntryMap
? ReferenceContentEntry<C, ValidContentEntrySlug<C>>
: ReferenceDataEntry<C, keyof DataEntryMap[C]>
>;
// Allow generic `string` to avoid excessive type errors in the config
// if `dev` is not running to update as you edit.
// Invalid collection names will be caught at build time.
export function reference<C extends string>(
collection: C,
): import('astro/zod').ZodEffects<import('astro/zod').ZodString, never>;
type ReturnTypeOrOriginal<T> = T extends (...args: any[]) => infer R ? R : T;
type InferEntrySchema<C extends keyof AnyEntryMap> = import('astro/zod').infer<
ReturnTypeOrOriginal<Required<ContentConfig['collections'][C]>['schema']>
>;
type ContentEntryMap = {
};
// NOTE(review): "docs" appears under DataEntryMap even though config.js declares
// it with type: 'content' — presumably content-layer output; re-run `astro sync`
// if this looks stale. TODO confirm.
type DataEntryMap = {
"docs": Record<string, {
id: string;
render(): Render[".md"];
slug: string;
body: string;
collection: "docs";
data: InferEntrySchema<"docs">;
rendered?: RenderedContent;
filePath?: string;
}>;
};
type AnyEntryMap = ContentEntryMap & DataEntryMap;
// Helpers for extracting the generic parameters of live-content loaders.
type ExtractLoaderTypes<T> = T extends import('astro/loaders').LiveLoader<
infer TData,
infer TEntryFilter,
infer TCollectionFilter,
infer TError
>
? { data: TData; entryFilter: TEntryFilter; collectionFilter: TCollectionFilter; error: TError }
: { data: never; entryFilter: never; collectionFilter: never; error: never };
type ExtractDataType<T> = ExtractLoaderTypes<T>['data'];
type ExtractEntryFilterType<T> = ExtractLoaderTypes<T>['entryFilter'];
type ExtractCollectionFilterType<T> = ExtractLoaderTypes<T>['collectionFilter'];
type ExtractErrorType<T> = ExtractLoaderTypes<T>['error'];
type LiveLoaderDataType<C extends keyof LiveContentConfig['collections']> =
LiveContentConfig['collections'][C]['schema'] extends undefined
? ExtractDataType<LiveContentConfig['collections'][C]['loader']>
: import('astro/zod').infer<
Exclude<LiveContentConfig['collections'][C]['schema'], undefined>
>;
type LiveLoaderEntryFilterType<C extends keyof LiveContentConfig['collections']> =
ExtractEntryFilterType<LiveContentConfig['collections'][C]['loader']>;
type LiveLoaderCollectionFilterType<C extends keyof LiveContentConfig['collections']> =
ExtractCollectionFilterType<LiveContentConfig['collections'][C]['loader']>;
type LiveLoaderErrorType<C extends keyof LiveContentConfig['collections']> = ExtractErrorType<
LiveContentConfig['collections'][C]['loader']
>;
export type ContentConfig = typeof import("../src/content/config.js");
export type LiveContentConfig = never;
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,5 @@
{
"_variables": {
"lastUpdateCheck": 1774305535217
}
}

2
website/.astro/types.d.ts vendored Normal file
View File

@@ -0,0 +1,2 @@
/// <reference types="astro/client" />
/// <reference path="content.d.ts" />

15
website/astro.config.mjs Normal file
View File

@@ -0,0 +1,15 @@
// Astro configuration for the Feynman website (Tailwind + dual-theme Shiki).
import { defineConfig } from 'astro/config';
import tailwind from '@astrojs/tailwind';

// Code blocks follow the site's light/dark toggle via paired Shiki themes.
const shikiThemes = {
  light: 'github-light',
  dark: 'github-dark',
};

// https://astro.build/config
export default defineConfig({
  // Canonical site URL (used for sitemap/canonical link generation).
  site: 'https://feynman.companion.ai',
  integrations: [tailwind()],
  markdown: {
    shikiConfig: {
      themes: shikiThemes,
    },
  },
});

6876
website/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

17
website/package.json Normal file
View File

@@ -0,0 +1,17 @@
{
"name": "feynman-website",
"type": "module",
"version": "0.0.1",
"private": true,
"scripts": {
"dev": "astro dev",
"build": "astro build",
"preview": "astro preview"
},
"dependencies": {
"astro": "^5.7.0",
"@astrojs/tailwind": "^6.0.2",
"tailwindcss": "^3.4.0",
"sharp": "^0.33.0"
}
}

View File

@@ -0,0 +1,9 @@
<!-- Site-wide footer: copyright notice plus GitHub and docs links. -->
<footer class="py-8 mt-16">
<div class="max-w-6xl mx-auto px-6 flex flex-col sm:flex-row items-center justify-between gap-4">
<span class="text-sm text-text-dim">&copy; 2026 Companion Inc.</span>
<div class="flex gap-6">
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener" class="text-sm text-text-dim hover:text-text-primary transition-colors">GitHub</a>
<a href="/docs/getting-started/installation" class="text-sm text-text-dim hover:text-text-primary transition-colors">Docs</a>
</div>
</div>
</footer>

View File

@@ -0,0 +1,26 @@
---
// Sticky top navigation bar; highlights the active section (home vs docs).
import ThemeToggle from './ThemeToggle.astro';
interface Props {
// Which nav item renders as active; defaults to 'home'.
active?: 'home' | 'docs';
}
const { active = 'home' } = Astro.props;
---
<nav class="sticky top-0 z-50 bg-bg">
<div class="max-w-6xl mx-auto px-6 h-14 flex items-center justify-between">
<a href="/" class="text-xl font-bold text-accent tracking-tight">Feynman</a>
<div class="flex items-center gap-6">
<a href="/docs/getting-started/installation"
class:list={["text-sm transition-colors", active === 'docs' ? 'text-text-primary' : 'text-text-muted hover:text-text-primary']}>
Docs
</a>
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener"
class="text-sm text-text-muted hover:text-text-primary transition-colors">
GitHub
</a>
<ThemeToggle />
</div>
</div>
</nav>

View File

@@ -0,0 +1,80 @@
---
// Docs sidebar navigation. `currentSlug` marks the active page.
interface Props {
currentSlug: string;
}
const { currentSlug } = Astro.props;
// Hand-maintained nav tree — the single source of truth for docs ordering.
// NOTE(review): slugs must stay in sync with the markdown files in the docs
// content collection; there is no automated check visible here.
const sections = [
{
title: 'Getting Started',
items: [
{ label: 'Installation', slug: 'getting-started/installation' },
{ label: 'Quick Start', slug: 'getting-started/quickstart' },
{ label: 'Setup', slug: 'getting-started/setup' },
{ label: 'Configuration', slug: 'getting-started/configuration' },
],
},
{
title: 'Workflows',
items: [
{ label: 'Deep Research', slug: 'workflows/deep-research' },
{ label: 'Literature Review', slug: 'workflows/literature-review' },
{ label: 'Peer Review', slug: 'workflows/review' },
{ label: 'Code Audit', slug: 'workflows/audit' },
{ label: 'Replication', slug: 'workflows/replication' },
{ label: 'Source Comparison', slug: 'workflows/compare' },
{ label: 'Draft Writing', slug: 'workflows/draft' },
{ label: 'Autoresearch', slug: 'workflows/autoresearch' },
{ label: 'Watch', slug: 'workflows/watch' },
],
},
{
title: 'Agents',
items: [
{ label: 'Researcher', slug: 'agents/researcher' },
{ label: 'Reviewer', slug: 'agents/reviewer' },
{ label: 'Writer', slug: 'agents/writer' },
{ label: 'Verifier', slug: 'agents/verifier' },
],
},
{
title: 'Tools',
items: [
{ label: 'AlphaXiv', slug: 'tools/alphaxiv' },
{ label: 'Web Search', slug: 'tools/web-search' },
{ label: 'Session Search', slug: 'tools/session-search' },
{ label: 'Preview', slug: 'tools/preview' },
],
},
{
title: 'Reference',
items: [
{ label: 'CLI Commands', slug: 'reference/cli-commands' },
{ label: 'Slash Commands', slug: 'reference/slash-commands' },
{ label: 'Package Stack', slug: 'reference/package-stack' },
],
},
];
---
<aside id="sidebar" class="w-64 shrink-0 h-[calc(100vh-3.5rem)] sticky top-14 overflow-y-auto py-6 pr-4 hidden lg:block border-r border-border">
{sections.map((section) => (
<div class="mb-6">
<div class="text-xs font-semibold text-accent uppercase tracking-wider px-3 mb-2">{section.title}</div>
{section.items.map((item) => (
<a
href={`/docs/${item.slug}`}
class:list={[
'block px-3 py-1.5 text-sm border-l-[2px] transition-colors',
currentSlug === item.slug
? 'border-accent text-text-primary'
: 'border-transparent text-text-muted hover:text-text-primary',
]}
>
{item.label}
</a>
))}
</div>
))}
</aside>

View File

@@ -0,0 +1,33 @@
<!-- Light/dark theme toggle button. The inline script applies the stored (or
     OS-preferred) theme as soon as it runs. NOTE(review): because this script
     lives in the toggle component rather than <head>, content rendered before
     the nav may briefly flash the wrong theme — confirm acceptable. Also uses
     element IDs, so this component must only appear once per page. -->
<button id="theme-toggle" class="p-1.5 rounded-md text-text-muted hover:text-text-primary hover:bg-surface transition-colors" aria-label="Toggle theme">
<svg id="sun-icon" class="hidden w-[18px] h-[18px]" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
<circle cx="12" cy="12" r="5" />
<path d="M12 1v2M12 21v2M4.22 4.22l1.42 1.42M18.36 18.36l1.42 1.42M1 12h2M21 12h2M4.22 19.78l1.42-1.42M18.36 5.64l1.42-1.42" />
</svg>
<svg id="moon-icon" class="hidden w-[18px] h-[18px]" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
<path d="M21 12.79A9 9 0 1 1 11.21 3 7 7 0 0 0 21 12.79z" />
</svg>
</button>
<script is:inline>
(function() {
// Resolve initial theme: an explicit localStorage choice wins, otherwise
// fall back to the OS-level prefers-color-scheme setting.
var stored = localStorage.getItem('theme');
var prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
var dark = stored === 'dark' || (!stored && prefersDark);
if (dark) document.documentElement.classList.add('dark');
// Show the sun icon while in dark mode, the moon icon while in light mode.
// Inline style.display overrides the Tailwind `hidden` class on both SVGs.
function update() {
var isDark = document.documentElement.classList.contains('dark');
document.getElementById('sun-icon').style.display = isDark ? 'block' : 'none';
document.getElementById('moon-icon').style.display = isDark ? 'none' : 'block';
}
// Run immediately — the button markup precedes this inline script, so the
// elements exist by the time it executes during parsing.
update();
document.addEventListener('DOMContentLoaded', function() {
update();
// Toggle the theme class and persist the user's explicit choice.
document.getElementById('theme-toggle').addEventListener('click', function() {
document.documentElement.classList.toggle('dark');
var isDark = document.documentElement.classList.contains('dark');
localStorage.setItem('theme', isDark ? 'dark' : 'light');
update();
});
});
})();
</script>

View File

@@ -0,0 +1,13 @@
// Content-collection definition for the docs pages.
import { defineCollection, z } from 'astro:content';

// Frontmatter contract for every docs entry: all four fields are required.
const docsSchema = z.object({
  title: z.string(),
  description: z.string(),
  section: z.string(),
  order: z.number(),
});

const docs = defineCollection({ type: 'content', schema: docsSchema });

export const collections = { docs };

View File

@@ -0,0 +1,75 @@
---
title: Researcher
description: Gather primary evidence across papers, web sources, repos, docs, and local artifacts.
section: Agents
order: 1
---
## Source
Generated from `.feynman/agents/researcher.md`. Edit that prompt file, not this docs page.
## Role
Gather primary evidence across papers, web sources, repos, docs, and local artifacts.
## Tools
`read`, `bash`, `grep`, `find`, `ls`
## Default Output
`research.md`
## Integrity commandments
1. **Never fabricate a source.** Every named tool, project, paper, product, or dataset must have a verifiable URL. If you cannot find a URL, do not mention it.
2. **Never claim a project exists without checking.** Before citing a GitHub repo, search for it. Before citing a paper, find it. If a search returns zero results, the thing does not exist — do not invent it.
3. **Never extrapolate details you haven't read.** If you haven't fetched and inspected a source, you may note its existence but must not describe its contents, metrics, or claims.
4. **URL or it didn't happen.** Every entry in your evidence table must include a direct, checkable URL. No URL = not included.
## Search strategy
1. **Start wide.** Begin with short, broad queries to map the landscape. Use the `queries` array in `web_search` with 2–4 varied-angle queries simultaneously — never one query at a time when exploring.
2. **Evaluate availability.** After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.
3. **Progressively narrow.** Drill into specifics using terminology and names discovered in initial results. Refine queries, don't repeat them.
4. **Cross-source.** When the topic spans current reality and academic literature, always use both `web_search` and `alpha_search`.
Use `recencyFilter` on `web_search` for fast-moving topics. Use `includeContent: true` on the most important results to get full page content rather than snippets.
## Source quality
- **Prefer:** academic papers, official documentation, primary datasets, verified benchmarks, government filings, reputable journalism, expert technical blogs, official vendor pages
- **Accept with caveats:** well-cited secondary sources, established trade publications
- **Deprioritize:** SEO-optimized listicles, undated blog posts, content aggregators, social media without primary links
- **Reject:** sources with no author and no date, content that appears AI-generated with no primary backing
When initial results skew toward low-quality sources, re-search with `domainFilter` targeting authoritative domains.
## Output format
Assign each source a stable numeric ID. Use these IDs consistently so downstream agents can trace claims to exact sources.
### Evidence table
| # | Source | URL | Key claim | Type | Confidence |
|---|--------|-----|-----------|------|------------|
| 1 | ... | ... | ... | primary / secondary / self-reported | high / medium / low |
### Findings
Write findings using inline source references: `[1]`, `[2]`, etc. Every factual claim must cite at least one source by number.
### Sources
Numbered list matching the evidence table:
1. Author/Title — URL
2. Author/Title — URL
## Context hygiene
- Write findings to the output file progressively. Do not accumulate full page contents in your working memory — extract what you need, write it to file, move on.
- When `includeContent: true` returns large pages, extract relevant quotes and discard the rest immediately.
- If your search produces 10+ results, triage by title/snippet first. Only fetch full content for the top candidates.
- Return a one-line summary to the parent, not full findings. The parent reads the output file.
## Output contract
- Save to the output file (default: `research.md`).
- Minimum viable output: evidence table with ≥5 numbered entries, findings with inline references, and a numbered Sources section.
- Write to the file and pass a lightweight reference back — do not dump full content into the parent context.

View File

@@ -0,0 +1,93 @@
---
title: Reviewer
description: Simulate a tough but constructive AI research peer reviewer with inline annotations.
section: Agents
order: 2
---
## Source
Generated from `.feynman/agents/reviewer.md`. Edit that prompt file, not this docs page.
## Role
Simulate a tough but constructive AI research peer reviewer with inline annotations.
## Default Output
`review.md`
Your job is to act like a skeptical but fair peer reviewer for AI/ML systems work.
## Review checklist
- Evaluate novelty, clarity, empirical rigor, reproducibility, and likely reviewer pushback.
- Do not praise vaguely. Every positive claim should be tied to specific evidence.
- Look for:
- missing or weak baselines
- missing ablations
- evaluation mismatches
- unclear claims of novelty
- weak related-work positioning
- insufficient statistical evidence
- benchmark leakage or contamination risks
- under-specified implementation details
- claims that outrun the experiments
- Distinguish between fatal issues, strong concerns, and polish issues.
- Preserve uncertainty. If the draft might pass depending on venue norms, say so explicitly.
## Output format
Produce two sections: a structured review and inline annotations.
### Part 1: Structured Review
```markdown
## Summary
1-2 paragraph summary of the paper's contributions and approach.
## Strengths
- [S1] ...
- [S2] ...
## Weaknesses
- [W1] **FATAL:** ...
- [W2] **MAJOR:** ...
- [W3] **MINOR:** ...
## Questions for Authors
- [Q1] ...
## Verdict
Overall assessment and confidence score. Would this pass at [venue]?
## Revision Plan
Prioritized, concrete steps to address each weakness.
```
### Part 2: Inline Annotations
Quote specific passages from the paper and annotate them directly:
```markdown
## Inline Annotations
> "We achieve state-of-the-art results on all benchmarks"
**[W1] FATAL:** This claim is unsupported — Table 3 shows the method underperforms on 2 of 5 benchmarks. Revise to accurately reflect results.
> "Our approach is novel in combining X with Y"
**[W3] MINOR:** Z et al. (2024) combined X with Y in a different domain. Acknowledge this and clarify the distinction.
> "We use a learning rate of 1e-4"
**[Q1]:** Was this tuned? What range was searched? This matters for reproducibility.
```
Reference the weakness/question IDs from Part 1 so annotations link back to the structured review.
## Operating rules
- Every weakness must reference a specific passage or section in the paper.
- Inline annotations must quote the exact text being critiqued.
- End with a `Sources` section containing direct URLs for anything additionally inspected during review.
## Output contract
- Save the main artifact to `review.md`.
- The review must contain both the structured review AND inline annotations.

View File

@@ -0,0 +1,50 @@
---
title: Verifier
description: Post-process a draft to add inline citations and verify every source URL.
section: Agents
order: 4
---
## Source
Generated from `.feynman/agents/verifier.md`. Edit that prompt file, not this docs page.
## Role
Post-process a draft to add inline citations and verify every source URL.
## Tools
`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`
## Default Output
`cited.md`
You receive a draft document and the research files it was built from. Your job is to:
1. **Anchor every factual claim** in the draft to a specific source from the research files. Insert inline citations `[1]`, `[2]`, etc. directly after each claim.
2. **Verify every source URL** — use `fetch_content` to confirm each URL resolves and contains the claimed content. Flag dead links.
3. **Build the final Sources section** — a numbered list at the end where every number matches at least one inline citation in the body.
4. **Remove unsourced claims** — if a factual claim in the draft cannot be traced to any source in the research files, either find a source for it or remove it. Do not leave unsourced factual claims.
## Citation rules
- Every factual claim gets at least one citation: "Transformers achieve 94.2% on MMLU [3]."
- Multiple sources for one claim: "Recent work questions benchmark validity [7, 12]."
- No orphan citations — every `[N]` in the body must appear in Sources.
- No orphan sources — every entry in Sources must be cited at least once.
- Hedged or opinion statements do not need citations.
- When multiple research files use different numbering, merge into a single unified sequence starting from [1]. Deduplicate sources that appear in multiple files.
## Source verification
For each source URL:
- **Live:** keep as-is.
- **Dead/404:** search for an alternative URL (archived version, mirror, updated link). If none found, remove the source and all claims that depended solely on it.
- **Redirects to unrelated content:** treat as dead.
## Output contract
- Save to the output file (default: `cited.md`).
- The output is the complete final document — same structure as the input draft, but with inline citations added throughout and a verified Sources section.
- Do not change the substance or structure of the draft. Only add citations and fix dead sources.

View File

@@ -0,0 +1,56 @@
---
title: Writer
description: Turn research notes into clear, structured briefs and drafts.
section: Agents
order: 3
---
## Source
Generated from `.feynman/agents/writer.md`. Edit that prompt file, not this docs page.
## Role
Turn research notes into clear, structured briefs and drafts.
## Tools
`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`
## Default Output
`draft.md`
## Integrity commandments
1. **Write only from supplied evidence.** Do not introduce claims, tools, or sources that are not in the input research files.
2. **Preserve caveats and disagreements.** Never smooth away uncertainty.
3. **Be explicit about gaps.** If the research files have unresolved questions or conflicting evidence, surface them — do not paper over them.
## Output structure
```markdown
# Title
## Executive Summary
2-3 paragraph overview of key findings.
## Section 1: ...
Detailed findings organized by theme or question.
## Section N: ...
...
## Open Questions
Unresolved issues, disagreements between sources, gaps in evidence.
```
## Operating rules
- Use clean Markdown structure and add equations only when they materially help.
- Keep the narrative readable, but never outrun the evidence.
- Produce artifacts that are ready to review in a browser or PDF preview.
- Do NOT add inline citations — the verifier agent handles that as a separate post-processing step.
- Do NOT add a Sources section — the verifier agent builds that.
## Output contract
- Save the main artifact to the specified output path (default: `draft.md`).
- Focus on clarity, structure, and evidence traceability.

View File

@@ -0,0 +1,66 @@
---
title: Configuration
description: Configure models, search, and runtime options
section: Getting Started
order: 4
---
## Model
Set the default model:
```bash
feynman model set <provider:model>
```
Override at runtime:
```bash
feynman --model anthropic:claude-opus-4-6
```
List available models:
```bash
feynman model list
```
## Thinking level
Control the reasoning depth:
```bash
feynman --thinking high
```
Levels: `off`, `minimal`, `low`, `medium`, `high`, `xhigh`.
## Web search
Check the current search configuration:
```bash
feynman search status
```
For advanced configuration, edit `~/.feynman/web-search.json` directly to set Gemini API keys, Perplexity keys, or a different route.
## Working directory
```bash
feynman --cwd /path/to/project
```
## Session storage
```bash
feynman --session-dir /path/to/sessions
```
## One-shot mode
Run a single prompt and exit:
```bash
feynman --prompt "summarize the key findings of 2401.12345"
```

View File

@@ -0,0 +1,34 @@
---
title: Installation
description: Install Feynman and get started
section: Getting Started
order: 1
---
## Requirements
- Node.js 20 or later
- npm 9 or later
## Install
```bash
npm install -g @companion-ai/feynman
```
## Verify
```bash
feynman --version
```
## Local Development
For contributing or local development:
```bash
git clone https://github.com/getcompanion-ai/feynman.git
cd feynman
npm install
npm run start
```

View File

@@ -0,0 +1,44 @@
---
title: Quick Start
description: Get up and running with Feynman in 60 seconds
section: Getting Started
order: 2
---
## First run
```bash
feynman setup
feynman
```
`feynman setup` walks you through model authentication, alphaXiv login, web search configuration, and preview dependencies.
## Ask naturally
Feynman routes your questions into the right workflow automatically. You don't need slash commands to get started.
```
> What are the main approaches to RLHF alignment?
```
Feynman will search papers, gather web sources, and produce a structured answer with citations.
## Use workflows directly
For explicit control, use slash commands inside the REPL:
```
> /deepresearch transformer scaling laws
> /lit multimodal reasoning benchmarks
> /review paper.pdf
```
## Output locations
Feynman writes durable artifacts to canonical directories:
- `outputs/` — Reviews, reading lists, summaries
- `papers/` — Polished paper-style drafts
- `experiments/` — Runnable code and result logs
- `notes/` — Scratch notes and session logs

View File

@@ -0,0 +1,66 @@
---
title: Setup
description: Detailed setup guide for Feynman
section: Getting Started
order: 3
---
## Guided setup
```bash
feynman setup
```
This walks through four steps:
### Model provider authentication
Feynman uses Pi's OAuth system for model access. The setup wizard prompts you to log in to your preferred provider.
```bash
feynman model login
```
### AlphaXiv login
AlphaXiv powers Feynman's paper search and analysis tools. Sign in with:
```bash
feynman alpha login
```
Check status anytime:
```bash
feynman alpha status
```
### Web search routing
Feynman supports three web search backends:
- **auto** — Prefer Perplexity when configured, fall back to Gemini
- **perplexity** — Force Perplexity Sonar
- **gemini** — Force Gemini (default, zero-config via signed-in Chromium)
The default path requires no API keys — it uses Gemini Browser via your signed-in Chromium profile.
### Preview dependencies
For PDF and HTML export of generated artifacts, Feynman needs `pandoc`:
```bash
feynman --setup-preview
```
This installs pandoc automatically on macOS/Homebrew systems.
## Diagnostics
Run the doctor to check everything:
```bash
feynman doctor
```
This verifies model auth, alphaXiv credentials, preview dependencies, and the Pi runtime.

View File

@@ -0,0 +1,61 @@
---
title: CLI Commands
description: Complete reference for Feynman CLI commands
section: Reference
order: 1
---
This page covers the dedicated Feynman CLI commands and compatibility flags.
Workflow prompt templates such as `/deepresearch` also run directly from the shell as `feynman <workflow> ...`. Those workflow entries live in the slash-command reference instead of being duplicated here.
## Core
| Command | Description |
| --- | --- |
| `feynman` | Launch the interactive REPL. |
| `feynman chat [prompt]` | Start chat explicitly, optionally with an initial prompt. |
| `feynman help` | Show CLI help. |
| `feynman setup` | Run the guided setup wizard. |
| `feynman doctor` | Diagnose config, auth, Pi runtime, and preview dependencies. |
| `feynman status` | Show the current setup summary. |
## Model Management
| Command | Description |
| --- | --- |
| `feynman model list` | List available models in Pi auth storage. |
| `feynman model login [id]` | Login to a Pi OAuth model provider. |
| `feynman model logout [id]` | Logout from a Pi OAuth model provider. |
| `feynman model set <provider:model>` | Set the default model. |
## AlphaXiv
| Command | Description |
| --- | --- |
| `feynman alpha login` | Sign in to alphaXiv. |
| `feynman alpha logout` | Clear alphaXiv auth. |
| `feynman alpha status` | Check alphaXiv auth status. |
## Utilities
| Command | Description |
| --- | --- |
| `feynman search status` | Show Pi web-access status and config path. |
| `feynman update [package]` | Update installed packages, or a specific package. |
## Flags
| Flag | Description |
| --- | --- |
| `--prompt "<text>"` | Run one prompt and exit. |
| `--alpha-login` | Sign in to alphaXiv and exit. |
| `--alpha-logout` | Clear alphaXiv auth and exit. |
| `--alpha-status` | Show alphaXiv auth status and exit. |
| `--model <provider:model>` | Force a specific model. |
| `--thinking <level>` | Set thinking level: `off`, `minimal`, `low`, `medium`, `high`, or `xhigh`. |
| `--cwd <path>` | Set the working directory for tools. |
| `--session-dir <path>` | Set the session storage directory. |
| `--new-session` | Start a new persisted session. |
| `--doctor` | Alias for `feynman doctor`. |
| `--setup-preview` | Alias for `feynman setup preview`. |

View File

@@ -0,0 +1,25 @@
---
title: Package Stack
description: Curated Pi packages bundled with Feynman
section: Reference
order: 3
---
Curated Pi packages bundled with Feynman. The runtime package list lives in `.feynman/settings.json`.
| Package | Purpose |
|---------|---------|
| `pi-subagents` | Parallel literature gathering and decomposition. |
| `pi-btw` | Fast side-thread `/btw` conversations without interrupting the main run. |
| `pi-docparser` | PDFs, Office docs, spreadsheets, and images. |
| `pi-web-access` | Web, GitHub, PDF, and media access. |
| `pi-markdown-preview` | Polished Markdown and LaTeX-heavy research writeups. |
| `@walterra/pi-charts` | Charts and quantitative visualizations. |
| `pi-generative-ui` | Interactive HTML-style widgets. |
| `pi-mermaid` | Diagrams in the TUI. |
| `@aliou/pi-processes` | Long-running experiments and log tails. |
| `pi-zotero` | Citation-library workflows. |
| `@kaiserlich-dev/pi-session-search` | Indexed session recall and summarize/resume UI. |
| `pi-schedule-prompt` | Recurring and deferred research jobs. |
| `@samfp/pi-memory` | Automatic preference and correction memory across sessions. |
| `@tmustier/pi-ralph-wiggum` | Long-running agent loops for iterative development. |

View File

@@ -0,0 +1,41 @@
---
title: Slash Commands
description: Repo-owned REPL slash commands
section: Reference
order: 2
---
This page documents the slash commands that Feynman owns in this repository: prompt templates from `prompts/` and extension commands from `extensions/research-tools/`.
Additional slash commands can appear at runtime from Pi core and bundled packages such as subagents, preview, session search, and scheduling. Use `/help` inside the REPL for the live command list instead of relying on a static copy of package-provided commands.
## Research Workflows
| Command | Description |
| --- | --- |
| `/deepresearch <topic>` | Run a thorough, source-heavy investigation on a topic and produce a durable research brief with inline citations. |
| `/lit <topic>` | Run a literature review on a topic using paper search and primary-source synthesis. |
| `/review <artifact>` | Simulate an AI research peer review with likely objections, severity, and a concrete revision plan. |
| `/audit <item>` | Compare a paper's claims against its public codebase and identify mismatches, omissions, and reproducibility risks. |
| `/replicate <paper>` | Plan or execute a replication workflow for a paper, claim, or benchmark. |
| `/compare <topic>` | Compare multiple sources on a topic and produce a source-grounded matrix of agreements, disagreements, and confidence. |
| `/draft <topic>` | Turn research findings into a polished paper-style draft with equations, sections, and explicit claims. |
| `/autoresearch <idea>` | Autonomous experiment loop — try ideas, measure results, keep what works, discard what doesn't, repeat. |
| `/watch <topic>` | Set up a recurring or deferred research watch on a topic, company, paper area, or product surface. |
## Project & Session
| Command | Description |
| --- | --- |
| `/log` | Write a durable session log with completed work, findings, open questions, and next steps. |
| `/jobs` | Inspect active background research work, including running processes and scheduled follow-ups. |
| `/help` | Show grouped Feynman commands and prefill the editor with a selected command. |
| `/init` | Bootstrap AGENTS.md and session-log folders for a research project. |
## Setup
| Command | Description |
| --- | --- |
| `/alpha-login` | Sign in to alphaXiv from inside Feynman. |
| `/alpha-status` | Show alphaXiv authentication status. |
| `/alpha-logout` | Clear alphaXiv auth from inside Feynman. |

View File

@@ -0,0 +1,40 @@
---
title: AlphaXiv
description: Paper search and analysis tools
section: Tools
order: 1
---
## Overview
AlphaXiv powers Feynman's academic paper workflows. All tools require an alphaXiv account — sign in with `feynman alpha login`.
## Tools
### alpha_search
Paper discovery with three search modes:
- **semantic** — Meaning-based search across paper content
- **keyword** — Traditional keyword matching
- **agentic** — AI-powered search that interprets your intent
### alpha_get_paper
Fetch a paper's report (structured summary) or full raw text by arXiv ID.
### alpha_ask_paper
Ask a targeted question about a specific paper. Returns an answer grounded in the paper's content.
### alpha_annotate_paper
Add persistent local notes to a paper. Annotations are stored locally and persist across sessions.
### alpha_list_annotations
Recall all annotations across papers and sessions.
### alpha_read_code
Read source code from a paper's linked GitHub repository. Useful for auditing or replication planning.

View File

@@ -0,0 +1,34 @@
---
title: Preview
description: Preview generated artifacts in browser or PDF
section: Tools
order: 4
---
## Overview
The `preview_file` tool opens generated artifacts in your browser or PDF viewer.
## Usage
Inside the REPL:
```
/preview
```
Or Feynman will suggest previewing when you generate artifacts that benefit from rendered output (Markdown with LaTeX, HTML reports, etc.).
## Requirements
Preview requires `pandoc` for PDF/HTML rendering. Install it with:
```bash
feynman --setup-preview
```
## Supported formats
- Markdown (with LaTeX math rendering)
- HTML
- PDF

View File

@@ -0,0 +1,26 @@
---
title: Session Search
description: Search prior Feynman session transcripts
section: Tools
order: 3
---
## Overview
The `session_search` tool recovers prior Feynman work from stored session transcripts. Useful for picking up previous research threads or finding past findings.
## Usage
Inside the REPL:
```
/search
```
Or use the tool directly — Feynman will invoke `session_search` automatically when you reference prior work.
## What it searches
- Full session transcripts
- Tool outputs and agent results
- Generated artifacts and their content

View File

@@ -0,0 +1,34 @@
---
title: Web Search
description: Web search routing and configuration
section: Tools
order: 2
---
## Routing modes
Feynman supports three web search backends:
| Mode | Description |
|------|-------------|
| `auto` | Prefer Perplexity when configured, fall back to Gemini |
| `perplexity` | Force Perplexity Sonar |
| `gemini` | Force Gemini (default) |
## Default behavior
The default path is zero-config Gemini Browser via a signed-in Chromium profile. No API keys required.
## Check current config
```bash
feynman search status
```
## Advanced configuration
Edit `~/.feynman/web-search.json` directly to set:
- Gemini API keys
- Perplexity API keys
- Custom routing preferences

View File

@@ -0,0 +1,39 @@
---
title: Code Audit
description: Compare paper claims against public codebases
section: Workflows
order: 4
---
## Usage
```
/audit <item>
```
## What it does
Compares claims made in a paper against its public codebase. Surfaces mismatches, missing experiments, and reproducibility risks.
## What it checks
- Do the reported hyperparameters match the code?
- Are all claimed experiments present in the repository?
- Does the training loop match the described methodology?
- Are there undocumented preprocessing steps?
- Do evaluation metrics match the paper's claims?
## Example
```
/audit 2401.12345
```
## Output
An audit report with:
- Claim-by-claim verification
- Identified mismatches
- Missing components
- Reproducibility risk assessment

View File

@@ -0,0 +1,44 @@
---
title: Autoresearch
description: Autonomous experiment optimization loop
section: Workflows
order: 8
---
## Usage
```
/autoresearch <idea>
```
## What it does
Runs an autonomous experiment loop:
1. **Edit** — Modify code or configuration
2. **Commit** — Save the change
3. **Benchmark** — Run evaluation
4. **Evaluate** — Compare against baseline
5. **Keep or revert** — Persist improvements, roll back regressions
6. **Repeat** — Continue until the target is hit
## Tracking
Metrics are tracked in:
- `autoresearch.md` — Human-readable progress log
- `autoresearch.jsonl` — Machine-readable metrics over time
## Controls
```
/autoresearch <idea> # start or resume
/autoresearch off # stop, keep data
/autoresearch clear # delete all state, start fresh
```
## Example
```
/autoresearch optimize the learning rate schedule for better convergence
```

View File

@@ -0,0 +1,29 @@
---
title: Source Comparison
description: Compare multiple sources with agreement/disagreement matrix
section: Workflows
order: 6
---
## Usage
```
/compare <topic>
```
## What it does
Compares multiple sources on a topic. Builds an agreement/disagreement matrix showing where sources align and where they conflict.
## Example
```
/compare approaches to constitutional AI training
```
## Output
- Source-by-source breakdown
- Agreement/disagreement matrix
- Synthesis of key differences
- Assessment of which positions have stronger evidence

View File

@@ -0,0 +1,40 @@
---
title: Deep Research
description: Thorough source-heavy investigation with parallel agents
section: Workflows
order: 1
---
## Usage
```
/deepresearch <topic>
```
## What it does
Deep research runs a thorough, source-heavy investigation. It plans the research scope, delegates to parallel researcher agents, synthesizes findings, and adds inline citations.
The workflow follows these steps:
1. **Plan** — Clarify the research question and identify search strategy
2. **Delegate** — Spawn parallel researcher agents to gather evidence from different source types (papers, web, repos)
3. **Synthesize** — Merge findings, resolve contradictions, identify gaps
4. **Cite** — Add inline citations and verify all source URLs
5. **Deliver** — Write a durable research brief to `outputs/`
## Example
```
/deepresearch transformer scaling laws and their implications for compute-optimal training
```
## Output
Produces a structured research brief with:
- Executive summary
- Key findings organized by theme
- Evidence tables with source links
- Open questions and suggested next steps
- Numbered sources section with direct URLs

View File

@@ -0,0 +1,37 @@
---
title: Draft Writing
description: Paper-style draft generation from research findings
section: Workflows
order: 7
---
## Usage
```
/draft <topic>
```
## What it does
Produces a paper-style draft with structured sections. Writes to `papers/`.
## Structure
The generated draft includes:
- Title
- Abstract
- Introduction / Background
- Method or Approach
- Evidence and Analysis
- Limitations
- Conclusion
- Sources
## Example
```
/draft survey of differentiable physics simulators
```
The writer agent works only from supplied evidence — it never fabricates content. If evidence is insufficient, it explicitly notes the gaps.

View File

@@ -0,0 +1,31 @@
---
title: Literature Review
description: Map consensus, disagreements, and open questions
section: Workflows
order: 2
---
## Usage
```
/lit <topic>
```
## What it does
Runs a structured literature review that searches across academic papers and web sources. Explicitly separates consensus findings from disagreements and open questions.
## Example
```
/lit multimodal reasoning benchmarks for large language models
```
## Output
A structured review covering:
- **Consensus** — What the field agrees on
- **Disagreements** — Where sources conflict
- **Open questions** — What remains unresolved
- **Sources** — Direct links to all referenced papers and articles

View File

@@ -0,0 +1,42 @@
---
title: Replication
description: Plan replications of papers and claims
section: Workflows
order: 5
---
## Usage
```
/replicate <paper or claim>
```
## What it does
Extracts key implementation details from a paper, identifies what's needed to replicate the results, and asks where to run before executing anything.
Before running code, Feynman asks you to choose an execution environment:
- **Local** — run in the current working directory
- **Virtual environment** — create an isolated venv/conda env first
- **Cloud** — delegate to a remote Agent Computer machine
- **Plan only** — produce the replication plan without executing
## Example
```
/replicate "chain-of-thought prompting improves math reasoning"
```
## Output
A replication plan covering:
- Key claims to verify
- Required resources (compute, data, models)
- Implementation details extracted from the paper
- Potential pitfalls and underspecified details
- Step-by-step replication procedure
- Success criteria
If an execution environment is selected, also produces runnable scripts and captured results.

View File

@@ -0,0 +1,49 @@
---
title: Peer Review
description: Simulated peer review with severity-graded feedback
section: Workflows
order: 3
---
## Usage
```
/review <artifact>
```
## What it does
Simulates a tough-but-fair peer review for AI research artifacts. Evaluates novelty, empirical rigor, baselines, ablations, and reproducibility.
The reviewer agent identifies:
- Weak baselines
- Missing ablations
- Evaluation mismatches
- Benchmark leakage
- Under-specified implementation details
## Severity levels
Feedback is graded by severity:
- **FATAL** — Fundamental issues that invalidate the claims
- **MAJOR** — Significant problems that need addressing
- **MINOR** — Small improvements or clarifications
## Example
```
/review outputs/scaling-laws-brief.md
```
## Output
Structured review with:
- Summary of the work
- Strengths
- Weaknesses (severity-graded)
- Questions for the authors
- Verdict (accept / revise / reject)
- Revision plan

View File

@@ -0,0 +1,29 @@
---
title: Watch
description: Recurring research monitoring
section: Workflows
order: 9
---
## Usage
```
/watch <topic>
```
## What it does
Schedules a recurring research watch. Sets a baseline of current knowledge and defines what constitutes a meaningful change worth reporting.
## Example
```
/watch new papers on test-time compute scaling
```
## How it works
1. Feynman establishes a baseline by surveying current sources
2. Defines change signals (new papers, updated results, new repos)
3. Schedules periodic checks via `pi-schedule-prompt`
4. Reports only when meaningful changes are detected

View File

@@ -0,0 +1,55 @@
---
// Base HTML shell shared by every page: <head> metadata, nav, footer,
// Astro view transitions, and the dark-mode bootstrap scripts.
import { ViewTransitions } from 'astro:transitions';
import Nav from '../components/Nav.astro';
import Footer from '../components/Footer.astro';
import '../styles/global.css';
interface Props {
title: string;
description?: string;
// Which top-nav item to highlight.
active?: 'home' | 'docs';
}
const { title, description = 'Research-first AI agent', active = 'home' } = Astro.props;
---
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta name="description" content={description} />
<title>{title}</title>
<ViewTransitions fallback="none" />
<!-- Apply the persisted theme (or the OS preference when nothing is stored)
     before first paint, so a dark-mode user never sees a light flash. -->
<script is:inline>
(function() {
var stored = localStorage.getItem('theme');
var prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
if (stored === 'dark' || (!stored && prefersDark)) {
document.documentElement.classList.add('dark');
}
})();
</script>
<!-- Re-apply the theme after every view-transition navigation: the swap brings
     in the server-rendered document, which has no `dark` class, and this
     listener on `document` survives across swaps. Also syncs the toggle icons. -->
<script is:inline>
document.addEventListener('astro:after-swap', function() {
var stored = localStorage.getItem('theme');
var prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
if (stored === 'dark' || (!stored && prefersDark)) {
document.documentElement.classList.add('dark');
}
// NOTE(review): #sun-icon / #moon-icon are presumably rendered by Nav.astro — confirm.
var isDark = document.documentElement.classList.contains('dark');
var sun = document.getElementById('sun-icon');
var moon = document.getElementById('moon-icon');
if (sun) sun.style.display = isDark ? 'block' : 'none';
if (moon) moon.style.display = isDark ? 'none' : 'block';
});
</script>
</head>
<body class="min-h-screen flex flex-col antialiased">
<Nav active={active} />
<main class="flex-1">
<slot />
</main>
<Footer />
</body>
</html>

View File

@@ -0,0 +1,79 @@
---
// Docs page layout: sidebar + article column, a mobile sidebar toggle, and
// copy-to-clipboard buttons injected into rendered code blocks.
import Base from './Base.astro';
import Sidebar from '../components/Sidebar.astro';
interface Props {
title: string;
description?: string;
// Slug of the doc being rendered; passed to Sidebar to highlight the active link.
currentSlug: string;
}
const { title, description, currentSlug } = Astro.props;
---
<Base title={`${title} — Feynman Docs`} description={description} active="docs">
<div class="max-w-6xl mx-auto px-6">
<div class="flex gap-8">
<Sidebar currentSlug={currentSlug} />
<!-- Floating hamburger that opens the sidebar on small screens. -->
<button id="mobile-menu-btn" class="lg:hidden fixed bottom-6 right-6 z-40 p-3 rounded-full bg-accent text-bg shadow-lg" aria-label="Toggle sidebar">
<svg class="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
<path d="M4 6h16M4 12h16M4 18h16" />
</svg>
</button>
<div id="mobile-overlay" class="hidden fixed inset-0 bg-black/50 z-30 lg:hidden"></div>
<article class="flex-1 min-w-0 py-8 max-w-3xl">
<h1 class="text-3xl font-bold mb-8 tracking-tight">{title}</h1>
<div class="prose">
<slot />
</div>
</article>
</div>
</div>
<script is:inline>
(function() {
// Runs on first load and again after every view-transition swap (see the two
// listeners at the bottom); each run re-binds against the freshly swapped DOM.
function init() {
var btn = document.getElementById('mobile-menu-btn');
// NOTE(review): #sidebar is presumably rendered by Sidebar.astro — confirm.
var sidebar = document.getElementById('sidebar');
var overlay = document.getElementById('mobile-overlay');
if (btn && sidebar && overlay) {
// Flip the sidebar between hidden and a full-screen overlay panel.
function toggle() {
sidebar.classList.toggle('hidden');
sidebar.classList.toggle('fixed');
sidebar.classList.toggle('inset-0');
sidebar.classList.toggle('z-40');
sidebar.classList.toggle('bg-bg');
sidebar.classList.toggle('w-full');
sidebar.classList.toggle('p-6');
overlay.classList.toggle('hidden');
}
btn.addEventListener('click', toggle);
overlay.addEventListener('click', toggle);
}
// Inject a copy button into each code block rendered from markdown.
document.querySelectorAll('.prose pre').forEach(function(pre) {
// Guard against duplicate injection when init() runs more than once.
if (pre.querySelector('.copy-code')) return;
var copyBtn = document.createElement('button');
copyBtn.className = 'copy-code';
copyBtn.setAttribute('aria-label', 'Copy code');
copyBtn.innerHTML = '<svg width="14" height="14" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2"/><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"/></svg>';
pre.appendChild(copyBtn);
copyBtn.addEventListener('click', function() {
var code = pre.querySelector('code');
var text = code ? code.textContent : pre.textContent;
// NOTE(review): navigator.clipboard requires a secure context (https/localhost) — confirm hosting setup.
navigator.clipboard.writeText(text);
// Swap to a check icon for 2 s as copy feedback, then restore the copy icon.
copyBtn.innerHTML = '<svg width="14" height="14" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><path d="M20 6L9 17l-5-5"/></svg>';
setTimeout(function() {
copyBtn.innerHTML = '<svg width="14" height="14" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2"/><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"/></svg>';
}, 2000);
});
});
}
document.addEventListener('DOMContentLoaded', init);
document.addEventListener('astro:after-swap', init);
})();
</script>
</Base>

View File

@@ -0,0 +1,19 @@
---
// Dynamic route that renders one `docs` collection entry per slug.
import { getCollection } from 'astro:content';
import Docs from '../../layouts/Docs.astro';
// One static path per markdown doc in the collection.
export async function getStaticPaths() {
const docs = await getCollection('docs');
return docs.map((entry) => ({
params: { slug: entry.slug },
props: { entry },
}));
}
const { entry } = Astro.props;
// NOTE(review): entry.slug / entry.render() are the legacy content-collections
// API; Astro 5's content layer uses entry.id and render(entry) — confirm the
// installed Astro version before migrating.
const { Content } = await entry.render();
---
<Docs title={entry.data.title} description={entry.data.description} currentSlug={entry.slug}>
<Content />
</Docs>

View File

@@ -0,0 +1,155 @@
---
// Homepage: hero with install one-liner, example commands, workflow/agent/tool
// grids, and a closing CTA. The copy-button behavior lives in the inline
// script at the bottom of this file.
import Base from '../layouts/Base.astro';
---
<Base title="Feynman — The open source AI research agent" active="home">
<!-- Hero: headline, install command with copy button, primary CTAs. -->
<section class="text-center pt-24 pb-20 px-6">
<div class="max-w-2xl mx-auto">
<h1 class="text-5xl sm:text-6xl font-bold tracking-tight mb-6" style="text-wrap: balance">The open source AI research agent</h1>
<p class="text-lg text-text-muted mb-10 leading-relaxed" style="text-wrap: pretty">Investigate topics, write papers, run experiments, review research, audit codebases &mdash; every output cited and source-grounded</p>
<div class="inline-flex items-center gap-3 bg-surface rounded-lg px-5 py-3 mb-8 font-mono text-sm">
<code class="text-accent">npm install -g @companion-ai/feynman</code>
<button id="copy-btn" class="text-text-dim hover:text-accent transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent rounded" aria-label="Copy install command">
<svg class="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2" /><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1" /></svg>
</button>
</div>
<div class="flex gap-4 justify-center flex-wrap">
<a href="/docs/getting-started/installation" class="px-6 py-2.5 rounded-lg bg-accent text-bg font-semibold text-sm hover:bg-accent-hover transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-2 focus-visible:ring-offset-bg">Get started</a>
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener" class="px-6 py-2.5 rounded-lg border border-border text-text-muted font-semibold text-sm hover:border-text-dim hover:text-text-primary transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-2 focus-visible:ring-offset-bg">GitHub</a>
</div>
</div>
</section>
<!-- Example terminal session: what you type and what Feynman does. -->
<section class="py-20 px-6">
<div class="max-w-5xl mx-auto">
<h2 class="text-2xl font-bold text-center mb-12">What you type &rarr; what happens</h2>
<div class="bg-surface rounded-xl p-6 font-mono text-sm leading-loose max-w-2xl mx-auto">
<div class="flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> "what do we know about scaling laws"</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Searches papers and web, produces a cited research brief</div>
<div class="mt-4 flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> deepresearch "mechanistic interpretability"</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Multi-agent investigation with parallel researchers, synthesis, verification</div>
<div class="mt-4 flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> lit "RLHF alternatives"</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Literature review with consensus, disagreements, open questions</div>
<div class="mt-4 flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> audit 2401.12345</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Compares paper claims against the public codebase</div>
<div class="mt-4 flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> replicate "chain-of-thought improves math"</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Asks where to run, then builds a replication plan</div>
</div>
</div>
</section>
<!-- Workflow slash-command grid. -->
<section class="py-20 px-6">
<div class="max-w-5xl mx-auto">
<h2 class="text-2xl font-bold text-center mb-12">Workflows</h2>
<p class="text-center text-text-muted mb-10">Ask naturally or use slash commands as shortcuts.</p>
<div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 gap-4 max-w-4xl mx-auto">
<div class="bg-surface rounded-xl p-5">
<div class="font-mono text-sm text-accent mb-2">/deepresearch</div>
<p class="text-sm text-text-muted">Source-heavy multi-agent investigation</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-mono text-sm text-accent mb-2">/lit</div>
<p class="text-sm text-text-muted">Literature review from paper search and primary sources</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-mono text-sm text-accent mb-2">/review</div>
<p class="text-sm text-text-muted">Simulated peer review with severity and revision plan</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-mono text-sm text-accent mb-2">/audit</div>
<p class="text-sm text-text-muted">Paper vs. codebase mismatch audit</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-mono text-sm text-accent mb-2">/replicate</div>
<p class="text-sm text-text-muted">Replication plan with environment selection</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-mono text-sm text-accent mb-2">/compare</div>
<p class="text-sm text-text-muted">Source comparison matrix</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-mono text-sm text-accent mb-2">/draft</div>
<p class="text-sm text-text-muted">Paper-style draft from research findings</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-mono text-sm text-accent mb-2">/autoresearch</div>
<p class="text-sm text-text-muted">Autonomous experiment loop</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-mono text-sm text-accent mb-2">/watch</div>
<p class="text-sm text-text-muted">Recurring research watch</p>
</div>
</div>
</div>
</section>
<!-- Bundled agents. -->
<section class="py-20 px-6">
<div class="max-w-5xl mx-auto">
<h2 class="text-2xl font-bold text-center mb-12">Agents</h2>
<p class="text-center text-text-muted mb-10">Four bundled research agents, dispatched automatically.</p>
<div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-4 gap-4">
<div class="bg-surface rounded-xl p-6 text-center">
<div class="font-semibold text-accent mb-2">Researcher</div>
<p class="text-sm text-text-muted">Gathers evidence across papers, web, repos, and docs</p>
</div>
<div class="bg-surface rounded-xl p-6 text-center">
<div class="font-semibold text-accent mb-2">Reviewer</div>
<p class="text-sm text-text-muted">Simulated peer review with severity-graded feedback</p>
</div>
<div class="bg-surface rounded-xl p-6 text-center">
<div class="font-semibold text-accent mb-2">Writer</div>
<p class="text-sm text-text-muted">Structured briefs and drafts from research notes</p>
</div>
<div class="bg-surface rounded-xl p-6 text-center">
<div class="font-semibold text-accent mb-2">Verifier</div>
<p class="text-sm text-text-muted">Inline citations and source URL verification</p>
</div>
</div>
</div>
</section>
<!-- Tool integrations. -->
<section class="py-20 px-6">
<div class="max-w-5xl mx-auto">
<h2 class="text-2xl font-bold text-center mb-12">Tools</h2>
<div class="grid grid-cols-1 sm:grid-cols-2 gap-4 max-w-2xl mx-auto">
<div class="bg-surface rounded-xl p-5">
<div class="font-semibold mb-1">AlphaXiv</div>
<p class="text-sm text-text-muted">Paper search, Q&A, code reading, persistent annotations</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-semibold mb-1">Web search</div>
<p class="text-sm text-text-muted">Gemini or Perplexity, zero-config default</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-semibold mb-1">Session search</div>
<p class="text-sm text-text-muted">Indexed recall across prior research sessions</p>
</div>
<div class="bg-surface rounded-xl p-5">
<div class="font-semibold mb-1">Preview</div>
<p class="text-sm text-text-muted">Browser and PDF export of generated artifacts</p>
</div>
</div>
</div>
</section>
<!-- Closing CTA. -->
<section class="py-20 px-6 text-center">
<div class="max-w-xl mx-auto">
<p class="text-text-muted mb-6">Built on <a href="https://github.com/mariozechner/pi-coding-agent" class="text-accent hover:underline">Pi</a> and <a href="https://github.com/getcompanion-ai/alpha-hub" class="text-accent hover:underline">Alpha Hub</a>. MIT licensed. Open source.</p>
<div class="flex gap-4 justify-center flex-wrap">
<a href="/docs/getting-started/installation" class="px-6 py-2.5 rounded-lg bg-accent text-bg font-semibold text-sm hover:bg-accent-hover transition-colors">Get started</a>
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener" class="px-6 py-2.5 rounded-lg border border-border text-text-muted font-semibold text-sm hover:border-text-dim hover:text-text-primary transition-colors">GitHub</a>
</div>
</div>
</section>
<script is:inline>
document.getElementById('copy-btn').addEventListener('click', function() {
navigator.clipboard.writeText('npm install -g @companion-ai/feynman');
this.innerHTML = '<svg class="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><path d="M20 6L9 17l-5-5"/></svg>';
var btn = this;
setTimeout(function() {
btn.innerHTML = '<svg class="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2"/><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"/></svg>';
}, 2000);
});
</script>
</Base>

View File

@@ -0,0 +1,209 @@
/* Global stylesheet: Tailwind layers, light/dark palettes, and typography for
   rendered markdown (.prose). The color custom properties below are exposed to
   Tailwind utilities via aliases in tailwind.config. */
@tailwind base;
@tailwind components;
@tailwind utilities;
/* Light palette (default). */
:root {
--color-bg: #f0f5f1;
--color-surface: #e4ece6;
--color-surface-2: #d8e3db;
--color-border: #c2d1c6;
--color-text: #1a2e22;
--color-text-muted: #3d5c4a;
--color-text-dim: #6b8f7a;
--color-accent: #0d9668;
--color-accent-hover: #077a54;
--color-accent-subtle: #c6e4d4;
--color-teal: #0e8a7d;
}
/* Dark palette — active when the `dark` class is set on <html>
   (see the theme scripts in layouts/Base.astro). */
.dark {
--color-bg: #050a08;
--color-surface: #0c1410;
--color-surface-2: #131f1a;
--color-border: #1b2f26;
--color-text: #f0f5f2;
--color-text-muted: #8aaa9a;
--color-text-dim: #4d7565;
--color-accent: #34d399;
--color-accent-hover: #10b981;
--color-accent-subtle: #064e3b;
--color-teal: #2dd4bf;
}
html {
scroll-behavior: smooth;
}
/* Suppress the default cross-fade from Astro view transitions. */
::view-transition-old(root),
::view-transition-new(root) {
animation: none !important;
}
body {
background-color: var(--color-bg);
color: var(--color-text);
}
/* --- Markdown typography (.prose) --- */
.prose h2 {
font-size: 1.5rem;
font-weight: 700;
margin-top: 2.5rem;
margin-bottom: 1rem;
color: var(--color-text);
}
.prose h3 {
font-size: 1.2rem;
font-weight: 600;
margin-top: 2rem;
margin-bottom: 0.75rem;
color: var(--color-teal);
}
.prose p {
margin-bottom: 1rem;
line-height: 1.75;
color: var(--color-text-muted);
}
.prose ul {
margin-bottom: 1rem;
padding-left: 1.5rem;
list-style-type: disc;
}
.prose ol {
margin-bottom: 1rem;
padding-left: 1.5rem;
list-style-type: decimal;
}
.prose li {
margin-bottom: 0.375rem;
line-height: 1.65;
color: var(--color-text-muted);
}
/* Inline code. */
.prose code {
font-family: 'SF Mono', 'Fira Code', 'JetBrains Mono', monospace;
font-size: 0.875rem;
background-color: var(--color-surface);
padding: 0.125rem 0.375rem;
border-radius: 0.25rem;
color: var(--color-text);
}
/* Fenced code blocks. position: relative anchors the injected copy button. */
.prose pre {
position: relative;
background-color: var(--color-surface) !important;
border-radius: 0.5rem;
padding: 1rem 1.25rem;
overflow-x: auto;
margin-bottom: 1.25rem;
font-family: 'SF Mono', 'Fira Code', 'JetBrains Mono', monospace;
font-size: 0.875rem;
line-height: 1.7;
}
.prose pre code {
background: none !important;
border: none;
padding: 0;
color: var(--color-text);
}
/* Copy-to-clipboard button injected into code blocks by the Docs layout script. */
.copy-code {
all: unset;
position: absolute;
top: 0.75rem;
right: 0.75rem;
display: grid;
place-items: center;
width: 28px;
height: 28px;
border-radius: 0.25rem;
color: var(--color-text-dim);
background: var(--color-surface-2);
opacity: 0;
transition: opacity 0.15s, color 0.15s;
cursor: pointer;
}
/* Reveal the copy button only while hovering the code block. */
pre:hover .copy-code {
opacity: 1;
}
.copy-code:hover {
color: var(--color-accent);
}
/* Flatten any syntax-highlighter span colors so code inherits the theme color. */
.prose pre code span {
color: inherit !important;
}
/* Tables. */
.prose table {
width: 100%;
border-collapse: collapse;
margin-bottom: 1.5rem;
font-size: 0.9rem;
}
.prose th {
background-color: var(--color-surface);
padding: 0.625rem 0.875rem;
text-align: left;
font-weight: 600;
color: var(--color-text);
border-bottom: 1px solid var(--color-border);
}
.prose td {
padding: 0.625rem 0.875rem;
border-bottom: 1px solid var(--color-border);
}
.prose td code {
background-color: var(--color-surface-2);
padding: 0.125rem 0.375rem;
border-radius: 0.25rem;
font-size: 0.85rem;
}
/* Zebra striping for table rows. */
.prose tr:nth-child(even) {
background-color: var(--color-surface);
}
.prose a {
color: var(--color-accent);
text-decoration: underline;
text-underline-offset: 2px;
}
.prose a:hover {
color: var(--color-accent-hover);
}
.prose strong {
color: var(--color-text);
font-weight: 600;
}
.prose hr {
border-color: var(--color-border);
margin: 2rem 0;
}
.prose blockquote {
border-left: 2px solid var(--color-text-dim);
padding-left: 1rem;
color: var(--color-text-dim);
font-style: italic;
margin-bottom: 1rem;
}
/* Card used for agent descriptions in the docs. */
.agent-entry {
background-color: var(--color-surface);
border-radius: 0.75rem;
padding: 1.25rem 1.5rem;
margin-bottom: 1rem;
}

View File

@@ -0,0 +1,25 @@
// Tailwind configuration for the Feynman website.
// All color utilities resolve to CSS custom properties defined in
// src/styles/global.css, so toggling the `dark` class on <html> swaps the
// entire palette without needing `dark:` variants on individual utilities.
export default {
content: ['./src/**/*.{astro,html,js,jsx,md,mdx,svelte,ts,tsx,vue}'],
// Dark mode is class-driven; the class is set by the theme scripts in layouts/Base.astro.
darkMode: 'class',
theme: {
extend: {
colors: {
bg: 'var(--color-bg)',
surface: 'var(--color-surface)',
'surface-2': 'var(--color-surface-2)',
border: 'var(--color-border)',
'text-primary': 'var(--color-text)',
'text-muted': 'var(--color-text-muted)',
'text-dim': 'var(--color-text-dim)',
accent: 'var(--color-accent)',
'accent-hover': 'var(--color-accent-hover)',
'accent-subtle': 'var(--color-accent-subtle)',
teal: 'var(--color-teal)',
},
fontFamily: {
mono: ['"SF Mono"', '"Fira Code"', '"JetBrains Mono"', 'monospace'],
},
},
},
plugins: [],
};

3
website/tsconfig.json Normal file
View File

@@ -0,0 +1,3 @@
{
"extends": "astro/tsconfigs/strict"
}