Rebuild website from scratch on Tailwind v4 + shadcn/ui

- Fresh Astro 5 project with Tailwind v4 and shadcn/ui olive preset
- All shadcn components installed (Card, Button, Badge, Separator, etc.)
- Homepage with hero, terminal demo, workflows, agents, sources, compute
- Full docs system with 24 markdown pages across 5 sections
- Sidebar navigation with active state highlighting
- Prose styles for markdown content using shadcn color tokens
- Dark/light theme toggle with localStorage persistence
- Shiki everforest syntax themes for code blocks
- 404 page with VT323 font
- /docs redirect to installation page
- GitHub star count fetch
- Earthy green/cream oklch color palette matching TUI theme

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Advait Paliwal
2026-03-24 15:57:03 -07:00
parent 7d3fbc3f6b
commit 8f8cf2a4a9
61 changed files with 9369 additions and 2633 deletions

View File

@@ -64,7 +64,7 @@ jobs:
matrix:
include:
- id: linux-x64
os: ubuntu-latest
os: blacksmith-4vcpu-ubuntu-2404
- id: darwin-x64
os: macos-15-intel
- id: darwin-arm64
@@ -109,7 +109,7 @@ jobs:
- version-check
- publish-npm
- build-native-bundles
if: needs.version-check.outputs.should_build_release == 'true' && needs.build-native-bundles.result == 'success' && (needs.publish-npm.result == 'success' || needs.publish-npm.result == 'skipped')
if: always() && needs.version-check.outputs.should_build_release == 'true' && needs.build-native-bundles.result == 'success' && (needs.publish-npm.result == 'success' || needs.publish-npm.result == 'skipped')
runs-on: blacksmith-4vcpu-ubuntu-2404
permissions:
contents: write

0
bin/feynman.js Executable file → Normal file
View File

File diff suppressed because one or more lines are too long

View File

@@ -1,5 +1,5 @@
{
"_variables": {
"lastUpdateCheck": 1774305535217
"lastUpdateCheck": 1774391908508
}
}

23
website/.gitignore vendored Normal file
View File

@@ -0,0 +1,23 @@
# build output
dist/
# generated types
.astro/
# dependencies
node_modules/
# logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
# environment variables
.env
.env.production
# macOS-specific files
.DS_Store
# jetbrains setting folder
.idea/

6
website/.prettierignore Normal file
View File

@@ -0,0 +1,6 @@
node_modules/
coverage/
.pnpm-store/
pnpm-lock.yaml
package-lock.json
yarn.lock

19
website/.prettierrc Normal file
View File

@@ -0,0 +1,19 @@
{
"endOfLine": "lf",
"semi": false,
"singleQuote": false,
"tabWidth": 2,
"trailingComma": "es5",
"printWidth": 80,
"plugins": ["prettier-plugin-astro", "prettier-plugin-tailwindcss"],
"tailwindStylesheet": "src/styles/global.css",
"tailwindFunctions": ["cn", "cva"],
"overrides": [
{
"files": "*.astro",
"options": {
"parser": "astro"
}
}
]
}

36
website/README.md Normal file
View File

@@ -0,0 +1,36 @@
# Astro + React + TypeScript + shadcn/ui
This is a template for a new Astro project with React, TypeScript, and shadcn/ui.
## Adding components
To add components to your app, run the following command:
```bash
npx shadcn@latest add button
```
This will place the ui components in the `src/components` directory.
## Using components
To use the components in your app, import them in an `.astro` file:
```astro
---
import { Button } from "@/components/ui/button"
---
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width" />
<title>Astro App</title>
</head>
<body>
<div class="grid h-screen place-items-center content-center">
<Button>Button</Button>
</div>
</body>
</html>
```

View File

@@ -1,8 +1,15 @@
import { defineConfig } from 'astro/config';
import tailwind from '@astrojs/tailwind';
// @ts-check
import tailwindcss from "@tailwindcss/vite"
import { defineConfig } from "astro/config"
import react from "@astrojs/react"
// https://astro.build/config
export default defineConfig({
integrations: [tailwind()],
vite: {
plugins: [tailwindcss()],
},
integrations: [react()],
site: 'https://feynman.is',
markdown: {
shikiConfig: {
@@ -12,4 +19,4 @@ export default defineConfig({
},
},
},
});
})

25
website/components.json Normal file
View File

@@ -0,0 +1,25 @@
{
"$schema": "https://ui.shadcn.com/schema.json",
"style": "radix-vega",
"rsc": false,
"tsx": true,
"tailwind": {
"config": "",
"css": "src/styles/global.css",
"baseColor": "olive",
"cssVariables": true,
"prefix": ""
},
"iconLibrary": "lucide",
"rtl": false,
"aliases": {
"components": "@/components",
"utils": "@/lib/utils",
"ui": "@/components/ui",
"lib": "@/lib",
"hooks": "@/hooks"
},
"menuColor": "default",
"menuAccent": "subtle",
"registries": {}
}

23
website/eslint.config.js Normal file
View File

@@ -0,0 +1,23 @@
import js from "@eslint/js"
import globals from "globals"
import reactHooks from "eslint-plugin-react-hooks"
import reactRefresh from "eslint-plugin-react-refresh"
import tseslint from "typescript-eslint"
import { defineConfig, globalIgnores } from "eslint/config"
export default defineConfig([
globalIgnores(["dist", ".astro"]),
{
files: ["**/*.{ts,tsx}"],
extends: [
js.configs.recommended,
tseslint.configs.recommended,
reactHooks.configs.flat.recommended,
reactRefresh.configs.vite,
],
languageOptions: {
ecmaVersion: 2020,
globals: globals.browser,
},
},
])

8344
website/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,17 +1,45 @@
{
"name": "feynman-website",
"name": "website",
"type": "module",
"version": "0.0.1",
"private": true,
"scripts": {
"dev": "astro dev",
"build": "node ../scripts/sync-website-installers.mjs && astro build",
"preview": "astro preview"
"build": "astro build",
"preview": "astro preview",
"astro": "astro",
"lint": "eslint .",
"format": "prettier --write \"**/*.{ts,tsx,astro}\"",
"typecheck": "astro check"
},
"dependencies": {
"astro": "^5.7.0",
"@astrojs/tailwind": "^6.0.2",
"tailwindcss": "^3.4.0",
"sharp": "^0.33.0"
"@astrojs/react": "^4.4.2",
"@fontsource-variable/ibm-plex-sans": "^5.2.8",
"@tailwindcss/vite": "^4.2.1",
"@types/react": "^19.2.14",
"@types/react-dom": "^19.2.3",
"astro": "^5.18.1",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"lucide-react": "^1.6.0",
"radix-ui": "^1.4.3",
"react": "^19.2.4",
"react-dom": "^19.2.4",
"shadcn": "^4.1.0",
"tailwind-merge": "^3.5.0",
"tailwindcss": "^4.2.1",
"tw-animate-css": "^1.4.0"
},
"devDependencies": {
"@eslint/js": "^9.39.4",
"eslint": "^9.39.4",
"eslint-plugin-react-hooks": "^7.0.1",
"eslint-plugin-react-refresh": "^0.5.2",
"globals": "^16.5.0",
"prettier": "^3.8.1",
"prettier-plugin-astro": "^0.14.1",
"prettier-plugin-tailwindcss": "^0.7.2",
"typescript": "~5.9.3",
"typescript-eslint": "^8.57.1"
}
}

View File

@@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 36 36">
<path fill="#000" d="M22.25 4h-8.5a1 1 0 0 0-.96.73l-5.54 19.4a.5.5 0 0 0 .62.62l5.05-1.44a1 1 0 0 0 .67-.55L18 14l4.41 8.76a1 1 0 0 0 .67.55l5.05 1.44a.5.5 0 0 0 .62-.62l-5.54-19.4a1 1 0 0 0-.96-.73Z"/>
</svg>

After

Width:  |  Height:  |  Size: 286 B

View File

Before

Width:  |  Height:  |  Size: 884 KiB

After

Width:  |  Height:  |  Size: 884 KiB

View File

@@ -1,261 +0,0 @@
#!/bin/sh
set -eu
VERSION="${1:-latest}"
INSTALL_BIN_DIR="${FEYNMAN_INSTALL_BIN_DIR:-$HOME/.local/bin}"
INSTALL_APP_DIR="${FEYNMAN_INSTALL_APP_DIR:-$HOME/.local/share/feynman}"
SKIP_PATH_UPDATE="${FEYNMAN_INSTALL_SKIP_PATH_UPDATE:-0}"
path_action="already"
path_profile=""
step() {
printf '==> %s\n' "$1"
}
run_with_spinner() {
label="$1"
shift
if [ ! -t 2 ]; then
step "$label"
"$@"
return
fi
"$@" &
pid=$!
frame=0
set +e
while kill -0 "$pid" 2>/dev/null; do
case "$frame" in
0) spinner='|' ;;
1) spinner='/' ;;
2) spinner='-' ;;
*) spinner='\\' ;;
esac
printf '\r==> %s %s' "$label" "$spinner" >&2
frame=$(( (frame + 1) % 4 ))
sleep 0.1
done
wait "$pid"
status=$?
set -e
printf '\r\033[2K' >&2
if [ "$status" -ne 0 ]; then
printf '==> %s failed\n' "$label" >&2
return "$status"
fi
step "$label"
}
normalize_version() {
case "$1" in
"" | latest)
printf 'latest\n'
;;
v*)
printf '%s\n' "${1#v}"
;;
*)
printf '%s\n' "$1"
;;
esac
}
download_file() {
url="$1"
output="$2"
if command -v curl >/dev/null 2>&1; then
if [ -t 2 ]; then
curl -fL --progress-bar "$url" -o "$output"
else
curl -fsSL "$url" -o "$output"
fi
return
fi
if command -v wget >/dev/null 2>&1; then
if [ -t 2 ]; then
wget --show-progress -O "$output" "$url"
else
wget -q -O "$output" "$url"
fi
return
fi
echo "curl or wget is required to install Feynman." >&2
exit 1
}
download_text() {
url="$1"
if command -v curl >/dev/null 2>&1; then
curl -fsSL "$url"
return
fi
if command -v wget >/dev/null 2>&1; then
wget -q -O - "$url"
return
fi
echo "curl or wget is required to install Feynman." >&2
exit 1
}
add_to_path() {
path_action="already"
path_profile=""
case ":$PATH:" in
*":$INSTALL_BIN_DIR:"*)
return
;;
esac
if [ "$SKIP_PATH_UPDATE" = "1" ]; then
path_action="skipped"
return
fi
profile="${FEYNMAN_INSTALL_SHELL_PROFILE:-$HOME/.profile}"
if [ -z "${FEYNMAN_INSTALL_SHELL_PROFILE:-}" ]; then
case "${SHELL:-}" in
*/zsh)
profile="$HOME/.zshrc"
;;
*/bash)
profile="$HOME/.bashrc"
;;
esac
fi
path_profile="$profile"
path_line="export PATH=\"$INSTALL_BIN_DIR:\$PATH\""
if [ -f "$profile" ] && grep -F "$path_line" "$profile" >/dev/null 2>&1; then
path_action="configured"
return
fi
{
printf '\n# Added by Feynman installer\n'
printf '%s\n' "$path_line"
} >>"$profile"
path_action="added"
}
require_command() {
if ! command -v "$1" >/dev/null 2>&1; then
echo "$1 is required to install Feynman." >&2
exit 1
fi
}
resolve_version() {
normalized_version="$(normalize_version "$VERSION")"
if [ "$normalized_version" != "latest" ]; then
printf '%s\n' "$normalized_version"
return
fi
release_json="$(download_text "https://api.github.com/repos/getcompanion-ai/feynman/releases/latest")"
resolved="$(printf '%s\n' "$release_json" | sed -n 's/.*"tag_name":[[:space:]]*"v\([^"]*\)".*/\1/p' | head -n 1)"
if [ -z "$resolved" ]; then
echo "Failed to resolve the latest Feynman release version." >&2
exit 1
fi
printf '%s\n' "$resolved"
}
case "$(uname -s)" in
Darwin)
os="darwin"
;;
Linux)
os="linux"
;;
*)
echo "install.sh supports macOS and Linux. Use install.ps1 on Windows." >&2
exit 1
;;
esac
case "$(uname -m)" in
x86_64 | amd64)
arch="x64"
;;
arm64 | aarch64)
arch="arm64"
;;
*)
echo "Unsupported architecture: $(uname -m)" >&2
exit 1
;;
esac
require_command mktemp
require_command tar
resolved_version="$(resolve_version)"
asset_target="$os-$arch"
bundle_name="feynman-${resolved_version}-${asset_target}"
archive_name="${bundle_name}.tar.gz"
base_url="${FEYNMAN_INSTALL_BASE_URL:-https://github.com/getcompanion-ai/feynman/releases/download/v${resolved_version}}"
download_url="${base_url}/${archive_name}"
step "Installing Feynman ${resolved_version} for ${asset_target}"
tmp_dir="$(mktemp -d)"
cleanup() {
rm -rf "$tmp_dir"
}
trap cleanup EXIT INT TERM
archive_path="$tmp_dir/$archive_name"
step "Downloading ${archive_name}"
download_file "$download_url" "$archive_path"
mkdir -p "$INSTALL_APP_DIR"
rm -rf "$INSTALL_APP_DIR/$bundle_name"
run_with_spinner "Extracting ${archive_name}" tar -xzf "$archive_path" -C "$INSTALL_APP_DIR"
mkdir -p "$INSTALL_BIN_DIR"
step "Linking feynman into $INSTALL_BIN_DIR"
cat >"$INSTALL_BIN_DIR/feynman" <<EOF
#!/bin/sh
set -eu
exec "$INSTALL_APP_DIR/$bundle_name/feynman" "\$@"
EOF
chmod 0755 "$INSTALL_BIN_DIR/feynman"
add_to_path
case "$path_action" in
added)
step "PATH updated for future shells in $path_profile"
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
;;
configured)
step "PATH is already configured for future shells in $path_profile"
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
;;
skipped)
step "PATH update skipped"
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
;;
*)
step "$INSTALL_BIN_DIR is already on PATH"
step "Run: feynman"
;;
esac
printf 'Feynman %s installed successfully.\n' "$resolved_version"

View File

@@ -1,85 +0,0 @@
param(
[string]$Version = "latest"
)
$ErrorActionPreference = "Stop"
function Resolve-Version {
param([string]$RequestedVersion)
if ($RequestedVersion -and $RequestedVersion -ne "latest") {
return $RequestedVersion.TrimStart("v")
}
$release = Invoke-RestMethod -Uri "https://api.github.com/repos/getcompanion-ai/feynman/releases/latest"
if (-not $release.tag_name) {
throw "Failed to resolve the latest Feynman release version."
}
return $release.tag_name.TrimStart("v")
}
function Get-ArchSuffix {
$arch = [System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture
switch ($arch.ToString()) {
"X64" { return "x64" }
"Arm64" { return "arm64" }
default { throw "Unsupported architecture: $arch" }
}
}
$resolvedVersion = Resolve-Version -RequestedVersion $Version
$archSuffix = Get-ArchSuffix
$bundleName = "feynman-$resolvedVersion-win32-$archSuffix"
$archiveName = "$bundleName.zip"
$baseUrl = if ($env:FEYNMAN_INSTALL_BASE_URL) { $env:FEYNMAN_INSTALL_BASE_URL } else { "https://github.com/getcompanion-ai/feynman/releases/download/v$resolvedVersion" }
$downloadUrl = "$baseUrl/$archiveName"
$installRoot = Join-Path $env:LOCALAPPDATA "Programs\feynman"
$installBinDir = Join-Path $installRoot "bin"
$bundleDir = Join-Path $installRoot $bundleName
$tmpDir = Join-Path ([System.IO.Path]::GetTempPath()) ("feynman-install-" + [System.Guid]::NewGuid().ToString("N"))
New-Item -ItemType Directory -Path $tmpDir | Out-Null
try {
$archivePath = Join-Path $tmpDir $archiveName
Write-Host "==> Downloading $archiveName"
Invoke-WebRequest -Uri $downloadUrl -OutFile $archivePath
New-Item -ItemType Directory -Path $installRoot -Force | Out-Null
if (Test-Path $bundleDir) {
Remove-Item -Recurse -Force $bundleDir
}
Write-Host "==> Extracting $archiveName"
Expand-Archive -LiteralPath $archivePath -DestinationPath $installRoot -Force
New-Item -ItemType Directory -Path $installBinDir -Force | Out-Null
$shimPath = Join-Path $installBinDir "feynman.cmd"
Write-Host "==> Linking feynman into $installBinDir"
@"
@echo off
"$bundleDir\feynman.cmd" %*
"@ | Set-Content -Path $shimPath -Encoding ASCII
$currentUserPath = [Environment]::GetEnvironmentVariable("Path", "User")
if (-not $currentUserPath.Split(';').Contains($installBinDir)) {
$updatedPath = if ([string]::IsNullOrWhiteSpace($currentUserPath)) {
$installBinDir
} else {
"$currentUserPath;$installBinDir"
}
[Environment]::SetEnvironmentVariable("Path", $updatedPath, "User")
Write-Host "Updated user PATH. Open a new shell to run feynman."
} else {
Write-Host "$installBinDir is already on PATH."
}
Write-Host "Feynman $resolvedVersion installed successfully."
} finally {
if (Test-Path $tmpDir) {
Remove-Item -Recurse -Force $tmpDir
}
}

View File

@@ -1,21 +0,0 @@
---
interface Props {
class?: string;
size?: 'nav' | 'hero';
}
const { class: className = '', size = 'hero' } = Astro.props;
const sizeClasses = size === 'nav'
? 'text-2xl'
: 'text-6xl sm:text-7xl md:text-8xl';
---
<span
class:list={[
"font-['VT323'] text-accent inline-block tracking-tighter",
sizeClasses,
className,
]}
aria-label="Feynman"
>feynman</span>

View File

@@ -1,9 +0,0 @@
<footer class="py-8 mt-16">
<div class="max-w-6xl mx-auto px-6 flex flex-col sm:flex-row items-center justify-between gap-4">
<span class="text-sm text-text-dim">&copy; 2026 Companion Inc.</span>
<div class="flex gap-6">
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener" class="text-sm text-text-dim hover:text-text-primary transition-colors">GitHub</a>
<a href="/docs/getting-started/installation" class="text-sm text-text-dim hover:text-text-primary transition-colors">Docs</a>
</div>
</div>
</footer>

View File

@@ -1,29 +0,0 @@
---
import ThemeToggle from './ThemeToggle.astro';
import AsciiLogo from './AsciiLogo.astro';
interface Props {
active?: 'home' | 'docs';
}
const { active = 'home' } = Astro.props;
---
<nav class="sticky top-0 z-50 bg-bg">
<div class="max-w-6xl mx-auto px-6 h-14 flex items-center justify-between">
<a href="/" class="hover:opacity-80 transition-opacity" aria-label="Feynman">
<AsciiLogo size="nav" />
</a>
<div class="flex items-center gap-6">
<a href="/docs/getting-started/installation"
class:list={["text-sm transition-colors", active === 'docs' ? 'text-text-primary' : 'text-text-muted hover:text-text-primary']}>
Docs
</a>
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener"
class="text-sm text-text-muted hover:text-text-primary transition-colors">
GitHub
</a>
<ThemeToggle />
</div>
</div>
</nav>

View File

@@ -1,80 +0,0 @@
---
interface Props {
currentSlug: string;
}
const { currentSlug } = Astro.props;
const sections = [
{
title: 'Getting Started',
items: [
{ label: 'Installation', slug: 'getting-started/installation' },
{ label: 'Quick Start', slug: 'getting-started/quickstart' },
{ label: 'Setup', slug: 'getting-started/setup' },
{ label: 'Configuration', slug: 'getting-started/configuration' },
],
},
{
title: 'Workflows',
items: [
{ label: 'Deep Research', slug: 'workflows/deep-research' },
{ label: 'Literature Review', slug: 'workflows/literature-review' },
{ label: 'Peer Review', slug: 'workflows/review' },
{ label: 'Code Audit', slug: 'workflows/audit' },
{ label: 'Replication', slug: 'workflows/replication' },
{ label: 'Source Comparison', slug: 'workflows/compare' },
{ label: 'Draft Writing', slug: 'workflows/draft' },
{ label: 'Autoresearch', slug: 'workflows/autoresearch' },
{ label: 'Watch', slug: 'workflows/watch' },
],
},
{
title: 'Agents',
items: [
{ label: 'Researcher', slug: 'agents/researcher' },
{ label: 'Reviewer', slug: 'agents/reviewer' },
{ label: 'Writer', slug: 'agents/writer' },
{ label: 'Verifier', slug: 'agents/verifier' },
],
},
{
title: 'Tools',
items: [
{ label: 'AlphaXiv', slug: 'tools/alphaxiv' },
{ label: 'Web Search', slug: 'tools/web-search' },
{ label: 'Session Search', slug: 'tools/session-search' },
{ label: 'Preview', slug: 'tools/preview' },
],
},
{
title: 'Reference',
items: [
{ label: 'CLI Commands', slug: 'reference/cli-commands' },
{ label: 'Slash Commands', slug: 'reference/slash-commands' },
{ label: 'Package Stack', slug: 'reference/package-stack' },
],
},
];
---
<aside id="sidebar" class="w-64 shrink-0 h-[calc(100vh-3.5rem)] sticky top-14 overflow-y-auto py-6 pr-4 hidden lg:block border-r border-border">
{sections.map((section) => (
<div class="mb-6">
<div class="text-xs font-semibold text-accent uppercase tracking-wider px-3 mb-2">{section.title}</div>
{section.items.map((item) => (
<a
href={`/docs/${item.slug}`}
class:list={[
'block px-3 py-1.5 text-sm border-l-[2px] transition-colors',
currentSlug === item.slug
? 'border-accent text-text-primary'
: 'border-transparent text-text-muted hover:text-text-primary',
]}
>
{item.label}
</a>
))}
</div>
))}
</aside>

View File

@@ -1,53 +0,0 @@
<button id="theme-toggle" class="p-1.5 rounded-md text-text-muted hover:text-text-primary hover:bg-surface transition-colors" aria-label="Toggle theme">
<svg id="sun-icon" class="hidden w-[18px] h-[18px]" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
<circle cx="12" cy="12" r="5" />
<path d="M12 1v2M12 21v2M4.22 4.22l1.42 1.42M18.36 18.36l1.42 1.42M1 12h2M21 12h2M4.22 19.78l1.42-1.42M18.36 5.64l1.42-1.42" />
</svg>
<svg id="moon-icon" class="hidden w-[18px] h-[18px]" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
<path d="M21 12.79A9 9 0 1 1 11.21 3 7 7 0 0 0 21 12.79z" />
</svg>
</button>
<script is:inline>
(function() {
var stored = localStorage.getItem('theme');
var prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
var dark = stored === 'dark' || (!stored && prefersDark);
if (dark) {
document.documentElement.classList.add('dark');
} else {
document.documentElement.classList.remove('dark');
}
function updateIcons() {
var isDark = document.documentElement.classList.contains('dark');
var sun = document.getElementById('sun-icon');
var moon = document.getElementById('moon-icon');
if (sun) sun.style.display = isDark ? 'block' : 'none';
if (moon) moon.style.display = isDark ? 'none' : 'block';
}
function bindToggle() {
var btn = document.getElementById('theme-toggle');
if (btn && !btn._bound) {
btn._bound = true;
btn.addEventListener('click', function() {
document.documentElement.classList.toggle('dark');
var isDark = document.documentElement.classList.contains('dark');
localStorage.setItem('theme', isDark ? 'dark' : 'light');
updateIcons();
});
}
}
updateIcons();
document.addEventListener('DOMContentLoaded', function() {
updateIcons();
bindToggle();
});
document.addEventListener('astro:after-swap', function() {
updateIcons();
bindToggle();
});
})();
</script>

View File

@@ -0,0 +1,49 @@
import * as React from "react"
import { cva, type VariantProps } from "class-variance-authority"
import { Slot } from "radix-ui"
import { cn } from "@/lib/utils"
const badgeVariants = cva(
"group/badge inline-flex h-5 w-fit shrink-0 items-center justify-center gap-1 overflow-hidden rounded-4xl border border-transparent px-2 py-0.5 text-xs font-medium whitespace-nowrap transition-all focus-visible:border-ring focus-visible:ring-[3px] focus-visible:ring-ring/50 has-data-[icon=inline-end]:pr-1.5 has-data-[icon=inline-start]:pl-1.5 aria-invalid:border-destructive aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 [&>svg]:pointer-events-none [&>svg]:size-3!",
{
variants: {
variant: {
default: "bg-primary text-primary-foreground [a]:hover:bg-primary/80",
secondary:
"bg-secondary text-secondary-foreground [a]:hover:bg-secondary/80",
destructive:
"bg-destructive/10 text-destructive focus-visible:ring-destructive/20 dark:bg-destructive/20 dark:focus-visible:ring-destructive/40 [a]:hover:bg-destructive/20",
outline:
"border-border text-foreground [a]:hover:bg-muted [a]:hover:text-muted-foreground",
ghost:
"hover:bg-muted hover:text-muted-foreground dark:hover:bg-muted/50",
link: "text-primary underline-offset-4 hover:underline",
},
},
defaultVariants: {
variant: "default",
},
}
)
function Badge({
className,
variant = "default",
asChild = false,
...props
}: React.ComponentProps<"span"> &
VariantProps<typeof badgeVariants> & { asChild?: boolean }) {
const Comp = asChild ? Slot.Root : "span"
return (
<Comp
data-slot="badge"
data-variant={variant}
className={cn(badgeVariants({ variant }), className)}
{...props}
/>
)
}
export { Badge, badgeVariants }

View File

@@ -0,0 +1,67 @@
import * as React from "react"
import { cva, type VariantProps } from "class-variance-authority"
import { Slot } from "radix-ui"
import { cn } from "@/lib/utils"
const buttonVariants = cva(
"group/button inline-flex shrink-0 items-center justify-center rounded-md border border-transparent bg-clip-padding text-sm font-medium whitespace-nowrap transition-all outline-none select-none focus-visible:border-ring focus-visible:ring-3 focus-visible:ring-ring/50 active:not-aria-[haspopup]:translate-y-px disabled:pointer-events-none disabled:opacity-50 aria-invalid:border-destructive aria-invalid:ring-3 aria-invalid:ring-destructive/20 dark:aria-invalid:border-destructive/50 dark:aria-invalid:ring-destructive/40 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
{
variants: {
variant: {
default: "bg-primary text-primary-foreground hover:bg-primary/80",
outline:
"border-border bg-background shadow-xs hover:bg-muted hover:text-foreground aria-expanded:bg-muted aria-expanded:text-foreground dark:border-input dark:bg-input/30 dark:hover:bg-input/50",
secondary:
"bg-secondary text-secondary-foreground hover:bg-secondary/80 aria-expanded:bg-secondary aria-expanded:text-secondary-foreground",
ghost:
"hover:bg-muted hover:text-foreground aria-expanded:bg-muted aria-expanded:text-foreground dark:hover:bg-muted/50",
destructive:
"bg-destructive/10 text-destructive hover:bg-destructive/20 focus-visible:border-destructive/40 focus-visible:ring-destructive/20 dark:bg-destructive/20 dark:hover:bg-destructive/30 dark:focus-visible:ring-destructive/40",
link: "text-primary underline-offset-4 hover:underline",
},
size: {
default:
"h-9 gap-1.5 px-2.5 in-data-[slot=button-group]:rounded-md has-data-[icon=inline-end]:pr-2 has-data-[icon=inline-start]:pl-2",
xs: "h-6 gap-1 rounded-[min(var(--radius-md),8px)] px-2 text-xs in-data-[slot=button-group]:rounded-md has-data-[icon=inline-end]:pr-1.5 has-data-[icon=inline-start]:pl-1.5 [&_svg:not([class*='size-'])]:size-3",
sm: "h-8 gap-1 rounded-[min(var(--radius-md),10px)] px-2.5 in-data-[slot=button-group]:rounded-md has-data-[icon=inline-end]:pr-1.5 has-data-[icon=inline-start]:pl-1.5",
lg: "h-10 gap-1.5 px-2.5 has-data-[icon=inline-end]:pr-3 has-data-[icon=inline-start]:pl-3",
icon: "size-9",
"icon-xs":
"size-6 rounded-[min(var(--radius-md),8px)] in-data-[slot=button-group]:rounded-md [&_svg:not([class*='size-'])]:size-3",
"icon-sm":
"size-8 rounded-[min(var(--radius-md),10px)] in-data-[slot=button-group]:rounded-md",
"icon-lg": "size-10",
},
},
defaultVariants: {
variant: "default",
size: "default",
},
}
)
function Button({
className,
variant = "default",
size = "default",
asChild = false,
...props
}: React.ComponentProps<"button"> &
VariantProps<typeof buttonVariants> & {
asChild?: boolean
}) {
const Comp = asChild ? Slot.Root : "button"
return (
<Comp
data-slot="button"
data-variant={variant}
data-size={size}
className={cn(buttonVariants({ variant, size, className }))}
{...props}
/>
)
}
export { Button, buttonVariants }

View File

@@ -0,0 +1,103 @@
import * as React from "react"
import { cn } from "@/lib/utils"
function Card({
className,
size = "default",
...props
}: React.ComponentProps<"div"> & { size?: "default" | "sm" }) {
return (
<div
data-slot="card"
data-size={size}
className={cn(
"group/card flex flex-col gap-6 overflow-hidden rounded-xl bg-card py-6 text-sm text-card-foreground shadow-xs ring-1 ring-foreground/10 has-[>img:first-child]:pt-0 data-[size=sm]:gap-4 data-[size=sm]:py-4 *:[img:first-child]:rounded-t-xl *:[img:last-child]:rounded-b-xl",
className
)}
{...props}
/>
)
}
function CardHeader({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-header"
className={cn(
"group/card-header @container/card-header grid auto-rows-min items-start gap-1 rounded-t-xl px-6 group-data-[size=sm]/card:px-4 has-data-[slot=card-action]:grid-cols-[1fr_auto] has-data-[slot=card-description]:grid-rows-[auto_auto] [.border-b]:pb-6 group-data-[size=sm]/card:[.border-b]:pb-4",
className
)}
{...props}
/>
)
}
function CardTitle({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-title"
className={cn(
"font-heading text-base leading-normal font-medium group-data-[size=sm]/card:text-sm",
className
)}
{...props}
/>
)
}
function CardDescription({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-description"
className={cn("text-sm text-muted-foreground", className)}
{...props}
/>
)
}
function CardAction({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-action"
className={cn(
"col-start-2 row-span-2 row-start-1 self-start justify-self-end",
className
)}
{...props}
/>
)
}
function CardContent({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-content"
className={cn("px-6 group-data-[size=sm]/card:px-4", className)}
{...props}
/>
)
}
function CardFooter({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-footer"
className={cn(
"flex items-center rounded-b-xl px-6 group-data-[size=sm]/card:px-4 [.border-t]:pt-6 group-data-[size=sm]/card:[.border-t]:pt-4",
className
)}
{...props}
/>
)
}
export {
Card,
CardHeader,
CardFooter,
CardTitle,
CardAction,
CardDescription,
CardContent,
}

View File

@@ -0,0 +1,26 @@
import * as React from "react"
import { Separator as SeparatorPrimitive } from "radix-ui"
import { cn } from "@/lib/utils"
function Separator({
className,
orientation = "horizontal",
decorative = true,
...props
}: React.ComponentProps<typeof SeparatorPrimitive.Root>) {
return (
<SeparatorPrimitive.Root
data-slot="separator"
decorative={decorative}
orientation={orientation}
className={cn(
"shrink-0 bg-border data-horizontal:h-px data-horizontal:w-full data-vertical:w-px data-vertical:self-stretch",
className
)}
{...props}
/>
)
}
export { Separator }

View File

@@ -1,75 +1,32 @@
---
title: Researcher
description: Gather primary evidence across papers, web sources, repos, docs, and local artifacts.
description: The researcher agent searches, reads, and extracts findings from papers and web sources.
section: Agents
order: 1
---
## Source
The researcher is the primary information-gathering agent in Feynman. It searches academic databases and the web, reads papers and articles, extracts key findings, and organizes source material for other agents to synthesize. Most workflows start with the researcher.
Generated from `.feynman/agents/researcher.md`. Edit that prompt file, not this docs page.
## What it does
## Role
The researcher agent handles the entire source discovery and extraction pipeline. It formulates search queries based on your topic, evaluates results for relevance, reads full documents, and extracts structured information including claims, methodology, results, and limitations.
Gather primary evidence across papers, web sources, repos, docs, and local artifacts.
## Tools
`read`, `bash`, `grep`, `find`, `ls`
## Default Output
`research.md`
## Integrity commandments
1. **Never fabricate a source.** Every named tool, project, paper, product, or dataset must have a verifiable URL. If you cannot find a URL, do not mention it.
2. **Never claim a project exists without checking.** Before citing a GitHub repo, search for it. Before citing a paper, find it. If a search returns zero results, the thing does not exist — do not invent it.
3. **Never extrapolate details you haven't read.** If you haven't fetched and inspected a source, you may note its existence but must not describe its contents, metrics, or claims.
4. **URL or it didn't happen.** Every entry in your evidence table must include a direct, checkable URL. No URL = not included.
When multiple researcher agents are spawned in parallel (which is the default for deep research and literature review), each agent tackles a different angle of the topic. One might search for foundational papers while another looks for recent work that challenges the established view. This parallel approach produces broader coverage than a single sequential search.
## Search strategy
1. **Start wide.** Begin with short, broad queries to map the landscape. Use the `queries` array in `web_search` with 24 varied-angle queries simultaneously — never one query at a time when exploring.
2. **Evaluate availability.** After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.
3. **Progressively narrow.** Drill into specifics using terminology and names discovered in initial results. Refine queries, don't repeat them.
4. **Cross-source.** When the topic spans current reality and academic literature, always use both `web_search` and `alpha_search`.
Use `recencyFilter` on `web_search` for fast-moving topics. Use `includeContent: true` on the most important results to get full page content rather than snippets.
The researcher uses a multi-source search strategy. For academic topics, it queries AlphaXiv for papers and uses citation chains to discover related work. For applied topics, it searches the web for documentation, blog posts, and code repositories. For most topics, it uses both channels and cross-references findings.
## Source quality
- **Prefer:** academic papers, official documentation, primary datasets, verified benchmarks, government filings, reputable journalism, expert technical blogs, official vendor pages
- **Accept with caveats:** well-cited secondary sources, established trade publications
- **Deprioritize:** SEO-optimized listicles, undated blog posts, content aggregators, social media without primary links
- **Reject:** sources with no author and no date, content that appears AI-generated with no primary backing
Search queries are diversified automatically. Rather than running the same query multiple times, the researcher generates 2-4 varied queries that approach the topic from different angles. This catches papers that use different terminology for the same concept and surfaces sources that a single query would miss.
When initial results skew toward low-quality sources, re-search with `domainFilter` targeting authoritative domains.
## Source evaluation
## Output format
Not every search result is worth reading in full. The researcher evaluates results by scanning abstracts and summaries first, then selects the most relevant and authoritative sources for deep reading. It considers publication venue, citation count, recency, and topical relevance when prioritizing sources.
Assign each source a stable numeric ID. Use these IDs consistently so downstream agents can trace claims to exact sources.
## Extraction
### Evidence table
When reading a source in depth, the researcher extracts structured data: the main claims and their supporting evidence, methodology details, experimental results, stated limitations, and connections to other work. Each extracted item is tagged with its source location for traceability.
| # | Source | URL | Key claim | Type | Confidence |
|---|--------|-----|-----------|------|------------|
| 1 | ... | ... | ... | primary / secondary / self-reported | high / medium / low |
## Used by
### Findings
Write findings using inline source references: `[1]`, `[2]`, etc. Every factual claim must cite at least one source by number.
### Sources
Numbered list matching the evidence table:
1. Author/Title — URL
2. Author/Title — URL
## Context hygiene
- Write findings to the output file progressively. Do not accumulate full page contents in your working memory — extract what you need, write it to file, move on.
- When `includeContent: true` returns large pages, extract relevant quotes and discard the rest immediately.
- If your search produces 10+ results, triage by title/snippet first. Only fetch full content for the top candidates.
- Return a one-line summary to the parent, not full findings. The parent reads the output file.
## Output contract
- Save to the output file (default: `research.md`).
- Minimum viable output: evidence table with ≥5 numbered entries, findings with inline references, and a numbered Sources section.
- Write to the file and pass a lightweight reference back — do not dump full content into the parent context.
The researcher agent is used by the `/deepresearch`, `/lit`, `/review`, `/audit`, `/replicate`, `/compare`, and `/draft` workflows. It is the most frequently invoked agent in the system. You do not invoke it directly -- it is dispatched automatically by the workflow orchestrator.

View File

@@ -1,93 +1,33 @@
---
title: Reviewer
description: Simulate a tough but constructive AI research peer reviewer with inline annotations.
description: The reviewer agent evaluates documents with severity-graded academic feedback.
section: Agents
order: 2
---
## Source
The reviewer agent evaluates documents, papers, and research artifacts with the rigor of an academic peer reviewer. It produces severity-graded feedback covering methodology, claims, writing quality, and reproducibility.
Generated from `.feynman/agents/reviewer.md`. Edit that prompt file, not this docs page.
## What it does
## Role
The reviewer reads a document end-to-end and evaluates it against standard academic criteria. It checks whether claims are supported by the presented evidence, whether the methodology is sound and described in sufficient detail, whether the experimental design controls for confounds, and whether the writing is clear and complete.
Simulate a tough but constructive AI research peer reviewer with inline annotations.
Each piece of feedback is assigned a severity level. **Critical** issues are fundamental problems that undermine the document's validity, such as a statistical test applied incorrectly or a conclusion not supported by the data. **Major** issues are significant problems that should be addressed, like missing baselines or inadequate ablation studies. **Minor** issues are suggestions for improvement, and **nits** are stylistic or formatting comments.
## Default Output
## Evaluation criteria
`review.md`
The reviewer evaluates documents across several dimensions:
Your job is to act like a skeptical but fair peer reviewer for AI/ML systems work.
- **Claims vs. Evidence** -- Does the evidence presented actually support the claims made?
- **Methodology** -- Is the approach sound? Are there confounds or biases?
- **Experimental Design** -- Are baselines appropriate? Are ablations sufficient?
- **Reproducibility** -- Could someone replicate this work from the description alone?
- **Writing Quality** -- Is the paper clear, well-organized, and free of ambiguity?
- **Completeness** -- Are limitations discussed? Is related work adequately covered?
## Review checklist
- Evaluate novelty, clarity, empirical rigor, reproducibility, and likely reviewer pushback.
- Do not praise vaguely. Every positive claim should be tied to specific evidence.
- Look for:
- missing or weak baselines
- missing ablations
- evaluation mismatches
- unclear claims of novelty
- weak related-work positioning
- insufficient statistical evidence
- benchmark leakage or contamination risks
- under-specified implementation details
- claims that outrun the experiments
- Distinguish between fatal issues, strong concerns, and polish issues.
- Preserve uncertainty. If the draft might pass depending on venue norms, say so explicitly.
## Confidence scoring
## Output format
The reviewer provides a confidence score for each finding, indicating how certain it is about the assessment. High-confidence findings are clear-cut issues (a statistical error, a missing citation). Lower-confidence findings are judgment calls (whether a baseline is sufficient, whether more ablations are needed) where reasonable reviewers might disagree.
Produce two sections: a structured review and inline annotations.
## Used by
### Part 1: Structured Review
```markdown
## Summary
1-2 paragraph summary of the paper's contributions and approach.
## Strengths
- [S1] ...
- [S2] ...
## Weaknesses
- [W1] **FATAL:** ...
- [W2] **MAJOR:** ...
- [W3] **MINOR:** ...
## Questions for Authors
- [Q1] ...
## Verdict
Overall assessment and confidence score. Would this pass at [venue]?
## Revision Plan
Prioritized, concrete steps to address each weakness.
```
### Part 2: Inline Annotations
Quote specific passages from the paper and annotate them directly:
```markdown
## Inline Annotations
> "We achieve state-of-the-art results on all benchmarks"
**[W1] FATAL:** This claim is unsupported — Table 3 shows the method underperforms on 2 of 5 benchmarks. Revise to accurately reflect results.
> "Our approach is novel in combining X with Y"
**[W3] MINOR:** Z et al. (2024) combined X with Y in a different domain. Acknowledge this and clarify the distinction.
> "We use a learning rate of 1e-4"
**[Q1]:** Was this tuned? What range was searched? This matters for reproducibility.
```
Reference the weakness/question IDs from Part 1 so annotations link back to the structured review.
## Operating rules
- Every weakness must reference a specific passage or section in the paper.
- Inline annotations must quote the exact text being critiqued.
- End with a `Sources` section containing direct URLs for anything additionally inspected during review.
## Output contract
- Save the main artifact to `review.md`.
- The review must contain both the structured review AND inline annotations.
The reviewer agent is the primary agent in the `/review` workflow. It also contributes to `/audit` (evaluating paper claims against code) and `/compare` (assessing the strength of evidence across sources). Like all agents, it is dispatched automatically by the workflow orchestrator.

View File

@@ -1,50 +1,36 @@
---
title: Verifier
description: Post-process a draft to add inline citations and verify every source URL.
description: The verifier agent cross-checks claims against their cited sources.
section: Agents
order: 4
---
## Source
The verifier agent is responsible for fact-checking and validation. It cross-references claims against their cited sources, checks code implementations against paper descriptions, and flags unsupported or misattributed assertions.
Generated from `.feynman/agents/verifier.md`. Edit that prompt file, not this docs page.
## What it does
## Role
The verifier performs targeted checks on specific claims rather than reading documents end-to-end like the reviewer. It takes a claim and its cited source, retrieves the source, and determines whether the source actually supports the claim as stated. This catches misattributions (citing a paper that says something different), overstatements (claiming a stronger result than the source reports), and fabrications (claims with no basis in the cited source).
Post-process a draft to add inline citations and verify every source URL.
When checking code against papers, the verifier examines specific implementation details: hyperparameters, architecture configurations, training procedures, and evaluation metrics. It compares the paper's description to the code's actual behavior, noting discrepancies with exact file paths and line numbers.
## Tools
## Verification process
`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`
The verifier follows a systematic process for each claim it checks:
## Default Output
1. **Retrieve the source** -- Fetch the cited paper, article, or code file
2. **Locate the relevant section** -- Find where the source addresses the claim
3. **Compare** -- Check whether the source supports the claim as stated
4. **Classify** -- Mark the claim as verified, unsupported, overstated, or contradicted
5. **Document** -- Record the evidence with exact quotes and locations
`cited.md`
This process is deterministic and traceable. Every verification result includes the specific passage or code that was checked, making it easy to audit the verifier's work.
You receive a draft document and the research files it was built from. Your job is to:
## Confidence and limitations
1. **Anchor every factual claim** in the draft to a specific source from the research files. Insert inline citations `[1]`, `[2]`, etc. directly after each claim.
2. **Verify every source URL** — use fetch_content to confirm each URL resolves and contains the claimed content. Flag dead links.
3. **Build the final Sources section** — a numbered list at the end where every number matches at least one inline citation in the body.
4. **Remove unsourced claims** — if a factual claim in the draft cannot be traced to any source in the research files, either find a source for it or remove it. Do not leave unsourced factual claims.
The verifier assigns a confidence level to each verification. Claims that directly quote a source are verified with high confidence. Claims that paraphrase or interpret results are verified with moderate confidence, since reasonable interpretations can differ. Claims about the implications or significance of results are verified with lower confidence, since these involve judgment.
## Citation rules
The verifier is honest about its limitations. When a claim cannot be verified because the source is behind a paywall, the code is not available, or the claim requires domain expertise beyond what the verifier can assess, it says so explicitly rather than guessing.
- Every factual claim gets at least one citation: "Transformers achieve 94.2% on MMLU [3]."
- Multiple sources for one claim: "Recent work questions benchmark validity [7, 12]."
- No orphan citations — every `[N]` in the body must appear in Sources.
- No orphan sources — every entry in Sources must be cited at least once.
- Hedged or opinion statements do not need citations.
- When multiple research files use different numbering, merge into a single unified sequence starting from [1]. Deduplicate sources that appear in multiple files.
## Used by
## Source verification
For each source URL:
- **Live:** keep as-is.
- **Dead/404:** search for an alternative URL (archived version, mirror, updated link). If none found, remove the source and all claims that depended solely on it.
- **Redirects to unrelated content:** treat as dead.
## Output contract
- Save to the output file (default: `cited.md`).
- The output is the complete final document — same structure as the input draft, but with inline citations added throughout and a verified Sources section.
- Do not change the substance or structure of the draft. Only add citations and fix dead sources.
The verifier agent is used by `/deepresearch` (final fact-checking pass), `/audit` (comparing paper claims to code), and `/replicate` (verifying that the replication plan captures all necessary details). It serves as the quality control step that runs after the researcher and writer have produced their output.

View File

@@ -1,56 +1,36 @@
---
title: Writer
description: Turn research notes into clear, structured briefs and drafts.
description: The writer agent produces structured academic prose from research findings.
section: Agents
order: 3
---
## Source
The writer agent transforms raw research findings into structured, well-organized documents. It specializes in academic prose, producing papers, briefs, surveys, and reports with proper citations, section structure, and narrative flow.
Generated from `.feynman/agents/writer.md`. Edit that prompt file, not this docs page.
## What it does
## Role
The writer takes source material -- findings from researcher agents, review feedback, comparison matrices -- and synthesizes it into a coherent document. It handles the difficult task of turning a collection of extracted claims and citations into prose that tells a clear story.
Turn research notes into clear, structured briefs and drafts.
The writer understands academic conventions. Claims are attributed to their sources with inline citations. Methodology sections describe procedures with sufficient detail for reproduction. Results are presented with appropriate qualifiers. Limitations are discussed honestly rather than buried or omitted.
## Tools
## Writing capabilities
`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`
The writer agent handles several document types:
## Default Output
- **Research Briefs** -- Concise summaries of a topic with key findings and citations, produced by the deep research workflow
- **Literature Reviews** -- Survey-style documents that map consensus, disagreement, and open questions across the field
- **Paper Drafts** -- Full academic papers with abstract, introduction, body sections, discussion, and references
- **Comparison Reports** -- Structured analyses of how multiple sources agree and differ
- **Summaries** -- Condensed versions of longer documents or multi-source findings
`draft.md`
## Citation handling
## Integrity commandments
1. **Write only from supplied evidence.** Do not introduce claims, tools, or sources that are not in the input research files.
2. **Preserve caveats and disagreements.** Never smooth away uncertainty.
3. **Be explicit about gaps.** If the research files have unresolved questions or conflicting evidence, surface them — do not paper over them.
The writer maintains citation integrity throughout the document. Every factual claim is linked back to its source. When multiple sources support the same claim, all are cited. When a claim comes from a single source, the writer notes this to help the reader assess confidence. The final reference list includes only works actually cited in the text.
## Output structure
## Iteration
```markdown
# Title
The writer supports iterative refinement. After producing an initial draft, you can ask Feynman to revise specific sections, add more detail on a subtopic, restructure the argument, or adjust the tone and level of technical detail. Each revision preserves the citation links and document structure.
## Executive Summary
2-3 paragraph overview of key findings.
## Used by
## Section 1: ...
Detailed findings organized by theme or question.
## Section N: ...
...
## Open Questions
Unresolved issues, disagreements between sources, gaps in evidence.
```
## Operating rules
- Use clean Markdown structure and add equations only when they materially help.
- Keep the narrative readable, but never outrun the evidence.
- Produce artifacts that are ready to review in a browser or PDF preview.
- Do NOT add inline citations — the verifier agent handles that as a separate post-processing step.
- Do NOT add a Sources section — the verifier agent builds that.
## Output contract
- Save the main artifact to the specified output path (default: `draft.md`).
- Focus on clarity, structure, and evidence traceability.
The writer agent is used by `/deepresearch` (for the final brief), `/lit` (for the review document), `/draft` (as the primary agent), and `/compare` (for the comparison report). It is always the last agent to run in a workflow, producing the final output from the material gathered and evaluated by the researcher and reviewer agents.

View File

@@ -1,66 +1,84 @@
---
title: Configuration
description: Configure models, search, and runtime options
description: Understand Feynman's configuration files and environment variables.
section: Getting Started
order: 4
---
## Model
Feynman stores all configuration and state under `~/.feynman/`. This directory is created on first run and contains settings, authentication tokens, session history, and installed packages.
Set the default model:
## Directory structure
```bash
feynman model set <provider:model>
```
~/.feynman/
├── settings.json # Core configuration
├── web-search.json # Web search routing config
├── auth/ # OAuth tokens and API keys
├── sessions/ # Persisted conversation history
├── packages/ # Installed optional packages
└── bin/ # Binary (when installed via the native installer)
```
Override at runtime:
The `settings.json` file is the primary configuration file. It is created by `feynman setup` and can be edited manually. A typical configuration looks like:
```bash
feynman --model anthropic:claude-opus-4-6
```json
{
"defaultModel": "anthropic:claude-sonnet-4-20250514",
"thinkingLevel": "medium"
}
```
List available models:
## Model configuration
The `defaultModel` field sets which model is used when you launch Feynman without the `--model` flag. The format is `provider:model-name`. You can change it via the CLI:
```bash
feynman model set anthropic:claude-opus-4-20250514
```
To see all models you have configured:
```bash
feynman model list
```
## Thinking level
## Thinking levels
Control the reasoning depth:
The `thinkingLevel` field controls how much reasoning the model does before responding. Available levels are `off`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Higher levels produce more thorough analysis at the cost of latency and token usage. You can override per-session:
```bash
feynman --thinking high
```
Levels: `off`, `minimal`, `low`, `medium`, `high`, `xhigh`.
## Environment variables
## Web search
Feynman respects the following environment variables, which take precedence over `settings.json`:
Check the current search configuration:
```bash
feynman search status
```
For advanced configuration, edit `~/.feynman/web-search.json` directly to set Gemini API keys, Perplexity keys, or a different route.
## Working directory
```bash
feynman --cwd /path/to/project
```
| Variable | Description |
| --- | --- |
| `FEYNMAN_MODEL` | Override the default model |
| `FEYNMAN_HOME` | Override the config directory (default: `~/.feynman`) |
| `FEYNMAN_THINKING` | Override the thinking level |
| `ANTHROPIC_API_KEY` | Anthropic API key |
| `OPENAI_API_KEY` | OpenAI API key |
| `GOOGLE_API_KEY` | Google AI API key |
| `TAVILY_API_KEY` | Tavily web search API key |
| `SERPER_API_KEY` | Serper web search API key |
## Session storage
```bash
feynman --session-dir /path/to/sessions
```
## One-shot mode
Run a single prompt and exit:
Each conversation is persisted as a JSON file in `~/.feynman/sessions/`. To start a fresh session:
```bash
feynman --prompt "summarize the key findings of 2401.12345"
feynman --new-session
```
To point sessions at a different directory (useful for per-project session isolation):
```bash
feynman --session-dir ~/myproject/.feynman/sessions
```
## Diagnostics
Run `feynman doctor` to verify your configuration is valid, check authentication status for all configured providers, and detect missing optional dependencies. The doctor command outputs a checklist showing what is working and what needs attention.

View File

@@ -1,44 +1,69 @@
---
title: Installation
description: Install Feynman and get started
description: Install Feynman on macOS, Linux, or Windows using the one-line installer or npm.
section: Getting Started
order: 1
---
## Requirements
Feynman ships as a standalone binary for macOS and Linux, and as an npm package for all platforms including Windows. The recommended approach is the one-line installer, which downloads a prebuilt native binary with zero dependencies.
- macOS, Linux, or WSL
- `curl` or `wget`
## One-line installer (recommended)
## Recommended install
On **macOS or Linux**, open a terminal and run:
```bash
curl -fsSL https://feynman.is/install | bash
```
## Verify
The installer detects your OS and architecture automatically. On macOS it supports both Intel and Apple Silicon. On Linux it supports x64 and arm64. The binary is installed to `~/.feynman/bin` and added to your `PATH`.
```bash
feynman --version
```
## Windows PowerShell
On **Windows**, open PowerShell as Administrator and run:
```powershell
irm https://feynman.is/install.ps1 | iex
```
## npm fallback
This installs the native Windows binary and adds Feynman to your user `PATH`. You can re-run either installer at any time to update to the latest version.
If you already manage Node yourself:
## npm / npx
If you already have Node.js 18+ installed, you can install Feynman globally via npm:
```bash
npm install -g @companion-ai/feynman
```
## Local Development
Or run it directly without installing:
For contributing or local development:
```bash
npx @companion-ai/feynman
```
The npm distribution bundles the same core runtime as the native installer but depends on Node.js being present on your system. The native installer is preferred because it ships a self-contained binary with faster startup.
## Post-install setup
After installation, run the guided setup wizard to configure your model provider and API keys:
```bash
feynman setup
```
This walks you through selecting a default model, authenticating with your provider, and optionally installing extra packages for features like web search and document preview. See the [Setup guide](/docs/getting-started/setup) for a detailed walkthrough.
## Verifying the installation
Confirm Feynman is installed and accessible:
```bash
feynman --version
```
If you see a version number, you are ready to go. Run `feynman doctor` at any time to diagnose configuration issues, missing dependencies, or authentication problems.
## Local development
For contributing or running Feynman from source:
```bash
git clone https://github.com/getcompanion-ai/feynman.git

View File

@@ -1,44 +1,60 @@
---
title: Quick Start
description: Get up and running with Feynman in 60 seconds
description: Get up and running with Feynman in under five minutes.
section: Getting Started
order: 2
---
## First run
This guide assumes you have already [installed Feynman](/docs/getting-started/installation) and run `feynman setup`. If not, start there first.
## Launch the REPL
Start an interactive session by running:
```bash
feynman setup
feynman
```
`feynman setup` walks you through model authentication, alphaXiv login, web search configuration, and preview dependencies.
You are dropped into a conversational REPL where you can ask research questions, run workflows, and interact with agents in natural language. Type your question and press Enter.
## Ask naturally
## Run a one-shot prompt
Feynman routes your questions into the right workflow automatically. You don't need slash commands to get started.
If you want a quick answer without entering the REPL, use the `--prompt` flag:
```
> What are the main approaches to RLHF alignment?
```bash
feynman --prompt "Summarize the key findings of Attention Is All You Need"
```
Feynman will search papers, gather web sources, and produce a structured answer with citations.
Feynman processes the prompt, prints the response, and exits. This is useful for scripting or piping output into other tools.
## Use workflows directly
## Start a deep research session
For explicit control, use slash commands inside the REPL:
Deep research is the flagship workflow. It dispatches multiple agents to search, read, cross-reference, and synthesize information from academic papers and the web:
```
> /deepresearch transformer scaling laws
> /lit multimodal reasoning benchmarks
> /review paper.pdf
```bash
feynman
> /deepresearch What are the current approaches to mechanistic interpretability in LLMs?
```
## Output locations
The agents collaborate to produce a structured research report with citations, key findings, and open questions. The full report is saved to your session directory for later reference.
Feynman writes durable artifacts to canonical directories:
## Work with files
- `outputs/` — Reviews, reading lists, summaries
- `papers/` — Polished paper-style drafts
- `experiments/` — Runnable code and result logs
- `notes/` — Scratch notes and session logs
Feynman can read and write files in your working directory. Point it at a paper or codebase for targeted analysis:
```bash
feynman --cwd ~/papers
> /review arxiv:2301.07041
```
You can also ask Feynman to draft documents, audit code, or compare multiple sources by referencing local files directly in your prompts.
## Explore slash commands
Type `/help` inside the REPL to see all available slash commands. Each command maps to a workflow or utility, such as `/deepresearch`, `/review`, `/draft`, `/watch`, and more. You can also run any workflow directly from the CLI:
```bash
feynman deepresearch "transformer architectures for protein folding"
```
See the [Slash Commands reference](/docs/reference/slash-commands) for the complete list.

View File

@@ -1,78 +1,57 @@
---
title: Setup
description: Detailed setup guide for Feynman
description: Walk through the guided setup wizard to configure Feynman.
section: Getting Started
order: 3
---
## Guided setup
The `feynman setup` wizard configures your model provider, API keys, and optional packages. It runs automatically on first launch, but you can re-run it at any time to change your configuration.
## Running setup
```bash
feynman setup
```
This walks through four steps:
The wizard walks you through three stages: model configuration, authentication, and optional package installation.
### Model provider authentication
## Stage 1: Model selection
Feynman uses Pi's OAuth system for model access. The setup wizard prompts you to log in to your preferred provider.
Feynman supports multiple model providers. The setup wizard presents a list of available providers and models. Select your preferred default model using the arrow keys:
```bash
feynman model login
```
? Select your default model:
anthropic:claude-sonnet-4-20250514
> anthropic:claude-opus-4-20250514
openai:gpt-4o
openai:o3
google:gemini-2.5-pro
```
### AlphaXiv login
The model you choose here becomes the default for all sessions. You can override it per-session with the `--model` flag or change it later via `feynman model set <provider:model>`.
AlphaXiv powers Feynman's paper search and analysis tools. Sign in with:
## Stage 2: Authentication
```bash
feynman alpha login
Depending on your chosen provider, setup prompts you for an API key or walks you through OAuth login. For providers that support Pi OAuth (like Anthropic and OpenAI), Feynman opens a browser window to complete the sign-in flow. Your credentials are stored securely in the Pi auth storage at `~/.feynman/auth/`.
For API key providers, you are prompted to paste your key directly:
```
? Enter your API key: sk-ant-...
```
Check status anytime:
Keys are encrypted at rest and never sent anywhere except the provider's API endpoint.
```bash
feynman alpha status
```
## Stage 3: Optional packages
### Web search routing
Feynman's core ships with the essentials, but some features require additional packages. The wizard asks if you want to install optional presets:
Feynman supports three web search backends:
- **session-search** -- Enables searching prior session transcripts for past research
- **memory** -- Automatic preference and correction memory across sessions
- **generative-ui** -- Interactive HTML-style widgets for rich output
- **auto** — Prefer Perplexity when configured, fall back to Gemini
- **perplexity** — Force Perplexity Sonar
- **gemini** — Force Gemini (default, zero-config via signed-in Chromium)
You can skip this step and install packages later with `feynman packages install <preset>`.
The default path requires no API keys — it uses Gemini Browser via your signed-in Chromium profile.
## Re-running setup
### Preview dependencies
For PDF and HTML export of generated artifacts, Feynman needs `pandoc`:
```bash
feynman --setup-preview
```
Global macOS installs also try to install pandoc automatically when Homebrew is available. Use the command above to retry manually.
### Optional packages
Feynman keeps the default package set lean so first-run installs stay fast. Install the heavier optional packages only when you need them:
```bash
feynman packages list
feynman packages install memory
feynman packages install session-search
feynman packages install generative-ui
feynman packages install all-extras
```
## Diagnostics
Run the doctor to check everything:
```bash
feynman doctor
```
This verifies model auth, alphaXiv credentials, preview dependencies, and the Pi runtime.
Configuration is stored in `~/.feynman/settings.json`. Running `feynman setup` again overwrites previous settings. If you only need to change a specific value, edit the config file directly or use the targeted commands like `feynman model set` or `feynman alpha login`.

View File

@@ -1,61 +1,88 @@
---
title: CLI Commands
description: Complete reference for Feynman CLI commands
description: Complete reference for all Feynman CLI commands and flags.
section: Reference
order: 1
---
This page covers the dedicated Feynman CLI commands and compatibility flags.
This page covers the dedicated Feynman CLI commands and flags. Workflow commands like `feynman deepresearch` are also documented in the [Slash Commands](/docs/reference/slash-commands) reference since they map directly to REPL slash commands.
Workflow prompt templates such as `/deepresearch` also run directly from the shell as `feynman <workflow> ...`. Those workflow entries live in the slash-command reference instead of being duplicated here.
## Core
## Core commands
| Command | Description |
| --- | --- |
| `feynman` | Launch the interactive REPL. |
| `feynman chat [prompt]` | Start chat explicitly, optionally with an initial prompt. |
| `feynman help` | Show CLI help. |
| `feynman setup` | Run the guided setup wizard. |
| `feynman doctor` | Diagnose config, auth, Pi runtime, and preview dependencies. |
| `feynman status` | Show the current setup summary. |
| `feynman` | Launch the interactive REPL |
| `feynman chat [prompt]` | Start chat explicitly, optionally with an initial prompt |
| `feynman help` | Show CLI help |
| `feynman setup` | Run the guided setup wizard |
| `feynman doctor` | Diagnose config, auth, Pi runtime, and preview dependencies |
| `feynman status` | Show the current setup summary (model, auth, packages) |
## Model Management
## Model management
| Command | Description |
| --- | --- |
| `feynman model list` | List available models in Pi auth storage. |
| `feynman model login [id]` | Login to a Pi OAuth model provider. |
| `feynman model logout [id]` | Logout from a Pi OAuth model provider. |
| `feynman model set <provider:model>` | Set the default model. |
| `feynman model list` | List available models in Pi auth storage |
| `feynman model login [id]` | Login to a Pi OAuth model provider |
| `feynman model logout [id]` | Logout from a Pi OAuth model provider |
| `feynman model set <provider:model>` | Set the default model for all sessions |
## AlphaXiv
These commands manage your model provider configuration. The `model set` command updates `~/.feynman/settings.json` with the new default. The format is `provider:model-name`, for example `anthropic:claude-sonnet-4-20250514`.
## AlphaXiv commands
| Command | Description |
| --- | --- |
| `feynman alpha login` | Sign in to alphaXiv. |
| `feynman alpha logout` | Clear alphaXiv auth. |
| `feynman alpha status` | Check alphaXiv auth status. |
| `feynman alpha login` | Sign in to alphaXiv |
| `feynman alpha logout` | Clear alphaXiv auth |
| `feynman alpha status` | Check alphaXiv auth status |
## Utilities
AlphaXiv authentication enables Feynman to search and retrieve papers, access discussion threads, and pull citation metadata. You can also manage AlphaXiv auth from inside the REPL with `/alpha-login`, `/alpha-status`, and `/alpha-logout`.
## Package management
| Command | Description |
| --- | --- |
| `feynman search status` | Show Pi web-access status and config path. |
| `feynman update [package]` | Update installed packages, or a specific package. |
| `feynman packages list` | List all available packages and their install status |
| `feynman packages install <preset>` | Install an optional package preset |
| `feynman update [package]` | Update installed packages, or a specific package by name |
Use `feynman packages list` to see which optional packages are available and which are already installed. The `all-extras` preset installs every optional package at once.
## Utility commands
| Command | Description |
| --- | --- |
| `feynman search status` | Show Pi web-access status and config path |
## Workflow commands
All research workflow slash commands can also be invoked directly from the CLI:
```bash
feynman deepresearch "topic"
feynman lit "topic"
feynman review artifact.md
feynman audit 2401.12345
feynman replicate "claim"
feynman compare "topic"
feynman draft "topic"
```
These are equivalent to launching the REPL and typing the corresponding slash command.
## Flags
| Flag | Description |
| --- | --- |
| `--prompt "<text>"` | Run one prompt and exit. |
| `--alpha-login` | Sign in to alphaXiv and exit. |
| `--alpha-logout` | Clear alphaXiv auth and exit. |
| `--alpha-status` | Show alphaXiv auth status and exit. |
| `--model <provider:model>` | Force a specific model. |
| `--thinking <level>` | Set thinking level: `off`, `minimal`, `low`, `medium`, `high`, `xhigh`. |
| `--cwd <path>` | Set the working directory for tools. |
| `--session-dir <path>` | Set the session storage directory. |
| `--new-session` | Start a new persisted session. |
| `--doctor` | Alias for `feynman doctor`. |
| `--setup-preview` | Alias for `feynman setup preview`. |
| `--prompt "<text>"` | Run one prompt and exit (one-shot mode) |
| `--model <provider:model>` | Force a specific model for this session |
| `--thinking <level>` | Set thinking level: `off`, `minimal`, `low`, `medium`, `high`, `xhigh` |
| `--cwd <path>` | Set the working directory for all file operations |
| `--session-dir <path>` | Set the session storage directory |
| `--new-session` | Start a new persisted session |
| `--alpha-login` | Sign in to alphaXiv and exit |
| `--alpha-logout` | Clear alphaXiv auth and exit |
| `--alpha-status` | Show alphaXiv auth status and exit |
| `--doctor` | Alias for `feynman doctor` |
| `--setup-preview` | Install preview dependencies (pandoc) |

View File

@@ -1,36 +1,76 @@
---
title: Package Stack
description: Curated Pi packages bundled with Feynman
description: Core and optional Pi packages bundled with Feynman.
section: Reference
order: 3
---
Curated Pi packages bundled with Feynman. The runtime package list lives in `.feynman/settings.json`.
Feynman is built on the Pi runtime and uses curated Pi packages for its capabilities. Packages are managed through `feynman packages` commands and configured in `~/.feynman/settings.json`.
## Core packages
Installed by default.
These are installed by default with every Feynman installation. They provide the foundation for all research workflows.
| Package | Purpose |
|---------|---------|
| `pi-subagents` | Parallel literature gathering and decomposition. |
| `pi-btw` | Fast side-thread `/btw` conversations without interrupting the main run. |
| `pi-docparser` | PDFs, Office docs, spreadsheets, and images. |
| `pi-web-access` | Web, GitHub, PDF, and media access. |
| `pi-markdown-preview` | Polished Markdown and LaTeX-heavy research writeups. |
| `@walterra/pi-charts` | Charts and quantitative visualizations. |
| `pi-mermaid` | Diagrams in the TUI. |
| `@aliou/pi-processes` | Long-running experiments and log tails. |
| `pi-zotero` | Citation-library workflows. |
| `pi-schedule-prompt` | Recurring and deferred research jobs. |
| `@tmustier/pi-ralph-wiggum` | Long-running agent loops for iterative development. |
| --- | --- |
| `pi-subagents` | Parallel agent spawning for literature gathering and task decomposition. Powers the multi-agent workflows |
| `pi-btw` | Fast side-thread `/btw` conversations without interrupting the main research run |
| `pi-docparser` | Parse PDFs, Office documents, spreadsheets, and images for content extraction |
| `pi-web-access` | Web browsing, GitHub access, PDF fetching, and media retrieval |
| `pi-markdown-preview` | Render Markdown and LaTeX-heavy research documents as polished HTML/PDF |
| `@walterra/pi-charts` | Generate charts and quantitative visualizations from data |
| `pi-mermaid` | Render Mermaid diagrams in the terminal UI |
| `@aliou/pi-processes` | Manage long-running experiments, background tasks, and log tailing |
| `pi-zotero` | Integration with Zotero for citation library management |
| `pi-schedule-prompt` | Schedule recurring and deferred research jobs. Powers the `/watch` workflow |
| `@tmustier/pi-ralph-wiggum` | Long-running agent loops for iterative development. Powers `/autoresearch` |
These packages are updated together when you run `feynman update`. You do not need to install them individually.
## Optional packages
Install on demand with `feynman packages install <preset>`.
Install on demand with `feynman packages install <preset>`. These extend Feynman with capabilities that not every user needs.
| Package | Purpose |
|---------|---------|
| `pi-generative-ui` | Interactive HTML-style widgets. |
| `@kaiserlich-dev/pi-session-search` | Indexed session recall and summarize/resume UI. |
| `@samfp/pi-memory` | Automatic preference and correction memory across sessions. |
| Package | Preset | Purpose |
| --- | --- | --- |
| `pi-generative-ui` | `generative-ui` | Interactive HTML-style widgets for rich output |
| `@kaiserlich-dev/pi-session-search` | `session-search` | Indexed session recall with summarize and resume UI. Powers `/search` |
| `@samfp/pi-memory` | `memory` | Automatic preference and correction memory across sessions |
## Installing and managing packages
List all available packages and their install status:
```bash
feynman packages list
```
Install a specific optional preset:
```bash
feynman packages install session-search
feynman packages install memory
feynman packages install generative-ui
```
Install all optional packages at once:
```bash
feynman packages install all-extras
```
## Updating packages
Update all installed packages to their latest versions:
```bash
feynman update
```
Update a specific package:
```bash
feynman update pi-subagents
```
Running `feynman update` without arguments updates everything. Pass a specific package name to update just that one. Updates are safe and preserve your configuration.

View File

@@ -1,41 +1,63 @@
---
title: Slash Commands
description: Repo-owned REPL slash commands
description: Complete reference for REPL slash commands.
section: Reference
order: 2
---
This page documents the slash commands that Feynman owns in this repository: prompt templates from `prompts/` and extension commands from `extensions/research-tools/`.
Slash commands are available inside the Feynman REPL. They map to research workflows, project management tools, and setup utilities. Type `/help` inside the REPL for the live command list, which may include additional commands from installed Pi packages.
Additional slash commands can appear at runtime from Pi core and bundled packages such as subagents, preview, session search, and scheduling. Use `/help` inside the REPL for the live command list instead of relying on a static copy of package-provided commands.
## Research Workflows
## Research workflows
| Command | Description |
| --- | --- |
| `/deepresearch <topic>` | Run a thorough, source-heavy investigation on a topic and produce a durable research brief with inline citations. |
| `/lit <topic>` | Run a literature review on a topic using paper search and primary-source synthesis. |
| `/review <artifact>` | Simulate an AI research peer review with likely objections, severity, and a concrete revision plan. |
| `/audit <item>` | Compare a paper's claims against its public codebase and identify mismatches, omissions, and reproducibility risks. |
| `/replicate <paper>` | Plan or execute a replication workflow for a paper, claim, or benchmark. |
| `/compare <topic>` | Compare multiple sources on a topic and produce a source-grounded matrix of agreements, disagreements, and confidence. |
| `/draft <topic>` | Turn research findings into a polished paper-style draft with equations, sections, and explicit claims. |
| `/autoresearch <idea>` | Autonomous experiment loop — try ideas, measure results, keep what works, discard what doesn't, repeat. |
| `/watch <topic>` | Set up a recurring or deferred research watch on a topic, company, paper area, or product surface. |
| `/deepresearch <topic>` | Run a thorough, source-heavy investigation and produce a research brief with inline citations |
| `/lit <topic>` | Run a structured literature review with consensus, disagreements, and open questions |
| `/review <artifact>` | Simulate a peer review with severity-graded feedback and inline annotations |
| `/audit <item>` | Compare a paper's claims against its public codebase for mismatches and reproducibility risks |
| `/replicate <paper>` | Plan or execute a replication workflow for a paper, claim, or benchmark |
| `/compare <topic>` | Compare multiple sources and produce an agreement/disagreement matrix |
| `/draft <topic>` | Generate a paper-style draft from research findings |
| `/autoresearch <idea>` | Start an autonomous experiment loop that iteratively optimizes toward a goal |
| `/watch <topic>` | Set up recurring research monitoring on a topic |
## Project & Session
These are the primary commands you will use day-to-day. Each workflow dispatches one or more specialized agents (researcher, reviewer, writer, verifier) depending on the task.
## Project and session
| Command | Description |
| --- | --- |
| `/log` | Write a durable session log with completed work, findings, open questions, and next steps. |
| `/jobs` | Inspect active background research work, including running processes and scheduled follow-ups. |
| `/help` | Show grouped Feynman commands and prefill the editor with a selected command. |
| `/init` | Bootstrap AGENTS.md and session-log folders for a research project. |
| `/log` | Write a durable session log with completed work, findings, open questions, and next steps |
| `/jobs` | Inspect active background work: running processes, scheduled follow-ups, and active watches |
| `/help` | Show grouped Feynman commands and prefill the editor with a selected command |
| `/init` | Bootstrap `AGENTS.md` and session-log folders for a new research project |
| `/search` | Search prior session transcripts for past research and findings |
| `/preview` | Preview the current artifact as rendered HTML or PDF |
## Setup
Session management commands help you organize ongoing work. The `/log` command is particularly useful at the end of a research session to capture what was accomplished and what remains.
## Setup commands
| Command | Description |
| --- | --- |
| `/alpha-login` | Sign in to alphaXiv from inside Feynman. |
| `/alpha-status` | Show alphaXiv authentication status. |
| `/alpha-logout` | Clear alphaXiv auth from inside Feynman. |
| `/alpha-login` | Sign in to alphaXiv from inside the REPL |
| `/alpha-status` | Show alphaXiv authentication status |
| `/alpha-logout` | Clear alphaXiv auth from inside the REPL |
These provide a convenient way to manage alphaXiv authentication without leaving the REPL.
## Running workflows from the CLI
All research workflow slash commands can also be run directly from the command line:
```bash
feynman deepresearch "topic"
feynman lit "topic"
feynman review artifact.md
feynman audit 2401.12345
feynman replicate "claim"
feynman compare "topic"
feynman draft "topic"
```
This is equivalent to launching the REPL and typing the slash command. The CLI form is useful for scripting and automation.

View File

@@ -1,40 +1,48 @@
---
title: AlphaXiv
description: Paper search and analysis tools
description: Search and retrieve academic papers through the AlphaXiv integration.
section: Tools
order: 1
---
## Overview
AlphaXiv is the primary academic paper search and retrieval tool in Feynman. It provides access to a vast corpus of research papers, discussion threads, citation metadata, and full-text PDFs. The researcher agent uses AlphaXiv as its primary source for academic content.
AlphaXiv powers Feynman's academic paper workflows. All tools require an alphaXiv account — sign in with `feynman alpha login`.
## Authentication
## Tools
AlphaXiv requires authentication. Set it up during initial setup or at any time:
### alpha_search
```bash
feynman alpha login
```
Paper discovery with three search modes:
Check your authentication status:
- **semantic** — Meaning-based search across paper content
- **keyword** — Traditional keyword matching
- **agentic** — AI-powered search that interprets your intent
```bash
feynman alpha status
```
### alpha_get_paper
You can also manage AlphaXiv auth from inside the REPL with `/alpha-login`, `/alpha-status`, and `/alpha-logout`.
Fetch a paper's report (structured summary) or full raw text by arXiv ID.
## What it provides
### alpha_ask_paper
AlphaXiv gives Feynman access to several capabilities that power the research workflows:
Ask a targeted question about a specific paper. Returns an answer grounded in the paper's content.
- **Paper search** -- Find papers by topic, author, keyword, or arXiv ID
- **Full-text retrieval** -- Download and parse complete PDFs for in-depth reading
- **Citation metadata** -- Access citation counts, references, and citation chains
- **Discussion threads** -- Read community discussions and annotations on papers
- **Related papers** -- Discover connected work through citation graphs and recommendations
### alpha_annotate_paper
## How it is used
Add persistent local notes to a paper. Annotations are stored locally and persist across sessions.
You do not invoke AlphaXiv directly in most cases. The researcher agent uses it automatically during workflows like deep research, literature review, and peer review. When you provide an arXiv ID (like `arxiv:2401.12345`), Feynman fetches the paper through AlphaXiv.
### alpha_list_annotations
AlphaXiv search is especially powerful when combined with citation chaining. The researcher agent can follow references from a relevant paper to discover foundational work, then follow forward citations to find papers that built on it. This produces a much more complete picture than keyword search alone.
Recall all annotations across papers and sessions.
## Configuration
### alpha_read_code
AlphaXiv configuration is managed through the CLI commands listed above. Authentication tokens are stored in `~/.feynman/auth/` and persist across sessions. No additional configuration is needed beyond logging in.
Read source code from a paper's linked GitHub repository. Useful for auditing or replication planning.
## Without AlphaXiv
If you choose not to authenticate with AlphaXiv, Feynman still functions but with reduced academic search capabilities. It falls back to web search for finding papers, which works for well-known work but misses the citation metadata, discussion threads, and full-text access that AlphaXiv provides. For serious research workflows, AlphaXiv authentication is strongly recommended.

View File

@@ -1,34 +1,50 @@
---
title: Preview
description: Preview generated artifacts in browser or PDF
description: Preview generated research artifacts as rendered HTML or PDF.
section: Tools
order: 4
---
## Overview
The `preview_file` tool opens generated artifacts in your browser or PDF viewer.
The preview tool renders generated artifacts as polished HTML or PDF documents and opens them in your browser or PDF viewer. This is particularly useful for research briefs, paper drafts, and any document that contains LaTeX math, tables, or complex formatting that does not render well in a terminal.
## Usage
Inside the REPL:
Inside the REPL, preview the most recent artifact:
```
/preview
```
Or Feynman will suggest previewing when you generate artifacts that benefit from rendered output (Markdown with LaTeX, HTML reports, etc.).
Feynman suggests previewing automatically when you generate artifacts that benefit from rendered output. You can also preview a specific file:
```
/preview outputs/scaling-laws-brief.md
```
## Requirements
Preview requires `pandoc` for PDF/HTML rendering. Install it with:
Preview requires `pandoc` for Markdown-to-HTML and Markdown-to-PDF rendering. Install the preview dependencies with:
```bash
feynman --setup-preview
```
On macOS with Homebrew, the setup command attempts to install pandoc automatically. On Linux, it checks for pandoc in your package manager. If the automatic install does not work, install pandoc manually from [pandoc.org](https://pandoc.org/installing.html) and rerun `feynman --setup-preview` to verify.
## Supported formats
- Markdown (with LaTeX math rendering)
- HTML
- PDF
The preview tool handles three output formats:
- **Markdown** -- Rendered as HTML with full LaTeX math support via KaTeX, syntax-highlighted code blocks, and clean typography
- **HTML** -- Opened directly in your default browser with no conversion step
- **PDF** -- Generated via pandoc with LaTeX rendering, suitable for sharing or printing
## How it works
The `pi-markdown-preview` package handles the rendering pipeline. For Markdown files, it converts to HTML with a clean stylesheet, proper code highlighting, and rendered math equations. The preview opens in your default browser as a local file.
For documents with heavy math notation (common in research drafts), the preview ensures all LaTeX expressions render correctly. Inline math (`$...$`) and display math (`$$...$$`) are both supported. Tables, citation lists, and nested blockquotes all render with proper formatting.
## Customization
The preview stylesheet is designed for research documents and includes styles for proper heading hierarchy, code blocks with syntax highlighting, tables with clean borders, math equations (inline and display), citation formatting, and blockquotes. The stylesheet is bundled with the package and does not require any configuration.

View File

@@ -1,26 +1,47 @@
---
title: Session Search
description: Search prior Feynman session transcripts
description: Search prior Feynman session transcripts to recall past research.
section: Tools
order: 3
---
## Overview
The session search tool recovers prior Feynman work from stored session transcripts. Every Feynman session is persisted to disk, and session search lets you find and reference past research, findings, and generated artifacts without starting over.
The `session_search` tool recovers prior Feynman work from stored session transcripts. Useful for picking up previous research threads or finding past findings.
## Installation
Session search is an optional package. Install it with:
```bash
feynman packages install session-search
```
Once installed, the `/search` slash command and automatic session recall become available in all future sessions.
## Usage
Inside the REPL:
Inside the REPL, invoke session search directly:
```
/search
/search transformer scaling laws
```
Or use the tool directly — Feynman will invoke `session_search` automatically when you reference prior work.
You can also reference prior work naturally in conversation. Feynman invokes session search automatically when you mention previous research or ask to continue earlier work. For example, saying "pick up where I left off on protein folding" triggers a session search behind the scenes.
## What it searches
- Full session transcripts
- Tool outputs and agent results
- Generated artifacts and their content
Session search indexes the full contents of your session history:
- Full session transcripts including your prompts and Feynman's responses
- Tool outputs and agent results from workflows like deep research and literature review
- Generated artifacts such as drafts, reports, and comparison matrices
- Metadata like timestamps, topics, and workflow types
The search uses both keyword matching and semantic similarity to find relevant past work. Results include the session ID, timestamp, and relevant excerpts so you can quickly identify which session contains the information you need.
## When to use it
Session search is valuable when you want to pick up a previous research thread without rerunning an expensive workflow, find specific findings or citations from a past deep research session, reference prior analysis in a new research context, or check what you have already investigated on a topic before launching a new round.
## How it works
The `@kaiserlich-dev/pi-session-search` package provides the underlying search and indexing. Sessions are stored in `~/.feynman/sessions/` by default (configurable with `--session-dir`). The index is built incrementally as new sessions complete, so search stays fast even with hundreds of past sessions.

View File

@@ -1,34 +1,57 @@
---
title: Web Search
description: Web search routing and configuration
description: Web search routing, configuration, and usage within Feynman.
section: Tools
order: 2
---
Feynman's web search tool retrieves current information from the web during research workflows. It supports multiple simultaneous queries, domain filtering, recency filtering, and optional full-page content retrieval. The researcher agent uses web search alongside AlphaXiv to gather evidence from non-academic sources like blog posts, documentation, news, and code repositories.
## Routing modes
Feynman supports three web search backends:
Feynman supports three web search backends. You can configure which one to use or let Feynman choose automatically:
| Mode | Description |
|------|-------------|
| --- | --- |
| `auto` | Prefer Perplexity when configured, fall back to Gemini |
| `perplexity` | Force Perplexity Sonar |
| `gemini` | Force Gemini (default) |
| `perplexity` | Force Perplexity Sonar for all web searches |
| `gemini` | Force Gemini grounding (default, zero-config) |
## Default behavior
The default path is zero-config Gemini Browser via a signed-in Chromium profile. No API keys required.
The default path is zero-config Gemini grounding via a signed-in Chromium profile. No API keys are required. This works on macOS and Linux where a Chromium-based browser is installed and signed in to a Google account.
## Check current config
For headless environments, CI pipelines, or servers without a browser, configure an explicit API key for either Perplexity or Gemini in `~/.feynman/web-search.json`.
## Configuration
Check the current search configuration:
```bash
feynman search status
```
## Advanced configuration
Edit `~/.feynman/web-search.json` to configure the backend:
Edit `~/.feynman/web-search.json` directly to set:
```json
{
"route": "auto",
"perplexityApiKey": "pplx-...",
"geminiApiKey": "AIza..."
}
```
- Gemini API keys
- Perplexity API keys
- Custom routing preferences
Set `route` to `auto`, `perplexity`, or `gemini`. When using `auto`, Feynman prefers Perplexity if a key is present, then falls back to Gemini.
## Search features
The web search tool supports several capabilities that the researcher agent leverages automatically:
- **Multiple queries** -- Send 2-4 varied-angle queries simultaneously for broader coverage of a topic
- **Domain filtering** -- Restrict results to specific domains like `arxiv.org`, `github.com`, or `nature.com`
- **Recency filtering** -- Filter results by date, useful for fast-moving topics where only recent work matters
- **Full content retrieval** -- Fetch complete page content for the most important results rather than relying on snippets
## When it runs
Web search is used automatically by researcher agents during workflows. You do not need to invoke it directly. The researcher decides when to use web search versus paper search based on the topic and source availability. Academic topics lean toward AlphaXiv; engineering and applied topics lean toward web search.

View File

@@ -1,39 +1,50 @@
---
title: Code Audit
description: Compare paper claims against public codebases
description: Compare a paper's claims against its public codebase for reproducibility.
section: Workflows
order: 4
---
The code audit workflow compares a paper's claims against its public codebase to identify mismatches, undocumented deviations, and reproducibility risks. It bridges the gap between what a paper says and what the code actually does.
## Usage
```
/audit <item>
```
## What it does
Compares claims made in a paper against its public codebase. Surfaces mismatches, missing experiments, and reproducibility risks.
## What it checks
- Do the reported hyperparameters match the code?
- Are all claimed experiments present in the repository?
- Does the training loop match the described methodology?
- Are there undocumented preprocessing steps?
- Do evaluation metrics match the paper's claims?
## Example
From the REPL:
```
/audit 2401.12345
/audit arxiv:2401.12345
```
## Output
```
/audit https://github.com/org/repo --paper arxiv:2401.12345
```
An audit report with:
From the CLI:
- Claim-by-claim verification
- Identified mismatches
- Missing components
- Reproducibility risk assessment
```bash
feynman audit 2401.12345
```
When given an arXiv ID, Feynman locates the associated code repository from the paper's links, Papers With Code, or GitHub search. You can also provide the repository URL directly.
## How it works
The audit workflow operates in two passes. First, the researcher agent reads the paper and extracts all concrete claims: hyperparameters, architecture details, training procedures, dataset splits, evaluation metrics, and reported results. Each claim is tagged with its location in the paper for traceability.
Second, the verifier agent examines the codebase to find the corresponding implementation for each claim. It checks configuration files, training scripts, model definitions, and evaluation code to verify that the code matches the paper's description. When it finds a discrepancy -- a hyperparameter that differs, a training step that was described but not implemented, or an evaluation procedure that deviates from the paper -- it documents the mismatch with exact file paths and line numbers.
The audit also checks for common reproducibility issues like missing random seeds, non-deterministic operations without pinned versions, hardcoded paths, and absent environment specifications.
## Output format
The audit report contains:
- **Match Summary** -- Percentage of claims that match the code
- **Confirmed Claims** -- Claims that are accurately reflected in the codebase
- **Mismatches** -- Discrepancies between paper and code with evidence from both
- **Missing Implementations** -- Claims in the paper with no corresponding code
- **Reproducibility Risks** -- Issues like missing seeds, unpinned dependencies, or hardcoded paths
## When to use it
Use `/audit` when you are deciding whether to build on a paper's results, when replicating an experiment, or when reviewing a paper for a venue and want to verify its claims against the code. It is also useful for auditing your own papers before submission to catch inconsistencies between your writeup and implementation.

View File

@@ -1,44 +1,58 @@
---
title: Autoresearch
description: Autonomous experiment optimization loop
description: Start an autonomous experiment loop that iteratively optimizes toward a goal.
section: Workflows
order: 8
---
The autoresearch workflow launches an autonomous research loop that iteratively designs experiments, runs them, analyzes results, and proposes next steps. It is designed for open-ended exploration where the goal is optimization or discovery rather than a specific answer.
## Usage
```
/autoresearch <idea>
```
## What it does
Runs an autonomous experiment loop:
1. **Edit** — Modify code or configuration
2. **Commit** — Save the change
3. **Benchmark** — Run evaluation
4. **Evaluate** — Compare against baseline
5. **Keep or revert** — Persist improvements, roll back regressions
6. **Repeat** — Continue until the target is hit
## Tracking
Metrics are tracked in:
- `autoresearch.md` — Human-readable progress log
- `autoresearch.jsonl` — Machine-readable metrics over time
## Controls
From the REPL:
```
/autoresearch <idea> # start or resume
/autoresearch off # stop, keep data
/autoresearch clear # delete all state, start fresh
/autoresearch Optimize prompt engineering strategies for math reasoning on GSM8K
```
## Example
From the CLI:
```bash
feynman autoresearch "Optimize prompt engineering strategies for math reasoning on GSM8K"
```
Autoresearch runs as a long-lived background process. You can monitor its progress, pause it, or redirect its focus at any time.
## How it works
The autoresearch workflow is powered by `@tmustier/pi-ralph-wiggum`, which provides long-running agent loops. The workflow begins by analyzing the research goal and designing an initial experiment plan. It then enters an iterative loop:
1. **Hypothesis** -- The agent proposes a hypothesis or modification based on current results
2. **Experiment** -- It designs and executes an experiment to test the hypothesis
3. **Analysis** -- Results are analyzed and compared against prior iterations
4. **Decision** -- The agent decides whether to continue the current direction, try a variation, or pivot to a new approach
Each iteration builds on the previous ones. The agent maintains a running log of what has been tried, what worked, what failed, and what the current best result is. This prevents repeating failed approaches and ensures the search progresses efficiently.
## Monitoring and control
Check active autoresearch jobs:
```
/autoresearch optimize the learning rate schedule for better convergence
/jobs
```
Autoresearch runs in the background, so you can continue using Feynman for other tasks while it works. The `/jobs` command shows the current status, iteration count, and best result so far. You can interrupt the loop at any time to provide guidance or redirect the search.
## Output format
Autoresearch produces a running experiment log that includes:
- **Experiment History** -- What was tried in each iteration with parameters and results
- **Best Configuration** -- The best-performing setup found so far
- **Ablation Results** -- Which factors mattered most based on the experiments run
- **Recommendations** -- Suggested next steps based on observed trends
## When to use it
Use `/autoresearch` for tasks that benefit from iterative exploration: hyperparameter optimization, prompt engineering, architecture search, or any problem where the search space is large and the feedback signal is clear. It is not the right tool for answering a specific question (use `/deepresearch` for that) but excels at finding what works best through systematic experimentation.

View File

@@ -1,29 +1,50 @@
---
title: Source Comparison
description: Compare multiple sources with agreement/disagreement matrix
description: Compare multiple sources and produce an agreement/disagreement matrix.
section: Workflows
order: 6
---
The source comparison workflow analyzes multiple papers, articles, or documents side by side and produces a structured matrix showing where they agree, disagree, and differ in methodology. It is useful for understanding conflicting results, evaluating competing approaches, and identifying which claims have broad support versus limited evidence.
## Usage
```
/compare <topic>
```
## What it does
Compares multiple sources on a topic. Builds an agreement/disagreement matrix showing where sources align and where they conflict.
## Example
From the REPL:
```
/compare approaches to constitutional AI training
/compare "GPT-4 vs Claude vs Gemini on reasoning benchmarks"
```
## Output
```
/compare arxiv:2401.12345 arxiv:2402.67890 arxiv:2403.11111
```
- Source-by-source breakdown
- Agreement/disagreement matrix
- Synthesis of key differences
- Assessment of which positions have stronger evidence
From the CLI:
```bash
feynman compare "topic or list of sources"
```
You can provide a topic and let Feynman find the sources, or list specific papers and documents for a targeted comparison.
## How it works
The comparison workflow begins by identifying or retrieving the sources to compare. If you provide a topic, the researcher agents find the most relevant and contrasting papers. If you provide specific IDs or files, they are used directly.
Each source is analyzed independently first: the researcher agents extract claims, results, methodology, and limitations from each document. Then the comparison engine aligns claims across sources -- identifying where two papers make the same claim (agreement), where they report contradictory results (disagreement), and where they measure different things entirely (non-overlapping scope).
The alignment step handles the nuance that papers often measure slightly different quantities or use different evaluation protocols. The comparison explicitly notes when an apparent disagreement might be explained by methodological differences rather than genuine conflicting results.
## Output format
The comparison produces:
- **Source Summaries** -- One-paragraph summary of each source's key contributions
- **Agreement Matrix** -- Claims supported by multiple sources with citation evidence
- **Disagreement Matrix** -- Conflicting claims with analysis of why sources diverge
- **Methodology Differences** -- How the sources differ in approach, data, and evaluation
- **Synthesis** -- An overall assessment of which claims are well-supported and which remain contested
## When to use it
Use `/compare` when you encounter contradictory results in the literature, when evaluating competing approaches to the same problem, or when you need to understand how different research groups frame the same topic. It is also useful for writing related work sections where you need to accurately characterize the state of debate.

View File

@@ -1,40 +1,48 @@
---
title: Deep Research
description: Thorough source-heavy investigation with parallel agents
description: Run a thorough, multi-agent investigation that produces a cited research brief.
section: Workflows
order: 1
---
Deep research is the flagship Feynman workflow. It dispatches multiple researcher agents in parallel to search academic papers, web sources, and code repositories, then synthesizes everything into a structured research brief with inline citations.
## Usage
```
/deepresearch <topic>
```
## What it does
Deep research runs a thorough, source-heavy investigation. It plans the research scope, delegates to parallel researcher agents, synthesizes findings, and adds inline citations.
The workflow follows these steps:
1. **Plan** — Clarify the research question and identify search strategy
2. **Delegate** — Spawn parallel researcher agents to gather evidence from different source types (papers, web, repos)
3. **Synthesize** — Merge findings, resolve contradictions, identify gaps
4. **Cite** — Add inline citations and verify all source URLs
5. **Deliver** — Write a durable research brief to `outputs/`
## Example
From the REPL:
```
/deepresearch transformer scaling laws and their implications for compute-optimal training
/deepresearch What are the current approaches to mechanistic interpretability in LLMs?
```
## Output
From the CLI:
Produces a structured research brief with:
```bash
feynman deepresearch "What are the current approaches to mechanistic interpretability in LLMs?"
```
- Executive summary
- Key findings organized by theme
- Evidence tables with source links
- Open questions and suggested next steps
- Numbered sources section with direct URLs
Both forms are equivalent. The workflow begins immediately and streams progress as agents discover and analyze sources.
## How it works
The deep research workflow proceeds through four phases. First, the researcher agents fan out to search AlphaXiv for relevant papers and the web for non-academic sources like blog posts, documentation, and code repositories. Each agent tackles a different angle of the topic to maximize coverage.
Second, the agents read and extract key findings from the most relevant sources. They pull claims, methodology details, results, and limitations from each paper or article. For academic papers, they access the full PDF through AlphaXiv when available.
Third, a synthesis step cross-references findings across sources, identifies areas of consensus and disagreement, and organizes the material into a coherent narrative. The writer agent structures the output as a research brief with sections for background, key findings, open questions, and references.
Finally, the verifier agent spot-checks claims against their cited sources to flag any misattributions or unsupported assertions. The finished report is saved to your session directory and can be previewed as rendered HTML with `/preview`.
## Output format
The research brief follows a consistent structure:
- **Summary** -- A concise overview of the topic and key takeaways
- **Background** -- Context and motivation for the research area
- **Key Findings** -- The main results organized by theme, with inline citations
- **Open Questions** -- Unresolved issues and promising research directions
- **References** -- Full citation list with links to source papers and articles
## Customization
You can steer the research by being specific in your prompt. Narrow topics produce more focused briefs. Broad topics produce survey-style overviews. You can also specify constraints like "focus on papers from 2024" or "only consider empirical results" to guide the agents.

View File

@@ -1,37 +1,51 @@
---
title: Draft Writing
description: Paper-style draft generation from research findings
description: Generate a paper-style draft from research findings and session context.
section: Workflows
order: 7
---
The draft writing workflow generates structured academic-style documents from your research findings. It uses the writer agent to produce well-organized prose with proper citations, sections, and formatting suitable for papers, reports, or blog posts.
## Usage
```
/draft <topic>
```
## What it does
Produces a paper-style draft with structured sections. Writes to `papers/`.
## Structure
The generated draft includes:
- Title
- Abstract
- Introduction / Background
- Method or Approach
- Evidence and Analysis
- Limitations
- Conclusion
- Sources
## Example
From the REPL:
```
/draft survey of differentiable physics simulators
/draft A survey of retrieval-augmented generation techniques
```
The writer agent works only from supplied evidence — it never fabricates content. If evidence is insufficient, it explicitly notes the gaps.
```
/draft --from-session
```
From the CLI:
```bash
feynman draft "A survey of retrieval-augmented generation techniques"
```
When used with `--from-session`, the writer draws from the current session's research findings, making it a natural follow-up to a deep research or literature review workflow.
## How it works
The draft workflow leverages the writer agent, which specializes in producing structured academic prose. When given a topic, it first consults the researcher agents to gather source material, then organizes the findings into a coherent document with proper narrative flow.
When working from existing session context (after a deep research or literature review), the writer skips the research phase and works directly with the findings already gathered. This produces a more focused draft because the source material has already been vetted and organized.
The writer pays attention to academic conventions: claims are attributed to their sources with inline citations, methodology sections describe procedures precisely, and limitations are discussed honestly. The draft includes placeholder sections for any content the writer cannot generate from available sources, clearly marking what needs human input.
## Output format
The draft follows standard academic structure:
- **Abstract** -- Concise summary of the document's scope and findings
- **Introduction** -- Motivation, context, and contribution statement
- **Body Sections** -- Organized by topic with subsections as needed
- **Discussion** -- Interpretation of findings and implications
- **Limitations** -- Honest assessment of scope and gaps
- **References** -- Complete bibliography in a consistent citation format
## Preview and iteration
After generating the draft, use `/preview` to render it as HTML or PDF with proper formatting, math rendering, and typography. You can iterate on the draft by asking Feynman to revise specific sections, add more detail, or restructure the argument.

View File

@@ -1,31 +1,45 @@
---
title: Literature Review
description: Map consensus, disagreements, and open questions
description: Run a structured literature review with consensus mapping and gap analysis.
section: Workflows
order: 2
---
The literature review workflow produces a structured survey of the academic landscape on a given topic. Unlike deep research which aims for a comprehensive brief, the literature review focuses specifically on mapping the state of the field -- what researchers agree on, where they disagree, and what remains unexplored.
## Usage
```
/lit <topic>
```
## What it does
Runs a structured literature review that searches across academic papers and web sources. Explicitly separates consensus findings from disagreements and open questions.
## Example
From the REPL:
```
/lit multimodal reasoning benchmarks for large language models
/lit Scaling laws for language model performance
```
## Output
From the CLI:
A structured review covering:
```bash
feynman lit "Scaling laws for language model performance"
```
- **Consensus** — What the field agrees on
- **Disagreements** — Where sources conflict
- **Open questions** — What remains unresolved
- **Sources** — Direct links to all referenced papers and articles
## How it works
The literature review workflow begins by having researcher agents search for papers on the topic across AlphaXiv and the web. The agents prioritize survey papers, highly-cited foundational work, and recent publications to capture both established knowledge and the current frontier.
After gathering sources, the agents extract claims, results, and methodology from each paper. The synthesis step then organizes findings into a structured review that maps out where the community has reached consensus, where active debate exists, and where gaps in the literature remain.
The output is organized chronologically and thematically, showing how ideas evolved over time and how different research groups approach the problem differently. Citation counts and publication venues are used as signals for weighting claims, though the review explicitly notes when influential work contradicts the mainstream view.
## Output format
The literature review produces:
- **Scope and Methodology** -- What was searched and how papers were selected
- **Consensus** -- Claims that most papers agree on, with supporting citations
- **Disagreements** -- Active debates where papers present conflicting evidence or interpretations
- **Open Questions** -- Topics that the literature has not adequately addressed
- **Timeline** -- Key milestones and how the field evolved
- **References** -- Complete bibliography organized by relevance
## When to use it
Use `/lit` when you need a map of the research landscape rather than a deep dive into a specific question. It is particularly useful at the start of a new research project when you need to understand what has already been done, or when preparing a related work section for a paper.

View File

@@ -1,42 +1,50 @@
---
title: Replication
description: Plan replications of papers and claims
description: Plan or execute a replication of a paper's experiments and claims.
section: Workflows
order: 5
---
The replication workflow helps you plan and execute reproductions of published experiments, benchmark results, or specific claims. It generates a detailed replication plan, identifies potential pitfalls, and can guide you through the execution step by step.
## Usage
```
/replicate <paper or claim>
```
## What it does
Extracts key implementation details from a paper, identifies what's needed to replicate the results, and asks where to run before executing anything.
Before running code, Feynman asks you to choose an execution environment:
- **Local** — run in the current working directory
- **Virtual environment** — create an isolated venv/conda env first
- **Docker** — run experiment code inside an isolated Docker container
- **Plan only** — produce the replication plan without executing
## Example
From the REPL:
```
/replicate "chain-of-thought prompting improves math reasoning"
/replicate arxiv:2401.12345
```
## Output
```
/replicate "The claim that sparse attention achieves 95% of dense attention quality at 60% compute"
```
A replication plan covering:
From the CLI:
- Key claims to verify
- Required resources (compute, data, models)
- Implementation details extracted from the paper
- Potential pitfalls and underspecified details
- Step-by-step replication procedure
- Success criteria
```bash
feynman replicate "paper or claim"
```
If an execution environment is selected, the workflow also produces runnable scripts and captured results.
You can point the workflow at a full paper for a comprehensive replication plan, or at a specific claim for a focused reproduction.
## How it works
The replication workflow starts with the researcher agent reading the target paper and extracting every detail needed for reproduction: model architecture, hyperparameters, training schedule, dataset preparation, evaluation protocol, and hardware requirements. It cross-references these details against the codebase (if available) using the same machinery as the code audit workflow.
Next, the workflow generates a structured replication plan that breaks the experiment into discrete steps, estimates compute and time requirements, and identifies where the paper is underspecified. For each underspecified detail, it suggests reasonable defaults based on common practices in the field and flags the assumption as a potential source of divergence.
The plan also includes a risk assessment: which parts of the experiment are most likely to cause replication failure, what tolerance to expect for numerical results, and which claims are most sensitive to implementation details.
## Output format
The replication plan includes:
- **Requirements** -- Hardware, software, data, and estimated compute cost
- **Step-by-step Plan** -- Ordered steps from environment setup through final evaluation
- **Underspecified Details** -- Where the paper leaves out information needed for replication
- **Risk Assessment** -- Which steps are most likely to cause divergence from reported results
- **Success Criteria** -- What results would constitute a successful replication
## Iterative execution
After generating the plan, you can execute the replication interactively. Feynman walks you through each step, helps you write the code, monitors training runs, and compares intermediate results against the paper's reported values. When results diverge, it helps diagnose whether the cause is an implementation difference, a hyperparameter mismatch, or a genuine replication failure.

View File

@@ -1,49 +1,52 @@
---
title: Peer Review
description: Simulated peer review with severity-graded feedback
description: Simulate a rigorous peer review with severity-graded feedback.
section: Workflows
order: 3
---
The peer review workflow simulates a thorough academic peer review of a paper, draft, or research artifact. It produces severity-graded feedback with inline annotations, covering methodology, claims, writing quality, and reproducibility.
## Usage
```
/review <artifact>
```
## What it does
Simulates a tough-but-fair peer review for AI research artifacts. Evaluates novelty, empirical rigor, baselines, ablations, and reproducibility.
The reviewer agent identifies:
- Weak baselines
- Missing ablations
- Evaluation mismatches
- Benchmark leakage
- Under-specified implementation details
## Severity levels
Feedback is graded by severity:
- **FATAL** — Fundamental issues that invalidate the claims
- **MAJOR** — Significant problems that need addressing
- **MINOR** — Small improvements or clarifications
## Example
From the REPL:
```
/review outputs/scaling-laws-brief.md
/review arxiv:2401.12345
```
## Output
```
/review ~/papers/my-draft.pdf
```
Structured review with:
From the CLI:
- Summary of the work
- Strengths
- Weaknesses (severity-graded)
- Questions for the authors
- Verdict (accept / revise / reject)
- Revision plan
```bash
feynman review arxiv:2401.12345
feynman review my-draft.md
```
You can pass an arXiv ID, a URL, or a local file path. For arXiv papers, Feynman fetches the full PDF through AlphaXiv.
## How it works
The review workflow assigns the reviewer agent to read the document end-to-end and evaluate it against standard academic criteria. The reviewer examines the paper's claims, checks whether the methodology supports the conclusions, evaluates the experimental design for potential confounds, and assesses the clarity and completeness of the writing.
Each piece of feedback is assigned a severity level: **critical** (fundamental issues that undermine the paper's validity), **major** (significant problems that should be addressed), **minor** (suggestions for improvement), or **nit** (stylistic or formatting issues). This grading helps you triage feedback and focus on what matters most.
The reviewer also produces a summary assessment with an overall recommendation and a confidence score indicating how certain it is about each finding. When the reviewer identifies a claim that cannot be verified from the paper alone, it flags it as needing additional evidence.
## Output format
The review output includes:
- **Summary Assessment** -- Overall evaluation and recommendation
- **Strengths** -- What the paper does well
- **Critical Issues** -- Fundamental problems that need to be addressed
- **Major Issues** -- Significant concerns with suggested fixes
- **Minor Issues** -- Smaller improvements and suggestions
- **Inline Annotations** -- Specific comments tied to sections of the document
## Customization
You can focus the review by specifying what to examine: "focus on the statistical methodology" or "check the claims in Section 4 against the experimental results." The reviewer adapts its analysis to your priorities while still performing a baseline check of the full document.

View File

@@ -1,29 +1,54 @@
---
title: Watch
description: Recurring research monitoring
description: Set up recurring research monitoring on a topic.
section: Workflows
order: 9
---
The watch workflow sets up recurring research monitoring that periodically checks for new papers, articles, and developments on a topic you care about. It notifies you when something relevant appears and can automatically summarize new findings.
## Usage
```
/watch <topic>
```
## What it does
Schedules a recurring research watch. Sets a baseline of current knowledge and defines what constitutes a meaningful change worth reporting.
## Example
From the REPL:
```
/watch new papers on test-time compute scaling
/watch New developments in state space models for sequence modeling
```
From the CLI:
```bash
feynman watch "New developments in state space models for sequence modeling"
```
After setting up a watch, Feynman periodically runs searches on the topic and alerts you when it finds new relevant material.
## How it works
1. Feynman establishes a baseline by surveying current sources
2. Defines change signals (new papers, updated results, new repos)
3. Schedules periodic checks via `pi-schedule-prompt`
4. Reports only when meaningful changes are detected
The watch workflow is built on `pi-schedule-prompt`, which manages scheduled and recurring tasks. When you create a watch, Feynman stores the topic and search parameters, then runs a lightweight search at regular intervals (default: daily).
Each check searches AlphaXiv for new papers and the web for new articles matching your topic. Results are compared against what was found in previous checks to surface only genuinely new material. When new items are found, Feynman produces a brief summary of each and stores it in your session history.
The watch is smart about relevance. It does not just keyword-match -- it uses the same researcher agent that powers deep research to evaluate whether new papers are genuinely relevant to your topic or just superficially related. This keeps the signal-to-noise ratio high even for broad topics.
## Managing watches
List active watches:
```
/jobs
```
The `/jobs` command shows all active watches along with their schedule, last check time, and number of new items found. You can pause, resume, or delete watches from within the REPL.
## Output format
Each watch check produces:
- **New Papers** -- Titles, authors, and one-paragraph summaries of newly discovered papers
- **New Articles** -- Relevant blog posts, documentation updates, or news articles
- **Relevance Notes** -- Why each item was flagged as relevant to your watch topic
## When to use it
Use `/watch` to stay current on a research area without manually searching every day. It is particularly useful for fast-moving fields where new papers appear frequently, for tracking specific research groups or topics related to your own work, and for monitoring the literature while you focus on other tasks.

View File

@@ -1,61 +0,0 @@
---
import { ViewTransitions } from 'astro:transitions';
import Nav from '../components/Nav.astro';
import Footer from '../components/Footer.astro';
import '../styles/global.css';
interface Props {
title: string;
description?: string;
active?: 'home' | 'docs';
}
const { title, description = 'Research-first AI agent', active = 'home' } = Astro.props;
---
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta name="description" content={description} />
<title>{title}</title>
<link rel="preconnect" href="https://fonts.googleapis.com" />
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
<link href="https://fonts.googleapis.com/css2?family=VT323&display=swap" rel="stylesheet" />
<ViewTransitions fallback="none" />
<script is:inline>
(function() {
var stored = localStorage.getItem('theme');
var prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
if (stored === 'dark' || (!stored && prefersDark)) {
document.documentElement.classList.add('dark');
}
})();
</script>
<script is:inline>
document.addEventListener('astro:after-swap', function() {
var stored = localStorage.getItem('theme');
var prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
var shouldBeDark = stored === 'dark' || (!stored && prefersDark);
if (shouldBeDark) {
document.documentElement.classList.add('dark');
} else {
document.documentElement.classList.remove('dark');
}
var isDark = document.documentElement.classList.contains('dark');
var sun = document.getElementById('sun-icon');
var moon = document.getElementById('moon-icon');
if (sun) sun.style.display = isDark ? 'block' : 'none';
if (moon) moon.style.display = isDark ? 'none' : 'block';
});
</script>
</head>
<body class="min-h-screen flex flex-col antialiased">
<Nav active={active} />
<main class="flex-1">
<slot />
</main>
<Footer />
</body>
</html>

View File

@@ -1,79 +0,0 @@
---
import Base from './Base.astro';
import Sidebar from '../components/Sidebar.astro';
interface Props {
title: string;
description?: string;
currentSlug: string;
}
const { title, description, currentSlug } = Astro.props;
---
<Base title={`${title} — Feynman Docs`} description={description} active="docs">
<div class="max-w-6xl mx-auto px-6">
<div class="flex gap-8">
<Sidebar currentSlug={currentSlug} />
<button id="mobile-menu-btn" class="lg:hidden fixed bottom-6 right-6 z-40 p-3 rounded-full bg-accent text-bg shadow-lg" aria-label="Toggle sidebar">
<svg class="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
<path d="M4 6h16M4 12h16M4 18h16" />
</svg>
</button>
<div id="mobile-overlay" class="hidden fixed inset-0 bg-black/50 z-30 lg:hidden"></div>
<article class="flex-1 min-w-0 py-8 max-w-3xl">
<h1 class="text-3xl font-bold mb-8 tracking-tight">{title}</h1>
<div class="prose">
<slot />
</div>
</article>
</div>
</div>
<script is:inline>
(function() {
function init() {
var btn = document.getElementById('mobile-menu-btn');
var sidebar = document.getElementById('sidebar');
var overlay = document.getElementById('mobile-overlay');
if (btn && sidebar && overlay) {
function toggle() {
sidebar.classList.toggle('hidden');
sidebar.classList.toggle('fixed');
sidebar.classList.toggle('inset-0');
sidebar.classList.toggle('z-40');
sidebar.classList.toggle('bg-bg');
sidebar.classList.toggle('w-full');
sidebar.classList.toggle('p-6');
overlay.classList.toggle('hidden');
}
btn.addEventListener('click', toggle);
overlay.addEventListener('click', toggle);
}
document.querySelectorAll('.prose pre').forEach(function(pre) {
if (pre.querySelector('.copy-code')) return;
var copyBtn = document.createElement('button');
copyBtn.className = 'copy-code';
copyBtn.setAttribute('aria-label', 'Copy code');
copyBtn.innerHTML = '<svg width="14" height="14" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2"/><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"/></svg>';
pre.appendChild(copyBtn);
copyBtn.addEventListener('click', function() {
var code = pre.querySelector('code');
var text = code ? code.textContent : pre.textContent;
navigator.clipboard.writeText(text);
copyBtn.innerHTML = '<svg width="14" height="14" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><path d="M20 6L9 17l-5-5"/></svg>';
setTimeout(function() {
copyBtn.innerHTML = '<svg width="14" height="14" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2"/><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"/></svg>';
}, 2000);
});
});
}
document.addEventListener('DOMContentLoaded', init);
document.addEventListener('astro:after-swap', init);
})();
</script>
</Base>

View File

@@ -0,0 +1,149 @@
---
import "@/styles/global.css"
import { ViewTransitions } from "astro:transitions"
interface Props {
title?: string
description?: string
active?: "home" | "docs"
}
const {
title = "Feynman - The open source AI research agent",
description = "An AI-powered research agent that helps you discover, analyze, and synthesize scientific literature.",
active = "home",
} = Astro.props
---
<html lang="en" class="dark">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link rel="icon" type="image/svg+xml" href="/favicon.svg" />
<meta name="description" content={description} />
<title>{title}</title>
<link rel="preconnect" href="https://fonts.googleapis.com" />
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
<link href="https://fonts.googleapis.com/css2?family=VT323&display=swap" rel="stylesheet" />
<ViewTransitions />
<script is:inline>
;(function () {
const theme = localStorage.getItem("theme")
if (theme === "dark" || (!theme && window.matchMedia("(prefers-color-scheme: dark)").matches)) {
document.documentElement.classList.add("dark")
} else {
document.documentElement.classList.remove("dark")
}
})()
</script>
</head>
<body class="flex min-h-screen flex-col bg-background text-foreground antialiased">
<nav class="sticky top-0 z-50 bg-background">
<div class="mx-auto flex h-14 max-w-6xl items-center justify-between px-6">
<a href="/" class="flex items-center gap-2">
<span class="font-['VT323'] text-2xl text-primary">feynman</span>
</a>
<div class="flex items-center gap-6">
<a
href="/docs/getting-started/installation"
class:list={[
"text-sm transition-colors hover:text-foreground",
active === "docs" ? "text-foreground" : "text-muted-foreground",
]}
>
Docs
</a>
<a
href="https://github.com/getcompanion-ai/feynman"
target="_blank"
rel="noopener noreferrer"
class="text-sm text-muted-foreground transition-colors hover:text-foreground"
>
GitHub
</a>
<button
id="theme-toggle"
type="button"
class="inline-flex size-9 items-center justify-center rounded-md text-muted-foreground transition-colors hover:bg-muted hover:text-foreground"
aria-label="Toggle theme"
>
<svg
id="sun-icon"
class="hidden size-4"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="2"
stroke="currentColor"
>
<path stroke-linecap="round" stroke-linejoin="round" d="M12 3v2.25m6.364.386l-1.591 1.591M21 12h-2.25m-.386 6.364l-1.591-1.591M12 18.75V21m-4.773-4.227l-1.591 1.591M5.25 12H3m4.227-4.773L5.636 5.636M15.75 12a3.75 3.75 0 11-7.5 0 3.75 3.75 0 017.5 0z" />
</svg>
<svg
id="moon-icon"
class="hidden size-4"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="2"
stroke="currentColor"
>
<path stroke-linecap="round" stroke-linejoin="round" d="M21.752 15.002A9.718 9.718 0 0118 15.75c-5.385 0-9.75-4.365-9.75-9.75 0-1.33.266-2.597.748-3.752A9.753 9.753 0 003 11.25C3 16.635 7.365 21 12.75 21a9.753 9.753 0 009.002-5.998z" />
</svg>
</button>
</div>
</nav>
<main class="flex-1">
<slot />
</main>
<footer>
<div class="mx-auto flex max-w-6xl flex-col items-center justify-between gap-4 px-6 py-8 sm:flex-row">
<p class="text-sm text-muted-foreground">
&copy; {new Date().getFullYear()} Feynman. Open source under MIT.
</p>
<div class="flex items-center gap-4 text-sm">
<a href="/docs/getting-started/installation" class="text-muted-foreground transition-colors hover:text-foreground">Docs</a>
<a
href="https://github.com/getcompanion-ai/feynman"
target="_blank"
rel="noopener noreferrer"
class="text-muted-foreground transition-colors hover:text-foreground"
>
GitHub
</a>
</div>
</div>
</footer>
<script is:inline>
// Show the sun icon while in dark mode (click = go light) and the moon icon
// while in light mode, by flipping the Tailwind "hidden" utility class.
function updateThemeIcons() {
  const dark = document.documentElement.classList.contains("dark")
  const sun = document.getElementById("sun-icon")
  const moon = document.getElementById("moon-icon")
  sun.classList.toggle("hidden", !dark)
  moon.classList.toggle("hidden", dark)
}
// Wire the header button: flip the theme class, persist the choice, refresh icons.
function setupThemeToggle() {
  updateThemeIcons()
  const button = document.getElementById("theme-toggle")
  button.addEventListener("click", () => {
    // classList.toggle() returns true when the class was just added (now dark).
    const nowDark = document.documentElement.classList.toggle("dark")
    localStorage.setItem("theme", nowDark ? "dark" : "light")
    updateThemeIcons()
  })
}
// Initial wiring on the first full page load.
setupThemeToggle()
// Astro's ViewTransitions swap in a fresh <body> on client-side navigation, which
// discards the old toggle button and its listener. Re-apply the persisted/OS theme
// (the new document may carry a stale class) and re-wire the new button.
// NOTE(review): assumes this is:inline script is NOT re-executed after a swap, so
// only one astro:after-swap listener ever exists — confirm against Astro's script
// re-execution rules for ViewTransitions.
document.addEventListener("astro:after-swap", function () {
  const theme = localStorage.getItem("theme")
  if (theme === "dark" || (!theme && window.matchMedia("(prefers-color-scheme: dark)").matches)) {
    document.documentElement.classList.add("dark")
  } else {
    document.documentElement.classList.remove("dark")
  }
  setupThemeToggle()
})
</script>
</body>
</html>

6
website/src/lib/utils.ts Normal file
View File

@@ -0,0 +1,6 @@
import { clsx, type ClassValue } from "clsx"
import { twMerge } from "tailwind-merge"
/**
 * Build a class string from arbitrary class values (strings, arrays, objects)
 * via clsx, then let tailwind-merge resolve conflicting Tailwind utilities so
 * the last one wins.
 */
export function cn(...inputs: ClassValue[]) {
  const flattened = clsx(inputs)
  return twMerge(flattened)
}

View File

@@ -0,0 +1,13 @@
---
import Layout from "@/layouts/main.astro"
---
<Layout title="404 — Feynman">
<section class="flex flex-1 items-center justify-center">
<div class="flex flex-col items-center gap-4 text-center">
<h1 class="font-['VT323'] text-9xl text-primary">404</h1>
<p class="text-lg text-muted-foreground">Page not found.</p>
<a href="/" class="text-sm text-primary hover:underline">Back to home</a>
</div>
</section>
</Layout>

View File

@@ -1,6 +1,6 @@
---
import { getCollection } from 'astro:content';
import Docs from '../../layouts/Docs.astro';
import Layout from '@/layouts/main.astro';
export async function getStaticPaths() {
const docs = await getCollection('docs');
@@ -12,8 +12,143 @@ export async function getStaticPaths() {
const { entry } = Astro.props;
const { Content } = await entry.render();
const currentSlug = entry.slug;
const sections = [
{
title: 'Getting Started',
items: [
{ label: 'Installation', slug: 'getting-started/installation' },
{ label: 'Quick Start', slug: 'getting-started/quickstart' },
{ label: 'Setup', slug: 'getting-started/setup' },
{ label: 'Configuration', slug: 'getting-started/configuration' },
],
},
{
title: 'Workflows',
items: [
{ label: 'Deep Research', slug: 'workflows/deep-research' },
{ label: 'Literature Review', slug: 'workflows/literature-review' },
{ label: 'Peer Review', slug: 'workflows/review' },
{ label: 'Code Audit', slug: 'workflows/audit' },
{ label: 'Replication', slug: 'workflows/replication' },
{ label: 'Source Comparison', slug: 'workflows/compare' },
{ label: 'Draft Writing', slug: 'workflows/draft' },
{ label: 'Autoresearch', slug: 'workflows/autoresearch' },
{ label: 'Watch', slug: 'workflows/watch' },
],
},
{
title: 'Agents',
items: [
{ label: 'Researcher', slug: 'agents/researcher' },
{ label: 'Reviewer', slug: 'agents/reviewer' },
{ label: 'Writer', slug: 'agents/writer' },
{ label: 'Verifier', slug: 'agents/verifier' },
],
},
{
title: 'Tools',
items: [
{ label: 'AlphaXiv', slug: 'tools/alphaxiv' },
{ label: 'Web Search', slug: 'tools/web-search' },
{ label: 'Session Search', slug: 'tools/session-search' },
{ label: 'Preview', slug: 'tools/preview' },
],
},
{
title: 'Reference',
items: [
{ label: 'CLI Commands', slug: 'reference/cli-commands' },
{ label: 'Slash Commands', slug: 'reference/slash-commands' },
{ label: 'Package Stack', slug: 'reference/package-stack' },
],
},
];
---
<Docs title={entry.data.title} description={entry.data.description} currentSlug={entry.slug}>
<Layout title={`${entry.data.title} — Feynman Docs`} description={entry.data.description} active="docs">
<div class="max-w-6xl mx-auto px-6">
<div class="flex gap-8">
<aside id="sidebar" class="w-64 shrink-0 h-[calc(100vh-3.5rem)] sticky top-14 overflow-y-auto py-6 pr-4 hidden lg:block border-r border-border">
{sections.map((section) => (
<div class="mb-6">
<div class="text-xs font-semibold text-primary uppercase tracking-wider px-3 mb-2">{section.title}</div>
{section.items.map((item) => (
<a
href={`/docs/${item.slug}`}
class:list={[
'block px-3 py-1.5 text-sm border-l-[2px] transition-colors',
currentSlug === item.slug
? 'border-primary text-foreground'
: 'border-transparent text-muted-foreground hover:text-foreground',
]}
>
{item.label}
</a>
))}
</div>
))}
</aside>
<button id="mobile-menu-btn" class="lg:hidden fixed bottom-6 right-6 z-40 p-3 rounded-full bg-primary text-primary-foreground shadow-lg" aria-label="Toggle sidebar">
<svg class="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
<path d="M4 6h16M4 12h16M4 18h16" />
</svg>
</button>
<div id="mobile-overlay" class="hidden fixed inset-0 bg-black/50 z-30 lg:hidden"></div>
<article class="flex-1 min-w-0 py-8 max-w-3xl">
<h1 class="text-3xl font-bold mb-8 tracking-tight">{entry.data.title}</h1>
<div class="prose">
<Content />
</Docs>
</div>
</article>
</div>
</div>
<script is:inline>
// Page-level wiring for the docs layout: mobile sidebar toggle plus
// copy-to-clipboard buttons on every rendered code block.
(function() {
// Runs on initial load and again after each Astro view-transition swap,
// since the swapped-in <body> contains fresh, unwired elements.
function init() {
var btn = document.getElementById('mobile-menu-btn');
var sidebar = document.getElementById('sidebar');
var overlay = document.getElementById('mobile-overlay');
if (btn && sidebar && overlay) {
// Flips the sidebar between its hidden desktop-collapsed state and a
// full-screen overlay panel. The same class set is toggled each call, so
// behavior depends on the markup's initial classes staying in sync with
// this list.
function toggle() {
sidebar.classList.toggle('hidden');
sidebar.classList.toggle('fixed');
sidebar.classList.toggle('inset-0');
sidebar.classList.toggle('z-40');
sidebar.classList.toggle('bg-background');
sidebar.classList.toggle('w-full');
sidebar.classList.toggle('p-6');
overlay.classList.toggle('hidden');
}
btn.addEventListener('click', toggle);
// Clicking the dimmed backdrop also closes the panel.
overlay.addEventListener('click', toggle);
}
// Attach a copy button to each code block in the prose area.
document.querySelectorAll('.prose pre').forEach(function(pre) {
// Guard against double-insertion if init() runs more than once on the same DOM.
if (pre.querySelector('.copy-code')) return;
var copyBtn = document.createElement('button');
copyBtn.className = 'copy-code';
copyBtn.setAttribute('aria-label', 'Copy code');
copyBtn.innerHTML = '<svg width="14" height="14" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2"/><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"/></svg>';
pre.appendChild(copyBtn);
copyBtn.addEventListener('click', function() {
// Prefer the inner <code> text; fall back to the <pre> itself.
var code = pre.querySelector('code');
var text = code ? code.textContent : pre.textContent;
navigator.clipboard.writeText(text);
// Swap to a checkmark icon briefly as copy feedback, then restore the copy icon.
copyBtn.innerHTML = '<svg width="14" height="14" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><path d="M20 6L9 17l-5-5"/></svg>';
setTimeout(function() {
copyBtn.innerHTML = '<svg width="14" height="14" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2"/><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"/></svg>';
}, 2000);
});
});
}
document.addEventListener('DOMContentLoaded', init);
document.addEventListener('astro:after-swap', init);
})();
</script>
</Layout>

View File

@@ -0,0 +1,3 @@
---
return Astro.redirect('/docs/getting-started/installation')
---

View File

@@ -1,169 +1,277 @@
---
import Base from '../layouts/Base.astro';
import Layout from "@/layouts/main.astro"
import { Button } from "@/components/ui/button"
import { Card, CardHeader, CardTitle, CardDescription, CardContent } from "@/components/ui/card"
import { Separator } from "@/components/ui/separator"
import { Badge } from "@/components/ui/badge"
import { Star } from "lucide-react"
const workflows = [
{ command: "/deepresearch", description: "Run a multi-step research pipeline on any scientific question" },
{ command: "/lit", description: "Search and summarize relevant literature from multiple databases" },
{ command: "/review", description: "Get an AI-powered peer review of any paper or manuscript" },
{ command: "/audit", description: "Audit methodology, statistics, and reproducibility of a study" },
{ command: "/replicate", description: "Attempt to reproduce key findings with available data" },
{ command: "/compare", description: "Compare and contrast multiple papers side by side" },
{ command: "/draft", description: "Draft a manuscript section from your research notes and sources" },
{ command: "/autoresearch", description: "Autonomous research loop that iterates until a question is answered" },
{ command: "/watch", description: "Monitor topics or authors for new publications and preprints" },
]
const agents = [
{ name: "Researcher", description: "Searches databases, reads papers, extracts findings" },
{ name: "Reviewer", description: "Evaluates methodology, identifies gaps, checks claims" },
{ name: "Writer", description: "Synthesizes sources into structured prose and citations" },
{ name: "Verifier", description: "Cross-references claims, checks data, validates results" },
]
const sources = [
{ name: "AlphaXiv", description: "Real-time access to arXiv papers with community annotations", href: "https://alphaxiv.org" },
{ name: "Web search", description: "Search the open web for supplementary context and data" },
{ name: "Preview", description: "More source integrations coming soon" },
]
const terminalCommands = [
{ command: "feynman /deepresearch \"What causes Alzheimer's disease?\"", description: "Run a multi-step deep research pipeline" },
{ command: "feynman /lit \"CRISPR gene therapy 2024\"", description: "Search and summarize recent literature" },
{ command: "feynman /review ./my-paper.pdf", description: "Get an AI peer review of your manuscript" },
{ command: "feynman /audit 2401.12345", description: "Audit a paper's methodology and statistics" },
{ command: "feynman /watch \"transformer architectures\"", description: "Monitor a topic for new publications" },
]
---
<Base title="Feynman The open source AI research agent" active="home">
<section class="text-center pt-24 pb-20 px-6">
<div class="max-w-2xl mx-auto">
<h1 class="text-5xl sm:text-6xl font-bold tracking-tight mb-6" style="text-wrap: balance">Research from the command line</h1>
<p class="text-lg text-text-muted mb-10 leading-relaxed" style="text-wrap: pretty">Feynman reads papers, searches the web, writes drafts, runs experiments, and cites every claim. Open source. Runs locally.</p>
<button id="copy-btn" class="group inline-flex items-center justify-between gap-3 bg-surface rounded-lg px-5 py-3 mb-8 font-mono text-sm border border-border hover:border-accent/40 hover:text-accent transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent max-w-full" aria-label="Copy install command">
<code class="text-accent text-left">curl -fsSL https://feynman.is/install | bash</code>
<span id="copy-icon" class="shrink-0 text-text-dim group-hover:text-accent transition-colors" aria-hidden="true">
<svg class="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2" /><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1" /></svg>
</span>
<Layout title="Feynman - The open source AI research agent" active="home">
<div class="mx-auto max-w-5xl px-6">
<!-- Hero -->
<section class="flex flex-col items-center gap-8 pb-16 pt-20 text-center">
<div class="flex max-w-3xl flex-col gap-4">
<h1 class="text-4xl font-bold tracking-tight sm:text-5xl lg:text-6xl">
The open source AI<br />research agent
</h1>
<p class="mx-auto max-w-2xl text-lg text-muted-foreground">
Discover, analyze, and synthesize scientific literature from your terminal.
Feynman orchestrates AI agents to do the heavy lifting so you can focus on what matters.
</p>
</div>
<div class="flex flex-col items-center gap-4">
<button
id="install-cmd"
class="group flex items-center gap-3 rounded-lg bg-muted px-4 py-2.5 font-mono text-sm transition-colors hover:bg-muted/80 cursor-pointer"
>
<span class="text-muted-foreground">$</span>
<span>curl -fsSL https://feynman.is/install | bash</span>
<svg
id="copy-icon"
class="size-4 shrink-0 text-muted-foreground transition-colors group-hover:text-foreground"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="2"
stroke="currentColor"
>
<path stroke-linecap="round" stroke-linejoin="round" d="M15.666 3.888A2.25 2.25 0 0013.5 2.25h-3c-1.03 0-1.9.693-2.166 1.638m7.332 0c.055.194.084.4.084.612v0a.75.75 0 01-.75.75H9.75a.75.75 0 01-.75-.75v0c0-.212.03-.418.084-.612m7.332 0c.646.049 1.288.11 1.927.184 1.1.128 1.907 1.077 1.907 2.185V19.5a2.25 2.25 0 01-2.25 2.25H6.75A2.25 2.25 0 014.5 19.5V6.257c0-1.108.806-2.057 1.907-2.185a48.208 48.208 0 011.927-.184" />
</svg>
<svg
id="check-icon"
class="hidden size-4 shrink-0 text-primary"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="2"
stroke="currentColor"
>
<path stroke-linecap="round" stroke-linejoin="round" d="M4.5 12.75l6 6 9-13.5" />
</svg>
</button>
<div class="flex gap-4 justify-center flex-wrap">
<a href="/docs/getting-started/installation" class="px-6 py-2.5 rounded-lg bg-accent text-bg font-semibold text-sm hover:bg-accent-hover transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-2 focus-visible:ring-offset-bg">Get started</a>
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener" class="px-6 py-2.5 rounded-lg border border-border text-text-muted font-semibold text-sm hover:border-text-dim hover:text-text-primary transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-accent focus-visible:ring-offset-2 focus-visible:ring-offset-bg">GitHub</a>
<div class="flex items-center gap-3">
<a href="/docs">
<Button client:load size="lg">
Get Started
</Button>
</a>
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener noreferrer" class="inline-flex h-10 items-center justify-center gap-2 rounded-md border border-input bg-background px-4 text-sm font-medium text-foreground transition-colors hover:bg-accent hover:text-accent-foreground">
GitHub
<span id="star-badge" class="hidden inline-flex items-center gap-1 text-muted-foreground">
<span id="star-count"></span>
<Star client:load size={14} fill="currentColor" />
</span>
</a>
</div>
</div>
<div class="max-w-4xl mx-auto mt-16">
<img src="/hero-raw.png" alt="Feynman CLI" class="w-full" />
<img src="/hero.png" class="w-full" alt="Feynman terminal demo" />
</section>
<!-- Terminal demo -->
<section class="py-16">
<div class="flex flex-col items-center gap-8 text-center">
<div class="flex flex-col gap-2">
<h2 class="text-2xl font-bold tracking-tight sm:text-3xl">Powerful from the command line</h2>
<p class="text-muted-foreground">Run research workflows with simple commands.</p>
</div>
<Card client:load className="w-full text-left">
<CardContent client:load>
<div class="flex flex-col gap-3 font-mono text-sm">
{terminalCommands.map((cmd) => (
<div class="flex flex-col gap-0.5">
<div>
<span class="text-muted-foreground">$ </span>
<span class="text-primary">{cmd.command}</span>
</div>
<div class="text-xs text-muted-foreground">{cmd.description}</div>
</div>
))}
</div>
</CardContent>
</Card>
</div>
</section>
<section class="py-20 px-6">
<div class="max-w-5xl mx-auto">
<h2 class="text-2xl font-bold text-center mb-4">Ask a question, get a cited answer</h2>
<p class="text-center text-text-muted mb-12 max-w-xl mx-auto">Feynman dispatches research agents that search papers, crawl the web, and synthesize findings with inline citations back to the source.</p>
<div class="bg-surface rounded-xl p-6 font-mono text-sm leading-loose max-w-2xl mx-auto border border-border">
<div class="flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> "what do we know about scaling laws"</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Searches papers and web, produces a cited research brief</div>
<div class="mt-4 flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> deepresearch "mechanistic interpretability"</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Multi-agent deep dive with parallel researchers, synthesis, and verification</div>
<div class="mt-4 flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> lit "RLHF alternatives"</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Literature review covering consensus, disagreements, and open questions</div>
<div class="mt-4 flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> audit 2401.12345</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Compares paper claims against the public codebase</div>
<div class="mt-4 flex gap-4"><span class="text-text-dim shrink-0">$</span><span><span class="text-accent">feynman</span> replicate "chain-of-thought improves math"</span></div>
<div class="text-text-dim mt-1 ml-6 text-xs">Builds a replication plan, picks a compute target, runs the experiment</div>
<!-- Workflows -->
<section class="py-16">
<div class="flex flex-col items-center gap-8 text-center">
<div class="flex flex-col gap-2">
<h2 class="text-2xl font-bold tracking-tight sm:text-3xl">Built-in workflows</h2>
<p class="text-muted-foreground">Pre-configured research workflows you can run with a single command.</p>
</div>
<div class="grid w-full gap-4 sm:grid-cols-2 lg:grid-cols-3">
{workflows.map((wf) => (
<Card client:load size="sm">
<CardHeader client:load>
<CardTitle client:load className="font-mono text-sm text-primary">{wf.command}</CardTitle>
<CardDescription client:load>{wf.description}</CardDescription>
</CardHeader>
</Card>
))}
</div>
</div>
</section>
<section class="py-20 px-6">
<div class="max-w-5xl mx-auto">
<h2 class="text-2xl font-bold text-center mb-4">Workflows</h2>
<p class="text-center text-text-muted mb-10 max-w-xl mx-auto">Structured research workflows you can trigger with a slash command or just ask for in natural language.</p>
<div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 gap-4 max-w-4xl mx-auto">
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-mono text-sm text-accent mb-2">/deepresearch</div>
<p class="text-sm text-text-muted">Multi-agent investigation across papers, web, and code</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-mono text-sm text-accent mb-2">/lit</div>
<p class="text-sm text-text-muted">Literature review from primary sources with consensus mapping</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-mono text-sm text-accent mb-2">/review</div>
<p class="text-sm text-text-muted">Simulated peer review with severity scores and a revision plan</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-mono text-sm text-accent mb-2">/audit</div>
<p class="text-sm text-text-muted">Check paper claims against what the code actually does</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-mono text-sm text-accent mb-2">/replicate</div>
<p class="text-sm text-text-muted">Design and run a replication in a sandboxed Docker container</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-mono text-sm text-accent mb-2">/compare</div>
<p class="text-sm text-text-muted">Side-by-side source comparison with agreement and conflict matrix</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-mono text-sm text-accent mb-2">/draft</div>
<p class="text-sm text-text-muted">Turn research findings into a polished paper-style draft</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-mono text-sm text-accent mb-2">/autoresearch</div>
<p class="text-sm text-text-muted">Hypothesis &rarr; experiment &rarr; measure &rarr; repeat</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-mono text-sm text-accent mb-2">/watch</div>
<p class="text-sm text-text-muted">Recurring monitor for new papers, code, or product updates</p>
<!-- Agents -->
<section class="py-16">
<div class="flex flex-col items-center gap-8 text-center">
<div class="flex flex-col gap-2">
<h2 class="text-2xl font-bold tracking-tight sm:text-3xl">Specialized agents</h2>
<p class="text-muted-foreground">Each agent is purpose-built for a specific part of the research process.</p>
</div>
<div class="grid w-full gap-4 sm:grid-cols-2 lg:grid-cols-4">
{agents.map((agent) => (
<Card client:load size="sm" className="text-center">
<CardHeader client:load className="items-center">
<CardTitle client:load>{agent.name}</CardTitle>
<CardDescription client:load>{agent.description}</CardDescription>
</CardHeader>
</Card>
))}
</div>
</div>
</section>
<section class="py-20 px-6">
<div class="max-w-5xl mx-auto">
<h2 class="text-2xl font-bold text-center mb-4">Agents that do the work</h2>
<p class="text-center text-text-muted mb-10 max-w-xl mx-auto">Feynman dispatches specialized agents behind the scenes. You ask a question &mdash; the right team assembles.</p>
<div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-4 gap-4">
<div class="bg-surface rounded-xl p-6 text-center border border-border">
<div class="font-semibold text-accent mb-2">Researcher</div>
<p class="text-sm text-text-muted">Hunts for evidence across papers, the web, repos, and docs</p>
</div>
<div class="bg-surface rounded-xl p-6 text-center border border-border">
<div class="font-semibold text-accent mb-2">Reviewer</div>
<p class="text-sm text-text-muted">Grades claims by severity, flags gaps, and suggests revisions</p>
</div>
<div class="bg-surface rounded-xl p-6 text-center border border-border">
<div class="font-semibold text-accent mb-2">Writer</div>
<p class="text-sm text-text-muted">Structures notes into briefs, drafts, and paper-style output</p>
</div>
<div class="bg-surface rounded-xl p-6 text-center border border-border">
<div class="font-semibold text-accent mb-2">Verifier</div>
<p class="text-sm text-text-muted">Checks every citation, verifies URLs, removes dead links</p>
<!-- Sources -->
<section class="py-16">
<div class="flex flex-col items-center gap-8 text-center">
<div class="flex flex-col gap-2">
<h2 class="text-2xl font-bold tracking-tight sm:text-3xl">Connected sources</h2>
<p class="text-muted-foreground">Pull data from multiple sources to build a complete picture.</p>
</div>
<div class="grid w-full gap-4 sm:grid-cols-3">
{sources.map((source) => (
<Card client:load size="sm" className="text-center">
<CardHeader client:load className="items-center">
<CardTitle client:load>
{source.href ? (
<a href={source.href} target="_blank" rel="noopener noreferrer" class="underline underline-offset-4 hover:text-primary">
{source.name}
</a>
) : (
source.name
)}
</CardTitle>
<CardDescription client:load>{source.description}</CardDescription>
</CardHeader>
</Card>
))}
</div>
</div>
</section>
<section class="py-20 px-6">
<div class="max-w-5xl mx-auto">
<h2 class="text-2xl font-bold text-center mb-4">Sources</h2>
<p class="text-center text-text-muted mb-10 max-w-xl mx-auto">Where Feynman finds information and what it uses to build answers.</p>
<div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 gap-4 max-w-4xl mx-auto">
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-semibold mb-1"><a href="https://www.alphaxiv.org/" class="text-accent hover:underline">AlphaXiv</a></div>
<p class="text-sm text-text-muted">Paper search, Q&A, code reading, and persistent annotations</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-semibold mb-1">Web search</div>
<p class="text-sm text-text-muted">Gemini or Perplexity, zero-config by default</p>
</div>
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-semibold mb-1">Preview</div>
<p class="text-sm text-text-muted">Browser and PDF export of generated artifacts</p>
</div>
<!-- Compute -->
<section class="py-16">
<div class="flex flex-col items-center gap-8 text-center">
<div class="flex flex-col gap-2">
<h2 class="text-2xl font-bold tracking-tight sm:text-3xl">Sandboxed compute</h2>
<p class="text-muted-foreground">Execute code and run experiments in isolated environments.</p>
</div>
<Card client:load size="sm" className="w-full max-w-md text-center">
<CardHeader client:load className="items-center">
<CardTitle client:load>Docker</CardTitle>
<CardDescription client:load>
Run code, install packages, and execute experiments in secure Docker containers.
Results are streamed back to your terminal in real time.
</CardDescription>
</CardHeader>
</Card>
</div>
</section>
<section class="py-20 px-6">
<div class="max-w-5xl mx-auto">
<h2 class="text-2xl font-bold text-center mb-4">Compute</h2>
<p class="text-center text-text-muted mb-10 max-w-xl mx-auto">Experiments run in sandboxed Docker containers on your machine. Your code stays local.</p>
<div class="grid grid-cols-1 gap-4 max-w-md mx-auto">
<div class="bg-surface rounded-xl p-5 border border-border">
<div class="font-semibold mb-1"><a href="https://www.docker.com/" class="text-accent hover:underline">Docker</a></div>
<p class="text-sm text-text-muted">Isolated container execution for safe local experiments</p>
</div>
</div>
<!-- Footer CTA -->
<section class="flex flex-col items-center gap-6 py-20 text-center">
<h2 class="text-2xl font-bold tracking-tight sm:text-3xl">Start researching</h2>
<p class="max-w-lg text-muted-foreground">
Install Feynman in seconds and run your first research workflow from the terminal.
</p>
<div class="flex items-center gap-3">
<a href="/docs">
<Button client:load size="lg">
Read the Docs
</Button>
</a>
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener noreferrer">
<Button client:load variant="outline" size="lg">
View on GitHub
</Button>
</a>
</div>
</section>
<section class="py-20 px-6 text-center">
<div class="max-w-xl mx-auto">
<p class="text-text-muted mb-6">Built on <a href="https://github.com/badlogic/pi-mono" class="text-accent hover:underline">Pi</a> and <a href="https://www.alphaxiv.org/" class="text-accent hover:underline">alphaXiv</a>. MIT licensed. Open source.</p>
<div class="flex gap-4 justify-center flex-wrap">
<a href="/docs/getting-started/installation" class="px-6 py-2.5 rounded-lg bg-accent text-bg font-semibold text-sm hover:bg-accent-hover transition-colors">Get started</a>
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener" class="px-6 py-2.5 rounded-lg border border-border text-text-muted font-semibold text-sm hover:border-text-dim hover:text-text-primary transition-colors">GitHub</a>
</div>
</div>
</section>
</Layout>
<script is:inline>
document.getElementById('copy-btn').addEventListener('click', function() {
navigator.clipboard.writeText('curl -fsSL https://feynman.is/install | bash');
var icon = document.getElementById('copy-icon');
icon.innerHTML = '<svg class="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><path d="M20 6L9 17l-5-5"/></svg>';
document.getElementById("install-cmd").addEventListener("click", function () {
var text = "curl -fsSL https://feynman.is/install | bash"
navigator.clipboard.writeText(text).then(function () {
var copyIcon = document.getElementById("copy-icon")
var checkIcon = document.getElementById("check-icon")
copyIcon.classList.add("hidden")
checkIcon.classList.remove("hidden")
setTimeout(function () {
icon.innerHTML = '<svg class="w-4 h-4" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><rect x="9" y="9" width="13" height="13" rx="2"/><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"/></svg>';
}, 2000);
});
copyIcon.classList.remove("hidden")
checkIcon.classList.add("hidden")
}, 2000)
})
})
fetch("https://api.github.com/repos/getcompanion-ai/feynman")
.then(function(r) { return r.json() })
.then(function(d) {
if (d.stargazers_count !== undefined) {
var el = document.getElementById("star-count")
if (el) {
el.textContent = d.stargazers_count >= 1000 ? (d.stargazers_count / 1000).toFixed(1) + "k" : String(d.stargazers_count)
var badge = document.getElementById("star-badge")
if (badge) badge.classList.remove("hidden")
}
}
})
.catch(function() {})
</script>
</Base>

View File

@@ -1,47 +1,133 @@
@tailwind base;
@tailwind components;
@tailwind utilities;
@import "tailwindcss";
@import "tw-animate-css";
@import "shadcn/tailwind.css";
@import "@fontsource-variable/ibm-plex-sans";
@custom-variant dark (&:is(.dark *));
@theme inline {
--font-heading: var(--font-sans);
--font-sans: 'IBM Plex Sans Variable', sans-serif;
--color-sidebar-ring: var(--sidebar-ring);
--color-sidebar-border: var(--sidebar-border);
--color-sidebar-accent-foreground: var(--sidebar-accent-foreground);
--color-sidebar-accent: var(--sidebar-accent);
--color-sidebar-primary-foreground: var(--sidebar-primary-foreground);
--color-sidebar-primary: var(--sidebar-primary);
--color-sidebar-foreground: var(--sidebar-foreground);
--color-sidebar: var(--sidebar);
--color-chart-5: var(--chart-5);
--color-chart-4: var(--chart-4);
--color-chart-3: var(--chart-3);
--color-chart-2: var(--chart-2);
--color-chart-1: var(--chart-1);
--color-ring: var(--ring);
--color-input: var(--input);
--color-border: var(--border);
--color-destructive: var(--destructive);
--color-accent-foreground: var(--accent-foreground);
--color-accent: var(--accent);
--color-muted-foreground: var(--muted-foreground);
--color-muted: var(--muted);
--color-secondary-foreground: var(--secondary-foreground);
--color-secondary: var(--secondary);
--color-primary-foreground: var(--primary-foreground);
--color-primary: var(--primary);
--color-popover-foreground: var(--popover-foreground);
--color-popover: var(--popover);
--color-card-foreground: var(--card-foreground);
--color-card: var(--card);
--color-foreground: var(--foreground);
--color-background: var(--background);
--radius-sm: calc(var(--radius) * 0.6);
--radius-md: calc(var(--radius) * 0.8);
--radius-lg: var(--radius);
--radius-xl: calc(var(--radius) * 1.4);
--radius-2xl: calc(var(--radius) * 1.8);
--radius-3xl: calc(var(--radius) * 2.2);
--radius-4xl: calc(var(--radius) * 2.6);
}
:root {
--color-bg: #f3ead3;
--color-surface: #eae4ca;
--color-surface-2: #e0dbc2;
--color-border: #c9c4b0;
--color-text: #3a464c;
--color-text-muted: #5c6a72;
--color-text-dim: #859289;
--color-accent: #6e8b53;
--color-accent-hover: #5a7342;
--color-accent-subtle: #d5e3bf;
--color-teal: #5da09a;
--background: oklch(0.974 0.026 90.1);
--foreground: oklch(0.30 0.02 150);
--card: oklch(0.952 0.031 98.9);
--card-foreground: oklch(0.30 0.02 150);
--popover: oklch(0.952 0.031 98.9);
--popover-foreground: oklch(0.30 0.02 150);
--primary: oklch(0.45 0.12 145);
--primary-foreground: oklch(0.97 0.02 90);
--secondary: oklch(0.937 0.031 98.9);
--secondary-foreground: oklch(0.30 0.02 150);
--muted: oklch(0.937 0.031 98.9);
--muted-foreground: oklch(0.55 0.02 150);
--accent: oklch(0.937 0.031 98.9);
--accent-foreground: oklch(0.30 0.02 150);
--destructive: oklch(0.709 0.128 19.6);
--border: oklch(0.892 0.028 98.1);
--input: oklch(0.892 0.028 98.1);
--ring: oklch(0.45 0.12 145);
--chart-1: oklch(0.45 0.12 145);
--chart-2: oklch(0.749 0.063 185.5);
--chart-3: oklch(0.750 0.082 349.2);
--chart-4: oklch(0.709 0.128 19.6);
--chart-5: oklch(0.30 0.02 150);
--radius: 0.625rem;
--sidebar: oklch(0.952 0.031 98.9);
--sidebar-foreground: oklch(0.30 0.02 150);
--sidebar-primary: oklch(0.45 0.12 145);
--sidebar-primary-foreground: oklch(0.97 0.02 90);
--sidebar-accent: oklch(0.937 0.031 98.9);
--sidebar-accent-foreground: oklch(0.30 0.02 150);
--sidebar-border: oklch(0.892 0.028 98.1);
--sidebar-ring: oklch(0.45 0.12 145);
}
.dark {
--color-bg: #2d353b;
--color-surface: #343f44;
--color-surface-2: #3a464c;
--color-border: #5c6a72;
--color-text: #d3c6aa;
--color-text-muted: #9da9a0;
--color-text-dim: #859289;
--color-accent: #a7c080;
--color-accent-hover: #93ad6c;
--color-accent-subtle: #425047;
--color-teal: #7fbbb3;
--background: oklch(0.324 0.015 240.4);
--foreground: oklch(0.830 0.041 86.1);
--card: oklch(0.360 0.017 227.1);
--card-foreground: oklch(0.830 0.041 86.1);
--popover: oklch(0.360 0.017 227.1);
--popover-foreground: oklch(0.830 0.041 86.1);
--primary: oklch(0.773 0.091 125.8);
--primary-foreground: oklch(0.324 0.015 240.4);
--secondary: oklch(0.386 0.019 229.5);
--secondary-foreground: oklch(0.830 0.041 86.1);
--muted: oklch(0.386 0.019 229.5);
--muted-foreground: oklch(0.723 0.019 153.4);
--accent: oklch(0.386 0.019 229.5);
--accent-foreground: oklch(0.830 0.041 86.1);
--destructive: oklch(0.709 0.128 19.6);
--border: oklch(0.515 0.021 232.9);
--input: oklch(0.515 0.021 232.9);
--ring: oklch(0.773 0.091 125.8);
--chart-1: oklch(0.773 0.091 125.8);
--chart-2: oklch(0.749 0.063 185.5);
--chart-3: oklch(0.750 0.082 349.2);
--chart-4: oklch(0.709 0.128 19.6);
--chart-5: oklch(0.647 0.020 155.6);
--sidebar: oklch(0.360 0.017 227.1);
--sidebar-foreground: oklch(0.830 0.041 86.1);
--sidebar-primary: oklch(0.773 0.091 125.8);
--sidebar-primary-foreground: oklch(0.324 0.015 240.4);
--sidebar-accent: oklch(0.416 0.023 157.1);
--sidebar-accent-foreground: oklch(0.830 0.041 86.1);
--sidebar-border: oklch(0.515 0.021 232.9);
--sidebar-ring: oklch(0.773 0.091 125.8);
}
@layer base {
* {
@apply border-border outline-ring/50;
}
body {
@apply bg-background text-foreground;
}
html {
@apply font-sans;
scroll-behavior: smooth;
}
::view-transition-old(root),
::view-transition-new(root) {
animation: none !important;
}
body {
background-color: var(--color-bg);
color: var(--color-text);
}
.prose h2 {
@@ -49,7 +135,7 @@ body {
font-weight: 700;
margin-top: 2.5rem;
margin-bottom: 1rem;
color: var(--color-text);
color: var(--foreground);
}
.prose h3 {
@@ -57,13 +143,13 @@ body {
font-weight: 600;
margin-top: 2rem;
margin-bottom: 0.75rem;
color: var(--color-teal);
color: var(--primary);
}
.prose p {
margin-bottom: 1rem;
line-height: 1.75;
color: var(--color-text-muted);
color: var(--muted-foreground);
}
.prose ul {
@@ -81,16 +167,16 @@ body {
.prose li {
margin-bottom: 0.375rem;
line-height: 1.65;
color: var(--color-text-muted);
color: var(--muted-foreground);
}
.prose code {
font-family: 'SF Mono', 'Fira Code', 'JetBrains Mono', monospace;
font-size: 0.875rem;
background-color: var(--color-surface-2);
background-color: var(--muted);
padding: 0.125rem 0.375rem;
border-radius: 0.25rem;
color: var(--color-text);
color: var(--foreground);
}
.prose pre {
@@ -102,13 +188,15 @@ body {
font-family: 'SF Mono', 'Fira Code', 'JetBrains Mono', monospace;
font-size: 0.875rem;
line-height: 1.7;
border: 1px solid var(--color-border);
background-color: var(--card) !important;
color: var(--card-foreground);
}
.prose pre code {
background: none !important;
border: none;
padding: 0;
color: var(--card-foreground);
}
.prose pre .copy-code {
@@ -124,8 +212,8 @@ body {
margin: 0;
border: none;
border-radius: 0.25rem;
color: var(--color-text-dim);
background: var(--color-surface-2);
color: var(--muted-foreground);
background: var(--muted);
opacity: 0;
transition: opacity 0.15s, color 0.15s;
cursor: pointer;
@@ -137,7 +225,7 @@ body {
}
.prose pre .copy-code:hover {
color: var(--color-accent);
color: var(--primary);
}
.prose table {
@@ -148,88 +236,60 @@ body {
}
.prose th {
background-color: var(--color-surface);
background-color: var(--card);
padding: 0.625rem 0.875rem;
text-align: left;
font-weight: 600;
color: var(--color-text);
border-bottom: 1px solid var(--color-border);
color: var(--foreground);
border-bottom: 1px solid var(--border);
}
.prose td {
padding: 0.625rem 0.875rem;
border-bottom: 1px solid var(--color-border);
border-bottom: 1px solid var(--border);
}
.prose td code {
background-color: var(--color-surface-2);
background-color: var(--muted);
padding: 0.125rem 0.375rem;
border-radius: 0.25rem;
font-size: 0.85rem;
}
.prose tr:nth-child(even) {
background-color: var(--color-surface);
background-color: var(--card);
}
.prose a {
color: var(--color-accent);
color: var(--primary);
text-decoration: underline;
text-underline-offset: 2px;
}
.prose a:hover {
color: var(--color-accent-hover);
opacity: 0.8;
}
.prose strong {
color: var(--color-text);
color: var(--foreground);
font-weight: 600;
}
.prose hr {
border-color: var(--color-border);
border-color: var(--border);
margin: 2rem 0;
}
.prose blockquote {
border-left: 2px solid var(--color-text-dim);
border-left: 2px solid var(--muted-foreground);
padding-left: 1rem;
color: var(--color-text-dim);
color: var(--muted-foreground);
font-style: italic;
margin-bottom: 1rem;
}
* {
scrollbar-width: thin;
scrollbar-color: var(--color-border) transparent;
}
::-webkit-scrollbar {
width: 8px;
height: 8px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: var(--color-border);
border-radius: 4px;
}
::-webkit-scrollbar-thumb:hover {
background: var(--color-text-dim);
}
::selection {
background: var(--color-accent-subtle);
color: var(--color-text);
}
.dark .astro-code {
background-color: var(--shiki-dark-bg) !important;
background-color: var(--card) !important;
}
.dark .astro-code code span {
@@ -240,9 +300,35 @@ body {
text-decoration: var(--shiki-dark-text-decoration) !important;
}
.agent-entry {
background-color: var(--color-surface);
border-radius: 0.75rem;
padding: 1.25rem 1.5rem;
margin-bottom: 1rem;
* {
scrollbar-width: thin;
scrollbar-color: var(--border) transparent;
scrollbar-gutter: stable;
}
::-webkit-scrollbar {
width: 6px;
height: 6px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: var(--border);
border-radius: 3px;
}
::-webkit-scrollbar-thumb:hover {
background: var(--muted-foreground);
}
::-webkit-scrollbar-corner {
background: transparent;
}
::selection {
background: var(--primary);
color: var(--primary-foreground);
}

View File

@@ -1,25 +0,0 @@
export default {
content: ['./src/**/*.{astro,html,js,jsx,md,mdx,svelte,ts,tsx,vue}'],
darkMode: 'class',
theme: {
extend: {
colors: {
bg: 'var(--color-bg)',
surface: 'var(--color-surface)',
'surface-2': 'var(--color-surface-2)',
border: 'var(--color-border)',
'text-primary': 'var(--color-text)',
'text-muted': 'var(--color-text-muted)',
'text-dim': 'var(--color-text-dim)',
accent: 'var(--color-accent)',
'accent-hover': 'var(--color-accent-hover)',
'accent-subtle': 'var(--color-accent-subtle)',
teal: 'var(--color-teal)',
},
fontFamily: {
mono: ['"SF Mono"', '"Fira Code"', '"JetBrains Mono"', 'monospace'],
},
},
},
plugins: [],
};

View File

@@ -1,3 +1,13 @@
{
"extends": "astro/tsconfigs/strict"
"extends": "astro/tsconfigs/strict",
"include": [".astro/types.d.ts", "**/*"],
"exclude": ["dist"],
"compilerOptions": {
"jsx": "react-jsx",
"jsxImportSource": "react",
"baseUrl": ".",
"paths": {
"@/*": ["./src/*"]
}
}
}