Refine Feynman research workflows
This commit is contained in:
28
.pi/agents/auto.chain.md
Normal file
28
.pi/agents/auto.chain.md
Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
name: auto
|
||||
description: Plan, investigate, verify, and draft an end-to-end autoresearch run.
|
||||
---
|
||||
|
||||
## planner
|
||||
output: plan.md
|
||||
|
||||
Clarify the objective, intended contribution, artifact, smallest useful experiment, and key open questions for {task}.
|
||||
|
||||
## researcher
|
||||
reads: plan.md
|
||||
output: research.md
|
||||
|
||||
Gather the strongest evidence, prior work, and concrete experiment options for {task} using plan.md as the scope guard.
|
||||
|
||||
## verifier
|
||||
reads: plan.md+research.md
|
||||
output: verification.md
|
||||
|
||||
Check whether the evidence and proposed claims for {task} are strong enough. Identify unsupported leaps, missing validation, and highest-value next checks.
|
||||
|
||||
## writer
|
||||
reads: plan.md+research.md+verification.md
|
||||
output: autoresearch.md
|
||||
progress: true
|
||||
|
||||
Produce the final autoresearch artifact for {task}. If experiments were not run, be explicit about that. Preserve limitations and end with Sources.
|
||||
22
.pi/agents/deep.chain.md
Normal file
22
.pi/agents/deep.chain.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
name: deep
|
||||
description: Gather, verify, and synthesize a deep research brief.
|
||||
---
|
||||
|
||||
## researcher
|
||||
output: research.md
|
||||
|
||||
Investigate {task}. Gather the strongest relevant primary sources, inspect them directly, and produce an evidence-first research brief.
|
||||
|
||||
## verifier
|
||||
reads: research.md
|
||||
output: verification.md
|
||||
|
||||
Verify the claims, source quality, and unresolved gaps in research.md for {task}. Produce a verification table and prioritized corrections.
|
||||
|
||||
## writer
|
||||
reads: research.md+verification.md
|
||||
output: deepresearch.md
|
||||
progress: true
|
||||
|
||||
Write the final deep research brief for {task} using research.md and verification.md. Keep only supported claims, preserve caveats, and end with Sources.
|
||||
28
.pi/agents/researcher.md
Normal file
28
.pi/agents/researcher.md
Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
name: researcher
|
||||
description: Gather primary evidence across papers, web sources, repos, docs, and local artifacts.
|
||||
thinking: high
|
||||
output: research.md
|
||||
defaultProgress: true
|
||||
---
|
||||
|
||||
You are Feynman's evidence-gathering subagent.
|
||||
|
||||
Operating rules:
|
||||
- Prefer primary sources: official docs, papers, datasets, repos, benchmarks, and direct experimental outputs.
|
||||
- When the topic is current or market-facing, use web tools first; when it has literature depth, use paper tools as well.
|
||||
- Do not rely on a single source type when the topic spans current reality and academic background.
|
||||
- Inspect the strongest sources directly before summarizing them.
|
||||
- Build a compact evidence table with:
|
||||
- source
|
||||
- key claim
|
||||
- evidence type
|
||||
- caveats
|
||||
- confidence
|
||||
- Preserve uncertainty explicitly and note disagreements across sources.
|
||||
- Produce durable markdown that another agent can verify and another agent can turn into a polished artifact.
|
||||
- End with a `Sources` section containing direct URLs.
|
||||
|
||||
Default output expectations:
|
||||
- Save the main artifact to `research.md`.
|
||||
- Keep it structured, terse, and evidence-first.
|
||||
28
.pi/agents/verifier.md
Normal file
28
.pi/agents/verifier.md
Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
name: verifier
|
||||
description: Verify claims, source quality, and evidentiary support in a research artifact.
|
||||
thinking: high
|
||||
output: verification.md
|
||||
defaultProgress: true
|
||||
---
|
||||
|
||||
You are Feynman's verification subagent.
|
||||
|
||||
Your job is to audit evidence, not to write a polished final narrative.
|
||||
|
||||
Operating rules:
|
||||
- Check every strong claim against inspected sources or explicit experimental evidence.
|
||||
- Label claims as:
|
||||
- supported
|
||||
- plausible inference
|
||||
- disputed
|
||||
- unsupported
|
||||
- Look for stale sources, benchmark leakage, repo-paper mismatches, missing defaults, ambiguous methodology, and citation quality problems.
|
||||
- Prefer precise corrections over broad rewrites.
|
||||
- Produce a verification table plus a short prioritized list of fixes.
|
||||
- Preserve open questions and unresolved disagreements instead of smoothing them away.
|
||||
- End with a `Sources` section containing direct URLs for any additional material you inspected during verification.
|
||||
|
||||
Default output expectations:
|
||||
- Save the main artifact to `verification.md`.
|
||||
- Optimize for factual pressure-testing, not prose.
|
||||
22
.pi/agents/writer.md
Normal file
22
.pi/agents/writer.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
name: writer
|
||||
description: Turn verified research notes into clear memos, audits, and paper-style drafts.
|
||||
thinking: medium
|
||||
output: draft.md
|
||||
defaultProgress: true
|
||||
---
|
||||
|
||||
You are Feynman's writing subagent.
|
||||
|
||||
Operating rules:
|
||||
- Write only from supplied evidence and clearly marked inference.
|
||||
- Do not introduce unsupported claims.
|
||||
- Preserve caveats, disagreements, and open questions instead of hiding them.
|
||||
- Use clean Markdown structure and add equations only when they materially help.
|
||||
- Keep the narrative readable, but never outrun the evidence.
|
||||
- Produce artifacts that are ready to review in a browser or PDF preview.
|
||||
- End with a `Sources` appendix containing direct URLs.
|
||||
|
||||
Default output expectations:
|
||||
- Save the main artifact to `draft.md` unless the caller specifies a different output path.
|
||||
- Optimize for clarity, structure, and evidence traceability.
|
||||
@@ -7,13 +7,15 @@
|
||||
"paper2": "#1c1917",
|
||||
"paper3": "#221f1c",
|
||||
"panel": "#27231f",
|
||||
"stone": "#9a9084",
|
||||
"ash": "#6f675e",
|
||||
"moss": "#24332c",
|
||||
"moss2": "#202c26",
|
||||
"stone": "#aaa79d",
|
||||
"ash": "#909d91",
|
||||
"darkAsh": "#4f4a44",
|
||||
"oxide": "#b76e4c",
|
||||
"gold": "#d0a85c",
|
||||
"sage": "#88a88a",
|
||||
"teal": "#7aa6a1",
|
||||
"sage": "#86d8a4",
|
||||
"teal": "#69d6c4",
|
||||
"rose": "#c97b84",
|
||||
"violet": "#a98dc6",
|
||||
"selection": "#302b27",
|
||||
@@ -21,31 +23,31 @@
|
||||
"errorBg": "#2b1f21"
|
||||
},
|
||||
"colors": {
|
||||
"accent": "gold",
|
||||
"accent": "sage",
|
||||
"border": "stone",
|
||||
"borderAccent": "gold",
|
||||
"borderAccent": "sage",
|
||||
"borderMuted": "darkAsh",
|
||||
"success": "sage",
|
||||
"error": "rose",
|
||||
"warning": "oxide",
|
||||
"warning": "sage",
|
||||
"muted": "stone",
|
||||
"dim": "ash",
|
||||
"text": "ink",
|
||||
"thinkingText": "stone",
|
||||
"thinkingText": "sage",
|
||||
|
||||
"selectedBg": "selection",
|
||||
"userMessageBg": "panel",
|
||||
"userMessageText": "ink",
|
||||
"customMessageBg": "panel",
|
||||
"customMessageText": "ink",
|
||||
"customMessageLabel": "violet",
|
||||
"userMessageBg": "moss",
|
||||
"userMessageText": "",
|
||||
"customMessageBg": "moss2",
|
||||
"customMessageText": "",
|
||||
"customMessageLabel": "sage",
|
||||
"toolPendingBg": "paper2",
|
||||
"toolSuccessBg": "successBg",
|
||||
"toolErrorBg": "errorBg",
|
||||
"toolTitle": "gold",
|
||||
"toolTitle": "sage",
|
||||
"toolOutput": "stone",
|
||||
|
||||
"mdHeading": "gold",
|
||||
"mdHeading": "sage",
|
||||
"mdLink": "teal",
|
||||
"mdLinkUrl": "stone",
|
||||
"mdCode": "teal",
|
||||
@@ -54,30 +56,30 @@
|
||||
"mdQuote": "stone",
|
||||
"mdQuoteBorder": "ash",
|
||||
"mdHr": "ash",
|
||||
"mdListBullet": "gold",
|
||||
"mdListBullet": "sage",
|
||||
|
||||
"toolDiffAdded": "sage",
|
||||
"toolDiffRemoved": "rose",
|
||||
"toolDiffContext": "stone",
|
||||
|
||||
"syntaxComment": "stone",
|
||||
"syntaxKeyword": "gold",
|
||||
"syntaxKeyword": "sage",
|
||||
"syntaxFunction": "teal",
|
||||
"syntaxVariable": "ink",
|
||||
"syntaxString": "sage",
|
||||
"syntaxNumber": "oxide",
|
||||
"syntaxNumber": "teal",
|
||||
"syntaxType": "violet",
|
||||
"syntaxOperator": "ink",
|
||||
"syntaxPunctuation": "stone",
|
||||
|
||||
"thinkingOff": "darkAsh",
|
||||
"thinkingMinimal": "stone",
|
||||
"thinkingMinimal": "ash",
|
||||
"thinkingLow": "teal",
|
||||
"thinkingMedium": "gold",
|
||||
"thinkingMedium": "sage",
|
||||
"thinkingHigh": "violet",
|
||||
"thinkingXhigh": "rose",
|
||||
|
||||
"bashMode": "oxide"
|
||||
"bashMode": "sage"
|
||||
},
|
||||
"export": {
|
||||
"pageBg": "#141210",
|
||||
|
||||
36
README.md
36
README.md
@@ -57,17 +57,32 @@ Most users should not need slash commands. The intended default is:
|
||||
Inside the REPL:
|
||||
|
||||
- `/help` shows local commands
|
||||
- `/init` bootstraps `AGENTS.md` and `notes/session-logs/`
|
||||
- `/alpha-login` signs in to alphaXiv
|
||||
- `/alpha-status` checks alphaXiv auth
|
||||
- `/new` starts a new persisted session
|
||||
- `/exit` quits
|
||||
- `/lit-review <topic>` expands the literature-review prompt template
|
||||
- `/lit <topic>` expands the literature-review prompt template
|
||||
- `/replicate <paper or claim>` expands the replication prompt template
|
||||
- `/reading-list <topic>` expands the reading-list prompt template
|
||||
- `/research-memo <topic>` expands the general research memo prompt template
|
||||
- `/reading <topic>` expands the reading-list prompt template
|
||||
- `/memo <topic>` expands the general research memo prompt template
|
||||
- `/deepresearch <topic>` expands the thorough source-heavy research prompt template
|
||||
- `/autoresearch <idea>` expands the end-to-end idea-to-paper prompt template
|
||||
- `/compare-sources <topic>` expands the source comparison prompt template
|
||||
- `/paper-code-audit <item>` expands the paper/code audit prompt template
|
||||
- `/paper-draft <topic>` expands the paper-style writing prompt template
|
||||
- `/compare <topic>` expands the source comparison prompt template
|
||||
- `/audit <item>` expands the paper/code audit prompt template
|
||||
- `/draft <topic>` expands the paper-style writing prompt template
|
||||
- `/log` writes a durable session log to `notes/`
|
||||
- `/watch <topic>` schedules or prepares a recurring research watch
|
||||
- `/jobs` inspects active background work
|
||||
|
||||
Package-powered workflows inside the REPL:
|
||||
|
||||
- `/agents` opens the subagent and chain manager
|
||||
- `/run`, `/chain`, and `/parallel` delegate work to subagents
|
||||
- `/ps` opens the background process panel
|
||||
- `/schedule-prompt` manages recurring and deferred jobs
|
||||
- `/search` opens indexed session search
|
||||
- `/preview` previews generated artifacts in the terminal, browser, or PDF
|
||||
|
||||
Outside the REPL:
|
||||
|
||||
@@ -90,6 +105,14 @@ The starter extension adds:
|
||||
- `session_search` for recovering prior Feynman work from stored transcripts
|
||||
- `preview_file` for browser/PDF review of generated artifacts
|
||||
|
||||
Feynman also ships bundled research subagents in `.pi/agents/`:
|
||||
|
||||
- `researcher` for evidence gathering
|
||||
- `verifier` for claim and source checking
|
||||
- `writer` for polished memo and draft writing
|
||||
- `deep` chain for gather → verify → synthesize
|
||||
- `auto` chain for plan → gather → verify → draft
|
||||
|
||||
Feynman uses `@companion-ai/alpha-hub` directly in-process rather than shelling out to the CLI.
|
||||
|
||||
## Curated Pi Stack
|
||||
@@ -115,6 +138,7 @@ The default expectation is source-grounded outputs with explicit `Sources` secti
|
||||
|
||||
```text
|
||||
feynman/
|
||||
├── .pi/agents/ # Bundled research subagents and chains
|
||||
├── extensions/ # Custom research tools
|
||||
├── papers/ # Polished paper-style drafts and writeups
|
||||
├── prompts/ # Slash-style prompt templates
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { execFile, spawn } from "node:child_process";
|
||||
import { createRequire } from "node:module";
|
||||
import { mkdir, mkdtemp, readFile, readdir, stat, writeFile } from "node:fs/promises";
|
||||
import { homedir, tmpdir } from "node:os";
|
||||
import { basename, dirname, extname, join, resolve as resolvePath } from "node:path";
|
||||
@@ -10,7 +11,11 @@ import {
|
||||
clearPaperAnnotation,
|
||||
disconnect,
|
||||
getPaper,
|
||||
getUserName as getAlphaUserName,
|
||||
isLoggedIn as isAlphaLoggedIn,
|
||||
listPaperAnnotations,
|
||||
login as loginAlpha,
|
||||
logout as logoutAlpha,
|
||||
readPaperCode,
|
||||
searchPapers,
|
||||
} from "@companion-ai/alpha-hub/lib";
|
||||
@@ -18,6 +23,15 @@ import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-age
|
||||
import { Type } from "@sinclair/typebox";
|
||||
|
||||
const execFileAsync = promisify(execFile);
|
||||
const require = createRequire(import.meta.url);
|
||||
const FEYNMAN_VERSION = (() => {
|
||||
try {
|
||||
const pkg = require("../package.json") as { version?: string };
|
||||
return pkg.version ?? "dev";
|
||||
} catch {
|
||||
return "dev";
|
||||
}
|
||||
})();
|
||||
|
||||
function formatToolText(result: unknown): string {
|
||||
return typeof result === "string" ? result : JSON.stringify(result, null, 2);
|
||||
@@ -414,6 +428,170 @@ async function renderPdfPreview(filePath: string): Promise<string> {
|
||||
return pdfPath;
|
||||
}
|
||||
|
||||
function formatHeaderPath(path: string): string {
|
||||
const home = homedir();
|
||||
return path.startsWith(home) ? `~${path.slice(home.length)}` : path;
|
||||
}
|
||||
|
||||
function truncateForWidth(text: string, width: number): string {
|
||||
if (width <= 0) {
|
||||
return "";
|
||||
}
|
||||
|
||||
if (text.length <= width) {
|
||||
return text;
|
||||
}
|
||||
|
||||
if (width <= 3) {
|
||||
return ".".repeat(width);
|
||||
}
|
||||
|
||||
return `${text.slice(0, width - 3)}...`;
|
||||
}
|
||||
|
||||
function padCell(text: string, width: number): string {
|
||||
const truncated = truncateForWidth(text, width);
|
||||
return `${truncated}${" ".repeat(Math.max(0, width - truncated.length))}`;
|
||||
}
|
||||
|
||||
function wrapForWidth(text: string, width: number, maxLines: number): string[] {
|
||||
if (width <= 0 || maxLines <= 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const normalized = text.replace(/\s+/g, " ").trim();
|
||||
if (!normalized) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const words = normalized.split(" ");
|
||||
const lines: string[] = [];
|
||||
let current = "";
|
||||
|
||||
for (const word of words) {
|
||||
const candidate = current ? `${current} ${word}` : word;
|
||||
if (candidate.length <= width) {
|
||||
current = candidate;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (current) {
|
||||
lines.push(current);
|
||||
if (lines.length === maxLines) {
|
||||
lines[maxLines - 1] = truncateForWidth(lines[maxLines - 1], width);
|
||||
return lines;
|
||||
}
|
||||
}
|
||||
|
||||
current = word.length <= width ? word : truncateForWidth(word, width);
|
||||
}
|
||||
|
||||
if (current && lines.length < maxLines) {
|
||||
lines.push(current);
|
||||
}
|
||||
|
||||
return lines;
|
||||
}
|
||||
|
||||
function getCurrentModelLabel(ctx: ExtensionContext): string {
|
||||
if (ctx.model) {
|
||||
return `${ctx.model.provider}/${ctx.model.id}`;
|
||||
}
|
||||
|
||||
const branch = ctx.sessionManager.getBranch();
|
||||
for (let index = branch.length - 1; index >= 0; index -= 1) {
|
||||
const entry = branch[index];
|
||||
if (entry.type === "model_change") {
|
||||
return `${entry.provider}/${entry.modelId}`;
|
||||
}
|
||||
}
|
||||
|
||||
return "model not set";
|
||||
}
|
||||
|
||||
function getRecentActivitySummary(ctx: ExtensionContext): string {
|
||||
const branch = ctx.sessionManager.getBranch();
|
||||
for (let index = branch.length - 1; index >= 0; index -= 1) {
|
||||
const entry = branch[index];
|
||||
if (entry.type !== "message") {
|
||||
continue;
|
||||
}
|
||||
|
||||
const text = extractMessageText(entry.message).replace(/\s+/g, " ").trim();
|
||||
if (!text) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const role = entry.message.role === "assistant"
|
||||
? "agent"
|
||||
: entry.message.role === "user"
|
||||
? "you"
|
||||
: entry.message.role;
|
||||
return `${role}: ${text}`;
|
||||
}
|
||||
|
||||
return "No messages yet in this session.";
|
||||
}
|
||||
|
||||
function buildTitledBorder(width: number, title: string): { left: string; right: string } {
|
||||
const gap = Math.max(0, width - title.length);
|
||||
const left = Math.floor(gap / 2);
|
||||
return {
|
||||
left: "─".repeat(left),
|
||||
right: "─".repeat(gap - left),
|
||||
};
|
||||
}
|
||||
|
||||
function formatShortcutLine(command: string, description: string, width: number): string {
|
||||
const commandWidth = Math.min(18, Math.max(13, Math.floor(width * 0.3)));
|
||||
return truncateForWidth(`${padCell(command, commandWidth)} ${description}`, width);
|
||||
}
|
||||
|
||||
async function pathExists(path: string): Promise<boolean> {
|
||||
try {
|
||||
await stat(path);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function buildProjectAgentsTemplate(): string {
|
||||
return `# Feynman Project Guide
|
||||
|
||||
This file is read automatically at startup. It is the durable project memory for Feynman.
|
||||
|
||||
## Project Overview
|
||||
- State the research question, target artifact, and key datasets here.
|
||||
|
||||
## Ground Rules
|
||||
- Do not modify raw data in \`Data/Raw/\` or equivalent raw-data folders.
|
||||
- Read first, act second: inspect project structure and existing notes before making changes.
|
||||
- Prefer durable artifacts in \`notes/\`, \`outputs/\`, \`experiments/\`, and \`papers/\`.
|
||||
- Keep strong claims source-grounded. Include direct URLs in final writeups.
|
||||
|
||||
## Current Status
|
||||
- Replace this section with the latest project status, known issues, and next steps.
|
||||
|
||||
## Session Logging
|
||||
- Use \`/log\` at the end of meaningful sessions to write a durable session note into \`notes/session-logs/\`.
|
||||
`;
|
||||
}
|
||||
|
||||
function buildSessionLogsReadme(): string {
|
||||
return `# Session Logs
|
||||
|
||||
Use \`/log\` to write one durable note per meaningful Feynman session.
|
||||
|
||||
Recommended contents:
|
||||
- what was done
|
||||
- strongest findings
|
||||
- artifacts written
|
||||
- unresolved questions
|
||||
- next steps
|
||||
`;
|
||||
}
|
||||
|
||||
export default function researchTools(pi: ExtensionAPI): void {
|
||||
function installFeynmanHeader(ctx: ExtensionContext): void {
|
||||
if (!ctx.hasUI) {
|
||||
@@ -421,13 +599,101 @@ export default function researchTools(pi: ExtensionAPI): void {
|
||||
}
|
||||
|
||||
ctx.ui.setHeader((_tui, theme) => ({
|
||||
render(_width: number): string[] {
|
||||
return [
|
||||
"",
|
||||
`${theme.fg("accent", theme.bold("Feynman"))}${theme.fg("muted", " research agent")}`,
|
||||
theme.fg("dim", "sources first • memory on • scheduled research ready"),
|
||||
"",
|
||||
render(width: number): string[] {
|
||||
const maxAvailableWidth = Math.max(width - 2, 1);
|
||||
const preferredWidth = Math.min(104, Math.max(56, width - 4));
|
||||
const cardWidth = Math.min(maxAvailableWidth, preferredWidth);
|
||||
const innerWidth = cardWidth - 2;
|
||||
const outerPadding = " ".repeat(Math.max(0, Math.floor((width - cardWidth) / 2)));
|
||||
const title = truncateForWidth(` Feynman v${FEYNMAN_VERSION} `, innerWidth);
|
||||
const titledBorder = buildTitledBorder(innerWidth, title);
|
||||
const modelLabel = getCurrentModelLabel(ctx);
|
||||
const sessionLabel = ctx.sessionManager.getSessionName()?.trim() || "default session";
|
||||
const directoryLabel = formatHeaderPath(ctx.cwd);
|
||||
const recentActivity = getRecentActivitySummary(ctx);
|
||||
const shortcuts = [
|
||||
["/lit", "survey papers on a topic"],
|
||||
["/deepresearch", "run a source-heavy research pass"],
|
||||
["/draft", "draft a paper-style writeup"],
|
||||
["/jobs", "inspect active background work"],
|
||||
];
|
||||
const lines: string[] = [];
|
||||
|
||||
const push = (line: string): void => {
|
||||
lines.push(`${outerPadding}${line}`);
|
||||
};
|
||||
|
||||
const renderBoxLine = (content: string): string =>
|
||||
`${theme.fg("borderMuted", "│")}${content}${theme.fg("borderMuted", "│")}`;
|
||||
const renderDivider = (): string =>
|
||||
`${theme.fg("borderMuted", "├")}${theme.fg("borderMuted", "─".repeat(innerWidth))}${theme.fg("borderMuted", "┤")}`;
|
||||
const styleAccentCell = (text: string, cellWidth: number): string =>
|
||||
theme.fg("accent", theme.bold(padCell(text, cellWidth)));
|
||||
const styleMutedCell = (text: string, cellWidth: number): string =>
|
||||
theme.fg("muted", padCell(text, cellWidth));
|
||||
|
||||
push("");
|
||||
push(
|
||||
theme.fg("borderMuted", `╭${titledBorder.left}`) +
|
||||
theme.fg("accent", theme.bold(title)) +
|
||||
theme.fg("borderMuted", `${titledBorder.right}╮`),
|
||||
);
|
||||
|
||||
if (innerWidth < 88) {
|
||||
const activityLines = wrapForWidth(recentActivity, innerWidth, 2);
|
||||
push(renderBoxLine(padCell("", innerWidth)));
|
||||
push(renderBoxLine(theme.fg("accent", theme.bold(padCell("Research session ready", innerWidth)))));
|
||||
push(renderBoxLine(padCell(`model: ${modelLabel}`, innerWidth)));
|
||||
push(renderBoxLine(padCell(`session: ${sessionLabel}`, innerWidth)));
|
||||
push(renderBoxLine(padCell(`directory: ${directoryLabel}`, innerWidth)));
|
||||
push(renderDivider());
|
||||
push(renderBoxLine(theme.fg("accent", theme.bold(padCell("Quick starts", innerWidth)))));
|
||||
for (const [command, description] of shortcuts) {
|
||||
push(renderBoxLine(padCell(formatShortcutLine(command, description, innerWidth), innerWidth)));
|
||||
}
|
||||
push(renderDivider());
|
||||
push(renderBoxLine(theme.fg("accent", theme.bold(padCell("Recent activity", innerWidth)))));
|
||||
for (const activityLine of activityLines.length > 0 ? activityLines : ["No messages yet in this session."]) {
|
||||
push(renderBoxLine(padCell(activityLine, innerWidth)));
|
||||
}
|
||||
} else {
|
||||
const leftWidth = Math.min(44, Math.max(38, Math.floor(innerWidth * 0.43)));
|
||||
const rightWidth = innerWidth - leftWidth - 3;
|
||||
const activityLines = wrapForWidth(recentActivity, innerWidth, 2);
|
||||
const row = (
|
||||
left: string,
|
||||
right: string,
|
||||
options?: { leftAccent?: boolean; rightAccent?: boolean; leftMuted?: boolean; rightMuted?: boolean },
|
||||
): string => {
|
||||
const leftCell = options?.leftAccent
|
||||
? styleAccentCell(left, leftWidth)
|
||||
: options?.leftMuted
|
||||
? styleMutedCell(left, leftWidth)
|
||||
: padCell(left, leftWidth);
|
||||
const rightCell = options?.rightAccent
|
||||
? styleAccentCell(right, rightWidth)
|
||||
: options?.rightMuted
|
||||
? styleMutedCell(right, rightWidth)
|
||||
: padCell(right, rightWidth);
|
||||
return renderBoxLine(`${leftCell}${theme.fg("borderMuted", " │ ")}${rightCell}`);
|
||||
};
|
||||
|
||||
push(renderBoxLine(padCell("", innerWidth)));
|
||||
push(row("Research session ready", "Quick starts", { leftAccent: true, rightAccent: true }));
|
||||
push(row(`model: ${modelLabel}`, formatShortcutLine(shortcuts[0][0], shortcuts[0][1], rightWidth)));
|
||||
push(row(`session: ${sessionLabel}`, formatShortcutLine(shortcuts[1][0], shortcuts[1][1], rightWidth)));
|
||||
push(row(`directory: ${directoryLabel}`, formatShortcutLine(shortcuts[2][0], shortcuts[2][1], rightWidth)));
|
||||
push(row("ask naturally; slash commands are optional", formatShortcutLine(shortcuts[3][0], shortcuts[3][1], rightWidth), { leftMuted: true }));
|
||||
push(renderDivider());
|
||||
push(renderBoxLine(theme.fg("accent", theme.bold(padCell("Recent activity", innerWidth)))));
|
||||
for (const activityLine of activityLines.length > 0 ? activityLines : ["No messages yet in this session."]) {
|
||||
push(renderBoxLine(padCell(activityLine, innerWidth)));
|
||||
}
|
||||
}
|
||||
|
||||
push(theme.fg("borderMuted", `╰${"─".repeat(innerWidth)}╯`));
|
||||
push("");
|
||||
return lines;
|
||||
},
|
||||
invalidate() {},
|
||||
}));
|
||||
@@ -441,6 +707,75 @@ export default function researchTools(pi: ExtensionAPI): void {
|
||||
installFeynmanHeader(ctx);
|
||||
});
|
||||
|
||||
pi.registerCommand("alpha-login", {
|
||||
description: "Sign in to alphaXiv from inside Feynman.",
|
||||
handler: async (_args, ctx) => {
|
||||
if (isAlphaLoggedIn()) {
|
||||
const name = getAlphaUserName();
|
||||
ctx.ui.notify(name ? `alphaXiv already connected as ${name}` : "alphaXiv already connected", "info");
|
||||
return;
|
||||
}
|
||||
|
||||
await loginAlpha();
|
||||
const name = getAlphaUserName();
|
||||
ctx.ui.notify(name ? `alphaXiv connected as ${name}` : "alphaXiv login complete", "info");
|
||||
},
|
||||
});
|
||||
|
||||
pi.registerCommand("alpha-logout", {
|
||||
description: "Clear alphaXiv auth from inside Feynman.",
|
||||
handler: async (_args, ctx) => {
|
||||
logoutAlpha();
|
||||
ctx.ui.notify("alphaXiv auth cleared", "info");
|
||||
},
|
||||
});
|
||||
|
||||
pi.registerCommand("alpha-status", {
|
||||
description: "Show alphaXiv authentication status.",
|
||||
handler: async (_args, ctx) => {
|
||||
if (!isAlphaLoggedIn()) {
|
||||
ctx.ui.notify("alphaXiv not connected", "warning");
|
||||
return;
|
||||
}
|
||||
|
||||
const name = getAlphaUserName();
|
||||
ctx.ui.notify(name ? `alphaXiv connected as ${name}` : "alphaXiv connected", "info");
|
||||
},
|
||||
});
|
||||
|
||||
pi.registerCommand("init", {
|
||||
description: "Initialize AGENTS.md and session-log folders for a research project.",
|
||||
handler: async (_args, ctx) => {
|
||||
const agentsPath = resolvePath(ctx.cwd, "AGENTS.md");
|
||||
const notesDir = resolvePath(ctx.cwd, "notes");
|
||||
const sessionLogsDir = resolvePath(notesDir, "session-logs");
|
||||
const sessionLogsReadmePath = resolvePath(sessionLogsDir, "README.md");
|
||||
const created: string[] = [];
|
||||
const skipped: string[] = [];
|
||||
|
||||
await mkdir(notesDir, { recursive: true });
|
||||
await mkdir(sessionLogsDir, { recursive: true });
|
||||
|
||||
if (!(await pathExists(agentsPath))) {
|
||||
await writeFile(agentsPath, buildProjectAgentsTemplate(), "utf8");
|
||||
created.push("AGENTS.md");
|
||||
} else {
|
||||
skipped.push("AGENTS.md");
|
||||
}
|
||||
|
||||
if (!(await pathExists(sessionLogsReadmePath))) {
|
||||
await writeFile(sessionLogsReadmePath, buildSessionLogsReadme(), "utf8");
|
||||
created.push("notes/session-logs/README.md");
|
||||
} else {
|
||||
skipped.push("notes/session-logs/README.md");
|
||||
}
|
||||
|
||||
const createdSummary = created.length > 0 ? `created: ${created.join(", ")}` : "created: nothing";
|
||||
const skippedSummary = skipped.length > 0 ? `; kept existing: ${skipped.join(", ")}` : "";
|
||||
ctx.ui.notify(`${createdSummary}${skippedSummary}`, "info");
|
||||
},
|
||||
});
|
||||
|
||||
pi.registerTool({
|
||||
name: "session_search",
|
||||
label: "Session Search",
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
"files": [
|
||||
"bin/",
|
||||
"dist/",
|
||||
".pi/agents/",
|
||||
".pi/settings.json",
|
||||
".pi/themes/",
|
||||
"extensions/",
|
||||
|
||||
@@ -4,6 +4,7 @@ description: Compare a paper's claims against its public codebase and identify m
|
||||
Audit the paper and codebase for: $@
|
||||
|
||||
Requirements:
|
||||
- Prefer the `researcher` subagent for evidence gathering and the `verifier` subagent for the mismatch pass when the audit is non-trivial.
|
||||
- Identify the canonical paper first with `alpha_search` and `alpha_get_paper`.
|
||||
- Extract implementation-sensitive claims with `alpha_ask_paper`.
|
||||
- If a public repo exists, inspect it with `alpha_read_code`.
|
||||
@@ -4,6 +4,8 @@ description: Turn a research idea into a paper-oriented end-to-end run with lite
|
||||
Run an autoresearch workflow for: $@
|
||||
|
||||
Requirements:
|
||||
- Prefer the project `auto` chain or the `planner` + `researcher` + `verifier` + `writer` subagents when the task is broad enough to benefit from decomposition.
|
||||
- If the run is likely to take a while, or the user wants it detached, launch the subagent workflow in background with `clarify: false, async: true` and report how to inspect status.
|
||||
- Start by clarifying the research objective, scope, and target contribution.
|
||||
- Search for the strongest relevant primary sources first.
|
||||
- If the topic is current, product-oriented, market-facing, or asks about latest developments, start with `web_search` and `fetch_content`.
|
||||
|
||||
@@ -4,6 +4,7 @@ description: Compare multiple sources on a topic and produce a source-grounded m
|
||||
Compare sources for: $@
|
||||
|
||||
Requirements:
|
||||
- Use the `researcher` subagent to gather source material when the comparison set is broad, and the `verifier` subagent to pressure-test the resulting matrix when needed.
|
||||
- Identify the strongest relevant primary sources first.
|
||||
- For current or market-facing topics, use `web_search` and `fetch_content` to gather up-to-date primary sources before comparing them.
|
||||
- For academic claims, use `alpha_search` and inspect the strongest papers directly.
|
||||
@@ -4,6 +4,8 @@ description: Run a thorough, source-heavy investigation on a topic and produce a
|
||||
Run a deep research workflow for: $@
|
||||
|
||||
Requirements:
|
||||
- If the task is broad, multi-source, or obviously long-running, prefer delegating through the `subagent` tool. Use the project `researcher`, `verifier`, and `writer` agents, or the project `deep` chain when that decomposition fits.
|
||||
- If the user wants it to run unattended, or the sweep will clearly take a while, prefer background execution with `subagent` using `clarify: false, async: true`, then report how to inspect status.
|
||||
- If the topic is current, product-oriented, market-facing, regulatory, or asks about latest developments, start with `web_search` and `fetch_content`.
|
||||
- If the topic has an academic literature component, use `alpha_search`, `alpha_get_paper`, and `alpha_ask_paper` for the strongest papers.
|
||||
- Do not rely on a single source type when the topic spans both current reality and academic background.
|
||||
|
||||
@@ -4,6 +4,7 @@ description: Turn research findings into a polished paper-style draft with equat
|
||||
Write a paper-style draft for: $@
|
||||
|
||||
Requirements:
|
||||
- Prefer the `writer` subagent when the draft should be produced from already-collected notes, and use `verifier` first if the evidence still looks shaky.
|
||||
- Ground every claim in inspected sources, experiments, or explicit inference.
|
||||
- Use clean Markdown structure with LaTeX where equations materially help.
|
||||
- Include at minimum:
|
||||
14
prompts/jobs.md
Normal file
14
prompts/jobs.md
Normal file
@@ -0,0 +1,14 @@
|
||||
---
|
||||
description: Inspect active background research work, including running processes and scheduled follow-ups.
|
||||
---
|
||||
Inspect active background work for this project.
|
||||
|
||||
Requirements:
|
||||
- Use the `process` tool with the `list` action to inspect running and finished managed background processes.
|
||||
- Use the scheduling tooling to list active recurring or deferred jobs if any are configured.
|
||||
- Summarize:
|
||||
- active background processes
|
||||
- queued or recurring research watches
|
||||
- failures that need attention
|
||||
- the next concrete command the user should run if they want logs or detailed status
|
||||
- Be concise and operational.
|
||||
@@ -4,6 +4,7 @@ description: Run a literature review on a topic using paper search and primary-s
|
||||
Investigate the following topic as a literature review: $@
|
||||
|
||||
Requirements:
|
||||
- Use the `researcher` subagent when the sweep is wide enough to benefit from delegated paper triage before synthesis.
|
||||
- If the topic is academic or paper-centric, use `alpha_search` first.
|
||||
- If the topic is current, product-oriented, market-facing, or asks about latest developments, use `web_search` and `fetch_content` first, then use `alpha_search` only for academic background.
|
||||
- Use `alpha_get_paper` on the most relevant papers before making strong claims.
|
||||
12
prompts/log.md
Normal file
12
prompts/log.md
Normal file
@@ -0,0 +1,12 @@
|
||||
---
|
||||
description: Write a durable session log with completed work, findings, open questions, and next steps.
|
||||
---
|
||||
Write a session log for the current research work.
|
||||
|
||||
Requirements:
|
||||
- Summarize what was done in this session.
|
||||
- Capture the strongest findings or decisions.
|
||||
- List open questions, unresolved risks, and concrete next steps.
|
||||
- Reference any important artifacts written to `notes/`, `outputs/`, `experiments/`, or `papers/`.
|
||||
- If any external claims matter, include direct source URLs.
|
||||
- Save the log to `notes/` as markdown with a date-oriented filename.
|
||||
@@ -4,6 +4,7 @@ description: Produce a general research memo grounded in explicit sources and di
|
||||
Write a research memo about: $@
|
||||
|
||||
Requirements:
|
||||
- Use the `researcher` and `writer` subagents when decomposition will improve quality or reduce context pressure.
|
||||
- Start by finding the strongest relevant sources.
|
||||
- If the topic is current, market-facing, product-oriented, regulatory, or asks about latest developments, use `web_search` and `fetch_content` first.
|
||||
- Use `alpha_search` for academic background where relevant, but do not rely on it alone for current topics.
|
||||
@@ -4,6 +4,7 @@ description: Build a prioritized reading list on a research topic with rationale
|
||||
Create a research reading list for: $@
|
||||
|
||||
Requirements:
|
||||
- Use the `researcher` subagent when a wider literature sweep would help before curating the final list.
|
||||
- If the topic is academic, use `alpha_search` with `all` mode.
|
||||
- If the topic is current, product-oriented, or asks for the latest landscape, use `web_search` and `fetch_content` first, then add `alpha_search` for academic background when relevant.
|
||||
- Inspect the strongest papers or primary sources directly before recommending them.
|
||||
@@ -4,6 +4,7 @@ description: Plan or execute a replication workflow for a paper, claim, or bench
|
||||
Design a replication plan for: $@
|
||||
|
||||
Requirements:
|
||||
- Use the `subagent` tool for decomposition when the replication needs separate planning, evidence extraction, and execution passes.
|
||||
- Identify the canonical paper or source material first.
|
||||
- Use `alpha_get_paper` for the target paper.
|
||||
- Use `alpha_ask_paper` to extract the exact implementation or evaluation details you still need.
|
||||
|
||||
14
prompts/watch.md
Normal file
14
prompts/watch.md
Normal file
@@ -0,0 +1,14 @@
|
||||
---
|
||||
description: Set up a recurring or deferred research watch on a topic, company, paper area, or product surface.
|
||||
---
|
||||
Create a research watch for: $@
|
||||
|
||||
Requirements:
|
||||
- Start with a baseline sweep of the topic using the strongest relevant sources.
|
||||
- If the watch is about current events, products, markets, regulations, or releases, use `web_search` and `fetch_content` first.
|
||||
- If the watch has a literature component, add `alpha_search` and inspect the strongest papers directly.
|
||||
- Summarize what should be monitored, what signals matter, and what counts as a meaningful change.
|
||||
- Use `schedule_prompt` to create the recurring or delayed follow-up instead of merely promising to check later.
|
||||
- If the user wants detached execution for the initial sweep, use `subagent` in background mode and report how to inspect status.
|
||||
- Save a durable baseline artifact to `outputs/`.
|
||||
- End with a `Sources` section containing direct URLs for every source used.
|
||||
@@ -9,6 +9,7 @@ const piPackageRoot = resolve(appRoot, "node_modules", "@mariozechner", "pi-codi
|
||||
const packageJsonPath = resolve(piPackageRoot, "package.json");
|
||||
const cliPath = resolve(piPackageRoot, "dist", "cli.js");
|
||||
const interactiveModePath = resolve(piPackageRoot, "dist", "modes", "interactive", "interactive-mode.js");
|
||||
const footerPath = resolve(piPackageRoot, "dist", "modes", "interactive", "components", "footer.js");
|
||||
const workspaceRoot = resolve(appRoot, ".pi", "npm", "node_modules");
|
||||
const webAccessPath = resolve(workspaceRoot, "pi-web-access", "index.ts");
|
||||
const sessionSearchIndexerPath = resolve(
|
||||
@@ -102,6 +103,42 @@ if (existsSync(interactiveModePath)) {
|
||||
}
|
||||
}
|
||||
|
||||
if (existsSync(footerPath)) {
|
||||
const footerSource = readFileSync(footerPath, "utf8");
|
||||
const footerOriginal = [
|
||||
' // Add thinking level indicator if model supports reasoning',
|
||||
' let rightSideWithoutProvider = modelName;',
|
||||
' if (state.model?.reasoning) {',
|
||||
' const thinkingLevel = state.thinkingLevel || "off";',
|
||||
' rightSideWithoutProvider =',
|
||||
' thinkingLevel === "off" ? `${modelName} • thinking off` : `${modelName} • ${thinkingLevel}`;',
|
||||
' }',
|
||||
' // Prepend the provider in parentheses if there are multiple providers and there\'s enough room',
|
||||
' let rightSide = rightSideWithoutProvider;',
|
||||
' if (this.footerData.getAvailableProviderCount() > 1 && state.model) {',
|
||||
' rightSide = `(${state.model.provider}) ${rightSideWithoutProvider}`;',
|
||||
].join("\n");
|
||||
const footerReplacement = [
|
||||
' // Add thinking level indicator if model supports reasoning',
|
||||
' const modelLabel = theme.fg("accent", modelName);',
|
||||
' let rightSideWithoutProvider = modelLabel;',
|
||||
' if (state.model?.reasoning) {',
|
||||
' const thinkingLevel = state.thinkingLevel || "off";',
|
||||
' const separator = theme.fg("dim", " • ");',
|
||||
' rightSideWithoutProvider = thinkingLevel === "off"',
|
||||
' ? `${modelLabel}${separator}${theme.fg("muted", "thinking off")}`',
|
||||
' : `${modelLabel}${separator}${theme.getThinkingBorderColor(thinkingLevel)(thinkingLevel)}`;',
|
||||
' }',
|
||||
' // Prepend the provider in parentheses if there are multiple providers and there\'s enough room',
|
||||
' let rightSide = rightSideWithoutProvider;',
|
||||
' if (this.footerData.getAvailableProviderCount() > 1 && state.model) {',
|
||||
' rightSide = `${theme.fg("muted", `(${state.model.provider})`)} ${rightSideWithoutProvider}`;',
|
||||
].join("\n");
|
||||
if (footerSource.includes(footerOriginal)) {
|
||||
writeFileSync(footerPath, footerSource.replace(footerOriginal, footerReplacement), "utf8");
|
||||
}
|
||||
}
|
||||
|
||||
if (existsSync(webAccessPath)) {
|
||||
const source = readFileSync(webAccessPath, "utf8");
|
||||
if (source.includes('pi.registerCommand("search",')) {
|
||||
|
||||
@@ -16,6 +16,8 @@ Operating rules:
|
||||
- Never answer a latest/current question from arXiv or alpha-backed paper search alone.
|
||||
- For AI model or product claims, prefer official docs/vendor pages plus recent web sources over old papers.
|
||||
- Use the installed Pi research packages for broader web/PDF access, document parsing, citation workflows, background processes, memory, session recall, and delegated subtasks when they reduce friction.
|
||||
- Feynman ships project subagents for research work. Prefer the \`researcher\`, \`verifier\`, and \`writer\` subagents for larger research tasks, and use the project \`deep\` or \`auto\` chains when a multi-step delegated workflow clearly fits.
|
||||
- Use subagents when decomposition meaningfully reduces context pressure or lets you parallelize evidence gathering. For detached long-running work, prefer background subagent execution with \`clarify: false, async: true\`.
|
||||
- Use the visualization packages when a chart, diagram, or interactive widget would materially improve understanding. Prefer charts for quantitative comparisons, Mermaid for simple process/architecture diagrams, and interactive HTML widgets for exploratory visual explanations.
|
||||
- Persistent memory is package-backed. Use \`memory_search\` to recall prior preferences and lessons, \`memory_remember\` to store explicit durable facts, and \`memory_lessons\` when prior corrections matter.
|
||||
- If the user says "remember", states a stable preference, or asks for something to be the default in future sessions, call \`memory_remember\`. Do not just say you will remember it.
|
||||
@@ -23,6 +25,7 @@ Operating rules:
|
||||
- Feynman is intended to support always-on research work. Use the scheduling package when recurring or deferred work is appropriate instead of telling the user to remember manually.
|
||||
- Use \`schedule_prompt\` for recurring scans, delayed follow-ups, reminders, and periodic research jobs.
|
||||
- If the user asks you to remind, check later, run something nightly, or keep watching something over time, call \`schedule_prompt\`. Do not just promise to do it later.
|
||||
- For long-running local work such as experiments, crawls, or log-following, use the process package instead of blocking the main thread unnecessarily. Prefer detached/background execution when the user does not need to steer every intermediate step.
|
||||
- Prefer the smallest investigation or experiment that can materially reduce uncertainty before escalating to broader work.
|
||||
- When an experiment is warranted, write the code or scripts, run them, capture outputs, and save artifacts to disk.
|
||||
- Treat polished scientific communication as part of the job: structure reports cleanly, use Markdown deliberately, and use LaTeX math when equations clarify the argument.
|
||||
|
||||
301
src/index.ts
301
src/index.ts
@@ -1,7 +1,7 @@
|
||||
import "dotenv/config";
|
||||
|
||||
import { spawn, spawnSync } from "node:child_process";
|
||||
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||
import { existsSync, mkdirSync, readdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||
import { homedir } from "node:os";
|
||||
import { dirname, resolve } from "node:path";
|
||||
import { stdin as input, stdout as output } from "node:process";
|
||||
@@ -23,24 +23,214 @@ import {
|
||||
import { buildFeynmanSystemPrompt } from "./feynman-prompt.js";
|
||||
|
||||
// Thinking levels accepted by the embedded Pi agent (mirrors its footer UI states).
type ThinkingLevel = "off" | "low" | "medium" | "high";
// An RGB triple; channels are nominally 0-255 (fractional values allowed until hex conversion).
type Rgb = { r: number; g: number; b: number };
// A theme color is either a hex string ("#rrggbb"), a var-reference name, or a raw number.
type ThemeColorValue = string | number;
// Shape of the on-disk feynman.json theme file.
type ThemeJson = {
	$schema?: string;
	name: string;
	// Named color variables that `colors` entries may reference by name.
	vars?: Record<string, ThemeColorValue>;
	colors: Record<string, ThemeColorValue>;
	export?: Record<string, ThemeColorValue>;
};

// OSC 11 query: asks the terminal to report its background color.
const OSC11_QUERY = "\u001b]11;?\u0007";
// Matches the OSC 11 reply: either "rgb:RR/GG/BB" with 2-4 hex digits per
// channel, or a plain 6-digit hex color, terminated by BEL or ST.
const OSC11_RESPONSE_PATTERN =
	/\u001b]11;(?:rgb:([0-9a-fA-F]{2,4})\/([0-9a-fA-F]{2,4})\/([0-9a-fA-F]{2,4})|#?([0-9a-fA-F]{6}))(?:\u0007|\u001b\\)/;
// Fallback sage-green tint used when the theme declares no usable accent color.
const DEFAULT_SAGE_RGB: Rgb = { r: 0x88, g: 0xa8, b: 0x8a };
||||
|
||||
function parseHexComponent(component: string): number {
|
||||
const value = Number.parseInt(component, 16);
|
||||
if (Number.isNaN(value)) {
|
||||
throw new Error(`Invalid OSC 11 component: ${component}`);
|
||||
}
|
||||
if (component.length === 2) {
|
||||
return value;
|
||||
}
|
||||
return Math.round(value / ((1 << (component.length * 4)) - 1) * 255);
|
||||
}
|
||||
|
||||
function parseHexColor(color: string): Rgb | undefined {
|
||||
const match = color.trim().match(/^#?([0-9a-fA-F]{6})$/);
|
||||
if (!match) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return {
|
||||
r: Number.parseInt(match[1].slice(0, 2), 16),
|
||||
g: Number.parseInt(match[1].slice(2, 4), 16),
|
||||
b: Number.parseInt(match[1].slice(4, 6), 16),
|
||||
};
|
||||
}
|
||||
|
||||
function rgbToHex(rgb: Rgb): string {
|
||||
return `#${[rgb.r, rgb.g, rgb.b]
|
||||
.map((value) => Math.max(0, Math.min(255, Math.round(value))).toString(16).padStart(2, "0"))
|
||||
.join("")}`;
|
||||
}
|
||||
|
||||
function blendRgb(base: Rgb, tint: Rgb, alpha: number): Rgb {
|
||||
const mix = (baseChannel: number, tintChannel: number) =>
|
||||
baseChannel + (tintChannel - baseChannel) * alpha;
|
||||
return {
|
||||
r: mix(base.r, tint.r),
|
||||
g: mix(base.g, tint.g),
|
||||
b: mix(base.b, tint.b),
|
||||
};
|
||||
}
|
||||
|
||||
function isLightRgb(rgb: Rgb): boolean {
|
||||
const luminance = (0.299 * rgb.r + 0.587 * rgb.g + 0.114 * rgb.b) / 255;
|
||||
return luminance >= 0.6;
|
||||
}
|
||||
|
||||
function resolveThemeColorValue(
|
||||
value: ThemeColorValue | undefined,
|
||||
vars: Record<string, ThemeColorValue> | undefined,
|
||||
visited = new Set<string>(),
|
||||
): ThemeColorValue | undefined {
|
||||
if (value === undefined || typeof value === "number" || value === "" || value.startsWith("#")) {
|
||||
return value;
|
||||
}
|
||||
if (!vars || !(value in vars) || visited.has(value)) {
|
||||
return value;
|
||||
}
|
||||
visited.add(value);
|
||||
return resolveThemeColorValue(vars[value], vars, visited);
|
||||
}
|
||||
|
||||
function resolveThemeRgb(
|
||||
value: ThemeColorValue | undefined,
|
||||
vars: Record<string, ThemeColorValue> | undefined,
|
||||
): Rgb | undefined {
|
||||
const resolved = resolveThemeColorValue(value, vars);
|
||||
return typeof resolved === "string" ? parseHexColor(resolved) : undefined;
|
||||
}
|
||||
|
||||
function deriveMessageBackgrounds(themeJson: ThemeJson, terminalBackgroundHex: string): Pick<ThemeJson["colors"], "userMessageBg" | "customMessageBg"> | undefined {
|
||||
const terminalBackground = parseHexColor(terminalBackgroundHex);
|
||||
if (!terminalBackground) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const tint =
|
||||
resolveThemeRgb(themeJson.colors.accent, themeJson.vars) ??
|
||||
resolveThemeRgb(themeJson.vars?.sage, themeJson.vars) ??
|
||||
DEFAULT_SAGE_RGB;
|
||||
const lightBackground = isLightRgb(terminalBackground);
|
||||
const userAlpha = lightBackground ? 0.15 : 0.23;
|
||||
const customAlpha = lightBackground ? 0.11 : 0.17;
|
||||
|
||||
return {
|
||||
userMessageBg: rgbToHex(blendRgb(terminalBackground, tint, userAlpha)),
|
||||
customMessageBg: rgbToHex(blendRgb(terminalBackground, tint, customAlpha)),
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Best-effort detection of the terminal background color as a "#rrggbb" string.
 *
 * Resolution order: FEYNMAN_TERMINAL_BG env var, then PI_TERMINAL_BG, then an
 * OSC 11 query to the terminal itself. Returns undefined when stdin/stdout are
 * not TTYs, the terminal does not reply within timeoutMs, or the reply cannot
 * be parsed.
 *
 * @param timeoutMs - How long to wait for the terminal's OSC 11 reply.
 */
async function probeTerminalBackgroundHex(timeoutMs = 120): Promise<string | undefined> {
	// Explicit environment overrides win over terminal probing.
	if (typeof process.env.FEYNMAN_TERMINAL_BG === "string" && process.env.FEYNMAN_TERMINAL_BG.trim()) {
		return process.env.FEYNMAN_TERMINAL_BG.trim();
	}
	if (typeof process.env.PI_TERMINAL_BG === "string" && process.env.PI_TERMINAL_BG.trim()) {
		return process.env.PI_TERMINAL_BG.trim();
	}
	// OSC 11 needs a real interactive terminal with raw-mode support.
	if (!input.isTTY || !output.isTTY || typeof input.setRawMode !== "function") {
		return undefined;
	}

	// Remember the current raw-mode state so it can be restored afterwards.
	const wasRaw = "isRaw" in input ? Boolean((input as typeof input & { isRaw?: boolean }).isRaw) : false;

	return await new Promise<string | undefined>((resolve) => {
		let settled = false;
		let buffer = "";

		// Resolve exactly once: cancel the timer, detach the data listener,
		// and restore the terminal's previous raw-mode state.
		const finish = (value: string | undefined) => {
			if (settled) {
				return;
			}
			settled = true;
			clearTimeout(timer);
			input.off("data", onData);
			try {
				if (!wasRaw) {
					input.setRawMode(false);
				}
			} catch {
				// Ignore raw mode restore failures and return best-effort detection.
			}
			resolve(value);
		};

		// Accumulate input until a full OSC 11 reply arrives (or too much
		// unrelated data, which aborts the probe).
		const onData = (chunk: string | Buffer) => {
			buffer += chunk.toString("utf8");
			const match = buffer.match(OSC11_RESPONSE_PATTERN);
			if (!match) {
				if (buffer.length > 512) {
					finish(undefined);
				}
				return;
			}

			// Plain 6-digit hex form of the reply.
			if (match[4]) {
				finish(`#${match[4].toLowerCase()}`);
				return;
			}

			// "rgb:RR/GG/BB" form; each component may be 2-4 hex digits.
			try {
				finish(
					rgbToHex({
						r: parseHexComponent(match[1]),
						g: parseHexComponent(match[2]),
						b: parseHexComponent(match[3]),
					}),
				);
			} catch {
				finish(undefined);
			}
		};

		const timer = setTimeout(() => finish(undefined), timeoutMs);
		input.on("data", onData);

		try {
			// Enable raw mode (if not already on) so the reply is not echoed
			// or line-buffered, then send the query.
			if (!wasRaw) {
				input.setRawMode(true);
			}
			output.write(OSC11_QUERY);
		} catch {
			finish(undefined);
		}
	});
}
|
||||
|
||||
function printHelp(): void {
|
||||
console.log(`Feynman commands:
|
||||
/help Show this help
|
||||
/init Initialize AGENTS.md and session-log folders
|
||||
/alpha-login Sign in to alphaXiv
|
||||
/alpha-logout Clear alphaXiv auth
|
||||
/alpha-status Show alphaXiv auth status
|
||||
/new Start a fresh persisted session
|
||||
/exit Quit the REPL
|
||||
/lit-review <topic> Expand the literature review prompt template
|
||||
/lit <topic> Expand the literature review prompt template
|
||||
/replicate <paper> Expand the replication prompt template
|
||||
/reading-list <topic> Expand the reading list prompt template
|
||||
/research-memo <topic> Expand the general research memo prompt template
|
||||
/reading <topic> Expand the reading list prompt template
|
||||
/memo <topic> Expand the general research memo prompt template
|
||||
/deepresearch <topic> Expand the thorough source-heavy research prompt template
|
||||
/autoresearch <idea> Expand the idea-to-paper autoresearch prompt template
|
||||
/compare-sources <topic> Expand the source comparison prompt template
|
||||
/paper-code-audit <item> Expand the paper/code audit prompt template
|
||||
/paper-draft <topic> Expand the paper-style writing prompt template
|
||||
/compare <topic> Expand the source comparison prompt template
|
||||
/audit <item> Expand the paper/code audit prompt template
|
||||
/draft <topic> Expand the paper-style writing prompt template
|
||||
/log Write a durable session log
|
||||
/watch <topic> Create a recurring or deferred research watch
|
||||
/jobs Inspect active background work
|
||||
|
||||
Package-powered workflows:
|
||||
/agents Open the subagent and chain manager
|
||||
/run /chain /parallel Delegate research work to subagents
|
||||
/ps Open the background process panel
|
||||
/schedule-prompt Manage deferred and recurring jobs
|
||||
/search Search prior indexed sessions
|
||||
/preview Preview generated markdown or code artifacts
|
||||
|
||||
CLI flags:
|
||||
--prompt "<text>" Run one prompt and exit
|
||||
@@ -116,6 +306,7 @@ function patchEmbeddedPiBranding(piPackageRoot: string): void {
|
||||
const packageJsonPath = resolve(piPackageRoot, "package.json");
|
||||
const cliPath = resolve(piPackageRoot, "dist", "cli.js");
|
||||
const interactiveModePath = resolve(piPackageRoot, "dist", "modes", "interactive", "interactive-mode.js");
|
||||
const footerPath = resolve(piPackageRoot, "dist", "modes", "interactive", "components", "footer.js");
|
||||
|
||||
if (existsSync(packageJsonPath)) {
|
||||
const pkg = JSON.parse(readFileSync(packageJsonPath, "utf8")) as {
|
||||
@@ -149,6 +340,42 @@ function patchEmbeddedPiBranding(piPackageRoot: string): void {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (existsSync(footerPath)) {
|
||||
const footerSource = readFileSync(footerPath, "utf8");
|
||||
const footerOriginal = [
|
||||
' // Add thinking level indicator if model supports reasoning',
|
||||
' let rightSideWithoutProvider = modelName;',
|
||||
' if (state.model?.reasoning) {',
|
||||
' const thinkingLevel = state.thinkingLevel || "off";',
|
||||
' rightSideWithoutProvider =',
|
||||
' thinkingLevel === "off" ? `${modelName} • thinking off` : `${modelName} • ${thinkingLevel}`;',
|
||||
' }',
|
||||
' // Prepend the provider in parentheses if there are multiple providers and there\'s enough room',
|
||||
' let rightSide = rightSideWithoutProvider;',
|
||||
' if (this.footerData.getAvailableProviderCount() > 1 && state.model) {',
|
||||
' rightSide = `(${state.model.provider}) ${rightSideWithoutProvider}`;',
|
||||
].join("\n");
|
||||
const footerReplacement = [
|
||||
' // Add thinking level indicator if model supports reasoning',
|
||||
' const modelLabel = theme.fg("accent", modelName);',
|
||||
' let rightSideWithoutProvider = modelLabel;',
|
||||
' if (state.model?.reasoning) {',
|
||||
' const thinkingLevel = state.thinkingLevel || "off";',
|
||||
' const separator = theme.fg("dim", " • ");',
|
||||
' rightSideWithoutProvider = thinkingLevel === "off"',
|
||||
' ? `${modelLabel}${separator}${theme.fg("muted", "thinking off")}`',
|
||||
' : `${modelLabel}${separator}${theme.getThinkingBorderColor(thinkingLevel)(thinkingLevel)}`;',
|
||||
' }',
|
||||
' // Prepend the provider in parentheses if there are multiple providers and there\'s enough room',
|
||||
' let rightSide = rightSideWithoutProvider;',
|
||||
' if (this.footerData.getAvailableProviderCount() > 1 && state.model) {',
|
||||
' rightSide = `${theme.fg("muted", `(${state.model.provider})`)} ${rightSideWithoutProvider}`;',
|
||||
].join("\n");
|
||||
if (footerSource.includes(footerOriginal)) {
|
||||
writeFileSync(footerPath, footerSource.replace(footerOriginal, footerReplacement), "utf8");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function patchPackageWorkspace(appRoot: string): void {
|
||||
@@ -491,7 +718,28 @@ function setupPreviewDependencies(): void {
|
||||
throw new Error("Automatic preview setup is only supported on macOS with Homebrew right now.");
|
||||
}
|
||||
|
||||
function syncFeynmanTheme(appRoot: string, agentDir: string): void {
|
||||
function syncDirectory(sourceDir: string, targetDir: string): void {
|
||||
if (!existsSync(sourceDir)) {
|
||||
return;
|
||||
}
|
||||
|
||||
mkdirSync(targetDir, { recursive: true });
|
||||
for (const entry of readdirSync(sourceDir, { withFileTypes: true })) {
|
||||
const sourcePath = resolve(sourceDir, entry.name);
|
||||
const targetPath = resolve(targetDir, entry.name);
|
||||
|
||||
if (entry.isDirectory()) {
|
||||
syncDirectory(sourcePath, targetPath);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (entry.isFile()) {
|
||||
writeFileSync(targetPath, readFileSync(sourcePath, "utf8"), "utf8");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function syncFeynmanTheme(appRoot: string, agentDir: string, terminalBackgroundHex?: string): void {
|
||||
const sourceThemePath = resolve(appRoot, ".pi", "themes", "feynman.json");
|
||||
const targetThemeDir = resolve(agentDir, "themes");
|
||||
const targetThemePath = resolve(targetThemeDir, "feynman.json");
|
||||
@@ -501,7 +749,36 @@ function syncFeynmanTheme(appRoot: string, agentDir: string): void {
|
||||
}
|
||||
|
||||
mkdirSync(targetThemeDir, { recursive: true });
|
||||
writeFileSync(targetThemePath, readFileSync(sourceThemePath, "utf8"), "utf8");
|
||||
|
||||
const sourceTheme = readFileSync(sourceThemePath, "utf8");
|
||||
if (!terminalBackgroundHex) {
|
||||
writeFileSync(targetThemePath, sourceTheme, "utf8");
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsedTheme = JSON.parse(sourceTheme) as ThemeJson;
|
||||
const derivedBackgrounds = deriveMessageBackgrounds(parsedTheme, terminalBackgroundHex);
|
||||
if (!derivedBackgrounds) {
|
||||
writeFileSync(targetThemePath, sourceTheme, "utf8");
|
||||
return;
|
||||
}
|
||||
|
||||
const generatedTheme: ThemeJson = {
|
||||
...parsedTheme,
|
||||
colors: {
|
||||
...parsedTheme.colors,
|
||||
...derivedBackgrounds,
|
||||
},
|
||||
};
|
||||
writeFileSync(targetThemePath, JSON.stringify(generatedTheme, null, 2) + "\n", "utf8");
|
||||
} catch {
|
||||
writeFileSync(targetThemePath, sourceTheme, "utf8");
|
||||
}
|
||||
}
|
||||
|
||||
function syncFeynmanAgents(appRoot: string, agentDir: string): void {
|
||||
syncDirectory(resolve(appRoot, ".pi", "agents"), resolve(agentDir, "agents"));
|
||||
}
|
||||
|
||||
async function main(): Promise<void> {
|
||||
@@ -539,9 +816,11 @@ async function main(): Promise<void> {
|
||||
|
||||
const workingDir = resolve(values.cwd ?? process.cwd());
|
||||
const sessionDir = resolve(values["session-dir"] ?? resolve(homedir(), ".feynman", "sessions"));
|
||||
const terminalBackgroundHex = await probeTerminalBackgroundHex();
|
||||
mkdirSync(sessionDir, { recursive: true });
|
||||
mkdirSync(feynmanAgentDir, { recursive: true });
|
||||
syncFeynmanTheme(appRoot, feynmanAgentDir);
|
||||
syncFeynmanTheme(appRoot, feynmanAgentDir, terminalBackgroundHex);
|
||||
syncFeynmanAgents(appRoot, feynmanAgentDir);
|
||||
const feynmanSettingsPath = resolve(feynmanAgentDir, "settings.json");
|
||||
const feynmanAuthPath = resolve(feynmanAgentDir, "auth.json");
|
||||
const thinkingLevel = normalizeThinkingLevel(values.thinking ?? process.env.FEYNMAN_THINKING) ?? "medium";
|
||||
@@ -637,6 +916,8 @@ async function main(): Promise<void> {
|
||||
...process.env,
|
||||
PI_CODING_AGENT_DIR: feynmanAgentDir,
|
||||
FEYNMAN_CODING_AGENT_DIR: feynmanAgentDir,
|
||||
FEYNMAN_TERMINAL_BG: terminalBackgroundHex,
|
||||
PI_TERMINAL_BG: terminalBackgroundHex,
|
||||
FEYNMAN_PI_NPM_ROOT: resolve(appRoot, ".pi", "npm", "node_modules"),
|
||||
FEYNMAN_SESSION_DIR: sessionDir,
|
||||
PI_SESSION_DIR: sessionDir,
|
||||
|
||||
Reference in New Issue
Block a user