Compare commits
3 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
92914acff7 | ||
|
|
f0bbb25910 | ||
|
|
9841342866 |
@@ -25,7 +25,7 @@ curl -fsSL https://feynman.is/install | bash
|
||||
irm https://feynman.is/install.ps1 | iex
|
||||
```
|
||||
|
||||
The one-line installer fetches the latest tagged release. To pin a version, pass it explicitly, for example `curl -fsSL https://feynman.is/install | bash -s -- 0.2.24`.
|
||||
The one-line installer fetches the latest tagged release. To pin a version, pass it explicitly, for example `curl -fsSL https://feynman.is/install | bash -s -- 0.2.27`.
|
||||
|
||||
The installer downloads a standalone native bundle with its own Node.js runtime.
|
||||
|
||||
@@ -33,7 +33,9 @@ To upgrade the standalone app later, rerun the installer. `feynman update` only
|
||||
|
||||
To uninstall the standalone app, remove the launcher and runtime bundle, then optionally remove `~/.feynman` if you also want to delete settings, sessions, and installed package state. If you also want to delete alphaXiv login state, remove `~/.ahub`. See the installation guide for platform-specific paths.
|
||||
|
||||
Local models are supported through the setup flow. For LM Studio, run `feynman setup`, choose `LM Studio`, and keep the default `http://localhost:1234/v1` unless you changed the server port. For Ollama or vLLM, choose `Custom provider (baseUrl + API key)`, use `openai-completions`, and point it at the local `/v1` endpoint.
|
||||
Local models are supported through the setup flow. For LM Studio, run `feynman setup`, choose `LM Studio`, and keep the default `http://localhost:1234/v1` unless you changed the server port. For LiteLLM, choose `LiteLLM Proxy` and keep the default `http://localhost:4000/v1`. For Ollama or vLLM, choose `Custom provider (baseUrl + API key)`, use `openai-completions`, and point it at the local `/v1` endpoint.
|
||||
|
||||
Feynman uses Pi's own runtime hooks for context hygiene: Pi compaction/retry settings are seeded by default, `context_report` exposes the current Pi context usage to the model, oversized alphaXiv tool returns spill to `outputs/.cache/`, oversized custom/subagent returns spill to `outputs/.runs/`, and a bounded resume packet is injected from `outputs/.plans/`, `outputs/.state/`, and `CHANGELOG.md` when those files exist. Automatic session logging writes JSONL snippets to `notes/feynman-autolog/`; set `FEYNMAN_AUTO_LOG=off` to disable it or `FEYNMAN_AUTO_LOG=full` for full text. Feynman also locks new plan slugs under `outputs/.state/` to prevent concurrent workflow collisions and garbage-collects stale managed caches on startup.
|
||||
|
||||
### Skills Only
|
||||
|
||||
|
||||
@@ -1,12 +1,16 @@
|
||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import { registerAlphaTools } from "./research-tools/alpha.js";
|
||||
import { registerAutoLog } from "./research-tools/autolog.js";
|
||||
import { registerContextReportTool } from "./research-tools/context.js";
|
||||
import { registerDiscoveryCommands } from "./research-tools/discovery.js";
|
||||
import { registerFeynmanModelCommand } from "./research-tools/feynman-model.js";
|
||||
import { installFeynmanHeader } from "./research-tools/header.js";
|
||||
import { registerHelpCommand } from "./research-tools/help.js";
|
||||
import { registerInitCommand, registerOutputsCommand } from "./research-tools/project.js";
|
||||
import { registerResumePacket } from "./research-tools/resume.js";
|
||||
import { registerServiceTierControls } from "./research-tools/service-tier.js";
|
||||
import { registerStateManagement } from "./research-tools/state.js";
|
||||
|
||||
export default function researchTools(pi: ExtensionAPI): void {
|
||||
const cache: { agentSummaryPromise?: Promise<{ agents: string[]; chains: string[] }> } = {};
|
||||
@@ -17,10 +21,14 @@ export default function researchTools(pi: ExtensionAPI): void {
|
||||
});
|
||||
|
||||
registerAlphaTools(pi);
|
||||
registerAutoLog(pi);
|
||||
registerContextReportTool(pi);
|
||||
registerDiscoveryCommands(pi);
|
||||
registerFeynmanModelCommand(pi);
|
||||
registerHelpCommand(pi);
|
||||
registerInitCommand(pi);
|
||||
registerOutputsCommand(pi);
|
||||
registerResumePacket(pi);
|
||||
registerServiceTierControls(pi);
|
||||
registerStateManagement(pi);
|
||||
}
|
||||
|
||||
@@ -7,7 +7,11 @@ import {
|
||||
readPaperCode,
|
||||
searchPapers,
|
||||
} from "@companion-ai/alpha-hub/lib";
|
||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||
import { createHash } from "node:crypto";
|
||||
import { mkdirSync, writeFileSync } from "node:fs";
|
||||
import { dirname, resolve } from "node:path";
|
||||
|
||||
import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
|
||||
function formatText(value: unknown): string {
|
||||
@@ -15,6 +19,44 @@ function formatText(value: unknown): string {
|
||||
return JSON.stringify(value, null, 2);
|
||||
}
|
||||
|
||||
function toolOutputCapChars(): number {
|
||||
const raw = Number(process.env.FEYNMAN_TOOL_OUTPUT_CAP_CHARS);
|
||||
return Number.isFinite(raw) && raw > 0 ? Math.floor(raw) : 32_000;
|
||||
}
|
||||
|
||||
function spillPath(ctx: ExtensionContext, toolName: string, text: string): string {
|
||||
const hash = createHash("sha256").update(text).digest("hex").slice(0, 12);
|
||||
return resolve(ctx.cwd, "outputs", ".cache", `${toolName}-${hash}.md`);
|
||||
}
|
||||
|
||||
export function formatToolResultWithSpillover(
|
||||
ctx: ExtensionContext,
|
||||
toolName: string,
|
||||
result: unknown,
|
||||
): { text: string; details: unknown } {
|
||||
const text = formatText(result);
|
||||
const cap = toolOutputCapChars();
|
||||
if (text.length <= cap) {
|
||||
return { text, details: result };
|
||||
}
|
||||
|
||||
const path = spillPath(ctx, toolName, text);
|
||||
mkdirSync(dirname(path), { recursive: true });
|
||||
writeFileSync(path, text, "utf8");
|
||||
|
||||
const head = text.slice(0, Math.min(cap, 4_000));
|
||||
const pointer = {
|
||||
feynman_spillover: true,
|
||||
tool: toolName,
|
||||
path,
|
||||
bytes: Buffer.byteLength(text, "utf8"),
|
||||
sha256: createHash("sha256").update(text).digest("hex"),
|
||||
note: "Full tool output was written to disk. Read the path in bounded chunks instead of asking the tool to return everything again.",
|
||||
head,
|
||||
};
|
||||
return { text: JSON.stringify(pointer, null, 2), details: pointer };
|
||||
}
|
||||
|
||||
export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
pi.registerTool({
|
||||
name: "alpha_search",
|
||||
@@ -27,9 +69,10 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
Type.String({ description: "Search mode: semantic, keyword, both, agentic, or all." }),
|
||||
),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
async execute(_toolCallId, params, _signal, _onUpdate, ctx) {
|
||||
const result = await searchPapers(params.query, params.mode?.trim() || "semantic");
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
const formatted = formatToolResultWithSpillover(ctx, "alpha_search", result);
|
||||
return { content: [{ type: "text", text: formatted.text }], details: formatted.details };
|
||||
},
|
||||
});
|
||||
|
||||
@@ -41,9 +84,10 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
paper: Type.String({ description: "arXiv ID, arXiv URL, or alphaXiv URL." }),
|
||||
fullText: Type.Optional(Type.Boolean({ description: "Return raw full text instead of AI report." })),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
async execute(_toolCallId, params, _signal, _onUpdate, ctx) {
|
||||
const result = await getPaper(params.paper, { fullText: params.fullText });
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
const formatted = formatToolResultWithSpillover(ctx, "alpha_get_paper", result);
|
||||
return { content: [{ type: "text", text: formatted.text }], details: formatted.details };
|
||||
},
|
||||
});
|
||||
|
||||
@@ -55,9 +99,10 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
paper: Type.String({ description: "arXiv ID, arXiv URL, or alphaXiv URL." }),
|
||||
question: Type.String({ description: "Question about the paper." }),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
async execute(_toolCallId, params, _signal, _onUpdate, ctx) {
|
||||
const result = await askPaper(params.paper, params.question);
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
const formatted = formatToolResultWithSpillover(ctx, "alpha_ask_paper", result);
|
||||
return { content: [{ type: "text", text: formatted.text }], details: formatted.details };
|
||||
},
|
||||
});
|
||||
|
||||
@@ -70,13 +115,14 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
note: Type.Optional(Type.String({ description: "Annotation text. Omit when clear=true." })),
|
||||
clear: Type.Optional(Type.Boolean({ description: "Clear the existing annotation." })),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
async execute(_toolCallId, params, _signal, _onUpdate, ctx) {
|
||||
const result = params.clear
|
||||
? await clearPaperAnnotation(params.paper)
|
||||
: params.note
|
||||
? await annotatePaper(params.paper, params.note)
|
||||
: (() => { throw new Error("Provide either note or clear=true."); })();
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
const formatted = formatToolResultWithSpillover(ctx, "alpha_annotate_paper", result);
|
||||
return { content: [{ type: "text", text: formatted.text }], details: formatted.details };
|
||||
},
|
||||
});
|
||||
|
||||
@@ -85,9 +131,10 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
label: "Alpha List Annotations",
|
||||
description: "List all persistent local paper annotations.",
|
||||
parameters: Type.Object({}),
|
||||
async execute() {
|
||||
async execute(_toolCallId, _params, _signal, _onUpdate, ctx) {
|
||||
const result = await listPaperAnnotations();
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
const formatted = formatToolResultWithSpillover(ctx, "alpha_list_annotations", result);
|
||||
return { content: [{ type: "text", text: formatted.text }], details: formatted.details };
|
||||
},
|
||||
});
|
||||
|
||||
@@ -99,9 +146,10 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
githubUrl: Type.String({ description: "GitHub repository URL." }),
|
||||
path: Type.Optional(Type.String({ description: "File or directory path. Default: '/'" })),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
async execute(_toolCallId, params, _signal, _onUpdate, ctx) {
|
||||
const result = await readPaperCode(params.githubUrl, params.path?.trim() || "/");
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
const formatted = formatToolResultWithSpillover(ctx, "alpha_read_code", result);
|
||||
return { content: [{ type: "text", text: formatted.text }], details: formatted.details };
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
84
extensions/research-tools/autolog.ts
Normal file
84
extensions/research-tools/autolog.ts
Normal file
@@ -0,0 +1,84 @@
|
||||
import { appendFileSync, mkdirSync, readFileSync } from "node:fs";
|
||||
import { dirname, resolve } from "node:path";
|
||||
|
||||
import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
type AutoLogMode = "off" | "events" | "full";
|
||||
|
||||
function readAgentSettings(): Record<string, unknown> {
|
||||
const agentDir = process.env.PI_CODING_AGENT_DIR;
|
||||
if (!agentDir) return {};
|
||||
try {
|
||||
return JSON.parse(readFileSync(resolve(agentDir, "settings.json"), "utf8")) as Record<string, unknown>;
|
||||
} catch {
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
function normalizeMode(value: unknown): AutoLogMode | undefined {
|
||||
if (typeof value !== "string") return undefined;
|
||||
const normalized = value.trim().toLowerCase();
|
||||
if (normalized === "off" || normalized === "events" || normalized === "full") return normalized;
|
||||
return undefined;
|
||||
}
|
||||
|
||||
export function getAutoLogMode(): AutoLogMode {
|
||||
return normalizeMode(process.env.FEYNMAN_AUTO_LOG) ??
|
||||
normalizeMode(readAgentSettings().autoLog) ??
|
||||
"events";
|
||||
}
|
||||
|
||||
function extractMessageText(message: unknown): string {
|
||||
if (!message || typeof message !== "object") return "";
|
||||
const content = (message as { content?: unknown }).content;
|
||||
if (typeof content === "string") return content;
|
||||
if (!Array.isArray(content)) return "";
|
||||
return content
|
||||
.map((item) => {
|
||||
if (!item || typeof item !== "object") return "";
|
||||
const record = item as { type?: string; text?: unknown; thinking?: unknown; name?: unknown };
|
||||
if (record.type === "text" && typeof record.text === "string") return record.text;
|
||||
if (record.type === "thinking" && typeof record.thinking === "string") return "[thinking omitted]";
|
||||
if (record.type === "toolCall") return `[tool:${typeof record.name === "string" ? record.name : "unknown"}]`;
|
||||
return "";
|
||||
})
|
||||
.filter(Boolean)
|
||||
.join("\n");
|
||||
}
|
||||
|
||||
function clip(text: string, maxChars: number): string {
|
||||
return text.length > maxChars ? `${text.slice(0, maxChars)}\n...[truncated ${text.length - maxChars} chars]` : text;
|
||||
}
|
||||
|
||||
export function autoLogPath(cwd: string, date = new Date()): string {
|
||||
const day = date.toISOString().slice(0, 10);
|
||||
return resolve(cwd, "notes", "feynman-autolog", `${day}.jsonl`);
|
||||
}
|
||||
|
||||
export function writeAutoLogEntry(cwd: string, entry: Record<string, unknown>): void {
|
||||
const path = autoLogPath(cwd);
|
||||
mkdirSync(dirname(path), { recursive: true });
|
||||
appendFileSync(path, `${JSON.stringify(entry)}\n`, "utf8");
|
||||
}
|
||||
|
||||
export function registerAutoLog(pi: ExtensionAPI): void {
|
||||
pi.on("message_end", async (event, ctx: ExtensionContext) => {
|
||||
const mode = getAutoLogMode();
|
||||
if (mode === "off") return;
|
||||
|
||||
const message = event.message as any;
|
||||
if (message.role !== "user" && message.role !== "assistant") return;
|
||||
|
||||
const text = extractMessageText(message).replace(/\s+/g, " ").trim();
|
||||
if (!text) return;
|
||||
|
||||
writeAutoLogEntry(ctx.cwd, {
|
||||
timestamp: new Date(message.timestamp ?? Date.now()).toISOString(),
|
||||
session: ctx.sessionManager.getSessionId(),
|
||||
role: message.role,
|
||||
model: message.role === "assistant" ? `${message.provider}/${message.model}` : undefined,
|
||||
mode,
|
||||
text: mode === "full" ? text : clip(text, 500),
|
||||
});
|
||||
});
|
||||
}
|
||||
53
extensions/research-tools/context.ts
Normal file
53
extensions/research-tools/context.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
|
||||
type ContextPosture = {
|
||||
model: string;
|
||||
contextWindow: number | null;
|
||||
estimatedInputTokens: number | null;
|
||||
utilizationPct: number | null;
|
||||
compactionThresholdHit: boolean;
|
||||
recommendedMaxWorkers: number;
|
||||
};
|
||||
|
||||
export function computeContextPosture(ctx: ExtensionContext): ContextPosture {
|
||||
const usage = ctx.getContextUsage();
|
||||
const modelWindow = typeof ctx.model?.contextWindow === "number" ? ctx.model.contextWindow : null;
|
||||
const contextWindow = usage?.contextWindow ?? modelWindow;
|
||||
const estimatedInputTokens = usage?.tokens ?? null;
|
||||
const utilizationPct = usage?.percent ?? (contextWindow && estimatedInputTokens
|
||||
? Math.round((estimatedInputTokens / contextWindow) * 1000) / 10
|
||||
: null);
|
||||
const compactionThresholdHit = utilizationPct !== null && utilizationPct >= 70;
|
||||
const availableForWorkers = contextWindow
|
||||
? Math.max(0, contextWindow - 16_384 - (estimatedInputTokens ?? 0))
|
||||
: 0;
|
||||
const recommendedMaxWorkers = contextWindow === null
|
||||
? 1
|
||||
: Math.max(1, Math.min(4, Math.floor(availableForWorkers / 24_000) || 1));
|
||||
|
||||
return {
|
||||
model: ctx.model ? `${ctx.model.provider}/${ctx.model.id}` : "not set",
|
||||
contextWindow,
|
||||
estimatedInputTokens,
|
||||
utilizationPct,
|
||||
compactionThresholdHit,
|
||||
recommendedMaxWorkers,
|
||||
};
|
||||
}
|
||||
|
||||
export function registerContextReportTool(pi: ExtensionAPI): void {
|
||||
pi.registerTool({
|
||||
name: "context_report",
|
||||
label: "Context Report",
|
||||
description: "Report current Pi context usage, compaction threshold posture, and safe worker-count guidance.",
|
||||
parameters: Type.Object({}),
|
||||
async execute(_toolCallId, _params, _signal, _onUpdate, ctx) {
|
||||
const report = computeContextPosture(ctx);
|
||||
return {
|
||||
content: [{ type: "text", text: JSON.stringify(report, null, 2) }],
|
||||
details: report,
|
||||
};
|
||||
},
|
||||
});
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import { getExtensionCommandSpec } from "../../metadata/commands.mjs";
|
||||
import { buildProjectAgentsTemplate, buildSessionLogsReadme } from "./project-scaffold.js";
|
||||
import { collectManagedGc } from "./state.js";
|
||||
|
||||
async function pathExists(path: string): Promise<boolean> {
|
||||
try {
|
||||
@@ -104,7 +105,15 @@ export function registerInitCommand(pi: ExtensionAPI): void {
|
||||
export function registerOutputsCommand(pi: ExtensionAPI): void {
|
||||
pi.registerCommand("outputs", {
|
||||
description: "Browse all research artifacts (papers, outputs, experiments, notes).",
|
||||
handler: async (_args, ctx) => {
|
||||
handler: async (args, ctx) => {
|
||||
const trimmedArgs = args.trim();
|
||||
if (trimmedArgs === "gc" || trimmedArgs === "gc --dry-run") {
|
||||
const dryRun = trimmedArgs.includes("--dry-run");
|
||||
const result = collectManagedGc(ctx.cwd, Date.now(), undefined, { dryRun });
|
||||
ctx.ui.notify(`${dryRun ? "Would remove" : "Removed"} ${result.deleted.length} managed cache file(s).`, "info");
|
||||
return;
|
||||
}
|
||||
|
||||
const items = await collectArtifacts(ctx.cwd);
|
||||
if (items.length === 0) {
|
||||
ctx.ui.notify("No artifacts found. Use /lit, /draft, /review, or /deepresearch to create some.", "info");
|
||||
|
||||
92
extensions/research-tools/resume.ts
Normal file
92
extensions/research-tools/resume.ts
Normal file
@@ -0,0 +1,92 @@
|
||||
import { existsSync, readdirSync, readFileSync, statSync } from "node:fs";
|
||||
import { join, resolve } from "node:path";
|
||||
|
||||
import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
type ResumeArtifact = {
|
||||
path: string;
|
||||
mtimeMs: number;
|
||||
};
|
||||
|
||||
function collectFiles(root: string, predicate: (path: string) => boolean): ResumeArtifact[] {
|
||||
if (!existsSync(root)) return [];
|
||||
const files: ResumeArtifact[] = [];
|
||||
for (const entry of readdirSync(root, { withFileTypes: true })) {
|
||||
const path = join(root, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
files.push(...collectFiles(path, predicate));
|
||||
continue;
|
||||
}
|
||||
if (!entry.isFile() || !predicate(path)) continue;
|
||||
try {
|
||||
files.push({ path, mtimeMs: statSync(path).mtimeMs });
|
||||
} catch {}
|
||||
}
|
||||
return files;
|
||||
}
|
||||
|
||||
function tail(text: string, maxChars: number): string {
|
||||
return text.length <= maxChars ? text : text.slice(text.length - maxChars);
|
||||
}
|
||||
|
||||
export function buildResumePacket(cwd: string, maxChars = 4_000): string | undefined {
|
||||
const plans = collectFiles(resolve(cwd, "outputs", ".plans"), (path) => path.endsWith(".md"))
|
||||
.sort((a, b) => b.mtimeMs - a.mtimeMs)
|
||||
.slice(0, 3);
|
||||
const stateFiles = collectFiles(resolve(cwd, "outputs", ".state"), (path) => /\.(json|jsonl|md)$/i.test(path))
|
||||
.sort((a, b) => b.mtimeMs - a.mtimeMs)
|
||||
.slice(0, 5);
|
||||
const changelogPath = resolve(cwd, "CHANGELOG.md");
|
||||
|
||||
if (plans.length === 0 && stateFiles.length === 0 && !existsSync(changelogPath)) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const lines: string[] = [
|
||||
"[feynman resume packet]",
|
||||
"This is a bounded project-state summary from disk. Prefer these paths over guessing prior workflow state.",
|
||||
];
|
||||
|
||||
if (plans.length > 0) {
|
||||
lines.push("", "Recent plans:");
|
||||
for (const plan of plans) {
|
||||
lines.push(`- ${plan.path}`);
|
||||
}
|
||||
const newestPlan = plans[0]!;
|
||||
try {
|
||||
lines.push("", `Newest plan tail (${newestPlan.path}):`, tail(readFileSync(newestPlan.path, "utf8"), 1_500));
|
||||
} catch {}
|
||||
}
|
||||
|
||||
if (stateFiles.length > 0) {
|
||||
lines.push("", "Recent state files:");
|
||||
for (const file of stateFiles) {
|
||||
lines.push(`- ${file.path}`);
|
||||
}
|
||||
}
|
||||
|
||||
if (existsSync(changelogPath)) {
|
||||
try {
|
||||
lines.push("", "CHANGELOG tail:", tail(readFileSync(changelogPath, "utf8"), 1_200));
|
||||
} catch {}
|
||||
}
|
||||
|
||||
return tail(lines.join("\n"), maxChars);
|
||||
}
|
||||
|
||||
export function registerResumePacket(pi: ExtensionAPI): void {
|
||||
pi.on("session_start", async (_event, ctx: ExtensionContext) => {
|
||||
if (process.env.FEYNMAN_RESUME_PACKET === "off") return;
|
||||
const packet = buildResumePacket(ctx.cwd);
|
||||
if (!packet) return;
|
||||
pi.sendMessage(
|
||||
{
|
||||
customType: "feynman_resume_packet",
|
||||
content: packet,
|
||||
display: false,
|
||||
details: { source: "outputs/.plans outputs/.state CHANGELOG.md" },
|
||||
},
|
||||
{ triggerTurn: false, deliverAs: "nextTurn" },
|
||||
);
|
||||
});
|
||||
}
|
||||
276
extensions/research-tools/state.ts
Normal file
276
extensions/research-tools/state.ts
Normal file
@@ -0,0 +1,276 @@
|
||||
import { createHash } from "node:crypto";
|
||||
import { appendFileSync, existsSync, mkdirSync, readdirSync, readFileSync, rmSync, statSync, writeFileSync } from "node:fs";
|
||||
import { basename, dirname, relative, resolve } from "node:path";
|
||||
|
||||
import { isToolCallEventType, type ExtensionAPI, type ExtensionContext, type ToolCallEvent } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
type SlugLock = {
|
||||
pid: number;
|
||||
sessionId: string;
|
||||
startedAt: string;
|
||||
planPath: string;
|
||||
};
|
||||
|
||||
type GcResult = {
|
||||
deleted: string[];
|
||||
kept: string[];
|
||||
};
|
||||
|
||||
type SpillResult = {
|
||||
content: { type: "text"; text: string }[];
|
||||
details: unknown;
|
||||
} | undefined;
|
||||
|
||||
type ToolResultPatch = {
|
||||
content?: { type: "text"; text: string }[];
|
||||
details?: unknown;
|
||||
isError?: boolean;
|
||||
};
|
||||
|
||||
const BUILT_IN_TOOL_NAMES = new Set(["bash", "read", "write", "edit", "grep", "find", "ls"]);
|
||||
|
||||
function isPathInside(parent: string, child: string): boolean {
|
||||
const rel = relative(parent, child);
|
||||
return rel === "" || (!rel.startsWith("..") && !rel.startsWith("/"));
|
||||
}
|
||||
|
||||
function pidIsLive(pid: number): boolean {
|
||||
if (!Number.isInteger(pid) || pid <= 0) return false;
|
||||
try {
|
||||
process.kill(pid, 0);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function readLock(path: string): SlugLock | undefined {
|
||||
try {
|
||||
return JSON.parse(readFileSync(path, "utf8")) as SlugLock;
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
function lockIsLive(lock: SlugLock | undefined, timeoutMs: number, now = Date.now()): boolean {
|
||||
if (!lock) return false;
|
||||
const started = Date.parse(lock.startedAt);
|
||||
if (!Number.isFinite(started) || now - started > timeoutMs) return false;
|
||||
return pidIsLive(lock.pid);
|
||||
}
|
||||
|
||||
function planPathInfo(cwd: string, inputPath: string): { absPath: string; slug: string; lockPath: string } | undefined {
|
||||
const absPath = resolve(cwd, inputPath);
|
||||
const plansRoot = resolve(cwd, "outputs", ".plans");
|
||||
if (!isPathInside(plansRoot, absPath) || !absPath.endsWith(".md")) return undefined;
|
||||
const slug = basename(absPath, ".md");
|
||||
const lockPath = resolve(cwd, "outputs", ".state", `${slug}.lock`);
|
||||
return { absPath, slug, lockPath };
|
||||
}
|
||||
|
||||
export function claimPlanSlug(
|
||||
cwd: string,
|
||||
sessionId: string,
|
||||
inputPath: string,
|
||||
options?: { timeoutMinutes?: number; strategy?: "suffix" | "error" | "overwrite"; now?: number },
|
||||
): { ok: true; lockPath?: string } | { ok: false; reason: string } {
|
||||
const info = planPathInfo(cwd, inputPath);
|
||||
if (!info) return { ok: true };
|
||||
|
||||
const strategy = options?.strategy ?? (process.env.FEYNMAN_SLUG_COLLISION_STRATEGY as "suffix" | "error" | "overwrite" | undefined) ?? "error";
|
||||
if (strategy === "overwrite") return { ok: true };
|
||||
|
||||
const timeoutMinutes = options?.timeoutMinutes ?? (Number(process.env.FEYNMAN_SLUG_LOCK_TIMEOUT_MINUTES) || 30);
|
||||
const timeoutMs = timeoutMinutes * 60_000;
|
||||
const existingLock = readLock(info.lockPath);
|
||||
const live = lockIsLive(existingLock, timeoutMs, options?.now);
|
||||
if (live && existingLock?.sessionId !== sessionId) {
|
||||
return {
|
||||
ok: false,
|
||||
reason: `Slug "${info.slug}" is locked by another Feynman session. Use a unique slug such as ${info.slug}-2, or wait for ${info.lockPath} to expire.`,
|
||||
};
|
||||
}
|
||||
if (existsSync(info.absPath) && existingLock?.sessionId !== sessionId) {
|
||||
return {
|
||||
ok: false,
|
||||
reason: `Plan already exists at ${relative(cwd, info.absPath)}. Use a unique slug such as ${info.slug}-2 to avoid overwriting another run.`,
|
||||
};
|
||||
}
|
||||
|
||||
mkdirSync(dirname(info.lockPath), { recursive: true });
|
||||
writeFileSync(
|
||||
info.lockPath,
|
||||
JSON.stringify({
|
||||
pid: process.pid,
|
||||
sessionId,
|
||||
startedAt: new Date(options?.now ?? Date.now()).toISOString(),
|
||||
planPath: info.absPath,
|
||||
}, null, 2) + "\n",
|
||||
"utf8",
|
||||
);
|
||||
return { ok: true, lockPath: info.lockPath };
|
||||
}
|
||||
|
||||
function managedRetentionDays(): number {
|
||||
const raw = Number(process.env.FEYNMAN_CACHE_RETENTION_DAYS);
|
||||
return Number.isFinite(raw) && raw >= 0 ? raw : 14;
|
||||
}
|
||||
|
||||
function gcIgnored(path: string): boolean {
|
||||
if (path.endsWith(".gcignore")) return true;
|
||||
try {
|
||||
return /^---[\s\S]*?retain:\s*true/im.test(readFileSync(path, "utf8").slice(0, 500));
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
export function collectManagedGc(
|
||||
cwd: string,
|
||||
now = Date.now(),
|
||||
retentionDays = managedRetentionDays(),
|
||||
options?: { dryRun?: boolean },
|
||||
): GcResult {
|
||||
const roots = [
|
||||
resolve(cwd, "outputs", ".cache"),
|
||||
resolve(cwd, "outputs", ".runs"),
|
||||
resolve(cwd, "outputs", ".notes"),
|
||||
];
|
||||
const cutoff = now - retentionDays * 24 * 60 * 60 * 1000;
|
||||
const result: GcResult = { deleted: [], kept: [] };
|
||||
|
||||
const visit = (path: string) => {
|
||||
if (!existsSync(path)) return;
|
||||
for (const entry of readdirSync(path, { withFileTypes: true })) {
|
||||
const child = resolve(path, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
visit(child);
|
||||
try {
|
||||
if (readdirSync(child).length === 0) rmSync(child, { recursive: true, force: true });
|
||||
} catch {}
|
||||
continue;
|
||||
}
|
||||
if (!entry.isFile()) continue;
|
||||
const stat = statSync(child);
|
||||
if (gcIgnored(child) || stat.mtimeMs >= cutoff) {
|
||||
result.kept.push(child);
|
||||
continue;
|
||||
}
|
||||
if (!options?.dryRun) {
|
||||
rmSync(child, { force: true });
|
||||
}
|
||||
result.deleted.push(child);
|
||||
}
|
||||
};
|
||||
|
||||
for (const root of roots) visit(root);
|
||||
return result;
|
||||
}
|
||||
|
||||
function textFromToolContent(content: ToolResultContent): string {
|
||||
return content
|
||||
.map((item) => item.type === "text" ? item.text : "")
|
||||
.filter(Boolean)
|
||||
.join("\n");
|
||||
}
|
||||
|
||||
type ToolResultContent = Array<{ type: "text"; text: string } | { type: "image"; data: string; mimeType: string }>;
|
||||
|
||||
function customToolOutputCapChars(): number {
|
||||
const raw = Number(process.env.FEYNMAN_CUSTOM_TOOL_CAP_CHARS);
|
||||
return Number.isFinite(raw) && raw > 0 ? raw : 24_000;
|
||||
}
|
||||
|
||||
export function spillLargeCustomToolResult(
|
||||
cwd: string,
|
||||
toolName: string,
|
||||
toolCallId: string,
|
||||
content: ToolResultContent,
|
||||
details: unknown,
|
||||
): SpillResult {
|
||||
if (BUILT_IN_TOOL_NAMES.has(toolName)) return undefined;
|
||||
const text = textFromToolContent(content);
|
||||
const cap = customToolOutputCapChars();
|
||||
if (text.length <= cap) return undefined;
|
||||
|
||||
const hash = createHash("sha256").update(text).digest("hex");
|
||||
const safeToolName = toolName.replace(/[^a-zA-Z0-9._-]+/g, "-").slice(0, 60) || "tool";
|
||||
const path = resolve(cwd, "outputs", ".runs", `${safeToolName}-${toolCallId}-${hash.slice(0, 12)}.md`);
|
||||
mkdirSync(dirname(path), { recursive: true });
|
||||
writeFileSync(path, text, "utf8");
|
||||
const pointer = {
|
||||
feynman_spillover: true,
|
||||
tool: toolName,
|
||||
toolCallId,
|
||||
path,
|
||||
bytes: Buffer.byteLength(text, "utf8"),
|
||||
sha256: hash,
|
||||
head: text.slice(0, Math.min(cap, 2_000)),
|
||||
note: "Full custom/subagent tool result was written to disk. Read the path in bounded chunks when needed.",
|
||||
originalDetails: details,
|
||||
};
|
||||
return {
|
||||
content: [{ type: "text", text: JSON.stringify(pointer, null, 2) }],
|
||||
details: pointer,
|
||||
};
|
||||
}
|
||||
|
||||
function appendJsonl(path: string, value: unknown): void {
|
||||
mkdirSync(dirname(path), { recursive: true });
|
||||
appendFileSync(path, `${JSON.stringify(value)}\n`, "utf8");
|
||||
}
|
||||
|
||||
function recordCheckpoint(ctx: ExtensionContext, toolName: string, isError: boolean): void {
|
||||
appendJsonl(resolve(ctx.cwd, "outputs", ".state", "feynman.checkpoint.jsonl"), {
|
||||
timestamp: new Date().toISOString(),
|
||||
sessionId: ctx.sessionManager.getSessionId(),
|
||||
toolName,
|
||||
isError,
|
||||
context: ctx.getContextUsage?.(),
|
||||
});
|
||||
}
|
||||
|
||||
function recordJobEvent(ctx: ExtensionContext, toolName: string, status: "running" | "done" | "failed", data: unknown): void {
|
||||
appendJsonl(resolve(ctx.cwd, "outputs", ".state", "subagent.jobs.jsonl"), {
|
||||
timestamp: new Date().toISOString(),
|
||||
sessionId: ctx.sessionManager.getSessionId(),
|
||||
toolName,
|
||||
status,
|
||||
data,
|
||||
});
|
||||
}
|
||||
|
||||
function looksLikeSubagentTool(toolName: string): boolean {
|
||||
return /subagent|parallel|chain|run/i.test(toolName);
|
||||
}
|
||||
|
||||
/**
 * Wires Feynman's state-management hooks into the Pi extension API:
 * startup cache GC, plan-slug locking on write/edit tool calls, and
 * JSONL checkpoint/job logging on tool results.
 */
export function registerStateManagement(pi: ExtensionAPI): void {
  // On session start, garbage-collect stale managed caches unless the user
  // opted out via FEYNMAN_OUTPUTS_GC=off.
  pi.on("session_start", async (_event, ctx) => {
    if (process.env.FEYNMAN_OUTPUTS_GC === "off") return;
    collectManagedGc(ctx.cwd);
  });

  // Before a tool call runs: lock plan slugs for write/edit targets so two
  // concurrent sessions cannot collide on the same plan, and log the start of
  // any subagent-like job. Returning { block: true } vetoes the tool call.
  pi.on("tool_call", async (event: ToolCallEvent, ctx) => {
    const sessionId = ctx.sessionManager.getSessionId();
    if (isToolCallEventType("write", event)) {
      const claim = claimPlanSlug(ctx.cwd, sessionId, event.input.path);
      if (!claim.ok) return { block: true, reason: claim.reason };
    }
    if (isToolCallEventType("edit", event)) {
      const claim = claimPlanSlug(ctx.cwd, sessionId, event.input.path);
      if (!claim.ok) return { block: true, reason: claim.reason };
    }
    if (looksLikeSubagentTool(event.toolName)) {
      recordJobEvent(ctx, event.toolName, "running", event.input);
    }
    // undefined = allow the tool call to proceed unmodified.
    return undefined;
  });

  // After every tool result: checkpoint it, record subagent job completion
  // (done vs failed), and let spillLargeCustomToolResult replace oversized
  // results with an on-disk pointer patch when applicable.
  pi.on("tool_result", async (event, ctx): Promise<ToolResultPatch | undefined> => {
    recordCheckpoint(ctx, event.toolName, event.isError);
    if (looksLikeSubagentTool(event.toolName)) {
      recordJobEvent(ctx, event.toolName, event.isError ? "failed" : "done", event.details ?? event.content);
    }
    return spillLargeCustomToolResult(ctx.cwd, event.toolName, event.toolCallId, event.content as ToolResultContent, event.details);
  });
}
|
||||
4
package-lock.json
generated
4
package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@companion-ai/feynman",
|
||||
"version": "0.2.24",
|
||||
"version": "0.2.27",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@companion-ai/feynman",
|
||||
"version": "0.2.24",
|
||||
"version": "0.2.27",
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@companion-ai/feynman",
|
||||
"version": "0.2.24",
|
||||
"version": "0.2.27",
|
||||
"description": "Research-first CLI agent built on Pi and alphaXiv",
|
||||
"license": "MIT",
|
||||
"type": "module",
|
||||
|
||||
@@ -9,7 +9,7 @@ Audit the paper and codebase for: $@
|
||||
Derive a short slug from the audit target (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||
|
||||
Requirements:
|
||||
- Before starting, outline the audit plan: which paper, which repo, which claims to check. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||
- Before starting, outline the audit plan: which paper, which repo, which claims to check. Write the plan to `outputs/.plans/<slug>.md`. Briefly summarize the plan to the user and continue immediately. Do not ask for confirmation or wait for a proceed response unless the user explicitly requested plan review.
|
||||
- Use the `researcher` subagent for evidence gathering and the `verifier` subagent to verify sources and add inline citations when the audit is non-trivial.
|
||||
- Compare claimed methods, defaults, metrics, and data handling against the actual code.
|
||||
- Call out missing code, mismatches, ambiguous defaults, and reproduction risks.
|
||||
|
||||
@@ -9,7 +9,7 @@ Compare sources for: $@
|
||||
Derive a short slug from the comparison topic (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||
|
||||
Requirements:
|
||||
- Before starting, outline the comparison plan: which sources to compare, which dimensions to evaluate, expected output structure. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||
- Before starting, outline the comparison plan: which sources to compare, which dimensions to evaluate, expected output structure. Write the plan to `outputs/.plans/<slug>.md`. Briefly summarize the plan to the user and continue immediately. Do not ask for confirmation or wait for a proceed response unless the user explicitly requested plan review.
|
||||
- Use the `researcher` subagent to gather source material when the comparison set is broad, and the `verifier` subagent to verify sources and add inline citations to the final matrix.
|
||||
- Build a comparison matrix covering: source, key claim, evidence type, caveats, confidence.
|
||||
- Generate charts with `pi-charts` when the comparison involves quantitative metrics. Use Mermaid for method or architecture comparisons.
|
||||
|
||||
@@ -51,7 +51,7 @@ If `CHANGELOG.md` exists, read the most recent relevant entries before finalizin
|
||||
|
||||
Also save the plan with `memory_remember` (type: `fact`, key: `deepresearch.<slug>.plan`) so it survives context truncation.
|
||||
|
||||
Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting in the terminal, give them a brief chance to request plan changes before proceeding.
|
||||
Briefly summarize the plan to the user and continue immediately. Do not ask for confirmation or wait for a proceed response unless the user explicitly requested plan review.
|
||||
|
||||
Do not stop after planning. If live search, subagents, web access, alphaXiv, or any other capability is unavailable, continue in degraded mode and write a durable blocked/partial report that records exactly which capabilities failed.
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ Write a paper-style draft for: $@
|
||||
Derive a short slug from the topic (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||
|
||||
Requirements:
|
||||
- Before writing, outline the draft structure: proposed title, sections, key claims to make, source material to draw from, and a verification log for the critical claims, figures, and calculations. Write the outline to `outputs/.plans/<slug>.md`. Present the outline to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||
- Before writing, outline the draft structure: proposed title, sections, key claims to make, source material to draw from, and a verification log for the critical claims, figures, and calculations. Write the outline to `outputs/.plans/<slug>.md`. Briefly summarize the outline to the user and continue immediately. Do not ask for confirmation or wait for a proceed response unless the user explicitly requested outline review.
|
||||
- Use the `writer` subagent when the draft should be produced from already-collected notes, then use the `verifier` subagent to add inline citations and verify sources.
|
||||
- Include at minimum: title, abstract, problem statement, related work, method or synthesis, evidence or experiments, limitations, conclusion.
|
||||
- Use clean Markdown with LaTeX where equations materially help.
|
||||
|
||||
@@ -10,7 +10,7 @@ Derive a short slug from the topic (lowercase, hyphens, no filler words, ≤5 wo
|
||||
|
||||
## Workflow
|
||||
|
||||
1. **Plan** — Outline the scope: key questions, source types to search (papers, web, repos), time period, expected sections, and a small task ledger plus verification log. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||
1. **Plan** — Outline the scope: key questions, source types to search (papers, web, repos), time period, expected sections, and a small task ledger plus verification log. Write the plan to `outputs/.plans/<slug>.md`. Briefly summarize the plan to the user and continue immediately. Do not ask for confirmation or wait for a proceed response unless the user explicitly requested plan review.
|
||||
2. **Gather** — Use the `researcher` subagent when the sweep is wide enough to benefit from delegated paper triage before synthesis. For narrow topics, search directly. Researcher outputs go to `<slug>-research-*.md`. Do not silently skip assigned questions; mark them `done`, `blocked`, or `superseded`.
|
||||
3. **Synthesize** — Separate consensus, disagreements, and open questions. When useful, propose concrete next experiments or follow-up reading. Generate charts with `pi-charts` for quantitative comparisons across papers and Mermaid diagrams for taxonomies or method pipelines. Before finishing the draft, sweep every strong claim against the verification log and downgrade anything that is inferred or single-source critical.
|
||||
4. **Cite** — Spawn the `verifier` agent to add inline citations and verify every source URL in the draft.
|
||||
|
||||
@@ -9,7 +9,7 @@ Review this AI research artifact: $@
|
||||
Derive a short slug from the artifact name (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||
|
||||
Requirements:
|
||||
- Before starting, outline what will be reviewed, the review criteria (novelty, empirical rigor, baselines, reproducibility, etc.), and any verification-specific checks needed for claims, figures, and reported metrics. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||
- Before starting, outline what will be reviewed, the review criteria (novelty, empirical rigor, baselines, reproducibility, etc.), and any verification-specific checks needed for claims, figures, and reported metrics. Briefly summarize the plan to the user and continue immediately. Do not ask for confirmation or wait for a proceed response unless the user explicitly requested plan review.
|
||||
- Spawn a `researcher` subagent to gather evidence on the artifact — inspect the paper, code, cited work, and any linked experimental artifacts. Save to `<slug>-research.md`.
|
||||
- Spawn a `reviewer` subagent with `<slug>-research.md` to produce the final peer review with inline annotations.
|
||||
- For small or simple artifacts where evidence gathering is overkill, run the `reviewer` subagent directly instead.
|
||||
|
||||
@@ -101,7 +101,7 @@ print(f"[summarize] chunks={len(chunks)} chunk_size={chunk_size} overlap={overla
|
||||
|
||||
### 3b. Confirm before spawning
|
||||
|
||||
If this is an unattended or one-shot run, continue automatically. Otherwise tell the user: "Source is ~<chars> chars -> <N> chunks -> <N> researcher subagents. This may take several minutes. Proceed?" Wait for confirmation before launching Tier 3.
|
||||
Briefly summarize: "Source is ~<chars> chars -> <N> chunks -> <N> researcher subagents. This may take several minutes." Then continue automatically. Do not ask for confirmation or wait for a proceed response unless the user explicitly requested review before launching.
|
||||
|
||||
### 3c. Dispatch researcher subagents
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ Create a research watch for: $@
|
||||
Derive a short slug from the watch topic (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||
|
||||
Requirements:
|
||||
- Before starting, outline the watch plan: what to monitor, what signals matter, what counts as a meaningful change, and the check frequency. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||
- Before starting, outline the watch plan: what to monitor, what signals matter, what counts as a meaningful change, and the check frequency. Write the plan to `outputs/.plans/<slug>.md`. Briefly summarize the plan to the user and continue immediately. Do not ask for confirmation or wait for a proceed response unless the user explicitly requested plan review.
|
||||
- Start with a baseline sweep of the topic.
|
||||
- Use `schedule_prompt` to create the recurring or delayed follow-up instead of merely promising to check later.
|
||||
- Save exactly one baseline artifact to `outputs/<slug>-baseline.md`.
|
||||
|
||||
@@ -110,7 +110,7 @@ This usually means the release exists, but not all platform bundles were uploade
|
||||
Workarounds:
|
||||
- try again after the release finishes publishing
|
||||
- pass the latest published version explicitly, e.g.:
|
||||
& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.24
|
||||
& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.27
|
||||
"@
|
||||
}
|
||||
|
||||
|
||||
@@ -261,7 +261,7 @@ This usually means the release exists, but not all platform bundles were uploade
|
||||
Workarounds:
|
||||
- try again after the release finishes publishing
|
||||
- pass the latest published version explicitly, e.g.:
|
||||
curl -fsSL https://feynman.is/install | bash -s -- 0.2.24
|
||||
curl -fsSL https://feynman.is/install | bash -s -- 0.2.27
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -1,2 +1,3 @@
|
||||
export const PI_SUBAGENTS_PATCH_TARGETS: string[];
|
||||
export function patchPiSubagentsSource(relativePath: string, source: string): string;
|
||||
export function stripPiSubagentBuiltinModelSource(source: string): string;
|
||||
|
||||
@@ -66,6 +66,24 @@ function replaceAll(source, from, to) {
|
||||
return source.split(from).join(to);
|
||||
}
|
||||
|
||||
// Removes any `model:` pin from a pi-subagents agent definition's YAML
// frontmatter, leaving the rest of the document untouched. Input without a
// recognizable `---` frontmatter block is returned unchanged.
export function stripPiSubagentBuiltinModelSource(source) {
  const hasOpeningFence = source.startsWith("---\n");
  if (!hasOpeningFence) return source;

  const closingIndex = source.indexOf("\n---", 4);
  if (closingIndex === -1) return source;

  const headerLines = source.slice(4, closingIndex).split("\n");
  const keptLines = headerLines.filter((line) => !/^\s*model\s*:/.test(line));
  return `---\n${keptLines.join("\n")}${source.slice(closingIndex)}`;
}
|
||||
|
||||
export function patchPiSubagentsSource(relativePath, source) {
|
||||
let patched = source;
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ import { patchAlphaHubAuthSource } from "./lib/alpha-hub-auth-patch.mjs";
|
||||
import { patchPiExtensionLoaderSource } from "./lib/pi-extension-loader-patch.mjs";
|
||||
import { patchPiGoogleLegacySchemaSource } from "./lib/pi-google-legacy-schema-patch.mjs";
|
||||
import { PI_WEB_ACCESS_PATCH_TARGETS, patchPiWebAccessSource } from "./lib/pi-web-access-patch.mjs";
|
||||
import { PI_SUBAGENTS_PATCH_TARGETS, patchPiSubagentsSource } from "./lib/pi-subagents-patch.mjs";
|
||||
import { PI_SUBAGENTS_PATCH_TARGETS, patchPiSubagentsSource, stripPiSubagentBuiltinModelSource } from "./lib/pi-subagents-patch.mjs";
|
||||
|
||||
const here = dirname(fileURLToPath(import.meta.url));
|
||||
const appRoot = resolve(here, "..");
|
||||
@@ -479,6 +479,19 @@ if (existsSync(piSubagentsRoot)) {
|
||||
writeFileSync(entryPath, patched, "utf8");
|
||||
}
|
||||
}
|
||||
|
||||
const builtinAgentsRoot = resolve(piSubagentsRoot, "agents");
|
||||
if (existsSync(builtinAgentsRoot)) {
|
||||
for (const entry of readdirSync(builtinAgentsRoot, { withFileTypes: true })) {
|
||||
if (!entry.isFile() || !entry.name.endsWith(".md")) continue;
|
||||
const entryPath = resolve(builtinAgentsRoot, entry.name);
|
||||
const source = readFileSync(entryPath, "utf8");
|
||||
const patched = stripPiSubagentBuiltinModelSource(source);
|
||||
if (patched !== source) {
|
||||
writeFileSync(entryPath, patched, "utf8");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (packageJsonPath && existsSync(packageJsonPath)) {
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import { existsSync, mkdirSync, readFileSync, rmSync, statSync, writeFileSync } from "node:fs";
|
||||
import { existsSync, mkdirSync, readdirSync, readFileSync, rmSync, statSync, writeFileSync } from "node:fs";
|
||||
import { resolve } from "node:path";
|
||||
import { spawnSync } from "node:child_process";
|
||||
|
||||
import { stripPiSubagentBuiltinModelSource } from "./lib/pi-subagents-patch.mjs";
|
||||
|
||||
const appRoot = resolve(import.meta.dirname, "..");
|
||||
const settingsPath = resolve(appRoot, ".feynman", "settings.json");
|
||||
const feynmanDir = resolve(appRoot, ".feynman");
|
||||
@@ -10,7 +12,7 @@ const workspaceNodeModulesDir = resolve(workspaceDir, "node_modules");
|
||||
const manifestPath = resolve(workspaceDir, ".runtime-manifest.json");
|
||||
const workspacePackageJsonPath = resolve(workspaceDir, "package.json");
|
||||
const workspaceArchivePath = resolve(feynmanDir, "runtime-workspace.tgz");
|
||||
const PRUNE_VERSION = 3;
|
||||
const PRUNE_VERSION = 4;
|
||||
|
||||
function readPackageSpecs() {
|
||||
const settings = JSON.parse(readFileSync(settingsPath, "utf8"));
|
||||
@@ -72,6 +74,17 @@ function writeWorkspacePackageJson() {
|
||||
);
|
||||
}
|
||||
|
||||
// Builds the environment for child npm installs with dry-run explicitly
// forced off. `npm pack --dry-run` exports dry-run config to lifecycle
// scripts, but the vendored runtime workspace must still install real
// node_modules so the publish artifact can be validated without poisoning
// the archive.
function childNpmInstallEnv() {
  const overrides = {
    npm_config_dry_run: "false",
    NPM_CONFIG_DRY_RUN: "false",
  };
  return Object.assign({}, process.env, overrides);
}
|
||||
|
||||
function prepareWorkspace(packageSpecs) {
|
||||
rmSync(workspaceDir, { recursive: true, force: true });
|
||||
mkdirSync(workspaceDir, { recursive: true });
|
||||
@@ -84,9 +97,9 @@ function prepareWorkspace(packageSpecs) {
|
||||
const result = spawnSync(
|
||||
process.env.npm_execpath ? process.execPath : "npm",
|
||||
process.env.npm_execpath
|
||||
? [process.env.npm_execpath, "install", "--prefer-offline", "--no-audit", "--no-fund", "--loglevel", "error", "--prefix", workspaceDir, ...packageSpecs]
|
||||
: ["install", "--prefer-offline", "--no-audit", "--no-fund", "--loglevel", "error", "--prefix", workspaceDir, ...packageSpecs],
|
||||
{ stdio: "inherit" },
|
||||
? [process.env.npm_execpath, "install", "--prefer-offline", "--no-audit", "--no-fund", "--no-dry-run", "--loglevel", "error", "--prefix", workspaceDir, ...packageSpecs]
|
||||
: ["install", "--prefer-offline", "--no-audit", "--no-fund", "--no-dry-run", "--loglevel", "error", "--prefix", workspaceDir, ...packageSpecs],
|
||||
{ stdio: "inherit", env: childNpmInstallEnv() },
|
||||
);
|
||||
if (result.status !== 0) {
|
||||
process.exit(result.status ?? 1);
|
||||
@@ -122,6 +135,25 @@ function pruneWorkspace() {
|
||||
}
|
||||
}
|
||||
|
||||
// Strips `model:` frontmatter pins from every bundled pi-subagents agent
// markdown file in the vendored workspace. Returns true when at least one
// file was rewritten, so the caller knows to refresh the manifest.
function stripBundledPiSubagentModelPins() {
  const agentsDir = resolve(workspaceNodeModulesDir, "pi-subagents", "agents");
  if (!existsSync(agentsDir)) {
    return false;
  }

  let anyStripped = false;
  const entries = readdirSync(agentsDir, { withFileTypes: true });
  for (const dirent of entries) {
    const isMarkdownFile = dirent.isFile() && dirent.name.endsWith(".md");
    if (!isMarkdownFile) continue;
    const filePath = resolve(agentsDir, dirent.name);
    const original = readFileSync(filePath, "utf8");
    const stripped = stripPiSubagentBuiltinModelSource(original);
    if (stripped !== original) {
      writeFileSync(filePath, stripped, "utf8");
      anyStripped = true;
    }
  }
  return anyStripped;
}
|
||||
|
||||
function archiveIsCurrent() {
|
||||
if (!existsSync(workspaceArchivePath) || !existsSync(manifestPath)) {
|
||||
return false;
|
||||
@@ -145,6 +177,10 @@ const packageSpecs = readPackageSpecs();
|
||||
|
||||
if (workspaceIsCurrent(packageSpecs)) {
|
||||
console.log("[feynman] vendored runtime workspace already up to date");
|
||||
if (stripBundledPiSubagentModelPins()) {
|
||||
writeManifest(packageSpecs);
|
||||
console.log("[feynman] stripped bundled pi-subagents model pins");
|
||||
}
|
||||
if (archiveIsCurrent()) {
|
||||
process.exit(0);
|
||||
}
|
||||
@@ -157,6 +193,7 @@ if (workspaceIsCurrent(packageSpecs)) {
|
||||
console.log("[feynman] preparing vendored runtime workspace...");
|
||||
prepareWorkspace(packageSpecs);
|
||||
pruneWorkspace();
|
||||
stripBundledPiSubagentModelPins();
|
||||
writeManifest(packageSpecs);
|
||||
createWorkspaceArchive();
|
||||
console.log("[feynman] vendored runtime workspace ready");
|
||||
|
||||
@@ -48,6 +48,7 @@ const PROVIDER_LABELS: Record<string, string> = {
|
||||
huggingface: "Hugging Face",
|
||||
"amazon-bedrock": "Amazon Bedrock",
|
||||
"azure-openai-responses": "Azure OpenAI Responses",
|
||||
litellm: "LiteLLM Proxy",
|
||||
};
|
||||
|
||||
const RESEARCH_MODEL_PREFERENCES = [
|
||||
|
||||
@@ -84,6 +84,7 @@ const API_KEY_PROVIDERS: ApiKeyProviderInfo[] = [
|
||||
{ id: "anthropic", label: "Anthropic API", envVar: "ANTHROPIC_API_KEY" },
|
||||
{ id: "google", label: "Google Gemini API", envVar: "GEMINI_API_KEY" },
|
||||
{ id: "lm-studio", label: "LM Studio (local OpenAI-compatible server)" },
|
||||
{ id: "litellm", label: "LiteLLM Proxy (OpenAI-compatible gateway)" },
|
||||
{ id: "__custom__", label: "Custom provider (local/self-hosted/proxy)" },
|
||||
{ id: "amazon-bedrock", label: "Amazon Bedrock (AWS credential chain)" },
|
||||
{ id: "openrouter", label: "OpenRouter", envVar: "OPENROUTER_API_KEY" },
|
||||
@@ -127,15 +128,24 @@ export function resolveModelProviderForCommand(
|
||||
return undefined;
|
||||
}
|
||||
|
||||
function apiKeyProviderHint(provider: ApiKeyProviderInfo): string {
|
||||
if (provider.id === "__custom__") {
|
||||
return "Ollama, vLLM, LM Studio, proxies";
|
||||
}
|
||||
if (provider.id === "lm-studio") {
|
||||
return "http://localhost:1234/v1";
|
||||
}
|
||||
if (provider.id === "litellm") {
|
||||
return "http://localhost:4000/v1";
|
||||
}
|
||||
return provider.envVar ?? provider.id;
|
||||
}
|
||||
|
||||
async function selectApiKeyProvider(): Promise<ApiKeyProviderInfo | undefined> {
|
||||
const options: PromptSelectOption<ApiKeyProviderInfo | "cancel">[] = API_KEY_PROVIDERS.map((provider) => ({
|
||||
value: provider,
|
||||
label: provider.label,
|
||||
hint: provider.id === "__custom__"
|
||||
? "Ollama, vLLM, LM Studio, proxies"
|
||||
: provider.id === "lm-studio"
|
||||
? "http://localhost:1234/v1"
|
||||
: provider.envVar ?? provider.id,
|
||||
hint: apiKeyProviderHint(provider),
|
||||
}));
|
||||
options.push({ value: "cancel", label: "Cancel" });
|
||||
|
||||
@@ -403,6 +413,65 @@ async function promptLmStudioProviderSetup(): Promise<CustomProviderSetup | unde
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Interactive `feynman setup` flow for the LiteLLM proxy provider.
 *
 * Prompts for the proxy base URL and master-key usage, best-effort detects
 * model ids from the proxy's OpenAI-compatible /models endpoint, and returns
 * the resulting provider setup. Returns undefined when the user cancels or
 * supplies unusable input (empty base URL, no model ids).
 */
async function promptLiteLlmProviderSetup(): Promise<CustomProviderSetup | undefined> {
  printSection("LiteLLM Proxy");
  printInfo("Start the LiteLLM proxy first. Feynman uses the OpenAI-compatible chat-completions API.");

  // Normalize against the same rules as other openai-completions providers.
  const baseUrlRaw = await promptText("Base URL", "http://localhost:4000/v1");
  const { baseUrl } = normalizeCustomProviderBaseUrl("openai-completions", baseUrlRaw);
  if (!baseUrl) {
    printWarning("Base URL is required.");
    return undefined;
  }

  // Index 0 = keyed, 1 = unauthenticated, 2 = cancel; order matters below.
  const keyChoices = [
    "Yes (use LITELLM_MASTER_KEY and send Authorization: Bearer <key>)",
    "No (proxy runs without authentication)",
    "Cancel",
  ];
  const keySelection = await promptChoice("Is the proxy protected by a master key?", keyChoices, 0);
  if (keySelection >= 2) {
    return undefined;
  }

  const hasKey = keySelection === 0;
  // "local" is the sentinel key config for an unauthenticated local proxy.
  const apiKeyConfig = hasKey ? "LITELLM_MASTER_KEY" : "local";
  const authHeader = hasKey;
  if (hasKey) {
    printInfo("Set LITELLM_MASTER_KEY in your shell or .env before using Feynman.");
  }

  // Best-effort model discovery; failures fall through to manual entry.
  const resolvedKey = hasKey ? await resolveApiKeyConfig(apiKeyConfig) : apiKeyConfig;
  const detectedModelIds = resolvedKey
    ? await bestEffortFetchOpenAiModelIds(baseUrl, resolvedKey, authHeader)
    : undefined;

  let modelIdsDefault = "gpt-4";
  if (detectedModelIds && detectedModelIds.length > 0) {
    // Show at most 10 detected ids and default to the first one.
    const sample = detectedModelIds.slice(0, 10).join(", ");
    printInfo(`Detected LiteLLM models: ${sample}${detectedModelIds.length > 10 ? ", ..." : ""}`);
    modelIdsDefault = detectedModelIds[0]!;
  } else {
    printInfo("No models detected from /models. Enter the model id(s) from your LiteLLM config.");
  }

  const modelIdsRaw = await promptText("Model id(s) (comma-separated)", modelIdsDefault);
  const modelIds = normalizeModelIds(modelIdsRaw);
  if (modelIds.length === 0) {
    printWarning("At least one model id is required.");
    return undefined;
  }

  return {
    providerId: "litellm",
    modelIds,
    baseUrl,
    api: "openai-completions",
    apiKeyConfig,
    authHeader,
  };
}
|
||||
|
||||
async function verifyCustomProvider(setup: CustomProviderSetup, authPath: string): Promise<void> {
|
||||
const registry = createModelRegistry(authPath);
|
||||
const modelsError = registry.getError();
|
||||
@@ -614,6 +683,31 @@ async function configureApiKeyProvider(authPath: string, providerId?: string): P
|
||||
return true;
|
||||
}
|
||||
|
||||
if (provider.id === "litellm") {
|
||||
const setup = await promptLiteLlmProviderSetup();
|
||||
if (!setup) {
|
||||
printInfo("LiteLLM setup cancelled.");
|
||||
return false;
|
||||
}
|
||||
|
||||
const modelsJsonPath = getModelsJsonPath(authPath);
|
||||
const result = upsertProviderConfig(modelsJsonPath, setup.providerId, {
|
||||
baseUrl: setup.baseUrl,
|
||||
apiKey: setup.apiKeyConfig,
|
||||
api: setup.api,
|
||||
authHeader: setup.authHeader,
|
||||
models: setup.modelIds.map((id) => ({ id })),
|
||||
});
|
||||
if (!result.ok) {
|
||||
printWarning(result.error);
|
||||
return false;
|
||||
}
|
||||
|
||||
printSuccess("Saved LiteLLM provider.");
|
||||
await verifyCustomProvider(setup, authPath);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (provider.id === "__custom__") {
|
||||
const setup = await promptCustomProviderSetup();
|
||||
if (!setup) {
|
||||
|
||||
@@ -169,6 +169,15 @@ function resolvePackageManagerCommand(settingsManager: SettingsManager): { comma
|
||||
return { command: executable, args };
|
||||
}
|
||||
|
||||
function childPackageManagerEnv(): NodeJS.ProcessEnv {
|
||||
return {
|
||||
...process.env,
|
||||
PATH: getPathWithCurrentNode(process.env.PATH),
|
||||
npm_config_dry_run: "false",
|
||||
NPM_CONFIG_DRY_RUN: "false",
|
||||
};
|
||||
}
|
||||
|
||||
async function runPackageManagerInstall(
|
||||
settingsManager: SettingsManager,
|
||||
workingDir: string,
|
||||
@@ -207,10 +216,7 @@ async function runPackageManagerInstall(
|
||||
const child = spawn(packageManagerCommand.command, args, {
|
||||
cwd: scope === "user" ? agentDir : workingDir,
|
||||
stdio: ["ignore", "pipe", "pipe"],
|
||||
env: {
|
||||
...process.env,
|
||||
PATH: getPathWithCurrentNode(process.env.PATH),
|
||||
},
|
||||
env: childPackageManagerEnv(),
|
||||
});
|
||||
|
||||
child.stdout?.on("data", (chunk) => relayFilteredOutput(chunk, process.stdout));
|
||||
|
||||
@@ -127,6 +127,19 @@ export function normalizeFeynmanSettings(
|
||||
settings.theme = "feynman";
|
||||
settings.quietStartup = true;
|
||||
settings.collapseChangelog = true;
|
||||
settings.compaction = {
|
||||
enabled: true,
|
||||
reserveTokens: 16384,
|
||||
keepRecentTokens: 20000,
|
||||
...(settings.compaction && typeof settings.compaction === "object" ? settings.compaction : {}),
|
||||
};
|
||||
settings.retry = {
|
||||
enabled: true,
|
||||
maxRetries: 3,
|
||||
baseDelayMs: 2000,
|
||||
maxDelayMs: 60000,
|
||||
...(settings.retry && typeof settings.retry === "object" ? settings.retry : {}),
|
||||
};
|
||||
const supportedCorePackages = filterPackageSourcesForCurrentNode(CORE_PACKAGE_SOURCES);
|
||||
if (!Array.isArray(settings.packages) || settings.packages.length === 0) {
|
||||
settings.packages = supportedCorePackages;
|
||||
|
||||
@@ -12,6 +12,11 @@ import { buildModelStatusSnapshotFromRecords, getAvailableModelRecords, getSuppo
|
||||
import { createModelRegistry, getModelsJsonPath } from "../model/registry.js";
|
||||
import { getConfiguredServiceTier } from "../model/service-tier.js";
|
||||
|
||||
type ContextRiskSummary = {
|
||||
level: "low" | "medium" | "high" | "unknown";
|
||||
lines: string[];
|
||||
};
|
||||
|
||||
function findProvidersMissingApiKey(modelsJsonPath: string): string[] {
|
||||
try {
|
||||
const raw = readFileSync(modelsJsonPath, "utf8").trim();
|
||||
@@ -35,6 +40,50 @@ function findProvidersMissingApiKey(modelsJsonPath: string): string[] {
|
||||
}
|
||||
}
|
||||
|
||||
function numberSetting(settings: Record<string, unknown>, path: string[], fallback: number): number {
|
||||
let value: unknown = settings;
|
||||
for (const key of path) {
|
||||
if (!value || typeof value !== "object") return fallback;
|
||||
value = (value as Record<string, unknown>)[key];
|
||||
}
|
||||
return typeof value === "number" && Number.isFinite(value) ? value : fallback;
|
||||
}
|
||||
|
||||
export function buildContextRiskSummary(
|
||||
settings: Record<string, unknown>,
|
||||
model: { provider: string; id: string; contextWindow: number; maxTokens: number; reasoning: boolean } | undefined,
|
||||
): ContextRiskSummary {
|
||||
if (!model) {
|
||||
return {
|
||||
level: "unknown",
|
||||
lines: ["context risk: unknown (no active model)"],
|
||||
};
|
||||
}
|
||||
|
||||
const reserveTokens = numberSetting(settings, ["compaction", "reserveTokens"], 16384);
|
||||
const keepRecentTokens = numberSetting(settings, ["compaction", "keepRecentTokens"], 20000);
|
||||
const retryMax = numberSetting(settings, ["retry", "maxRetries"], 3);
|
||||
const usableWindow = Math.max(0, model.contextWindow - reserveTokens);
|
||||
const level = model.contextWindow < 64_000
|
||||
? "high"
|
||||
: model.contextWindow < 128_000
|
||||
? "medium"
|
||||
: "low";
|
||||
|
||||
return {
|
||||
level,
|
||||
lines: [
|
||||
`context risk: ${level}`,
|
||||
` model: ${model.provider}/${model.id}`,
|
||||
` context window: ${model.contextWindow}`,
|
||||
` usable before Pi compaction reserve: ${usableWindow}`,
|
||||
` Pi compaction: reserve=${reserveTokens}, keepRecent=${keepRecentTokens}`,
|
||||
` Pi retry: maxRetries=${retryMax}`,
|
||||
` reasoning: ${model.reasoning ? "supported" : "off/not supported"}`,
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export type DoctorOptions = {
|
||||
settingsPath: string;
|
||||
authPath: string;
|
||||
@@ -164,6 +213,10 @@ export function runDoctor(options: DoctorOptions): void {
|
||||
: "not set"}`,
|
||||
);
|
||||
const modelStatus = collectStatusSnapshot(options);
|
||||
const currentModel = typeof settings.defaultProvider === "string" && typeof settings.defaultModel === "string"
|
||||
? modelRegistry.find(settings.defaultProvider, settings.defaultModel)
|
||||
: undefined;
|
||||
const contextRisk = buildContextRiskSummary(settings, currentModel);
|
||||
console.log(`default model valid: ${modelStatus.modelValid ? "yes" : "no"}`);
|
||||
console.log(`authenticated providers: ${modelStatus.authenticatedProviderCount}`);
|
||||
console.log(`authenticated models: ${modelStatus.authenticatedModelCount}`);
|
||||
@@ -172,6 +225,9 @@ export function runDoctor(options: DoctorOptions): void {
|
||||
if (modelStatus.recommendedModelReason) {
|
||||
console.log(` why: ${modelStatus.recommendedModelReason}`);
|
||||
}
|
||||
for (const line of contextRisk.lines) {
|
||||
console.log(line);
|
||||
}
|
||||
const modelsError = modelRegistry.getError();
|
||||
if (modelsError) {
|
||||
console.log("models.json: error");
|
||||
|
||||
@@ -69,3 +69,31 @@ test("deepresearch workflow requires durable artifacts even when blocked", () =>
|
||||
assert.match(deepResearchPrompt, /Verification: BLOCKED/i);
|
||||
assert.match(deepResearchPrompt, /Never end with only an explanation in chat/i);
|
||||
});
|
||||
|
||||
test("workflow prompts do not introduce implicit confirmation gates", () => {
|
||||
const workflowPrompts = [
|
||||
"audit.md",
|
||||
"compare.md",
|
||||
"deepresearch.md",
|
||||
"draft.md",
|
||||
"lit.md",
|
||||
"review.md",
|
||||
"summarize.md",
|
||||
"watch.md",
|
||||
];
|
||||
const bannedConfirmationGates = [
|
||||
/Do you want to proceed/i,
|
||||
/Wait for confirmation/i,
|
||||
/wait for user confirmation/i,
|
||||
/give them a brief chance/i,
|
||||
/request changes before proceeding/i,
|
||||
];
|
||||
|
||||
for (const fileName of workflowPrompts) {
|
||||
const content = readFileSync(join(repoRoot, "prompts", fileName), "utf8");
|
||||
assert.match(content, /continue (immediately|automatically)/i, `${fileName} should keep running after planning`);
|
||||
for (const pattern of bannedConfirmationGates) {
|
||||
assert.doesNotMatch(content, pattern, `${fileName} contains confirmation gate ${pattern}`);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@@ -88,6 +88,15 @@ test("resolveModelProviderForCommand supports LM Studio as a first-class local p
|
||||
assert.equal(resolved?.id, "lm-studio");
|
||||
});
|
||||
|
||||
test("resolveModelProviderForCommand supports LiteLLM as a first-class proxy provider", () => {
|
||||
const authPath = createAuthPath({});
|
||||
|
||||
const resolved = resolveModelProviderForCommand(authPath, "litellm");
|
||||
|
||||
assert.equal(resolved?.kind, "api-key");
|
||||
assert.equal(resolved?.id, "litellm");
|
||||
});
|
||||
|
||||
test("resolveModelProviderForCommand prefers OAuth when a provider supports both auth modes", () => {
|
||||
const authPath = createAuthPath({});
|
||||
|
||||
|
||||
@@ -30,3 +30,45 @@ test("upsertProviderConfig creates models.json and merges provider config", () =
|
||||
assert.equal(parsed.providers.custom.authHeader, true);
|
||||
assert.deepEqual(parsed.providers.custom.models, [{ id: "llama3.1:8b" }]);
|
||||
});
|
||||
|
||||
test("upsertProviderConfig writes LiteLLM proxy config with master key", () => {
|
||||
const dir = mkdtempSync(join(tmpdir(), "feynman-litellm-"));
|
||||
const modelsPath = join(dir, "models.json");
|
||||
|
||||
const result = upsertProviderConfig(modelsPath, "litellm", {
|
||||
baseUrl: "http://localhost:4000/v1",
|
||||
apiKey: "LITELLM_MASTER_KEY",
|
||||
api: "openai-completions",
|
||||
authHeader: true,
|
||||
models: [{ id: "gpt-4o" }],
|
||||
});
|
||||
assert.deepEqual(result, { ok: true });
|
||||
|
||||
const parsed = JSON.parse(readFileSync(modelsPath, "utf8")) as any;
|
||||
assert.equal(parsed.providers.litellm.baseUrl, "http://localhost:4000/v1");
|
||||
assert.equal(parsed.providers.litellm.apiKey, "LITELLM_MASTER_KEY");
|
||||
assert.equal(parsed.providers.litellm.api, "openai-completions");
|
||||
assert.equal(parsed.providers.litellm.authHeader, true);
|
||||
assert.deepEqual(parsed.providers.litellm.models, [{ id: "gpt-4o" }]);
|
||||
});
|
||||
|
||||
test("upsertProviderConfig writes LiteLLM proxy config without master key", () => {
|
||||
const dir = mkdtempSync(join(tmpdir(), "feynman-litellm-"));
|
||||
const modelsPath = join(dir, "models.json");
|
||||
|
||||
const result = upsertProviderConfig(modelsPath, "litellm", {
|
||||
baseUrl: "http://localhost:4000/v1",
|
||||
apiKey: "local",
|
||||
api: "openai-completions",
|
||||
authHeader: false,
|
||||
models: [{ id: "llama3" }],
|
||||
});
|
||||
assert.deepEqual(result, { ok: true });
|
||||
|
||||
const parsed = JSON.parse(readFileSync(modelsPath, "utf8")) as any;
|
||||
assert.equal(parsed.providers.litellm.baseUrl, "http://localhost:4000/v1");
|
||||
assert.equal(parsed.providers.litellm.apiKey, "local");
|
||||
assert.equal(parsed.providers.litellm.api, "openai-completions");
|
||||
assert.equal(parsed.providers.litellm.authHeader, false);
|
||||
assert.deepEqual(parsed.providers.litellm.models, [{ id: "llama3" }]);
|
||||
});
|
||||
|
||||
@@ -188,6 +188,46 @@ test("installPackageSources skips native packages on unsupported Node majors bef
|
||||
}
|
||||
});
|
||||
|
||||
test("installPackageSources disables inherited npm dry-run config for child installs", async () => {
|
||||
const root = mkdtempSync(join(tmpdir(), "feynman-package-ops-"));
|
||||
const workingDir = resolve(root, "project");
|
||||
const agentDir = resolve(root, "agent");
|
||||
const markerPath = resolve(root, "install-env-ok.txt");
|
||||
mkdirSync(workingDir, { recursive: true });
|
||||
|
||||
const scriptPath = writeFakeNpmScript(root, [
|
||||
`import { writeFileSync } from "node:fs";`,
|
||||
`if (process.env.npm_config_dry_run !== "false" || process.env.NPM_CONFIG_DRY_RUN !== "false") process.exit(42);`,
|
||||
`writeFileSync(${JSON.stringify(markerPath)}, "ok\\n", "utf8");`,
|
||||
"process.exit(0);",
|
||||
].join("\n"));
|
||||
|
||||
writeSettings(agentDir, {
|
||||
npmCommand: [process.execPath, scriptPath],
|
||||
});
|
||||
|
||||
const originalLower = process.env.npm_config_dry_run;
|
||||
const originalUpper = process.env.NPM_CONFIG_DRY_RUN;
|
||||
process.env.npm_config_dry_run = "true";
|
||||
process.env.NPM_CONFIG_DRY_RUN = "true";
|
||||
try {
|
||||
const result = await installPackageSources(workingDir, agentDir, ["npm:test-package"]);
|
||||
assert.deepEqual(result.installed, ["npm:test-package"]);
|
||||
assert.equal(existsSync(markerPath), true);
|
||||
} finally {
|
||||
if (originalLower === undefined) {
|
||||
delete process.env.npm_config_dry_run;
|
||||
} else {
|
||||
process.env.npm_config_dry_run = originalLower;
|
||||
}
|
||||
if (originalUpper === undefined) {
|
||||
delete process.env.NPM_CONFIG_DRY_RUN;
|
||||
} else {
|
||||
process.env.NPM_CONFIG_DRY_RUN = originalUpper;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
test("updateConfiguredPackages batches multiple npm updates into a single install per scope", async () => {
|
||||
const root = mkdtempSync(join(tmpdir(), "feynman-package-ops-"));
|
||||
const workingDir = resolve(root, "project");
|
||||
@@ -218,7 +258,7 @@ test("updateConfiguredPackages batches multiple npm updates into a single instal
|
||||
globalThis.fetch = (async () => ({
|
||||
ok: true,
|
||||
json: async () => ({ version: "2.0.0" }),
|
||||
})) as typeof fetch;
|
||||
})) as unknown as typeof fetch;
|
||||
|
||||
try {
|
||||
const result = await updateConfiguredPackages(workingDir, agentDir);
|
||||
@@ -266,7 +306,7 @@ test("updateConfiguredPackages skips native package updates on unsupported Node
|
||||
globalThis.fetch = (async () => ({
|
||||
ok: true,
|
||||
json: async () => ({ version: "2.0.0" }),
|
||||
})) as typeof fetch;
|
||||
})) as unknown as typeof fetch;
|
||||
Object.defineProperty(process.versions, "node", { value: "25.0.0", configurable: true });
|
||||
|
||||
try {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
|
||||
import { patchPiSubagentsSource } from "../scripts/lib/pi-subagents-patch.mjs";
|
||||
import { patchPiSubagentsSource, stripPiSubagentBuiltinModelSource } from "../scripts/lib/pi-subagents-patch.mjs";
|
||||
|
||||
const CASES = [
|
||||
{
|
||||
@@ -140,3 +140,22 @@ test("patchPiSubagentsSource rewrites modern agents.ts discovery paths", () => {
|
||||
assert.ok(!patched.includes('loadChainsFromDir(userDirNew, "user")'));
|
||||
assert.ok(!patched.includes('fs.existsSync(userDirNew) ? userDirNew : userDirOld'));
|
||||
});
|
||||
|
||||
test("stripPiSubagentBuiltinModelSource removes built-in model pins", () => {
|
||||
const input = [
|
||||
"---",
|
||||
"name: researcher",
|
||||
"description: Web researcher",
|
||||
"model: anthropic/claude-sonnet-4-6",
|
||||
"tools: read, web_search",
|
||||
"---",
|
||||
"",
|
||||
"Body",
|
||||
].join("\n");
|
||||
|
||||
const patched = stripPiSubagentBuiltinModelSource(input);
|
||||
|
||||
assert.ok(!patched.includes("model: anthropic/claude-sonnet-4-6"));
|
||||
assert.match(patched, /name: researcher/);
|
||||
assert.match(patched, /tools: read, web_search/);
|
||||
});
|
||||
|
||||
156
tests/research-tools-extension.test.ts
Normal file
156
tests/research-tools-extension.test.ts
Normal file
@@ -0,0 +1,156 @@
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { existsSync, mkdirSync, mkdtempSync, readFileSync, utimesSync, writeFileSync } from "node:fs";
|
||||
import { tmpdir } from "node:os";
|
||||
import { dirname, join, resolve } from "node:path";
|
||||
|
||||
import type { ExtensionContext } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import { formatToolResultWithSpillover } from "../extensions/research-tools/alpha.js";
|
||||
import { autoLogPath, writeAutoLogEntry } from "../extensions/research-tools/autolog.js";
|
||||
import { computeContextPosture } from "../extensions/research-tools/context.js";
|
||||
import { buildResumePacket } from "../extensions/research-tools/resume.js";
|
||||
import { buildContextRiskSummary } from "../src/setup/doctor.js";
|
||||
import { claimPlanSlug, collectManagedGc, spillLargeCustomToolResult } from "../extensions/research-tools/state.js";
|
||||
|
||||
function fakeCtx(cwd: string): ExtensionContext {
|
||||
return {
|
||||
cwd,
|
||||
model: {
|
||||
provider: "test",
|
||||
id: "small",
|
||||
contextWindow: 32_000,
|
||||
},
|
||||
getContextUsage: () => ({
|
||||
tokens: 24_000,
|
||||
contextWindow: 32_000,
|
||||
percent: 75,
|
||||
}),
|
||||
sessionManager: {
|
||||
getSessionId: () => "session-1",
|
||||
},
|
||||
} as unknown as ExtensionContext;
|
||||
}
|
||||
|
||||
test("alpha tool spillover writes oversized output to outputs cache", () => {
|
||||
const root = mkdtempSync(join(tmpdir(), "feynman-spill-"));
|
||||
const originalCap = process.env.FEYNMAN_TOOL_OUTPUT_CAP_CHARS;
|
||||
process.env.FEYNMAN_TOOL_OUTPUT_CAP_CHARS = "64";
|
||||
try {
|
||||
const result = formatToolResultWithSpillover(fakeCtx(root), "alpha_get_paper", { text: "x".repeat(500) });
|
||||
const parsed = JSON.parse(result.text) as { path: string; feynman_spillover: boolean };
|
||||
assert.equal(parsed.feynman_spillover, true);
|
||||
assert.equal(existsSync(parsed.path), true);
|
||||
assert.match(readFileSync(parsed.path, "utf8"), /xxxxx/);
|
||||
assert.match(parsed.path, /outputs\/\.cache\/alpha_get_paper-/);
|
||||
} finally {
|
||||
if (originalCap === undefined) {
|
||||
delete process.env.FEYNMAN_TOOL_OUTPUT_CAP_CHARS;
|
||||
} else {
|
||||
process.env.FEYNMAN_TOOL_OUTPUT_CAP_CHARS = originalCap;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
test("context_report posture uses Pi context usage directly", () => {
|
||||
const report = computeContextPosture(fakeCtx("/tmp"));
|
||||
assert.equal(report.model, "test/small");
|
||||
assert.equal(report.contextWindow, 32_000);
|
||||
assert.equal(report.estimatedInputTokens, 24_000);
|
||||
assert.equal(report.compactionThresholdHit, true);
|
||||
assert.equal(report.recommendedMaxWorkers, 1);
|
||||
});
|
||||
|
||||
test("autolog writes dated jsonl entries under notes", () => {
|
||||
const root = mkdtempSync(join(tmpdir(), "feynman-autolog-"));
|
||||
writeAutoLogEntry(root, { role: "user", text: "hello" });
|
||||
const path = autoLogPath(root);
|
||||
assert.equal(existsSync(path), true);
|
||||
assert.deepEqual(JSON.parse(readFileSync(path, "utf8").trim()), { role: "user", text: "hello" });
|
||||
});
|
||||
|
||||
test("resume packet summarizes recent plans and changelog from disk", () => {
|
||||
const root = mkdtempSync(join(tmpdir(), "feynman-resume-"));
|
||||
mkdirSync(resolve(root, "outputs", ".plans"), { recursive: true });
|
||||
mkdirSync(resolve(root, "outputs", ".state"), { recursive: true });
|
||||
const planPath = resolve(root, "outputs", ".plans", "demo.md");
|
||||
const statePath = resolve(root, "outputs", ".state", "demo.jobs.jsonl");
|
||||
writeFileSyncSafe(planPath, "# Plan\n\n- next step");
|
||||
writeFileSyncSafe(statePath, "{\"status\":\"running\"}\n");
|
||||
writeFileSyncSafe(resolve(root, "CHANGELOG.md"), "## Entry\n- verified\n");
|
||||
const packet = buildResumePacket(root);
|
||||
assert.ok(packet);
|
||||
assert.match(packet!, /Recent plans/);
|
||||
assert.match(packet!, /demo\.md/);
|
||||
assert.match(packet!, /CHANGELOG tail/);
|
||||
});
|
||||
|
||||
test("doctor context risk uses Pi model context window and compaction settings", () => {
|
||||
const summary = buildContextRiskSummary(
|
||||
{ compaction: { reserveTokens: 4096, keepRecentTokens: 8000 }, retry: { maxRetries: 2 } },
|
||||
{ provider: "local", id: "qwen", contextWindow: 32_000, maxTokens: 4096, reasoning: true },
|
||||
);
|
||||
assert.equal(summary.level, "high");
|
||||
assert.match(summary.lines.join("\n"), /Pi compaction: reserve=4096, keepRecent=8000/);
|
||||
assert.match(summary.lines.join("\n"), /Pi retry: maxRetries=2/);
|
||||
});
|
||||
|
||||
test("slug lock blocks overwriting an existing plan from another session", () => {
|
||||
const root = mkdtempSync(join(tmpdir(), "feynman-slug-"));
|
||||
const planPath = resolve(root, "outputs", ".plans", "demo.md");
|
||||
writeFileSyncSafe(planPath, "# Existing\n");
|
||||
|
||||
const result = claimPlanSlug(root, "session-2", "outputs/.plans/demo.md");
|
||||
|
||||
assert.equal(result.ok, false);
|
||||
if (!result.ok) {
|
||||
assert.match(result.reason, /Plan already exists/);
|
||||
}
|
||||
});
|
||||
|
||||
test("managed cache gc deletes stale cache files and honors dry-run", () => {
|
||||
const root = mkdtempSync(join(tmpdir(), "feynman-gc-"));
|
||||
const cachePath = resolve(root, "outputs", ".cache", "old.md");
|
||||
writeFileSyncSafe(cachePath, "old");
|
||||
const old = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000);
|
||||
utimesSync(cachePath, old, old);
|
||||
|
||||
const preview = collectManagedGc(root, Date.now(), 14, { dryRun: true });
|
||||
assert.equal(preview.deleted.length, 1);
|
||||
assert.equal(existsSync(cachePath), true);
|
||||
|
||||
const actual = collectManagedGc(root, Date.now(), 14);
|
||||
assert.equal(actual.deleted.length, 1);
|
||||
assert.equal(existsSync(cachePath), false);
|
||||
});
|
||||
|
||||
test("large custom tool results spill to outputs runs", () => {
|
||||
const root = mkdtempSync(join(tmpdir(), "feynman-subagent-spill-"));
|
||||
const originalCap = process.env.FEYNMAN_CUSTOM_TOOL_CAP_CHARS;
|
||||
process.env.FEYNMAN_CUSTOM_TOOL_CAP_CHARS = "50";
|
||||
try {
|
||||
const result = spillLargeCustomToolResult(
|
||||
root,
|
||||
"subagent",
|
||||
"call-1",
|
||||
[{ type: "text", text: "x".repeat(200) }],
|
||||
{ ok: true },
|
||||
);
|
||||
assert.ok(result);
|
||||
const parsed = JSON.parse(result!.content[0]!.text) as { path: string; feynman_spillover: boolean };
|
||||
assert.equal(parsed.feynman_spillover, true);
|
||||
assert.match(parsed.path, /outputs\/\.runs\/subagent-call-1-/);
|
||||
assert.equal(existsSync(parsed.path), true);
|
||||
} finally {
|
||||
if (originalCap === undefined) {
|
||||
delete process.env.FEYNMAN_CUSTOM_TOOL_CAP_CHARS;
|
||||
} else {
|
||||
process.env.FEYNMAN_CUSTOM_TOOL_CAP_CHARS = originalCap;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
function writeFileSyncSafe(path: string, text: string): void {
|
||||
mkdirSync(dirname(path), { recursive: true });
|
||||
writeFileSync(path, text, "utf8");
|
||||
}
|
||||
@@ -261,7 +261,7 @@ This usually means the release exists, but not all platform bundles were uploade
|
||||
Workarounds:
|
||||
- try again after the release finishes publishing
|
||||
- pass the latest published version explicitly, e.g.:
|
||||
curl -fsSL https://feynman.is/install | bash -s -- 0.2.24
|
||||
curl -fsSL https://feynman.is/install | bash -s -- 0.2.27
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -110,7 +110,7 @@ This usually means the release exists, but not all platform bundles were uploade
|
||||
Workarounds:
|
||||
- try again after the release finishes publishing
|
||||
- pass the latest published version explicitly, e.g.:
|
||||
& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.24
|
||||
& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.27
|
||||
"@
|
||||
}
|
||||
|
||||
|
||||
@@ -117,13 +117,13 @@ These installers download the bundled `skills/` and `prompts/` trees plus the re
|
||||
The one-line installer already targets the latest tagged release. To pin an exact version, pass it explicitly:
|
||||
|
||||
```bash
|
||||
curl -fsSL https://feynman.is/install | bash -s -- 0.2.24
|
||||
curl -fsSL https://feynman.is/install | bash -s -- 0.2.27
|
||||
```
|
||||
|
||||
On Windows:
|
||||
|
||||
```powershell
|
||||
& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.24
|
||||
& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.27
|
||||
```
|
||||
|
||||
## Post-install setup
|
||||
|
||||
@@ -52,7 +52,7 @@ Amazon Bedrock (AWS credential chain)
|
||||
|
||||
Feynman verifies the same AWS credential chain Pi uses at runtime, including `AWS_PROFILE`, `~/.aws` credentials/config, SSO, ECS/IRSA, and EC2 instance roles. Once that check passes, Bedrock models become available in `feynman model list` without needing a traditional API key.
|
||||
|
||||
### Local models: LM Studio, Ollama, vLLM
|
||||
### Local models: LM Studio, LiteLLM, Ollama, vLLM
|
||||
|
||||
If you want to use LM Studio, start the LM Studio local server, load a model, choose the API-key flow, and then select:
|
||||
|
||||
@@ -70,6 +70,22 @@ API key: lm-studio
|
||||
|
||||
Feynman attempts to read LM Studio's `/models` endpoint and prefill the loaded model id.
|
||||
|
||||
For LiteLLM, start the proxy, choose the API-key flow, and then select:
|
||||
|
||||
```text
|
||||
LiteLLM Proxy (OpenAI-compatible gateway)
|
||||
```
|
||||
|
||||
The default settings are:
|
||||
|
||||
```text
|
||||
Base URL: http://localhost:4000/v1
|
||||
API mode: openai-completions
|
||||
Master key: optional, read from LITELLM_MASTER_KEY
|
||||
```
|
||||
|
||||
Feynman attempts to read LiteLLM's `/models` endpoint and prefill model ids from the proxy config.
|
||||
|
||||
For Ollama, vLLM, or another OpenAI-compatible local server, choose:
|
||||
|
||||
```text
|
||||
|
||||
Reference in New Issue
Block a user