Compare commits
49 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e2fdf0d505 | ||
|
|
cba7532d59 | ||
|
|
2dea96f25f | ||
|
|
83a570235f | ||
|
|
ff6328121e | ||
|
|
404c8b5469 | ||
|
|
4c62e78ca5 | ||
|
|
10c93a673b | ||
|
|
30d07246d1 | ||
|
|
dbd89d8e3d | ||
|
|
c8536583bf | ||
|
|
ca74226c83 | ||
|
|
bc9fa2be86 | ||
|
|
f6dbacc9d5 | ||
|
|
572de7ba85 | ||
|
|
85e0c4d8c4 | ||
|
|
584d065902 | ||
|
|
151956ea24 | ||
|
|
75b0467761 | ||
|
|
4ac668c50a | ||
|
|
8178173ff7 | ||
|
|
4eeccafed0 | ||
|
|
7024a86024 | ||
|
|
5fab329ad1 | ||
|
|
563068180f | ||
|
|
8dd20935ad | ||
|
|
aaa0f63bc7 | ||
|
|
79e14dd79d | ||
|
|
cd85e875df | ||
|
|
3ee6ff4199 | ||
|
|
762ca66a68 | ||
|
|
2aa4c84ce5 | ||
|
|
3d84624011 | ||
|
|
6445c20e02 | ||
|
|
4c0a417232 | ||
|
|
42cedd3137 | ||
|
|
b07b0f4197 | ||
|
|
323faf56ee | ||
|
|
1e333ba490 | ||
|
|
1dd7f30a37 | ||
|
|
17c48be4b5 | ||
|
|
8f8cf2a4a9 | ||
|
|
7d3fbc3f6b | ||
|
|
e651cb1f9b | ||
|
|
21b8bcd4c4 | ||
|
|
771b39cbba | ||
|
|
b624921bad | ||
|
|
b7d430ee15 | ||
|
|
54efae78e1 |
154
.astro/content.d.ts
vendored
Normal file
154
.astro/content.d.ts
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
declare module 'astro:content' {
|
||||
export interface RenderResult {
|
||||
Content: import('astro/runtime/server/index.js').AstroComponentFactory;
|
||||
headings: import('astro').MarkdownHeading[];
|
||||
remarkPluginFrontmatter: Record<string, any>;
|
||||
}
|
||||
interface Render {
|
||||
'.md': Promise<RenderResult>;
|
||||
}
|
||||
|
||||
export interface RenderedContent {
|
||||
html: string;
|
||||
metadata?: {
|
||||
imagePaths: Array<string>;
|
||||
[key: string]: unknown;
|
||||
};
|
||||
}
|
||||
|
||||
type Flatten<T> = T extends { [K: string]: infer U } ? U : never;
|
||||
|
||||
export type CollectionKey = keyof DataEntryMap;
|
||||
export type CollectionEntry<C extends CollectionKey> = Flatten<DataEntryMap[C]>;
|
||||
|
||||
type AllValuesOf<T> = T extends any ? T[keyof T] : never;
|
||||
|
||||
export type ReferenceDataEntry<
|
||||
C extends CollectionKey,
|
||||
E extends keyof DataEntryMap[C] = string,
|
||||
> = {
|
||||
collection: C;
|
||||
id: E;
|
||||
};
|
||||
|
||||
export type ReferenceLiveEntry<C extends keyof LiveContentConfig['collections']> = {
|
||||
collection: C;
|
||||
id: string;
|
||||
};
|
||||
|
||||
export function getCollection<C extends keyof DataEntryMap, E extends CollectionEntry<C>>(
|
||||
collection: C,
|
||||
filter?: (entry: CollectionEntry<C>) => entry is E,
|
||||
): Promise<E[]>;
|
||||
export function getCollection<C extends keyof DataEntryMap>(
|
||||
collection: C,
|
||||
filter?: (entry: CollectionEntry<C>) => unknown,
|
||||
): Promise<CollectionEntry<C>[]>;
|
||||
|
||||
export function getLiveCollection<C extends keyof LiveContentConfig['collections']>(
|
||||
collection: C,
|
||||
filter?: LiveLoaderCollectionFilterType<C>,
|
||||
): Promise<
|
||||
import('astro').LiveDataCollectionResult<LiveLoaderDataType<C>, LiveLoaderErrorType<C>>
|
||||
>;
|
||||
|
||||
export function getEntry<
|
||||
C extends keyof DataEntryMap,
|
||||
E extends keyof DataEntryMap[C] | (string & {}),
|
||||
>(
|
||||
entry: ReferenceDataEntry<C, E>,
|
||||
): E extends keyof DataEntryMap[C]
|
||||
? Promise<DataEntryMap[C][E]>
|
||||
: Promise<CollectionEntry<C> | undefined>;
|
||||
export function getEntry<
|
||||
C extends keyof DataEntryMap,
|
||||
E extends keyof DataEntryMap[C] | (string & {}),
|
||||
>(
|
||||
collection: C,
|
||||
id: E,
|
||||
): E extends keyof DataEntryMap[C]
|
||||
? string extends keyof DataEntryMap[C]
|
||||
? Promise<DataEntryMap[C][E]> | undefined
|
||||
: Promise<DataEntryMap[C][E]>
|
||||
: Promise<CollectionEntry<C> | undefined>;
|
||||
export function getLiveEntry<C extends keyof LiveContentConfig['collections']>(
|
||||
collection: C,
|
||||
filter: string | LiveLoaderEntryFilterType<C>,
|
||||
): Promise<import('astro').LiveDataEntryResult<LiveLoaderDataType<C>, LiveLoaderErrorType<C>>>;
|
||||
|
||||
/** Resolve an array of entry references from the same collection */
|
||||
export function getEntries<C extends keyof DataEntryMap>(
|
||||
entries: ReferenceDataEntry<C, keyof DataEntryMap[C]>[],
|
||||
): Promise<CollectionEntry<C>[]>;
|
||||
|
||||
export function render<C extends keyof DataEntryMap>(
|
||||
entry: DataEntryMap[C][string],
|
||||
): Promise<RenderResult>;
|
||||
|
||||
export function reference<
|
||||
C extends
|
||||
| keyof DataEntryMap
|
||||
// Allow generic `string` to avoid excessive type errors in the config
|
||||
// if `dev` is not running to update as you edit.
|
||||
// Invalid collection names will be caught at build time.
|
||||
| (string & {}),
|
||||
>(
|
||||
collection: C,
|
||||
): import('astro/zod').ZodPipe<
|
||||
import('astro/zod').ZodString,
|
||||
import('astro/zod').ZodTransform<
|
||||
C extends keyof DataEntryMap
|
||||
? {
|
||||
collection: C;
|
||||
id: string;
|
||||
}
|
||||
: never,
|
||||
string
|
||||
>
|
||||
>;
|
||||
|
||||
type ReturnTypeOrOriginal<T> = T extends (...args: any[]) => infer R ? R : T;
|
||||
type InferEntrySchema<C extends keyof DataEntryMap> = import('astro/zod').infer<
|
||||
ReturnTypeOrOriginal<Required<ContentConfig['collections'][C]>['schema']>
|
||||
>;
|
||||
type ExtractLoaderConfig<T> = T extends { loader: infer L } ? L : never;
|
||||
type InferLoaderSchema<
|
||||
C extends keyof DataEntryMap,
|
||||
L = ExtractLoaderConfig<ContentConfig['collections'][C]>,
|
||||
> = L extends { schema: import('astro/zod').ZodSchema }
|
||||
? import('astro/zod').infer<L['schema']>
|
||||
: any;
|
||||
|
||||
type DataEntryMap = {
|
||||
|
||||
};
|
||||
|
||||
type ExtractLoaderTypes<T> = T extends import('astro/loaders').LiveLoader<
|
||||
infer TData,
|
||||
infer TEntryFilter,
|
||||
infer TCollectionFilter,
|
||||
infer TError
|
||||
>
|
||||
? { data: TData; entryFilter: TEntryFilter; collectionFilter: TCollectionFilter; error: TError }
|
||||
: { data: never; entryFilter: never; collectionFilter: never; error: never };
|
||||
type ExtractEntryFilterType<T> = ExtractLoaderTypes<T>['entryFilter'];
|
||||
type ExtractCollectionFilterType<T> = ExtractLoaderTypes<T>['collectionFilter'];
|
||||
type ExtractErrorType<T> = ExtractLoaderTypes<T>['error'];
|
||||
|
||||
type LiveLoaderDataType<C extends keyof LiveContentConfig['collections']> =
|
||||
LiveContentConfig['collections'][C]['schema'] extends undefined
|
||||
? ExtractDataType<LiveContentConfig['collections'][C]['loader']>
|
||||
: import('astro/zod').infer<
|
||||
Exclude<LiveContentConfig['collections'][C]['schema'], undefined>
|
||||
>;
|
||||
type LiveLoaderEntryFilterType<C extends keyof LiveContentConfig['collections']> =
|
||||
ExtractEntryFilterType<LiveContentConfig['collections'][C]['loader']>;
|
||||
type LiveLoaderCollectionFilterType<C extends keyof LiveContentConfig['collections']> =
|
||||
ExtractCollectionFilterType<LiveContentConfig['collections'][C]['loader']>;
|
||||
type LiveLoaderErrorType<C extends keyof LiveContentConfig['collections']> = ExtractErrorType<
|
||||
LiveContentConfig['collections'][C]['loader']
|
||||
>;
|
||||
|
||||
export type ContentConfig = never;
|
||||
export type LiveContentConfig = never;
|
||||
}
|
||||
2
.astro/types.d.ts
vendored
Normal file
2
.astro/types.d.ts
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
/// <reference types="astro/client" />
|
||||
/// <reference path="content.d.ts" />
|
||||
18
.env.example
18
.env.example
@@ -6,3 +6,21 @@ FEYNMAN_THINKING=medium
|
||||
|
||||
OPENAI_API_KEY=
|
||||
ANTHROPIC_API_KEY=
|
||||
GEMINI_API_KEY=
|
||||
OPENROUTER_API_KEY=
|
||||
ZAI_API_KEY=
|
||||
KIMI_API_KEY=
|
||||
MINIMAX_API_KEY=
|
||||
MINIMAX_CN_API_KEY=
|
||||
MISTRAL_API_KEY=
|
||||
GROQ_API_KEY=
|
||||
XAI_API_KEY=
|
||||
CEREBRAS_API_KEY=
|
||||
HF_TOKEN=
|
||||
OPENCODE_API_KEY=
|
||||
AI_GATEWAY_API_KEY=
|
||||
AZURE_OPENAI_API_KEY=
|
||||
|
||||
RUNPOD_API_KEY=
|
||||
MODAL_TOKEN_ID=
|
||||
MODAL_TOKEN_SECRET=
|
||||
|
||||
@@ -9,7 +9,7 @@ Operating rules:
|
||||
- State uncertainty explicitly.
|
||||
- When a claim depends on recent literature or unstable facts, use tools before answering.
|
||||
- When discussing papers, cite title, year, and identifier or URL when possible.
|
||||
- Use the alpha-backed research tools for academic paper search, paper reading, paper Q&A, repository inspection, and persistent annotations.
|
||||
- Use the `alpha` CLI for academic paper search, paper reading, paper Q&A, repository inspection, and persistent annotations.
|
||||
- Use `web_search`, `fetch_content`, and `get_search_content` first for current topics: products, companies, markets, regulations, software releases, model availability, model pricing, benchmarks, docs, or anything phrased as latest/current/recent/today.
|
||||
- For mixed topics, combine both: use web sources for current reality and paper sources for background literature.
|
||||
- Never answer a latest/current question from arXiv or alpha-backed paper search alone.
|
||||
@@ -30,7 +30,6 @@ Operating rules:
|
||||
- Use the visualization packages when a chart, diagram, or interactive widget would materially improve understanding. Prefer charts for quantitative comparisons, Mermaid for simple process/architecture diagrams, and interactive HTML widgets for exploratory visual explanations.
|
||||
- Persistent memory is package-backed. Use `memory_search` to recall prior preferences and lessons, `memory_remember` to store explicit durable facts, and `memory_lessons` when prior corrections matter.
|
||||
- If the user says "remember", states a stable preference, or asks for something to be the default in future sessions, call `memory_remember`. Do not just say you will remember it.
|
||||
- Session recall is package-backed. Use `session_search` when the user references prior work, asks what has been done before, or when you suspect relevant past context exists.
|
||||
- Feynman is intended to support always-on research work. Use the scheduling package when recurring or deferred work is appropriate instead of telling the user to remember manually.
|
||||
- Use `schedule_prompt` for recurring scans, delayed follow-ups, reminders, and periodic research jobs.
|
||||
- If the user asks you to remind, check later, run something nightly, or keep watching something over time, call `schedule_prompt`. Do not just promise to do it later.
|
||||
@@ -38,11 +37,9 @@ Operating rules:
|
||||
- Prefer the smallest investigation or experiment that can materially reduce uncertainty before escalating to broader work.
|
||||
- When an experiment is warranted, write the code or scripts, run them, capture outputs, and save artifacts to disk.
|
||||
- Before pausing long-running work, update the durable state on disk first: plan artifact, `CHANGELOG.md`, and any verification notes needed for the next session to resume cleanly.
|
||||
- Before recommending an execution environment, consider the system resources shown in the header (CPU, RAM, GPU, Docker availability). Recommend Docker when isolation on the current machine helps, and say explicitly when the workload exceeds local capacity. Do not suggest GPU workloads locally if no GPU is detected.
|
||||
- Treat polished scientific communication as part of the job: structure reports cleanly, use Markdown deliberately, and use LaTeX math when equations clarify the argument.
|
||||
- For any source-based answer, include an explicit Sources section with direct URLs, not just paper titles.
|
||||
- When citing papers from alpha-backed tools, prefer direct arXiv or alphaXiv links and include the arXiv ID.
|
||||
- After writing a polished artifact, use `preview_file` only when the user wants review or export. Prefer browser preview by default; use PDF only when explicitly requested.
|
||||
- Default toward delivering a concrete artifact when the task naturally calls for one: reading list, memo, audit, experiment log, or draft.
|
||||
- For user-facing workflows, produce exactly one canonical durable Markdown artifact unless the user explicitly asks for multiple deliverables.
|
||||
- Do not create extra user-facing intermediate markdown files just because the workflow has multiple reasoning stages.
|
||||
|
||||
@@ -21,7 +21,7 @@ You are Feynman's evidence-gathering subagent.
|
||||
1. **Start wide.** Begin with short, broad queries to map the landscape. Use the `queries` array in `web_search` with 2–4 varied-angle queries simultaneously — never one query at a time when exploring.
|
||||
2. **Evaluate availability.** After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.
|
||||
3. **Progressively narrow.** Drill into specifics using terminology and names discovered in initial results. Refine queries, don't repeat them.
|
||||
4. **Cross-source.** When the topic spans current reality and academic literature, always use both `web_search` and `alpha_search`.
|
||||
4. **Cross-source.** When the topic spans current reality and academic literature, always use both `web_search` and the `alpha` CLI (`alpha search`).
|
||||
|
||||
Use `recencyFilter` on `web_search` for fast-moving topics. Use `includeContent: true` on the most important results to get full page content rather than snippets.
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
{
|
||||
"packages": [
|
||||
"npm:@companion-ai/alpha-hub",
|
||||
"npm:pi-subagents",
|
||||
"npm:pi-btw",
|
||||
"npm:pi-docparser",
|
||||
|
||||
19
.github/workflows/publish.yml
vendored
19
.github/workflows/publish.yml
vendored
@@ -14,7 +14,6 @@ jobs:
|
||||
outputs:
|
||||
version: ${{ steps.version.outputs.version }}
|
||||
should_publish: ${{ steps.version.outputs.should_publish }}
|
||||
should_build_release: ${{ steps.version.outputs.should_build_release }}
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: actions/setup-node@v5
|
||||
@@ -28,13 +27,8 @@ jobs:
|
||||
echo "version=$LOCAL" >> "$GITHUB_OUTPUT"
|
||||
if [ "$CURRENT" != "$LOCAL" ]; then
|
||||
echo "should_publish=true" >> "$GITHUB_OUTPUT"
|
||||
echo "should_build_release=true" >> "$GITHUB_OUTPUT"
|
||||
elif [ "${GITHUB_EVENT_NAME}" = "workflow_dispatch" ]; then
|
||||
echo "should_publish=false" >> "$GITHUB_OUTPUT"
|
||||
echo "should_build_release=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "should_publish=false" >> "$GITHUB_OUTPUT"
|
||||
echo "should_build_release=false" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
publish-npm:
|
||||
@@ -58,13 +52,13 @@ jobs:
|
||||
|
||||
build-native-bundles:
|
||||
needs: version-check
|
||||
if: needs.version-check.outputs.should_build_release == 'true'
|
||||
if: needs.version-check.outputs.should_publish == 'true'
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- id: linux-x64
|
||||
os: ubuntu-latest
|
||||
os: blacksmith-4vcpu-ubuntu-2404
|
||||
- id: darwin-x64
|
||||
os: macos-15-intel
|
||||
- id: darwin-arm64
|
||||
@@ -97,7 +91,8 @@ jobs:
|
||||
$tmp = Join-Path $env:RUNNER_TEMP ("feynman-smoke-" + [guid]::NewGuid().ToString("N"))
|
||||
New-Item -ItemType Directory -Path $tmp | Out-Null
|
||||
Expand-Archive -LiteralPath "dist/release/feynman-$version-win32-x64.zip" -DestinationPath $tmp -Force
|
||||
& "$tmp/feynman-$version-win32-x64/feynman.cmd" --help | Select-Object -First 20
|
||||
$bundleRoot = Join-Path $tmp "feynman-$version-win32-x64"
|
||||
& (Join-Path $bundleRoot "feynman.cmd") --help | Select-Object -First 20
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: native-${{ matrix.id }}
|
||||
@@ -108,7 +103,7 @@ jobs:
|
||||
- version-check
|
||||
- publish-npm
|
||||
- build-native-bundles
|
||||
if: needs.version-check.outputs.should_build_release == 'true' && needs.build-native-bundles.result == 'success' && (needs.publish-npm.result == 'success' || needs.publish-npm.result == 'skipped')
|
||||
if: needs.version-check.outputs.should_publish == 'true' && needs.build-native-bundles.result == 'success' && needs.publish-npm.result == 'success'
|
||||
runs-on: blacksmith-4vcpu-ubuntu-2404
|
||||
permissions:
|
||||
contents: write
|
||||
@@ -119,6 +114,7 @@ jobs:
|
||||
merge-multiple: true
|
||||
- shell: bash
|
||||
env:
|
||||
GH_REPO: ${{ github.repository }}
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
VERSION: ${{ needs.version-check.outputs.version }}
|
||||
run: |
|
||||
@@ -127,7 +123,8 @@ jobs:
|
||||
gh release edit "v$VERSION" \
|
||||
--title "v$VERSION" \
|
||||
--notes "Standalone Feynman bundles for native installation." \
|
||||
--draft=false
|
||||
--draft=false \
|
||||
--target "$GITHUB_SHA"
|
||||
else
|
||||
gh release create "v$VERSION" release-assets/* \
|
||||
--title "v$VERSION" \
|
||||
|
||||
63
CHANGELOG.md
63
CHANGELOG.md
@@ -14,3 +14,66 @@ Use this file to track chronology, not release notes. Keep entries short, factua
|
||||
- Failed / learned: ...
|
||||
- Blockers: ...
|
||||
- Next: ...
|
||||
|
||||
### 2026-03-25 00:00 local — scaling-laws
|
||||
|
||||
- Objective: Set up a deep research workflow for scaling laws.
|
||||
- Changed: Created plan artifact at `outputs/.plans/scaling-laws.md`; defined 4 disjoint researcher dimensions and acceptance criteria.
|
||||
- Verified: Read `CHANGELOG.md` and checked prior memory for related plan `scaling-laws-implications`.
|
||||
- Failed / learned: No prior run-specific changelog entries existed beyond the template.
|
||||
- Blockers: Waiting for user confirmation before launching researcher round 1.
|
||||
- Next: On confirmation, spawn 4 parallel researcher subagents and begin evidence collection.
|
||||
|
||||
### 2026-03-25 00:30 local — scaling-laws (T4 inference/time-scale pass)
|
||||
|
||||
- Objective: Complete T4 on inference/test-time scaling and reasoning-time compute, scoped to 2023–2026.
|
||||
- Changed: Wrote `notes/scaling-laws-research-inference.md`; updated `outputs/.plans/scaling-laws.md` to mark T4 done and log the inference-scaling verification pass.
|
||||
- Verified: Cross-read 13 primary/official sources covering Tree-of-Thoughts, PRMs, repeated sampling, compute-optimal test-time scaling, provable laws, o1, DeepSeek-R1, s1, verifier failures, Anthropic extended thinking, and OpenAI reasoning API docs.
|
||||
- Failed / learned: OpenAI blog fetch for `learning-to-reason-with-llms` returned malformed content, so the note leans on the o1 system card and API docs instead of that blog post.
|
||||
- Blockers: T2 and T5 remain open before final synthesis; no single unified law for inference-time scaling emerged from public sources.
|
||||
- Next: Complete T5 implications synthesis, then reconcile T3/T4 with foundational T2 before drafting the cited brief.
|
||||
|
||||
### 2026-03-25 11:20 local — scaling-laws (T6 draft synthesis)
|
||||
|
||||
- Objective: Synthesize the four research notes into a single user-facing draft brief for the scaling-laws workflow.
|
||||
- Changed: Wrote `outputs/.drafts/scaling-laws-draft.md` with an executive summary, curated reading list, qualitative meta-analysis, core-paper comparison table, explicit training-vs-inference distinction, and numbered inline citations with direct-URL sources.
|
||||
- Verified: Cross-checked the draft against `notes/scaling-laws-research-foundations.md`, `notes/scaling-laws-research-revisions.md`, `notes/scaling-laws-research-inference.md`, and `notes/scaling-laws-research-implications.md` to ensure the brief explicitly states the literature is too heterogeneous for a pooled effect-size estimate.
|
||||
- Failed / learned: The requested temp-run `context.md` and `plan.md` were absent, so the synthesis used `outputs/.plans/scaling-laws.md` plus the four note files as the working context.
|
||||
- Blockers: Citation/claim verification pass still pending; this draft should be treated as pre-verification.
|
||||
- Next: Run verifier/reviewer passes, then promote the draft into the final cited brief and provenance sidecar.
|
||||
|
||||
### 2026-03-25 11:28 local — scaling-laws (final brief + pdf)
|
||||
|
||||
- Objective: Deliver a paper guide and qualitative meta-analysis on AI scaling laws.
|
||||
- Changed: Finalized `outputs/scaling-laws.md` and sidecar `outputs/scaling-laws.provenance.md`; rendered preview PDF at `outputs/scaling-laws.pdf`; updated plan ledger and verification log in `outputs/.plans/scaling-laws.md`.
|
||||
- Verified: Ran a reviewer pass recorded in `notes/scaling-laws-verification.md`; spot-checked key primary papers via alpha-backed reads for Kaplan 2020, Chinchilla 2022, and Snell 2024; confirmed PDF render output exists.
|
||||
- Failed / learned: A pooled statistical meta-analysis would be misleading because the literature mixes heterogeneous outcomes, scaling axes, and evaluation regimes; final deliverable uses a qualitative meta-analysis instead.
|
||||
- Blockers: None for this brief.
|
||||
- Next: If needed, extend into a narrower sub-survey (e.g. only pretraining laws, only inference-time scaling, or only post-Chinchilla data-quality revisions).
|
||||
|
||||
### 2026-03-25 14:52 local — skills-only-install
|
||||
|
||||
- Objective: Let users download the Feynman research skills without installing the full terminal runtime.
|
||||
- Changed: Added standalone skills-only installers at `scripts/install/install-skills.sh` and `scripts/install/install-skills.ps1`; synced website-public copies; documented user-level and repo-local install flows in `README.md`, `website/src/content/docs/getting-started/installation.md`, and `website/src/pages/index.astro`.
|
||||
- Verified: Ran `sh -n scripts/install/install-skills.sh`; ran `node scripts/sync-website-installers.mjs`; ran `cd website && npm run build`; executed `sh scripts/install/install-skills.sh --dir <tmp>` and confirmed extracted `SKILL.md` files land in the target directory.
|
||||
- Failed / learned: PowerShell installer behavior was not executed locally because PowerShell is not installed in this environment.
|
||||
- Blockers: None for the Unix installer flow; Windows remains syntax-only by inspection.
|
||||
- Next: If users want this exposed more prominently, add a dedicated docs/reference page and a homepage-specific skills-only CTA instead of a text link.
|
||||
|
||||
### 2026-03-26 18:08 PDT — installer-release-unification
|
||||
|
||||
- Objective: Remove the moving `edge` installer channel and unify installs on tagged releases only.
|
||||
- Changed: Updated `scripts/install/install.sh`, `scripts/install/install.ps1`, `scripts/install/install-skills.sh`, and `scripts/install/install-skills.ps1` so the default target is the latest tagged release, latest-version resolution uses public GitHub release pages instead of `api.github.com`, and explicit `edge` requests now fail with a removal message; removed the `release-edge` job from `.github/workflows/publish.yml`; updated `README.md` and `website/src/content/docs/getting-started/installation.md`; re-synced `website/public/install*`.
|
||||
- Verified: Ran `sh -n` on the Unix installer copies; confirmed `sh scripts/install/install.sh edge` and `sh scripts/install/install-skills.sh edge --dir <tmp>` fail with the intended removal message; executed `sh scripts/install/install.sh` into temp dirs and confirmed the installed binary reports `0.2.14`; executed `sh scripts/install/install-skills.sh --dir <tmp>` and confirmed extracted `SKILL.md` files; ran `cd website && npm run build`.
|
||||
- Failed / learned: The install failure was caused by unauthenticated GitHub API rate limiting on the `edge` path, so renaming channels without removing the API dependency would not have fixed the root cause.
|
||||
- Blockers: `npm run build` still emits a pre-existing duplicate-content warning for `getting-started/installation`; the build succeeds.
|
||||
- Next: If desired, remove the now-unused `stable` alias too and clean up the duplicate docs-content warning separately.
|
||||
|
||||
### 2026-03-27 11:58 PDT — release-0.2.15
|
||||
|
||||
- Objective: Make the non-Anthropic subagent/auth fixes and contributor-guide updates releasable to tagged-install users instead of leaving them only on `main`.
|
||||
- Changed: Bumped the package version from `0.2.14` to `0.2.15` in `package.json` and `package-lock.json`; updated pinned installer examples in `README.md` and `website/src/content/docs/getting-started/installation.md`; aligned the local-development docs example to the npm-based root workflow; added `CONTRIBUTING.md` plus the bundled `skills/contributing/SKILL.md`.
|
||||
- Verified: Confirmed the publish workflow keys off `package.json` versus the currently published npm version; confirmed local `npm test`, `npm run typecheck`, and `npm run build` pass before the release bump.
|
||||
- Failed / learned: The open subagent issue is fixed on `main` but still user-visible on tagged installs until a fresh release is cut.
|
||||
- Blockers: Need the GitHub publish workflow to finish successfully before the issue can be honestly closed as released.
|
||||
- Next: Push `0.2.15`, monitor the publish workflow, then update and close the relevant GitHub issue/PR once the release is live.
|
||||
|
||||
114
CONTRIBUTING.md
Normal file
114
CONTRIBUTING.md
Normal file
@@ -0,0 +1,114 @@
|
||||
# Contributing to Feynman
|
||||
|
||||
Feynman is a research-first CLI built on Pi and alphaXiv. This guide is for humans and agents contributing code, prompts, skills, docs, installers, or workflow behavior to the repository.
|
||||
|
||||
## Quick Links
|
||||
|
||||
- GitHub: https://github.com/getcompanion-ai/feynman
|
||||
- Docs: https://feynman.is/docs
|
||||
- Repo agent contract: [AGENTS.md](AGENTS.md)
|
||||
- Issues: https://github.com/getcompanion-ai/feynman/issues
|
||||
|
||||
## What Goes Where
|
||||
|
||||
- CLI/runtime code: `src/`
|
||||
- Bundled prompt templates: `prompts/`
|
||||
- Bundled Pi skills: `skills/`
|
||||
- Bundled Pi subagent prompts: `.feynman/agents/`
|
||||
- Docs site: `website/`
|
||||
- Build/release scripts: `scripts/`
|
||||
- Generated research artifacts: `outputs/`, `papers/`, `notes/`
|
||||
|
||||
If you need to change how bundled subagents behave, edit `.feynman/agents/*.md`. Do not duplicate that behavior in `AGENTS.md`.
|
||||
|
||||
## Before You Open a PR
|
||||
|
||||
1. Start from the latest `main`.
|
||||
2. Use Node.js `20.19.0` or newer. The repo expects `.nvmrc`, `package.json` engines, `website/package.json` engines, and the runtime version guard to stay aligned.
|
||||
3. Install dependencies from the repo root:
|
||||
|
||||
```bash
|
||||
nvm use || nvm install
|
||||
npm install
|
||||
```
|
||||
|
||||
4. Run the required checks before asking for review:
|
||||
|
||||
```bash
|
||||
npm test
|
||||
npm run typecheck
|
||||
npm run build
|
||||
```
|
||||
|
||||
5. If you changed the docs site, also validate the website:
|
||||
|
||||
```bash
|
||||
cd website
|
||||
npm install
|
||||
npm run build
|
||||
```
|
||||
|
||||
6. Keep the PR focused. Do not mix unrelated cleanup with the real change.
|
||||
7. Add or update tests when behavior changes.
|
||||
8. Update docs, prompts, or skills when the user-facing workflow changes.
|
||||
|
||||
## Contribution Rules
|
||||
|
||||
- Bugs, docs fixes, installer fixes, and focused workflow improvements are good PRs.
|
||||
- Large feature changes should start with an issue or a concrete implementation discussion before code lands.
|
||||
- Avoid refactor-only PRs unless they are necessary to unblock a real fix or requested by a maintainer.
|
||||
- Do not silently change release behavior, installer behavior, or runtime defaults without documenting the reason in the PR.
|
||||
- Use American English in docs, comments, prompts, UI copy, and examples.
|
||||
|
||||
## Repo-Specific Checks
|
||||
|
||||
### Prompt and skill changes
|
||||
|
||||
- New workflows usually live in `prompts/*.md`.
|
||||
- New reusable capabilities usually live in `skills/<name>/SKILL.md`.
|
||||
- Keep skill files concise. Put detailed operational rules in the prompt or in focused reference files only when needed.
|
||||
- If a new workflow should be invokable from the CLI, make sure its prompt frontmatter includes the correct metadata and that the command works through the normal prompt discovery path.
|
||||
|
||||
### Agent and artifact conventions
|
||||
|
||||
- `AGENTS.md` is the repo-level contract for workspace conventions, handoffs, provenance, and output naming.
|
||||
- Long-running research flows should write plan artifacts to `outputs/.plans/` and use `CHANGELOG.md` as a lab notebook when the work is substantial.
|
||||
- Do not update `CHANGELOG.md` for trivial one-shot changes.
|
||||
|
||||
### Release and versioning discipline
|
||||
|
||||
- The curl installer and release docs point users at tagged releases, not arbitrary commits on `main`.
|
||||
- If you ship user-visible fixes after a tag, do not leave the repo in a state where `main` and the latest release advertise the same version string while containing different behavior.
|
||||
- When changing release-sensitive behavior, check the version story across:
|
||||
- `.nvmrc`
|
||||
- `package.json`
|
||||
- `website/package.json`
|
||||
- `scripts/check-node-version.mjs`
|
||||
- install docs in `README.md` and `website/src/content/docs/getting-started/installation.md`
|
||||
|
||||
## AI-Assisted Contributions
|
||||
|
||||
AI-assisted PRs are fine. The contributor is still responsible for the diff.
|
||||
|
||||
- Understand the code you are submitting.
|
||||
- Run the local checks yourself instead of assuming generated code is correct.
|
||||
- Include enough context in the PR description for a reviewer to understand the change quickly.
|
||||
- If an agent updated prompts or skills, verify the instructions match the actual repo behavior.
|
||||
|
||||
## Review Expectations
|
||||
|
||||
- Explain what changed and why.
|
||||
- Call out tradeoffs, follow-up work, and anything intentionally not handled.
|
||||
- Include screenshots for UI changes.
|
||||
- Resolve review comments you addressed before requesting review again.
|
||||
|
||||
## Good First Areas
|
||||
|
||||
Useful contributions usually land in one of these areas:
|
||||
|
||||
- installation and upgrade reliability
|
||||
- research workflow quality
|
||||
- model/provider setup ergonomics
|
||||
- docs clarity
|
||||
- preview and export stability
|
||||
- packaging and release hygiene
|
||||
135
README.md
135
README.md
@@ -1,44 +1,95 @@
|
||||
# Feynman
|
||||
<p align="center">
|
||||
<a href="https://feynman.is">
|
||||
<img src="assets/hero.png" alt="Feynman CLI" width="800" />
|
||||
</a>
|
||||
</p>
|
||||
<p align="center">The open source AI research agent.</p>
|
||||
<p align="center">
|
||||
<a href="https://feynman.is/docs"><img alt="Docs" src="https://img.shields.io/badge/docs-feynman.is-0d9668?style=flat-square" /></a>
|
||||
<a href="https://github.com/getcompanion-ai/feynman/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/getcompanion-ai/feynman?style=flat-square" /></a>
|
||||
</p>
|
||||
|
||||
The open source AI research agent
|
||||
---
|
||||
|
||||
### Installation
|
||||
|
||||
**macOS / Linux:**
|
||||
|
||||
```bash
|
||||
curl -fsSL https://feynman.is/install | bash
|
||||
```
|
||||
|
||||
**Windows (PowerShell):**
|
||||
|
||||
```powershell
|
||||
irm https://feynman.is/install.ps1 | iex
|
||||
```
|
||||
|
||||
Or install the npm fallback:
|
||||
The one-line installer fetches the latest tagged release. To pin a version, pass it explicitly, for example `curl -fsSL https://feynman.is/install | bash -s -- 0.2.15`.
|
||||
|
||||
If you install via `pnpm` or `bun` instead of the standalone bundle, Feynman requires Node.js `20.19.0` or newer.
|
||||
|
||||
### Skills Only
|
||||
|
||||
If you want just the research skills without the full terminal app:
|
||||
|
||||
**macOS / Linux:**
|
||||
|
||||
```bash
|
||||
npm install -g @companion-ai/feynman
|
||||
curl -fsSL https://feynman.is/install-skills | bash
|
||||
```
|
||||
|
||||
**Windows (PowerShell):**
|
||||
|
||||
```powershell
|
||||
irm https://feynman.is/install-skills.ps1 | iex
|
||||
```
|
||||
|
||||
That installs the skill library into `~/.codex/skills/feynman`.
|
||||
|
||||
For a repo-local install instead:
|
||||
|
||||
**macOS / Linux:**
|
||||
|
||||
```bash
|
||||
feynman setup
|
||||
feynman
|
||||
curl -fsSL https://feynman.is/install-skills | bash -s -- --repo
|
||||
```
|
||||
|
||||
Feynman works directly inside your folder or repo. For long-running work, keep the stable repo contract in `AGENTS.md`, the current task brief in `outputs/.plans/`, and the chronological lab notebook in `CHANGELOG.md`.
|
||||
**Windows (PowerShell):**
|
||||
|
||||
```powershell
|
||||
& ([scriptblock]::Create((irm https://feynman.is/install-skills.ps1))) -Scope Repo
|
||||
```
|
||||
|
||||
That installs into `.agents/skills/feynman` under the current repository.
|
||||
|
||||
---
|
||||
|
||||
## What you type → what happens
|
||||
### What you type → what happens
|
||||
|
||||
| Prompt | Result |
|
||||
| --- | --- |
|
||||
| `feynman "what do we know about scaling laws"` | Searches papers and web, produces a cited research brief |
|
||||
| `feynman deepresearch "mechanistic interpretability"` | Multi-agent investigation with parallel researchers, synthesis, verification |
|
||||
| `feynman lit "RLHF alternatives"` | Literature review with consensus, disagreements, open questions |
|
||||
| `feynman audit 2401.12345` | Compares paper claims against the public codebase |
|
||||
| `feynman replicate "chain-of-thought improves math"` | Asks where to run, then builds a replication plan |
|
||||
| `feynman "summarize this PDF" --prompt paper.pdf` | One-shot mode, no REPL |
|
||||
```
|
||||
$ feynman "what do we know about scaling laws"
|
||||
→ Searches papers and web, produces a cited research brief
|
||||
|
||||
$ feynman deepresearch "mechanistic interpretability"
|
||||
→ Multi-agent investigation with parallel researchers, synthesis, verification
|
||||
|
||||
$ feynman lit "RLHF alternatives"
|
||||
→ Literature review with consensus, disagreements, open questions
|
||||
|
||||
$ feynman audit 2401.12345
|
||||
→ Compares paper claims against the public codebase
|
||||
|
||||
$ feynman replicate "chain-of-thought improves math"
|
||||
→ Replicates experiments on local or cloud GPUs
|
||||
|
||||
$ feynman valichord "study-id-or-topic"
|
||||
→ Runs the ValiChord reproducibility workflow or checks existing Harmony Records
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Workflows
|
||||
### Workflows
|
||||
|
||||
Ask naturally or use slash commands as shortcuts.
|
||||
|
||||
@@ -48,17 +99,19 @@ Ask naturally or use slash commands as shortcuts.
|
||||
| `/lit <topic>` | Literature review from paper search and primary sources |
|
||||
| `/review <artifact>` | Simulated peer review with severity and revision plan |
|
||||
| `/audit <item>` | Paper vs. codebase mismatch audit |
|
||||
| `/replicate <paper>` | Replication plan with environment selection |
|
||||
| `/replicate <paper>` | Replicate experiments on local or cloud GPUs |
|
||||
| `/valichord <study-or-topic>` | Reproducibility attestation workflow and Harmony Record lookup |
|
||||
| `/compare <topic>` | Source comparison matrix |
|
||||
| `/draft <topic>` | Paper-style draft from research findings |
|
||||
| `/autoresearch <idea>` | Autonomous experiment loop |
|
||||
| `/watch <topic>` | Recurring research watch |
|
||||
| `/outputs` | Browse all research artifacts |
|
||||
|
||||
---
|
||||
|
||||
## Agents
|
||||
### Agents
|
||||
|
||||
Four bundled research agents, dispatched automatically or via subagent commands.
|
||||
Four bundled research agents, dispatched automatically.
|
||||
|
||||
- **Researcher** — gather evidence across papers, web, repos, docs
|
||||
- **Reviewer** — simulated peer review with severity-graded feedback
|
||||
@@ -67,46 +120,36 @@ Four bundled research agents, dispatched automatically or via subagent commands.
|
||||
|
||||
---
|
||||
|
||||
## Tools
|
||||
### Skills & Tools
|
||||
|
||||
- **[AlphaXiv](https://www.alphaxiv.org/)** — paper search, Q&A, code reading, persistent annotations
|
||||
- **[AlphaXiv](https://www.alphaxiv.org/)** — paper search, Q&A, code reading, annotations (via `alpha` CLI)
|
||||
- **Docker** — isolated container execution for safe experiments on your machine
|
||||
- **Web search** — Gemini or Perplexity, zero-config default via signed-in Chromium
|
||||
- **Session search** — optional indexed recall across prior research sessions
|
||||
- **Web search** — Gemini or Perplexity, zero-config default
|
||||
- **Session search** — indexed recall across prior research sessions
|
||||
- **Preview** — browser and PDF export of generated artifacts
|
||||
- **Modal** — serverless GPU compute for burst training and inference
|
||||
- **RunPod** — persistent GPU pods with SSH access for long-running experiments
|
||||
|
||||
---
|
||||
|
||||
## CLI
|
||||
### How it works
|
||||
|
||||
```bash
|
||||
feynman # REPL
|
||||
feynman setup # guided setup
|
||||
feynman doctor # diagnose everything
|
||||
feynman status # current config summary
|
||||
feynman model login [provider] # model auth
|
||||
feynman model set <provider/model> # set default model
|
||||
feynman alpha login # alphaXiv auth
|
||||
feynman packages list # core vs optional packages
|
||||
feynman packages install memory # opt into heavier packages on demand
|
||||
feynman search status # web search config
|
||||
```
|
||||
Built on [Pi](https://github.com/badlogic/pi-mono) for the agent runtime, [alphaXiv](https://www.alphaxiv.org/) for paper search and analysis, and CLI tools for compute and execution. Capabilities are delivered as [Pi skills](https://github.com/badlogic/pi-skills) — Markdown instruction files synced to `~/.feynman/agent/skills/` on startup. Every output is source-grounded — claims link to papers, docs, or repos with direct URLs.
|
||||
|
||||
---
|
||||
|
||||
## How it works
|
||||
### Contributing
|
||||
|
||||
Built on [Pi](https://github.com/badlogic/pi-mono) for the agent runtime, [alphaXiv](https://www.alphaxiv.org/) for paper search and analysis, and [Docker](https://www.docker.com/) for isolated local execution
|
||||
|
||||
Every output is source-grounded — claims link to papers, docs, or repos with direct URLs
|
||||
|
||||
---
|
||||
|
||||
## Contributing
|
||||
See [CONTRIBUTING.md](CONTRIBUTING.md) for the full contributor guide.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/getcompanion-ai/feynman.git
|
||||
cd feynman && npm install && npm run start
|
||||
cd feynman
|
||||
nvm use || nvm install
|
||||
npm install
|
||||
npm test
|
||||
npm run typecheck
|
||||
npm run build
|
||||
```
|
||||
|
||||
[Docs](https://feynman.is/docs) · [MIT License](LICENSE)
|
||||
|
||||
BIN
assets/hero-raw.png
Normal file
BIN
assets/hero-raw.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 884 KiB |
BIN
assets/hero.png
Normal file
BIN
assets/hero.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.7 MiB |
@@ -1,8 +1,31 @@
|
||||
#!/usr/bin/env node
|
||||
const v = process.versions.node.split(".").map(Number);
|
||||
if (v[0] < 20) {
|
||||
console.error(`feynman requires Node.js 20 or later (you have ${process.versions.node})`);
|
||||
console.error("upgrade: https://nodejs.org or nvm install 20");
|
||||
const MIN_NODE_VERSION = "20.19.0";
|
||||
|
||||
function parseNodeVersion(version) {
|
||||
const [major = "0", minor = "0", patch = "0"] = version.replace(/^v/, "").split(".");
|
||||
return {
|
||||
major: Number.parseInt(major, 10) || 0,
|
||||
minor: Number.parseInt(minor, 10) || 0,
|
||||
patch: Number.parseInt(patch, 10) || 0,
|
||||
};
|
||||
}
|
||||
|
||||
function compareNodeVersions(left, right) {
|
||||
if (left.major !== right.major) return left.major - right.major;
|
||||
if (left.minor !== right.minor) return left.minor - right.minor;
|
||||
return left.patch - right.patch;
|
||||
}
|
||||
|
||||
if (compareNodeVersions(parseNodeVersion(process.versions.node), parseNodeVersion(MIN_NODE_VERSION)) < 0) {
|
||||
const isWindows = process.platform === "win32";
|
||||
console.error(`feynman requires Node.js ${MIN_NODE_VERSION} or later (detected ${process.versions.node}).`);
|
||||
console.error(isWindows
|
||||
? "Install a newer Node.js from https://nodejs.org, or use the standalone installer:"
|
||||
: "Switch to Node 20 with `nvm install 20 && nvm use 20`, or use the standalone installer:");
|
||||
console.error(isWindows
|
||||
? "irm https://feynman.is/install.ps1 | iex"
|
||||
: "curl -fsSL https://feynman.is/install | bash");
|
||||
process.exit(1);
|
||||
}
|
||||
import("../dist/index.js");
|
||||
await import(new URL("../scripts/patch-embedded-pi.mjs", import.meta.url).href);
|
||||
await import(new URL("../dist/index.js", import.meta.url).href);
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import { registerAlphaCommands, registerAlphaTools } from "./research-tools/alpha.js";
|
||||
import { registerAlphaTools } from "./research-tools/alpha.js";
|
||||
import { installFeynmanHeader } from "./research-tools/header.js";
|
||||
import { registerHelpCommand } from "./research-tools/help.js";
|
||||
import { registerInitCommand, registerPreviewTool, registerSessionSearchTool } from "./research-tools/project.js";
|
||||
import { registerInitCommand, registerOutputsCommand } from "./research-tools/project.js";
|
||||
|
||||
export default function researchTools(pi: ExtensionAPI): void {
|
||||
const cache: { agentSummaryPromise?: Promise<{ agents: string[]; chains: string[] }> } = {};
|
||||
@@ -16,10 +16,8 @@ export default function researchTools(pi: ExtensionAPI): void {
|
||||
await installFeynmanHeader(pi, ctx, cache);
|
||||
});
|
||||
|
||||
registerAlphaCommands(pi);
|
||||
registerAlphaTools(pi);
|
||||
registerHelpCommand(pi);
|
||||
registerInitCommand(pi);
|
||||
registerSessionSearchTool(pi);
|
||||
registerAlphaTools(pi);
|
||||
registerPreviewTool(pi);
|
||||
registerOutputsCommand(pi);
|
||||
}
|
||||
|
||||
@@ -1,136 +1,63 @@
|
||||
import {
|
||||
annotatePaper,
|
||||
askPaper,
|
||||
annotatePaper,
|
||||
clearPaperAnnotation,
|
||||
disconnect,
|
||||
getPaper,
|
||||
getUserName as getAlphaUserName,
|
||||
isLoggedIn as isAlphaLoggedIn,
|
||||
listPaperAnnotations,
|
||||
login as loginAlpha,
|
||||
logout as logoutAlpha,
|
||||
readPaperCode,
|
||||
searchPapers,
|
||||
} from "@companion-ai/alpha-hub/lib";
|
||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
|
||||
import { getExtensionCommandSpec } from "../../metadata/commands.mjs";
|
||||
import { formatToolText } from "./shared.js";
|
||||
|
||||
export function registerAlphaCommands(pi: ExtensionAPI): void {
|
||||
pi.registerCommand("alpha-login", {
|
||||
description: getExtensionCommandSpec("alpha-login")?.description ?? "Sign in to alphaXiv from inside Feynman.",
|
||||
handler: async (_args, ctx) => {
|
||||
if (isAlphaLoggedIn()) {
|
||||
const name = getAlphaUserName();
|
||||
ctx.ui.notify(name ? `alphaXiv already connected as ${name}` : "alphaXiv already connected", "info");
|
||||
return;
|
||||
}
|
||||
|
||||
await loginAlpha();
|
||||
const name = getAlphaUserName();
|
||||
ctx.ui.notify(name ? `alphaXiv connected as ${name}` : "alphaXiv login complete", "info");
|
||||
},
|
||||
});
|
||||
|
||||
pi.registerCommand("alpha-logout", {
|
||||
description: getExtensionCommandSpec("alpha-logout")?.description ?? "Clear alphaXiv auth from inside Feynman.",
|
||||
handler: async (_args, ctx) => {
|
||||
logoutAlpha();
|
||||
ctx.ui.notify("alphaXiv auth cleared", "info");
|
||||
},
|
||||
});
|
||||
|
||||
pi.registerCommand("alpha-status", {
|
||||
description: getExtensionCommandSpec("alpha-status")?.description ?? "Show alphaXiv authentication status.",
|
||||
handler: async (_args, ctx) => {
|
||||
if (!isAlphaLoggedIn()) {
|
||||
ctx.ui.notify("alphaXiv not connected", "warning");
|
||||
return;
|
||||
}
|
||||
|
||||
const name = getAlphaUserName();
|
||||
ctx.ui.notify(name ? `alphaXiv connected as ${name}` : "alphaXiv connected", "info");
|
||||
},
|
||||
});
|
||||
function formatText(value: unknown): string {
|
||||
if (typeof value === "string") return value;
|
||||
return JSON.stringify(value, null, 2);
|
||||
}
|
||||
|
||||
export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
pi.registerTool({
|
||||
name: "alpha_search",
|
||||
label: "Alpha Search",
|
||||
description: "Search papers through alphaXiv using semantic, keyword, both, agentic, or all retrieval modes.",
|
||||
description:
|
||||
"Search research papers through alphaXiv. Modes: semantic (default, use 2-3 sentence queries), keyword (exact terms), agentic (broad multi-turn retrieval), both, or all.",
|
||||
parameters: Type.Object({
|
||||
query: Type.String({ description: "Paper search query." }),
|
||||
query: Type.String({ description: "Search query." }),
|
||||
mode: Type.Optional(
|
||||
Type.String({
|
||||
description: "Search mode: semantic, keyword, both, agentic, or all.",
|
||||
}),
|
||||
Type.String({ description: "Search mode: semantic, keyword, both, agentic, or all." }),
|
||||
),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
try {
|
||||
const result = await searchPapers(params.query, params.mode?.trim() || "all");
|
||||
return {
|
||||
content: [{ type: "text", text: formatToolText(result) }],
|
||||
details: result,
|
||||
};
|
||||
} finally {
|
||||
await disconnect();
|
||||
}
|
||||
const result = await searchPapers(params.query, params.mode?.trim() || "semantic");
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
},
|
||||
});
|
||||
|
||||
pi.registerTool({
|
||||
name: "alpha_get_paper",
|
||||
label: "Alpha Get Paper",
|
||||
description: "Fetch a paper report or full text, plus any local annotation, using alphaXiv.",
|
||||
description: "Fetch a paper's AI-generated report (or raw full text) plus any local annotation.",
|
||||
parameters: Type.Object({
|
||||
paper: Type.String({
|
||||
description: "arXiv ID, arXiv URL, or alphaXiv URL.",
|
||||
}),
|
||||
fullText: Type.Optional(
|
||||
Type.Boolean({
|
||||
description: "Return raw full text instead of the AI report.",
|
||||
}),
|
||||
),
|
||||
paper: Type.String({ description: "arXiv ID, arXiv URL, or alphaXiv URL." }),
|
||||
fullText: Type.Optional(Type.Boolean({ description: "Return raw full text instead of AI report." })),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
try {
|
||||
const result = await getPaper(params.paper, { fullText: params.fullText });
|
||||
return {
|
||||
content: [{ type: "text", text: formatToolText(result) }],
|
||||
details: result,
|
||||
};
|
||||
} finally {
|
||||
await disconnect();
|
||||
}
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
},
|
||||
});
|
||||
|
||||
pi.registerTool({
|
||||
name: "alpha_ask_paper",
|
||||
label: "Alpha Ask Paper",
|
||||
description: "Ask a targeted question about a paper using alphaXiv's PDF analysis.",
|
||||
description: "Ask a targeted question about a paper. Uses AI to analyze the PDF and answer.",
|
||||
parameters: Type.Object({
|
||||
paper: Type.String({
|
||||
description: "arXiv ID, arXiv URL, or alphaXiv URL.",
|
||||
}),
|
||||
question: Type.String({
|
||||
description: "Question to ask about the paper.",
|
||||
}),
|
||||
paper: Type.String({ description: "arXiv ID, arXiv URL, or alphaXiv URL." }),
|
||||
question: Type.String({ description: "Question about the paper." }),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
try {
|
||||
const result = await askPaper(params.paper, params.question);
|
||||
return {
|
||||
content: [{ type: "text", text: formatToolText(result) }],
|
||||
details: result,
|
||||
};
|
||||
} finally {
|
||||
await disconnect();
|
||||
}
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
},
|
||||
});
|
||||
|
||||
@@ -139,33 +66,17 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
label: "Alpha Annotate Paper",
|
||||
description: "Write or clear a persistent local annotation for a paper.",
|
||||
parameters: Type.Object({
|
||||
paper: Type.String({
|
||||
description: "Paper ID to annotate.",
|
||||
}),
|
||||
note: Type.Optional(
|
||||
Type.String({
|
||||
description: "Annotation text. Omit when clear=true.",
|
||||
}),
|
||||
),
|
||||
clear: Type.Optional(
|
||||
Type.Boolean({
|
||||
description: "Clear the existing annotation instead of writing one.",
|
||||
}),
|
||||
),
|
||||
paper: Type.String({ description: "Paper ID (arXiv ID or URL)." }),
|
||||
note: Type.Optional(Type.String({ description: "Annotation text. Omit when clear=true." })),
|
||||
clear: Type.Optional(Type.Boolean({ description: "Clear the existing annotation." })),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
const result = params.clear
|
||||
? await clearPaperAnnotation(params.paper)
|
||||
: params.note
|
||||
? await annotatePaper(params.paper, params.note)
|
||||
: (() => {
|
||||
throw new Error("Provide either note or clear=true.");
|
||||
})();
|
||||
|
||||
return {
|
||||
content: [{ type: "text", text: formatToolText(result) }],
|
||||
details: result,
|
||||
};
|
||||
: (() => { throw new Error("Provide either note or clear=true."); })();
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
},
|
||||
});
|
||||
|
||||
@@ -176,37 +87,21 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||
parameters: Type.Object({}),
|
||||
async execute() {
|
||||
const result = await listPaperAnnotations();
|
||||
return {
|
||||
content: [{ type: "text", text: formatToolText(result) }],
|
||||
details: result,
|
||||
};
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
},
|
||||
});
|
||||
|
||||
pi.registerTool({
|
||||
name: "alpha_read_code",
|
||||
label: "Alpha Read Code",
|
||||
description: "Read files from a paper's GitHub repository through alphaXiv.",
|
||||
description: "Read files from a paper's GitHub repository. Use '/' for repo overview.",
|
||||
parameters: Type.Object({
|
||||
githubUrl: Type.String({
|
||||
description: "GitHub repository URL for the paper implementation.",
|
||||
}),
|
||||
path: Type.Optional(
|
||||
Type.String({
|
||||
description: "Repository path to inspect. Use / for the repo overview.",
|
||||
}),
|
||||
),
|
||||
githubUrl: Type.String({ description: "GitHub repository URL." }),
|
||||
path: Type.Optional(Type.String({ description: "File or directory path. Default: '/'" })),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
try {
|
||||
const result = await readPaperCode(params.githubUrl, params.path?.trim() || "/");
|
||||
return {
|
||||
content: [{ type: "text", text: formatToolText(result) }],
|
||||
details: result,
|
||||
};
|
||||
} finally {
|
||||
await disconnect();
|
||||
}
|
||||
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,183 +0,0 @@
|
||||
import { execFile, spawn } from "node:child_process";
|
||||
import { mkdir, mkdtemp, readFile, stat, writeFile } from "node:fs/promises";
|
||||
import { tmpdir } from "node:os";
|
||||
import { basename, dirname, extname, join } from "node:path";
|
||||
import { pathToFileURL } from "node:url";
|
||||
import { promisify } from "node:util";
|
||||
|
||||
const execFileAsync = promisify(execFile);
|
||||
|
||||
function isMarkdownPath(path: string): boolean {
|
||||
return [".md", ".markdown", ".txt"].includes(extname(path).toLowerCase());
|
||||
}
|
||||
|
||||
function isLatexPath(path: string): boolean {
|
||||
return extname(path).toLowerCase() === ".tex";
|
||||
}
|
||||
|
||||
function wrapCodeAsMarkdown(source: string, filePath: string): string {
|
||||
const language = extname(filePath).replace(/^\./, "") || "text";
|
||||
return `# ${basename(filePath)}\n\n\`\`\`${language}\n${source}\n\`\`\`\n`;
|
||||
}
|
||||
|
||||
export async function openWithDefaultApp(targetPath: string): Promise<void> {
|
||||
const target = pathToFileURL(targetPath).href;
|
||||
if (process.platform === "darwin") {
|
||||
await execFileAsync("open", [target]);
|
||||
return;
|
||||
}
|
||||
if (process.platform === "win32") {
|
||||
await execFileAsync("cmd", ["/c", "start", "", target]);
|
||||
return;
|
||||
}
|
||||
await execFileAsync("xdg-open", [target]);
|
||||
}
|
||||
|
||||
async function runCommandWithInput(
|
||||
command: string,
|
||||
args: string[],
|
||||
input: string,
|
||||
): Promise<{ stdout: string; stderr: string }> {
|
||||
return await new Promise((resolve, reject) => {
|
||||
const child = spawn(command, args, { stdio: ["pipe", "pipe", "pipe"] });
|
||||
const stdoutChunks: Buffer[] = [];
|
||||
const stderrChunks: Buffer[] = [];
|
||||
|
||||
child.stdout.on("data", (chunk: Buffer | string) => {
|
||||
stdoutChunks.push(typeof chunk === "string" ? Buffer.from(chunk) : chunk);
|
||||
});
|
||||
child.stderr.on("data", (chunk: Buffer | string) => {
|
||||
stderrChunks.push(typeof chunk === "string" ? Buffer.from(chunk) : chunk);
|
||||
});
|
||||
|
||||
child.once("error", reject);
|
||||
child.once("close", (code) => {
|
||||
const stdout = Buffer.concat(stdoutChunks).toString("utf8");
|
||||
const stderr = Buffer.concat(stderrChunks).toString("utf8");
|
||||
if (code === 0) {
|
||||
resolve({ stdout, stderr });
|
||||
return;
|
||||
}
|
||||
reject(new Error(`${command} failed with exit code ${code}${stderr ? `: ${stderr.trim()}` : ""}`));
|
||||
});
|
||||
|
||||
child.stdin.end(input);
|
||||
});
|
||||
}
|
||||
|
||||
export async function renderHtmlPreview(filePath: string): Promise<string> {
|
||||
const source = await readFile(filePath, "utf8");
|
||||
const pandocCommand = process.env.PANDOC_PATH?.trim() || "pandoc";
|
||||
const inputFormat = isLatexPath(filePath)
|
||||
? "latex"
|
||||
: "markdown+lists_without_preceding_blankline+tex_math_dollars+autolink_bare_uris-raw_html";
|
||||
const markdown = isLatexPath(filePath) || isMarkdownPath(filePath) ? source : wrapCodeAsMarkdown(source, filePath);
|
||||
const args = ["-f", inputFormat, "-t", "html5", "--mathml", "--wrap=none", `--resource-path=${dirname(filePath)}`];
|
||||
const { stdout } = await runCommandWithInput(pandocCommand, args, markdown);
|
||||
const html = `<!doctype html><html><head><meta charset="utf-8" /><base href="${pathToFileURL(dirname(filePath) + "/").href}" /><title>${basename(filePath)}</title><style>
|
||||
:root{
|
||||
--bg:#faf7f2;
|
||||
--paper:#fffdf9;
|
||||
--border:#d7cec1;
|
||||
--text:#1f1c18;
|
||||
--muted:#6c645a;
|
||||
--code:#f3eee6;
|
||||
--link:#0f6d8c;
|
||||
--quote:#8b7f70;
|
||||
}
|
||||
@media (prefers-color-scheme: dark){
|
||||
:root{
|
||||
--bg:#161311;
|
||||
--paper:#1d1916;
|
||||
--border:#3b342d;
|
||||
--text:#ebe3d6;
|
||||
--muted:#b4ab9f;
|
||||
--code:#221d19;
|
||||
--link:#8ac6d6;
|
||||
--quote:#a89d8f;
|
||||
}
|
||||
}
|
||||
body{
|
||||
font-family:Charter,"Iowan Old Style","Palatino Linotype","Book Antiqua",Palatino,Georgia,serif;
|
||||
margin:0;
|
||||
background:var(--bg);
|
||||
color:var(--text);
|
||||
line-height:1.7;
|
||||
}
|
||||
main{
|
||||
max-width:900px;
|
||||
margin:2rem auto 4rem;
|
||||
padding:2.5rem 3rem;
|
||||
background:var(--paper);
|
||||
border:1px solid var(--border);
|
||||
border-radius:18px;
|
||||
box-shadow:0 12px 40px rgba(0,0,0,.06);
|
||||
}
|
||||
h1,h2,h3,h4,h5,h6{
|
||||
font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;
|
||||
line-height:1.2;
|
||||
margin-top:1.5em;
|
||||
}
|
||||
h1{font-size:2.2rem;border-bottom:1px solid var(--border);padding-bottom:.35rem;}
|
||||
h2{font-size:1.6rem;border-bottom:1px solid var(--border);padding-bottom:.25rem;}
|
||||
p,ul,ol,blockquote,table{margin:1rem 0;}
|
||||
pre,code{font-family:ui-monospace,SFMono-Regular,Menlo,monospace}
|
||||
pre{
|
||||
background:var(--code);
|
||||
border:1px solid var(--border);
|
||||
border-radius:12px;
|
||||
padding:1rem 1.1rem;
|
||||
overflow:auto;
|
||||
}
|
||||
code{
|
||||
background:var(--code);
|
||||
padding:.12rem .28rem;
|
||||
border-radius:6px;
|
||||
}
|
||||
a{color:var(--link);text-decoration:none}
|
||||
a:hover{text-decoration:underline}
|
||||
img{max-width:100%}
|
||||
blockquote{
|
||||
border-left:4px solid var(--border);
|
||||
padding-left:1rem;
|
||||
color:var(--quote);
|
||||
}
|
||||
table{border-collapse:collapse;width:100%}
|
||||
th,td{border:1px solid var(--border);padding:.55rem .7rem;text-align:left}
|
||||
</style></head><body><main>${stdout}</main></body></html>`;
|
||||
const tempDir = await mkdtemp(join(tmpdir(), "feynman-preview-"));
|
||||
const htmlPath = join(tempDir, `${basename(filePath)}.html`);
|
||||
await writeFile(htmlPath, html, "utf8");
|
||||
return htmlPath;
|
||||
}
|
||||
|
||||
export async function renderPdfPreview(filePath: string): Promise<string> {
|
||||
const source = await readFile(filePath, "utf8");
|
||||
const pandocCommand = process.env.PANDOC_PATH?.trim() || "pandoc";
|
||||
const pdfEngine = process.env.PANDOC_PDF_ENGINE?.trim() || "xelatex";
|
||||
const inputFormat = isLatexPath(filePath)
|
||||
? "latex"
|
||||
: "markdown+lists_without_preceding_blankline+tex_math_dollars+autolink_bare_uris-raw_html";
|
||||
const markdown = isLatexPath(filePath) || isMarkdownPath(filePath) ? source : wrapCodeAsMarkdown(source, filePath);
|
||||
const tempDir = await mkdtemp(join(tmpdir(), "feynman-preview-"));
|
||||
const pdfPath = join(tempDir, `${basename(filePath)}.pdf`);
|
||||
const args = [
|
||||
"-f",
|
||||
inputFormat,
|
||||
"-o",
|
||||
pdfPath,
|
||||
`--pdf-engine=${pdfEngine}`,
|
||||
`--resource-path=${dirname(filePath)}`,
|
||||
];
|
||||
await runCommandWithInput(pandocCommand, args, markdown);
|
||||
return pdfPath;
|
||||
}
|
||||
|
||||
export async function pathExists(path: string): Promise<boolean> {
|
||||
try {
|
||||
await stat(path);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -1,14 +1,70 @@
|
||||
import { mkdir, stat, writeFile } from "node:fs/promises";
|
||||
import { dirname, resolve as resolvePath } from "node:path";
|
||||
import { mkdir, readdir, readFile, stat, writeFile } from "node:fs/promises";
|
||||
import { join, relative, resolve as resolvePath } from "node:path";
|
||||
|
||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
|
||||
import { getExtensionCommandSpec } from "../../metadata/commands.mjs";
|
||||
import { renderHtmlPreview, renderPdfPreview, openWithDefaultApp, pathExists } from "./preview.js";
|
||||
import { buildProjectAgentsTemplate, buildSessionLogsReadme } from "./project-scaffold.js";
|
||||
import { formatToolText } from "./shared.js";
|
||||
import { searchSessionTranscripts } from "./session-search.js";
|
||||
|
||||
async function pathExists(path: string): Promise<boolean> {
|
||||
try {
|
||||
await stat(path);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
const ARTIFACT_DIRS = ["papers", "outputs", "experiments", "notes"];
|
||||
const ARTIFACT_EXTS = new Set([".md", ".tex", ".pdf", ".py", ".csv", ".json", ".html", ".txt", ".log"]);
|
||||
|
||||
async function collectArtifacts(cwd: string): Promise<{ label: string; path: string }[]> {
|
||||
const items: { label: string; path: string; mtime: number }[] = [];
|
||||
|
||||
for (const dir of ARTIFACT_DIRS) {
|
||||
const dirPath = resolvePath(cwd, dir);
|
||||
if (!(await pathExists(dirPath))) continue;
|
||||
|
||||
const walk = async (current: string): Promise<void> => {
|
||||
let entries;
|
||||
try {
|
||||
entries = await readdir(current, { withFileTypes: true });
|
||||
} catch {
|
||||
return;
|
||||
}
|
||||
for (const entry of entries) {
|
||||
const full = join(current, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
await walk(full);
|
||||
} else if (ARTIFACT_EXTS.has(entry.name.slice(entry.name.lastIndexOf(".")))) {
|
||||
const rel = relative(cwd, full);
|
||||
let title = "";
|
||||
try {
|
||||
const head = await readFile(full, "utf8").then((c) => c.slice(0, 200));
|
||||
const match = head.match(/^#\s+(.+)/m);
|
||||
if (match) title = match[1]!.trim();
|
||||
} catch {}
|
||||
const info = await stat(full).catch(() => null);
|
||||
const mtime = info?.mtimeMs ?? 0;
|
||||
const size = info ? formatSize(info.size) : "";
|
||||
const titlePart = title ? ` — ${title}` : "";
|
||||
items.push({ label: `${rel}${titlePart} (${size})`, path: rel, mtime });
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
await walk(dirPath);
|
||||
}
|
||||
|
||||
items.sort((a, b) => b.mtime - a.mtime);
|
||||
return items;
|
||||
}
|
||||
|
||||
function formatSize(bytes: number): string {
|
||||
if (bytes < 1024) return `${bytes}B`;
|
||||
if (bytes < 1024 * 1024) return `${Math.round(bytes / 1024)}KB`;
|
||||
return `${(bytes / (1024 * 1024)).toFixed(1)}MB`;
|
||||
}
|
||||
|
||||
export function registerInitCommand(pi: ExtensionAPI): void {
|
||||
pi.registerCommand("init", {
|
||||
@@ -45,73 +101,23 @@ export function registerInitCommand(pi: ExtensionAPI): void {
|
||||
});
|
||||
}
|
||||
|
||||
export function registerSessionSearchTool(pi: ExtensionAPI): void {
|
||||
pi.registerTool({
|
||||
name: "session_search",
|
||||
label: "Session Search",
|
||||
description: "Search prior Feynman session transcripts to recover what was done, said, or written before.",
|
||||
parameters: Type.Object({
|
||||
query: Type.String({
|
||||
description: "Search query to look for in past sessions.",
|
||||
}),
|
||||
limit: Type.Optional(
|
||||
Type.Number({
|
||||
description: "Maximum number of sessions to return. Defaults to 3.",
|
||||
}),
|
||||
),
|
||||
}),
|
||||
async execute(_toolCallId, params) {
|
||||
const result = await searchSessionTranscripts(params.query, Math.max(1, Math.min(params.limit ?? 3, 8)));
|
||||
return {
|
||||
content: [{ type: "text", text: formatToolText(result) }],
|
||||
details: result,
|
||||
};
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
export function registerPreviewTool(pi: ExtensionAPI): void {
|
||||
pi.registerTool({
|
||||
name: "preview_file",
|
||||
label: "Preview File",
|
||||
description: "Open a markdown, LaTeX, PDF, or code artifact in the browser or a PDF viewer for human review. Rendered HTML/PDF previews are temporary and do not replace the source artifact.",
|
||||
parameters: Type.Object({
|
||||
path: Type.String({
|
||||
description: "Path to the file to preview.",
|
||||
}),
|
||||
target: Type.Optional(
|
||||
Type.String({
|
||||
description: "Preview target: browser or pdf. Defaults to browser.",
|
||||
}),
|
||||
),
|
||||
}),
|
||||
async execute(_toolCallId, params, _signal, _onUpdate, ctx) {
|
||||
const target = (params.target?.trim().toLowerCase() || "browser");
|
||||
if (target !== "browser" && target !== "pdf") {
|
||||
throw new Error("target must be browser or pdf");
|
||||
}
|
||||
|
||||
const resolvedPath = resolvePath(ctx.cwd, params.path);
|
||||
const openedPath =
|
||||
resolvePath(resolvedPath).toLowerCase().endsWith(".pdf") && target === "pdf"
|
||||
? resolvedPath
|
||||
: target === "pdf"
|
||||
? await renderPdfPreview(resolvedPath)
|
||||
: await renderHtmlPreview(resolvedPath);
|
||||
|
||||
await mkdir(dirname(openedPath), { recursive: true }).catch(() => {});
|
||||
await openWithDefaultApp(openedPath);
|
||||
|
||||
const result = {
|
||||
sourcePath: resolvedPath,
|
||||
target,
|
||||
openedPath,
|
||||
temporaryPreview: openedPath !== resolvedPath,
|
||||
};
|
||||
return {
|
||||
content: [{ type: "text", text: formatToolText(result) }],
|
||||
details: result,
|
||||
};
|
||||
export function registerOutputsCommand(pi: ExtensionAPI): void {
|
||||
pi.registerCommand("outputs", {
|
||||
description: "Browse all research artifacts (papers, outputs, experiments, notes).",
|
||||
handler: async (_args, ctx) => {
|
||||
const items = await collectArtifacts(ctx.cwd);
|
||||
if (items.length === 0) {
|
||||
ctx.ui.notify("No artifacts found. Use /lit, /draft, /review, or /deepresearch to create some.", "info");
|
||||
return;
|
||||
}
|
||||
|
||||
const selected = await ctx.ui.select(`Artifacts (${items.length})`, items.map((i) => i.label));
|
||||
if (!selected) return;
|
||||
|
||||
const match = items.find((i) => i.label === selected);
|
||||
if (match) {
|
||||
ctx.ui.setEditorText(`read ${match.path}`);
|
||||
}
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,223 +0,0 @@
|
||||
import { readdir, readFile, stat } from "node:fs/promises";
|
||||
import { basename, join } from "node:path";
|
||||
import { pathToFileURL } from "node:url";
|
||||
|
||||
import { getFeynmanHome } from "./shared.js";
|
||||
|
||||
function extractMessageText(message: unknown): string {
|
||||
if (!message || typeof message !== "object") {
|
||||
return "";
|
||||
}
|
||||
|
||||
const content = (message as { content?: unknown }).content;
|
||||
if (typeof content === "string") {
|
||||
return content;
|
||||
}
|
||||
if (!Array.isArray(content)) {
|
||||
return "";
|
||||
}
|
||||
|
||||
return content
|
||||
.map((item) => {
|
||||
if (!item || typeof item !== "object") {
|
||||
return "";
|
||||
}
|
||||
const record = item as { type?: string; text?: unknown; arguments?: unknown; name?: unknown };
|
||||
if (record.type === "text" && typeof record.text === "string") {
|
||||
return record.text;
|
||||
}
|
||||
if (record.type === "toolCall") {
|
||||
const name = typeof record.name === "string" ? record.name : "tool";
|
||||
const args =
|
||||
typeof record.arguments === "string"
|
||||
? record.arguments
|
||||
: record.arguments
|
||||
? JSON.stringify(record.arguments)
|
||||
: "";
|
||||
return `[tool:${name}] ${args}`;
|
||||
}
|
||||
return "";
|
||||
})
|
||||
.filter(Boolean)
|
||||
.join("\n");
|
||||
}
|
||||
|
||||
function buildExcerpt(text: string, query: string, radius = 180): string {
|
||||
const normalizedText = text.replace(/\s+/g, " ").trim();
|
||||
if (!normalizedText) {
|
||||
return "";
|
||||
}
|
||||
|
||||
const lower = normalizedText.toLowerCase();
|
||||
const q = query.toLowerCase();
|
||||
const index = lower.indexOf(q);
|
||||
if (index === -1) {
|
||||
return normalizedText.slice(0, radius * 2) + (normalizedText.length > radius * 2 ? "..." : "");
|
||||
}
|
||||
|
||||
const start = Math.max(0, index - radius);
|
||||
const end = Math.min(normalizedText.length, index + q.length + radius);
|
||||
const prefix = start > 0 ? "..." : "";
|
||||
const suffix = end < normalizedText.length ? "..." : "";
|
||||
return `${prefix}${normalizedText.slice(start, end)}${suffix}`;
|
||||
}
|
||||
|
||||
/**
 * Search prior session transcripts for `query`, returning up to `limit`
 * sessions with excerpted matches.
 *
 * Two strategies are tried in order:
 *  1. If FEYNMAN_PI_NPM_ROOT is set, delegate to the optional
 *     `pi-session-search` indexer package (loaded dynamically so the
 *     dependency stays optional). Any failure — missing package, index
 *     error — falls through silently to strategy 2.
 *  2. Scan `<feynman-home>/sessions/*.jsonl` directly, matching the whole
 *     query or any individual term (>= 2 chars) case-insensitively against
 *     each message's extracted text.
 *
 * Each result carries session metadata plus up to 4 excerpt matches;
 * direct-scan results are ordered by match count, then file mtime.
 */
export async function searchSessionTranscripts(query: string, limit: number): Promise<{
  query: string;
  results: Array<{
    sessionId: string;
    sessionFile: string;
    startedAt?: string;
    cwd?: string;
    matchCount: number;
    topMatches: Array<{ role: string; timestamp?: string; excerpt: string }>;
  }>;
}> {
  const packageRoot = process.env.FEYNMAN_PI_NPM_ROOT;
  if (packageRoot) {
    try {
      // Dynamic import keeps the indexer package an optional dependency.
      const indexerPath = pathToFileURL(
        join(packageRoot, "@kaiserlich-dev", "pi-session-search", "extensions", "indexer.ts"),
      ).href;
      const indexer = await import(indexerPath) as {
        updateIndex?: (onProgress?: (msg: string) => void) => Promise<number>;
        search?: (query: string, limit?: number) => Array<{
          sessionPath: string;
          project: string;
          timestamp: string;
          snippet: string;
          rank: number;
          title: string | null;
        }>;
        getSessionSnippets?: (sessionPath: string, query: string, limit?: number) => string[];
      };

      // Refresh the index first so recent sessions are searchable.
      await indexer.updateIndex?.();
      const results = indexer.search?.(query, limit) ?? [];
      if (results.length > 0) {
        return {
          query,
          results: results.map((result) => ({
            sessionId: basename(result.sessionPath),
            sessionFile: result.sessionPath,
            startedAt: result.timestamp,
            cwd: result.project,
            // The indexer reports ranked sessions, not per-message hits,
            // so matchCount is fixed at 1 here.
            matchCount: 1,
            topMatches: (indexer.getSessionSnippets?.(result.sessionPath, query, 4) ?? [result.snippet])
              .filter(Boolean)
              .map((excerpt) => ({
                role: "match",
                excerpt,
              })),
          })),
        };
      }
    } catch {
      // Fall back to direct JSONL scanning below.
    }
  }

  const sessionDir = join(getFeynmanHome(), "sessions");
  // Individual search terms (>= 2 chars) allow partial matches when the
  // full query string does not appear verbatim.
  const terms = query
    .toLowerCase()
    .split(/\s+/)
    .map((term) => term.trim())
    .filter((term) => term.length >= 2);
  const needle = query.toLowerCase();

  let files: string[] = [];
  try {
    files = (await readdir(sessionDir))
      .filter((entry) => entry.endsWith(".jsonl"))
      .map((entry) => join(sessionDir, entry));
  } catch {
    // Missing or unreadable sessions directory: nothing to search.
    return { query, results: [] };
  }

  const sessions = [];
  for (const file of files) {
    const raw = await readFile(file, "utf8").catch(() => "");
    if (!raw) {
      continue;
    }

    // Session metadata defaults; overwritten by the "session" header record.
    let sessionId = basename(file);
    let startedAt: string | undefined;
    let cwd: string | undefined;
    const matches: Array<{ role: string; timestamp?: string; excerpt: string }> = [];

    // One JSON record per line; malformed lines are skipped.
    for (const line of raw.split("\n")) {
      if (!line.trim()) {
        continue;
      }
      try {
        const record = JSON.parse(line) as {
          type?: string;
          id?: string;
          timestamp?: string;
          cwd?: string;
          message?: { role?: string; content?: unknown };
        };
        if (record.type === "session") {
          sessionId = record.id ?? sessionId;
          startedAt = record.timestamp;
          cwd = record.cwd;
          continue;
        }
        if (record.type !== "message" || !record.message) {
          continue;
        }

        const text = extractMessageText(record.message);
        if (!text) {
          continue;
        }
        const lower = text.toLowerCase();
        // A hit is either the full query or any individual term.
        const matched = lower.includes(needle) || terms.some((term) => lower.includes(term));
        if (!matched) {
          continue;
        }
        matches.push({
          role: record.message.role ?? "unknown",
          timestamp: record.timestamp,
          excerpt: buildExcerpt(text, query),
        });
      } catch {
        continue;
      }
    }

    if (matches.length === 0) {
      continue;
    }

    // mtime is used only as a tiebreaker for sorting below.
    let mtime = 0;
    try {
      mtime = (await stat(file)).mtimeMs;
    } catch {
      mtime = 0;
    }

    sessions.push({
      sessionId,
      sessionFile: file,
      startedAt,
      cwd,
      matchCount: matches.length,
      topMatches: matches.slice(0, 4),
      mtime,
    });
  }

  // Most matches first; ties broken by most recently modified file.
  sessions.sort((a, b) => {
    if (b.matchCount !== a.matchCount) {
      return b.matchCount - a.matchCount;
    }
    return b.mtime - a.mtime;
  });

  return {
    query,
    // Strip the internal mtime field before returning to callers.
    results: sessions.slice(0, limit).map(({ mtime: _mtime, ...session }) => session),
  };
}
|
||||
@@ -1,5 +1,4 @@
|
||||
import { readFileSync } from "node:fs";
|
||||
import { homedir } from "node:os";
|
||||
import { dirname, resolve as resolvePath } from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
|
||||
@@ -15,25 +14,3 @@ export const FEYNMAN_VERSION = (() => {
|
||||
})();
|
||||
|
||||
export { FEYNMAN_ASCII_LOGO as FEYNMAN_AGENT_LOGO } from "../../logo.mjs";
|
||||
|
||||
// Tool names that make up Feynman's research toolset: the alphaXiv paper
// tools plus local session search and file preview.
export const FEYNMAN_RESEARCH_TOOLS = [
  "alpha_search",
  "alpha_get_paper",
  "alpha_ask_paper",
  "alpha_annotate_paper",
  "alpha_list_annotations",
  "alpha_read_code",
  "session_search",
  "preview_file",
];
|
||||
|
||||
export function formatToolText(result: unknown): string {
|
||||
return typeof result === "string" ? result : JSON.stringify(result, null, 2);
|
||||
}
|
||||
|
||||
export function getFeynmanHome(): string {
|
||||
const agentDir = process.env.FEYNMAN_CODING_AGENT_DIR ??
|
||||
process.env.PI_CODING_AGENT_DIR ??
|
||||
resolvePath(homedir(), ".feynman", "agent");
|
||||
return dirname(agentDir);
|
||||
}
|
||||
|
||||
@@ -37,9 +37,7 @@ export function readPromptSpecs(appRoot) {
|
||||
export const extensionCommandSpecs = [
|
||||
{ name: "help", args: "", section: "Project & Session", description: "Show grouped Feynman commands and prefill the editor with a selected command.", publicDocs: true },
|
||||
{ name: "init", args: "", section: "Project & Session", description: "Bootstrap AGENTS.md and session-log folders for a research project.", publicDocs: true },
|
||||
{ name: "alpha-login", args: "", section: "Setup", description: "Sign in to alphaXiv from inside Feynman.", publicDocs: true },
|
||||
{ name: "alpha-status", args: "", section: "Setup", description: "Show alphaXiv authentication status.", publicDocs: true },
|
||||
{ name: "alpha-logout", args: "", section: "Setup", description: "Clear alphaXiv auth from inside Feynman.", publicDocs: true },
|
||||
{ name: "outputs", args: "", section: "Project & Session", description: "Browse all research artifacts (papers, outputs, experiments, notes).", publicDocs: true },
|
||||
];
|
||||
|
||||
export const livePackageCommandGroups = [
|
||||
|
||||
8
package-lock.json
generated
8
package-lock.json
generated
@@ -1,13 +1,13 @@
|
||||
{
|
||||
"name": "@companion-ai/feynman",
|
||||
"version": "0.2.12",
|
||||
"version": "0.2.15",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@companion-ai/feynman",
|
||||
"version": "0.2.12",
|
||||
"hasInstallScript": true,
|
||||
"version": "0.2.15",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@companion-ai/alpha-hub": "^0.1.2",
|
||||
"@mariozechner/pi-ai": "^0.62.0",
|
||||
@@ -24,7 +24,7 @@
|
||||
"typescript": "^5.9.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20.18.1"
|
||||
"node": ">=20.19.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@anthropic-ai/sdk": {
|
||||
|
||||
12
package.json
12
package.json
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"name": "@companion-ai/feynman",
|
||||
"version": "0.2.12",
|
||||
"version": "0.2.15",
|
||||
"description": "Research-first CLI agent built on Pi and alphaXiv",
|
||||
"license": "MIT",
|
||||
"type": "module",
|
||||
"engines": {
|
||||
"node": ">=20.18.1"
|
||||
"node": ">=20.19.0"
|
||||
},
|
||||
"bin": {
|
||||
"feynman": "bin/feynman.js"
|
||||
@@ -26,15 +26,16 @@
|
||||
"scripts/",
|
||||
"skills/",
|
||||
"AGENTS.md",
|
||||
"CONTRIBUTING.md",
|
||||
"README.md",
|
||||
".env.example"
|
||||
],
|
||||
"scripts": {
|
||||
"preinstall": "node ./scripts/check-node-version.mjs",
|
||||
"build": "tsc -p tsconfig.build.json",
|
||||
"build:native-bundle": "node ./scripts/build-native-bundle.mjs",
|
||||
"dev": "tsx src/index.ts",
|
||||
"prepack": "node ./scripts/prepare-runtime-workspace.mjs",
|
||||
"postinstall": "node ./scripts/patch-embedded-pi.mjs",
|
||||
"prepack": "node ./scripts/clean-publish-artifacts.mjs && npm run build && node ./scripts/prepare-runtime-workspace.mjs",
|
||||
"start": "tsx src/index.ts",
|
||||
"start:dist": "node ./bin/feynman.js",
|
||||
"test": "node --import tsx --test --test-concurrency=1 tests/*.test.ts",
|
||||
@@ -52,6 +53,9 @@
|
||||
],
|
||||
"prompts": [
|
||||
"./prompts"
|
||||
],
|
||||
"skills": [
|
||||
"./skills"
|
||||
]
|
||||
},
|
||||
"dependencies": {
|
||||
|
||||
@@ -27,6 +27,8 @@ Ask the user where to run:
|
||||
- **New git branch** — create a branch so main stays clean
|
||||
- **Virtual environment** — create an isolated venv/conda env first
|
||||
- **Docker** — run experiment code inside an isolated Docker container
|
||||
- **Modal** — run on Modal's serverless GPU infrastructure. Write Modal-decorated scripts and execute with `modal run`. Best for GPU-heavy benchmarks with no persistent state between iterations. Requires `modal` CLI.
|
||||
- **RunPod** — provision a GPU pod via `runpodctl` and run iterations there over SSH. Best for experiments needing persistent state, large datasets, or SSH access between iterations. Requires `runpodctl` CLI.
|
||||
|
||||
Do not proceed without a clear answer.
|
||||
|
||||
|
||||
@@ -14,6 +14,8 @@ Design a replication plan for: $@
|
||||
- **Local** — run in the current working directory
|
||||
- **Virtual environment** — create an isolated venv/conda env first
|
||||
- **Docker** — run experiment code inside an isolated Docker container
|
||||
- **Modal** — run on Modal's serverless GPU infrastructure. Write a Modal-decorated Python script and execute with `modal run <script.py>`. Best for burst GPU jobs that don't need persistent state. Requires `modal` CLI (`pip install modal && modal setup`).
|
||||
- **RunPod** — provision a GPU pod on RunPod and SSH in for execution. Use `runpodctl` to create pods, transfer files, and manage lifecycle. Best for long-running experiments or when you need SSH access and persistent storage. Requires `runpodctl` CLI and `RUNPOD_API_KEY`.
|
||||
- **Plan only** — produce the replication plan without executing
|
||||
4. **Execute** — If the user chose an execution environment, implement and run the replication steps there. Save notes, scripts, raw outputs, and results to disk in a reproducible layout. Do not call the outcome replicated unless the planned checks actually passed.
|
||||
5. **Log** — For multi-step or resumable replication work, append concise entries to `CHANGELOG.md` after meaningful progress, failed attempts, major verification outcomes, and before stopping. Record the active objective, what changed, what was checked, and the next step.
|
||||
|
||||
266
prompts/valichord.md
Normal file
266
prompts/valichord.md
Normal file
@@ -0,0 +1,266 @@
|
||||
---
|
||||
description: Submit a replication as a cryptographically verified ValiChord attestation, discover studies awaiting independent validation, query Harmony Records and reproducibility badges, or assist researchers in preparing a study for the validation pipeline.
|
||||
section: Research Workflows
|
||||
topLevelCli: true
|
||||
---
|
||||
|
||||
# ValiChord Validation Workflow
|
||||
|
||||
ValiChord is a distributed peer-to-peer system for scientific reproducibility verification, built on Holochain. It implements a blind commit-reveal protocol in Rust across four DNAs, producing Harmony Records — immutable, cryptographically verifiable proofs that independent parties reproduced the same findings without coordinating. Verified studies receive automatic reproducibility badges (Gold/Silver/Bronze); validators accumulate a per-discipline reputation score across rounds.
|
||||
|
||||
This workflow integrates Feynman at three levels: as a **validator agent** running the full commit-reveal protocol; as a **researcher's assistant** helping prepare a study for submission; and as a **query tool** surfacing reproducibility status during research.
|
||||
|
||||
**Live demo of the commit-reveal protocol**: https://youtu.be/DQ5wZSD1YEw
|
||||
|
||||
---
|
||||
|
||||
## ValiChord's four-DNA architecture
|
||||
|
||||
| DNA | Name | Type | Role |
|
||||
|-----|------|------|------|
|
||||
| 1 | Researcher Repository | Private, single-agent | Researcher's local archive. Stores study, pre-registered protocol, data snapshots, deviation declarations. Only SHA-256 hashes ever leave this DNA. |
|
||||
| 2 | Validator Workspace | Private, single-agent | Feynman's working space. Stores task privately. Seals the blind commitment here — content never propagates to the DHT. |
|
||||
| 3 | Attestation | Shared DHT | Coordination layer. Manages validation requests, validator profiles, study claims, commitment anchors, phase markers, and public attestations. 36 zome functions. |
|
||||
| 4 | Governance | Public DHT | Final record layer. Assembles HarmonyRecords, issues reproducibility badges, tracks validator reputation, records governance decisions. All read functions accessible via HTTP Gateway without running a node. |
|
||||
|
||||
The key guarantee: a validator's findings are cryptographically sealed (`SHA-256(msgpack(attestation) || nonce)`) before the reveal phase opens. Neither party can adjust findings after seeing the other's results. The researcher runs a parallel commit-reveal — locking their expected results before the validators reveal — so no party can adapt to seeing the other's outcome.
|
||||
|
||||
---
|
||||
|
||||
## Workflow A: Feynman as validator agent
|
||||
|
||||
### Step 0: Publish validator profile (one-time setup)
|
||||
|
||||
On first use, publish Feynman's public profile to DNA 3 so it appears in validator discovery indexes and conflict-of-interest checks:
|
||||
|
||||
```
|
||||
publish_validator_profile(profile: ValidatorProfile)
|
||||
```
|
||||
|
||||
Key fields:
|
||||
- `agent_type` — `AutomatedTool` (AI agents are first-class validators; the protocol makes no distinction between human and machine validators)
|
||||
- `disciplines` — list of disciplines Feynman can validate (e.g. ComputationalBiology, Statistics)
|
||||
- `certification_tier` — starts as `Provisional`; advances to `Certified` after 5+ validations with ≥60% agreement rate, `Senior` after 20+ with ≥80%
|
||||
|
||||
If a profile already exists, use `update_validator_profile` to merge changes.
|
||||
|
||||
### Step 1: Gather inputs or discover study
|
||||
|
||||
**If the user provides a `request_ref`**: use it directly.
|
||||
|
||||
**If Feynman is proactively discovering work**: query the pending queue in DNA 3:
|
||||
|
||||
```
|
||||
get_pending_requests_for_discipline(discipline: Discipline)
|
||||
```
|
||||
|
||||
Returns all unclaimed `ValidationRequest` entries for the discipline. Each contains:
|
||||
- `data_hash` — the ExternalHash identifier (used as `request_ref` throughout)
|
||||
- `num_validators_required` — quorum needed to close the round
|
||||
- `validation_tier` — Basic / Enhanced / Comprehensive
|
||||
- `access_urls` — where to fetch the data and code
|
||||
|
||||
Optionally assess study complexity before committing:
|
||||
|
||||
```
|
||||
assess_difficulty(input: AssessDifficultyInput)
|
||||
```
|
||||
|
||||
Scores code volume, dependency count, documentation quality, data accessibility, and environment complexity. Returns predicted duration and confidence. Use this to decide whether to proceed before claiming.
|
||||
|
||||
If replication results are not yet available, suggest `/replicate` first.
|
||||
|
||||
### Step 2: Claim the study
|
||||
|
||||
Before receiving a formal task assignment, register intent to validate via DNA 3:
|
||||
|
||||
```
|
||||
claim_study(request_ref: ExternalHash)
|
||||
```
|
||||
|
||||
This:
|
||||
- Reserves a validator slot (enforced capacity: no over-subscription)
|
||||
- Triggers conflict-of-interest check — rejects claim if Feynman's institution matches the researcher's
|
||||
- Records a `StudyClaim` entry on the shared DHT
|
||||
|
||||
If a claimed validator goes dark, any other validator can free the slot:
|
||||
|
||||
```
|
||||
reclaim_abandoned_claim(input: ReclaimInput)
|
||||
```
|
||||
|
||||
### Step 3: Receive task and seal private attestation — Commit phase
|
||||
|
||||
Connect to the ValiChord conductor via AppWebSocket. Using DNA 2 (Validator Workspace):
|
||||
|
||||
```
|
||||
receive_task(request_ref, discipline, deadline_secs, validation_focus, time_cap_secs, compensation_tier)
|
||||
```
|
||||
|
||||
`validation_focus` specifies which aspect Feynman is validating:
|
||||
- `ComputationalReproducibility` — re-run code, check numerical outputs
|
||||
- `PreCommitmentAdherence` — verify results match pre-registered analysis plan
|
||||
- `MethodologicalReview` — assess statistical choices and protocol validity
|
||||
|
||||
Then seal the private attestation — this is the blind commitment:
|
||||
|
||||
```
|
||||
seal_private_attestation(task_hash, attestation)
|
||||
```
|
||||
|
||||
Where `attestation` includes:
|
||||
- `outcome` — `Reproduced` / `PartiallyReproduced` / `FailedToReproduce` / `UnableToAssess`
|
||||
- `outcome_summary` — key metrics, effect direction, confidence interval overlap, overall agreement
|
||||
- `confidence` — High / Medium / Low
|
||||
- `time_invested_secs` and `time_breakdown` — environment_setup, data_acquisition, code_execution, troubleshooting
|
||||
- `computational_resources` — whether personal hardware, HPC, GPU, or cloud was required; estimated cost in pence
|
||||
- `deviation_flags` — any undeclared departures from the original protocol (type, severity, evidence)
|
||||
|
||||
The coordinator computes `commitment_hash = SHA-256(msgpack(attestation) || nonce)` and writes a `CommitmentAnchor` to DNA 3's shared DHT. The attestation content remains private in DNA 2.
|
||||
|
||||
Save `task_hash` and `commitment_hash` to `outputs/<slug>-valichord-commit.json`.
|
||||
|
||||
### Step 4: Wait for RevealOpen phase
|
||||
|
||||
Poll DNA 3 (Attestation) until the phase transitions:
|
||||
|
||||
```
|
||||
get_current_phase(request_ref: ExternalHash)
|
||||
```
|
||||
|
||||
Returns `null` (still commit phase), `"RevealOpen"`, or `"Complete"`. Poll every 30 seconds. The phase opens automatically when the `CommitmentAnchor` count reaches `num_validators_required` — no manual trigger required.
|
||||
|
||||
During this wait, the researcher also runs their parallel commit-reveal: they lock their expected results via `publish_researcher_commitment` before the reveal phase opens, then reveal via `reveal_researcher_result` after all validators have submitted. No party — researcher or validator — can adapt to seeing the other's outcome.
|
||||
|
||||
### Step 5: Submit attestation — Reveal phase
|
||||
|
||||
When phase is `RevealOpen`, publish the full attestation to the shared DHT via DNA 3:
|
||||
|
||||
```
|
||||
submit_attestation(attestation, nonce)
|
||||
```
|
||||
|
||||
The coordinator verifies `SHA-256(msgpack(attestation) || nonce) == CommitmentAnchor.commitment_hash` before writing. This prevents adaptive reveals — the attestation must match exactly what was committed.
|
||||
|
||||
### Step 6: Retrieve Harmony Record and badges
|
||||
|
||||
Call DNA 4 (Governance) explicitly after `submit_attestation` returns — DHT propagation means the ValidatorToAttestation link may not be visible within the same transaction:
|
||||
|
||||
```
|
||||
check_and_create_harmony_record(request_ref)
|
||||
get_harmony_record(request_ref)
|
||||
get_badges_for_study(request_ref)
|
||||
```
|
||||
|
||||
The **Harmony Record** contains:
|
||||
- `outcome` — the majority reproduced/not-reproduced finding
|
||||
- `agreement_level` — ExactMatch / WithinTolerance / DirectionalMatch / Divergent / UnableToAssess
|
||||
- `participating_validators` — array of validator agent keys
|
||||
- `validation_duration_secs`
|
||||
- `ActionHash` — the immutable on-chain identifier
|
||||
|
||||
**Reproducibility badges** are automatically issued when the Harmony Record is created:
|
||||
|
||||
| Badge | Threshold |
|
||||
|-------|-----------|
|
||||
| GoldReproducible | ≥7 validators, ≥90% agreement |
|
||||
| SilverReproducible | ≥5 validators, ≥70% agreement |
|
||||
| BronzeReproducible | ≥3 validators, ≥50% agreement |
|
||||
| FailedReproduction | Divergent outcomes |
|
||||
|
||||
Save the full record and badges to `outputs/<slug>-harmony-record.json`.
|
||||
|
||||
### Step 7: Check updated reputation
|
||||
|
||||
After each validation round, Feynman's reputation record in DNA 4 is updated:
|
||||
|
||||
```
|
||||
get_validator_reputation(validator: AgentPubKey)
|
||||
```
|
||||
|
||||
Returns per-discipline scores: total validations, agreement rate, average time, and current `CertificationTier` (Provisional → Certified → Senior). Reputation is a long-term asset — AI validators accumulate a cryptographically verifiable track record across all ValiChord rounds they participate in.
|
||||
|
||||
### Step 8: Report to user
|
||||
|
||||
Present:
|
||||
- Outcome and agreement level
|
||||
- Reproducibility badge(s) issued to the study
|
||||
- Feynman's updated reputation score for this discipline
|
||||
- ActionHash — the permanent public identifier for this Harmony Record
|
||||
- Confirmation that the record is written to the Governance DHT and accessible via HTTP Gateway without any special infrastructure
|
||||
- Path to saved outputs
|
||||
|
||||
---
|
||||
|
||||
## Workflow B: Query existing Harmony Record
|
||||
|
||||
`get_harmony_record` and `get_badges_for_study` in DNA 4 are `Unrestricted` functions — accessible via Holochain's HTTP Gateway without connecting to a conductor or running a node.
|
||||
|
||||
```
|
||||
GET <http_gateway_url>/get_harmony_record/<request_ref_b64>
|
||||
GET <http_gateway_url>/get_badges_for_study/<request_ref_b64>
|
||||
```
|
||||
|
||||
Use this to:
|
||||
- Check reproducibility status of a cited study during `/deepresearch`
|
||||
- Surface Harmony Records and badges in research summaries
|
||||
- Verify whether a study has undergone independent validation before recommending it
|
||||
|
||||
The following read functions are also unrestricted on DNA 3:
|
||||
`get_attestations_for_request`, `get_validators_for_discipline`, `get_pending_requests_for_discipline`, `get_validator_profile`, `get_current_phase`, `get_difficulty_assessment`, `get_researcher_reveal`
|
||||
|
||||
---
|
||||
|
||||
## Workflow C: Proactive discipline queue monitoring
|
||||
|
||||
Feynman can act as a standing validator for a discipline — periodically checking for new studies that need validation without waiting to be assigned:
|
||||
|
||||
```
|
||||
get_pending_requests_for_discipline(discipline: Discipline)
|
||||
```
|
||||
|
||||
Returns all unclaimed `ValidationRequest` entries. For each, optionally run `assess_difficulty` to estimate workload before claiming.
|
||||
|
||||
This enables Feynman to operate as an autonomous reproducibility agent: polling the queue, assessing difficulty, claiming appropriate studies, and running the full Workflow A cycle unsupervised.
|
||||
|
||||
---
|
||||
|
||||
## Workflow D: Researcher preparation assistant
|
||||
|
||||
Before a study enters the validation pipeline, Feynman can assist the researcher in preparing it via DNA 1 (Researcher Repository). This workflow runs on the researcher's side, not the validator's.
|
||||
|
||||
**Register the study:**
|
||||
```
|
||||
register_study(study: ResearchStudy)
|
||||
```
|
||||
|
||||
**Pre-register the analysis protocol** (immutable once written — creates a tamper-evident commitment to the analysis plan before data collection or validation begins):
|
||||
```
|
||||
register_protocol(input: RegisterProtocolInput)
|
||||
```
|
||||
|
||||
**Take a cryptographic data snapshot** (records a SHA-256 hash of the dataset at a point in time — proves data was not modified after validation began):
|
||||
```
|
||||
take_data_snapshot(input: TakeDataSnapshotInput)
|
||||
```
|
||||
|
||||
**Declare any deviations** from the pre-registered plan before the commit phase opens (pre-commit transparency):
|
||||
```
|
||||
declare_deviation(input: DeclareDeviationInput)
|
||||
```
|
||||
|
||||
Only hashes ever leave DNA 1 — the raw data and protocol text remain on the researcher's device.
|
||||
|
||||
**Repository Readiness Checker**: ValiChord also ships a standalone audit tool that scans a research repository for 30+ reproducibility failure modes before submission — missing dependency files, absolute paths, undeclared environment requirements, data documentation gaps, human-subjects data exposure risks, and more. Feynman is the natural interface for this tool: running the audit, interpreting findings in plain language, guiding the researcher through fixes, and confirming the repository meets the bar for independent validation. See: https://github.com/topeuph-ai/ValiChord
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- AI agents are first-class participants in ValiChord's protocol. Feynman can autonomously publish profiles, claim studies, seal attestations, wait for phase transitions, and submit reveals — the protocol makes no distinction between human and AI validators.
|
||||
- ValiChord's privacy guarantee is structural, not policy-based. DNA 1 (researcher data) and DNA 2 (validator workspace) are single-agent private DHTs — propagation to the shared network is architecturally impossible, not merely restricted.
|
||||
- All 72 zome functions across the four DNAs are callable via AppWebSocket. The 20+ `Unrestricted` read functions on DNA 3 and DNA 4 are additionally accessible via HTTP Gateway without any Holochain node.
|
||||
- If a validation round stalls due to validator dropout, `force_finalize_round` in DNA 4 closes it after a 7-day timeout with a reduced quorum, preventing indefinite blocking.
|
||||
- Live demo (full commit-reveal cycle, Harmony Record generated): https://youtu.be/DQ5wZSD1YEw
|
||||
- Running the demo: `bash demo/start.sh` in a GitHub Codespace, then open port 8888 publicly
|
||||
- ValiChord repo: https://github.com/topeuph-ai/ValiChord
|
||||
@@ -6,13 +6,45 @@ import { spawnSync } from "node:child_process";
|
||||
const appRoot = resolve(import.meta.dirname, "..");
|
||||
const packageJson = JSON.parse(readFileSync(resolve(appRoot, "package.json"), "utf8"));
|
||||
const packageLockPath = resolve(appRoot, "package-lock.json");
|
||||
const bundledNodeVersion = process.env.FEYNMAN_BUNDLED_NODE_VERSION ?? process.version.slice(1);
|
||||
const minBundledNodeVersion = packageJson.engines?.node?.replace(/^>=/, "").trim() || process.version.slice(1);
|
||||
|
||||
function parseSemver(version) {
|
||||
const [major = "0", minor = "0", patch = "0"] = version.split(".");
|
||||
return [Number.parseInt(major, 10) || 0, Number.parseInt(minor, 10) || 0, Number.parseInt(patch, 10) || 0];
|
||||
}
|
||||
|
||||
function compareSemver(left, right) {
|
||||
for (let index = 0; index < 3; index += 1) {
|
||||
const diff = left[index] - right[index];
|
||||
if (diff !== 0) return diff;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
function fail(message) {
|
||||
console.error(`[feynman] ${message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
function resolveBundledNodeVersion() {
|
||||
const requestedNodeVersion = process.env.FEYNMAN_BUNDLED_NODE_VERSION?.trim();
|
||||
if (requestedNodeVersion) {
|
||||
if (compareSemver(parseSemver(requestedNodeVersion), parseSemver(minBundledNodeVersion)) < 0) {
|
||||
fail(
|
||||
`FEYNMAN_BUNDLED_NODE_VERSION=${requestedNodeVersion} is below the supported floor ${minBundledNodeVersion}`,
|
||||
);
|
||||
}
|
||||
return requestedNodeVersion;
|
||||
}
|
||||
|
||||
const currentNodeVersion = process.version.slice(1);
|
||||
return compareSemver(parseSemver(currentNodeVersion), parseSemver(minBundledNodeVersion)) < 0
|
||||
? minBundledNodeVersion
|
||||
: currentNodeVersion;
|
||||
}
|
||||
|
||||
const bundledNodeVersion = resolveBundledNodeVersion();
|
||||
|
||||
function resolveCommand(command) {
|
||||
if (process.platform === "win32" && command === "npm") {
|
||||
return "npm.cmd";
|
||||
@@ -136,6 +168,7 @@ function ensureBundledWorkspace() {
|
||||
}
|
||||
|
||||
function copyPackageFiles(appDir) {
|
||||
const releaseDir = resolve(appRoot, "dist", "release");
|
||||
cpSync(resolve(appRoot, "package.json"), resolve(appDir, "package.json"));
|
||||
for (const entry of packageJson.files) {
|
||||
const normalized = entry.endsWith("/") ? entry.slice(0, -1) : entry;
|
||||
@@ -143,7 +176,10 @@ function copyPackageFiles(appDir) {
|
||||
if (!existsSync(source)) continue;
|
||||
const destination = resolve(appDir, normalized);
|
||||
mkdirSync(dirname(destination), { recursive: true });
|
||||
cpSync(source, destination, { recursive: true });
|
||||
cpSync(source, destination, {
|
||||
recursive: true,
|
||||
filter: (path) => path !== releaseDir && !path.startsWith(`${releaseDir}/`),
|
||||
});
|
||||
}
|
||||
|
||||
cpSync(packageLockPath, resolve(appDir, "package-lock.json"));
|
||||
@@ -160,6 +196,9 @@ function installAppDependencies(appDir, stagingRoot) {
|
||||
run("npm", ["ci", "--omit=dev", "--ignore-scripts", "--no-audit", "--no-fund", "--loglevel", "error"], {
|
||||
cwd: depsDir,
|
||||
});
|
||||
run(process.execPath, [resolve(appRoot, "scripts", "prune-runtime-deps.mjs"), depsDir], {
|
||||
cwd: appRoot,
|
||||
});
|
||||
|
||||
cpSync(resolve(depsDir, "node_modules"), resolve(appDir, "node_modules"), { recursive: true });
|
||||
}
|
||||
@@ -270,10 +309,12 @@ function packBundle(bundleRoot, target, outDir) {
|
||||
|
||||
if (target.bundleExtension === "zip") {
|
||||
if (process.platform === "win32") {
|
||||
const bundleDir = dirname(bundleRoot).replace(/'/g, "''");
|
||||
const bundleName = basename(bundleRoot).replace(/'/g, "''");
|
||||
run("powershell", [
|
||||
"-NoProfile",
|
||||
"-Command",
|
||||
`Compress-Archive -Path '${bundleRoot.replace(/'/g, "''")}\\*' -DestinationPath '${archivePath.replace(/'/g, "''")}' -Force`,
|
||||
`Push-Location '${bundleDir}'; Compress-Archive -Path '${bundleName}' -DestinationPath '${archivePath.replace(/'/g, "''")}' -Force; Pop-Location`,
|
||||
]);
|
||||
} else {
|
||||
run("zip", ["-qr", archivePath, basename(bundleRoot)], { cwd: resolve(bundleRoot, "..") });
|
||||
|
||||
40
scripts/check-node-version.mjs
Normal file
40
scripts/check-node-version.mjs
Normal file
@@ -0,0 +1,40 @@
|
||||
const MIN_NODE_VERSION = "20.19.0";
|
||||
|
||||
function parseNodeVersion(version) {
|
||||
const [major = "0", minor = "0", patch = "0"] = version.replace(/^v/, "").split(".");
|
||||
return {
|
||||
major: Number.parseInt(major, 10) || 0,
|
||||
minor: Number.parseInt(minor, 10) || 0,
|
||||
patch: Number.parseInt(patch, 10) || 0,
|
||||
};
|
||||
}
|
||||
|
||||
function compareNodeVersions(left, right) {
|
||||
if (left.major !== right.major) return left.major - right.major;
|
||||
if (left.minor !== right.minor) return left.minor - right.minor;
|
||||
return left.patch - right.patch;
|
||||
}
|
||||
|
||||
function isSupportedNodeVersion(version = process.versions.node) {
|
||||
return compareNodeVersions(parseNodeVersion(version), parseNodeVersion(MIN_NODE_VERSION)) >= 0;
|
||||
}
|
||||
|
||||
function getUnsupportedNodeVersionLines(version = process.versions.node) {
|
||||
const isWindows = process.platform === "win32";
|
||||
return [
|
||||
`feynman requires Node.js ${MIN_NODE_VERSION} or later (detected ${version}).`,
|
||||
isWindows
|
||||
? "Install a newer Node.js from https://nodejs.org, or use the standalone installer:"
|
||||
: "Switch to Node 20 with `nvm install 20 && nvm use 20`, or use the standalone installer:",
|
||||
isWindows
|
||||
? "irm https://feynman.is/install.ps1 | iex"
|
||||
: "curl -fsSL https://feynman.is/install | bash",
|
||||
];
|
||||
}
|
||||
|
||||
if (!isSupportedNodeVersion()) {
|
||||
for (const line of getUnsupportedNodeVersionLines()) {
|
||||
console.error(line);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
8
scripts/clean-publish-artifacts.mjs
Normal file
8
scripts/clean-publish-artifacts.mjs
Normal file
@@ -0,0 +1,8 @@
|
||||
import { rmSync } from "node:fs";
|
||||
import { resolve } from "node:path";
|
||||
|
||||
const appRoot = resolve(import.meta.dirname, "..");
|
||||
const releaseDir = resolve(appRoot, "dist", "release");
|
||||
|
||||
rmSync(releaseDir, { recursive: true, force: true });
|
||||
console.log("[feynman] removed dist/release before npm pack/publish");
|
||||
123
scripts/install/install-skills.ps1
Normal file
123
scripts/install/install-skills.ps1
Normal file
@@ -0,0 +1,123 @@
|
||||
param(
|
||||
[string]$Version = "latest",
|
||||
[ValidateSet("User", "Repo")]
|
||||
[string]$Scope = "User",
|
||||
[string]$TargetDir = ""
|
||||
)
|
||||
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
function Normalize-Version {
|
||||
param([string]$RequestedVersion)
|
||||
|
||||
if (-not $RequestedVersion) {
|
||||
return "latest"
|
||||
}
|
||||
|
||||
switch ($RequestedVersion.ToLowerInvariant()) {
|
||||
"latest" { return "latest" }
|
||||
"stable" { return "latest" }
|
||||
"edge" { throw "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." }
|
||||
default { return $RequestedVersion.TrimStart("v") }
|
||||
}
|
||||
}
|
||||
|
||||
function Resolve-LatestReleaseVersion {
|
||||
$page = Invoke-WebRequest -Uri "https://github.com/getcompanion-ai/feynman/releases/latest"
|
||||
$match = [regex]::Match($page.Content, 'releases/tag/v([0-9][^"''<>\s]*)')
|
||||
if (-not $match.Success) {
|
||||
throw "Failed to resolve the latest Feynman release version."
|
||||
}
|
||||
|
||||
return $match.Groups[1].Value
|
||||
}
|
||||
|
||||
function Resolve-VersionMetadata {
|
||||
param([string]$RequestedVersion)
|
||||
|
||||
$normalizedVersion = Normalize-Version -RequestedVersion $RequestedVersion
|
||||
|
||||
if ($normalizedVersion -eq "latest") {
|
||||
$resolvedVersion = Resolve-LatestReleaseVersion
|
||||
} else {
|
||||
$resolvedVersion = $normalizedVersion
|
||||
}
|
||||
|
||||
return [PSCustomObject]@{
|
||||
ResolvedVersion = $resolvedVersion
|
||||
GitRef = "v$resolvedVersion"
|
||||
DownloadUrl = "https://github.com/getcompanion-ai/feynman/archive/refs/tags/v$resolvedVersion.zip"
|
||||
}
|
||||
}
|
||||
|
||||
function Resolve-InstallDir {
|
||||
param(
|
||||
[string]$ResolvedScope,
|
||||
[string]$ResolvedTargetDir
|
||||
)
|
||||
|
||||
if ($ResolvedTargetDir) {
|
||||
return $ResolvedTargetDir
|
||||
}
|
||||
|
||||
if ($ResolvedScope -eq "Repo") {
|
||||
return Join-Path (Get-Location) ".agents\skills\feynman"
|
||||
}
|
||||
|
||||
$codexHome = if ($env:CODEX_HOME) { $env:CODEX_HOME } else { Join-Path $HOME ".codex" }
|
||||
return Join-Path $codexHome "skills\feynman"
|
||||
}
|
||||
|
||||
$metadata = Resolve-VersionMetadata -RequestedVersion $Version
|
||||
$resolvedVersion = $metadata.ResolvedVersion
|
||||
$downloadUrl = $metadata.DownloadUrl
|
||||
$installDir = Resolve-InstallDir -ResolvedScope $Scope -ResolvedTargetDir $TargetDir
|
||||
|
||||
$tmpDir = Join-Path ([System.IO.Path]::GetTempPath()) ("feynman-skills-install-" + [System.Guid]::NewGuid().ToString("N"))
|
||||
New-Item -ItemType Directory -Path $tmpDir | Out-Null
|
||||
|
||||
try {
|
||||
$archivePath = Join-Path $tmpDir "feynman-skills.zip"
|
||||
$extractDir = Join-Path $tmpDir "extract"
|
||||
|
||||
Write-Host "==> Downloading Feynman skills $resolvedVersion"
|
||||
Invoke-WebRequest -Uri $downloadUrl -OutFile $archivePath
|
||||
|
||||
Write-Host "==> Extracting skills"
|
||||
Expand-Archive -LiteralPath $archivePath -DestinationPath $extractDir -Force
|
||||
|
||||
$sourceRoot = Get-ChildItem -Path $extractDir -Directory | Select-Object -First 1
|
||||
if (-not $sourceRoot) {
|
||||
throw "Could not find extracted Feynman archive."
|
||||
}
|
||||
|
||||
$skillsSource = Join-Path $sourceRoot.FullName "skills"
|
||||
if (-not (Test-Path $skillsSource)) {
|
||||
throw "Could not find skills/ in downloaded archive."
|
||||
}
|
||||
|
||||
$installParent = Split-Path $installDir -Parent
|
||||
if ($installParent) {
|
||||
New-Item -ItemType Directory -Path $installParent -Force | Out-Null
|
||||
}
|
||||
|
||||
if (Test-Path $installDir) {
|
||||
Remove-Item -Recurse -Force $installDir
|
||||
}
|
||||
|
||||
New-Item -ItemType Directory -Path $installDir -Force | Out-Null
|
||||
Copy-Item -Path (Join-Path $skillsSource "*") -Destination $installDir -Recurse -Force
|
||||
|
||||
Write-Host "==> Installed skills to $installDir"
|
||||
if ($Scope -eq "Repo") {
|
||||
Write-Host "Repo-local skills will be discovered automatically from .agents/skills."
|
||||
} else {
|
||||
Write-Host "User-level skills will be discovered from `$CODEX_HOME/skills."
|
||||
}
|
||||
|
||||
Write-Host "Feynman skills $resolvedVersion installed successfully."
|
||||
} finally {
|
||||
if (Test-Path $tmpDir) {
|
||||
Remove-Item -Recurse -Force $tmpDir
|
||||
}
|
||||
}
|
||||
204
scripts/install/install-skills.sh
Normal file
204
scripts/install/install-skills.sh
Normal file
@@ -0,0 +1,204 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -eu
|
||||
|
||||
VERSION="latest"
|
||||
SCOPE="${FEYNMAN_SKILLS_SCOPE:-user}"
|
||||
TARGET_DIR="${FEYNMAN_SKILLS_DIR:-}"
|
||||
|
||||
step() {
|
||||
printf '==> %s\n' "$1"
|
||||
}
|
||||
|
||||
normalize_version() {
|
||||
case "$1" in
|
||||
"")
|
||||
printf 'latest\n'
|
||||
;;
|
||||
latest | stable)
|
||||
printf 'latest\n'
|
||||
;;
|
||||
edge)
|
||||
echo "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." >&2
|
||||
exit 1
|
||||
;;
|
||||
v*)
|
||||
printf '%s\n' "${1#v}"
|
||||
;;
|
||||
*)
|
||||
printf '%s\n' "$1"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
download_file() {
|
||||
url="$1"
|
||||
output="$2"
|
||||
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
if [ -t 2 ]; then
|
||||
curl -fL --progress-bar "$url" -o "$output"
|
||||
else
|
||||
curl -fsSL "$url" -o "$output"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
if command -v wget >/dev/null 2>&1; then
|
||||
if [ -t 2 ]; then
|
||||
wget --show-progress -O "$output" "$url"
|
||||
else
|
||||
wget -q -O "$output" "$url"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
echo "curl or wget is required to install Feynman skills." >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
download_text() {
|
||||
url="$1"
|
||||
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
curl -fsSL "$url"
|
||||
return
|
||||
fi
|
||||
|
||||
if command -v wget >/dev/null 2>&1; then
|
||||
wget -q -O - "$url"
|
||||
return
|
||||
fi
|
||||
|
||||
echo "curl or wget is required to install Feynman skills." >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
resolve_version() {
|
||||
normalized_version="$(normalize_version "$VERSION")"
|
||||
|
||||
if [ "$normalized_version" = "latest" ]; then
|
||||
release_page="$(download_text "https://github.com/getcompanion-ai/feynman/releases/latest")"
|
||||
resolved_version="$(printf '%s\n' "$release_page" | sed -n 's@.*releases/tag/v\([0-9][^"<>[:space:]]*\).*@\1@p' | head -n 1)"
|
||||
|
||||
if [ -z "$resolved_version" ]; then
|
||||
echo "Failed to resolve the latest Feynman release version." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
printf '%s\nv%s\n' "$resolved_version" "$resolved_version"
|
||||
return
|
||||
fi
|
||||
|
||||
printf '%s\nv%s\n' "$normalized_version" "$normalized_version"
|
||||
}
|
||||
|
||||
resolve_target_dir() {
|
||||
if [ -n "$TARGET_DIR" ]; then
|
||||
printf '%s\n' "$TARGET_DIR"
|
||||
return
|
||||
fi
|
||||
|
||||
case "$SCOPE" in
|
||||
repo)
|
||||
printf '%s/.agents/skills/feynman\n' "$PWD"
|
||||
;;
|
||||
user)
|
||||
codex_home="${CODEX_HOME:-$HOME/.codex}"
|
||||
printf '%s/skills/feynman\n' "$codex_home"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown scope: $SCOPE (expected --user or --repo)" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
--repo)
|
||||
SCOPE="repo"
|
||||
;;
|
||||
--user)
|
||||
SCOPE="user"
|
||||
;;
|
||||
--dir)
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "Usage: install-skills.sh [stable|latest|<version>] [--user|--repo] [--dir <path>]" >&2
|
||||
exit 1
|
||||
fi
|
||||
TARGET_DIR="$2"
|
||||
shift
|
||||
;;
|
||||
edge|stable|latest|v*|[0-9]*)
|
||||
VERSION="$1"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown argument: $1" >&2
|
||||
echo "Usage: install-skills.sh [stable|latest|<version>] [--user|--repo] [--dir <path>]" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
archive_metadata="$(resolve_version)"
|
||||
resolved_version="$(printf '%s\n' "$archive_metadata" | sed -n '1p')"
|
||||
git_ref="$(printf '%s\n' "$archive_metadata" | sed -n '2p')"
|
||||
|
||||
archive_url=""
|
||||
case "$git_ref" in
|
||||
main)
|
||||
archive_url="https://github.com/getcompanion-ai/feynman/archive/refs/heads/main.tar.gz"
|
||||
;;
|
||||
v*)
|
||||
archive_url="https://github.com/getcompanion-ai/feynman/archive/refs/tags/${git_ref}.tar.gz"
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -z "$archive_url" ]; then
|
||||
echo "Could not resolve a download URL for ref: $git_ref" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
install_dir="$(resolve_target_dir)"
|
||||
|
||||
step "Installing Feynman skills ${resolved_version} (${SCOPE})"
|
||||
|
||||
tmp_dir="$(mktemp -d)"
|
||||
cleanup() {
|
||||
rm -rf "$tmp_dir"
|
||||
}
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
archive_path="$tmp_dir/feynman-skills.tar.gz"
|
||||
step "Downloading skills archive"
|
||||
download_file "$archive_url" "$archive_path"
|
||||
|
||||
extract_dir="$tmp_dir/extract"
|
||||
mkdir -p "$extract_dir"
|
||||
step "Extracting skills"
|
||||
tar -xzf "$archive_path" -C "$extract_dir"
|
||||
|
||||
source_root="$(find "$extract_dir" -mindepth 1 -maxdepth 1 -type d | head -n 1)"
|
||||
if [ -z "$source_root" ] || [ ! -d "$source_root/skills" ]; then
|
||||
echo "Could not find skills/ in downloaded archive." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p "$(dirname "$install_dir")"
|
||||
rm -rf "$install_dir"
|
||||
mkdir -p "$install_dir"
|
||||
cp -R "$source_root/skills/." "$install_dir/"
|
||||
|
||||
step "Installed skills to $install_dir"
|
||||
case "$SCOPE" in
|
||||
repo)
|
||||
step "Repo-local skills will be discovered automatically from .agents/skills"
|
||||
;;
|
||||
user)
|
||||
step "User-level skills will be discovered from \$CODEX_HOME/skills"
|
||||
;;
|
||||
esac
|
||||
|
||||
printf 'Feynman skills %s installed successfully.\n' "$resolved_version"
|
||||
@@ -4,36 +4,88 @@ param(
|
||||
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
function Resolve-Version {
|
||||
function Normalize-Version {
|
||||
param([string]$RequestedVersion)
|
||||
|
||||
if ($RequestedVersion -and $RequestedVersion -ne "latest") {
|
||||
return $RequestedVersion.TrimStart("v")
|
||||
if (-not $RequestedVersion) {
|
||||
return "latest"
|
||||
}
|
||||
|
||||
$release = Invoke-RestMethod -Uri "https://api.github.com/repos/getcompanion-ai/feynman/releases/latest"
|
||||
if (-not $release.tag_name) {
|
||||
switch ($RequestedVersion.ToLowerInvariant()) {
|
||||
"latest" { return "latest" }
|
||||
"stable" { return "latest" }
|
||||
"edge" { throw "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." }
|
||||
default { return $RequestedVersion.TrimStart("v") }
|
||||
}
|
||||
}
|
||||
|
||||
function Resolve-LatestReleaseVersion {
|
||||
$page = Invoke-WebRequest -Uri "https://github.com/getcompanion-ai/feynman/releases/latest"
|
||||
$match = [regex]::Match($page.Content, 'releases/tag/v([0-9][^"''<>\s]*)')
|
||||
if (-not $match.Success) {
|
||||
throw "Failed to resolve the latest Feynman release version."
|
||||
}
|
||||
|
||||
return $release.tag_name.TrimStart("v")
|
||||
return $match.Groups[1].Value
|
||||
}
|
||||
|
||||
function Resolve-ReleaseMetadata {
|
||||
param(
|
||||
[string]$RequestedVersion,
|
||||
[string]$AssetTarget,
|
||||
[string]$BundleExtension
|
||||
)
|
||||
|
||||
$normalizedVersion = Normalize-Version -RequestedVersion $RequestedVersion
|
||||
|
||||
if ($normalizedVersion -eq "latest") {
|
||||
$resolvedVersion = Resolve-LatestReleaseVersion
|
||||
} else {
|
||||
$resolvedVersion = $normalizedVersion
|
||||
}
|
||||
|
||||
$bundleName = "feynman-$resolvedVersion-$AssetTarget"
|
||||
$archiveName = "$bundleName.$BundleExtension"
|
||||
$baseUrl = if ($env:FEYNMAN_INSTALL_BASE_URL) { $env:FEYNMAN_INSTALL_BASE_URL } else { "https://github.com/getcompanion-ai/feynman/releases/download/v$resolvedVersion" }
|
||||
|
||||
return [PSCustomObject]@{
|
||||
ResolvedVersion = $resolvedVersion
|
||||
BundleName = $bundleName
|
||||
ArchiveName = $archiveName
|
||||
DownloadUrl = "$baseUrl/$archiveName"
|
||||
}
|
||||
}
|
||||
|
||||
function Get-ArchSuffix {
|
||||
# Prefer PROCESSOR_ARCHITECTURE which is always available on Windows.
|
||||
# RuntimeInformation::OSArchitecture requires .NET 4.7.1+ and may not
|
||||
# be loaded in every Windows PowerShell 5.1 session.
|
||||
$envArch = $env:PROCESSOR_ARCHITECTURE
|
||||
if ($envArch) {
|
||||
switch ($envArch) {
|
||||
"AMD64" { return "x64" }
|
||||
"ARM64" { return "arm64" }
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
$arch = [System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture
|
||||
switch ($arch.ToString()) {
|
||||
"X64" { return "x64" }
|
||||
"Arm64" { return "arm64" }
|
||||
default { throw "Unsupported architecture: $arch" }
|
||||
}
|
||||
} catch {}
|
||||
|
||||
throw "Unsupported architecture: $envArch"
|
||||
}
|
||||
|
||||
$resolvedVersion = Resolve-Version -RequestedVersion $Version
|
||||
$archSuffix = Get-ArchSuffix
|
||||
$bundleName = "feynman-$resolvedVersion-win32-$archSuffix"
|
||||
$archiveName = "$bundleName.zip"
|
||||
$baseUrl = if ($env:FEYNMAN_INSTALL_BASE_URL) { $env:FEYNMAN_INSTALL_BASE_URL } else { "https://github.com/getcompanion-ai/feynman/releases/download/v$resolvedVersion" }
|
||||
$downloadUrl = "$baseUrl/$archiveName"
|
||||
$assetTarget = "win32-$archSuffix"
|
||||
$release = Resolve-ReleaseMetadata -RequestedVersion $Version -AssetTarget $assetTarget -BundleExtension "zip"
|
||||
$resolvedVersion = $release.ResolvedVersion
|
||||
$bundleName = $release.BundleName
|
||||
$archiveName = $release.ArchiveName
|
||||
$downloadUrl = $release.DownloadUrl
|
||||
|
||||
$installRoot = Join-Path $env:LOCALAPPDATA "Programs\feynman"
|
||||
$installBinDir = Join-Path $installRoot "bin"
|
||||
@@ -44,25 +96,47 @@ New-Item -ItemType Directory -Path $tmpDir | Out-Null
|
||||
|
||||
try {
|
||||
$archivePath = Join-Path $tmpDir $archiveName
|
||||
Write-Host "==> Downloading $archiveName"
|
||||
try {
|
||||
Invoke-WebRequest -Uri $downloadUrl -OutFile $archivePath
|
||||
} catch {
|
||||
throw @"
|
||||
Failed to download $archiveName from:
|
||||
$downloadUrl
|
||||
|
||||
The win32-$archSuffix bundle is missing from the GitHub release.
|
||||
This usually means the release exists, but not all platform bundles were uploaded.
|
||||
|
||||
Workarounds:
|
||||
- try again after the release finishes publishing
|
||||
- install via pnpm instead: pnpm add -g @companion-ai/feynman
|
||||
- install via bun instead: bun add -g @companion-ai/feynman
|
||||
"@
|
||||
}
|
||||
|
||||
New-Item -ItemType Directory -Path $installRoot -Force | Out-Null
|
||||
if (Test-Path $bundleDir) {
|
||||
Remove-Item -Recurse -Force $bundleDir
|
||||
}
|
||||
|
||||
Write-Host "==> Extracting $archiveName"
|
||||
Expand-Archive -LiteralPath $archivePath -DestinationPath $installRoot -Force
|
||||
|
||||
New-Item -ItemType Directory -Path $installBinDir -Force | Out-Null
|
||||
|
||||
$shimPath = Join-Path $installBinDir "feynman.cmd"
|
||||
Write-Host "==> Linking feynman into $installBinDir"
|
||||
@"
|
||||
@echo off
|
||||
"$bundleDir\feynman.cmd" %*
|
||||
"@ | Set-Content -Path $shimPath -Encoding ASCII
|
||||
|
||||
$currentUserPath = [Environment]::GetEnvironmentVariable("Path", "User")
|
||||
if (-not $currentUserPath.Split(';').Contains($installBinDir)) {
|
||||
$alreadyOnPath = $false
|
||||
if ($currentUserPath) {
|
||||
$alreadyOnPath = $currentUserPath.Split(';') -contains $installBinDir
|
||||
}
|
||||
if (-not $alreadyOnPath) {
|
||||
$updatedPath = if ([string]::IsNullOrWhiteSpace($currentUserPath)) {
|
||||
$installBinDir
|
||||
} else {
|
||||
@@ -74,6 +148,16 @@ try {
|
||||
Write-Host "$installBinDir is already on PATH."
|
||||
}
|
||||
|
||||
$resolvedCommand = Get-Command feynman -ErrorAction SilentlyContinue
|
||||
if ($resolvedCommand -and $resolvedCommand.Source -ne $shimPath) {
|
||||
Write-Warning "Current shell resolves feynman to $($resolvedCommand.Source)"
|
||||
Write-Host "Run in a new shell, or run: `$env:Path = '$installBinDir;' + `$env:Path"
|
||||
Write-Host "Then run: feynman"
|
||||
if ($resolvedCommand.Source -like "*node_modules*@companion-ai*feynman*") {
|
||||
Write-Host "If that path is an old global npm install, remove it with: npm uninstall -g @companion-ai/feynman"
|
||||
}
|
||||
}
|
||||
|
||||
Write-Host "Feynman $resolvedVersion installed successfully."
|
||||
} finally {
|
||||
if (Test-Path $tmpDir) {
|
||||
|
||||
@@ -13,11 +13,57 @@ step() {
|
||||
printf '==> %s\n' "$1"
|
||||
}
|
||||
|
||||
run_with_spinner() {
|
||||
label="$1"
|
||||
shift
|
||||
|
||||
if [ ! -t 2 ]; then
|
||||
step "$label"
|
||||
"$@"
|
||||
return
|
||||
fi
|
||||
|
||||
"$@" &
|
||||
pid=$!
|
||||
frame=0
|
||||
|
||||
set +e
|
||||
while kill -0 "$pid" 2>/dev/null; do
|
||||
case "$frame" in
|
||||
0) spinner='|' ;;
|
||||
1) spinner='/' ;;
|
||||
2) spinner='-' ;;
|
||||
*) spinner='\\' ;;
|
||||
esac
|
||||
printf '\r==> %s %s' "$label" "$spinner" >&2
|
||||
frame=$(( (frame + 1) % 4 ))
|
||||
sleep 0.1
|
||||
done
|
||||
wait "$pid"
|
||||
status=$?
|
||||
set -e
|
||||
|
||||
printf '\r\033[2K' >&2
|
||||
if [ "$status" -ne 0 ]; then
|
||||
printf '==> %s failed\n' "$label" >&2
|
||||
return "$status"
|
||||
fi
|
||||
|
||||
step "$label"
|
||||
}
|
||||
|
||||
normalize_version() {
|
||||
case "$1" in
|
||||
"" | latest)
|
||||
"")
|
||||
printf 'latest\n'
|
||||
;;
|
||||
latest | stable)
|
||||
printf 'latest\n'
|
||||
;;
|
||||
edge)
|
||||
echo "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." >&2
|
||||
exit 1
|
||||
;;
|
||||
v*)
|
||||
printf '%s\n' "${1#v}"
|
||||
;;
|
||||
@@ -32,12 +78,20 @@ download_file() {
|
||||
output="$2"
|
||||
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
if [ -t 2 ]; then
|
||||
curl -fL --progress-bar "$url" -o "$output"
|
||||
else
|
||||
curl -fsSL "$url" -o "$output"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
if command -v wget >/dev/null 2>&1; then
|
||||
if [ -t 2 ]; then
|
||||
wget --show-progress -O "$output" "$url"
|
||||
else
|
||||
wget -q -O "$output" "$url"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
@@ -110,23 +164,47 @@ require_command() {
|
||||
fi
|
||||
}
|
||||
|
||||
resolve_version() {
|
||||
normalized_version="$(normalize_version "$VERSION")"
|
||||
warn_command_conflict() {
|
||||
expected_path="$INSTALL_BIN_DIR/feynman"
|
||||
resolved_path="$(command -v feynman 2>/dev/null || true)"
|
||||
|
||||
if [ "$normalized_version" != "latest" ]; then
|
||||
printf '%s\n' "$normalized_version"
|
||||
if [ -z "$resolved_path" ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
release_json="$(download_text "https://api.github.com/repos/getcompanion-ai/feynman/releases/latest")"
|
||||
resolved="$(printf '%s\n' "$release_json" | sed -n 's/.*"tag_name":[[:space:]]*"v\([^"]*\)".*/\1/p' | head -n 1)"
|
||||
if [ "$resolved_path" != "$expected_path" ]; then
|
||||
step "Warning: current shell resolves feynman to $resolved_path"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||
step "Or launch directly: $expected_path"
|
||||
|
||||
if [ -z "$resolved" ]; then
|
||||
case "$resolved_path" in
|
||||
*"/node_modules/@companion-ai/feynman/"* | *"/node_modules/.bin/feynman")
|
||||
step "If that path is an old global npm install, remove it with: npm uninstall -g @companion-ai/feynman"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
}
|
||||
|
||||
resolve_release_metadata() {
|
||||
normalized_version="$(normalize_version "$VERSION")"
|
||||
|
||||
if [ "$normalized_version" = "latest" ]; then
|
||||
release_page="$(download_text "https://github.com/getcompanion-ai/feynman/releases/latest")"
|
||||
resolved_version="$(printf '%s\n' "$release_page" | sed -n 's@.*releases/tag/v\([0-9][^"<>[:space:]]*\).*@\1@p' | head -n 1)"
|
||||
|
||||
if [ -z "$resolved_version" ]; then
|
||||
echo "Failed to resolve the latest Feynman release version." >&2
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
resolved_version="$normalized_version"
|
||||
fi
|
||||
|
||||
printf '%s\n' "$resolved"
|
||||
bundle_name="feynman-${resolved_version}-${asset_target}"
|
||||
archive_name="${bundle_name}.${archive_extension}"
|
||||
download_url="${FEYNMAN_INSTALL_BASE_URL:-https://github.com/getcompanion-ai/feynman/releases/download/v${resolved_version}}/${archive_name}"
|
||||
|
||||
printf '%s\n%s\n%s\n%s\n' "$resolved_version" "$bundle_name" "$archive_name" "$download_url"
|
||||
}
|
||||
|
||||
case "$(uname -s)" in
|
||||
@@ -158,12 +236,13 @@ esac
|
||||
require_command mktemp
|
||||
require_command tar
|
||||
|
||||
resolved_version="$(resolve_version)"
|
||||
asset_target="$os-$arch"
|
||||
bundle_name="feynman-${resolved_version}-${asset_target}"
|
||||
archive_name="${bundle_name}.tar.gz"
|
||||
base_url="${FEYNMAN_INSTALL_BASE_URL:-https://github.com/getcompanion-ai/feynman/releases/download/v${resolved_version}}"
|
||||
download_url="${base_url}/${archive_name}"
|
||||
archive_extension="tar.gz"
|
||||
release_metadata="$(resolve_release_metadata)"
|
||||
resolved_version="$(printf '%s\n' "$release_metadata" | sed -n '1p')"
|
||||
bundle_name="$(printf '%s\n' "$release_metadata" | sed -n '2p')"
|
||||
archive_name="$(printf '%s\n' "$release_metadata" | sed -n '3p')"
|
||||
download_url="$(printf '%s\n' "$release_metadata" | sed -n '4p')"
|
||||
|
||||
step "Installing Feynman ${resolved_version} for ${asset_target}"
|
||||
|
||||
@@ -174,13 +253,29 @@ cleanup() {
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
archive_path="$tmp_dir/$archive_name"
|
||||
download_file "$download_url" "$archive_path"
|
||||
step "Downloading ${archive_name}"
|
||||
if ! download_file "$download_url" "$archive_path"; then
|
||||
cat >&2 <<EOF
|
||||
Failed to download ${archive_name} from:
|
||||
${download_url}
|
||||
|
||||
The ${asset_target} bundle is missing from the GitHub release.
|
||||
This usually means the release exists, but not all platform bundles were uploaded.
|
||||
|
||||
Workarounds:
|
||||
- try again after the release finishes publishing
|
||||
- install via pnpm instead: pnpm add -g @companion-ai/feynman
|
||||
- install via bun instead: bun add -g @companion-ai/feynman
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p "$INSTALL_APP_DIR"
|
||||
rm -rf "$INSTALL_APP_DIR/$bundle_name"
|
||||
tar -xzf "$archive_path" -C "$INSTALL_APP_DIR"
|
||||
run_with_spinner "Extracting ${archive_name}" tar -xzf "$archive_path" -C "$INSTALL_APP_DIR"
|
||||
|
||||
mkdir -p "$INSTALL_BIN_DIR"
|
||||
step "Linking feynman into $INSTALL_BIN_DIR"
|
||||
cat >"$INSTALL_BIN_DIR/feynman" <<EOF
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
@@ -193,20 +288,22 @@ add_to_path
|
||||
case "$path_action" in
|
||||
added)
|
||||
step "PATH updated for future shells in $path_profile"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||
;;
|
||||
configured)
|
||||
step "PATH is already configured for future shells in $path_profile"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||
;;
|
||||
skipped)
|
||||
step "PATH update skipped"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||
;;
|
||||
*)
|
||||
step "$INSTALL_BIN_DIR is already on PATH"
|
||||
step "Run: feynman"
|
||||
step "Run: hash -r && feynman"
|
||||
;;
|
||||
esac
|
||||
|
||||
warn_command_conflict
|
||||
|
||||
printf 'Feynman %s installed successfully.\n' "$resolved_version"
|
||||
|
||||
@@ -1,28 +1,40 @@
|
||||
import { spawnSync } from "node:child_process";
|
||||
import { existsSync, mkdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
|
||||
import { createRequire } from "node:module";
|
||||
import { dirname, resolve } from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import { FEYNMAN_LOGO_HTML } from "../logo.mjs";
|
||||
|
||||
const here = dirname(fileURLToPath(import.meta.url));
|
||||
const appRoot = resolve(here, "..");
|
||||
const appRequire = createRequire(resolve(appRoot, "package.json"));
|
||||
const isGlobalInstall = process.env.npm_config_global === "true" || process.env.npm_config_location === "global";
|
||||
|
||||
function findNodeModules() {
|
||||
let dir = appRoot;
|
||||
while (dir !== dirname(dir)) {
|
||||
const nm = resolve(dir, "node_modules");
|
||||
if (existsSync(nm)) return nm;
|
||||
dir = dirname(dir);
|
||||
}
|
||||
return resolve(appRoot, "node_modules");
|
||||
}
|
||||
|
||||
const nodeModules = findNodeModules();
|
||||
|
||||
function findPackageRoot(packageName) {
|
||||
const candidate = resolve(nodeModules, packageName);
|
||||
if (existsSync(resolve(candidate, "package.json"))) return candidate;
|
||||
const segments = packageName.split("/");
|
||||
let current = appRoot;
|
||||
while (current !== dirname(current)) {
|
||||
for (const candidate of [resolve(current, "node_modules", ...segments), resolve(current, ...segments)]) {
|
||||
if (existsSync(resolve(candidate, "package.json"))) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
current = dirname(current);
|
||||
}
|
||||
|
||||
for (const spec of [`${packageName}/dist/index.js`, `${packageName}/dist/cli.js`, packageName]) {
|
||||
try {
|
||||
let current = dirname(appRequire.resolve(spec));
|
||||
while (current !== dirname(current)) {
|
||||
if (existsSync(resolve(current, "package.json"))) {
|
||||
return current;
|
||||
}
|
||||
current = dirname(current);
|
||||
}
|
||||
} catch {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@@ -31,15 +43,15 @@ const piTuiRoot = findPackageRoot("@mariozechner/pi-tui");
|
||||
const piAiRoot = findPackageRoot("@mariozechner/pi-ai");
|
||||
|
||||
if (!piPackageRoot) {
|
||||
console.warn("[feynman] pi-coding-agent not found, skipping patches");
|
||||
process.exit(0);
|
||||
console.warn("[feynman] pi-coding-agent not found, skipping Pi patches");
|
||||
}
|
||||
|
||||
const packageJsonPath = resolve(piPackageRoot, "package.json");
|
||||
const cliPath = resolve(piPackageRoot, "dist", "cli.js");
|
||||
const bunCliPath = resolve(piPackageRoot, "dist", "bun", "cli.js");
|
||||
const interactiveModePath = resolve(piPackageRoot, "dist", "modes", "interactive", "interactive-mode.js");
|
||||
const interactiveThemePath = resolve(piPackageRoot, "dist", "modes", "interactive", "theme", "theme.js");
|
||||
const packageJsonPath = piPackageRoot ? resolve(piPackageRoot, "package.json") : null;
|
||||
const cliPath = piPackageRoot ? resolve(piPackageRoot, "dist", "cli.js") : null;
|
||||
const bunCliPath = piPackageRoot ? resolve(piPackageRoot, "dist", "bun", "cli.js") : null;
|
||||
const interactiveModePath = piPackageRoot ? resolve(piPackageRoot, "dist", "modes", "interactive", "interactive-mode.js") : null;
|
||||
const interactiveThemePath = piPackageRoot ? resolve(piPackageRoot, "dist", "modes", "interactive", "theme", "theme.js") : null;
|
||||
const terminalPath = piTuiRoot ? resolve(piTuiRoot, "dist", "terminal.js") : null;
|
||||
const editorPath = piTuiRoot ? resolve(piTuiRoot, "dist", "components", "editor.js") : null;
|
||||
const workspaceRoot = resolve(appRoot, ".feynman", "npm", "node_modules");
|
||||
const webAccessPath = resolve(workspaceRoot, "pi-web-access", "index.ts");
|
||||
@@ -56,6 +68,61 @@ const workspaceDir = resolve(appRoot, ".feynman", "npm");
|
||||
const workspacePackageJsonPath = resolve(workspaceDir, "package.json");
|
||||
const workspaceArchivePath = resolve(appRoot, ".feynman", "runtime-workspace.tgz");
|
||||
|
||||
function createInstallCommand(packageManager, packageSpecs) {
|
||||
switch (packageManager) {
|
||||
case "npm":
|
||||
return ["install", "--prefer-offline", "--no-audit", "--no-fund", "--loglevel", "error", ...packageSpecs];
|
||||
case "pnpm":
|
||||
return ["add", "--prefer-offline", "--reporter", "silent", ...packageSpecs];
|
||||
case "bun":
|
||||
return ["add", "--silent", ...packageSpecs];
|
||||
default:
|
||||
throw new Error(`Unsupported package manager: ${packageManager}`);
|
||||
}
|
||||
}
|
||||
|
||||
let cachedPackageManager = undefined;
|
||||
|
||||
function resolvePackageManager() {
|
||||
if (cachedPackageManager !== undefined) return cachedPackageManager;
|
||||
|
||||
const requested = process.env.FEYNMAN_PACKAGE_MANAGER?.trim();
|
||||
const candidates = requested ? [requested] : ["npm", "pnpm", "bun"];
|
||||
for (const candidate of candidates) {
|
||||
if (resolveExecutable(candidate)) {
|
||||
cachedPackageManager = candidate;
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
|
||||
cachedPackageManager = null;
|
||||
return null;
|
||||
}
|
||||
|
||||
function installWorkspacePackages(packageSpecs) {
|
||||
const packageManager = resolvePackageManager();
|
||||
if (!packageManager) {
|
||||
process.stderr.write(
|
||||
"[feynman] no supported package manager found; install npm, pnpm, or bun, or set FEYNMAN_PACKAGE_MANAGER.\n",
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
const result = spawnSync(packageManager, createInstallCommand(packageManager, packageSpecs), {
|
||||
cwd: workspaceDir,
|
||||
stdio: ["ignore", "ignore", "pipe"],
|
||||
timeout: 300000,
|
||||
});
|
||||
|
||||
if (result.status !== 0) {
|
||||
if (result.stderr?.length) process.stderr.write(result.stderr);
|
||||
process.stderr.write(`[feynman] ${packageManager} failed while setting up bundled packages.\n`);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
function parsePackageName(spec) {
|
||||
const match = spec.match(/^(@?[^@]+(?:\/[^@]+)?)(?:@.+)?$/);
|
||||
return match?.[1] ?? spec;
|
||||
@@ -72,26 +139,22 @@ function restorePackagedWorkspace(packageSpecs) {
|
||||
timeout: 300000,
|
||||
});
|
||||
|
||||
// On Windows, tar may exit non-zero due to symlink creation failures in
|
||||
// .bin/ directories. These are non-fatal — check whether the actual
|
||||
// package directories were extracted successfully.
|
||||
const packagesPresent = packageSpecs.every((spec) => existsSync(resolve(workspaceRoot, parsePackageName(spec))));
|
||||
if (packagesPresent) return true;
|
||||
|
||||
if (result.status !== 0) {
|
||||
if (result.stderr?.length) process.stderr.write(result.stderr);
|
||||
return false;
|
||||
}
|
||||
|
||||
return packageSpecs.every((spec) => existsSync(resolve(workspaceRoot, parsePackageName(spec))));
|
||||
return false;
|
||||
}
|
||||
|
||||
function refreshPackagedWorkspace(packageSpecs) {
|
||||
const result = spawnSync("npm", ["install", "--prefer-offline", "--no-audit", "--no-fund", "--loglevel", "error", "--prefix", workspaceDir, ...packageSpecs], {
|
||||
stdio: ["ignore", "ignore", "pipe"],
|
||||
timeout: 300000,
|
||||
});
|
||||
|
||||
if (result.status !== 0) {
|
||||
if (result.stderr?.length) process.stderr.write(result.stderr);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
return installWorkspacePackages(packageSpecs);
|
||||
}
|
||||
|
||||
function resolveExecutable(name, fallbackPaths = []) {
|
||||
@@ -99,12 +162,18 @@ function resolveExecutable(name, fallbackPaths = []) {
|
||||
if (existsSync(candidate)) return candidate;
|
||||
}
|
||||
|
||||
const result = spawnSync("sh", ["-lc", `command -v ${name}`], {
|
||||
const isWindows = process.platform === "win32";
|
||||
const result = isWindows
|
||||
? spawnSync("cmd", ["/c", `where ${name}`], {
|
||||
encoding: "utf8",
|
||||
stdio: ["ignore", "pipe", "ignore"],
|
||||
})
|
||||
: spawnSync("sh", ["-lc", `command -v ${name}`], {
|
||||
encoding: "utf8",
|
||||
stdio: ["ignore", "pipe", "ignore"],
|
||||
});
|
||||
if (result.status === 0) {
|
||||
const resolved = result.stdout.trim();
|
||||
const resolved = result.stdout.trim().split(/\r?\n/)[0];
|
||||
if (resolved) return resolved;
|
||||
}
|
||||
return null;
|
||||
@@ -139,17 +208,13 @@ function ensurePackageWorkspace() {
|
||||
process.stderr.write(`\r${frames[frame++ % frames.length]} setting up feynman... ${elapsed}s`);
|
||||
}, 80);
|
||||
|
||||
const result = spawnSync("npm", ["install", "--prefer-offline", "--no-audit", "--no-fund", "--loglevel", "error", "--prefix", workspaceDir, ...packageSpecs], {
|
||||
stdio: ["ignore", "ignore", "pipe"],
|
||||
timeout: 300000,
|
||||
});
|
||||
const result = installWorkspacePackages(packageSpecs);
|
||||
|
||||
clearInterval(spinner);
|
||||
const elapsed = Math.round((Date.now() - start) / 1000);
|
||||
|
||||
if (result.status !== 0) {
|
||||
if (!result) {
|
||||
process.stderr.write(`\r✗ setup failed (${elapsed}s)\n`);
|
||||
if (result.stderr?.length) process.stderr.write(result.stderr);
|
||||
} else {
|
||||
process.stderr.write(`\r✓ feynman ready (${elapsed}s)\n`);
|
||||
}
|
||||
@@ -178,7 +243,7 @@ function ensurePandoc() {
|
||||
|
||||
ensurePandoc();
|
||||
|
||||
if (existsSync(packageJsonPath)) {
|
||||
if (packageJsonPath && existsSync(packageJsonPath)) {
|
||||
const pkg = JSON.parse(readFileSync(packageJsonPath, "utf8"));
|
||||
if (pkg.piConfig?.name !== "feynman" || pkg.piConfig?.configDir !== ".feynman") {
|
||||
pkg.piConfig = {
|
||||
@@ -190,18 +255,76 @@ if (existsSync(packageJsonPath)) {
|
||||
}
|
||||
}
|
||||
|
||||
for (const entryPath of [cliPath, bunCliPath]) {
|
||||
for (const entryPath of [cliPath, bunCliPath].filter(Boolean)) {
|
||||
if (!existsSync(entryPath)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const cliSource = readFileSync(entryPath, "utf8");
|
||||
let cliSource = readFileSync(entryPath, "utf8");
|
||||
if (cliSource.includes('process.title = "pi";')) {
|
||||
writeFileSync(entryPath, cliSource.replace('process.title = "pi";', 'process.title = "feynman";'), "utf8");
|
||||
cliSource = cliSource.replace('process.title = "pi";', 'process.title = "feynman";');
|
||||
}
|
||||
const stdinErrorGuard = [
|
||||
"const feynmanHandleStdinError = (error) => {",
|
||||
' if (error && typeof error === "object") {',
|
||||
' const code = "code" in error ? error.code : undefined;',
|
||||
' const syscall = "syscall" in error ? error.syscall : undefined;',
|
||||
' if ((code === "EIO" || code === "EBADF") && syscall === "read") {',
|
||||
" return;",
|
||||
" }",
|
||||
" }",
|
||||
"};",
|
||||
'process.stdin?.on?.("error", feynmanHandleStdinError);',
|
||||
].join("\n");
|
||||
if (!cliSource.includes('process.stdin?.on?.("error", feynmanHandleStdinError);')) {
|
||||
cliSource = cliSource.replace(
|
||||
'process.emitWarning = (() => { });',
|
||||
`process.emitWarning = (() => { });\n${stdinErrorGuard}`,
|
||||
);
|
||||
}
|
||||
writeFileSync(entryPath, cliSource, "utf8");
|
||||
}
|
||||
|
||||
if (existsSync(interactiveModePath)) {
|
||||
if (terminalPath && existsSync(terminalPath)) {
|
||||
let terminalSource = readFileSync(terminalPath, "utf8");
|
||||
if (!terminalSource.includes("stdinErrorHandler;")) {
|
||||
terminalSource = terminalSource.replace(
|
||||
" stdinBuffer;\n stdinDataHandler;\n",
|
||||
[
|
||||
" stdinBuffer;",
|
||||
" stdinDataHandler;",
|
||||
" stdinErrorHandler = (error) => {",
|
||||
' if ((error?.code === "EIO" || error?.code === "EBADF") && error?.syscall === "read") {',
|
||||
" return;",
|
||||
" }",
|
||||
" };",
|
||||
].join("\n") + "\n",
|
||||
);
|
||||
}
|
||||
if (!terminalSource.includes('process.stdin.on("error", this.stdinErrorHandler);')) {
|
||||
terminalSource = terminalSource.replace(
|
||||
' process.stdin.resume();\n',
|
||||
' process.stdin.resume();\n process.stdin.on("error", this.stdinErrorHandler);\n',
|
||||
);
|
||||
}
|
||||
if (!terminalSource.includes(' process.stdin.removeListener("error", this.stdinErrorHandler);')) {
|
||||
terminalSource = terminalSource.replace(
|
||||
' process.stdin.removeListener("data", onData);\n this.inputHandler = previousHandler;\n',
|
||||
[
|
||||
' process.stdin.removeListener("data", onData);',
|
||||
' process.stdin.removeListener("error", this.stdinErrorHandler);',
|
||||
' this.inputHandler = previousHandler;',
|
||||
].join("\n"),
|
||||
);
|
||||
terminalSource = terminalSource.replace(
|
||||
' process.stdin.pause();\n',
|
||||
' process.stdin.removeListener("error", this.stdinErrorHandler);\n process.stdin.pause();\n',
|
||||
);
|
||||
}
|
||||
writeFileSync(terminalPath, terminalSource, "utf8");
|
||||
}
|
||||
|
||||
if (interactiveModePath && existsSync(interactiveModePath)) {
|
||||
const interactiveModeSource = readFileSync(interactiveModePath, "utf8");
|
||||
if (interactiveModeSource.includes("`π - ${sessionName} - ${cwdBasename}`")) {
|
||||
writeFileSync(
|
||||
@@ -214,7 +337,7 @@ if (existsSync(interactiveModePath)) {
|
||||
}
|
||||
}
|
||||
|
||||
if (existsSync(interactiveThemePath)) {
|
||||
if (interactiveThemePath && existsSync(interactiveThemePath)) {
|
||||
let themeSource = readFileSync(interactiveThemePath, "utf8");
|
||||
const desiredGetEditorTheme = [
|
||||
"export function getEditorTheme() {",
|
||||
@@ -430,6 +553,11 @@ if (alphaHubAuthPath && existsSync(alphaHubAuthPath)) {
|
||||
if (source.includes(oldError)) {
|
||||
source = source.replace(oldError, newError);
|
||||
}
|
||||
const brokenWinOpen = "else if (plat === 'win32') execSync(`start \"${url}\"`);";
|
||||
const fixedWinOpen = "else if (plat === 'win32') execSync(`cmd /c start \"\" \"${url}\"`);";
|
||||
if (source.includes(brokenWinOpen)) {
|
||||
source = source.replace(brokenWinOpen, fixedWinOpen);
|
||||
}
|
||||
writeFileSync(alphaHubAuthPath, source, "utf8");
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ const workspaceNodeModulesDir = resolve(workspaceDir, "node_modules");
|
||||
const manifestPath = resolve(workspaceDir, ".runtime-manifest.json");
|
||||
const workspacePackageJsonPath = resolve(workspaceDir, "package.json");
|
||||
const workspaceArchivePath = resolve(feynmanDir, "runtime-workspace.tgz");
|
||||
const PRUNE_VERSION = 3;
|
||||
|
||||
function readPackageSpecs() {
|
||||
const settings = JSON.parse(readFileSync(settingsPath, "utf8"));
|
||||
@@ -44,7 +45,8 @@ function workspaceIsCurrent(packageSpecs) {
|
||||
if (
|
||||
manifest.nodeAbi !== process.versions.modules ||
|
||||
manifest.platform !== process.platform ||
|
||||
manifest.arch !== process.arch
|
||||
manifest.arch !== process.arch ||
|
||||
manifest.pruneVersion !== PRUNE_VERSION
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
@@ -102,6 +104,7 @@ function writeManifest(packageSpecs) {
|
||||
nodeVersion: process.version,
|
||||
platform: process.platform,
|
||||
arch: process.arch,
|
||||
pruneVersion: PRUNE_VERSION,
|
||||
},
|
||||
null,
|
||||
2,
|
||||
@@ -110,6 +113,15 @@ function writeManifest(packageSpecs) {
|
||||
);
|
||||
}
|
||||
|
||||
function pruneWorkspace() {
|
||||
const result = spawnSync(process.execPath, [resolve(appRoot, "scripts", "prune-runtime-deps.mjs"), workspaceDir], {
|
||||
stdio: "inherit",
|
||||
});
|
||||
if (result.status !== 0) {
|
||||
process.exit(result.status ?? 1);
|
||||
}
|
||||
}
|
||||
|
||||
function archiveIsCurrent() {
|
||||
if (!existsSync(workspaceArchivePath) || !existsSync(manifestPath)) {
|
||||
return false;
|
||||
@@ -144,6 +156,7 @@ if (workspaceIsCurrent(packageSpecs)) {
|
||||
|
||||
console.log("[feynman] preparing vendored runtime workspace...");
|
||||
prepareWorkspace(packageSpecs);
|
||||
pruneWorkspace();
|
||||
writeManifest(packageSpecs);
|
||||
createWorkspaceArchive();
|
||||
console.log("[feynman] vendored runtime workspace ready");
|
||||
|
||||
131
scripts/prune-runtime-deps.mjs
Normal file
131
scripts/prune-runtime-deps.mjs
Normal file
@@ -0,0 +1,131 @@
|
||||
import { existsSync, readdirSync, rmSync, statSync } from "node:fs";
|
||||
import { basename, join, resolve } from "node:path";
|
||||
|
||||
const root = resolve(process.argv[2] ?? ".");
|
||||
const nodeModulesDir = resolve(root, "node_modules");
|
||||
|
||||
const STRIP_FILE_PATTERNS = [
|
||||
/\.map$/i,
|
||||
/\.d\.cts$/i,
|
||||
/\.d\.ts$/i,
|
||||
/^README(\..+)?\.md$/i,
|
||||
/^CHANGELOG(\..+)?\.md$/i,
|
||||
];
|
||||
|
||||
function safeStat(path) {
|
||||
try {
|
||||
return statSync(path);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function removePath(path) {
|
||||
rmSync(path, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
function walkAndPrune(dir) {
|
||||
if (!existsSync(dir)) return;
|
||||
|
||||
for (const entry of readdirSync(dir, { withFileTypes: true })) {
|
||||
const path = join(dir, entry.name);
|
||||
const stats = entry.isSymbolicLink() ? safeStat(path) : null;
|
||||
const isDirectory = entry.isDirectory() || stats?.isDirectory();
|
||||
const isFile = entry.isFile() || stats?.isFile();
|
||||
|
||||
if (isDirectory) {
|
||||
walkAndPrune(path);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (isFile && STRIP_FILE_PATTERNS.some((pattern) => pattern.test(entry.name))) {
|
||||
removePath(path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function currentKoffiVariant() {
|
||||
if (process.platform === "darwin" && process.arch === "arm64") return "darwin_arm64";
|
||||
if (process.platform === "darwin" && process.arch === "x64") return "darwin_x64";
|
||||
if (process.platform === "linux" && process.arch === "arm64") return "linux_arm64";
|
||||
if (process.platform === "linux" && process.arch === "x64") return "linux_x64";
|
||||
if (process.platform === "win32" && process.arch === "arm64") return "win32_arm64";
|
||||
if (process.platform === "win32" && process.arch === "x64") return "win32_x64";
|
||||
return null;
|
||||
}
|
||||
|
||||
function pruneKoffi(nodeModulesRoot) {
|
||||
const koffiRoot = join(nodeModulesRoot, "koffi");
|
||||
if (!existsSync(koffiRoot)) return;
|
||||
|
||||
for (const dirName of ["doc", "src", "vendor"]) {
|
||||
removePath(join(koffiRoot, dirName));
|
||||
}
|
||||
|
||||
const buildRoot = join(koffiRoot, "build", "koffi");
|
||||
if (!existsSync(buildRoot)) return;
|
||||
|
||||
const keep = currentKoffiVariant();
|
||||
for (const entry of readdirSync(buildRoot, { withFileTypes: true })) {
|
||||
if (entry.name === keep) continue;
|
||||
removePath(join(buildRoot, entry.name));
|
||||
}
|
||||
}
|
||||
|
||||
function pruneBetterSqlite3(nodeModulesRoot) {
|
||||
const pkgRoot = join(nodeModulesRoot, "better-sqlite3");
|
||||
if (!existsSync(pkgRoot)) return;
|
||||
|
||||
removePath(join(pkgRoot, "deps"));
|
||||
removePath(join(pkgRoot, "src"));
|
||||
removePath(join(pkgRoot, "binding.gyp"));
|
||||
|
||||
const buildRoot = join(pkgRoot, "build");
|
||||
const releaseRoot = join(buildRoot, "Release");
|
||||
if (existsSync(releaseRoot)) {
|
||||
for (const entry of readdirSync(releaseRoot, { withFileTypes: true })) {
|
||||
if (entry.name === "better_sqlite3.node") continue;
|
||||
removePath(join(releaseRoot, entry.name));
|
||||
}
|
||||
}
|
||||
|
||||
for (const entry of ["Makefile", "binding.Makefile", "config.gypi", "deps", "gyp-mac-tool", "test_extension.target.mk", "better_sqlite3.target.mk"]) {
|
||||
removePath(join(buildRoot, entry));
|
||||
}
|
||||
}
|
||||
|
||||
function pruneLiteparse(nodeModulesRoot) {
|
||||
const pkgRoot = join(nodeModulesRoot, "@llamaindex", "liteparse");
|
||||
if (!existsSync(pkgRoot)) return;
|
||||
if (existsSync(join(pkgRoot, "dist"))) {
|
||||
removePath(join(pkgRoot, "src"));
|
||||
}
|
||||
}
|
||||
|
||||
function prunePiCodingAgent(nodeModulesRoot) {
|
||||
const pkgRoot = join(nodeModulesRoot, "@mariozechner", "pi-coding-agent");
|
||||
if (!existsSync(pkgRoot)) return;
|
||||
removePath(join(pkgRoot, "docs"));
|
||||
removePath(join(pkgRoot, "examples"));
|
||||
}
|
||||
|
||||
function pruneMermaid(nodeModulesRoot) {
|
||||
const pkgRoot = join(nodeModulesRoot, "mermaid", "dist");
|
||||
if (!existsSync(pkgRoot)) return;
|
||||
removePath(join(pkgRoot, "docs"));
|
||||
removePath(join(pkgRoot, "tests"));
|
||||
removePath(join(pkgRoot, "__mocks__"));
|
||||
}
|
||||
|
||||
if (!existsSync(nodeModulesDir)) {
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
walkAndPrune(nodeModulesDir);
|
||||
pruneKoffi(nodeModulesDir);
|
||||
pruneBetterSqlite3(nodeModulesDir);
|
||||
pruneLiteparse(nodeModulesDir);
|
||||
prunePiCodingAgent(nodeModulesDir);
|
||||
pruneMermaid(nodeModulesDir);
|
||||
|
||||
console.log(`[feynman] pruned runtime deps in ${basename(root)}`);
|
||||
@@ -7,5 +7,7 @@ const websitePublicDir = resolve(appRoot, "website", "public");
|
||||
mkdirSync(websitePublicDir, { recursive: true });
|
||||
cpSync(resolve(appRoot, "scripts", "install", "install.sh"), resolve(websitePublicDir, "install"));
|
||||
cpSync(resolve(appRoot, "scripts", "install", "install.ps1"), resolve(websitePublicDir, "install.ps1"));
|
||||
cpSync(resolve(appRoot, "scripts", "install", "install-skills.sh"), resolve(websitePublicDir, "install-skills"));
|
||||
cpSync(resolve(appRoot, "scripts", "install", "install-skills.ps1"), resolve(websitePublicDir, "install-skills.ps1"));
|
||||
|
||||
console.log("[feynman] synced website installers");
|
||||
|
||||
42
skills/alpha-research/SKILL.md
Normal file
42
skills/alpha-research/SKILL.md
Normal file
@@ -0,0 +1,42 @@
|
||||
---
|
||||
name: alpha-research
|
||||
description: Search, read, and query research papers via the `alpha` CLI (alphaXiv-backed). Use when the user asks about academic papers, wants to find research on a topic, needs to read a specific paper, ask questions about a paper, inspect a paper's code repository, or manage paper annotations.
|
||||
---
|
||||
|
||||
# Alpha Research CLI
|
||||
|
||||
Use the `alpha` CLI via bash for all paper research operations.
|
||||
|
||||
## Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `alpha search "<query>"` | Search papers. Prefer `--mode semantic` by default; use `--mode keyword` only for exact-term lookup and `--mode agentic` for broader retrieval. |
|
||||
| `alpha get <arxiv-id-or-url>` | Fetch paper content and any local annotation |
|
||||
| `alpha get --full-text <arxiv-id>` | Get raw full text instead of AI report |
|
||||
| `alpha ask <arxiv-id> "<question>"` | Ask a question about a paper's PDF |
|
||||
| `alpha code <github-url> [path]` | Read files from a paper's GitHub repo. Use `/` for overview |
|
||||
| `alpha annotate <paper-id> "<note>"` | Save a persistent annotation on a paper |
|
||||
| `alpha annotate --clear <paper-id>` | Remove an annotation |
|
||||
| `alpha annotate --list` | List all annotations |
|
||||
|
||||
## Auth
|
||||
|
||||
Run `alpha login` to authenticate with alphaXiv. Check status with `feynman alpha status`, or `alpha status` once your installed `alpha-hub` version includes it.
|
||||
|
||||
## Examples
|
||||
|
||||
```bash
|
||||
alpha search "transformer scaling laws"
|
||||
alpha search --mode agentic "efficient attention mechanisms for long context"
|
||||
alpha get 2106.09685
|
||||
alpha ask 2106.09685 "What optimizer did they use?"
|
||||
alpha code https://github.com/karpathy/nanoGPT src/model.py
|
||||
alpha annotate 2106.09685 "Key paper on LoRA - revisit for adapter comparison"
|
||||
```
|
||||
|
||||
## When to use
|
||||
|
||||
- Academic paper search, reading, Q&A → `alpha`
|
||||
- Current topics (products, releases, docs) → web search tools
|
||||
- Mixed topics → combine both
|
||||
28
skills/contributing/SKILL.md
Normal file
28
skills/contributing/SKILL.md
Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
name: contributing
|
||||
description: Contribute changes to the Feynman repository itself. Use when the task is to add features, fix bugs, update prompts or skills, change install or release behavior, improve docs, or prepare a focused PR against this repo.
|
||||
---
|
||||
|
||||
# Contributing
|
||||
|
||||
Read `CONTRIBUTING.md` first, then `AGENTS.md` for repo-level agent conventions.
|
||||
|
||||
Use this skill when working on Feynman itself, especially for:
|
||||
|
||||
- CLI or runtime changes in `src/`
|
||||
- prompt changes in `prompts/`
|
||||
- bundled skill changes in `skills/`
|
||||
- subagent behavior changes in `.feynman/agents/`
|
||||
- install, packaging, or release changes in `scripts/`, `README.md`, or website docs
|
||||
|
||||
Minimum local checks before claiming the repo change is done:
|
||||
|
||||
```bash
|
||||
npm test
|
||||
npm run typecheck
|
||||
npm run build
|
||||
```
|
||||
|
||||
If the docs site changed, also validate `website/`.
|
||||
|
||||
When changing release-sensitive behavior, verify that `.nvmrc`, package `engines`, runtime guards, and install docs stay aligned.
|
||||
25
skills/eli5/SKILL.md
Normal file
25
skills/eli5/SKILL.md
Normal file
@@ -0,0 +1,25 @@
|
||||
---
|
||||
name: eli5
|
||||
description: Explain research, papers, or technical ideas in plain English with minimal jargon, concrete analogies, and clear takeaways. Use when the user says "ELI5 this", asks for a simple explanation of a paper or research result, wants jargon removed, or asks what something technically dense actually means.
|
||||
---
|
||||
|
||||
# ELI5
|
||||
|
||||
Use `alpha` first when the user names a specific paper, arXiv id, DOI, or paper URL.
|
||||
|
||||
If the user gives only a topic, identify 1-3 representative papers and anchor the explanation around the clearest or most important one.
|
||||
|
||||
Structure the answer with:
|
||||
- `One-Sentence Summary`
|
||||
- `Big Idea`
|
||||
- `How It Works`
|
||||
- `Why It Matters`
|
||||
- `What To Be Skeptical Of`
|
||||
- `If You Remember 3 Things`
|
||||
|
||||
Guidelines:
|
||||
- Use short sentences and concrete words.
|
||||
- Define jargon immediately or remove it.
|
||||
- Prefer one good analogy over several weak ones.
|
||||
- Separate what the paper actually shows from speculation or interpretation.
|
||||
- Keep the explanation inline unless the user explicitly asks to save it as an artifact.
|
||||
56
skills/modal-compute/SKILL.md
Normal file
56
skills/modal-compute/SKILL.md
Normal file
@@ -0,0 +1,56 @@
|
||||
---
|
||||
name: modal-compute
|
||||
description: Run GPU workloads on Modal's serverless infrastructure. Use when the user needs remote GPU compute for training, inference, benchmarks, or batch processing and Modal CLI is available.
|
||||
---
|
||||
|
||||
# Modal Compute
|
||||
|
||||
Use the `modal` CLI for serverless GPU workloads. No pod lifecycle to manage — write a decorated Python script and run it.
|
||||
|
||||
## Setup
|
||||
|
||||
```bash
|
||||
pip install modal
|
||||
modal setup
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `modal run script.py` | Run a script on Modal (ephemeral) |
|
||||
| `modal run --detach script.py` | Run detached (background) |
|
||||
| `modal deploy script.py` | Deploy persistently |
|
||||
| `modal serve script.py` | Serve with hot-reload (dev) |
|
||||
| `modal shell --gpu a100` | Interactive shell with GPU |
|
||||
| `modal app list` | List deployed apps |
|
||||
|
||||
## GPU types
|
||||
|
||||
`T4`, `L4`, `A10G`, `L40S`, `A100`, `A100-80GB`, `H100`, `H200`, `B200`
|
||||
|
||||
Multi-GPU: `"H100:4"` for 4x H100s.
|
||||
|
||||
## Script pattern
|
||||
|
||||
```python
|
||||
import modal
|
||||
|
||||
app = modal.App("experiment")
|
||||
image = modal.Image.debian_slim(python_version="3.11").pip_install("torch==2.8.0")
|
||||
|
||||
@app.function(gpu="A100", image=image, timeout=600)
|
||||
def train():
|
||||
import torch
|
||||
# training code here
|
||||
|
||||
@app.local_entrypoint()
|
||||
def main():
|
||||
train.remote()
|
||||
```
|
||||
|
||||
## When to use
|
||||
|
||||
- Stateless burst GPU jobs (training, inference, benchmarks)
|
||||
- No persistent state needed between runs
|
||||
- Check availability: `command -v modal`
|
||||
27
skills/preview/SKILL.md
Normal file
27
skills/preview/SKILL.md
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: preview
|
||||
description: Preview Markdown, LaTeX, PDF, or code artifacts in the browser or as PDF. Use when the user wants to review a written artifact, export a report, or view a rendered document.
|
||||
---
|
||||
|
||||
# Preview
|
||||
|
||||
Use the `/preview` command to render and open artifacts.
|
||||
|
||||
## Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `/preview` | Preview the most recent artifact in the browser |
|
||||
| `/preview --file <path>` | Preview a specific file |
|
||||
| `/preview-browser` | Force browser preview |
|
||||
| `/preview-pdf` | Export to PDF via pandoc + LaTeX |
|
||||
| `/preview-clear-cache` | Clear rendered preview cache |
|
||||
|
||||
## Fallback
|
||||
|
||||
If the preview commands are not available, use bash:
|
||||
|
||||
```bash
|
||||
open <file.md> # macOS — opens in default app
|
||||
open <file.pdf> # macOS — opens in Preview
|
||||
```
|
||||
48
skills/runpod-compute/SKILL.md
Normal file
48
skills/runpod-compute/SKILL.md
Normal file
@@ -0,0 +1,48 @@
|
||||
---
|
||||
name: runpod-compute
|
||||
description: Provision and manage GPU pods on RunPod for long-running experiments. Use when the user needs persistent GPU compute with SSH access, large datasets, or multi-step experiments.
|
||||
---
|
||||
|
||||
# RunPod Compute
|
||||
|
||||
Use `runpodctl` CLI for persistent GPU pods with SSH access.
|
||||
|
||||
## Setup
|
||||
|
||||
```bash
|
||||
brew install runpod/runpodctl/runpodctl # macOS
|
||||
runpodctl config --apiKey=YOUR_KEY
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `runpodctl create pod --gpuType "NVIDIA A100 80GB PCIe" --imageName "runpod/pytorch:2.4.0-py3.11-cuda12.4.1-devel-ubuntu22.04" --name experiment` | Create a pod |
|
||||
| `runpodctl get pod` | List all pods |
|
||||
| `runpodctl stop pod <id>` | Stop (preserves volume) |
|
||||
| `runpodctl start pod <id>` | Resume a stopped pod |
|
||||
| `runpodctl remove pod <id>` | Terminate and delete |
|
||||
| `runpodctl gpu list` | List available GPU types and prices |
|
||||
| `runpodctl send <file>` | Transfer files to/from pods |
|
||||
| `runpodctl receive <code>` | Receive transferred files |
|
||||
|
||||
## SSH access
|
||||
|
||||
```bash
|
||||
ssh root@<IP> -p <PORT> -i ~/.ssh/id_ed25519
|
||||
```
|
||||
|
||||
Get connection details from `runpodctl get pod <id>`. Pods must expose port `22/tcp`.
|
||||
|
||||
## GPU types
|
||||
|
||||
`NVIDIA GeForce RTX 4090`, `NVIDIA RTX A6000`, `NVIDIA A40`, `NVIDIA A100 80GB PCIe`, `NVIDIA H100 80GB HBM3`
|
||||
|
||||
## When to use
|
||||
|
||||
- Long-running experiments needing persistent state
|
||||
- Large dataset processing
|
||||
- Multi-step work with SSH access between iterations
|
||||
- Always stop or remove pods after experiments
|
||||
- Check availability: `command -v runpodctl`
|
||||
26
skills/session-search/SKILL.md
Normal file
26
skills/session-search/SKILL.md
Normal file
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: session-search
|
||||
description: Search past Feynman session transcripts to recover prior work, conversations, and research context. Use when the user references something from a previous session, asks "what did we do before", or when you suspect relevant past context exists.
|
||||
---
|
||||
|
||||
# Session Search
|
||||
|
||||
Use the `/search` command to search prior Feynman sessions interactively, or search session JSONL files directly via bash.
|
||||
|
||||
## Interactive search
|
||||
|
||||
```
|
||||
/search <query>
|
||||
```
|
||||
|
||||
Opens the session search UI. Supports `resume <sessionPath>` to continue a found session.
|
||||
|
||||
## Direct file search
|
||||
|
||||
Session transcripts are stored as JSONL files in `~/.feynman/sessions/`. Each line is a JSON record with `type` (session, message, model_change) and `message.content` fields.
|
||||
|
||||
```bash
|
||||
grep -ril "scaling laws" ~/.feynman/sessions/
|
||||
```
|
||||
|
||||
For structured search across sessions, use the interactive `/search` command.
|
||||
19
skills/valichord-validation/SKILL.md
Normal file
19
skills/valichord-validation/SKILL.md
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
name: valichord-validation
|
||||
description: Integrate with ValiChord to submit a replication as a cryptographically verified validator attestation, discover studies awaiting independent validation, query Harmony Records and reproducibility badges, or assist researchers in preparing a study for the validation pipeline. Feynman operates as a first-class AI validator — publishing a validator profile, claiming studies, running the blind commit-reveal protocol, and accumulating a verifiable per-discipline reputation. Also surfaces reproducibility status during /deepresearch and literature reviews via ValiChord's HTTP Gateway.
|
||||
---
|
||||
|
||||
# ValiChord Validation
|
||||
|
||||
Run the `/valichord` workflow. Read the prompt template at `prompts/valichord.md` for the full procedure.
|
||||
|
||||
ValiChord is a four-DNA Holochain system for scientific reproducibility verification. Feynman integrates at four points:
|
||||
- As a **validator agent** — running `/replicate` then submitting findings as a sealed attestation into the blind commit-reveal protocol, earning reproducibility badges for researchers and building Feynman's own verifiable per-discipline reputation (Provisional → Certified → Senior)
|
||||
- As a **proactive discovery agent** — querying the pending study queue by discipline, assessing difficulty, and autonomously claiming appropriate validation work without waiting to be assigned
|
||||
- As a **researcher's assistant** — helping prepare studies for submission: registering protocols, taking cryptographic data snapshots, and running the Repository Readiness Checker to identify and fix reproducibility failure modes before validation begins
|
||||
- As a **research query tool** — checking whether a study carries a Harmony Record or reproducibility badge (Gold/Silver/Bronze) via ValiChord's HTTP Gateway, for use during `/deepresearch` or literature reviews
|
||||
|
||||
Output: a Harmony Record — an immutable, publicly accessible cryptographic proof of independent reproducibility written to the ValiChord Governance DHT — plus automatic badge issuance and an updated validator reputation score.
|
||||
|
||||
Live demo (commit-reveal cycle end-to-end): https://youtu.be/DQ5wZSD1YEw
|
||||
ValiChord repo: https://github.com/topeuph-ai/ValiChord
|
||||
@@ -1,5 +1,5 @@
|
||||
import { createHash } from "node:crypto";
|
||||
import { existsSync, mkdirSync, readdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||
import { existsSync, mkdirSync, readdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
|
||||
import { dirname, relative, resolve } from "node:path";
|
||||
|
||||
import { getBootstrapStatePath } from "../config/paths.js";
|
||||
@@ -64,27 +64,76 @@ function listFiles(root: string): string[] {
|
||||
return files.sort();
|
||||
}
|
||||
|
||||
function removeEmptyParentDirectories(path: string, stopAt: string): void {
|
||||
let current = dirname(path);
|
||||
while (current.startsWith(stopAt) && current !== stopAt) {
|
||||
if (!existsSync(current)) {
|
||||
current = dirname(current);
|
||||
continue;
|
||||
}
|
||||
if (readdirSync(current).length > 0) {
|
||||
return;
|
||||
}
|
||||
rmSync(current, { recursive: true, force: true });
|
||||
current = dirname(current);
|
||||
}
|
||||
}
|
||||
|
||||
function syncManagedFiles(
|
||||
sourceRoot: string,
|
||||
targetRoot: string,
|
||||
scope: string,
|
||||
state: BootstrapState,
|
||||
result: BootstrapSyncResult,
|
||||
): void {
|
||||
const sourcePaths = new Set(listFiles(sourceRoot).map((sourcePath) => relative(sourceRoot, sourcePath)));
|
||||
|
||||
for (const targetPath of listFiles(targetRoot)) {
|
||||
const key = relative(targetRoot, targetPath);
|
||||
if (sourcePaths.has(key)) continue;
|
||||
|
||||
const scopedKey = `${scope}:${key}`;
|
||||
const previous = state.files[scopedKey] ?? state.files[key];
|
||||
if (!previous) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!existsSync(targetPath)) {
|
||||
delete state.files[scopedKey];
|
||||
delete state.files[key];
|
||||
continue;
|
||||
}
|
||||
|
||||
const currentTargetText = readFileSync(targetPath, "utf8");
|
||||
const currentTargetHash = sha256(currentTargetText);
|
||||
if (currentTargetHash !== previous.lastAppliedTargetHash) {
|
||||
result.skipped.push(key);
|
||||
continue;
|
||||
}
|
||||
|
||||
rmSync(targetPath, { force: true });
|
||||
removeEmptyParentDirectories(targetPath, targetRoot);
|
||||
delete state.files[scopedKey];
|
||||
delete state.files[key];
|
||||
}
|
||||
|
||||
for (const sourcePath of listFiles(sourceRoot)) {
|
||||
const key = relative(sourceRoot, sourcePath);
|
||||
const targetPath = resolve(targetRoot, key);
|
||||
const sourceText = readFileSync(sourcePath, "utf8");
|
||||
const sourceHash = sha256(sourceText);
|
||||
const previous = state.files[key];
|
||||
const scopedKey = `${scope}:${key}`;
|
||||
const previous = state.files[scopedKey] ?? state.files[key];
|
||||
|
||||
mkdirSync(dirname(targetPath), { recursive: true });
|
||||
|
||||
if (!existsSync(targetPath)) {
|
||||
writeFileSync(targetPath, sourceText, "utf8");
|
||||
state.files[key] = {
|
||||
state.files[scopedKey] = {
|
||||
lastAppliedSourceHash: sourceHash,
|
||||
lastAppliedTargetHash: sourceHash,
|
||||
};
|
||||
delete state.files[key];
|
||||
result.copied.push(key);
|
||||
continue;
|
||||
}
|
||||
@@ -93,10 +142,11 @@ function syncManagedFiles(
|
||||
const currentTargetHash = sha256(currentTargetText);
|
||||
|
||||
if (currentTargetHash === sourceHash) {
|
||||
state.files[key] = {
|
||||
state.files[scopedKey] = {
|
||||
lastAppliedSourceHash: sourceHash,
|
||||
lastAppliedTargetHash: currentTargetHash,
|
||||
};
|
||||
delete state.files[key];
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -111,10 +161,11 @@ function syncManagedFiles(
|
||||
}
|
||||
|
||||
writeFileSync(targetPath, sourceText, "utf8");
|
||||
state.files[key] = {
|
||||
state.files[scopedKey] = {
|
||||
lastAppliedSourceHash: sourceHash,
|
||||
lastAppliedTargetHash: sourceHash,
|
||||
};
|
||||
delete state.files[key];
|
||||
result.updated.push(key);
|
||||
}
|
||||
}
|
||||
@@ -128,8 +179,9 @@ export function syncBundledAssets(appRoot: string, agentDir: string): BootstrapS
|
||||
skipped: [],
|
||||
};
|
||||
|
||||
syncManagedFiles(resolve(appRoot, ".feynman", "themes"), resolve(agentDir, "themes"), state, result);
|
||||
syncManagedFiles(resolve(appRoot, ".feynman", "agents"), resolve(agentDir, "agents"), state, result);
|
||||
syncManagedFiles(resolve(appRoot, ".feynman", "themes"), resolve(agentDir, "themes"), "themes", state, result);
|
||||
syncManagedFiles(resolve(appRoot, ".feynman", "agents"), resolve(agentDir, "agents"), "agents", state, result);
|
||||
syncManagedFiles(resolve(appRoot, "skills"), resolve(agentDir, "skills"), "skills", state, result);
|
||||
|
||||
writeBootstrapState(statePath, state);
|
||||
return result;
|
||||
|
||||
25
src/cli.ts
25
src/cli.ts
@@ -11,7 +11,7 @@ import {
|
||||
login as loginAlpha,
|
||||
logout as logoutAlpha,
|
||||
} from "@companion-ai/alpha-hub/lib";
|
||||
import { AuthStorage, DefaultPackageManager, ModelRegistry, SettingsManager } from "@mariozechner/pi-coding-agent";
|
||||
import { DefaultPackageManager, SettingsManager } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import { syncBundledAssets } from "./bootstrap/sync.js";
|
||||
import { ensureFeynmanHome, getDefaultSessionDir, getFeynmanAgentDir, getFeynmanHome } from "./config/paths.js";
|
||||
@@ -19,6 +19,7 @@ import { launchPiChat } from "./pi/launch.js";
|
||||
import { CORE_PACKAGE_SOURCES, getOptionalPackagePresetSources, listOptionalPackagePresets } from "./pi/package-presets.js";
|
||||
import { normalizeFeynmanSettings, normalizeThinkingLevel, parseModelSpec } from "./pi/settings.js";
|
||||
import {
|
||||
authenticateModelProvider,
|
||||
getCurrentModelSpec,
|
||||
loginModelProvider,
|
||||
logoutModelProvider,
|
||||
@@ -29,7 +30,8 @@ import { printSearchStatus } from "./search/commands.js";
|
||||
import { runDoctor, runStatus } from "./setup/doctor.js";
|
||||
import { setupPreviewDependencies } from "./setup/preview.js";
|
||||
import { runSetup } from "./setup/setup.js";
|
||||
import { printAsciiHeader, printInfo, printPanel, printSection } from "./ui/terminal.js";
|
||||
import { ASH, printAsciiHeader, printInfo, printPanel, printSection, RESET, SAGE } from "./ui/terminal.js";
|
||||
import { createModelRegistry } from "./model/registry.js";
|
||||
import {
|
||||
cliCommandSections,
|
||||
formatCliWorkflowUsage,
|
||||
@@ -43,7 +45,7 @@ const TOP_LEVEL_COMMANDS = new Set(topLevelCommandNames);
|
||||
function printHelpLine(usage: string, description: string): void {
|
||||
const width = 30;
|
||||
const padding = Math.max(1, width - usage.length);
|
||||
printInfo(`${usage}${" ".repeat(padding)}${description}`);
|
||||
console.log(` ${SAGE}${usage}${RESET}${" ".repeat(padding)}${ASH}${description}${RESET}`);
|
||||
}
|
||||
|
||||
function printHelp(appRoot: string): void {
|
||||
@@ -124,7 +126,13 @@ async function handleModelCommand(subcommand: string | undefined, args: string[]
|
||||
}
|
||||
|
||||
if (subcommand === "login") {
|
||||
if (args[0]) {
|
||||
// Specific provider given - use OAuth login directly
|
||||
await loginModelProvider(feynmanAuthPath, args[0], feynmanSettingsPath);
|
||||
} else {
|
||||
// No provider specified - show auth method choice
|
||||
await authenticateModelProvider(feynmanAuthPath, feynmanSettingsPath);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -293,6 +301,7 @@ export async function main(): Promise<void> {
|
||||
cwd: { type: "string" },
|
||||
doctor: { type: "boolean" },
|
||||
help: { type: "boolean" },
|
||||
version: { type: "boolean" },
|
||||
"alpha-login": { type: "boolean" },
|
||||
"alpha-logout": { type: "boolean" },
|
||||
"alpha-status": { type: "boolean" },
|
||||
@@ -310,6 +319,14 @@ export async function main(): Promise<void> {
|
||||
return;
|
||||
}
|
||||
|
||||
if (values.version) {
|
||||
if (feynmanVersion) {
|
||||
console.log(feynmanVersion);
|
||||
return;
|
||||
}
|
||||
throw new Error("Unable to determine the installed Feynman version.");
|
||||
}
|
||||
|
||||
const workingDir = resolve(values.cwd ?? process.cwd());
|
||||
const sessionDir = resolve(values["session-dir"] ?? getDefaultSessionDir(feynmanHome));
|
||||
const feynmanSettingsPath = resolve(feynmanAgentDir, "settings.json");
|
||||
@@ -418,7 +435,7 @@ export async function main(): Promise<void> {
|
||||
|
||||
const explicitModelSpec = values.model ?? process.env.FEYNMAN_MODEL;
|
||||
if (explicitModelSpec) {
|
||||
const modelRegistry = new ModelRegistry(AuthStorage.create(feynmanAuthPath));
|
||||
const modelRegistry = createModelRegistry(feynmanAuthPath);
|
||||
const explicitModel = parseModelSpec(explicitModelSpec, modelRegistry);
|
||||
if (!explicitModel) {
|
||||
throw new Error(`Unknown model: ${explicitModelSpec}`);
|
||||
|
||||
10
src/index.ts
10
src/index.ts
@@ -1,6 +1,12 @@
|
||||
import { main } from "./cli.js";
|
||||
import { ensureSupportedNodeVersion } from "./system/node-version.js";
|
||||
|
||||
main().catch((error) => {
|
||||
/**
 * Entry point wrapper. Checks the Node.js version first, then dynamically
 * imports and runs the CLI — presumably so the version check fires before
 * cli.js (and its dependency graph) is parsed by an unsupported runtime;
 * TODO confirm that intent against ensureSupportedNodeVersion.
 */
async function run(): Promise<void> {
  ensureSupportedNodeVersion();
  const { main } = await import("./cli.js");
  await main();
}
|
||||
|
||||
// Run the CLI; on any uncaught failure, print a concise message (message only
// for Error instances, string form otherwise) and exit non-zero.
run().catch((error) => {
  console.error(error instanceof Error ? error.message : String(error));
  process.exitCode = 1;
});
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent";
|
||||
import { createModelRegistry } from "./registry.js";
|
||||
|
||||
type ModelRecord = {
|
||||
provider: string;
|
||||
@@ -166,10 +166,6 @@ function sortProviders(left: ProviderStatus, right: ProviderStatus): number {
|
||||
return left.label.localeCompare(right.label);
|
||||
}
|
||||
|
||||
function createModelRegistry(authPath: string): ModelRegistry {
|
||||
return new ModelRegistry(AuthStorage.create(authPath));
|
||||
}
|
||||
|
||||
export function getAvailableModelRecords(authPath: string): ModelRecord[] {
|
||||
return createModelRegistry(authPath)
|
||||
.getAvailable()
|
||||
@@ -258,7 +254,9 @@ export function buildModelStatusSnapshotFromRecords(
|
||||
const guidance: string[] = [];
|
||||
if (available.length === 0) {
|
||||
guidance.push("No authenticated Pi models are available yet.");
|
||||
guidance.push("Run `feynman model login <provider>` or add provider credentials that Pi can see.");
|
||||
guidance.push(
|
||||
"Run `feynman model login <provider>` (OAuth) or configure an API key (env var, auth.json, or models.json for custom providers).",
|
||||
);
|
||||
guidance.push("After auth is in place, rerun `feynman model list` or `feynman setup model`.");
|
||||
} else if (!current) {
|
||||
guidance.push(`No default research model is set. Recommended: ${recommended?.spec}.`);
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
import { AuthStorage } from "@mariozechner/pi-coding-agent";
|
||||
import { writeFileSync } from "node:fs";
|
||||
import { exec as execCallback } from "node:child_process";
|
||||
import { promisify } from "node:util";
|
||||
|
||||
import { readJson } from "../pi/settings.js";
|
||||
import { promptChoice, promptText } from "../setup/prompts.js";
|
||||
import { openUrl } from "../system/open-url.js";
|
||||
import { printInfo, printSection, printSuccess, printWarning } from "../ui/terminal.js";
|
||||
import {
|
||||
buildModelStatusSnapshotFromRecords,
|
||||
@@ -11,6 +14,10 @@ import {
|
||||
getSupportedModelRecords,
|
||||
type ModelStatusSnapshot,
|
||||
} from "./catalog.js";
|
||||
import { createModelRegistry, getModelsJsonPath } from "./registry.js";
|
||||
import { upsertProviderBaseUrl, upsertProviderConfig } from "./models-json.js";
|
||||
|
||||
const exec = promisify(execCallback);
|
||||
|
||||
function collectModelStatus(settingsPath: string, authPath: string): ModelStatusSnapshot {
|
||||
return buildModelStatusSnapshotFromRecords(
|
||||
@@ -57,6 +64,453 @@ async function selectOAuthProvider(authPath: string, action: "login" | "logout")
|
||||
return providers[selection];
|
||||
}
|
||||
|
||||
/** A provider that can be configured with a plain API key (no OAuth flow). */
type ApiKeyProviderInfo = {
  // Provider id as stored in auth storage / provider config
  // ("__custom__" is a UI-only sentinel, see configureApiKeyProvider).
  id: string;
  // Human-readable name shown in the chooser.
  label: string;
  // Conventional environment variable for this provider's key, when one exists.
  envVar?: string;
};

/**
 * Known API-key providers offered in the selection menu. The leading
 * "__custom__" entry routes to the custom baseUrl + API key flow instead of a
 * concrete provider.
 */
const API_KEY_PROVIDERS: ApiKeyProviderInfo[] = [
  { id: "__custom__", label: "Custom provider (baseUrl + API key)" },
  { id: "openai", label: "OpenAI Platform API", envVar: "OPENAI_API_KEY" },
  { id: "anthropic", label: "Anthropic API", envVar: "ANTHROPIC_API_KEY" },
  { id: "google", label: "Google Gemini API", envVar: "GEMINI_API_KEY" },
  { id: "openrouter", label: "OpenRouter", envVar: "OPENROUTER_API_KEY" },
  { id: "zai", label: "Z.AI / GLM", envVar: "ZAI_API_KEY" },
  { id: "kimi-coding", label: "Kimi / Moonshot", envVar: "KIMI_API_KEY" },
  { id: "minimax", label: "MiniMax", envVar: "MINIMAX_API_KEY" },
  { id: "minimax-cn", label: "MiniMax (China)", envVar: "MINIMAX_CN_API_KEY" },
  { id: "mistral", label: "Mistral", envVar: "MISTRAL_API_KEY" },
  { id: "groq", label: "Groq", envVar: "GROQ_API_KEY" },
  { id: "xai", label: "xAI", envVar: "XAI_API_KEY" },
  { id: "cerebras", label: "Cerebras", envVar: "CEREBRAS_API_KEY" },
  { id: "vercel-ai-gateway", label: "Vercel AI Gateway", envVar: "AI_GATEWAY_API_KEY" },
  { id: "huggingface", label: "Hugging Face", envVar: "HF_TOKEN" },
  { id: "opencode", label: "OpenCode Zen", envVar: "OPENCODE_API_KEY" },
  { id: "opencode-go", label: "OpenCode Go", envVar: "OPENCODE_API_KEY" },
  { id: "azure-openai-responses", label: "Azure OpenAI (Responses)", envVar: "AZURE_OPENAI_API_KEY" },
];
|
||||
|
||||
async function selectApiKeyProvider(): Promise<ApiKeyProviderInfo | undefined> {
|
||||
const choices = API_KEY_PROVIDERS.map(
|
||||
(provider) => `${provider.id} — ${provider.label}${provider.envVar ? ` (${provider.envVar})` : ""}`,
|
||||
);
|
||||
choices.push("Cancel");
|
||||
const selection = await promptChoice("Choose an API-key provider:", choices, 0);
|
||||
if (selection >= API_KEY_PROVIDERS.length) {
|
||||
return undefined;
|
||||
}
|
||||
return API_KEY_PROVIDERS[selection];
|
||||
}
|
||||
|
||||
/** User-entered configuration for a custom (non-catalog) model provider. */
type CustomProviderSetup = {
  // Normalized provider id (see normalizeProviderId).
  providerId: string;
  // One or more model ids offered by the provider (deduplicated, trimmed).
  modelIds: string[];
  // Normalized base URL (no trailing slash; see normalizeCustomProviderBaseUrl).
  baseUrl: string;
  api: "openai-completions" | "openai-responses" | "anthropic-messages" | "google-generative-ai";
  // Literal secret, env var name, or `!command` resolver (see resolveApiKeyConfig).
  apiKeyConfig: string;
  /**
   * If true, add `Authorization: Bearer <apiKey>` to requests in addition to
   * whatever the API mode uses (useful for proxies that implement /v1/messages
   * but expect Bearer auth instead of x-api-key).
   */
  authHeader: boolean;
};
|
||||
|
||||
function normalizeProviderId(value: string): string {
|
||||
return value.trim().toLowerCase().replace(/\s+/g, "-");
|
||||
}
|
||||
|
||||
function normalizeModelIds(value: string): string[] {
|
||||
const items = value
|
||||
.split(",")
|
||||
.map((entry) => entry.trim())
|
||||
.filter(Boolean);
|
||||
return Array.from(new Set(items));
|
||||
}
|
||||
|
||||
function normalizeBaseUrl(value: string): string {
|
||||
return value.trim().replace(/\/+$/, "");
|
||||
}
|
||||
|
||||
function normalizeCustomProviderBaseUrl(
|
||||
api: CustomProviderSetup["api"],
|
||||
baseUrl: string,
|
||||
): { baseUrl: string; note?: string } {
|
||||
const normalized = normalizeBaseUrl(baseUrl);
|
||||
if (!normalized) {
|
||||
return { baseUrl: normalized };
|
||||
}
|
||||
|
||||
// Pi expects Anthropic baseUrl without `/v1` (it appends `/v1/messages` internally).
|
||||
if (api === "anthropic-messages" && /\/v1$/i.test(normalized)) {
|
||||
return { baseUrl: normalized.replace(/\/v1$/i, ""), note: "Stripped trailing /v1 for Anthropic mode." };
|
||||
}
|
||||
|
||||
return { baseUrl: normalized };
|
||||
}
|
||||
|
||||
function isLocalBaseUrl(baseUrl: string): boolean {
|
||||
return /^(https?:\/\/)?(localhost|127\.0\.0\.1|0\.0\.0\.0)(:|\/|$)/i.test(baseUrl);
|
||||
}
|
||||
|
||||
async function resolveApiKeyConfig(apiKeyConfig: string): Promise<string | undefined> {
|
||||
const trimmed = apiKeyConfig.trim();
|
||||
if (!trimmed) return undefined;
|
||||
|
||||
if (trimmed.startsWith("!")) {
|
||||
const command = trimmed.slice(1).trim();
|
||||
if (!command) return undefined;
|
||||
const shell = process.platform === "win32" ? process.env.ComSpec || "cmd.exe" : process.env.SHELL || "/bin/sh";
|
||||
try {
|
||||
const { stdout } = await exec(command, { shell, maxBuffer: 1024 * 1024 });
|
||||
const value = stdout.trim();
|
||||
return value || undefined;
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
const envValue = process.env[trimmed];
|
||||
if (typeof envValue === "string" && envValue.trim()) {
|
||||
return envValue.trim();
|
||||
}
|
||||
|
||||
// Fall back to literal value.
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
async function bestEffortFetchOpenAiModelIds(
|
||||
baseUrl: string,
|
||||
apiKey: string,
|
||||
authHeader: boolean,
|
||||
): Promise<string[] | undefined> {
|
||||
const url = `${baseUrl}/models`;
|
||||
const controller = new AbortController();
|
||||
const timer = setTimeout(() => controller.abort(), 5000);
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
method: "GET",
|
||||
headers: authHeader ? { Authorization: `Bearer ${apiKey}` } : undefined,
|
||||
signal: controller.signal,
|
||||
});
|
||||
if (!response.ok) {
|
||||
return undefined;
|
||||
}
|
||||
const json = (await response.json()) as any;
|
||||
if (!Array.isArray(json?.data)) return undefined;
|
||||
return json.data
|
||||
.map((entry: any) => (typeof entry?.id === "string" ? entry.id : undefined))
|
||||
.filter(Boolean);
|
||||
} catch {
|
||||
return undefined;
|
||||
} finally {
|
||||
clearTimeout(timer);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Interactive wizard that collects a custom model-provider configuration:
 * provider id → API mode → base URL → Authorization-header choice → API key
 * resolver → model ids. Returns the collected setup, or undefined whenever the
 * user cancels or supplies invalid input.
 */
async function promptCustomProviderSetup(): Promise<CustomProviderSetup | undefined> {
  printSection("Custom Provider");
  const providerIdInput = await promptText("Provider id (e.g. my-proxy)", "custom");
  const providerId = normalizeProviderId(providerIdInput);
  // "__custom__" is reserved as the menu sentinel in API_KEY_PROVIDERS.
  if (!providerId || providerId === "__custom__") {
    printWarning("Invalid provider id.");
    return undefined;
  }

  const apiChoices = [
    "openai-completions — OpenAI Chat Completions compatible (e.g. /v1/chat/completions)",
    "openai-responses — OpenAI Responses compatible (e.g. /v1/responses)",
    "anthropic-messages — Anthropic Messages compatible (e.g. /v1/messages)",
    "google-generative-ai — Google Generative AI compatible (generativelanguage.googleapis.com)",
    "Cancel",
  ];
  const apiSelection = await promptChoice("API mode:", apiChoices, 0);
  // Index 4 is "Cancel".
  if (apiSelection >= 4) {
    return undefined;
  }
  const api = ["openai-completions", "openai-responses", "anthropic-messages", "google-generative-ai"][apiSelection] as CustomProviderSetup["api"];

  // Mode-specific default: local Ollama-style endpoint for OpenAI modes,
  // official hosts for Anthropic/Google.
  const baseUrlDefault = ((): string => {
    if (api === "openai-completions" || api === "openai-responses") return "http://localhost:11434/v1";
    if (api === "anthropic-messages") return "https://api.anthropic.com";
    if (api === "google-generative-ai") return "https://generativelanguage.googleapis.com";
    return "http://localhost:11434/v1";
  })();
  const baseUrlPrompt =
    api === "openai-completions" || api === "openai-responses"
      ? "Base URL (include /v1 for OpenAI-compatible endpoints)"
      : api === "anthropic-messages"
        ? "Base URL (no trailing /, no /v1)"
        : "Base URL (no trailing /)";
  const baseUrlRaw = await promptText(baseUrlPrompt, baseUrlDefault);
  const { baseUrl, note: baseUrlNote } = normalizeCustomProviderBaseUrl(api, baseUrlRaw);
  if (!baseUrl) {
    printWarning("Base URL is required.");
    return undefined;
  }
  if (baseUrlNote) {
    printInfo(baseUrlNote);
  }

  // Authorization-header choice. Defaults differ by mode: OpenAI modes default
  // to Bearer auth for remote hosts (local servers usually need none);
  // Anthropic mode defaults to the extra Bearer header only for local proxies.
  let authHeader = false;
  if (api === "openai-completions" || api === "openai-responses") {
    const defaultAuthHeader = !isLocalBaseUrl(baseUrl);
    const authHeaderChoices = [
      "Yes (send Authorization: Bearer <apiKey>)",
      "No (common for local Ollama/vLLM/LM Studio)",
      "Cancel",
    ];
    const authHeaderSelection = await promptChoice(
      "Send Authorization header?",
      authHeaderChoices,
      defaultAuthHeader ? 0 : 1,
    );
    if (authHeaderSelection >= 2) {
      return undefined;
    }
    authHeader = authHeaderSelection === 0;
  }
  if (api === "anthropic-messages") {
    const defaultAuthHeader = isLocalBaseUrl(baseUrl);
    const authHeaderChoices = [
      "Yes (also send Authorization: Bearer <apiKey>)",
      "No (standard Anthropic uses x-api-key only)",
      "Cancel",
    ];
    const authHeaderSelection = await promptChoice(
      "Also send Authorization header?",
      authHeaderChoices,
      defaultAuthHeader ? 0 : 1,
    );
    if (authHeaderSelection >= 2) {
      return undefined;
    }
    authHeader = authHeaderSelection === 0;
  }

  printInfo("API key value supports:");
  printInfo("  - literal secret (stored in models.json)");
  printInfo("  - env var name (resolved at runtime)");
  printInfo("  - !command (executes and uses stdout)");
  const apiKeyConfigRaw = (await promptText("API key / resolver", "")).trim();
  // Pi requires a non-empty apiKey for custom providers; "local" is a placeholder.
  const apiKeyConfig = apiKeyConfigRaw || "local";
  if (!apiKeyConfigRaw) {
    printInfo("Using placeholder apiKey value (required by Pi for custom providers).");
  }

  let modelIdsDefault = "my-model";
  if (api === "openai-completions" || api === "openai-responses") {
    // Best-effort: hit /models so users can pick correct ids (especially for proxies).
    const resolvedKey = await resolveApiKeyConfig(apiKeyConfig);
    const modelIds = resolvedKey ? await bestEffortFetchOpenAiModelIds(baseUrl, resolvedKey, authHeader) : undefined;
    if (modelIds && modelIds.length > 0) {
      const sample = modelIds.slice(0, 10).join(", ");
      printInfo(`Detected models: ${sample}${modelIds.length > 10 ? ", ..." : ""}`);
      // Prefer a "sonnet" id when the endpoint exposes one; else the first listed.
      modelIdsDefault = modelIds.includes("sonnet") ? "sonnet" : modelIds[0]!;
    }
  }

  const modelIdsRaw = await promptText("Model id(s) (comma-separated)", modelIdsDefault);
  const modelIds = normalizeModelIds(modelIdsRaw);
  if (modelIds.length === 0) {
    printWarning("At least one model id is required.");
    return undefined;
  }

  return { providerId, modelIds, baseUrl, api, apiKeyConfig, authHeader };
}
|
||||
|
||||
/**
 * Best-effort verification of a freshly configured custom provider:
 *  1) models.json loads without error,
 *  2) the registry contains the configured provider/model ids,
 *  3) the provider counts as authenticated/available,
 *  4) the API key resolves,
 *  5) a mode-specific network probe of the provider's model-listing endpoint.
 * Each failed step prints a warning (plus tips where useful) and returns;
 * nothing here throws to the caller.
 */
async function verifyCustomProvider(setup: CustomProviderSetup, authPath: string): Promise<void> {
  const registry = createModelRegistry(authPath);
  const modelsError = registry.getError();
  if (modelsError) {
    printWarning("Verification: models.json failed to load.");
    for (const line of modelsError.split("\n")) {
      printInfo(`  ${line}`);
    }
    return;
  }

  // At least one configured model id must exist under this provider.
  const all = registry.getAll();
  const hasModel = setup.modelIds.some((id) => all.some((model) => model.provider === setup.providerId && model.id === id));
  if (!hasModel) {
    printWarning("Verification: model registry does not contain the configured provider/model ids.");
    return;
  }

  const available = registry.getAvailable();
  const hasAvailable = setup.modelIds.some((id) =>
    available.some((model) => model.provider === setup.providerId && model.id === id),
  );
  if (!hasAvailable) {
    printWarning("Verification: provider is not considered authenticated/available.");
    return;
  }

  const apiKey = await registry.getApiKeyForProvider(setup.providerId);
  if (!apiKey) {
    printWarning("Verification: API key could not be resolved (check env var name / !command).");
    return;
  }

  // All network probes below share this abort timeout.
  const timeoutMs = 8000;

  // Best-effort network check for OpenAI-compatible endpoints
  if (setup.api === "openai-completions" || setup.api === "openai-responses") {
    const url = `${setup.baseUrl}/models`;
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeoutMs);
    try {
      const response = await fetch(url, {
        method: "GET",
        headers: setup.authHeader ? { Authorization: `Bearer ${apiKey}` } : undefined,
        signal: controller.signal,
      });
      if (!response.ok) {
        printWarning(`Verification: ${url} returned ${response.status} ${response.statusText}`);
        return;
      }
      const json = (await response.json()) as unknown;
      const modelIds = Array.isArray((json as any)?.data)
        ? (json as any).data.map((entry: any) => (typeof entry?.id === "string" ? entry.id : undefined)).filter(Boolean)
        : [];
      // Only flag missing ids when the endpoint actually listed something;
      // an empty/absent list is treated as inconclusive, not a failure.
      const missing = setup.modelIds.filter((id) => modelIds.length > 0 && !modelIds.includes(id));
      if (modelIds.length > 0 && missing.length > 0) {
        printWarning(`Verification: /models does not list configured model id(s): ${missing.join(", ")}`);
        return;
      }
      printSuccess("Verification: endpoint reachable and authorized.");
    } catch (error) {
      printWarning(`Verification: failed to reach ${url}: ${error instanceof Error ? error.message : String(error)}`);
    } finally {
      clearTimeout(timer);
    }
    return;
  }

  if (setup.api === "anthropic-messages") {
    const url = `${setup.baseUrl}/v1/models?limit=1`;
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeoutMs);
    try {
      const headers: Record<string, string> = {
        "x-api-key": apiKey,
        "anthropic-version": "2023-06-01",
      };
      // Optional extra Bearer header for proxies (see CustomProviderSetup.authHeader).
      if (setup.authHeader) {
        headers.Authorization = `Bearer ${apiKey}`;
      }
      const response = await fetch(url, {
        method: "GET",
        headers,
        signal: controller.signal,
      });
      if (!response.ok) {
        printWarning(`Verification: ${url} returned ${response.status} ${response.statusText}`);
        if (response.status === 404) {
          printInfo("  Tip: For Anthropic mode, use a base URL without /v1 (e.g. https://api.anthropic.com).");
        }
        if ((response.status === 401 || response.status === 403) && !setup.authHeader) {
          printInfo("  Tip: Some proxies require `Authorization: Bearer <apiKey>` even in Anthropic mode.");
        }
        return;
      }
      printSuccess("Verification: endpoint reachable and authorized.");
    } catch (error) {
      printWarning(`Verification: failed to reach ${url}: ${error instanceof Error ? error.message : String(error)}`);
    } finally {
      clearTimeout(timer);
    }
    return;
  }

  if (setup.api === "google-generative-ai") {
    const url = `${setup.baseUrl}/v1beta/models?key=${encodeURIComponent(apiKey)}`;
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeoutMs);
    try {
      const response = await fetch(url, { method: "GET", signal: controller.signal });
      if (!response.ok) {
        printWarning(`Verification: ${url} returned ${response.status} ${response.statusText}`);
        return;
      }
      printSuccess("Verification: endpoint reachable and authorized.");
    } catch (error) {
      printWarning(`Verification: failed to reach ${url}: ${error instanceof Error ? error.message : String(error)}`);
    } finally {
      clearTimeout(timer);
    }
    return;
  }

  printInfo("Verification: skipped network probe for this API mode.");
}
|
||||
|
||||
/**
 * Runs the API-key configuration flow. For the "__custom__" sentinel this
 * defers to the custom-provider wizard and persists the result to models.json
 * (then runs best-effort verification); for known providers it stores the
 * pasted key in auth storage and optionally records a baseUrl override.
 * Returns true when a provider was actually configured.
 */
async function configureApiKeyProvider(authPath: string): Promise<boolean> {
  const provider = await selectApiKeyProvider();
  if (!provider) {
    printInfo("API key setup cancelled.");
    return false;
  }

  if (provider.id === "__custom__") {
    const setup = await promptCustomProviderSetup();
    if (!setup) {
      printInfo("Custom provider setup cancelled.");
      return false;
    }

    // Custom providers live in models.json rather than auth storage.
    const modelsJsonPath = getModelsJsonPath(authPath);
    const result = upsertProviderConfig(modelsJsonPath, setup.providerId, {
      baseUrl: setup.baseUrl,
      apiKey: setup.apiKeyConfig,
      api: setup.api,
      authHeader: setup.authHeader,
      models: setup.modelIds.map((id) => ({ id })),
    });
    if (!result.ok) {
      printWarning(result.error);
      return false;
    }

    printSuccess(`Saved custom provider: ${setup.providerId}`);
    // Verification is informational only; the config is already saved.
    await verifyCustomProvider(setup, authPath);
    return true;
  }

  printSection(`API Key: ${provider.label}`);
  if (provider.envVar) {
    printInfo(`Tip: to avoid writing secrets to disk, set ${provider.envVar} in your shell or .env.`);
  }

  const apiKey = await promptText("Paste API key (leave empty to use env var instead)", "");
  if (!apiKey) {
    // Empty input is a valid path when the user prefers the env var.
    if (provider.envVar) {
      printInfo(`Set ${provider.envVar} and rerun setup (or run \`feynman model list\`).`);
    } else {
      printInfo("No API key provided.");
    }
    return false;
  }

  AuthStorage.create(authPath).set(provider.id, { type: "api_key", key: apiKey });
  printSuccess(`Saved API key for ${provider.id} in auth storage.`);

  const baseUrl = await promptText("Base URL override (optional, include /v1 for OpenAI-compatible endpoints)", "");
  if (baseUrl) {
    const modelsJsonPath = getModelsJsonPath(authPath);
    const result = upsertProviderBaseUrl(modelsJsonPath, provider.id, baseUrl);
    if (result.ok) {
      printSuccess(`Saved baseUrl override for ${provider.id} in models.json.`);
    } else {
      // A failed override is non-fatal: the key itself is already stored.
      printWarning(result.error);
    }
  }

  return true;
}
|
||||
|
||||
function resolveAvailableModelSpec(authPath: string, input: string): string | undefined {
|
||||
const normalizedInput = input.trim().toLowerCase();
|
||||
if (!normalizedInput) {
|
||||
@@ -110,14 +564,46 @@ export function printModelList(settingsPath: string, authPath: string): void {
|
||||
}
|
||||
}
|
||||
|
||||
export async function loginModelProvider(authPath: string, providerId?: string, settingsPath?: string): Promise<void> {
|
||||
/**
 * Top-level authentication chooser: API key vs OAuth vs cancel.
 * After a successful API-key setup (and when a settings path is given), sets a
 * recommended default model if the current default is missing or no longer
 * among the available models. Returns true when authentication was configured.
 */
export async function authenticateModelProvider(authPath: string, settingsPath?: string): Promise<boolean> {
  const choices = [
    "API key (OpenAI, Anthropic, Google, custom provider, ...)",
    "OAuth login (ChatGPT Plus/Pro, Claude Pro/Max, Copilot, ...)",
    "Cancel",
  ];
  const selection = await promptChoice("How do you want to authenticate?", choices, 0);

  if (selection === 0) {
    const configured = await configureApiKeyProvider(authPath);
    if (configured && settingsPath) {
      const currentSpec = getCurrentModelSpec(settingsPath);
      const available = getAvailableModelRecords(authPath);
      // Specs are compared in "provider/id" form.
      const currentValid = currentSpec ? available.some((m) => `${m.provider}/${m.id}` === currentSpec) : false;
      if ((!currentSpec || !currentValid) && available.length > 0) {
        const recommended = chooseRecommendedModel(authPath);
        if (recommended) {
          setDefaultModelSpec(settingsPath, authPath, recommended.spec);
        }
      }
    }
    return configured;
  }

  if (selection === 1) {
    // OAuth path delegates entirely to loginModelProvider (interactive chooser).
    return loginModelProvider(authPath, undefined, settingsPath);
  }

  printInfo("Authentication cancelled.");
  return false;
}
|
||||
|
||||
export async function loginModelProvider(authPath: string, providerId?: string, settingsPath?: string): Promise<boolean> {
|
||||
const provider = providerId ? resolveOAuthProvider(authPath, providerId) : await selectOAuthProvider(authPath, "login");
|
||||
if (!provider) {
|
||||
if (providerId) {
|
||||
throw new Error(`Unknown OAuth model provider: ${providerId}`);
|
||||
}
|
||||
printInfo("Login cancelled.");
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
|
||||
const authStorage = AuthStorage.create(authPath);
|
||||
@@ -126,7 +612,13 @@ export async function loginModelProvider(authPath: string, providerId?: string,
|
||||
await authStorage.login(provider.id, {
|
||||
onAuth: (info: { url: string; instructions?: string }) => {
|
||||
printSection(`Login: ${provider.name ?? provider.id}`);
|
||||
printInfo(`Open this URL: ${info.url}`);
|
||||
const opened = openUrl(info.url);
|
||||
if (opened) {
|
||||
printInfo("Opened the login URL in your browser.");
|
||||
} else {
|
||||
printWarning("Couldn't open your browser automatically.");
|
||||
}
|
||||
printInfo(`Auth URL: ${info.url}`);
|
||||
if (info.instructions) {
|
||||
printInfo(info.instructions);
|
||||
}
|
||||
@@ -159,6 +651,8 @@ export async function loginModelProvider(authPath: string, providerId?: string,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
export async function logoutModelProvider(authPath: string, providerId?: string): Promise<void> {
|
||||
@@ -193,11 +687,34 @@ export function setDefaultModelSpec(settingsPath: string, authPath: string, spec
|
||||
export async function runModelSetup(settingsPath: string, authPath: string): Promise<void> {
|
||||
let status = collectModelStatus(settingsPath, authPath);
|
||||
|
||||
if (status.availableModels.length === 0) {
|
||||
await loginModelProvider(authPath, undefined, settingsPath);
|
||||
while (status.availableModels.length === 0) {
|
||||
const choices = [
|
||||
"API key (OpenAI, Anthropic, ZAI, Kimi, MiniMax, ...)",
|
||||
"OAuth login (ChatGPT Plus/Pro, Claude Pro/Max, Copilot, ...)",
|
||||
"Cancel",
|
||||
];
|
||||
const selection = await promptChoice("Choose how to configure model access:", choices, 0);
|
||||
if (selection === 0) {
|
||||
const configured = await configureApiKeyProvider(authPath);
|
||||
if (!configured) {
|
||||
status = collectModelStatus(settingsPath, authPath);
|
||||
continue;
|
||||
}
|
||||
} else if (selection === 1) {
|
||||
const loggedIn = await loginModelProvider(authPath, undefined, settingsPath);
|
||||
if (!loggedIn) {
|
||||
status = collectModelStatus(settingsPath, authPath);
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
printInfo("Setup cancelled.");
|
||||
return;
|
||||
}
|
||||
status = collectModelStatus(settingsPath, authPath);
|
||||
if (status.availableModels.length === 0) {
|
||||
return;
|
||||
printWarning("No authenticated models are available yet.");
|
||||
printInfo("If you configured a custom provider, ensure it has `apiKey` set in models.json.");
|
||||
printInfo("Tip: run `feynman doctor` to see models.json path + load errors.");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
91
src/model/models-json.ts
Normal file
91
src/model/models-json.ts
Normal file
@@ -0,0 +1,91 @@
|
||||
import { chmodSync, existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||
import { dirname } from "node:path";
|
||||
|
||||
type ModelsJson = {
|
||||
providers?: Record<string, Record<string, unknown>>;
|
||||
};
|
||||
|
||||
function readModelsJson(modelsJsonPath: string): { ok: true; value: ModelsJson } | { ok: false; error: string } {
|
||||
if (!existsSync(modelsJsonPath)) {
|
||||
return { ok: true, value: { providers: {} } };
|
||||
}
|
||||
|
||||
try {
|
||||
const raw = readFileSync(modelsJsonPath, "utf8").trim();
|
||||
if (!raw) {
|
||||
return { ok: true, value: { providers: {} } };
|
||||
}
|
||||
const parsed = JSON.parse(raw) as unknown;
|
||||
if (!parsed || typeof parsed !== "object") {
|
||||
return { ok: false, error: `Invalid models.json (expected an object): ${modelsJsonPath}` };
|
||||
}
|
||||
return { ok: true, value: parsed as ModelsJson };
|
||||
} catch (error) {
|
||||
return {
|
||||
ok: false,
|
||||
error: `Failed to read models.json: ${error instanceof Error ? error.message : String(error)}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export function upsertProviderBaseUrl(
|
||||
modelsJsonPath: string,
|
||||
providerId: string,
|
||||
baseUrl: string,
|
||||
): { ok: true } | { ok: false; error: string } {
|
||||
return upsertProviderConfig(modelsJsonPath, providerId, { baseUrl });
|
||||
}
|
||||
|
||||
export type ProviderConfigPatch = {
|
||||
baseUrl?: string;
|
||||
apiKey?: string;
|
||||
api?: string;
|
||||
authHeader?: boolean;
|
||||
headers?: Record<string, string>;
|
||||
models?: Array<{ id: string }>;
|
||||
};
|
||||
|
||||
export function upsertProviderConfig(
|
||||
modelsJsonPath: string,
|
||||
providerId: string,
|
||||
patch: ProviderConfigPatch,
|
||||
): { ok: true } | { ok: false; error: string } {
|
||||
const loaded = readModelsJson(modelsJsonPath);
|
||||
if (!loaded.ok) {
|
||||
return loaded;
|
||||
}
|
||||
|
||||
const value: ModelsJson = loaded.value;
|
||||
const providers: Record<string, Record<string, unknown>> = {
|
||||
...(value.providers && typeof value.providers === "object" ? value.providers : {}),
|
||||
};
|
||||
|
||||
const currentProvider =
|
||||
providers[providerId] && typeof providers[providerId] === "object" ? providers[providerId] : {};
|
||||
|
||||
const nextProvider: Record<string, unknown> = { ...currentProvider };
|
||||
if (patch.baseUrl !== undefined) nextProvider.baseUrl = patch.baseUrl;
|
||||
if (patch.apiKey !== undefined) nextProvider.apiKey = patch.apiKey;
|
||||
if (patch.api !== undefined) nextProvider.api = patch.api;
|
||||
if (patch.authHeader !== undefined) nextProvider.authHeader = patch.authHeader;
|
||||
if (patch.headers !== undefined) nextProvider.headers = patch.headers;
|
||||
if (patch.models !== undefined) nextProvider.models = patch.models;
|
||||
|
||||
providers[providerId] = nextProvider;
|
||||
|
||||
const next: ModelsJson = { ...value, providers };
|
||||
|
||||
try {
|
||||
mkdirSync(dirname(modelsJsonPath), { recursive: true });
|
||||
writeFileSync(modelsJsonPath, JSON.stringify(next, null, 2) + "\n", "utf8");
|
||||
// models.json can contain API keys/headers; default to user-only permissions.
|
||||
try {
|
||||
chmodSync(modelsJsonPath, 0o600);
|
||||
} catch {
|
||||
// ignore permission errors (best-effort)
|
||||
}
|
||||
return { ok: true };
|
||||
} catch (error) {
|
||||
return { ok: false, error: `Failed to write models.json: ${error instanceof Error ? error.message : String(error)}` };
|
||||
}
|
||||
}
|
||||
12
src/model/registry.ts
Normal file
12
src/model/registry.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
import { dirname, resolve } from "node:path";
|
||||
|
||||
import { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
export function getModelsJsonPath(authPath: string): string {
|
||||
return resolve(dirname(authPath), "models.json");
|
||||
}
|
||||
|
||||
export function createModelRegistry(authPath: string): ModelRegistry {
|
||||
return new ModelRegistry(AuthStorage.create(authPath), getModelsJsonPath(authPath));
|
||||
}
|
||||
|
||||
@@ -2,13 +2,19 @@ import { spawn } from "node:child_process";
|
||||
import { existsSync } from "node:fs";
|
||||
|
||||
import { buildPiArgs, buildPiEnv, type PiRuntimeOptions, resolvePiPaths } from "./runtime.js";
|
||||
import { ensureSupportedNodeVersion } from "../system/node-version.js";
|
||||
|
||||
export async function launchPiChat(options: PiRuntimeOptions): Promise<void> {
|
||||
const { piCliPath, promisePolyfillPath } = resolvePiPaths(options.appRoot);
|
||||
ensureSupportedNodeVersion();
|
||||
|
||||
const { piCliPath, promisePolyfillPath, promisePolyfillSourcePath, tsxLoaderPath } = resolvePiPaths(options.appRoot);
|
||||
if (!existsSync(piCliPath)) {
|
||||
throw new Error(`Pi CLI not found: ${piCliPath}`);
|
||||
}
|
||||
if (!existsSync(promisePolyfillPath)) {
|
||||
|
||||
const useBuiltPolyfill = existsSync(promisePolyfillPath);
|
||||
const useDevPolyfill = !useBuiltPolyfill && existsSync(promisePolyfillSourcePath) && existsSync(tsxLoaderPath);
|
||||
if (!useBuiltPolyfill && !useDevPolyfill) {
|
||||
throw new Error(`Promise polyfill not found: ${promisePolyfillPath}`);
|
||||
}
|
||||
|
||||
@@ -16,7 +22,11 @@ export async function launchPiChat(options: PiRuntimeOptions): Promise<void> {
|
||||
process.stdout.write("\x1b[2J\x1b[3J\x1b[H");
|
||||
}
|
||||
|
||||
const child = spawn(process.execPath, ["--import", promisePolyfillPath, piCliPath, ...buildPiArgs(options)], {
|
||||
const importArgs = useDevPolyfill
|
||||
? ["--import", tsxLoaderPath, "--import", promisePolyfillSourcePath]
|
||||
: ["--import", promisePolyfillPath];
|
||||
|
||||
const child = spawn(process.execPath, [...importArgs, piCliPath, ...buildPiArgs(options)], {
|
||||
cwd: options.workingDir,
|
||||
stdio: "inherit",
|
||||
env: buildPiEnv(options),
|
||||
@@ -26,7 +36,11 @@ export async function launchPiChat(options: PiRuntimeOptions): Promise<void> {
|
||||
child.on("error", reject);
|
||||
child.on("exit", (code, signal) => {
|
||||
if (signal) {
|
||||
try {
|
||||
process.kill(process.pid, signal);
|
||||
} catch {
|
||||
process.exitCode = 1;
|
||||
}
|
||||
return;
|
||||
}
|
||||
process.exitCode = code ?? 0;
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import type { PackageSource } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
export const CORE_PACKAGE_SOURCES = [
|
||||
"npm:@companion-ai/alpha-hub",
|
||||
"npm:pi-subagents",
|
||||
"npm:pi-btw",
|
||||
"npm:pi-docparser",
|
||||
@@ -23,13 +24,13 @@ export const OPTIONAL_PACKAGE_PRESETS = {
|
||||
},
|
||||
} as const;
|
||||
|
||||
export type OptionalPackagePresetName = keyof typeof OPTIONAL_PACKAGE_PRESETS;
|
||||
|
||||
const LEGACY_DEFAULT_PACKAGE_SOURCES = [
|
||||
...CORE_PACKAGE_SOURCES,
|
||||
"npm:pi-generative-ui",
|
||||
] as const;
|
||||
|
||||
export type OptionalPackagePresetName = keyof typeof OPTIONAL_PACKAGE_PRESETS;
|
||||
|
||||
function arraysMatchAsSets(left: readonly string[], right: readonly string[]): boolean {
|
||||
if (left.length !== right.length) {
|
||||
return false;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { existsSync, readFileSync } from "node:fs";
|
||||
import { dirname, resolve } from "node:path";
|
||||
import { delimiter, dirname, resolve } from "node:path";
|
||||
|
||||
import {
|
||||
BROWSER_FALLBACK_PATHS,
|
||||
@@ -25,10 +25,13 @@ export function resolvePiPaths(appRoot: string) {
|
||||
piPackageRoot: resolve(appRoot, "node_modules", "@mariozechner", "pi-coding-agent"),
|
||||
piCliPath: resolve(appRoot, "node_modules", "@mariozechner", "pi-coding-agent", "dist", "cli.js"),
|
||||
promisePolyfillPath: resolve(appRoot, "dist", "system", "promise-polyfill.js"),
|
||||
promisePolyfillSourcePath: resolve(appRoot, "src", "system", "promise-polyfill.ts"),
|
||||
tsxLoaderPath: resolve(appRoot, "node_modules", "tsx", "dist", "loader.mjs"),
|
||||
researchToolsPath: resolve(appRoot, "extensions", "research-tools.ts"),
|
||||
promptTemplatePath: resolve(appRoot, "prompts"),
|
||||
systemPromptPath: resolve(appRoot, ".feynman", "SYSTEM.md"),
|
||||
piWorkspaceNodeModulesPath: resolve(appRoot, ".feynman", "npm", "node_modules"),
|
||||
nodeModulesBinPath: resolve(appRoot, "node_modules", ".bin"),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -37,7 +40,11 @@ export function validatePiInstallation(appRoot: string): string[] {
|
||||
const missing: string[] = [];
|
||||
|
||||
if (!existsSync(paths.piCliPath)) missing.push(paths.piCliPath);
|
||||
if (!existsSync(paths.promisePolyfillPath)) missing.push(paths.promisePolyfillPath);
|
||||
if (!existsSync(paths.promisePolyfillPath)) {
|
||||
// Dev fallback: allow running from source without `dist/` build artifacts.
|
||||
const hasDevPolyfill = existsSync(paths.promisePolyfillSourcePath) && existsSync(paths.tsxLoaderPath);
|
||||
if (!hasDevPolyfill) missing.push(paths.promisePolyfillPath);
|
||||
}
|
||||
if (!existsSync(paths.researchToolsPath)) missing.push(paths.researchToolsPath);
|
||||
if (!existsSync(paths.promptTemplatePath)) missing.push(paths.promptTemplatePath);
|
||||
|
||||
@@ -76,19 +83,35 @@ export function buildPiArgs(options: PiRuntimeOptions): string[] {
|
||||
|
||||
export function buildPiEnv(options: PiRuntimeOptions): NodeJS.ProcessEnv {
|
||||
const paths = resolvePiPaths(options.appRoot);
|
||||
const feynmanHome = dirname(options.feynmanAgentDir);
|
||||
const feynmanNpmPrefixPath = resolve(feynmanHome, "npm-global");
|
||||
const feynmanNpmBinPath = resolve(feynmanNpmPrefixPath, "bin");
|
||||
|
||||
const currentPath = process.env.PATH ?? "";
|
||||
const binEntries = [paths.nodeModulesBinPath, resolve(paths.piWorkspaceNodeModulesPath, ".bin"), feynmanNpmBinPath];
|
||||
const binPath = binEntries.join(delimiter);
|
||||
|
||||
return {
|
||||
...process.env,
|
||||
PATH: `${binPath}${delimiter}${currentPath}`,
|
||||
FEYNMAN_VERSION: options.feynmanVersion,
|
||||
FEYNMAN_SESSION_DIR: options.sessionDir,
|
||||
FEYNMAN_MEMORY_DIR: resolve(dirname(options.feynmanAgentDir), "memory"),
|
||||
FEYNMAN_NODE_EXECUTABLE: process.execPath,
|
||||
FEYNMAN_BIN_PATH: resolve(options.appRoot, "bin", "feynman.js"),
|
||||
FEYNMAN_NPM_PREFIX: feynmanNpmPrefixPath,
|
||||
// Ensure the Pi child process uses Feynman's agent dir for auth/models/settings.
|
||||
PI_CODING_AGENT_DIR: options.feynmanAgentDir,
|
||||
PANDOC_PATH: process.env.PANDOC_PATH ?? resolveExecutable("pandoc", PANDOC_FALLBACK_PATHS),
|
||||
PI_HARDWARE_CURSOR: process.env.PI_HARDWARE_CURSOR ?? "1",
|
||||
PI_SKIP_VERSION_CHECK: process.env.PI_SKIP_VERSION_CHECK ?? "1",
|
||||
MERMAID_CLI_PATH: process.env.MERMAID_CLI_PATH ?? resolveExecutable("mmdc", MERMAID_FALLBACK_PATHS),
|
||||
PUPPETEER_EXECUTABLE_PATH:
|
||||
process.env.PUPPETEER_EXECUTABLE_PATH ?? resolveExecutable("google-chrome", BROWSER_FALLBACK_PATHS),
|
||||
// Always pin npm's global prefix to the Feynman workspace. npm injects
|
||||
// lowercase config vars into child processes, which would otherwise leak
|
||||
// the caller's global prefix into Pi.
|
||||
NPM_CONFIG_PREFIX: feynmanNpmPrefixPath,
|
||||
npm_config_prefix: feynmanNpmPrefixPath,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||
import { dirname } from "node:path";
|
||||
|
||||
import { AuthStorage, ModelRegistry, type PackageSource } from "@mariozechner/pi-coding-agent";
|
||||
import { ModelRegistry, type PackageSource } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
import { CORE_PACKAGE_SOURCES, shouldPruneLegacyDefaultPackages } from "./package-presets.js";
|
||||
import { createModelRegistry } from "../model/registry.js";
|
||||
|
||||
export type ThinkingLevel = "off" | "minimal" | "low" | "medium" | "high" | "xhigh";
|
||||
|
||||
@@ -115,8 +116,7 @@ export function normalizeFeynmanSettings(
|
||||
settings.packages = [...CORE_PACKAGE_SOURCES];
|
||||
}
|
||||
|
||||
const authStorage = AuthStorage.create(authPath);
|
||||
const modelRegistry = new ModelRegistry(authStorage);
|
||||
const modelRegistry = createModelRegistry(authPath);
|
||||
const availableModels = modelRegistry.getAvailable().map((model) => ({
|
||||
provider: model.provider,
|
||||
id: model.id,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent";
|
||||
import { getUserName as getAlphaUserName, isLoggedIn as isAlphaLoggedIn } from "@companion-ai/alpha-hub/lib";
|
||||
|
||||
import { readFileSync } from "node:fs";
|
||||
|
||||
import { formatPiWebAccessDoctorLines, getPiWebAccessStatus } from "../pi/web-access.js";
|
||||
import { BROWSER_FALLBACK_PATHS, PANDOC_FALLBACK_PATHS, resolveExecutable } from "../system/executables.js";
|
||||
import { readJson } from "../pi/settings.js";
|
||||
@@ -8,6 +9,30 @@ import { validatePiInstallation } from "../pi/runtime.js";
|
||||
import { printInfo, printPanel, printSection } from "../ui/terminal.js";
|
||||
import { getCurrentModelSpec } from "../model/commands.js";
|
||||
import { buildModelStatusSnapshotFromRecords, getAvailableModelRecords, getSupportedModelRecords } from "../model/catalog.js";
|
||||
import { createModelRegistry, getModelsJsonPath } from "../model/registry.js";
|
||||
|
||||
function findProvidersMissingApiKey(modelsJsonPath: string): string[] {
|
||||
try {
|
||||
const raw = readFileSync(modelsJsonPath, "utf8").trim();
|
||||
if (!raw) return [];
|
||||
const parsed = JSON.parse(raw) as any;
|
||||
const providers = parsed?.providers;
|
||||
if (!providers || typeof providers !== "object") return [];
|
||||
const missing: string[] = [];
|
||||
for (const [providerId, config] of Object.entries(providers as Record<string, unknown>)) {
|
||||
if (!config || typeof config !== "object") continue;
|
||||
const models = (config as any).models;
|
||||
if (!Array.isArray(models) || models.length === 0) continue;
|
||||
const apiKey = (config as any).apiKey;
|
||||
if (typeof apiKey !== "string" || apiKey.trim().length === 0) {
|
||||
missing.push(providerId);
|
||||
}
|
||||
}
|
||||
return missing;
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
export type DoctorOptions = {
|
||||
settingsPath: string;
|
||||
@@ -104,7 +129,7 @@ export function runStatus(options: DoctorOptions): void {
|
||||
|
||||
export function runDoctor(options: DoctorOptions): void {
|
||||
const settings = readJson(options.settingsPath);
|
||||
const modelRegistry = new ModelRegistry(AuthStorage.create(options.authPath));
|
||||
const modelRegistry = createModelRegistry(options.authPath);
|
||||
const availableModels = modelRegistry.getAvailable();
|
||||
const pandocPath = resolveExecutable("pandoc", PANDOC_FALLBACK_PATHS);
|
||||
const browserPath = process.env.PUPPETEER_EXECUTABLE_PATH ?? resolveExecutable("google-chrome", BROWSER_FALLBACK_PATHS);
|
||||
@@ -144,6 +169,21 @@ export function runDoctor(options: DoctorOptions): void {
|
||||
if (modelStatus.recommendedModelReason) {
|
||||
console.log(` why: ${modelStatus.recommendedModelReason}`);
|
||||
}
|
||||
const modelsError = modelRegistry.getError();
|
||||
if (modelsError) {
|
||||
console.log("models.json: error");
|
||||
for (const line of modelsError.split("\n")) {
|
||||
console.log(` ${line}`);
|
||||
}
|
||||
} else {
|
||||
const modelsJsonPath = getModelsJsonPath(options.authPath);
|
||||
console.log(`models.json: ${modelsJsonPath}`);
|
||||
const missingApiKeyProviders = findProvidersMissingApiKey(modelsJsonPath);
|
||||
if (missingApiKeyProviders.length > 0) {
|
||||
console.log(` warning: provider(s) missing apiKey: ${missingApiKeyProviders.join(", ")}`);
|
||||
console.log(" note: custom providers with a models[] list need apiKey in models.json to be available.");
|
||||
}
|
||||
}
|
||||
console.log(`pandoc: ${pandocPath ?? "missing"}`);
|
||||
console.log(`browser preview runtime: ${browserPath ?? "missing"}`);
|
||||
for (const line of formatPiWebAccessDoctorLines()) {
|
||||
|
||||
@@ -13,14 +13,36 @@ export function setupPreviewDependencies(): PreviewSetupResult {
|
||||
return { status: "ready", message: `pandoc already installed at ${pandocPath}` };
|
||||
}
|
||||
|
||||
if (process.platform === "darwin") {
|
||||
const brewPath = resolveExecutable("brew", BREW_FALLBACK_PATHS);
|
||||
if (process.platform === "darwin" && brewPath) {
|
||||
if (brewPath) {
|
||||
const result = spawnSync(brewPath, ["install", "pandoc"], { stdio: "inherit" });
|
||||
if (result.status !== 0) {
|
||||
throw new Error("Failed to install pandoc via Homebrew.");
|
||||
}
|
||||
return { status: "installed", message: "Preview dependency installed: pandoc" };
|
||||
}
|
||||
}
|
||||
|
||||
if (process.platform === "win32") {
|
||||
const wingetPath = resolveExecutable("winget");
|
||||
if (wingetPath) {
|
||||
const result = spawnSync(wingetPath, ["install", "--id", "JohnMacFarlane.Pandoc", "-e"], { stdio: "inherit" });
|
||||
if (result.status === 0) {
|
||||
return { status: "installed", message: "Preview dependency installed: pandoc (via winget)" };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (process.platform === "linux") {
|
||||
const aptPath = resolveExecutable("apt-get");
|
||||
if (aptPath) {
|
||||
const result = spawnSync(aptPath, ["install", "-y", "pandoc"], { stdio: "inherit" });
|
||||
if (result.status === 0) {
|
||||
return { status: "installed", message: "Preview dependency installed: pandoc (via apt)" };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
status: "manual",
|
||||
|
||||
@@ -29,6 +29,7 @@ function printNonInteractiveSetupGuidance(): void {
|
||||
printInfo("Non-interactive terminal. Use explicit commands:");
|
||||
printInfo(" feynman model login <provider>");
|
||||
printInfo(" feynman model set <provider/model>");
|
||||
printInfo(" # or configure API keys via env vars/auth.json and rerun `feynman model list`");
|
||||
printInfo(" feynman alpha login");
|
||||
printInfo(" feynman doctor");
|
||||
}
|
||||
|
||||
@@ -1,27 +1,36 @@
|
||||
import { spawnSync } from "node:child_process";
|
||||
import { existsSync } from "node:fs";
|
||||
|
||||
export const PANDOC_FALLBACK_PATHS = [
|
||||
"/opt/homebrew/bin/pandoc",
|
||||
"/usr/local/bin/pandoc",
|
||||
];
|
||||
const isWindows = process.platform === "win32";
|
||||
const programFiles = process.env.PROGRAMFILES ?? "C:\\Program Files";
|
||||
const localAppData = process.env.LOCALAPPDATA ?? "";
|
||||
|
||||
export const BREW_FALLBACK_PATHS = [
|
||||
"/opt/homebrew/bin/brew",
|
||||
"/usr/local/bin/brew",
|
||||
];
|
||||
export const PANDOC_FALLBACK_PATHS = isWindows
|
||||
? [`${programFiles}\\Pandoc\\pandoc.exe`]
|
||||
: ["/opt/homebrew/bin/pandoc", "/usr/local/bin/pandoc"];
|
||||
|
||||
export const BROWSER_FALLBACK_PATHS = [
|
||||
export const BREW_FALLBACK_PATHS = isWindows
|
||||
? []
|
||||
: ["/opt/homebrew/bin/brew", "/usr/local/bin/brew"];
|
||||
|
||||
export const BROWSER_FALLBACK_PATHS = isWindows
|
||||
? [
|
||||
`${programFiles}\\Google\\Chrome\\Application\\chrome.exe`,
|
||||
`${programFiles} (x86)\\Google\\Chrome\\Application\\chrome.exe`,
|
||||
`${localAppData}\\Google\\Chrome\\Application\\chrome.exe`,
|
||||
`${programFiles}\\Microsoft\\Edge\\Application\\msedge.exe`,
|
||||
`${programFiles}\\BraveSoftware\\Brave-Browser\\Application\\brave.exe`,
|
||||
]
|
||||
: [
|
||||
"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
|
||||
"/Applications/Chromium.app/Contents/MacOS/Chromium",
|
||||
"/Applications/Brave Browser.app/Contents/MacOS/Brave Browser",
|
||||
"/Applications/Microsoft Edge.app/Contents/MacOS/Microsoft Edge",
|
||||
];
|
||||
|
||||
export const MERMAID_FALLBACK_PATHS = [
|
||||
"/opt/homebrew/bin/mmdc",
|
||||
"/usr/local/bin/mmdc",
|
||||
];
|
||||
export const MERMAID_FALLBACK_PATHS = isWindows
|
||||
? []
|
||||
: ["/opt/homebrew/bin/mmdc", "/usr/local/bin/mmdc"];
|
||||
|
||||
export function resolveExecutable(name: string, fallbackPaths: string[] = []): string | undefined {
|
||||
for (const candidate of fallbackPaths) {
|
||||
@@ -30,13 +39,19 @@ export function resolveExecutable(name: string, fallbackPaths: string[] = []): s
|
||||
}
|
||||
}
|
||||
|
||||
const result = spawnSync("sh", ["-lc", `command -v ${name}`], {
|
||||
const isWindows = process.platform === "win32";
|
||||
const result = isWindows
|
||||
? spawnSync("cmd", ["/c", `where ${name}`], {
|
||||
encoding: "utf8",
|
||||
stdio: ["ignore", "pipe", "ignore"],
|
||||
})
|
||||
: spawnSync("sh", ["-lc", `command -v ${name}`], {
|
||||
encoding: "utf8",
|
||||
stdio: ["ignore", "pipe", "ignore"],
|
||||
});
|
||||
|
||||
if (result.status === 0) {
|
||||
const resolved = result.stdout.trim();
|
||||
const resolved = result.stdout.trim().split(/\r?\n/)[0];
|
||||
if (resolved) {
|
||||
return resolved;
|
||||
}
|
||||
|
||||
45
src/system/node-version.ts
Normal file
45
src/system/node-version.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
export const MIN_NODE_VERSION = "20.19.0";
|
||||
|
||||
type ParsedNodeVersion = {
|
||||
major: number;
|
||||
minor: number;
|
||||
patch: number;
|
||||
};
|
||||
|
||||
function parseNodeVersion(version: string): ParsedNodeVersion {
|
||||
const [major = "0", minor = "0", patch = "0"] = version.replace(/^v/, "").split(".");
|
||||
return {
|
||||
major: Number.parseInt(major, 10) || 0,
|
||||
minor: Number.parseInt(minor, 10) || 0,
|
||||
patch: Number.parseInt(patch, 10) || 0,
|
||||
};
|
||||
}
|
||||
|
||||
function compareNodeVersions(left: ParsedNodeVersion, right: ParsedNodeVersion): number {
|
||||
if (left.major !== right.major) return left.major - right.major;
|
||||
if (left.minor !== right.minor) return left.minor - right.minor;
|
||||
return left.patch - right.patch;
|
||||
}
|
||||
|
||||
export function isSupportedNodeVersion(version = process.versions.node): boolean {
|
||||
return compareNodeVersions(parseNodeVersion(version), parseNodeVersion(MIN_NODE_VERSION)) >= 0;
|
||||
}
|
||||
|
||||
export function getUnsupportedNodeVersionLines(version = process.versions.node): string[] {
|
||||
const isWindows = process.platform === "win32";
|
||||
return [
|
||||
`feynman requires Node.js ${MIN_NODE_VERSION} or later (detected ${version}).`,
|
||||
isWindows
|
||||
? "Install a newer Node.js from https://nodejs.org, or use the standalone installer:"
|
||||
: "Switch to Node 20 with `nvm install 20 && nvm use 20`, or use the standalone installer:",
|
||||
isWindows
|
||||
? "irm https://feynman.is/install.ps1 | iex"
|
||||
: "curl -fsSL https://feynman.is/install | bash",
|
||||
];
|
||||
}
|
||||
|
||||
export function ensureSupportedNodeVersion(version = process.versions.node): void {
|
||||
if (!isSupportedNodeVersion(version)) {
|
||||
throw new Error(getUnsupportedNodeVersionLines(version).join("\n"));
|
||||
}
|
||||
}
|
||||
51
src/system/open-url.ts
Normal file
51
src/system/open-url.ts
Normal file
@@ -0,0 +1,51 @@
|
||||
import { spawn } from "node:child_process";
|
||||
|
||||
import { resolveExecutable } from "./executables.js";
|
||||
|
||||
type ResolveExecutableFn = (name: string, fallbackPaths?: string[]) => string | undefined;
|
||||
|
||||
type OpenUrlCommand = {
|
||||
command: string;
|
||||
args: string[];
|
||||
};
|
||||
|
||||
export function getOpenUrlCommand(
|
||||
url: string,
|
||||
platform = process.platform,
|
||||
resolveCommand: ResolveExecutableFn = resolveExecutable,
|
||||
): OpenUrlCommand | undefined {
|
||||
if (platform === "win32") {
|
||||
return {
|
||||
command: "cmd",
|
||||
args: ["/c", "start", "", url],
|
||||
};
|
||||
}
|
||||
|
||||
if (platform === "darwin") {
|
||||
const command = resolveCommand("open");
|
||||
return command ? { command, args: [url] } : undefined;
|
||||
}
|
||||
|
||||
const command = resolveCommand("xdg-open");
|
||||
return command ? { command, args: [url] } : undefined;
|
||||
}
|
||||
|
||||
export function openUrl(url: string): boolean {
|
||||
const command = getOpenUrlCommand(url);
|
||||
if (!command) {
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
const child = spawn(command.command, command.args, {
|
||||
detached: true,
|
||||
stdio: "ignore",
|
||||
windowsHide: true,
|
||||
});
|
||||
child.on("error", () => {});
|
||||
child.unref();
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
import { FEYNMAN_ASCII_LOGO } from "../../logo.mjs";
|
||||
|
||||
const RESET = "\x1b[0m";
|
||||
export const RESET = "\x1b[0m";
|
||||
const BOLD = "\x1b[1m";
|
||||
const DIM = "\x1b[2m";
|
||||
|
||||
@@ -11,9 +11,9 @@ function rgb(red: number, green: number, blue: number): string {
|
||||
// Match the outer CLI to the bundled Feynman Pi theme instead of generic magenta panels.
|
||||
const INK = rgb(211, 198, 170);
|
||||
const STONE = rgb(157, 169, 160);
|
||||
const ASH = rgb(133, 146, 137);
|
||||
export const ASH = rgb(133, 146, 137);
|
||||
const DARK_ASH = rgb(92, 106, 114);
|
||||
const SAGE = rgb(167, 192, 128);
|
||||
export const SAGE = rgb(167, 192, 128);
|
||||
const TEAL = rgb(127, 187, 179);
|
||||
const ROSE = rgb(230, 126, 128);
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { mkdtempSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||
import { existsSync, mkdtempSync, mkdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
|
||||
import { tmpdir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
|
||||
@@ -49,3 +49,34 @@ test("syncBundledAssets preserves user-modified files and updates managed files"
|
||||
assert.equal(readFileSync(join(agentDir, "themes", "feynman.json"), "utf8"), '{"theme":"v2"}\n');
|
||||
assert.equal(readFileSync(join(agentDir, "agents", "researcher.md"), "utf8"), "# user-custom\n");
|
||||
});
|
||||
|
||||
test("syncBundledAssets removes deleted managed files but preserves user-modified stale files", () => {
|
||||
const appRoot = createAppRoot();
|
||||
const home = mkdtempSync(join(tmpdir(), "feynman-home-"));
|
||||
process.env.FEYNMAN_HOME = home;
|
||||
const agentDir = join(home, "agent");
|
||||
mkdirSync(agentDir, { recursive: true });
|
||||
|
||||
mkdirSync(join(appRoot, "skills", "paper-eli5"), { recursive: true });
|
||||
writeFileSync(join(appRoot, "skills", "paper-eli5", "SKILL.md"), "# old skill\n", "utf8");
|
||||
syncBundledAssets(appRoot, agentDir);
|
||||
|
||||
rmSync(join(appRoot, "skills", "paper-eli5"), { recursive: true, force: true });
|
||||
mkdirSync(join(appRoot, "skills", "eli5"), { recursive: true });
|
||||
writeFileSync(join(appRoot, "skills", "eli5", "SKILL.md"), "# new skill\n", "utf8");
|
||||
|
||||
const firstResult = syncBundledAssets(appRoot, agentDir);
|
||||
assert.deepEqual(firstResult.copied, ["eli5/SKILL.md"]);
|
||||
assert.equal(existsSync(join(agentDir, "skills", "paper-eli5", "SKILL.md")), false);
|
||||
assert.equal(readFileSync(join(agentDir, "skills", "eli5", "SKILL.md"), "utf8"), "# new skill\n");
|
||||
|
||||
mkdirSync(join(appRoot, "skills", "legacy"), { recursive: true });
|
||||
writeFileSync(join(appRoot, "skills", "legacy", "SKILL.md"), "# managed legacy\n", "utf8");
|
||||
syncBundledAssets(appRoot, agentDir);
|
||||
writeFileSync(join(agentDir, "skills", "legacy", "SKILL.md"), "# user legacy override\n", "utf8");
|
||||
rmSync(join(appRoot, "skills", "legacy"), { recursive: true, force: true });
|
||||
|
||||
const secondResult = syncBundledAssets(appRoot, agentDir);
|
||||
assert.deepEqual(secondResult.skipped, ["legacy/SKILL.md"]);
|
||||
assert.equal(readFileSync(join(agentDir, "skills", "legacy", "SKILL.md"), "utf8"), "# user legacy override\n");
|
||||
});
|
||||
|
||||
32
tests/models-json.test.ts
Normal file
32
tests/models-json.test.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { mkdtempSync, readFileSync } from "node:fs";
|
||||
import { tmpdir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
|
||||
import { upsertProviderConfig } from "../src/model/models-json.js";
|
||||
|
||||
test("upsertProviderConfig creates models.json and merges provider config", () => {
|
||||
const dir = mkdtempSync(join(tmpdir(), "feynman-models-"));
|
||||
const modelsPath = join(dir, "models.json");
|
||||
|
||||
const first = upsertProviderConfig(modelsPath, "custom", {
|
||||
baseUrl: "http://localhost:11434/v1",
|
||||
apiKey: "ollama",
|
||||
api: "openai-completions",
|
||||
authHeader: true,
|
||||
models: [{ id: "llama3.1:8b" }],
|
||||
});
|
||||
assert.deepEqual(first, { ok: true });
|
||||
|
||||
const second = upsertProviderConfig(modelsPath, "custom", {
|
||||
baseUrl: "http://localhost:9999/v1",
|
||||
});
|
||||
assert.deepEqual(second, { ok: true });
|
||||
|
||||
const parsed = JSON.parse(readFileSync(modelsPath, "utf8")) as any;
|
||||
assert.equal(parsed.providers.custom.baseUrl, "http://localhost:9999/v1");
|
||||
assert.equal(parsed.providers.custom.api, "openai-completions");
|
||||
assert.equal(parsed.providers.custom.authHeader, true);
|
||||
assert.deepEqual(parsed.providers.custom.models, [{ id: "llama3.1:8b" }]);
|
||||
});
|
||||
35
tests/node-version.test.ts
Normal file
35
tests/node-version.test.ts
Normal file
@@ -0,0 +1,35 @@
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
|
||||
import {
|
||||
MIN_NODE_VERSION,
|
||||
ensureSupportedNodeVersion,
|
||||
getUnsupportedNodeVersionLines,
|
||||
isSupportedNodeVersion,
|
||||
} from "../src/system/node-version.js";
|
||||
|
||||
test("isSupportedNodeVersion enforces the exact minimum floor", () => {
|
||||
assert.equal(isSupportedNodeVersion("20.19.0"), true);
|
||||
assert.equal(isSupportedNodeVersion("20.19.0"), true);
|
||||
assert.equal(isSupportedNodeVersion("21.0.0"), true);
|
||||
assert.equal(isSupportedNodeVersion("20.18.1"), false);
|
||||
assert.equal(isSupportedNodeVersion("18.17.0"), false);
|
||||
});
|
||||
|
||||
test("ensureSupportedNodeVersion throws a guided upgrade message", () => {
|
||||
assert.throws(
|
||||
() => ensureSupportedNodeVersion("18.17.0"),
|
||||
(error: unknown) =>
|
||||
error instanceof Error &&
|
||||
error.message.includes(`Node.js ${MIN_NODE_VERSION}`) &&
|
||||
error.message.includes("nvm install 20 && nvm use 20") &&
|
||||
error.message.includes("https://feynman.is/install"),
|
||||
);
|
||||
});
|
||||
|
||||
test("unsupported version guidance reports the detected version", () => {
|
||||
const lines = getUnsupportedNodeVersionLines("18.17.0");
|
||||
|
||||
assert.equal(lines[0], "feynman requires Node.js 20.19.0 or later (detected 18.17.0).");
|
||||
assert.ok(lines.some((line) => line.includes("curl -fsSL https://feynman.is/install | bash")));
|
||||
});
|
||||
45
tests/open-url.test.ts
Normal file
45
tests/open-url.test.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
|
||||
import { getOpenUrlCommand } from "../src/system/open-url.js";
|
||||
|
||||
test("getOpenUrlCommand uses open on macOS when available", () => {
|
||||
const command = getOpenUrlCommand(
|
||||
"https://example.com",
|
||||
"darwin",
|
||||
(name) => (name === "open" ? "/usr/bin/open" : undefined),
|
||||
);
|
||||
|
||||
assert.deepEqual(command, {
|
||||
command: "/usr/bin/open",
|
||||
args: ["https://example.com"],
|
||||
});
|
||||
});
|
||||
|
||||
test("getOpenUrlCommand uses xdg-open on Linux when available", () => {
|
||||
const command = getOpenUrlCommand(
|
||||
"https://example.com",
|
||||
"linux",
|
||||
(name) => (name === "xdg-open" ? "/usr/bin/xdg-open" : undefined),
|
||||
);
|
||||
|
||||
assert.deepEqual(command, {
|
||||
command: "/usr/bin/xdg-open",
|
||||
args: ["https://example.com"],
|
||||
});
|
||||
});
|
||||
|
||||
test("getOpenUrlCommand uses cmd start on Windows", () => {
|
||||
const command = getOpenUrlCommand("https://example.com", "win32");
|
||||
|
||||
assert.deepEqual(command, {
|
||||
command: "cmd",
|
||||
args: ["/c", "start", "", "https://example.com"],
|
||||
});
|
||||
});
|
||||
|
||||
test("getOpenUrlCommand returns undefined when no opener is available", () => {
|
||||
const command = getOpenUrlCommand("https://example.com", "linux", () => undefined);
|
||||
|
||||
assert.equal(command, undefined);
|
||||
});
|
||||
@@ -30,6 +30,11 @@ test("buildPiArgs includes configured runtime paths and prompt", () => {
|
||||
});
|
||||
|
||||
test("buildPiEnv wires Feynman paths into the Pi environment", () => {
|
||||
const previousUppercasePrefix = process.env.NPM_CONFIG_PREFIX;
|
||||
const previousLowercasePrefix = process.env.npm_config_prefix;
|
||||
process.env.NPM_CONFIG_PREFIX = "/tmp/global-prefix";
|
||||
process.env.npm_config_prefix = "/tmp/global-prefix-lower";
|
||||
|
||||
const env = buildPiEnv({
|
||||
appRoot: "/repo/feynman",
|
||||
workingDir: "/workspace",
|
||||
@@ -38,9 +43,31 @@ test("buildPiEnv wires Feynman paths into the Pi environment", () => {
|
||||
feynmanVersion: "0.1.5",
|
||||
});
|
||||
|
||||
try {
|
||||
assert.equal(env.FEYNMAN_SESSION_DIR, "/sessions");
|
||||
assert.equal(env.FEYNMAN_BIN_PATH, "/repo/feynman/bin/feynman.js");
|
||||
assert.equal(env.FEYNMAN_MEMORY_DIR, "/home/.feynman/memory");
|
||||
assert.equal(env.FEYNMAN_NPM_PREFIX, "/home/.feynman/npm-global");
|
||||
assert.equal(env.NPM_CONFIG_PREFIX, "/home/.feynman/npm-global");
|
||||
assert.equal(env.npm_config_prefix, "/home/.feynman/npm-global");
|
||||
assert.equal(env.PI_CODING_AGENT_DIR, "/home/.feynman/agent");
|
||||
assert.ok(
|
||||
env.PATH?.startsWith(
|
||||
"/repo/feynman/node_modules/.bin:/repo/feynman/.feynman/npm/node_modules/.bin:/home/.feynman/npm-global/bin:",
|
||||
),
|
||||
);
|
||||
} finally {
|
||||
if (previousUppercasePrefix === undefined) {
|
||||
delete process.env.NPM_CONFIG_PREFIX;
|
||||
} else {
|
||||
process.env.NPM_CONFIG_PREFIX = previousUppercasePrefix;
|
||||
}
|
||||
if (previousLowercasePrefix === undefined) {
|
||||
delete process.env.npm_config_prefix;
|
||||
} else {
|
||||
process.env.npm_config_prefix = previousLowercasePrefix;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
test("resolvePiPaths includes the Promise.withResolvers polyfill path", () => {
|
||||
|
||||
1
website/.astro/data-store.json
Normal file
1
website/.astro/data-store.json
Normal file
File diff suppressed because one or more lines are too long
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"_variables": {
|
||||
"lastUpdateCheck": 1774305535217
|
||||
"lastUpdateCheck": 1774391908508
|
||||
}
|
||||
}
|
||||
23
website/.gitignore
vendored
Normal file
23
website/.gitignore
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
# build output
|
||||
dist/
|
||||
# generated types
|
||||
.astro/
|
||||
|
||||
# dependencies
|
||||
node_modules/
|
||||
|
||||
# logs
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
|
||||
# environment variables
|
||||
.env
|
||||
.env.production
|
||||
|
||||
# macOS-specific files
|
||||
.DS_Store
|
||||
|
||||
# jetbrains setting folder
|
||||
.idea/
|
||||
6
website/.prettierignore
Normal file
6
website/.prettierignore
Normal file
@@ -0,0 +1,6 @@
|
||||
node_modules/
|
||||
coverage/
|
||||
.pnpm-store/
|
||||
pnpm-lock.yaml
|
||||
package-lock.json
|
||||
yarn.lock
|
||||
19
website/.prettierrc
Normal file
19
website/.prettierrc
Normal file
@@ -0,0 +1,19 @@
|
||||
{
|
||||
"endOfLine": "lf",
|
||||
"semi": false,
|
||||
"singleQuote": false,
|
||||
"tabWidth": 2,
|
||||
"trailingComma": "es5",
|
||||
"printWidth": 80,
|
||||
"plugins": ["prettier-plugin-astro", "prettier-plugin-tailwindcss"],
|
||||
"tailwindStylesheet": "src/styles/global.css",
|
||||
"tailwindFunctions": ["cn", "cva"],
|
||||
"overrides": [
|
||||
{
|
||||
"files": "*.astro",
|
||||
"options": {
|
||||
"parser": "astro"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
36
website/README.md
Normal file
36
website/README.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# Astro + React + TypeScript + shadcn/ui
|
||||
|
||||
This is a template for a new Astro project with React, TypeScript, and shadcn/ui.
|
||||
|
||||
## Adding components
|
||||
|
||||
To add components to your app, run the following command:
|
||||
|
||||
```bash
|
||||
npx shadcn@latest add button
|
||||
```
|
||||
|
||||
This will place the ui components in the `src/components` directory.
|
||||
|
||||
## Using components
|
||||
|
||||
To use the components in your app, import them in an `.astro` file:
|
||||
|
||||
```astro
|
||||
---
|
||||
import { Button } from "@/components/ui/button"
|
||||
---
|
||||
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta name="viewport" content="width=device-width" />
|
||||
<title>Astro App</title>
|
||||
</head>
|
||||
<body>
|
||||
<div class="grid h-screen place-items-center content-center">
|
||||
<Button>Button</Button>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
```
|
||||
@@ -1,15 +1,22 @@
|
||||
import { defineConfig } from 'astro/config';
|
||||
import tailwind from '@astrojs/tailwind';
|
||||
// @ts-check
|
||||
|
||||
import tailwindcss from "@tailwindcss/vite"
|
||||
import { defineConfig } from "astro/config"
|
||||
import react from "@astrojs/react"
|
||||
|
||||
// https://astro.build/config
|
||||
export default defineConfig({
|
||||
integrations: [tailwind()],
|
||||
vite: {
|
||||
plugins: [tailwindcss()],
|
||||
},
|
||||
integrations: [react()],
|
||||
site: 'https://feynman.is',
|
||||
markdown: {
|
||||
shikiConfig: {
|
||||
themes: {
|
||||
light: 'github-light',
|
||||
dark: 'github-dark',
|
||||
light: 'vitesse-light',
|
||||
dark: 'vitesse-dark',
|
||||
},
|
||||
},
|
||||
},
|
||||
});
|
||||
})
|
||||
|
||||
25
website/components.json
Normal file
25
website/components.json
Normal file
@@ -0,0 +1,25 @@
|
||||
{
|
||||
"$schema": "https://ui.shadcn.com/schema.json",
|
||||
"style": "radix-vega",
|
||||
"rsc": false,
|
||||
"tsx": true,
|
||||
"tailwind": {
|
||||
"config": "",
|
||||
"css": "src/styles/global.css",
|
||||
"baseColor": "olive",
|
||||
"cssVariables": true,
|
||||
"prefix": ""
|
||||
},
|
||||
"iconLibrary": "lucide",
|
||||
"rtl": false,
|
||||
"aliases": {
|
||||
"components": "@/components",
|
||||
"utils": "@/lib/utils",
|
||||
"ui": "@/components/ui",
|
||||
"lib": "@/lib",
|
||||
"hooks": "@/hooks"
|
||||
},
|
||||
"menuColor": "default",
|
||||
"menuAccent": "subtle",
|
||||
"registries": {}
|
||||
}
|
||||
23
website/eslint.config.js
Normal file
23
website/eslint.config.js
Normal file
@@ -0,0 +1,23 @@
|
||||
import js from "@eslint/js"
|
||||
import globals from "globals"
|
||||
import reactHooks from "eslint-plugin-react-hooks"
|
||||
import reactRefresh from "eslint-plugin-react-refresh"
|
||||
import tseslint from "typescript-eslint"
|
||||
import { defineConfig, globalIgnores } from "eslint/config"
|
||||
|
||||
export default defineConfig([
|
||||
globalIgnores(["dist", ".astro"]),
|
||||
{
|
||||
files: ["**/*.{ts,tsx}"],
|
||||
extends: [
|
||||
js.configs.recommended,
|
||||
tseslint.configs.recommended,
|
||||
reactHooks.configs.flat.recommended,
|
||||
reactRefresh.configs.vite,
|
||||
],
|
||||
languageOptions: {
|
||||
ecmaVersion: 2020,
|
||||
globals: globals.browser,
|
||||
},
|
||||
},
|
||||
])
|
||||
8347
website/package-lock.json
generated
8347
website/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,17 +1,48 @@
|
||||
{
|
||||
"name": "feynman-website",
|
||||
"name": "website",
|
||||
"type": "module",
|
||||
"version": "0.0.1",
|
||||
"private": true,
|
||||
"engines": {
|
||||
"node": ">=20.19.0"
|
||||
},
|
||||
"scripts": {
|
||||
"dev": "astro dev",
|
||||
"build": "node ../scripts/sync-website-installers.mjs && astro build",
|
||||
"preview": "astro preview"
|
||||
"preview": "astro preview",
|
||||
"astro": "astro",
|
||||
"lint": "eslint .",
|
||||
"format": "prettier --write \"**/*.{ts,tsx,astro}\"",
|
||||
"typecheck": "astro check"
|
||||
},
|
||||
"dependencies": {
|
||||
"astro": "^5.7.0",
|
||||
"@astrojs/tailwind": "^6.0.2",
|
||||
"tailwindcss": "^3.4.0",
|
||||
"sharp": "^0.33.0"
|
||||
"@astrojs/react": "^4.4.2",
|
||||
"@fontsource-variable/ibm-plex-sans": "^5.2.8",
|
||||
"@tailwindcss/vite": "^4.2.1",
|
||||
"@types/react": "^19.2.14",
|
||||
"@types/react-dom": "^19.2.3",
|
||||
"astro": "^5.18.1",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
"lucide-react": "^1.6.0",
|
||||
"radix-ui": "^1.4.3",
|
||||
"react": "^19.2.4",
|
||||
"react-dom": "^19.2.4",
|
||||
"shadcn": "^4.1.0",
|
||||
"tailwind-merge": "^3.5.0",
|
||||
"tailwindcss": "^4.2.1",
|
||||
"tw-animate-css": "^1.4.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.4",
|
||||
"eslint": "^9.39.4",
|
||||
"eslint-plugin-react-hooks": "^7.0.1",
|
||||
"eslint-plugin-react-refresh": "^0.5.2",
|
||||
"globals": "^16.5.0",
|
||||
"prettier": "^3.8.1",
|
||||
"prettier-plugin-astro": "^0.14.1",
|
||||
"prettier-plugin-tailwindcss": "^0.7.2",
|
||||
"typescript": "~5.9.3",
|
||||
"typescript-eslint": "^8.57.1"
|
||||
}
|
||||
}
|
||||
|
||||
4
website/public/favicon.svg
Normal file
4
website/public/favicon.svg
Normal file
@@ -0,0 +1,4 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32">
|
||||
<rect width="32" height="32" rx="6" fill="#2d353b"/>
|
||||
<text x="16" y="26" text-anchor="middle" font-family="monospace" font-weight="bold" font-size="26" fill="#a7c080">f</text>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 248 B |
BIN
website/public/hero.png
Normal file
BIN
website/public/hero.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 884 KiB |
@@ -13,11 +13,57 @@ step() {
|
||||
printf '==> %s\n' "$1"
|
||||
}
|
||||
|
||||
run_with_spinner() {
|
||||
label="$1"
|
||||
shift
|
||||
|
||||
if [ ! -t 2 ]; then
|
||||
step "$label"
|
||||
"$@"
|
||||
return
|
||||
fi
|
||||
|
||||
"$@" &
|
||||
pid=$!
|
||||
frame=0
|
||||
|
||||
set +e
|
||||
while kill -0 "$pid" 2>/dev/null; do
|
||||
case "$frame" in
|
||||
0) spinner='|' ;;
|
||||
1) spinner='/' ;;
|
||||
2) spinner='-' ;;
|
||||
*) spinner='\\' ;;
|
||||
esac
|
||||
printf '\r==> %s %s' "$label" "$spinner" >&2
|
||||
frame=$(( (frame + 1) % 4 ))
|
||||
sleep 0.1
|
||||
done
|
||||
wait "$pid"
|
||||
status=$?
|
||||
set -e
|
||||
|
||||
printf '\r\033[2K' >&2
|
||||
if [ "$status" -ne 0 ]; then
|
||||
printf '==> %s failed\n' "$label" >&2
|
||||
return "$status"
|
||||
fi
|
||||
|
||||
step "$label"
|
||||
}
|
||||
|
||||
normalize_version() {
|
||||
case "$1" in
|
||||
"" | latest)
|
||||
"")
|
||||
printf 'latest\n'
|
||||
;;
|
||||
latest | stable)
|
||||
printf 'latest\n'
|
||||
;;
|
||||
edge)
|
||||
echo "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." >&2
|
||||
exit 1
|
||||
;;
|
||||
v*)
|
||||
printf '%s\n' "${1#v}"
|
||||
;;
|
||||
@@ -32,12 +78,20 @@ download_file() {
|
||||
output="$2"
|
||||
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
if [ -t 2 ]; then
|
||||
curl -fL --progress-bar "$url" -o "$output"
|
||||
else
|
||||
curl -fsSL "$url" -o "$output"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
if command -v wget >/dev/null 2>&1; then
|
||||
if [ -t 2 ]; then
|
||||
wget --show-progress -O "$output" "$url"
|
||||
else
|
||||
wget -q -O "$output" "$url"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
@@ -110,23 +164,47 @@ require_command() {
|
||||
fi
|
||||
}
|
||||
|
||||
resolve_version() {
|
||||
normalized_version="$(normalize_version "$VERSION")"
|
||||
warn_command_conflict() {
|
||||
expected_path="$INSTALL_BIN_DIR/feynman"
|
||||
resolved_path="$(command -v feynman 2>/dev/null || true)"
|
||||
|
||||
if [ "$normalized_version" != "latest" ]; then
|
||||
printf '%s\n' "$normalized_version"
|
||||
if [ -z "$resolved_path" ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
release_json="$(download_text "https://api.github.com/repos/getcompanion-ai/feynman/releases/latest")"
|
||||
resolved="$(printf '%s\n' "$release_json" | sed -n 's/.*"tag_name":[[:space:]]*"v\([^"]*\)".*/\1/p' | head -n 1)"
|
||||
if [ "$resolved_path" != "$expected_path" ]; then
|
||||
step "Warning: current shell resolves feynman to $resolved_path"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||
step "Or launch directly: $expected_path"
|
||||
|
||||
if [ -z "$resolved" ]; then
|
||||
case "$resolved_path" in
|
||||
*"/node_modules/@companion-ai/feynman/"* | *"/node_modules/.bin/feynman")
|
||||
step "If that path is an old global npm install, remove it with: npm uninstall -g @companion-ai/feynman"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
}
|
||||
|
||||
resolve_release_metadata() {
|
||||
normalized_version="$(normalize_version "$VERSION")"
|
||||
|
||||
if [ "$normalized_version" = "latest" ]; then
|
||||
release_page="$(download_text "https://github.com/getcompanion-ai/feynman/releases/latest")"
|
||||
resolved_version="$(printf '%s\n' "$release_page" | sed -n 's@.*releases/tag/v\([0-9][^"<>[:space:]]*\).*@\1@p' | head -n 1)"
|
||||
|
||||
if [ -z "$resolved_version" ]; then
|
||||
echo "Failed to resolve the latest Feynman release version." >&2
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
resolved_version="$normalized_version"
|
||||
fi
|
||||
|
||||
printf '%s\n' "$resolved"
|
||||
bundle_name="feynman-${resolved_version}-${asset_target}"
|
||||
archive_name="${bundle_name}.${archive_extension}"
|
||||
download_url="${FEYNMAN_INSTALL_BASE_URL:-https://github.com/getcompanion-ai/feynman/releases/download/v${resolved_version}}/${archive_name}"
|
||||
|
||||
printf '%s\n%s\n%s\n%s\n' "$resolved_version" "$bundle_name" "$archive_name" "$download_url"
|
||||
}
|
||||
|
||||
case "$(uname -s)" in
|
||||
@@ -158,12 +236,13 @@ esac
|
||||
require_command mktemp
|
||||
require_command tar
|
||||
|
||||
resolved_version="$(resolve_version)"
|
||||
asset_target="$os-$arch"
|
||||
bundle_name="feynman-${resolved_version}-${asset_target}"
|
||||
archive_name="${bundle_name}.tar.gz"
|
||||
base_url="${FEYNMAN_INSTALL_BASE_URL:-https://github.com/getcompanion-ai/feynman/releases/download/v${resolved_version}}"
|
||||
download_url="${base_url}/${archive_name}"
|
||||
archive_extension="tar.gz"
|
||||
release_metadata="$(resolve_release_metadata)"
|
||||
resolved_version="$(printf '%s\n' "$release_metadata" | sed -n '1p')"
|
||||
bundle_name="$(printf '%s\n' "$release_metadata" | sed -n '2p')"
|
||||
archive_name="$(printf '%s\n' "$release_metadata" | sed -n '3p')"
|
||||
download_url="$(printf '%s\n' "$release_metadata" | sed -n '4p')"
|
||||
|
||||
step "Installing Feynman ${resolved_version} for ${asset_target}"
|
||||
|
||||
@@ -174,13 +253,29 @@ cleanup() {
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
archive_path="$tmp_dir/$archive_name"
|
||||
download_file "$download_url" "$archive_path"
|
||||
step "Downloading ${archive_name}"
|
||||
if ! download_file "$download_url" "$archive_path"; then
|
||||
cat >&2 <<EOF
|
||||
Failed to download ${archive_name} from:
|
||||
${download_url}
|
||||
|
||||
The ${asset_target} bundle is missing from the GitHub release.
|
||||
This usually means the release exists, but not all platform bundles were uploaded.
|
||||
|
||||
Workarounds:
|
||||
- try again after the release finishes publishing
|
||||
- install via pnpm instead: pnpm add -g @companion-ai/feynman
|
||||
- install via bun instead: bun add -g @companion-ai/feynman
|
||||
EOF
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p "$INSTALL_APP_DIR"
|
||||
rm -rf "$INSTALL_APP_DIR/$bundle_name"
|
||||
tar -xzf "$archive_path" -C "$INSTALL_APP_DIR"
|
||||
run_with_spinner "Extracting ${archive_name}" tar -xzf "$archive_path" -C "$INSTALL_APP_DIR"
|
||||
|
||||
mkdir -p "$INSTALL_BIN_DIR"
|
||||
step "Linking feynman into $INSTALL_BIN_DIR"
|
||||
cat >"$INSTALL_BIN_DIR/feynman" <<EOF
|
||||
#!/bin/sh
|
||||
set -eu
|
||||
@@ -193,20 +288,22 @@ add_to_path
|
||||
case "$path_action" in
|
||||
added)
|
||||
step "PATH updated for future shells in $path_profile"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||
;;
|
||||
configured)
|
||||
step "PATH is already configured for future shells in $path_profile"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||
;;
|
||||
skipped)
|
||||
step "PATH update skipped"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
|
||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||
;;
|
||||
*)
|
||||
step "$INSTALL_BIN_DIR is already on PATH"
|
||||
step "Run: feynman"
|
||||
step "Run: hash -r && feynman"
|
||||
;;
|
||||
esac
|
||||
|
||||
warn_command_conflict
|
||||
|
||||
printf 'Feynman %s installed successfully.\n' "$resolved_version"
|
||||
|
||||
204
website/public/install-skills
Normal file
204
website/public/install-skills
Normal file
@@ -0,0 +1,204 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -eu
|
||||
|
||||
VERSION="latest"
|
||||
SCOPE="${FEYNMAN_SKILLS_SCOPE:-user}"
|
||||
TARGET_DIR="${FEYNMAN_SKILLS_DIR:-}"
|
||||
|
||||
step() {
|
||||
printf '==> %s\n' "$1"
|
||||
}
|
||||
|
||||
normalize_version() {
|
||||
case "$1" in
|
||||
"")
|
||||
printf 'latest\n'
|
||||
;;
|
||||
latest | stable)
|
||||
printf 'latest\n'
|
||||
;;
|
||||
edge)
|
||||
echo "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." >&2
|
||||
exit 1
|
||||
;;
|
||||
v*)
|
||||
printf '%s\n' "${1#v}"
|
||||
;;
|
||||
*)
|
||||
printf '%s\n' "$1"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
download_file() {
|
||||
url="$1"
|
||||
output="$2"
|
||||
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
if [ -t 2 ]; then
|
||||
curl -fL --progress-bar "$url" -o "$output"
|
||||
else
|
||||
curl -fsSL "$url" -o "$output"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
if command -v wget >/dev/null 2>&1; then
|
||||
if [ -t 2 ]; then
|
||||
wget --show-progress -O "$output" "$url"
|
||||
else
|
||||
wget -q -O "$output" "$url"
|
||||
fi
|
||||
return
|
||||
fi
|
||||
|
||||
echo "curl or wget is required to install Feynman skills." >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
download_text() {
|
||||
url="$1"
|
||||
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
curl -fsSL "$url"
|
||||
return
|
||||
fi
|
||||
|
||||
if command -v wget >/dev/null 2>&1; then
|
||||
wget -q -O - "$url"
|
||||
return
|
||||
fi
|
||||
|
||||
echo "curl or wget is required to install Feynman skills." >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
resolve_version() {
|
||||
normalized_version="$(normalize_version "$VERSION")"
|
||||
|
||||
if [ "$normalized_version" = "latest" ]; then
|
||||
release_page="$(download_text "https://github.com/getcompanion-ai/feynman/releases/latest")"
|
||||
resolved_version="$(printf '%s\n' "$release_page" | sed -n 's@.*releases/tag/v\([0-9][^"<>[:space:]]*\).*@\1@p' | head -n 1)"
|
||||
|
||||
if [ -z "$resolved_version" ]; then
|
||||
echo "Failed to resolve the latest Feynman release version." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
printf '%s\nv%s\n' "$resolved_version" "$resolved_version"
|
||||
return
|
||||
fi
|
||||
|
||||
printf '%s\nv%s\n' "$normalized_version" "$normalized_version"
|
||||
}
|
||||
|
||||
resolve_target_dir() {
|
||||
if [ -n "$TARGET_DIR" ]; then
|
||||
printf '%s\n' "$TARGET_DIR"
|
||||
return
|
||||
fi
|
||||
|
||||
case "$SCOPE" in
|
||||
repo)
|
||||
printf '%s/.agents/skills/feynman\n' "$PWD"
|
||||
;;
|
||||
user)
|
||||
codex_home="${CODEX_HOME:-$HOME/.codex}"
|
||||
printf '%s/skills/feynman\n' "$codex_home"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown scope: $SCOPE (expected --user or --repo)" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
--repo)
|
||||
SCOPE="repo"
|
||||
;;
|
||||
--user)
|
||||
SCOPE="user"
|
||||
;;
|
||||
--dir)
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "Usage: install-skills.sh [stable|latest|<version>] [--user|--repo] [--dir <path>]" >&2
|
||||
exit 1
|
||||
fi
|
||||
TARGET_DIR="$2"
|
||||
shift
|
||||
;;
|
||||
edge|stable|latest|v*|[0-9]*)
|
||||
VERSION="$1"
|
||||
;;
|
||||
*)
|
||||
echo "Unknown argument: $1" >&2
|
||||
echo "Usage: install-skills.sh [stable|latest|<version>] [--user|--repo] [--dir <path>]" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
archive_metadata="$(resolve_version)"
|
||||
resolved_version="$(printf '%s\n' "$archive_metadata" | sed -n '1p')"
|
||||
git_ref="$(printf '%s\n' "$archive_metadata" | sed -n '2p')"
|
||||
|
||||
archive_url=""
|
||||
case "$git_ref" in
|
||||
main)
|
||||
archive_url="https://github.com/getcompanion-ai/feynman/archive/refs/heads/main.tar.gz"
|
||||
;;
|
||||
v*)
|
||||
archive_url="https://github.com/getcompanion-ai/feynman/archive/refs/tags/${git_ref}.tar.gz"
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -z "$archive_url" ]; then
|
||||
echo "Could not resolve a download URL for ref: $git_ref" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
install_dir="$(resolve_target_dir)"
|
||||
|
||||
step "Installing Feynman skills ${resolved_version} (${SCOPE})"
|
||||
|
||||
tmp_dir="$(mktemp -d)"
|
||||
cleanup() {
|
||||
rm -rf "$tmp_dir"
|
||||
}
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
archive_path="$tmp_dir/feynman-skills.tar.gz"
|
||||
step "Downloading skills archive"
|
||||
download_file "$archive_url" "$archive_path"
|
||||
|
||||
extract_dir="$tmp_dir/extract"
|
||||
mkdir -p "$extract_dir"
|
||||
step "Extracting skills"
|
||||
tar -xzf "$archive_path" -C "$extract_dir"
|
||||
|
||||
source_root="$(find "$extract_dir" -mindepth 1 -maxdepth 1 -type d | head -n 1)"
|
||||
if [ -z "$source_root" ] || [ ! -d "$source_root/skills" ]; then
|
||||
echo "Could not find skills/ in downloaded archive." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p "$(dirname "$install_dir")"
|
||||
rm -rf "$install_dir"
|
||||
mkdir -p "$install_dir"
|
||||
cp -R "$source_root/skills/." "$install_dir/"
|
||||
|
||||
step "Installed skills to $install_dir"
|
||||
case "$SCOPE" in
|
||||
repo)
|
||||
step "Repo-local skills will be discovered automatically from .agents/skills"
|
||||
;;
|
||||
user)
|
||||
step "User-level skills will be discovered from \$CODEX_HOME/skills"
|
||||
;;
|
||||
esac
|
||||
|
||||
printf 'Feynman skills %s installed successfully.\n' "$resolved_version"
|
||||
123
website/public/install-skills.ps1
Normal file
123
website/public/install-skills.ps1
Normal file
@@ -0,0 +1,123 @@
|
||||
param(
|
||||
[string]$Version = "latest",
|
||||
[ValidateSet("User", "Repo")]
|
||||
[string]$Scope = "User",
|
||||
[string]$TargetDir = ""
|
||||
)
|
||||
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
function Normalize-Version {
|
||||
param([string]$RequestedVersion)
|
||||
|
||||
if (-not $RequestedVersion) {
|
||||
return "latest"
|
||||
}
|
||||
|
||||
switch ($RequestedVersion.ToLowerInvariant()) {
|
||||
"latest" { return "latest" }
|
||||
"stable" { return "latest" }
|
||||
"edge" { throw "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." }
|
||||
default { return $RequestedVersion.TrimStart("v") }
|
||||
}
|
||||
}
|
||||
|
||||
function Resolve-LatestReleaseVersion {
|
||||
$page = Invoke-WebRequest -Uri "https://github.com/getcompanion-ai/feynman/releases/latest"
|
||||
$match = [regex]::Match($page.Content, 'releases/tag/v([0-9][^"''<>\s]*)')
|
||||
if (-not $match.Success) {
|
||||
throw "Failed to resolve the latest Feynman release version."
|
||||
}
|
||||
|
||||
return $match.Groups[1].Value
|
||||
}
|
||||
|
||||
function Resolve-VersionMetadata {
|
||||
param([string]$RequestedVersion)
|
||||
|
||||
$normalizedVersion = Normalize-Version -RequestedVersion $RequestedVersion
|
||||
|
||||
if ($normalizedVersion -eq "latest") {
|
||||
$resolvedVersion = Resolve-LatestReleaseVersion
|
||||
} else {
|
||||
$resolvedVersion = $normalizedVersion
|
||||
}
|
||||
|
||||
return [PSCustomObject]@{
|
||||
ResolvedVersion = $resolvedVersion
|
||||
GitRef = "v$resolvedVersion"
|
||||
DownloadUrl = "https://github.com/getcompanion-ai/feynman/archive/refs/tags/v$resolvedVersion.zip"
|
||||
}
|
||||
}
|
||||
|
||||
function Resolve-InstallDir {
|
||||
param(
|
||||
[string]$ResolvedScope,
|
||||
[string]$ResolvedTargetDir
|
||||
)
|
||||
|
||||
if ($ResolvedTargetDir) {
|
||||
return $ResolvedTargetDir
|
||||
}
|
||||
|
||||
if ($ResolvedScope -eq "Repo") {
|
||||
return Join-Path (Get-Location) ".agents\skills\feynman"
|
||||
}
|
||||
|
||||
$codexHome = if ($env:CODEX_HOME) { $env:CODEX_HOME } else { Join-Path $HOME ".codex" }
|
||||
return Join-Path $codexHome "skills\feynman"
|
||||
}
|
||||
|
||||
$metadata = Resolve-VersionMetadata -RequestedVersion $Version
|
||||
$resolvedVersion = $metadata.ResolvedVersion
|
||||
$downloadUrl = $metadata.DownloadUrl
|
||||
$installDir = Resolve-InstallDir -ResolvedScope $Scope -ResolvedTargetDir $TargetDir
|
||||
|
||||
$tmpDir = Join-Path ([System.IO.Path]::GetTempPath()) ("feynman-skills-install-" + [System.Guid]::NewGuid().ToString("N"))
|
||||
New-Item -ItemType Directory -Path $tmpDir | Out-Null
|
||||
|
||||
try {
|
||||
$archivePath = Join-Path $tmpDir "feynman-skills.zip"
|
||||
$extractDir = Join-Path $tmpDir "extract"
|
||||
|
||||
Write-Host "==> Downloading Feynman skills $resolvedVersion"
|
||||
Invoke-WebRequest -Uri $downloadUrl -OutFile $archivePath
|
||||
|
||||
Write-Host "==> Extracting skills"
|
||||
Expand-Archive -LiteralPath $archivePath -DestinationPath $extractDir -Force
|
||||
|
||||
$sourceRoot = Get-ChildItem -Path $extractDir -Directory | Select-Object -First 1
|
||||
if (-not $sourceRoot) {
|
||||
throw "Could not find extracted Feynman archive."
|
||||
}
|
||||
|
||||
$skillsSource = Join-Path $sourceRoot.FullName "skills"
|
||||
if (-not (Test-Path $skillsSource)) {
|
||||
throw "Could not find skills/ in downloaded archive."
|
||||
}
|
||||
|
||||
$installParent = Split-Path $installDir -Parent
|
||||
if ($installParent) {
|
||||
New-Item -ItemType Directory -Path $installParent -Force | Out-Null
|
||||
}
|
||||
|
||||
if (Test-Path $installDir) {
|
||||
Remove-Item -Recurse -Force $installDir
|
||||
}
|
||||
|
||||
New-Item -ItemType Directory -Path $installDir -Force | Out-Null
|
||||
Copy-Item -Path (Join-Path $skillsSource "*") -Destination $installDir -Recurse -Force
|
||||
|
||||
Write-Host "==> Installed skills to $installDir"
|
||||
if ($Scope -eq "Repo") {
|
||||
Write-Host "Repo-local skills will be discovered automatically from .agents/skills."
|
||||
} else {
|
||||
Write-Host "User-level skills will be discovered from `$CODEX_HOME/skills."
|
||||
}
|
||||
|
||||
Write-Host "Feynman skills $resolvedVersion installed successfully."
|
||||
} finally {
|
||||
if (Test-Path $tmpDir) {
|
||||
Remove-Item -Recurse -Force $tmpDir
|
||||
}
|
||||
}
|
||||
@@ -4,36 +4,88 @@ param(
|
||||
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
function Resolve-Version {
|
||||
function Normalize-Version {
|
||||
param([string]$RequestedVersion)
|
||||
|
||||
if ($RequestedVersion -and $RequestedVersion -ne "latest") {
|
||||
return $RequestedVersion.TrimStart("v")
|
||||
if (-not $RequestedVersion) {
|
||||
return "latest"
|
||||
}
|
||||
|
||||
$release = Invoke-RestMethod -Uri "https://api.github.com/repos/getcompanion-ai/feynman/releases/latest"
|
||||
if (-not $release.tag_name) {
|
||||
switch ($RequestedVersion.ToLowerInvariant()) {
|
||||
"latest" { return "latest" }
|
||||
"stable" { return "latest" }
|
||||
"edge" { throw "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." }
|
||||
default { return $RequestedVersion.TrimStart("v") }
|
||||
}
|
||||
}
|
||||
|
||||
function Resolve-LatestReleaseVersion {
|
||||
$page = Invoke-WebRequest -Uri "https://github.com/getcompanion-ai/feynman/releases/latest"
|
||||
$match = [regex]::Match($page.Content, 'releases/tag/v([0-9][^"''<>\s]*)')
|
||||
if (-not $match.Success) {
|
||||
throw "Failed to resolve the latest Feynman release version."
|
||||
}
|
||||
|
||||
return $release.tag_name.TrimStart("v")
|
||||
return $match.Groups[1].Value
|
||||
}
|
||||
|
||||
function Resolve-ReleaseMetadata {
|
||||
param(
|
||||
[string]$RequestedVersion,
|
||||
[string]$AssetTarget,
|
||||
[string]$BundleExtension
|
||||
)
|
||||
|
||||
$normalizedVersion = Normalize-Version -RequestedVersion $RequestedVersion
|
||||
|
||||
if ($normalizedVersion -eq "latest") {
|
||||
$resolvedVersion = Resolve-LatestReleaseVersion
|
||||
} else {
|
||||
$resolvedVersion = $normalizedVersion
|
||||
}
|
||||
|
||||
$bundleName = "feynman-$resolvedVersion-$AssetTarget"
|
||||
$archiveName = "$bundleName.$BundleExtension"
|
||||
$baseUrl = if ($env:FEYNMAN_INSTALL_BASE_URL) { $env:FEYNMAN_INSTALL_BASE_URL } else { "https://github.com/getcompanion-ai/feynman/releases/download/v$resolvedVersion" }
|
||||
|
||||
return [PSCustomObject]@{
|
||||
ResolvedVersion = $resolvedVersion
|
||||
BundleName = $bundleName
|
||||
ArchiveName = $archiveName
|
||||
DownloadUrl = "$baseUrl/$archiveName"
|
||||
}
|
||||
}
|
||||
|
||||
function Get-ArchSuffix {
|
||||
# Prefer PROCESSOR_ARCHITECTURE which is always available on Windows.
|
||||
# RuntimeInformation::OSArchitecture requires .NET 4.7.1+ and may not
|
||||
# be loaded in every Windows PowerShell 5.1 session.
|
||||
$envArch = $env:PROCESSOR_ARCHITECTURE
|
||||
if ($envArch) {
|
||||
switch ($envArch) {
|
||||
"AMD64" { return "x64" }
|
||||
"ARM64" { return "arm64" }
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
$arch = [System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture
|
||||
switch ($arch.ToString()) {
|
||||
"X64" { return "x64" }
|
||||
"Arm64" { return "arm64" }
|
||||
default { throw "Unsupported architecture: $arch" }
|
||||
}
|
||||
} catch {}
|
||||
|
||||
throw "Unsupported architecture: $envArch"
|
||||
}
|
||||
|
||||
$resolvedVersion = Resolve-Version -RequestedVersion $Version
|
||||
$archSuffix = Get-ArchSuffix
|
||||
$bundleName = "feynman-$resolvedVersion-win32-$archSuffix"
|
||||
$archiveName = "$bundleName.zip"
|
||||
$baseUrl = if ($env:FEYNMAN_INSTALL_BASE_URL) { $env:FEYNMAN_INSTALL_BASE_URL } else { "https://github.com/getcompanion-ai/feynman/releases/download/v$resolvedVersion" }
|
||||
$downloadUrl = "$baseUrl/$archiveName"
|
||||
$assetTarget = "win32-$archSuffix"
|
||||
$release = Resolve-ReleaseMetadata -RequestedVersion $Version -AssetTarget $assetTarget -BundleExtension "zip"
|
||||
$resolvedVersion = $release.ResolvedVersion
|
||||
$bundleName = $release.BundleName
|
||||
$archiveName = $release.ArchiveName
|
||||
$downloadUrl = $release.DownloadUrl
|
||||
|
||||
$installRoot = Join-Path $env:LOCALAPPDATA "Programs\feynman"
|
||||
$installBinDir = Join-Path $installRoot "bin"
|
||||
@@ -44,25 +96,47 @@ New-Item -ItemType Directory -Path $tmpDir | Out-Null
|
||||
|
||||
try {
|
||||
$archivePath = Join-Path $tmpDir $archiveName
|
||||
Write-Host "==> Downloading $archiveName"
|
||||
try {
|
||||
Invoke-WebRequest -Uri $downloadUrl -OutFile $archivePath
|
||||
} catch {
|
||||
throw @"
|
||||
Failed to download $archiveName from:
|
||||
$downloadUrl
|
||||
|
||||
The win32-$archSuffix bundle is missing from the GitHub release.
|
||||
This usually means the release exists, but not all platform bundles were uploaded.
|
||||
|
||||
Workarounds:
|
||||
- try again after the release finishes publishing
|
||||
- install via pnpm instead: pnpm add -g @companion-ai/feynman
|
||||
- install via bun instead: bun add -g @companion-ai/feynman
|
||||
"@
|
||||
}
|
||||
|
||||
New-Item -ItemType Directory -Path $installRoot -Force | Out-Null
|
||||
if (Test-Path $bundleDir) {
|
||||
Remove-Item -Recurse -Force $bundleDir
|
||||
}
|
||||
|
||||
Write-Host "==> Extracting $archiveName"
|
||||
Expand-Archive -LiteralPath $archivePath -DestinationPath $installRoot -Force
|
||||
|
||||
New-Item -ItemType Directory -Path $installBinDir -Force | Out-Null
|
||||
|
||||
$shimPath = Join-Path $installBinDir "feynman.cmd"
|
||||
Write-Host "==> Linking feynman into $installBinDir"
|
||||
@"
|
||||
@echo off
|
||||
"$bundleDir\feynman.cmd" %*
|
||||
"@ | Set-Content -Path $shimPath -Encoding ASCII
|
||||
|
||||
$currentUserPath = [Environment]::GetEnvironmentVariable("Path", "User")
|
||||
if (-not $currentUserPath.Split(';').Contains($installBinDir)) {
|
||||
$alreadyOnPath = $false
|
||||
if ($currentUserPath) {
|
||||
$alreadyOnPath = $currentUserPath.Split(';') -contains $installBinDir
|
||||
}
|
||||
if (-not $alreadyOnPath) {
|
||||
$updatedPath = if ([string]::IsNullOrWhiteSpace($currentUserPath)) {
|
||||
$installBinDir
|
||||
} else {
|
||||
@@ -74,6 +148,16 @@ try {
|
||||
Write-Host "$installBinDir is already on PATH."
|
||||
}
|
||||
|
||||
$resolvedCommand = Get-Command feynman -ErrorAction SilentlyContinue
|
||||
if ($resolvedCommand -and $resolvedCommand.Source -ne $shimPath) {
|
||||
Write-Warning "Current shell resolves feynman to $($resolvedCommand.Source)"
|
||||
Write-Host "Run in a new shell, or run: `$env:Path = '$installBinDir;' + `$env:Path"
|
||||
Write-Host "Then run: feynman"
|
||||
if ($resolvedCommand.Source -like "*node_modules*@companion-ai*feynman*") {
|
||||
Write-Host "If that path is an old global npm install, remove it with: npm uninstall -g @companion-ai/feynman"
|
||||
}
|
||||
}
|
||||
|
||||
Write-Host "Feynman $resolvedVersion installed successfully."
|
||||
} finally {
|
||||
if (Test-Path $tmpDir) {
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
---
|
||||
interface Props {
|
||||
class?: string;
|
||||
size?: 'nav' | 'hero';
|
||||
}
|
||||
|
||||
const { class: className = '', size = 'hero' } = Astro.props;
|
||||
|
||||
const sizeClasses = size === 'nav'
|
||||
? 'text-2xl'
|
||||
: 'text-6xl sm:text-7xl md:text-8xl';
|
||||
---
|
||||
|
||||
<span
|
||||
class:list={[
|
||||
"font-['VT323'] text-accent inline-block tracking-tighter",
|
||||
sizeClasses,
|
||||
className,
|
||||
]}
|
||||
aria-label="Feynman"
|
||||
>feynman</span>
|
||||
@@ -1,9 +0,0 @@
|
||||
<footer class="py-8 mt-16">
|
||||
<div class="max-w-6xl mx-auto px-6 flex flex-col sm:flex-row items-center justify-between gap-4">
|
||||
<span class="text-sm text-text-dim">© 2026 Companion Inc.</span>
|
||||
<div class="flex gap-6">
|
||||
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener" class="text-sm text-text-dim hover:text-text-primary transition-colors">GitHub</a>
|
||||
<a href="/docs/getting-started/installation" class="text-sm text-text-dim hover:text-text-primary transition-colors">Docs</a>
|
||||
</div>
|
||||
</div>
|
||||
</footer>
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
import ThemeToggle from './ThemeToggle.astro';
|
||||
import AsciiLogo from './AsciiLogo.astro';
|
||||
|
||||
interface Props {
|
||||
active?: 'home' | 'docs';
|
||||
}
|
||||
|
||||
const { active = 'home' } = Astro.props;
|
||||
---
|
||||
|
||||
<nav class="sticky top-0 z-50 bg-bg">
|
||||
<div class="max-w-6xl mx-auto px-6 h-14 flex items-center justify-between">
|
||||
<a href="/" class="hover:opacity-80 transition-opacity" aria-label="Feynman">
|
||||
<AsciiLogo size="nav" />
|
||||
</a>
|
||||
<div class="flex items-center gap-6">
|
||||
<a href="/docs/getting-started/installation"
|
||||
class:list={["text-sm transition-colors", active === 'docs' ? 'text-text-primary' : 'text-text-muted hover:text-text-primary']}>
|
||||
Docs
|
||||
</a>
|
||||
<a href="https://github.com/getcompanion-ai/feynman" target="_blank" rel="noopener"
|
||||
class="text-sm text-text-muted hover:text-text-primary transition-colors">
|
||||
GitHub
|
||||
</a>
|
||||
<ThemeToggle />
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
@@ -1,80 +0,0 @@
|
||||
---
|
||||
interface Props {
|
||||
currentSlug: string;
|
||||
}
|
||||
|
||||
const { currentSlug } = Astro.props;
|
||||
|
||||
const sections = [
|
||||
{
|
||||
title: 'Getting Started',
|
||||
items: [
|
||||
{ label: 'Installation', slug: 'getting-started/installation' },
|
||||
{ label: 'Quick Start', slug: 'getting-started/quickstart' },
|
||||
{ label: 'Setup', slug: 'getting-started/setup' },
|
||||
{ label: 'Configuration', slug: 'getting-started/configuration' },
|
||||
],
|
||||
},
|
||||
{
|
||||
title: 'Workflows',
|
||||
items: [
|
||||
{ label: 'Deep Research', slug: 'workflows/deep-research' },
|
||||
{ label: 'Literature Review', slug: 'workflows/literature-review' },
|
||||
{ label: 'Peer Review', slug: 'workflows/review' },
|
||||
{ label: 'Code Audit', slug: 'workflows/audit' },
|
||||
{ label: 'Replication', slug: 'workflows/replication' },
|
||||
{ label: 'Source Comparison', slug: 'workflows/compare' },
|
||||
{ label: 'Draft Writing', slug: 'workflows/draft' },
|
||||
{ label: 'Autoresearch', slug: 'workflows/autoresearch' },
|
||||
{ label: 'Watch', slug: 'workflows/watch' },
|
||||
],
|
||||
},
|
||||
{
|
||||
title: 'Agents',
|
||||
items: [
|
||||
{ label: 'Researcher', slug: 'agents/researcher' },
|
||||
{ label: 'Reviewer', slug: 'agents/reviewer' },
|
||||
{ label: 'Writer', slug: 'agents/writer' },
|
||||
{ label: 'Verifier', slug: 'agents/verifier' },
|
||||
],
|
||||
},
|
||||
{
|
||||
title: 'Tools',
|
||||
items: [
|
||||
{ label: 'AlphaXiv', slug: 'tools/alphaxiv' },
|
||||
{ label: 'Web Search', slug: 'tools/web-search' },
|
||||
{ label: 'Session Search', slug: 'tools/session-search' },
|
||||
{ label: 'Preview', slug: 'tools/preview' },
|
||||
],
|
||||
},
|
||||
{
|
||||
title: 'Reference',
|
||||
items: [
|
||||
{ label: 'CLI Commands', slug: 'reference/cli-commands' },
|
||||
{ label: 'Slash Commands', slug: 'reference/slash-commands' },
|
||||
{ label: 'Package Stack', slug: 'reference/package-stack' },
|
||||
],
|
||||
},
|
||||
];
|
||||
---
|
||||
|
||||
<aside id="sidebar" class="w-64 shrink-0 h-[calc(100vh-3.5rem)] sticky top-14 overflow-y-auto py-6 pr-4 hidden lg:block border-r border-border">
|
||||
{sections.map((section) => (
|
||||
<div class="mb-6">
|
||||
<div class="text-xs font-semibold text-accent uppercase tracking-wider px-3 mb-2">{section.title}</div>
|
||||
{section.items.map((item) => (
|
||||
<a
|
||||
href={`/docs/${item.slug}`}
|
||||
class:list={[
|
||||
'block px-3 py-1.5 text-sm border-l-[2px] transition-colors',
|
||||
currentSlug === item.slug
|
||||
? 'border-accent text-text-primary'
|
||||
: 'border-transparent text-text-muted hover:text-text-primary',
|
||||
]}
|
||||
>
|
||||
{item.label}
|
||||
</a>
|
||||
))}
|
||||
</div>
|
||||
))}
|
||||
</aside>
|
||||
@@ -1,33 +0,0 @@
|
||||
<button id="theme-toggle" class="p-1.5 rounded-md text-text-muted hover:text-text-primary hover:bg-surface transition-colors" aria-label="Toggle theme">
|
||||
<svg id="sun-icon" class="hidden w-[18px] h-[18px]" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
|
||||
<circle cx="12" cy="12" r="5" />
|
||||
<path d="M12 1v2M12 21v2M4.22 4.22l1.42 1.42M18.36 18.36l1.42 1.42M1 12h2M21 12h2M4.22 19.78l1.42-1.42M18.36 5.64l1.42-1.42" />
|
||||
</svg>
|
||||
<svg id="moon-icon" class="hidden w-[18px] h-[18px]" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
|
||||
<path d="M21 12.79A9 9 0 1 1 11.21 3 7 7 0 0 0 21 12.79z" />
|
||||
</svg>
|
||||
</button>
|
||||
|
||||
<script is:inline>
|
||||
(function() {
|
||||
var stored = localStorage.getItem('theme');
|
||||
var prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
|
||||
var dark = stored === 'dark' || (!stored && prefersDark);
|
||||
if (dark) document.documentElement.classList.add('dark');
|
||||
function update() {
|
||||
var isDark = document.documentElement.classList.contains('dark');
|
||||
document.getElementById('sun-icon').style.display = isDark ? 'block' : 'none';
|
||||
document.getElementById('moon-icon').style.display = isDark ? 'none' : 'block';
|
||||
}
|
||||
update();
|
||||
document.addEventListener('DOMContentLoaded', function() {
|
||||
update();
|
||||
document.getElementById('theme-toggle').addEventListener('click', function() {
|
||||
document.documentElement.classList.toggle('dark');
|
||||
var isDark = document.documentElement.classList.contains('dark');
|
||||
localStorage.setItem('theme', isDark ? 'dark' : 'light');
|
||||
update();
|
||||
});
|
||||
});
|
||||
})();
|
||||
</script>
|
||||
49
website/src/components/ui/badge.tsx
Normal file
49
website/src/components/ui/badge.tsx
Normal file
@@ -0,0 +1,49 @@
|
||||
import * as React from "react"
|
||||
import { cva, type VariantProps } from "class-variance-authority"
|
||||
import { Slot } from "radix-ui"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
const badgeVariants = cva(
|
||||
"group/badge inline-flex h-5 w-fit shrink-0 items-center justify-center gap-1 overflow-hidden rounded-4xl border border-transparent px-2 py-0.5 text-xs font-medium whitespace-nowrap transition-all focus-visible:border-ring focus-visible:ring-[3px] focus-visible:ring-ring/50 has-data-[icon=inline-end]:pr-1.5 has-data-[icon=inline-start]:pl-1.5 aria-invalid:border-destructive aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 [&>svg]:pointer-events-none [&>svg]:size-3!",
|
||||
{
|
||||
variants: {
|
||||
variant: {
|
||||
default: "bg-primary text-primary-foreground [a]:hover:bg-primary/80",
|
||||
secondary:
|
||||
"bg-secondary text-secondary-foreground [a]:hover:bg-secondary/80",
|
||||
destructive:
|
||||
"bg-destructive/10 text-destructive focus-visible:ring-destructive/20 dark:bg-destructive/20 dark:focus-visible:ring-destructive/40 [a]:hover:bg-destructive/20",
|
||||
outline:
|
||||
"border-border text-foreground [a]:hover:bg-muted [a]:hover:text-muted-foreground",
|
||||
ghost:
|
||||
"hover:bg-muted hover:text-muted-foreground dark:hover:bg-muted/50",
|
||||
link: "text-primary underline-offset-4 hover:underline",
|
||||
},
|
||||
},
|
||||
defaultVariants: {
|
||||
variant: "default",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
function Badge({
|
||||
className,
|
||||
variant = "default",
|
||||
asChild = false,
|
||||
...props
|
||||
}: React.ComponentProps<"span"> &
|
||||
VariantProps<typeof badgeVariants> & { asChild?: boolean }) {
|
||||
const Comp = asChild ? Slot.Root : "span"
|
||||
|
||||
return (
|
||||
<Comp
|
||||
data-slot="badge"
|
||||
data-variant={variant}
|
||||
className={cn(badgeVariants({ variant }), className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Badge, badgeVariants }
|
||||
67
website/src/components/ui/button.tsx
Normal file
67
website/src/components/ui/button.tsx
Normal file
@@ -0,0 +1,67 @@
|
||||
import * as React from "react"
|
||||
import { cva, type VariantProps } from "class-variance-authority"
|
||||
import { Slot } from "radix-ui"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
const buttonVariants = cva(
|
||||
"group/button inline-flex shrink-0 items-center justify-center rounded-md border border-transparent bg-clip-padding text-sm font-medium whitespace-nowrap transition-all outline-none select-none focus-visible:border-ring focus-visible:ring-3 focus-visible:ring-ring/50 active:not-aria-[haspopup]:translate-y-px disabled:pointer-events-none disabled:opacity-50 aria-invalid:border-destructive aria-invalid:ring-3 aria-invalid:ring-destructive/20 dark:aria-invalid:border-destructive/50 dark:aria-invalid:ring-destructive/40 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
|
||||
{
|
||||
variants: {
|
||||
variant: {
|
||||
default: "bg-primary text-primary-foreground hover:bg-primary/80",
|
||||
outline:
|
||||
"border-border bg-background shadow-xs hover:bg-muted hover:text-foreground aria-expanded:bg-muted aria-expanded:text-foreground dark:border-input dark:bg-input/30 dark:hover:bg-input/50",
|
||||
secondary:
|
||||
"bg-secondary text-secondary-foreground hover:bg-secondary/80 aria-expanded:bg-secondary aria-expanded:text-secondary-foreground",
|
||||
ghost:
|
||||
"hover:bg-muted hover:text-foreground aria-expanded:bg-muted aria-expanded:text-foreground dark:hover:bg-muted/50",
|
||||
destructive:
|
||||
"bg-destructive/10 text-destructive hover:bg-destructive/20 focus-visible:border-destructive/40 focus-visible:ring-destructive/20 dark:bg-destructive/20 dark:hover:bg-destructive/30 dark:focus-visible:ring-destructive/40",
|
||||
link: "text-primary underline-offset-4 hover:underline",
|
||||
},
|
||||
size: {
|
||||
default:
|
||||
"h-9 gap-1.5 px-2.5 in-data-[slot=button-group]:rounded-md has-data-[icon=inline-end]:pr-2 has-data-[icon=inline-start]:pl-2",
|
||||
xs: "h-6 gap-1 rounded-[min(var(--radius-md),8px)] px-2 text-xs in-data-[slot=button-group]:rounded-md has-data-[icon=inline-end]:pr-1.5 has-data-[icon=inline-start]:pl-1.5 [&_svg:not([class*='size-'])]:size-3",
|
||||
sm: "h-8 gap-1 rounded-[min(var(--radius-md),10px)] px-2.5 in-data-[slot=button-group]:rounded-md has-data-[icon=inline-end]:pr-1.5 has-data-[icon=inline-start]:pl-1.5",
|
||||
lg: "h-10 gap-1.5 px-2.5 has-data-[icon=inline-end]:pr-3 has-data-[icon=inline-start]:pl-3",
|
||||
icon: "size-9",
|
||||
"icon-xs":
|
||||
"size-6 rounded-[min(var(--radius-md),8px)] in-data-[slot=button-group]:rounded-md [&_svg:not([class*='size-'])]:size-3",
|
||||
"icon-sm":
|
||||
"size-8 rounded-[min(var(--radius-md),10px)] in-data-[slot=button-group]:rounded-md",
|
||||
"icon-lg": "size-10",
|
||||
},
|
||||
},
|
||||
defaultVariants: {
|
||||
variant: "default",
|
||||
size: "default",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
function Button({
|
||||
className,
|
||||
variant = "default",
|
||||
size = "default",
|
||||
asChild = false,
|
||||
...props
|
||||
}: React.ComponentProps<"button"> &
|
||||
VariantProps<typeof buttonVariants> & {
|
||||
asChild?: boolean
|
||||
}) {
|
||||
const Comp = asChild ? Slot.Root : "button"
|
||||
|
||||
return (
|
||||
<Comp
|
||||
data-slot="button"
|
||||
data-variant={variant}
|
||||
data-size={size}
|
||||
className={cn(buttonVariants({ variant, size, className }))}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Button, buttonVariants }
|
||||
103
website/src/components/ui/card.tsx
Normal file
103
website/src/components/ui/card.tsx
Normal file
@@ -0,0 +1,103 @@
|
||||
import * as React from "react"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Card({
|
||||
className,
|
||||
size = "default",
|
||||
...props
|
||||
}: React.ComponentProps<"div"> & { size?: "default" | "sm" }) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card"
|
||||
data-size={size}
|
||||
className={cn(
|
||||
"group/card flex flex-col gap-6 overflow-hidden rounded-xl bg-card py-6 text-sm text-card-foreground shadow-xs ring-1 ring-foreground/10 has-[>img:first-child]:pt-0 data-[size=sm]:gap-4 data-[size=sm]:py-4 *:[img:first-child]:rounded-t-xl *:[img:last-child]:rounded-b-xl",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardHeader({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-header"
|
||||
className={cn(
|
||||
"group/card-header @container/card-header grid auto-rows-min items-start gap-1 rounded-t-xl px-6 group-data-[size=sm]/card:px-4 has-data-[slot=card-action]:grid-cols-[1fr_auto] has-data-[slot=card-description]:grid-rows-[auto_auto] [.border-b]:pb-6 group-data-[size=sm]/card:[.border-b]:pb-4",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardTitle({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-title"
|
||||
className={cn(
|
||||
"font-heading text-base leading-normal font-medium group-data-[size=sm]/card:text-sm",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardDescription({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-description"
|
||||
className={cn("text-sm text-muted-foreground", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardAction({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-action"
|
||||
className={cn(
|
||||
"col-start-2 row-span-2 row-start-1 self-start justify-self-end",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardContent({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-content"
|
||||
className={cn("px-6 group-data-[size=sm]/card:px-4", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardFooter({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-footer"
|
||||
className={cn(
|
||||
"flex items-center rounded-b-xl px-6 group-data-[size=sm]/card:px-4 [.border-t]:pt-6 group-data-[size=sm]/card:[.border-t]:pt-4",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export {
|
||||
Card,
|
||||
CardHeader,
|
||||
CardFooter,
|
||||
CardTitle,
|
||||
CardAction,
|
||||
CardDescription,
|
||||
CardContent,
|
||||
}
|
||||
26
website/src/components/ui/separator.tsx
Normal file
26
website/src/components/ui/separator.tsx
Normal file
@@ -0,0 +1,26 @@
|
||||
import * as React from "react"
|
||||
import { Separator as SeparatorPrimitive } from "radix-ui"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Separator({
|
||||
className,
|
||||
orientation = "horizontal",
|
||||
decorative = true,
|
||||
...props
|
||||
}: React.ComponentProps<typeof SeparatorPrimitive.Root>) {
|
||||
return (
|
||||
<SeparatorPrimitive.Root
|
||||
data-slot="separator"
|
||||
decorative={decorative}
|
||||
orientation={orientation}
|
||||
className={cn(
|
||||
"shrink-0 bg-border data-horizontal:h-px data-horizontal:w-full data-vertical:w-px data-vertical:self-stretch",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Separator }
|
||||
@@ -1,75 +1,32 @@
|
||||
---
|
||||
title: Researcher
|
||||
description: Gather primary evidence across papers, web sources, repos, docs, and local artifacts.
|
||||
description: The researcher agent searches, reads, and extracts findings from papers and web sources.
|
||||
section: Agents
|
||||
order: 1
|
||||
---
|
||||
|
||||
## Source
|
||||
The researcher is the primary information-gathering agent in Feynman. It searches academic databases and the web, reads papers and articles, extracts key findings, and organizes source material for other agents to synthesize. Most workflows start with the researcher.
|
||||
|
||||
Generated from `.feynman/agents/researcher.md`. Edit that prompt file, not this docs page.
|
||||
## What it does
|
||||
|
||||
## Role
|
||||
The researcher agent handles the entire source discovery and extraction pipeline. It formulates search queries based on your topic, evaluates results for relevance, reads full documents, and extracts structured information including claims, methodology, results, and limitations.
|
||||
|
||||
Gather primary evidence across papers, web sources, repos, docs, and local artifacts.
|
||||
|
||||
## Tools
|
||||
|
||||
`read`, `bash`, `grep`, `find`, `ls`
|
||||
|
||||
## Default Output
|
||||
|
||||
`research.md`
|
||||
|
||||
## Integrity commandments
|
||||
1. **Never fabricate a source.** Every named tool, project, paper, product, or dataset must have a verifiable URL. If you cannot find a URL, do not mention it.
|
||||
2. **Never claim a project exists without checking.** Before citing a GitHub repo, search for it. Before citing a paper, find it. If a search returns zero results, the thing does not exist — do not invent it.
|
||||
3. **Never extrapolate details you haven't read.** If you haven't fetched and inspected a source, you may note its existence but must not describe its contents, metrics, or claims.
|
||||
4. **URL or it didn't happen.** Every entry in your evidence table must include a direct, checkable URL. No URL = not included.
|
||||
When multiple researcher agents are spawned in parallel (which is the default for deep research and literature review), each agent tackles a different angle of the topic. One might search for foundational papers while another looks for recent work that challenges the established view. This parallel approach produces broader coverage than a single sequential search.
|
||||
|
||||
## Search strategy
|
||||
1. **Start wide.** Begin with short, broad queries to map the landscape. Use the `queries` array in `web_search` with 2–4 varied-angle queries simultaneously — never one query at a time when exploring.
|
||||
2. **Evaluate availability.** After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.
|
||||
3. **Progressively narrow.** Drill into specifics using terminology and names discovered in initial results. Refine queries, don't repeat them.
|
||||
4. **Cross-source.** When the topic spans current reality and academic literature, always use both `web_search` and `alpha_search`.
|
||||
|
||||
Use `recencyFilter` on `web_search` for fast-moving topics. Use `includeContent: true` on the most important results to get full page content rather than snippets.
|
||||
The researcher uses a multi-source search strategy. For academic topics, it queries AlphaXiv for papers and uses citation chains to discover related work. For applied topics, it searches the web for documentation, blog posts, and code repositories. For most topics, it uses both channels and cross-references findings.
|
||||
|
||||
## Source quality
|
||||
- **Prefer:** academic papers, official documentation, primary datasets, verified benchmarks, government filings, reputable journalism, expert technical blogs, official vendor pages
|
||||
- **Accept with caveats:** well-cited secondary sources, established trade publications
|
||||
- **Deprioritize:** SEO-optimized listicles, undated blog posts, content aggregators, social media without primary links
|
||||
- **Reject:** sources with no author and no date, content that appears AI-generated with no primary backing
|
||||
Search queries are diversified automatically. Rather than running the same query multiple times, the researcher generates 2-4 varied queries that approach the topic from different angles. This catches papers that use different terminology for the same concept and surfaces sources that a single query would miss.
|
||||
|
||||
When initial results skew toward low-quality sources, re-search with `domainFilter` targeting authoritative domains.
|
||||
## Source evaluation
|
||||
|
||||
## Output format
|
||||
Not every search result is worth reading in full. The researcher evaluates results by scanning abstracts and summaries first, then selects the most relevant and authoritative sources for deep reading. It considers publication venue, citation count, recency, and topical relevance when prioritizing sources.
|
||||
|
||||
Assign each source a stable numeric ID. Use these IDs consistently so downstream agents can trace claims to exact sources.
|
||||
## Extraction
|
||||
|
||||
### Evidence table
|
||||
When reading a source in depth, the researcher extracts structured data: the main claims and their supporting evidence, methodology details, experimental results, stated limitations, and connections to other work. Each extracted item is tagged with its source location for traceability.
|
||||
|
||||
| # | Source | URL | Key claim | Type | Confidence |
|
||||
|---|--------|-----|-----------|------|------------|
|
||||
| 1 | ... | ... | ... | primary / secondary / self-reported | high / medium / low |
|
||||
## Used by
|
||||
|
||||
### Findings
|
||||
|
||||
Write findings using inline source references: `[1]`, `[2]`, etc. Every factual claim must cite at least one source by number.
|
||||
|
||||
### Sources
|
||||
|
||||
Numbered list matching the evidence table:
|
||||
1. Author/Title — URL
|
||||
2. Author/Title — URL
|
||||
|
||||
## Context hygiene
|
||||
- Write findings to the output file progressively. Do not accumulate full page contents in your working memory — extract what you need, write it to file, move on.
|
||||
- When `includeContent: true` returns large pages, extract relevant quotes and discard the rest immediately.
|
||||
- If your search produces 10+ results, triage by title/snippet first. Only fetch full content for the top candidates.
|
||||
- Return a one-line summary to the parent, not full findings. The parent reads the output file.
|
||||
|
||||
## Output contract
|
||||
- Save to the output file (default: `research.md`).
|
||||
- Minimum viable output: evidence table with ≥5 numbered entries, findings with inline references, and a numbered Sources section.
|
||||
- Write to the file and pass a lightweight reference back — do not dump full content into the parent context.
|
||||
The researcher agent is used by the `/deepresearch`, `/lit`, `/review`, `/audit`, `/replicate`, `/compare`, and `/draft` workflows. It is the most frequently invoked agent in the system. You do not invoke it directly -- it is dispatched automatically by the workflow orchestrator.
|
||||
|
||||
@@ -1,93 +1,33 @@
|
||||
---
|
||||
title: Reviewer
|
||||
description: Simulate a tough but constructive AI research peer reviewer with inline annotations.
|
||||
description: The reviewer agent evaluates documents with severity-graded academic feedback.
|
||||
section: Agents
|
||||
order: 2
|
||||
---
|
||||
|
||||
## Source
|
||||
The reviewer agent evaluates documents, papers, and research artifacts with the rigor of an academic peer reviewer. It produces severity-graded feedback covering methodology, claims, writing quality, and reproducibility.
|
||||
|
||||
Generated from `.feynman/agents/reviewer.md`. Edit that prompt file, not this docs page.
|
||||
## What it does
|
||||
|
||||
## Role
|
||||
The reviewer reads a document end-to-end and evaluates it against standard academic criteria. It checks whether claims are supported by the presented evidence, whether the methodology is sound and described in sufficient detail, whether the experimental design controls for confounds, and whether the writing is clear and complete.
|
||||
|
||||
Simulate a tough but constructive AI research peer reviewer with inline annotations.
|
||||
Each piece of feedback is assigned a severity level. **Critical** issues are fundamental problems that undermine the document's validity, such as a statistical test applied incorrectly or a conclusion not supported by the data. **Major** issues are significant problems that should be addressed, like missing baselines or inadequate ablation studies. **Minor** issues are suggestions for improvement, and **nits** are stylistic or formatting comments.
|
||||
|
||||
## Default Output
|
||||
## Evaluation criteria
|
||||
|
||||
`review.md`
|
||||
The reviewer evaluates documents across several dimensions:
|
||||
|
||||
Your job is to act like a skeptical but fair peer reviewer for AI/ML systems work.
|
||||
- **Claims vs. Evidence** -- Does the evidence presented actually support the claims made?
|
||||
- **Methodology** -- Is the approach sound? Are there confounds or biases?
|
||||
- **Experimental Design** -- Are baselines appropriate? Are ablations sufficient?
|
||||
- **Reproducibility** -- Could someone replicate this work from the description alone?
|
||||
- **Writing Quality** -- Is the paper clear, well-organized, and free of ambiguity?
|
||||
- **Completeness** -- Are limitations discussed? Is related work adequately covered?
|
||||
|
||||
## Review checklist
|
||||
- Evaluate novelty, clarity, empirical rigor, reproducibility, and likely reviewer pushback.
|
||||
- Do not praise vaguely. Every positive claim should be tied to specific evidence.
|
||||
- Look for:
|
||||
- missing or weak baselines
|
||||
- missing ablations
|
||||
- evaluation mismatches
|
||||
- unclear claims of novelty
|
||||
- weak related-work positioning
|
||||
- insufficient statistical evidence
|
||||
- benchmark leakage or contamination risks
|
||||
- under-specified implementation details
|
||||
- claims that outrun the experiments
|
||||
- Distinguish between fatal issues (**FATAL**), strong concerns (**MAJOR**), and polish issues (**MINOR**), matching the severity labels used in the structured review.
|
||||
- Preserve uncertainty. If the draft might pass depending on venue norms, say so explicitly.
|
||||
## Confidence scoring
|
||||
|
||||
## Output format
|
||||
The reviewer provides a confidence score for each finding, indicating how certain it is about the assessment. High-confidence findings are clear-cut issues (a statistical error, a missing citation). Lower-confidence findings are judgment calls (whether a baseline is sufficient, whether more ablations are needed) where reasonable reviewers might disagree.
|
||||
|
||||
Produce two sections: a structured review and inline annotations.
|
||||
## Used by
|
||||
|
||||
### Part 1: Structured Review
|
||||
|
||||
```markdown
|
||||
## Summary
|
||||
1-2 paragraph summary of the paper's contributions and approach.
|
||||
|
||||
## Strengths
|
||||
- [S1] ...
|
||||
- [S2] ...
|
||||
|
||||
## Weaknesses
|
||||
- [W1] **FATAL:** ...
|
||||
- [W2] **MAJOR:** ...
|
||||
- [W3] **MINOR:** ...
|
||||
|
||||
## Questions for Authors
|
||||
- [Q1] ...
|
||||
|
||||
## Verdict
|
||||
Overall assessment and confidence score. Would this pass at [venue]?
|
||||
|
||||
## Revision Plan
|
||||
Prioritized, concrete steps to address each weakness.
|
||||
```
|
||||
|
||||
### Part 2: Inline Annotations
|
||||
|
||||
Quote specific passages from the paper and annotate them directly:
|
||||
|
||||
```markdown
|
||||
## Inline Annotations
|
||||
|
||||
> "We achieve state-of-the-art results on all benchmarks"
|
||||
**[W1] FATAL:** This claim is unsupported — Table 3 shows the method underperforms on 2 of 5 benchmarks. Revise to accurately reflect results.
|
||||
|
||||
> "Our approach is novel in combining X with Y"
|
||||
**[W3] MINOR:** Z et al. (2024) combined X with Y in a different domain. Acknowledge this and clarify the distinction.
|
||||
|
||||
> "We use a learning rate of 1e-4"
|
||||
**[Q1]:** Was this tuned? What range was searched? This matters for reproducibility.
|
||||
```
|
||||
|
||||
Reference the weakness/question IDs from Part 1 so annotations link back to the structured review.
|
||||
|
||||
## Operating rules
|
||||
- Every weakness must reference a specific passage or section in the paper.
|
||||
- Inline annotations must quote the exact text being critiqued.
|
||||
- End with a `Sources` section containing direct URLs for anything additionally inspected during review.
|
||||
|
||||
## Output contract
|
||||
- Save the main artifact to `review.md`.
|
||||
- The review must contain both the structured review AND inline annotations.
|
||||
The reviewer agent is the primary agent in the `/review` workflow. It also contributes to `/audit` (evaluating paper claims against code) and `/compare` (assessing the strength of evidence across sources). Like all agents, it is dispatched automatically by the workflow orchestrator.
|
||||
|
||||
@@ -1,50 +1,36 @@
|
||||
---
|
||||
title: Verifier
|
||||
description: Post-process a draft to add inline citations and verify every source URL.
|
||||
description: The verifier agent cross-checks claims against their cited sources.
|
||||
section: Agents
|
||||
order: 4
|
||||
---
|
||||
|
||||
## Source
|
||||
The verifier agent is responsible for fact-checking and validation. It cross-references claims against their cited sources, checks code implementations against paper descriptions, and flags unsupported or misattributed assertions.
|
||||
|
||||
Generated from `.feynman/agents/verifier.md`. Edit that prompt file, not this docs page.
|
||||
## What it does
|
||||
|
||||
## Role
|
||||
The verifier performs targeted checks on specific claims rather than reading documents end-to-end like the reviewer. It takes a claim and its cited source, retrieves the source, and determines whether the source actually supports the claim as stated. This catches misattributions (citing a paper that says something different), overstatements (claiming a stronger result than the source reports), and fabrications (claims with no basis in the cited source).
|
||||
|
||||
Post-process a draft to add inline citations and verify every source URL.
|
||||
When checking code against papers, the verifier examines specific implementation details: hyperparameters, architecture configurations, training procedures, and evaluation metrics. It compares the paper's description to the code's actual behavior, noting discrepancies with exact file paths and line numbers.
|
||||
|
||||
## Tools
|
||||
## Verification process
|
||||
|
||||
`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`
|
||||
The verifier follows a systematic process for each claim it checks:
|
||||
|
||||
## Default Output
|
||||
1. **Retrieve the source** -- Fetch the cited paper, article, or code file
|
||||
2. **Locate the relevant section** -- Find where the source addresses the claim
|
||||
3. **Compare** -- Check whether the source supports the claim as stated
|
||||
4. **Classify** -- Mark the claim as verified, unsupported, overstated, or contradicted
|
||||
5. **Document** -- Record the evidence with exact quotes and locations
|
||||
|
||||
`cited.md`
|
||||
This process is deterministic and traceable. Every verification result includes the specific passage or code that was checked, making it easy to audit the verifier's work.
|
||||
|
||||
You receive a draft document and the research files it was built from. Your job is to:
|
||||
## Confidence and limitations
|
||||
|
||||
1. **Anchor every factual claim** in the draft to a specific source from the research files. Insert inline citations `[1]`, `[2]`, etc. directly after each claim.
|
||||
2. **Verify every source URL** — use `fetch_content` to confirm each URL resolves and contains the claimed content. Flag dead links.
|
||||
3. **Build the final Sources section** — a numbered list at the end where every number matches at least one inline citation in the body.
|
||||
4. **Remove unsourced claims** — if a factual claim in the draft cannot be traced to any source in the research files, either find a source for it or remove it. Do not leave unsourced factual claims.
|
||||
The verifier assigns a confidence level to each verification. Claims that directly quote a source are verified with high confidence. Claims that paraphrase or interpret results are verified with moderate confidence, since reasonable interpretations can differ. Claims about the implications or significance of results are verified with lower confidence, since these involve judgment.
|
||||
|
||||
## Citation rules
|
||||
The verifier is honest about its limitations. When a claim cannot be verified because the source is behind a paywall, the code is not available, or the claim requires domain expertise beyond what the verifier can assess, it says so explicitly rather than guessing.
|
||||
|
||||
- Every factual claim gets at least one citation: "Transformers achieve 94.2% on MMLU [3]."
|
||||
- Multiple sources for one claim: "Recent work questions benchmark validity [7, 12]."
|
||||
- No orphan citations — every `[N]` in the body must appear in Sources.
|
||||
- No orphan sources — every entry in Sources must be cited at least once.
|
||||
- Hedged or opinion statements do not need citations.
|
||||
- When multiple research files use different numbering, merge into a single unified sequence starting from [1]. Deduplicate sources that appear in multiple files.
|
||||
## Used by
|
||||
|
||||
## Source verification
|
||||
|
||||
For each source URL:
|
||||
- **Live:** keep as-is.
|
||||
- **Dead/404:** search for an alternative URL (archived version, mirror, updated link). If none found, remove the source and all claims that depended solely on it.
|
||||
- **Redirects to unrelated content:** treat as dead.
|
||||
|
||||
## Output contract
|
||||
- Save to the output file (default: `cited.md`).
|
||||
- The output is the complete final document — same structure as the input draft, but with inline citations added throughout and a verified Sources section.
|
||||
- Do not change the substance or structure of the draft. Only add citations and fix dead sources.
|
||||
The verifier agent is used by `/deepresearch` (final fact-checking pass), `/audit` (comparing paper claims to code), and `/replicate` (verifying that the replication plan captures all necessary details). It serves as the quality control step that runs after the researcher and writer have produced their output.
|
||||
|
||||
@@ -1,56 +1,36 @@
|
||||
---
|
||||
title: Writer
|
||||
description: Turn research notes into clear, structured briefs and drafts.
|
||||
description: The writer agent produces structured academic prose from research findings.
|
||||
section: Agents
|
||||
order: 3
|
||||
---
|
||||
|
||||
## Source
|
||||
The writer agent transforms raw research findings into structured, well-organized documents. It specializes in academic prose, producing papers, briefs, surveys, and reports with proper citations, section structure, and narrative flow.
|
||||
|
||||
Generated from `.feynman/agents/writer.md`. Edit that prompt file, not this docs page.
|
||||
## What it does
|
||||
|
||||
## Role
|
||||
The writer takes source material -- findings from researcher agents, review feedback, comparison matrices -- and synthesizes it into a coherent document. It handles the difficult task of turning a collection of extracted claims and citations into prose that tells a clear story.
|
||||
|
||||
Turn research notes into clear, structured briefs and drafts.
|
||||
The writer understands academic conventions. Claims are attributed to their sources with inline citations. Methodology sections describe procedures with sufficient detail for reproduction. Results are presented with appropriate qualifiers. Limitations are discussed honestly rather than buried or omitted.
|
||||
|
||||
## Tools
|
||||
## Writing capabilities
|
||||
|
||||
`read`, `bash`, `grep`, `find`, `ls`, `write`, `edit`
|
||||
The writer agent handles several document types:
|
||||
|
||||
## Default Output
|
||||
- **Research Briefs** -- Concise summaries of a topic with key findings and citations, produced by the deep research workflow
|
||||
- **Literature Reviews** -- Survey-style documents that map consensus, disagreement, and open questions across the field
|
||||
- **Paper Drafts** -- Full academic papers with abstract, introduction, body sections, discussion, and references
|
||||
- **Comparison Reports** -- Structured analyses of how multiple sources agree and differ
|
||||
- **Summaries** -- Condensed versions of longer documents or multi-source findings
|
||||
|
||||
`draft.md`
|
||||
## Citation handling
|
||||
|
||||
## Integrity commandments
|
||||
1. **Write only from supplied evidence.** Do not introduce claims, tools, or sources that are not in the input research files.
|
||||
2. **Preserve caveats and disagreements.** Never smooth away uncertainty.
|
||||
3. **Be explicit about gaps.** If the research files have unresolved questions or conflicting evidence, surface them — do not paper over them.
|
||||
The writer maintains citation integrity throughout the document. Every factual claim is linked back to its source. When multiple sources support the same claim, all are cited. When a claim comes from a single source, the writer notes this to help the reader assess confidence. The final reference list includes only works actually cited in the text.
|
||||
|
||||
## Output structure
|
||||
## Iteration
|
||||
|
||||
```markdown
|
||||
# Title
|
||||
The writer supports iterative refinement. After producing an initial draft, you can ask Feynman to revise specific sections, add more detail on a subtopic, restructure the argument, or adjust the tone and level of technical detail. Each revision preserves the citation links and document structure.
|
||||
|
||||
## Executive Summary
|
||||
2-3 paragraph overview of key findings.
|
||||
## Used by
|
||||
|
||||
## Section 1: ...
|
||||
Detailed findings organized by theme or question.
|
||||
|
||||
## Section N: ...
|
||||
...
|
||||
|
||||
## Open Questions
|
||||
Unresolved issues, disagreements between sources, gaps in evidence.
|
||||
```
|
||||
|
||||
## Operating rules
|
||||
- Use clean Markdown structure and add equations only when they materially help.
|
||||
- Keep the narrative readable, but never outrun the evidence.
|
||||
- Produce artifacts that are ready to review in a browser or PDF preview.
|
||||
- Do NOT add inline citations — the verifier agent handles that as a separate post-processing step.
|
||||
- Do NOT add a Sources section — the verifier agent builds that.
|
||||
|
||||
## Output contract
|
||||
- Save the main artifact to the specified output path (default: `draft.md`).
|
||||
- Focus on clarity, structure, and evidence traceability.
|
||||
The writer agent is used by `/deepresearch` (for the final brief), `/lit` (for the review document), `/draft` (as the primary agent), and `/compare` (for the comparison report). It is always the last agent to run in a workflow, producing the final output from the material gathered and evaluated by the researcher and reviewer agents.
|
||||
|
||||
@@ -1,66 +1,83 @@
|
||||
---
|
||||
title: Configuration
|
||||
description: Configure models, search, and runtime options
|
||||
description: Understand Feynman's configuration files and environment variables.
|
||||
section: Getting Started
|
||||
order: 4
|
||||
---
|
||||
|
||||
## Model
|
||||
Feynman stores all configuration and state under `~/.feynman/`. This directory is created on first run and contains settings, authentication tokens, session history, and installed packages.
|
||||
|
||||
Set the default model:
|
||||
## Directory structure
|
||||
|
||||
```bash
|
||||
feynman model set <provider:model>
|
||||
```
|
||||
~/.feynman/
|
||||
├── settings.json # Core configuration
|
||||
├── web-search.json # Web search routing config
|
||||
├── auth/ # OAuth tokens and API keys
|
||||
├── sessions/ # Persisted conversation history
|
||||
└── packages/ # Installed optional packages
|
||||
```
|
||||
|
||||
Override at runtime:
|
||||
The `settings.json` file is the primary configuration file. It is created by `feynman setup` and can be edited manually. A typical configuration looks like:
|
||||
|
||||
```bash
|
||||
feynman --model anthropic:claude-opus-4-6
|
||||
```json
|
||||
{
|
||||
"defaultModel": "anthropic:claude-sonnet-4-20250514",
|
||||
"thinkingLevel": "medium"
|
||||
}
|
||||
```
|
||||
|
||||
List available models:
|
||||
## Model configuration
|
||||
|
||||
The `defaultModel` field sets which model is used when you launch Feynman without the `--model` flag. The format is `provider:model-name`. You can change it via the CLI:
|
||||
|
||||
```bash
|
||||
feynman model set anthropic:claude-opus-4-20250514
|
||||
```
|
||||
|
||||
To see all models you have configured:
|
||||
|
||||
```bash
|
||||
feynman model list
|
||||
```
|
||||
|
||||
## Thinking level
|
||||
## Thinking levels
|
||||
|
||||
Control the reasoning depth:
|
||||
The `thinkingLevel` field controls how much reasoning the model does before responding. Available levels are `off`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Higher levels produce more thorough analysis at the cost of latency and token usage. You can override per-session:
|
||||
|
||||
```bash
|
||||
feynman --thinking high
|
||||
```
|
||||
|
||||
Levels: `off`, `minimal`, `low`, `medium`, `high`, `xhigh`.
|
||||
## Environment variables
|
||||
|
||||
## Web search
|
||||
Feynman respects the following environment variables, which take precedence over `settings.json`:
|
||||
|
||||
Check the current search configuration:
|
||||
|
||||
```bash
|
||||
feynman search status
|
||||
```
|
||||
|
||||
For advanced configuration, edit `~/.feynman/web-search.json` directly to set Gemini API keys, Perplexity keys, or a different route.
|
||||
|
||||
## Working directory
|
||||
|
||||
```bash
|
||||
feynman --cwd /path/to/project
|
||||
```
|
||||
| Variable | Description |
|
||||
| --- | --- |
|
||||
| `FEYNMAN_MODEL` | Override the default model |
|
||||
| `FEYNMAN_HOME` | Override the config directory (default: `~/.feynman`) |
|
||||
| `FEYNMAN_THINKING` | Override the thinking level |
|
||||
| `ANTHROPIC_API_KEY` | Anthropic API key |
|
||||
| `OPENAI_API_KEY` | OpenAI API key |
|
||||
| `GOOGLE_API_KEY` | Google AI API key |
|
||||
| `TAVILY_API_KEY` | Tavily web search API key |
|
||||
| `SERPER_API_KEY` | Serper web search API key |
|
||||
|
||||
## Session storage
|
||||
|
||||
```bash
|
||||
feynman --session-dir /path/to/sessions
|
||||
```
|
||||
|
||||
## One-shot mode
|
||||
|
||||
Run a single prompt and exit:
|
||||
Each conversation is persisted as a JSON file in `~/.feynman/sessions/`. To start a fresh session:
|
||||
|
||||
```bash
|
||||
feynman --prompt "summarize the key findings of 2401.12345"
|
||||
feynman --new-session
|
||||
```
|
||||
|
||||
To point sessions at a different directory (useful for per-project session isolation):
|
||||
|
||||
```bash
|
||||
feynman --session-dir ~/myproject/.feynman/sessions
|
||||
```
|
||||
|
||||
## Diagnostics
|
||||
|
||||
Run `feynman doctor` to verify your configuration is valid, check authentication status for all configured providers, and detect missing optional dependencies. The doctor command outputs a checklist showing what is working and what needs attention.
|
||||
|
||||
@@ -1,48 +1,134 @@
|
||||
---
|
||||
title: Installation
|
||||
description: Install Feynman and get started
|
||||
description: Install Feynman on macOS, Linux, or Windows using curl, pnpm, or bun.
|
||||
section: Getting Started
|
||||
order: 1
|
||||
---
|
||||
|
||||
## Requirements
|
||||
Feynman ships as a standalone runtime bundle for macOS, Linux, and Windows, and as a package-manager install for environments where Node.js is already installed. The recommended approach is the one-line installer, which downloads a prebuilt native bundle with zero external runtime dependencies.
|
||||
|
||||
- macOS, Linux, or WSL
|
||||
- `curl` or `wget`
|
||||
## One-line installer (recommended)
|
||||
|
||||
## Recommended install
|
||||
On **macOS or Linux**, open a terminal and run:
|
||||
|
||||
```bash
|
||||
curl -fsSL https://feynman.is/install | bash
|
||||
```
|
||||
|
||||
## Verify
|
||||
The installer detects your OS and architecture automatically. On macOS it supports both Intel and Apple Silicon. On Linux it supports x64 and arm64. The launcher is installed to `~/.local/bin`, the bundled runtime is unpacked into `~/.local/share/feynman`, and your `PATH` is updated when needed.
|
||||
|
||||
```bash
|
||||
feynman --version
|
||||
```
|
||||
If you previously installed Feynman via `npm`, `pnpm`, or `bun` and still see local Node.js errors after a curl install, your shell is probably still resolving the older global binary first. Run `which -a feynman`, then `hash -r`, or launch the standalone shim directly with `~/.local/bin/feynman`.
|
||||
|
||||
## Windows PowerShell
|
||||
On **Windows**, open PowerShell as Administrator and run:
|
||||
|
||||
```powershell
|
||||
irm https://feynman.is/install.ps1 | iex
|
||||
```
|
||||
|
||||
## npm fallback
|
||||
This installs the Windows runtime bundle under `%LOCALAPPDATA%\Programs\feynman`, adds its launcher to your user `PATH`, and lets you re-run the installer at any time to update.
|
||||
|
||||
If you already manage Node yourself:
|
||||
## Skills only
|
||||
|
||||
If you only want Feynman's research skills and not the full terminal runtime, install the skill library separately.
|
||||
|
||||
For a user-level install into `~/.codex/skills/feynman`:
|
||||
|
||||
```bash
|
||||
npm install -g @companion-ai/feynman
|
||||
curl -fsSL https://feynman.is/install-skills | bash
|
||||
```
|
||||
|
||||
## Local Development
|
||||
For a repo-local install into `.agents/skills/feynman` under the current repository:
|
||||
|
||||
For contributing or local development:
|
||||
```bash
|
||||
curl -fsSL https://feynman.is/install-skills | bash -s -- --repo
|
||||
```
|
||||
|
||||
On Windows, install the skills into your Codex skill directory:
|
||||
|
||||
```powershell
|
||||
irm https://feynman.is/install-skills.ps1 | iex
|
||||
```
|
||||
|
||||
Or install them repo-locally:
|
||||
|
||||
```powershell
|
||||
& ([scriptblock]::Create((irm https://feynman.is/install-skills.ps1))) -Scope Repo
|
||||
```
|
||||
|
||||
These installers download only the `skills/` tree from the Feynman repository. They do not install the Feynman terminal, bundled Node runtime, auth storage, or Pi packages.
|
||||
|
||||
## Pinned releases
|
||||
|
||||
The one-line installer already targets the latest tagged release. To pin an exact version, pass it explicitly:
|
||||
|
||||
```bash
|
||||
curl -fsSL https://feynman.is/install | bash -s -- 0.2.15
|
||||
```
|
||||
|
||||
On Windows:
|
||||
|
||||
```powershell
|
||||
& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.15
|
||||
```
|
||||
|
||||
## pnpm
|
||||
|
||||
If you already have Node.js `20.19.0` or newer installed, you can install Feynman globally via `pnpm`:
|
||||
|
||||
```bash
|
||||
pnpm add -g @companion-ai/feynman
|
||||
```
|
||||
|
||||
Or run it directly without installing:
|
||||
|
||||
```bash
|
||||
pnpm dlx @companion-ai/feynman
|
||||
```
|
||||
|
||||
## bun
|
||||
|
||||
`bun add -g` and `bunx` still use your local Node runtime for Feynman itself, so the same Node.js `20.19.0+` requirement applies.
|
||||
|
||||
```bash
|
||||
bun add -g @companion-ai/feynman
|
||||
```
|
||||
|
||||
Or run it directly without installing:
|
||||
|
||||
```bash
|
||||
bunx @companion-ai/feynman
|
||||
```
|
||||
|
||||
Both package-manager distributions ship the same core application but depend on Node.js being present on your system. The standalone installer is preferred because it bundles its own Node runtime and works without a separate Node installation.
|
||||
|
||||
## Post-install setup
|
||||
|
||||
After installation, run the guided setup wizard to configure your model provider and API keys:
|
||||
|
||||
```bash
|
||||
feynman setup
|
||||
```
|
||||
|
||||
This walks you through selecting a default model, authenticating with your provider, and optionally installing extra packages for features like web search and document preview. See the [Setup guide](/docs/getting-started/setup) for a detailed walkthrough.
|
||||
|
||||
## Verifying the installation
|
||||
|
||||
Confirm Feynman is installed and accessible:
|
||||
|
||||
```bash
|
||||
feynman --version
|
||||
```
|
||||
|
||||
If you see a version number, you are ready to go. Run `feynman doctor` at any time to diagnose configuration issues, missing dependencies, or authentication problems.
|
||||
|
||||
## Local development
|
||||
|
||||
For contributing or running Feynman from source:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/getcompanion-ai/feynman.git
|
||||
cd feynman
|
||||
nvm use || nvm install
|
||||
npm install
|
||||
npm run start
|
||||
npm start
|
||||
```
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user