Compare commits
83 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fe24224965 | ||
|
|
9bc59dad53 | ||
|
|
7fd94c028e | ||
|
|
080bf8ad2c | ||
|
|
82cafd10cc | ||
|
|
419bcea3d1 | ||
|
|
d5b6f9cd00 | ||
|
|
8fade18b98 | ||
|
|
66f1fe5ffc | ||
|
|
01c2808606 | ||
|
|
dd3c07633b | ||
|
|
fa259f5cea | ||
|
|
8fc7c0488c | ||
|
|
455de783dc | ||
|
|
01155cadbe | ||
|
|
59af81c613 | ||
|
|
0995f5cc22 | ||
|
|
af6486312d | ||
|
|
8de8054e4f | ||
|
|
5d10285372 | ||
|
|
4f6574f233 | ||
|
|
aa96b5ee14 | ||
|
|
b3a82d4a92 | ||
|
|
790824af20 | ||
|
|
4137a29507 | ||
|
|
5b9362918e | ||
|
|
bfa538fa00 | ||
|
|
96234425ba | ||
|
|
3148f2e62b | ||
|
|
554350cc0e | ||
|
|
d9812cf4f2 | ||
|
|
aed607ce62 | ||
|
|
ab8a284c74 | ||
|
|
62d63be1d8 | ||
|
|
e2fdf0d505 | ||
|
|
cba7532d59 | ||
|
|
2dea96f25f | ||
|
|
83a570235f | ||
|
|
ff6328121e | ||
|
|
404c8b5469 | ||
|
|
4c62e78ca5 | ||
|
|
10c93a673b | ||
|
|
30d07246d1 | ||
|
|
dbd89d8e3d | ||
|
|
c8536583bf | ||
|
|
ca74226c83 | ||
|
|
bc9fa2be86 | ||
|
|
f6dbacc9d5 | ||
|
|
572de7ba85 | ||
|
|
85e0c4d8c4 | ||
|
|
584d065902 | ||
|
|
151956ea24 | ||
|
|
75b0467761 | ||
|
|
4ac668c50a | ||
|
|
8178173ff7 | ||
|
|
4eeccafed0 | ||
|
|
7024a86024 | ||
|
|
5fab329ad1 | ||
|
|
563068180f | ||
|
|
8dd20935ad | ||
|
|
aaa0f63bc7 | ||
|
|
79e14dd79d | ||
|
|
cd85e875df | ||
|
|
3ee6ff4199 | ||
|
|
762ca66a68 | ||
|
|
2aa4c84ce5 | ||
|
|
3d84624011 | ||
|
|
6445c20e02 | ||
|
|
4c0a417232 | ||
|
|
42cedd3137 | ||
|
|
b07b0f4197 | ||
|
|
323faf56ee | ||
|
|
1e333ba490 | ||
|
|
1dd7f30a37 | ||
|
|
17c48be4b5 | ||
|
|
8f8cf2a4a9 | ||
|
|
7d3fbc3f6b | ||
|
|
e651cb1f9b | ||
|
|
21b8bcd4c4 | ||
|
|
771b39cbba | ||
|
|
b624921bad | ||
|
|
b7d430ee15 | ||
|
|
54efae78e1 |
154
.astro/content.d.ts
vendored
Normal file
154
.astro/content.d.ts
vendored
Normal file
@@ -0,0 +1,154 @@
|
|||||||
|
declare module 'astro:content' {
|
||||||
|
export interface RenderResult {
|
||||||
|
Content: import('astro/runtime/server/index.js').AstroComponentFactory;
|
||||||
|
headings: import('astro').MarkdownHeading[];
|
||||||
|
remarkPluginFrontmatter: Record<string, any>;
|
||||||
|
}
|
||||||
|
interface Render {
|
||||||
|
'.md': Promise<RenderResult>;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface RenderedContent {
|
||||||
|
html: string;
|
||||||
|
metadata?: {
|
||||||
|
imagePaths: Array<string>;
|
||||||
|
[key: string]: unknown;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
type Flatten<T> = T extends { [K: string]: infer U } ? U : never;
|
||||||
|
|
||||||
|
export type CollectionKey = keyof DataEntryMap;
|
||||||
|
export type CollectionEntry<C extends CollectionKey> = Flatten<DataEntryMap[C]>;
|
||||||
|
|
||||||
|
type AllValuesOf<T> = T extends any ? T[keyof T] : never;
|
||||||
|
|
||||||
|
export type ReferenceDataEntry<
|
||||||
|
C extends CollectionKey,
|
||||||
|
E extends keyof DataEntryMap[C] = string,
|
||||||
|
> = {
|
||||||
|
collection: C;
|
||||||
|
id: E;
|
||||||
|
};
|
||||||
|
|
||||||
|
export type ReferenceLiveEntry<C extends keyof LiveContentConfig['collections']> = {
|
||||||
|
collection: C;
|
||||||
|
id: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
export function getCollection<C extends keyof DataEntryMap, E extends CollectionEntry<C>>(
|
||||||
|
collection: C,
|
||||||
|
filter?: (entry: CollectionEntry<C>) => entry is E,
|
||||||
|
): Promise<E[]>;
|
||||||
|
export function getCollection<C extends keyof DataEntryMap>(
|
||||||
|
collection: C,
|
||||||
|
filter?: (entry: CollectionEntry<C>) => unknown,
|
||||||
|
): Promise<CollectionEntry<C>[]>;
|
||||||
|
|
||||||
|
export function getLiveCollection<C extends keyof LiveContentConfig['collections']>(
|
||||||
|
collection: C,
|
||||||
|
filter?: LiveLoaderCollectionFilterType<C>,
|
||||||
|
): Promise<
|
||||||
|
import('astro').LiveDataCollectionResult<LiveLoaderDataType<C>, LiveLoaderErrorType<C>>
|
||||||
|
>;
|
||||||
|
|
||||||
|
export function getEntry<
|
||||||
|
C extends keyof DataEntryMap,
|
||||||
|
E extends keyof DataEntryMap[C] | (string & {}),
|
||||||
|
>(
|
||||||
|
entry: ReferenceDataEntry<C, E>,
|
||||||
|
): E extends keyof DataEntryMap[C]
|
||||||
|
? Promise<DataEntryMap[C][E]>
|
||||||
|
: Promise<CollectionEntry<C> | undefined>;
|
||||||
|
export function getEntry<
|
||||||
|
C extends keyof DataEntryMap,
|
||||||
|
E extends keyof DataEntryMap[C] | (string & {}),
|
||||||
|
>(
|
||||||
|
collection: C,
|
||||||
|
id: E,
|
||||||
|
): E extends keyof DataEntryMap[C]
|
||||||
|
? string extends keyof DataEntryMap[C]
|
||||||
|
? Promise<DataEntryMap[C][E]> | undefined
|
||||||
|
: Promise<DataEntryMap[C][E]>
|
||||||
|
: Promise<CollectionEntry<C> | undefined>;
|
||||||
|
export function getLiveEntry<C extends keyof LiveContentConfig['collections']>(
|
||||||
|
collection: C,
|
||||||
|
filter: string | LiveLoaderEntryFilterType<C>,
|
||||||
|
): Promise<import('astro').LiveDataEntryResult<LiveLoaderDataType<C>, LiveLoaderErrorType<C>>>;
|
||||||
|
|
||||||
|
/** Resolve an array of entry references from the same collection */
|
||||||
|
export function getEntries<C extends keyof DataEntryMap>(
|
||||||
|
entries: ReferenceDataEntry<C, keyof DataEntryMap[C]>[],
|
||||||
|
): Promise<CollectionEntry<C>[]>;
|
||||||
|
|
||||||
|
export function render<C extends keyof DataEntryMap>(
|
||||||
|
entry: DataEntryMap[C][string],
|
||||||
|
): Promise<RenderResult>;
|
||||||
|
|
||||||
|
export function reference<
|
||||||
|
C extends
|
||||||
|
| keyof DataEntryMap
|
||||||
|
// Allow generic `string` to avoid excessive type errors in the config
|
||||||
|
// if `dev` is not running to update as you edit.
|
||||||
|
// Invalid collection names will be caught at build time.
|
||||||
|
| (string & {}),
|
||||||
|
>(
|
||||||
|
collection: C,
|
||||||
|
): import('astro/zod').ZodPipe<
|
||||||
|
import('astro/zod').ZodString,
|
||||||
|
import('astro/zod').ZodTransform<
|
||||||
|
C extends keyof DataEntryMap
|
||||||
|
? {
|
||||||
|
collection: C;
|
||||||
|
id: string;
|
||||||
|
}
|
||||||
|
: never,
|
||||||
|
string
|
||||||
|
>
|
||||||
|
>;
|
||||||
|
|
||||||
|
type ReturnTypeOrOriginal<T> = T extends (...args: any[]) => infer R ? R : T;
|
||||||
|
type InferEntrySchema<C extends keyof DataEntryMap> = import('astro/zod').infer<
|
||||||
|
ReturnTypeOrOriginal<Required<ContentConfig['collections'][C]>['schema']>
|
||||||
|
>;
|
||||||
|
type ExtractLoaderConfig<T> = T extends { loader: infer L } ? L : never;
|
||||||
|
type InferLoaderSchema<
|
||||||
|
C extends keyof DataEntryMap,
|
||||||
|
L = ExtractLoaderConfig<ContentConfig['collections'][C]>,
|
||||||
|
> = L extends { schema: import('astro/zod').ZodSchema }
|
||||||
|
? import('astro/zod').infer<L['schema']>
|
||||||
|
: any;
|
||||||
|
|
||||||
|
type DataEntryMap = {
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
|
type ExtractLoaderTypes<T> = T extends import('astro/loaders').LiveLoader<
|
||||||
|
infer TData,
|
||||||
|
infer TEntryFilter,
|
||||||
|
infer TCollectionFilter,
|
||||||
|
infer TError
|
||||||
|
>
|
||||||
|
? { data: TData; entryFilter: TEntryFilter; collectionFilter: TCollectionFilter; error: TError }
|
||||||
|
: { data: never; entryFilter: never; collectionFilter: never; error: never };
|
||||||
|
type ExtractEntryFilterType<T> = ExtractLoaderTypes<T>['entryFilter'];
|
||||||
|
type ExtractCollectionFilterType<T> = ExtractLoaderTypes<T>['collectionFilter'];
|
||||||
|
type ExtractErrorType<T> = ExtractLoaderTypes<T>['error'];
|
||||||
|
|
||||||
|
type LiveLoaderDataType<C extends keyof LiveContentConfig['collections']> =
|
||||||
|
LiveContentConfig['collections'][C]['schema'] extends undefined
|
||||||
|
? ExtractDataType<LiveContentConfig['collections'][C]['loader']>
|
||||||
|
: import('astro/zod').infer<
|
||||||
|
Exclude<LiveContentConfig['collections'][C]['schema'], undefined>
|
||||||
|
>;
|
||||||
|
type LiveLoaderEntryFilterType<C extends keyof LiveContentConfig['collections']> =
|
||||||
|
ExtractEntryFilterType<LiveContentConfig['collections'][C]['loader']>;
|
||||||
|
type LiveLoaderCollectionFilterType<C extends keyof LiveContentConfig['collections']> =
|
||||||
|
ExtractCollectionFilterType<LiveContentConfig['collections'][C]['loader']>;
|
||||||
|
type LiveLoaderErrorType<C extends keyof LiveContentConfig['collections']> = ExtractErrorType<
|
||||||
|
LiveContentConfig['collections'][C]['loader']
|
||||||
|
>;
|
||||||
|
|
||||||
|
export type ContentConfig = never;
|
||||||
|
export type LiveContentConfig = never;
|
||||||
|
}
|
||||||
2
.astro/types.d.ts
vendored
Normal file
2
.astro/types.d.ts
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
/// <reference types="astro/client" />
|
||||||
|
/// <reference path="content.d.ts" />
|
||||||
18
.env.example
18
.env.example
@@ -6,3 +6,21 @@ FEYNMAN_THINKING=medium
|
|||||||
|
|
||||||
OPENAI_API_KEY=
|
OPENAI_API_KEY=
|
||||||
ANTHROPIC_API_KEY=
|
ANTHROPIC_API_KEY=
|
||||||
|
GEMINI_API_KEY=
|
||||||
|
OPENROUTER_API_KEY=
|
||||||
|
ZAI_API_KEY=
|
||||||
|
KIMI_API_KEY=
|
||||||
|
MINIMAX_API_KEY=
|
||||||
|
MINIMAX_CN_API_KEY=
|
||||||
|
MISTRAL_API_KEY=
|
||||||
|
GROQ_API_KEY=
|
||||||
|
XAI_API_KEY=
|
||||||
|
CEREBRAS_API_KEY=
|
||||||
|
HF_TOKEN=
|
||||||
|
OPENCODE_API_KEY=
|
||||||
|
AI_GATEWAY_API_KEY=
|
||||||
|
AZURE_OPENAI_API_KEY=
|
||||||
|
|
||||||
|
RUNPOD_API_KEY=
|
||||||
|
MODAL_TOKEN_ID=
|
||||||
|
MODAL_TOKEN_SECRET=
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Operating rules:
|
|||||||
- State uncertainty explicitly.
|
- State uncertainty explicitly.
|
||||||
- When a claim depends on recent literature or unstable facts, use tools before answering.
|
- When a claim depends on recent literature or unstable facts, use tools before answering.
|
||||||
- When discussing papers, cite title, year, and identifier or URL when possible.
|
- When discussing papers, cite title, year, and identifier or URL when possible.
|
||||||
- Use the alpha-backed research tools for academic paper search, paper reading, paper Q&A, repository inspection, and persistent annotations.
|
- Use the `alpha` CLI for academic paper search, paper reading, paper Q&A, repository inspection, and persistent annotations.
|
||||||
- Use `web_search`, `fetch_content`, and `get_search_content` first for current topics: products, companies, markets, regulations, software releases, model availability, model pricing, benchmarks, docs, or anything phrased as latest/current/recent/today.
|
- Use `web_search`, `fetch_content`, and `get_search_content` first for current topics: products, companies, markets, regulations, software releases, model availability, model pricing, benchmarks, docs, or anything phrased as latest/current/recent/today.
|
||||||
- For mixed topics, combine both: use web sources for current reality and paper sources for background literature.
|
- For mixed topics, combine both: use web sources for current reality and paper sources for background literature.
|
||||||
- Never answer a latest/current question from arXiv or alpha-backed paper search alone.
|
- Never answer a latest/current question from arXiv or alpha-backed paper search alone.
|
||||||
@@ -24,13 +24,14 @@ Operating rules:
|
|||||||
- Do not force chain-shaped orchestration onto the user. Multi-agent decomposition is an internal tactic, not the primary UX.
|
- Do not force chain-shaped orchestration onto the user. Multi-agent decomposition is an internal tactic, not the primary UX.
|
||||||
- For AI research artifacts, default to pressure-testing the work before polishing it. Use review-style workflows to check novelty positioning, evaluation design, baseline fairness, ablations, reproducibility, and likely reviewer objections.
|
- For AI research artifacts, default to pressure-testing the work before polishing it. Use review-style workflows to check novelty positioning, evaluation design, baseline fairness, ablations, reproducibility, and likely reviewer objections.
|
||||||
- Do not say `verified`, `confirmed`, `checked`, or `reproduced` unless you actually performed the check and can point to the supporting source, artifact, or command output.
|
- Do not say `verified`, `confirmed`, `checked`, or `reproduced` unless you actually performed the check and can point to the supporting source, artifact, or command output.
|
||||||
|
- Never invent or fabricate experimental results, scores, datasets, sample sizes, ablations, benchmark tables, figures, images, charts, or quantitative comparisons. If the user asks for a paper, report, draft, figure, or result and the underlying data is missing, write a clearly labeled placeholder such as `No experimental results are available yet` or `TODO: run experiment`.
|
||||||
|
- Every quantitative result, figure, table, chart, image, or benchmark claim must trace to at least one explicit source URL, research note, raw artifact path, or script/command output. If provenance is missing, omit the claim or mark it as a planned measurement instead of presenting it as fact.
|
||||||
- When a task involves calculations, code, or quantitative outputs, define the minimal test or oracle set before implementation and record the results of those checks before delivery.
|
- When a task involves calculations, code, or quantitative outputs, define the minimal test or oracle set before implementation and record the results of those checks before delivery.
|
||||||
- If a plot, number, or conclusion looks cleaner than expected, assume it may be wrong until it survives explicit checks. Never smooth curves, drop inconvenient variations, or tune presentation-only outputs without stating that choice.
|
- If a plot, number, or conclusion looks cleaner than expected, assume it may be wrong until it survives explicit checks. Never smooth curves, drop inconvenient variations, or tune presentation-only outputs without stating that choice.
|
||||||
- When a verification pass finds one issue, continue searching for others. Do not stop after the first error unless the whole branch is blocked.
|
- When a verification pass finds one issue, continue searching for others. Do not stop after the first error unless the whole branch is blocked.
|
||||||
- Use the visualization packages when a chart, diagram, or interactive widget would materially improve understanding. Prefer charts for quantitative comparisons, Mermaid for simple process/architecture diagrams, and interactive HTML widgets for exploratory visual explanations.
|
- Use the visualization packages when a chart, diagram, or interactive widget would materially improve understanding. Prefer charts for quantitative comparisons, Mermaid for simple process/architecture diagrams, and interactive HTML widgets for exploratory visual explanations.
|
||||||
- Persistent memory is package-backed. Use `memory_search` to recall prior preferences and lessons, `memory_remember` to store explicit durable facts, and `memory_lessons` when prior corrections matter.
|
- Persistent memory is package-backed. Use `memory_search` to recall prior preferences and lessons, `memory_remember` to store explicit durable facts, and `memory_lessons` when prior corrections matter.
|
||||||
- If the user says "remember", states a stable preference, or asks for something to be the default in future sessions, call `memory_remember`. Do not just say you will remember it.
|
- If the user says "remember", states a stable preference, or asks for something to be the default in future sessions, call `memory_remember`. Do not just say you will remember it.
|
||||||
- Session recall is package-backed. Use `session_search` when the user references prior work, asks what has been done before, or when you suspect relevant past context exists.
|
|
||||||
- Feynman is intended to support always-on research work. Use the scheduling package when recurring or deferred work is appropriate instead of telling the user to remember manually.
|
- Feynman is intended to support always-on research work. Use the scheduling package when recurring or deferred work is appropriate instead of telling the user to remember manually.
|
||||||
- Use `schedule_prompt` for recurring scans, delayed follow-ups, reminders, and periodic research jobs.
|
- Use `schedule_prompt` for recurring scans, delayed follow-ups, reminders, and periodic research jobs.
|
||||||
- If the user asks you to remind, check later, run something nightly, or keep watching something over time, call `schedule_prompt`. Do not just promise to do it later.
|
- If the user asks you to remind, check later, run something nightly, or keep watching something over time, call `schedule_prompt`. Do not just promise to do it later.
|
||||||
@@ -38,11 +39,9 @@ Operating rules:
|
|||||||
- Prefer the smallest investigation or experiment that can materially reduce uncertainty before escalating to broader work.
|
- Prefer the smallest investigation or experiment that can materially reduce uncertainty before escalating to broader work.
|
||||||
- When an experiment is warranted, write the code or scripts, run them, capture outputs, and save artifacts to disk.
|
- When an experiment is warranted, write the code or scripts, run them, capture outputs, and save artifacts to disk.
|
||||||
- Before pausing long-running work, update the durable state on disk first: plan artifact, `CHANGELOG.md`, and any verification notes needed for the next session to resume cleanly.
|
- Before pausing long-running work, update the durable state on disk first: plan artifact, `CHANGELOG.md`, and any verification notes needed for the next session to resume cleanly.
|
||||||
- Before recommending an execution environment, consider the system resources shown in the header (CPU, RAM, GPU, Docker availability). Recommend Docker when isolation on the current machine helps, and say explicitly when the workload exceeds local capacity. Do not suggest GPU workloads locally if no GPU is detected.
|
|
||||||
- Treat polished scientific communication as part of the job: structure reports cleanly, use Markdown deliberately, and use LaTeX math when equations clarify the argument.
|
- Treat polished scientific communication as part of the job: structure reports cleanly, use Markdown deliberately, and use LaTeX math when equations clarify the argument.
|
||||||
- For any source-based answer, include an explicit Sources section with direct URLs, not just paper titles.
|
- For any source-based answer, include an explicit Sources section with direct URLs, not just paper titles.
|
||||||
- When citing papers from alpha-backed tools, prefer direct arXiv or alphaXiv links and include the arXiv ID.
|
- When citing papers from alpha-backed tools, prefer direct arXiv or alphaXiv links and include the arXiv ID.
|
||||||
- After writing a polished artifact, use `preview_file` only when the user wants review or export. Prefer browser preview by default; use PDF only when explicitly requested.
|
|
||||||
- Default toward delivering a concrete artifact when the task naturally calls for one: reading list, memo, audit, experiment log, or draft.
|
- Default toward delivering a concrete artifact when the task naturally calls for one: reading list, memo, audit, experiment log, or draft.
|
||||||
- For user-facing workflows, produce exactly one canonical durable Markdown artifact unless the user explicitly asks for multiple deliverables.
|
- For user-facing workflows, produce exactly one canonical durable Markdown artifact unless the user explicitly asks for multiple deliverables.
|
||||||
- Do not create extra user-facing intermediate markdown files just because the workflow has multiple reasoning stages.
|
- Do not create extra user-facing intermediate markdown files just because the workflow has multiple reasoning stages.
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ You are Feynman's evidence-gathering subagent.
|
|||||||
1. **Start wide.** Begin with short, broad queries to map the landscape. Use the `queries` array in `web_search` with 2–4 varied-angle queries simultaneously — never one query at a time when exploring.
|
1. **Start wide.** Begin with short, broad queries to map the landscape. Use the `queries` array in `web_search` with 2–4 varied-angle queries simultaneously — never one query at a time when exploring.
|
||||||
2. **Evaluate availability.** After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.
|
2. **Evaluate availability.** After the first round, assess what source types exist and which are highest quality. Adjust strategy accordingly.
|
||||||
3. **Progressively narrow.** Drill into specifics using terminology and names discovered in initial results. Refine queries, don't repeat them.
|
3. **Progressively narrow.** Drill into specifics using terminology and names discovered in initial results. Refine queries, don't repeat them.
|
||||||
4. **Cross-source.** When the topic spans current reality and academic literature, always use both `web_search` and `alpha_search`.
|
4. **Cross-source.** When the topic spans current reality and academic literature, always use both `web_search` and the `alpha` CLI (`alpha search`).
|
||||||
|
|
||||||
Use `recencyFilter` on `web_search` for fast-moving topics. Use `includeContent: true` on the most important results to get full page content rather than snippets.
|
Use `recencyFilter` on `web_search` for fast-moving topics. Use `includeContent: true` on the most important results to get full page content rather than snippets.
|
||||||
|
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ You receive a draft document and the research files it was built from. Your job
|
|||||||
4. **Remove unsourced claims** — if a factual claim in the draft cannot be traced to any source in the research files, either find a source for it or remove it. Do not leave unsourced factual claims.
|
4. **Remove unsourced claims** — if a factual claim in the draft cannot be traced to any source in the research files, either find a source for it or remove it. Do not leave unsourced factual claims.
|
||||||
5. **Verify meaning, not just topic overlap.** A citation is valid only if the source actually supports the specific number, quote, or conclusion attached to it.
|
5. **Verify meaning, not just topic overlap.** A citation is valid only if the source actually supports the specific number, quote, or conclusion attached to it.
|
||||||
6. **Refuse fake certainty.** Do not use words like `verified`, `confirmed`, or `reproduced` unless the draft already contains or the research files provide the underlying evidence.
|
6. **Refuse fake certainty.** Do not use words like `verified`, `confirmed`, or `reproduced` unless the draft already contains or the research files provide the underlying evidence.
|
||||||
|
7. **Never invent or keep fabricated results.** If any image, figure, chart, table, benchmark, score, dataset, sample size, ablation, or experimental result lacks explicit provenance, remove it or replace it with a clearly labeled TODO. Never keep a made-up result because it “looks plausible.”
|
||||||
|
|
||||||
## Citation rules
|
## Citation rules
|
||||||
|
|
||||||
@@ -37,8 +38,21 @@ For each source URL:
|
|||||||
For code-backed or quantitative claims:
|
For code-backed or quantitative claims:
|
||||||
- Keep the claim only if the supporting artifact is present in the research files or clearly documented in the draft.
|
- Keep the claim only if the supporting artifact is present in the research files or clearly documented in the draft.
|
||||||
- If a figure, table, benchmark, or computed result lacks a traceable source or artifact path, weaken or remove the claim rather than guessing.
|
- If a figure, table, benchmark, or computed result lacks a traceable source or artifact path, weaken or remove the claim rather than guessing.
|
||||||
|
- Treat captions such as “illustrative,” “simulated,” “representative,” or “example” as insufficient unless the user explicitly requested synthetic/example data. Otherwise remove the visual and mark the missing experiment.
|
||||||
- Do not preserve polished summaries that outrun the raw evidence.
|
- Do not preserve polished summaries that outrun the raw evidence.
|
||||||
|
|
||||||
|
## Fabrication audit
|
||||||
|
|
||||||
|
Before saving the final document, scan for:
|
||||||
|
- numeric scores or percentages,
|
||||||
|
- benchmark names and tables,
|
||||||
|
- figure/image references,
|
||||||
|
- claims of improvement or superiority,
|
||||||
|
- dataset sizes or experimental setup details,
|
||||||
|
- charts or visualizations.
|
||||||
|
|
||||||
|
For each item, verify that it maps to a source URL, research note, raw artifact path, or script path. If not, remove it or replace it with a TODO. Add a short `Removed Unsupported Claims` section only when you remove material.
|
||||||
|
|
||||||
## Output contract
|
## Output contract
|
||||||
- Save to the output path specified by the parent (default: `cited.md`).
|
- Save to the output path specified by the parent (default: `cited.md`).
|
||||||
- The output is the complete final document — same structure as the input draft, but with inline citations added throughout and a verified Sources section.
|
- The output is the complete final document — same structure as the input draft, but with inline citations added throughout and a verified Sources section.
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ You are Feynman's writing subagent.
|
|||||||
3. **Be explicit about gaps.** If the research files have unresolved questions or conflicting evidence, surface them — do not paper over them.
|
3. **Be explicit about gaps.** If the research files have unresolved questions or conflicting evidence, surface them — do not paper over them.
|
||||||
4. **Do not promote draft text into fact.** If a result is tentative, inferred, or awaiting verification, label it that way in the prose.
|
4. **Do not promote draft text into fact.** If a result is tentative, inferred, or awaiting verification, label it that way in the prose.
|
||||||
5. **No aesthetic laundering.** Do not make plots, tables, or summaries look cleaner than the underlying evidence justifies.
|
5. **No aesthetic laundering.** Do not make plots, tables, or summaries look cleaner than the underlying evidence justifies.
|
||||||
|
6. **Never fabricate results.** Do not invent experimental scores, datasets, sample sizes, ablations, benchmark tables, charts, image captions, or figures. If evidence is missing, write `No results are available yet` or `TODO: run experiment` rather than producing plausible-looking data.
|
||||||
|
|
||||||
## Output structure
|
## Output structure
|
||||||
|
|
||||||
@@ -36,9 +37,10 @@ Unresolved issues, disagreements between sources, gaps in evidence.
|
|||||||
|
|
||||||
## Visuals
|
## Visuals
|
||||||
- When the research contains quantitative data (benchmarks, comparisons, trends over time), generate charts using the `pi-charts` package to embed them in the draft.
|
- When the research contains quantitative data (benchmarks, comparisons, trends over time), generate charts using the `pi-charts` package to embed them in the draft.
|
||||||
- When explaining architectures, pipelines, or multi-step processes, use Mermaid diagrams.
|
- Do not create charts from invented or example data. If values are missing, describe the planned measurement instead.
|
||||||
- When a comparison across multiple dimensions would benefit from an interactive view, use `pi-generative-ui`.
|
- When explaining architectures, pipelines, or multi-step processes, use Mermaid diagrams only when the structure is supported by the supplied evidence.
|
||||||
- Every visual must have a descriptive caption and reference the data it's based on.
|
- When a comparison across multiple dimensions would benefit from an interactive view, use `pi-generative-ui` only for source-backed data.
|
||||||
|
- Every visual must have a descriptive caption and reference the data, source URL, research file, raw artifact, or script it is based on.
|
||||||
- Do not add visuals for decoration — only when they materially improve understanding of the evidence.
|
- Do not add visuals for decoration — only when they materially improve understanding of the evidence.
|
||||||
|
|
||||||
## Operating rules
|
## Operating rules
|
||||||
@@ -48,6 +50,7 @@ Unresolved issues, disagreements between sources, gaps in evidence.
|
|||||||
- Do NOT add inline citations — the verifier agent handles that as a separate post-processing step.
|
- Do NOT add inline citations — the verifier agent handles that as a separate post-processing step.
|
||||||
- Do NOT add a Sources section — the verifier agent builds that.
|
- Do NOT add a Sources section — the verifier agent builds that.
|
||||||
- Before finishing, do a claim sweep: every strong factual statement in the draft should have an obvious source home in the research files.
|
- Before finishing, do a claim sweep: every strong factual statement in the draft should have an obvious source home in the research files.
|
||||||
|
- Before finishing, do a fake-result sweep: remove or replace any numeric result, figure, chart, benchmark, table, or image that lacks explicit provenance.
|
||||||
|
|
||||||
## Output contract
|
## Output contract
|
||||||
- Save the main artifact to the specified output path (default: `draft.md`).
|
- Save the main artifact to the specified output path (default: `draft.md`).
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
{
|
{
|
||||||
"packages": [
|
"packages": [
|
||||||
|
"npm:@companion-ai/alpha-hub",
|
||||||
"npm:pi-subagents",
|
"npm:pi-subagents",
|
||||||
"npm:pi-btw",
|
"npm:pi-btw",
|
||||||
"npm:pi-docparser",
|
"npm:pi-docparser",
|
||||||
|
|||||||
87
.github/workflows/publish.yml
vendored
87
.github/workflows/publish.yml
vendored
@@ -10,55 +10,68 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
version-check:
|
version-check:
|
||||||
runs-on: blacksmith-4vcpu-ubuntu-2404
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
outputs:
|
outputs:
|
||||||
version: ${{ steps.version.outputs.version }}
|
version: ${{ steps.version.outputs.version }}
|
||||||
should_publish: ${{ steps.version.outputs.should_publish }}
|
should_release: ${{ steps.version.outputs.should_release }}
|
||||||
should_build_release: ${{ steps.version.outputs.should_build_release }}
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v6
|
- uses: actions/checkout@v6
|
||||||
- uses: actions/setup-node@v5
|
- uses: actions/setup-node@v6
|
||||||
with:
|
with:
|
||||||
node-version: 24.14.0
|
node-version: 24
|
||||||
|
registry-url: "https://registry.npmjs.org"
|
||||||
- id: version
|
- id: version
|
||||||
shell: bash
|
shell: bash
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
run: |
|
run: |
|
||||||
CURRENT=$(npm view @companion-ai/feynman version 2>/dev/null || echo "0.0.0")
|
|
||||||
LOCAL=$(node -p "require('./package.json').version")
|
LOCAL=$(node -p "require('./package.json').version")
|
||||||
echo "version=$LOCAL" >> "$GITHUB_OUTPUT"
|
echo "version=$LOCAL" >> "$GITHUB_OUTPUT"
|
||||||
if [ "$CURRENT" != "$LOCAL" ]; then
|
if gh release view "v$LOCAL" >/dev/null 2>&1; then
|
||||||
echo "should_publish=true" >> "$GITHUB_OUTPUT"
|
echo "should_release=false" >> "$GITHUB_OUTPUT"
|
||||||
echo "should_build_release=true" >> "$GITHUB_OUTPUT"
|
|
||||||
elif [ "${GITHUB_EVENT_NAME}" = "workflow_dispatch" ]; then
|
|
||||||
echo "should_publish=false" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "should_build_release=true" >> "$GITHUB_OUTPUT"
|
|
||||||
else
|
else
|
||||||
echo "should_publish=false" >> "$GITHUB_OUTPUT"
|
echo "should_release=true" >> "$GITHUB_OUTPUT"
|
||||||
echo "should_build_release=false" >> "$GITHUB_OUTPUT"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
publish-npm:
|
verify:
|
||||||
needs: version-check
|
needs: version-check
|
||||||
if: needs.version-check.outputs.should_publish == 'true'
|
if: needs.version-check.outputs.should_release == 'true'
|
||||||
runs-on: blacksmith-4vcpu-ubuntu-2404
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v6
|
- uses: actions/checkout@v6
|
||||||
- uses: actions/setup-node@v5
|
- uses: actions/setup-node@v6
|
||||||
with:
|
with:
|
||||||
node-version: 24.14.0
|
node-version: 24
|
||||||
registry-url: https://registry.npmjs.org
|
registry-url: "https://registry.npmjs.org"
|
||||||
- run: npm ci --ignore-scripts
|
- run: npm ci
|
||||||
- run: npm run build
|
|
||||||
- run: npm test
|
- run: npm test
|
||||||
- run: npm publish --access public
|
- run: npm pack
|
||||||
env:
|
|
||||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
publish-npm:
|
||||||
|
needs:
|
||||||
|
- version-check
|
||||||
|
- verify
|
||||||
|
if: needs.version-check.outputs.should_release == 'true' && needs.verify.result == 'success'
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
id-token: write
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v6
|
||||||
|
- uses: actions/setup-node@v6
|
||||||
|
with:
|
||||||
|
node-version: 24
|
||||||
|
registry-url: "https://registry.npmjs.org"
|
||||||
|
- run: npm ci
|
||||||
|
- run: npm publish --provenance --access public
|
||||||
|
|
||||||
build-native-bundles:
|
build-native-bundles:
|
||||||
needs: version-check
|
needs: version-check
|
||||||
if: needs.version-check.outputs.should_build_release == 'true'
|
if: needs.version-check.outputs.should_release == 'true'
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
@@ -70,13 +83,15 @@ jobs:
|
|||||||
- id: darwin-arm64
|
- id: darwin-arm64
|
||||||
os: macos-14
|
os: macos-14
|
||||||
- id: win32-x64
|
- id: win32-x64
|
||||||
os: blacksmith-4vcpu-windows-2025
|
os: windows-latest
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v6
|
- uses: actions/checkout@v6
|
||||||
- uses: actions/setup-node@v5
|
- uses: actions/setup-node@v6
|
||||||
with:
|
with:
|
||||||
node-version: 24.14.0
|
node-version: 24
|
||||||
- run: npm ci --ignore-scripts
|
- run: npm ci --ignore-scripts
|
||||||
- run: npm run build
|
- run: npm run build
|
||||||
- run: npm run build:native-bundle
|
- run: npm run build:native-bundle
|
||||||
@@ -97,7 +112,8 @@ jobs:
|
|||||||
$tmp = Join-Path $env:RUNNER_TEMP ("feynman-smoke-" + [guid]::NewGuid().ToString("N"))
|
$tmp = Join-Path $env:RUNNER_TEMP ("feynman-smoke-" + [guid]::NewGuid().ToString("N"))
|
||||||
New-Item -ItemType Directory -Path $tmp | Out-Null
|
New-Item -ItemType Directory -Path $tmp | Out-Null
|
||||||
Expand-Archive -LiteralPath "dist/release/feynman-$version-win32-x64.zip" -DestinationPath $tmp -Force
|
Expand-Archive -LiteralPath "dist/release/feynman-$version-win32-x64.zip" -DestinationPath $tmp -Force
|
||||||
& "$tmp/feynman-$version-win32-x64/feynman.cmd" --help | Select-Object -First 20
|
$bundleRoot = Join-Path $tmp "feynman-$version-win32-x64"
|
||||||
|
& (Join-Path $bundleRoot "feynman.cmd") --help | Select-Object -First 20
|
||||||
- uses: actions/upload-artifact@v4
|
- uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: native-${{ matrix.id }}
|
name: native-${{ matrix.id }}
|
||||||
@@ -108,8 +124,8 @@ jobs:
|
|||||||
- version-check
|
- version-check
|
||||||
- publish-npm
|
- publish-npm
|
||||||
- build-native-bundles
|
- build-native-bundles
|
||||||
if: needs.version-check.outputs.should_build_release == 'true' && needs.build-native-bundles.result == 'success' && (needs.publish-npm.result == 'success' || needs.publish-npm.result == 'skipped')
|
if: needs.version-check.outputs.should_release == 'true' && needs.publish-npm.result == 'success' && needs.build-native-bundles.result == 'success'
|
||||||
runs-on: blacksmith-4vcpu-ubuntu-2404
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
steps:
|
steps:
|
||||||
@@ -117,8 +133,10 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
path: release-assets
|
path: release-assets
|
||||||
merge-multiple: true
|
merge-multiple: true
|
||||||
- shell: bash
|
- name: Create GitHub release
|
||||||
|
shell: bash
|
||||||
env:
|
env:
|
||||||
|
GH_REPO: ${{ github.repository }}
|
||||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
VERSION: ${{ needs.version-check.outputs.version }}
|
VERSION: ${{ needs.version-check.outputs.version }}
|
||||||
run: |
|
run: |
|
||||||
@@ -127,7 +145,8 @@ jobs:
|
|||||||
gh release edit "v$VERSION" \
|
gh release edit "v$VERSION" \
|
||||||
--title "v$VERSION" \
|
--title "v$VERSION" \
|
||||||
--notes "Standalone Feynman bundles for native installation." \
|
--notes "Standalone Feynman bundles for native installation." \
|
||||||
--draft=false
|
--draft=false \
|
||||||
|
--latest
|
||||||
else
|
else
|
||||||
gh release create "v$VERSION" release-assets/* \
|
gh release create "v$VERSION" release-assets/* \
|
||||||
--title "v$VERSION" \
|
--title "v$VERSION" \
|
||||||
|
|||||||
298
CHANGELOG.md
298
CHANGELOG.md
@@ -14,3 +14,301 @@ Use this file to track chronology, not release notes. Keep entries short, factua
|
|||||||
- Failed / learned: ...
|
- Failed / learned: ...
|
||||||
- Blockers: ...
|
- Blockers: ...
|
||||||
- Next: ...
|
- Next: ...
|
||||||
|
|
||||||
|
### 2026-04-12 00:00 local — capital-france
|
||||||
|
|
||||||
|
- Objective: Run an unattended deep-research workflow for the question "What is the capital of France?"
|
||||||
|
- Changed: Created plan artifact at `outputs/.plans/capital-france.md`; scoped the workflow as a narrow fact-verification run with direct lead-agent evidence gathering instead of researcher subagents.
|
||||||
|
- Verified: Read existing `CHANGELOG.md` and recalled prior saved plan memory for `capital-france` before finalizing the new run plan.
|
||||||
|
- Failed / learned: None yet.
|
||||||
|
- Blockers: Need at least two current independent authoritative sources and a quick ambiguity check before drafting.
|
||||||
|
- Next: Collect current official/public sources, resolve any legal nuance, then draft and verify the brief.
|
||||||
|
|
||||||
|
### 2026-04-12 00:20 local — capital-france
|
||||||
|
|
||||||
|
- Objective: Complete evidence gathering and ambiguity check for the capital-of-France workflow.
|
||||||
|
- Changed: Wrote `notes/capital-france-research-web.md` and `notes/capital-france-legal-context.md`; identified Insee (2024) and a Sénat report as the two main corroborating sources.
|
||||||
|
- Verified: Cross-read current public French sources that explicitly describe Paris as the capital/capital city of France; found no current contradiction.
|
||||||
|
- Failed / learned: The Presidency homepage was useful contextual support but not explicit enough to carry the core claim alone.
|
||||||
|
- Blockers: Need citation pass and final review pass before promotion.
|
||||||
|
- Next: Draft the brief, then run verifier and reviewer passes.
|
||||||
|
|
||||||
|
### 2026-04-12 00:35 local — capital-france
|
||||||
|
|
||||||
|
- Objective: Move from gathered evidence to a citable draft.
|
||||||
|
- Changed: Wrote `outputs/.drafts/capital-france-draft.md` and updated the plan ledger to mark drafting complete.
|
||||||
|
- Verified: Kept the core claim narrowly scoped to what the Insee and Sénat sources explicitly support; treated the Élysée page as contextual only.
|
||||||
|
- Failed / learned: None.
|
||||||
|
- Blockers: Need verifier URL/citation pass and reviewer verification pass before final promotion.
|
||||||
|
- Next: Run verifier on the draft, then review and promote the final brief.
|
||||||
|
|
||||||
|
### 2026-04-12 00:50 local — capital-france
|
||||||
|
|
||||||
|
- Objective: Complete citation, verification, and final promotion for the capital-of-France workflow.
|
||||||
|
- Changed: Produced `outputs/capital-france-brief.md`, ran verification into `notes/capital-france-verification.md`, promoted the final brief to `outputs/capital-france.md`, and wrote `outputs/capital-france.provenance.md`.
|
||||||
|
- Verified: Reviewer found no FATAL or MAJOR issues. Core claim remains backed by two independent French public-institution sources, with Insee as the primary explicit source and the Sénat report as corroboration.
|
||||||
|
- Failed / learned: The runtime did not expose a named `verifier` subagent, so I used an available worker in a verifier-equivalent role and recorded that deviation in the plan.
|
||||||
|
- Blockers: None.
|
||||||
|
- Next: If needed, extend the brief with deeper legal-historical sourcing, but the narrow factual question is sufficiently answered.
|
||||||
|
|
||||||
|
### 2026-04-12 10:05 local — capital-france
|
||||||
|
|
||||||
|
- Objective: Run the citation-verification pass on the capital-of-France draft and promote a final cited brief.
|
||||||
|
- Changed: Verified the three draft source URLs were live (HTTP 200 at check time), added numbered inline citations, downgraded unsupported phrasing around the Élysée/context and broad ambiguity claims, and wrote `outputs/capital-france-brief.md`.
|
||||||
|
- Verified: Confirmed Insee explicitly says Paris is the capital of France; confirmed the Sénat report describes Paris’s capital status and the presence of national institutions; confirmed the Élysée homepage is contextual only and not explicit enough to carry the core claim.
|
||||||
|
- Failed / learned: The draft wording about the Presidency being seated in Paris was not directly supported by the cited homepage, so it was removed rather than carried forward.
|
||||||
|
- Blockers: Reviewer pass still pending if the workflow requires an adversarial final check.
|
||||||
|
- Next: If needed, run a final reviewer pass; otherwise use `outputs/capital-france-brief.md` as the canonical brief.
|
||||||
|
|
||||||
|
### 2026-04-12 10:20 local — capital-france
|
||||||
|
|
||||||
|
- Objective: Close the workflow with final review, final artifact promotion, and provenance.
|
||||||
|
- Changed: Ran a reviewer pass recorded in `notes/capital-france-verification.md`; promoted the cited brief into `outputs/capital-france.md`; wrote `outputs/capital-france.provenance.md`; updated the run plan to mark all tasks complete.
|
||||||
|
- Verified: Reviewer verdict was PASS WITH MINOR REVISIONS only; those minor wording fixes were applied before delivery.
|
||||||
|
- Failed / learned: The runtime did not expose a project-named `verifier` agent, so the citation pass used an available worker agent as a verifier-equivalent step.
|
||||||
|
- Blockers: None.
|
||||||
|
- Next: Optional only — produce a legal memorandum on the basis of Paris's capital status if requested.
|
||||||
|
|
||||||
|
### 2026-04-14 12:00 local — capital-belgium
|
||||||
|
|
||||||
|
- Objective: Run a deep-research workflow for the question "What is the capital of Belgium?"
|
||||||
|
- Changed: Created plan artifact at `outputs/.plans/capital-belgium.md`; gathered evidence into `notes/capital-belgium-research-web.md` from Belgium.be, FPS Foreign Affairs, Britannica, and a Belgian Senate constitution check.
|
||||||
|
- Verified: Found two explicit current Belgian government statements that Brussels is the federal capital of Belgium, plus independent Britannica corroboration; no conflicting nuance surfaced in the consulted legal text.
|
||||||
|
- Failed / learned: This is narrow enough that researcher subagents would add overhead without increasing evidence quality.
|
||||||
|
- Blockers: Need draft, citation/URL verification pass, final review pass, and promotion.
|
||||||
|
- Next: Draft the brief, run verifier-equivalent and reviewer passes, then promote final output with provenance.
|
||||||
|
|
||||||
|
### 2026-04-14 12:25 local — capital-belgium
|
||||||
|
|
||||||
|
- Objective: Complete citation, verification, and final promotion for the capital-of-Belgium workflow.
|
||||||
|
- Changed: Wrote `outputs/.drafts/capital-belgium-draft.md`; produced cited brief `outputs/capital-belgium-brief.md`; ran verification into `notes/capital-belgium-verification.md`; promoted final output to `outputs/capital-belgium.md`; wrote `outputs/capital-belgium.provenance.md`; updated the plan ledger and verification log.
|
||||||
|
- Verified: Core claim is now backed by Belgium.be, Belgian Foreign Affairs, Britannica, and direct constitutional text from Senate-hosted Article 194 stating that Brussels is the capital of Belgium and the seat of the federal government.
|
||||||
|
- Failed / learned: The runtime did not expose a named `verifier` subagent, so a worker performed a verifier-equivalent citation/URL check; reviewer surfaced a stronger constitutional source than the first draft had emphasized.
|
||||||
|
- Blockers: None.
|
||||||
|
- Next: Optional only — if requested, expand this into a legal-historical note on Brussels’s capital status and the distinction between city, region, and federal institutions.
|
||||||
|
|
||||||
|
### 2026-03-25 00:00 local — scaling-laws
|
||||||
|
|
||||||
|
- Objective: Set up a deep research workflow for scaling laws.
|
||||||
|
- Changed: Created plan artifact at `outputs/.plans/scaling-laws.md`; defined 4 disjoint researcher dimensions and acceptance criteria.
|
||||||
|
- Verified: Read `CHANGELOG.md` and checked prior memory for related plan `scaling-laws-implications`.
|
||||||
|
- Failed / learned: No prior run-specific changelog entries existed beyond the template.
|
||||||
|
- Blockers: Waiting for user confirmation before launching researcher round 1.
|
||||||
|
- Next: On confirmation, spawn 4 parallel researcher subagents and begin evidence collection.
|
||||||
|
|
||||||
|
### 2026-03-25 00:30 local — scaling-laws (T4 inference/time-scale pass)
|
||||||
|
|
||||||
|
- Objective: Complete T4 on inference/test-time scaling and reasoning-time compute, scoped to 2023–2026.
|
||||||
|
- Changed: Wrote `notes/scaling-laws-research-inference.md`; updated `outputs/.plans/scaling-laws.md` to mark T4 done and log the inference-scaling verification pass.
|
||||||
|
- Verified: Cross-read 13 primary/official sources covering Tree-of-Thoughts, PRMs, repeated sampling, compute-optimal test-time scaling, provable laws, o1, DeepSeek-R1, s1, verifier failures, Anthropic extended thinking, and OpenAI reasoning API docs.
|
||||||
|
- Failed / learned: OpenAI blog fetch for `learning-to-reason-with-llms` returned malformed content, so the note leans on the o1 system card and API docs instead of that blog post.
|
||||||
|
- Blockers: T2 and T5 remain open before final synthesis; no single unified law for inference-time scaling emerged from public sources.
|
||||||
|
- Next: Complete T5 implications synthesis, then reconcile T3/T4 with foundational T2 before drafting the cited brief.
|
||||||
|
|
||||||
|
### 2026-03-25 11:20 local — scaling-laws (T6 draft synthesis)
|
||||||
|
|
||||||
|
- Objective: Synthesize the four research notes into a single user-facing draft brief for the scaling-laws workflow.
|
||||||
|
- Changed: Wrote `outputs/.drafts/scaling-laws-draft.md` with an executive summary, curated reading list, qualitative meta-analysis, core-paper comparison table, explicit training-vs-inference distinction, and numbered inline citations with direct-URL sources.
|
||||||
|
- Verified: Cross-checked the draft against `notes/scaling-laws-research-foundations.md`, `notes/scaling-laws-research-revisions.md`, `notes/scaling-laws-research-inference.md`, and `notes/scaling-laws-research-implications.md` to ensure the brief explicitly states the literature is too heterogeneous for a pooled effect-size estimate.
|
||||||
|
- Failed / learned: The requested temp-run `context.md` and `plan.md` were absent, so the synthesis used `outputs/.plans/scaling-laws.md` plus the four note files as the working context.
|
||||||
|
- Blockers: Citation/claim verification pass still pending; this draft should be treated as pre-verification.
|
||||||
|
- Next: Run verifier/reviewer passes, then promote the draft into the final cited brief and provenance sidecar.
|
||||||
|
|
||||||
|
### 2026-03-25 11:28 local — scaling-laws (final brief + pdf)
|
||||||
|
|
||||||
|
- Objective: Deliver a paper guide and qualitative meta-analysis on AI scaling laws.
|
||||||
|
- Changed: Finalized `outputs/scaling-laws.md` and sidecar `outputs/scaling-laws.provenance.md`; rendered preview PDF at `outputs/scaling-laws.pdf`; updated plan ledger and verification log in `outputs/.plans/scaling-laws.md`.
|
||||||
|
- Verified: Ran a reviewer pass recorded in `notes/scaling-laws-verification.md`; spot-checked key primary papers via alpha-backed reads for Kaplan 2020, Chinchilla 2022, and Snell 2024; confirmed PDF render output exists.
|
||||||
|
- Failed / learned: A pooled statistical meta-analysis would be misleading because the literature mixes heterogeneous outcomes, scaling axes, and evaluation regimes; final deliverable uses a qualitative meta-analysis instead.
|
||||||
|
- Blockers: None for this brief.
|
||||||
|
- Next: If needed, extend into a narrower sub-survey (e.g. only pretraining laws, only inference-time scaling, or only post-Chinchilla data-quality revisions).
|
||||||
|
|
||||||
|
### 2026-03-25 14:52 local — skills-only-install
|
||||||
|
|
||||||
|
- Objective: Let users download the Feynman research skills without installing the full terminal runtime.
|
||||||
|
- Changed: Added standalone skills-only installers at `scripts/install/install-skills.sh` and `scripts/install/install-skills.ps1`; synced website-public copies; documented user-level and repo-local install flows in `README.md`, `website/src/content/docs/getting-started/installation.md`, and `website/src/pages/index.astro`.
|
||||||
|
- Verified: Ran `sh -n scripts/install/install-skills.sh`; ran `node scripts/sync-website-installers.mjs`; ran `cd website && npm run build`; executed `sh scripts/install/install-skills.sh --dir <tmp>` and confirmed extracted `SKILL.md` files land in the target directory.
|
||||||
|
- Failed / learned: PowerShell installer behavior was not executed locally because PowerShell is not installed in this environment.
|
||||||
|
- Blockers: None for the Unix installer flow; Windows remains syntax-only by inspection.
|
||||||
|
- Next: If users want this exposed more prominently, add a dedicated docs/reference page and a homepage-specific skills-only CTA instead of a text link.
|
||||||
|
|
||||||
|
### 2026-03-26 18:08 PDT — installer-release-unification
|
||||||
|
|
||||||
|
- Objective: Remove the moving `edge` installer channel and unify installs on tagged releases only.
|
||||||
|
- Changed: Updated `scripts/install/install.sh`, `scripts/install/install.ps1`, `scripts/install/install-skills.sh`, and `scripts/install/install-skills.ps1` so the default target is the latest tagged release, latest-version resolution uses public GitHub release pages instead of `api.github.com`, and explicit `edge` requests now fail with a removal message; removed the `release-edge` job from `.github/workflows/publish.yml`; updated `README.md` and `website/src/content/docs/getting-started/installation.md`; re-synced `website/public/install*`.
|
||||||
|
- Verified: Ran `sh -n` on the Unix installer copies; confirmed `sh scripts/install/install.sh edge` and `sh scripts/install/install-skills.sh edge --dir <tmp>` fail with the intended removal message; executed `sh scripts/install/install.sh` into temp dirs and confirmed the installed binary reports `0.2.14`; executed `sh scripts/install/install-skills.sh --dir <tmp>` and confirmed extracted `SKILL.md` files; ran `cd website && npm run build`.
|
||||||
|
- Failed / learned: The install failure was caused by unauthenticated GitHub API rate limiting on the `edge` path, so renaming channels without removing the API dependency would not have fixed the root cause.
|
||||||
|
- Blockers: `npm run build` still emits a pre-existing duplicate-content warning for `getting-started/installation`; the build succeeds.
|
||||||
|
- Next: If desired, remove the now-unused `stable` alias too and clean up the duplicate docs-content warning separately.
|
||||||
|
|
||||||
|
### 2026-03-27 11:58 PDT — release-0.2.15
|
||||||
|
|
||||||
|
- Objective: Make the non-Anthropic subagent/auth fixes and contributor-guide updates releasable to tagged-install users instead of leaving them only on `main`.
|
||||||
|
- Changed: Bumped the package version from `0.2.14` to `0.2.15` in `package.json` and `package-lock.json`; updated pinned installer examples in `README.md` and `website/src/content/docs/getting-started/installation.md`; aligned the local-development docs example to the npm-based root workflow; added `CONTRIBUTING.md` plus the bundled `skills/contributing/SKILL.md`.
|
||||||
|
- Verified: Confirmed the publish workflow keys off `package.json` versus the currently published npm version; confirmed local `npm test`, `npm run typecheck`, and `npm run build` pass before the release bump.
|
||||||
|
- Failed / learned: The open subagent issue is fixed on `main` but still user-visible on tagged installs until a fresh release is cut.
|
||||||
|
- Blockers: Need the GitHub publish workflow to finish successfully before the issue can be honestly closed as released.
|
||||||
|
- Next: Push `0.2.15`, monitor the publish workflow, then update and close the relevant GitHub issue/PR once the release is live.
|
||||||
|
|
||||||
|
### 2026-03-28 15:15 PDT — pi-subagents-agent-dir-compat
|
||||||
|
|
||||||
|
- Objective: Debug why tagged installs can still fail subagent/auth flows after `0.2.15` when users are not on Anthropic.
|
||||||
|
- Changed: Added `scripts/lib/pi-subagents-patch.mjs` plus type declarations and wired `scripts/patch-embedded-pi.mjs` to rewrite vendored `pi-subagents` runtime files so they resolve user-scoped paths from `PI_CODING_AGENT_DIR` instead of hardcoded `~/.pi/agent`; added `tests/pi-subagents-patch.test.ts`.
|
||||||
|
- Verified: Materialized `.feynman/npm`, inspected the shipped `pi-subagents@0.11.11` sources, confirmed the hardcoded `~/.pi/agent` paths in `index.ts`, `agents.ts`, `artifacts.ts`, `run-history.ts`, `skills.ts`, and `chain-clarify.ts`; ran `node scripts/patch-embedded-pi.mjs`; ran `npm test`, `npm run typecheck`, and `npm run build`.
|
||||||
|
- Failed / learned: The earlier `0.2.15` fix only proved that Feynman exported `PI_CODING_AGENT_DIR` to the top-level Pi child; it did not cover vendored extension code that still hardcoded `.pi` paths internally.
|
||||||
|
- Blockers: Users still need a release containing this patch before tagged installs benefit from it.
|
||||||
|
- Next: Cut the next release and verify a tagged install exercises subagents without reading from `~/.pi/agent`.
|
||||||
|
|
||||||
|
### 2026-03-28 21:46 PDT — release-0.2.16
|
||||||
|
|
||||||
|
- Objective: Ship the vendored `pi-subagents` agent-dir compatibility fix to tagged installs.
|
||||||
|
- Changed: Bumped the package version from `0.2.15` to `0.2.16` in `package.json` and `package-lock.json`; updated pinned installer examples in `README.md` and `website/src/content/docs/getting-started/installation.md`.
|
||||||
|
- Verified: Re-ran `npm test`, `npm run typecheck`, and `npm run build`; ran `cd website && npm run build`; ran `npm pack` and confirmed the `0.2.16` tarball includes the new `scripts/lib/pi-subagents-patch.*` files.
|
||||||
|
- Failed / learned: An initial local `build:native-bundle` check failed because `npm pack` and `build:native-bundle` were run in parallel, and `prepack` intentionally removes `dist/release`; rerunning `npm run build:native-bundle` sequentially succeeded.
|
||||||
|
- Blockers: None in the repo; publishing still depends on the GitHub workflow running on the bumped version.
|
||||||
|
- Next: Push the `0.2.16` release bump and monitor npm/GitHub release publication.
|
||||||
|
|
||||||
|
### 2026-03-31 10:45 PDT — pi-maintenance-issues-prs
|
||||||
|
|
||||||
|
- Objective: Triage open Pi-related issues/PRs, fix the concrete package update regression, and refresh Pi dependencies against current upstream releases.
|
||||||
|
- Changed: Pinned direct package-manager operations (`feynman update`, `feynman packages install`) to Feynman's npm prefix by exporting `FEYNMAN_NPM_PREFIX`, `NPM_CONFIG_PREFIX`, and `npm_config_prefix` before invoking Pi's `DefaultPackageManager`; bumped `@mariozechner/pi-ai` and `@mariozechner/pi-coding-agent` from `0.62.0` to `0.64.0`; adapted `src/model/registry.ts` to the new `ModelRegistry.create(...)` factory; integrated PR #15's `/feynman-model` command on top of current `main`.
|
||||||
|
- Verified: Ran `npm test`, `npm run typecheck`, and `npm run build` successfully after the dependency bump and PR integration; confirmed upstream `pi-coding-agent@0.64.0` still uses `npm install -g` for user-scope package updates, so the Feynman-side prefix fix is still required.
|
||||||
|
- Failed / learned: PR #14 is a stale branch with no clean merge path against current `main`; the only user-facing delta is the ValiChord prompt/skill addition, and the branch also carries unrelated release churn plus demo-style material, so it was not merged in this pass.
|
||||||
|
- Blockers: None in the local repo state; remote merge/push still depends on repository credentials and branch policy.
|
||||||
|
- Next: If remote write access is available, commit and push the validated maintenance changes, then close issue #22 and resolve PR #15 as merged while leaving PR #14 unmerged pending a cleaned-up, non-promotional resubmission.
|
||||||
|
|
||||||
|
### 2026-03-31 12:05 PDT — pi-backlog-cleanup-round-2
|
||||||
|
|
||||||
|
- Objective: Finish the remaining high-confidence open tracker items after the Pi 0.64.0 upgrade instead of leaving the issue list half-reconciled.
|
||||||
|
- Changed: Added a Windows extension-loader patch helper so Feynman rewrites Pi extension imports to `file://` URLs on Windows before interactive startup; added `/commands`, `/tools`, and `/capabilities` discovery commands and surfaced `/hotkeys` plus `/service-tier` in help metadata; added explicit service-tier support via `feynman model tier`, `--service-tier`, status/doctor output, and a provider-payload hook that passes `service_tier` only to supported OpenAI/OpenAI Codex/Anthropic models; added Exa provider recognition to Feynman's web-search status layer and vendored `pi-web-access`.
|
||||||
|
- Verified: Ran `npm test`, `npm run typecheck`, and `npm run build`; smoke-imported the modified vendored `pi-web-access` modules with `node --import tsx`.
|
||||||
|
- Failed / learned: The remaining ValiChord PR is still stale and mixes a real prompt/skill update with unrelated branch churn; it is a review/triage item, not a clean merge candidate.
|
||||||
|
- Blockers: No local build blockers remain; issue/PR closure still depends on the final push landing on `main`.
|
||||||
|
- Next: Push the verified cleanup commit, then close issues fixed by the dependency bump plus the new discoverability/service-tier/Windows patches, and close the stale ValiChord PR explicitly instead of leaving it open indefinitely.
|
||||||
|
|
||||||
|
### 2026-04-09 09:37 PDT — windows-startup-import-specifiers
|
||||||
|
|
||||||
|
- Objective: Fix Windows startup failures where `feynman` exits before the Pi child process initializes.
|
||||||
|
- Changed: Converted the Node preload module paths passed via `node --import` in `src/pi/launch.ts` to `file://` specifiers using a new `toNodeImportSpecifier(...)` helper in `src/pi/runtime.ts`; expanded `scripts/patch-embedded-pi.mjs` so it also patches the bundled workspace copy of Pi's extension loader when present.
|
||||||
|
- Verified: Added a regression test in `tests/pi-runtime.test.ts` covering absolute-path to `file://` conversion for preload imports; ran `npm test`, `npm run typecheck`, and `npm run build`.
|
||||||
|
- Failed / learned: The raw Windows `ERR_UNSUPPORTED_ESM_URL_SCHEME` stack is more consistent with Node rejecting the child-process `--import C:\\...` preload before Pi starts than with a normal in-app extension load failure.
|
||||||
|
- Blockers: Windows runtime execution was not available locally, so the fix is verified by code path inspection and automated tests rather than an actual Windows shell run.
|
||||||
|
- Next: Ask the affected user to reinstall or update to the next published package once released, and confirm the Windows REPL now starts from a normal PowerShell session.
|
||||||
|
|
||||||
|
### 2026-04-09 11:02 PDT — tracker-hardening-pass
|
||||||
|
|
||||||
|
- Objective: Triage the open repo backlog, land the highest-signal fixes locally, and add guardrails against stale promotional workflow content.
|
||||||
|
- Changed: Hardened Windows launch paths in `bin/feynman.js`, `scripts/build-native-bundle.mjs`, and `scripts/install/install.ps1`; set npm prefix overrides earlier in `scripts/patch-embedded-pi.mjs`; added a `pi-web-access` runtime patch helper plus `FEYNMAN_WEB_SEARCH_CONFIG` env wiring so bundled web search reads the same `~/.feynman/web-search.json` that doctor/status report; taught `src/pi/web-access.ts` to honor the legacy `route` key; fixed bundled skill references and expanded the skills-only installers/docs to ship the prompt and guidance files those skills reference; added regression tests for config paths, catalog snapshot edges, skill-path packaging, `pi-web-access` patching, and blocked promotional content.
|
||||||
|
- Verified: Ran `npm test`, `npm run typecheck`, and `npm run build` successfully after the full maintenance pass.
|
||||||
|
- Failed / learned: The skills-only install issue was not just docs drift; the shipped `SKILL.md` files referenced prompt paths that only made sense after installation, so the repo needed both path normalization and packaging changes.
|
||||||
|
- Blockers: Remote issue/PR closure and merge actions still depend on the final reviewed branch state being pushed.
|
||||||
|
- Next: Push the validated fixes, close the duplicate Windows/reporting issues they supersede, reject the promotional ValiChord PR explicitly, and then review whether the remaining docs-only or feature PRs should be merged separately.
|
||||||
|
|
||||||
|
### 2026-04-09 10:28 PDT — verification-and-security-pass
|
||||||
|
|
||||||
|
- Objective: Run a deeper install/security verification pass against the post-cleanup `0.2.17` tree instead of assuming the earlier targeted fixes covered the shipped artifacts.
|
||||||
|
- Changed: Reworked `extensions/research-tools/header.ts` to use `@mariozechner/pi-tui` width-aware helpers for truncation/wrapping so wide Unicode text does not overflow custom header rows; changed `src/pi/launch.ts` to stop mirroring child crash signals back onto the parent process and instead emit a conventional exit code; added `FEYNMAN_INSTALL_SKILLS_ARCHIVE_URL` overrides to the skills installers for pre-release smoke testing; aligned root and website dependency trees with patched transitive versions using npm `overrides`; fixed `src/pi/web-access.ts` so `search status` respects `FEYNMAN_HOME` semantics instead of hardcoding the current shell home directory; added `tests/pi-launch.test.ts`.
|
||||||
|
- Verified: Ran `npm test`, `npm run typecheck`, `npm run build`, `cd website && npm run build`, `npm run build:native-bundle`; smoke-tested `scripts/install/install.sh` against a locally served `dist/release/feynman-0.2.17-darwin-arm64.tar.gz`; smoke-tested `scripts/install/install-skills.sh` against a local source archive; confirmed installed `feynman --version`, `feynman --help`, `feynman doctor`, and packaged `feynman search status` work from the installed bundle; `npm audit --omit=dev` is clean in the root app and website after overrides.
|
||||||
|
- Failed / learned: The first packaged `search status` smoke test still showed the user home path because the native bundle had been built before the `FEYNMAN_HOME` path fix; rebuilding the native bundle resolved that mismatch.
|
||||||
|
- Blockers: PowerShell runtime was unavailable locally, so Windows installer execution remained code-path validated rather than actually executed.
|
||||||
|
- Next: Push the second-pass hardening commit, then keep issue `#46` and issue `#47` open until users on the affected Linux/CJK environments confirm whether the launcher/header fixes fully resolve them.
|
||||||
|
|
||||||
|
### 2026-04-09 10:36 PDT — remaining-tracker-triage-pass
|
||||||
|
|
||||||
|
- Objective: Reduce the remaining open tracker items by landing the lowest-risk missing docs/catalog updates and a targeted Cloud Code Assist compatibility patch instead of only hand-triaging them.
|
||||||
|
- Changed: Added MiniMax M2.7 recommendation preferences in `src/model/catalog.ts`; documented model switching, authenticated-provider visibility, and `/feynman-model` subagent overrides in `website/src/content/docs/getting-started/configuration.md` and `website/src/content/docs/reference/slash-commands.md`; added a runtime patch helper in `scripts/lib/pi-google-legacy-schema-patch.mjs` and wired `scripts/patch-embedded-pi.mjs` to normalize JSON Schema `const` into `enum` for the legacy `parameters` field used by Cloud Code Assist Claude models.
|
||||||
|
- Verified: Ran `npm test`, `npm run typecheck`, `npm run build`, and `cd website && npm run build` after the patch/helper/docs changes.
|
||||||
|
- Failed / learned: The MiniMax provider catalog in Pi already uses canonical IDs like `MiniMax-M2.7`, so the only failure during validation was a test assertion using the wrong casing rather than a runtime bug.
|
||||||
|
- Blockers: The Cloud Code Assist fix is validated by targeted patch tests and code-path review rather than an end-to-end Google account repro in this environment.
|
||||||
|
- Next: Push the tracker-triage commit, close the docs/MiniMax PRs as superseded by main, close the support-style model issues against the new docs, and decide whether the remaining feature requests should be left open or closed as not planned/upstream-dependent.
|
||||||
|
|
||||||
|
### 2026-04-10 10:22 PDT — web-access-stale-override-fix
|
||||||
|
|
||||||
|
- Objective: Fix the new `ctx.modelRegistry.getApiKeyAndHeaders is not a function` / stale `search-filter.js` report without reintroducing broad vendor drift.
|
||||||
|
- Changed: Removed the stale `.feynman/vendor-overrides/pi-web-access/*` files and removed `syncVendorOverride` from `scripts/patch-embedded-pi.mjs`; kept the targeted `pi-web-access` runtime config-path patch; added `feynman search set <provider> [api-key]` and `feynman search clear` commands with a shared save path in `src/pi/web-access.ts`.
|
||||||
|
- Verified: Ran `npm test`, `npm run typecheck`, `npm run build`; ran `node scripts/patch-embedded-pi.mjs`, confirmed the installed `pi-web-access/index.ts` has no `search-filter` / condense helper references, and smoke-imported `./.feynman/npm/node_modules/pi-web-access/index.ts`; ran `npm pack --dry-run` and confirmed stale `vendor-overrides` files are no longer in the package tarball.
|
||||||
|
- Failed / learned: The public Linux installer Docker test was attempted but Docker Desktop became unresponsive even for simple `docker run node:22-bookworm node -v` commands; the earlier Linux npm-artifact container smoke test remains valid, but this specific public-installer run is blocked by the local Docker daemon.
|
||||||
|
- Blockers: Issue `#54` is too underspecified to fix directly without logs; public Linux installer behavior still needs a stable Docker daemon or a real Linux shell to reproduce the user's exact npm errors.
|
||||||
|
- Next: Push the stale-override fix, close PR `#52` and PR `#53` as superseded/merged-by-main once pushed, and ask for logs on issue `#54` instead of guessing.
|
||||||
|
|
||||||
|
### 2026-04-10 10:49 PDT — rpc-and-website-verification-pass
|
||||||
|
|
||||||
|
- Objective: Exercise the Feynman wrapper's RPC mode and the website quality gates that were not fully covered by the prior passes.
|
||||||
|
- Changed: Added `--mode <text|json|rpc>` pass-through support in the Feynman wrapper and skipped terminal clearing in RPC mode; added `@astrojs/check` to the website dev dependencies, fixed React Refresh lint violations in the generated UI components by exporting only components, and added safe website dependency overrides for dev-audit findings.
|
||||||
|
- Verified: Ran a JSONL RPC smoke test through `node bin/feynman.js --mode rpc` with `get_state`; ran `npm test`, `npm run typecheck`, `npm run build`, `cd website && npm run lint`, `cd website && npm run typecheck`, `cd website && npm run build`, full root `npm audit`, full website `npm audit`, and `npm run build:native-bundle`.
|
||||||
|
- Failed / learned: Website typecheck previously did nothing beyond printing an install prompt because `@astrojs/check` was missing; installing it exposed dev-audit findings that needed explicit overrides before the full website audit was clean.
|
||||||
|
- Blockers: Docker Desktop remained unreliable after restart attempts, so this pass still does not include a second successful public-installer Linux Docker run.
|
||||||
|
- Next: Push the RPC/website verification commit and keep future Docker/public-installer validation separate from repo correctness unless Docker is stable.
|
||||||
|
|
||||||
|
### 2026-04-12 09:32 PDT — pi-0.66.1-upgrade-pass
|
||||||
|
|
||||||
|
- Objective: Update Feynman from Pi `0.64.0` to the current `0.66.1` packages and absorb any downstream SDK/runtime compatibility changes instead of leaving the repo pinned behind upstream.
|
||||||
|
- Changed: Bumped `@mariozechner/pi-ai` and `@mariozechner/pi-coding-agent` to `0.66.1` plus `@companion-ai/alpha-hub` to `0.1.3` in `package.json` and `package-lock.json`; updated `extensions/research-tools.ts` to stop listening for the removed `session_switch` extension event and rely on `session_start`, which now carries startup/reload/new/resume/fork reasons in Pi `0.66.x`.
|
||||||
|
- Verified: Ran `npm test`, `npm run typecheck`, and `npm run build` successfully after the upgrade; smoke-ran `node bin/feynman.js --version`, `node bin/feynman.js doctor`, and `node bin/feynman.js status` successfully; checked upstream package diffs and confirmed the breaking change that affected this repo was the typed extension lifecycle change in `pi-coding-agent`, while `pi-ai` mainly brought refreshed provider/model catalog code including Bedrock/OpenAI provider updates and new generated model entries.
|
||||||
|
- Failed / learned: `ctx7` resolved Pi correctly to `/badlogic/pi-mono`, but its docs snapshot was not release-note oriented; the concrete downstream-impact analysis came from the actual `0.64.0` → `0.66.1` package diffs and local validation, not from prose docs alone.
|
||||||
|
- Failed / learned: The first post-upgrade CLI smoke test failed before Feynman startup because `@companion-ai/alpha-hub@0.1.2` shipped a zero-byte `src/lib/auth.js`; bumping to `0.1.3` fixed that adjacent runtime blocker.
|
||||||
|
- Blockers: `npm install` reports two high-severity vulnerabilities remain in the dependency tree; this pass focused on the Pi upgrade and did not remediate unrelated audit findings.
|
||||||
|
- Next: Push the Pi upgrade, then decide whether to layer the pending model-command fixes on top of this branch or land them separately to keep the dependency bump easy to review.
|
||||||
|
|
||||||
|
### 2026-04-12 13:00 PDT — model-command-and-bedrock-fix-pass
|
||||||
|
|
||||||
|
- Objective: Finish the remaining user-facing model-management regressions instead of stopping at the Pi dependency bump.
|
||||||
|
- Changed: Updated `src/model/commands.ts` so `feynman model login <provider>` resolves both OAuth and API-key providers; `feynman model logout <provider>` clears either auth mode; `feynman model set` accepts both `provider/model` and `provider:model`; ambiguous bare model IDs now prefer explicitly configured providers from auth storage; added an `amazon-bedrock` setup path that validates the AWS credential chain with the AWS SDK and stores Pi's `<authenticated>` sentinel so Bedrock models appear in `model list`; synced `src/cli.ts`, `metadata/commands.mjs`, `README.md`, and the website docs to the new behavior.
|
||||||
|
- Verified: Added regression tests in `tests/model-harness.test.ts` for `provider:model`, API-key provider resolution, and ambiguous bare-ID handling; ran `npm test`, `npm run typecheck`, `npm run build`, and `cd website && npm run build`; exercised command-level flows against throwaway `FEYNMAN_HOME` directories: interactive `node bin/feynman.js model login google`, `node bin/feynman.js model set google:gemini-3-pro-preview`, `node bin/feynman.js model set gpt-5.4` with only OpenAI configured, and `node bin/feynman.js model login amazon-bedrock`; confirmed `model list` shows Bedrock models after the new setup path; ran a live one-shot prompt `node bin/feynman.js --prompt "Reply with exactly OK"` and got `OK`.
|
||||||
|
- Failed / learned: The website build still emits duplicate-id warnings for a handful of docs pages, but it completes successfully; those warnings predate this pass and were not introduced by the model-command edits.
|
||||||
|
- Blockers: The Bedrock path is verified with the current shell's AWS credential chain, not with a fresh machine lacking AWS config; broader upstream Pi behavior around IMDS/default-profile autodiscovery without the sentinel is still outside this repo.
|
||||||
|
- Next: Commit and push the combined Pi/model/docs maintenance branch, then decide whether to tackle the deeper search/deepresearch hang issues separately or leave them for focused repro work.
|
||||||
|
|
||||||
|
### 2026-04-12 13:35 PDT — workflow-unattended-and-search-curator-fix-pass
|
||||||
|
|
||||||
|
- Objective: Fix the remaining workflow deadlocks instead of leaving `deepresearch` and terminal web search half-functional after the maintenance push.
|
||||||
|
- Changed: Updated the built-in research workflow prompts (`deepresearch`, `lit`, `review`, `audit`, `compare`, `draft`, `watch`) so they present the plan and continue automatically rather than blocking for approval; extended the `pi-web-access` runtime patch so Feynman rewrites its default workflow from browser-based `summary-review` to `none`; added explicit `workflow: "none"` persistence in `src/search/commands.ts` and `src/pi/web-access.ts`, plus surfaced the workflow in doctor/status-style output.
|
||||||
|
- Verified: Reproduced the original `deepresearch` failure mode in print mode, where the run created `outputs/.plans/capital-france.md` and then stopped waiting for user confirmation; after the prompt changes, reran `deepresearch "What is the capital of France?"` and confirmed it progressed beyond planning and produced `outputs/.drafts/capital-france-draft.md`; inspected `pi-web-access@0.10.6` and confirmed the exact `waiting for summary approval...` string and `summary-review` default live in that package; added regression tests for the new `pi-web-access` patch and workflow-none status handling; reran `npm test`, `npm run typecheck`, and `npm run build`; smoke-tested `feynman search set exa exa_test_key` under a throwaway `FEYNMAN_HOME` and confirmed it writes `"workflow": "none"` to `web-search.json`.
|
||||||
|
- Failed / learned: The long-running deepresearch session still spends substantial time in later reasoning/writing steps for even a narrow query, but the plan-confirmation deadlock itself is resolved; the remaining slowness is model/workflow behavior, not the original stop-after-plan bug.
|
||||||
|
- Blockers: I did not install and execute the full optional `pi-session-search` package locally, so the terminal `summary approval` fix is validated by source inspection plus the Feynman patch path and config persistence rather than a local end-to-end package install.
|
||||||
|
- Next: Commit and push the workflow/search fix pass, then close or answer the remaining deepresearch/search issues with the specific root causes and shipped fixes.
|
||||||
|
|
||||||
|
### 2026-04-12 14:05 PDT — final-artifact-hardening-pass
|
||||||
|
|
||||||
|
- Objective: Reduce the chance of unattended research workflows stopping at intermediate artifacts like `<slug>-brief.md` without promoting the final deliverable and provenance sidecar.
|
||||||
|
- Changed: Tightened `prompts/deepresearch.md` so the agent must verify on disk that the plan, draft, cited brief, promoted final output, and provenance sidecar all exist before stopping; tightened `prompts/lit.md` so it explicitly checks for the final output plus provenance sidecar instead of stopping at an intermediate cited draft.
|
||||||
|
- Verified: Cross-read the current deepresearch/lit deliver steps after the earlier unattended-run reproductions and confirmed the missing enforcement point was the final on-disk artifact check, not the naming convention itself.
|
||||||
|
- Failed / learned: This is still prompt-level enforcement rather than a deterministic post-processing hook, so it improves completion reliability but does not provide the same guarantees as a dedicated artifact-finalization wrapper.
|
||||||
|
- Blockers: I did not rerun a full broad deepresearch workflow end-to-end after this prompt-only hardening because those runs are materially longer and more expensive than the narrow reproductions already used to isolate the earlier deadlocks.
|
||||||
|
- Next: Commit and push the prompt hardening, then, if needed, add a deterministic wrapper around final artifact promotion instead of relying only on prompt adherence.
|
||||||
|
|
||||||
|
### 2026-04-14 09:30 PDT — wsl-login-and-uninstall-docs-pass
|
||||||
|
|
||||||
|
- Objective: Fix the remaining WSL setup blocker and close the last actionable support issue instead of leaving the tracker open after the earlier workflow/model fixes.
|
||||||
|
- Changed: Added a dedicated alpha-hub auth patch helper and tests; extended the alphaXiv login patch so WSL uses `wslview` when available and falls back to `cmd.exe /c start`, while also printing the auth URL explicitly for manual copy/paste if browser launch still fails; documented standalone uninstall steps in `README.md` and `website/src/content/docs/getting-started/installation.md`.
|
||||||
|
- Verified: Added regression tests for the alpha-hub auth patch, reran `npm test`, `npm run typecheck`, and `npm run build`, and smoke-checked the patched alpha-hub source rewrite to confirm it injects both the WSL browser path and the explicit auth URL logging.
|
||||||
|
- Failed / learned: This repo can patch alpha-hub's login UX reliably, but it still does not ship a destructive `feynman uninstall` command; the practical fix for the support issue is documented uninstall steps rather than a rushed cross-platform remover.
|
||||||
|
- Blockers: I did not run a true WSL shell here, so the WSL fix is validated by the deterministic source patch plus tests rather than an actual Windows-hosted browser-launch repro.
|
||||||
|
- Next: Push the WSL/login pass and close the stale issues and PRs that are already superseded by `main`.
|
||||||
|
|
||||||
|
### 2026-04-14 09:35 PDT — review-findings-and-audit-cleanup
|
||||||
|
|
||||||
|
- Objective: Fix the remaining concrete issues found in the deeper review pass instead of stopping at tracker cleanup.
|
||||||
|
- Changed: Updated the `pi-web-access` patch so Feynman defaults search workflow to `none` without disabling explicit `summary-review`; softened the research workflow prompts so only unattended/one-shot runs auto-continue while interactive users still get a chance to request plan changes; corrected uninstall docs to mention `~/.ahub` alongside `~/.feynman`; bumped the root `basic-ftp` override from `5.2.1` to `5.2.2`.
|
||||||
|
- Verified: Ran `npm test`, `npm run typecheck`, `npm run build`, `cd website && npm run build`, and `npm audit`; root audit is now clean.
|
||||||
|
- Failed / learned: Astro still emits a duplicate-content-id warning for `website/src/content/docs/getting-started/installation.md`, but the website build succeeds and I did not identify a low-risk repo-side fix for that warning in this pass.
|
||||||
|
- Blockers: The duplicate-id warning remains as a build warning only, not a failing correctness gate.
|
||||||
|
- Next: If desired, isolate the Astro duplicate-id warning separately with a minimal reproduction rather than mixing it into runtime/CLI maintenance.
|
||||||
|
|
||||||
|
### 2026-04-14 10:55 PDT — summarize-workflow-restore
|
||||||
|
|
||||||
|
- Objective: Restore the useful summarization workflow that had been closed in PR `#69` without being merged.
|
||||||
|
- Changed: Added `prompts/summarize.md` as a top-level CLI workflow so `feynman summarize <source>` is available again; kept the RLM-based tiering approach from the original proposal and aligned Tier 3 confirmation behavior with the repo's unattended-run conventions.
|
||||||
|
- Verified: Confirmed `feynman summarize <source>` appears in CLI help; ran `node bin/feynman.js summarize /tmp/feynman-summary-smoke.txt` against a local smoke file and verified it produced `outputs/feynman-summary-smoke-summary.md` plus the raw fetched note artifact under `outputs/.notes/`.
|
||||||
|
- Failed / learned: None in the restored Tier 1 path; broader Tier 2/Tier 3 behavior still depends on runtime/model/tool availability, just like the other prompt-driven workflows.
|
||||||
|
- Blockers: None for the prompt restoration itself.
|
||||||
|
- Next: If desired, add dedicated docs for `summarize` and decide whether to reopen PR `#69` for historical continuity or leave it closed as superseded by the landed equivalent on `main`.
|
||||||
|
|
||||||
|
### 2026-04-12 13:20 PDT — capital-france (citation verification brief)
|
||||||
|
|
||||||
|
- Objective: Verify citations in the capital-of-France draft and produce a cited verifier brief.
|
||||||
|
- Changed: Read `outputs/.drafts/capital-france-draft.md`, `notes/capital-france-research-web.md`, and `notes/capital-france-legal-context.md`; fetched the three draft URLs directly; wrote `notes/capital-france-brief.md` with inline numbered citations and a numbered direct-URL sources list.
|
||||||
|
- Verified: Confirmed the Insee, Sénat, and Élysée URLs were reachable on 2026-04-12; confirmed Insee and Sénat support the core claim that Paris is the capital of France; marked the Élysée homepage as contextual-only support.
|
||||||
|
- Failed / learned: The Élysée homepage does not explicitly state the core claim, so it should not be used as sole evidence for capital status.
|
||||||
|
- Blockers: None for the verifier brief; any stronger legal memo would still need a more direct constitutional/statutory basis if that specific question is asked.
|
||||||
|
- Next: Promote the brief into the final output or downgrade/remove any claim that leans on the Élysée URL alone.
|
||||||
|
|||||||
115
CONTRIBUTING.md
Normal file
115
CONTRIBUTING.md
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
# Contributing to Feynman
|
||||||
|
|
||||||
|
Feynman is a research-first CLI built on Pi and alphaXiv. This guide is for humans and agents contributing code, prompts, skills, docs, installers, or workflow behavior to the repository.
|
||||||
|
|
||||||
|
## Quick Links
|
||||||
|
|
||||||
|
- GitHub: https://github.com/getcompanion-ai/feynman
|
||||||
|
- Docs: https://feynman.is/docs
|
||||||
|
- Repo agent contract: [AGENTS.md](AGENTS.md)
|
||||||
|
- Issues: https://github.com/getcompanion-ai/feynman/issues
|
||||||
|
|
||||||
|
## What Goes Where
|
||||||
|
|
||||||
|
- CLI/runtime code: `src/`
|
||||||
|
- Bundled prompt templates: `prompts/`
|
||||||
|
- Bundled Pi skills: `skills/`
|
||||||
|
- Bundled Pi subagent prompts: `.feynman/agents/`
|
||||||
|
- Docs site: `website/`
|
||||||
|
- Build/release scripts: `scripts/`
|
||||||
|
- Generated research artifacts: `outputs/`, `papers/`, `notes/`
|
||||||
|
|
||||||
|
If you need to change how bundled subagents behave, edit `.feynman/agents/*.md`. Do not duplicate that behavior in `AGENTS.md`.
|
||||||
|
|
||||||
|
## Before You Open a PR
|
||||||
|
|
||||||
|
1. Start from the latest `main`.
|
||||||
|
2. Use Node.js `22.x` for local development. The supported runtime range is Node.js `20.19.0` through `24.x`; `.nvmrc` pins the preferred local version while `package.json`, `website/package.json`, and the runtime version guard define the broader supported range.
|
||||||
|
3. Install dependencies from the repo root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
nvm use || nvm install
|
||||||
|
npm install
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Run the required checks before asking for review:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm test
|
||||||
|
npm run typecheck
|
||||||
|
npm run build
|
||||||
|
```
|
||||||
|
|
||||||
|
5. If you changed the docs site, also validate the website:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd website
|
||||||
|
npm install
|
||||||
|
npm run build
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Keep the PR focused. Do not mix unrelated cleanup with the real change.
|
||||||
|
7. Add or update tests when behavior changes.
|
||||||
|
8. Update docs, prompts, or skills when the user-facing workflow changes.
|
||||||
|
|
||||||
|
## Contribution Rules
|
||||||
|
|
||||||
|
- Bugs, docs fixes, installer fixes, and focused workflow improvements are good PRs.
|
||||||
|
- Large feature changes should start with an issue or a concrete implementation discussion before code lands.
|
||||||
|
- Avoid refactor-only PRs unless they are necessary to unblock a real fix or requested by a maintainer.
|
||||||
|
- Do not silently change release behavior, installer behavior, or runtime defaults without documenting the reason in the PR.
|
||||||
|
- Use American English in docs, comments, prompts, UI copy, and examples.
|
||||||
|
- Do not add bundled prompts, skills, or docs whose primary purpose is to market, endorse, or funnel users toward a third-party product or service. Product integrations must be justified by user-facing utility and written in neutral language.
|
||||||
|
|
||||||
|
## Repo-Specific Checks
|
||||||
|
|
||||||
|
### Prompt and skill changes
|
||||||
|
|
||||||
|
- New workflows usually live in `prompts/*.md`.
|
||||||
|
- New reusable capabilities usually live in `skills/<name>/SKILL.md`.
|
||||||
|
- Keep skill files concise. Put detailed operational rules in the prompt or in focused reference files only when needed.
|
||||||
|
- If a new workflow should be invokable from the CLI, make sure its prompt frontmatter includes the correct metadata and that the command works through the normal prompt discovery path.
|
||||||
|
|
||||||
|
### Agent and artifact conventions
|
||||||
|
|
||||||
|
- `AGENTS.md` is the repo-level contract for workspace conventions, handoffs, provenance, and output naming.
|
||||||
|
- Long-running research flows should write plan artifacts to `outputs/.plans/` and use `CHANGELOG.md` as a lab notebook when the work is substantial.
|
||||||
|
- Do not update `CHANGELOG.md` for trivial one-shot changes.
|
||||||
|
|
||||||
|
### Release and versioning discipline
|
||||||
|
|
||||||
|
- The curl installer and release docs point users at tagged releases, not arbitrary commits on `main`.
|
||||||
|
- If you ship user-visible fixes after a tag, do not leave the repo in a state where `main` and the latest release advertise the same version string while containing different behavior.
|
||||||
|
- When changing release-sensitive behavior, check the version story across:
|
||||||
|
- `.nvmrc`
|
||||||
|
- `package.json`
|
||||||
|
- `website/package.json`
|
||||||
|
- `scripts/check-node-version.mjs`
|
||||||
|
- install docs in `README.md` and `website/src/content/docs/getting-started/installation.md`
|
||||||
|
|
||||||
|
## AI-Assisted Contributions
|
||||||
|
|
||||||
|
AI-assisted PRs are fine. The contributor is still responsible for the diff.
|
||||||
|
|
||||||
|
- Understand the code you are submitting.
|
||||||
|
- Run the local checks yourself instead of assuming generated code is correct.
|
||||||
|
- Include enough context in the PR description for a reviewer to understand the change quickly.
|
||||||
|
- If an agent updated prompts or skills, verify the instructions match the actual repo behavior.
|
||||||
|
|
||||||
|
## Review Expectations
|
||||||
|
|
||||||
|
- Explain what changed and why.
|
||||||
|
- Call out tradeoffs, follow-up work, and anything intentionally not handled.
|
||||||
|
- Include screenshots for UI changes.
|
||||||
|
- Resolve review comments you addressed before requesting review again.
|
||||||
|
|
||||||
|
## Good First Areas
|
||||||
|
|
||||||
|
Useful contributions usually land in one of these areas:
|
||||||
|
|
||||||
|
- installation and upgrade reliability
|
||||||
|
- research workflow quality
|
||||||
|
- model/provider setup ergonomics
|
||||||
|
- docs clarity
|
||||||
|
- preview and export stability
|
||||||
|
- packaging and release hygiene
|
||||||
145
README.md
145
README.md
@@ -1,44 +1,100 @@
|
|||||||
# Feynman
|
<p align="center">
|
||||||
|
<a href="https://feynman.is">
|
||||||
|
<img src="assets/hero.png" alt="Feynman CLI" width="800" />
|
||||||
|
</a>
|
||||||
|
</p>
|
||||||
|
<p align="center">The open source AI research agent.</p>
|
||||||
|
<p align="center">
|
||||||
|
<a href="https://feynman.is/docs"><img alt="Docs" src="https://img.shields.io/badge/docs-feynman.is-0d9668?style=flat-square" /></a>
|
||||||
|
<a href="https://github.com/getcompanion-ai/feynman/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/getcompanion-ai/feynman?style=flat-square" /></a>
|
||||||
|
</p>
|
||||||
|
|
||||||
The open source AI research agent
|
---
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
|
||||||
|
**macOS / Linux:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl -fsSL https://feynman.is/install | bash
|
curl -fsSL https://feynman.is/install | bash
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Windows (PowerShell):**
|
||||||
|
|
||||||
```powershell
|
```powershell
|
||||||
irm https://feynman.is/install.ps1 | iex
|
irm https://feynman.is/install.ps1 | iex
|
||||||
```
|
```
|
||||||
|
|
||||||
Or install the npm fallback:
|
The one-line installer fetches the latest tagged release. To pin a version, pass it explicitly, for example `curl -fsSL https://feynman.is/install | bash -s -- 0.2.19`.
|
||||||
|
|
||||||
|
The installer downloads a standalone native bundle with its own Node.js runtime.
|
||||||
|
|
||||||
|
To upgrade the standalone app later, rerun the installer. `feynman update` only refreshes installed Pi packages inside Feynman's environment; it does not replace the standalone runtime bundle itself.
|
||||||
|
|
||||||
|
To uninstall the standalone app, remove the launcher and runtime bundle, then optionally remove `~/.feynman` if you also want to delete settings, sessions, and installed package state. If you also want to delete alphaXiv login state, remove `~/.ahub`. See the installation guide for platform-specific paths.
|
||||||
|
|
||||||
|
Local models are supported through the custom-provider flow. For Ollama, run `feynman setup`, choose `Custom provider (baseUrl + API key)`, use `openai-completions`, and point it at `http://localhost:11434/v1`.
|
||||||
|
|
||||||
|
### Skills Only
|
||||||
|
|
||||||
|
If you want just the research skills without the full terminal app:
|
||||||
|
|
||||||
|
**macOS / Linux:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
npm install -g @companion-ai/feynman
|
curl -fsSL https://feynman.is/install-skills | bash
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Windows (PowerShell):**
|
||||||
|
|
||||||
|
```powershell
|
||||||
|
irm https://feynman.is/install-skills.ps1 | iex
|
||||||
|
```
|
||||||
|
|
||||||
|
That installs the skill library into `~/.codex/skills/feynman`.
|
||||||
|
|
||||||
|
For a repo-local install instead:
|
||||||
|
|
||||||
|
**macOS / Linux:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
feynman setup
|
curl -fsSL https://feynman.is/install-skills | bash -s -- --repo
|
||||||
feynman
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Feynman works directly inside your folder or repo. For long-running work, keep the stable repo contract in `AGENTS.md`, the current task brief in `outputs/.plans/`, and the chronological lab notebook in `CHANGELOG.md`.
|
**Windows (PowerShell):**
|
||||||
|
|
||||||
|
```powershell
|
||||||
|
& ([scriptblock]::Create((irm https://feynman.is/install-skills.ps1))) -Scope Repo
|
||||||
|
```
|
||||||
|
|
||||||
|
That installs into `.agents/skills/feynman` under the current repository.
|
||||||
|
|
||||||
|
These installers download the bundled `skills/` and `prompts/` trees plus the repo guidance files referenced by those skills. They do not install the Feynman terminal, bundled Node runtime, auth storage, or Pi packages.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## What you type → what happens
|
### What you type → what happens
|
||||||
|
|
||||||
| Prompt | Result |
|
```
|
||||||
| --- | --- |
|
$ feynman "what do we know about scaling laws"
|
||||||
| `feynman "what do we know about scaling laws"` | Searches papers and web, produces a cited research brief |
|
→ Searches papers and web, produces a cited research brief
|
||||||
| `feynman deepresearch "mechanistic interpretability"` | Multi-agent investigation with parallel researchers, synthesis, verification |
|
|
||||||
| `feynman lit "RLHF alternatives"` | Literature review with consensus, disagreements, open questions |
|
$ feynman deepresearch "mechanistic interpretability"
|
||||||
| `feynman audit 2401.12345` | Compares paper claims against the public codebase |
|
→ Multi-agent investigation with parallel researchers, synthesis, verification
|
||||||
| `feynman replicate "chain-of-thought improves math"` | Asks where to run, then builds a replication plan |
|
|
||||||
| `feynman "summarize this PDF" --prompt paper.pdf` | One-shot mode, no REPL |
|
$ feynman lit "RLHF alternatives"
|
||||||
|
→ Literature review with consensus, disagreements, open questions
|
||||||
|
|
||||||
|
$ feynman audit 2401.12345
|
||||||
|
→ Compares paper claims against the public codebase
|
||||||
|
|
||||||
|
$ feynman replicate "chain-of-thought improves math"
|
||||||
|
→ Replicates experiments on local or cloud GPUs
|
||||||
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Workflows
|
### Workflows
|
||||||
|
|
||||||
Ask naturally or use slash commands as shortcuts.
|
Ask naturally or use slash commands as shortcuts.
|
||||||
|
|
||||||
@@ -48,17 +104,18 @@ Ask naturally or use slash commands as shortcuts.
|
|||||||
| `/lit <topic>` | Literature review from paper search and primary sources |
|
| `/lit <topic>` | Literature review from paper search and primary sources |
|
||||||
| `/review <artifact>` | Simulated peer review with severity and revision plan |
|
| `/review <artifact>` | Simulated peer review with severity and revision plan |
|
||||||
| `/audit <item>` | Paper vs. codebase mismatch audit |
|
| `/audit <item>` | Paper vs. codebase mismatch audit |
|
||||||
| `/replicate <paper>` | Replication plan with environment selection |
|
| `/replicate <paper>` | Replicate experiments on local or cloud GPUs |
|
||||||
| `/compare <topic>` | Source comparison matrix |
|
| `/compare <topic>` | Source comparison matrix |
|
||||||
| `/draft <topic>` | Paper-style draft from research findings |
|
| `/draft <topic>` | Paper-style draft from research findings |
|
||||||
| `/autoresearch <idea>` | Autonomous experiment loop |
|
| `/autoresearch <idea>` | Autonomous experiment loop |
|
||||||
| `/watch <topic>` | Recurring research watch |
|
| `/watch <topic>` | Recurring research watch |
|
||||||
|
| `/outputs` | Browse all research artifacts |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Agents
|
### Agents
|
||||||
|
|
||||||
Four bundled research agents, dispatched automatically or via subagent commands.
|
Four bundled research agents, dispatched automatically.
|
||||||
|
|
||||||
- **Researcher** — gather evidence across papers, web, repos, docs
|
- **Researcher** — gather evidence across papers, web, repos, docs
|
||||||
- **Reviewer** — simulated peer review with severity-graded feedback
|
- **Reviewer** — simulated peer review with severity-graded feedback
|
||||||
@@ -67,46 +124,48 @@ Four bundled research agents, dispatched automatically or via subagent commands.
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Tools
|
### Skills & Tools
|
||||||
|
|
||||||
- **[AlphaXiv](https://www.alphaxiv.org/)** — paper search, Q&A, code reading, persistent annotations
|
- **[AlphaXiv](https://www.alphaxiv.org/)** — paper search, Q&A, code reading, annotations (via `alpha` CLI)
|
||||||
- **Docker** — isolated container execution for safe experiments on your machine
|
- **Docker** — isolated container execution for safe experiments on your machine
|
||||||
- **Web search** — Gemini or Perplexity, zero-config default via signed-in Chromium
|
- **Web search** — Gemini or Perplexity, zero-config default
|
||||||
- **Session search** — optional indexed recall across prior research sessions
|
- **Session search** — indexed recall across prior research sessions
|
||||||
- **Preview** — browser and PDF export of generated artifacts
|
- **Preview** — browser and PDF export of generated artifacts
|
||||||
|
- **Modal** — serverless GPU compute for burst training and inference
|
||||||
|
- **RunPod** — persistent GPU pods with SSH access for long-running experiments
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## CLI
|
### How it works
|
||||||
|
|
||||||
```bash
|
Built on [Pi](https://github.com/badlogic/pi-mono) for the agent runtime, [alphaXiv](https://www.alphaxiv.org/) for paper search and analysis, and CLI tools for compute and execution. Capabilities are delivered as [Pi skills](https://github.com/badlogic/pi-skills) — Markdown instruction files synced to `~/.feynman/agent/skills/` on startup. Every output is source-grounded — claims link to papers, docs, or repos with direct URLs.
|
||||||
feynman # REPL
|
|
||||||
feynman setup # guided setup
|
|
||||||
feynman doctor # diagnose everything
|
|
||||||
feynman status # current config summary
|
|
||||||
feynman model login [provider] # model auth
|
|
||||||
feynman model set <provider/model> # set default model
|
|
||||||
feynman alpha login # alphaXiv auth
|
|
||||||
feynman packages list # core vs optional packages
|
|
||||||
feynman packages install memory # opt into heavier packages on demand
|
|
||||||
feynman search status # web search config
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## How it works
|
### Star History
|
||||||
|
|
||||||
Built on [Pi](https://github.com/badlogic/pi-mono) for the agent runtime, [alphaXiv](https://www.alphaxiv.org/) for paper search and analysis, and [Docker](https://www.docker.com/) for isolated local execution
|
<a href="https://www.star-history.com/?repos=getcompanion-ai%2Ffeynman&type=date&legend=top-left">
|
||||||
|
<picture>
|
||||||
Every output is source-grounded — claims link to papers, docs, or repos with direct URLs
|
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/chart?repos=getcompanion-ai/feynman&type=date&theme=dark&legend=top-left" />
|
||||||
|
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/chart?repos=getcompanion-ai/feynman&type=date&legend=top-left" />
|
||||||
|
<img alt="Star History Chart" src="https://api.star-history.com/chart?repos=getcompanion-ai/feynman&type=date&legend=top-left" />
|
||||||
|
</picture>
|
||||||
|
</a>
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Contributing
|
### Contributing
|
||||||
|
|
||||||
|
See [CONTRIBUTING.md](CONTRIBUTING.md) for the full contributor guide.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
git clone https://github.com/getcompanion-ai/feynman.git
|
git clone https://github.com/getcompanion-ai/feynman.git
|
||||||
cd feynman && npm install && npm run start
|
cd feynman
|
||||||
|
nvm use || nvm install
|
||||||
|
npm install
|
||||||
|
npm test
|
||||||
|
npm run typecheck
|
||||||
|
npm run build
|
||||||
```
|
```
|
||||||
|
|
||||||
[Docs](https://feynman.is/docs) · [MIT License](LICENSE)
|
[Docs](https://feynman.is/docs) · [MIT License](LICENSE)
|
||||||
|
|||||||
BIN
assets/hero-raw.png
Normal file
BIN
assets/hero-raw.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 884 KiB |
BIN
assets/hero.png
Normal file
BIN
assets/hero.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.7 MiB |
@@ -1,8 +1,41 @@
|
|||||||
#!/usr/bin/env node
|
#!/usr/bin/env node
|
||||||
const v = process.versions.node.split(".").map(Number);
|
import { resolve } from "node:path";
|
||||||
if (v[0] < 20) {
|
import { pathToFileURL } from "node:url";
|
||||||
console.error(`feynman requires Node.js 20 or later (you have ${process.versions.node})`);
|
|
||||||
console.error("upgrade: https://nodejs.org or nvm install 20");
|
const MIN_NODE_VERSION = "20.19.0";
|
||||||
|
const MAX_NODE_MAJOR = 24;
|
||||||
|
const PREFERRED_NODE_MAJOR = 22;
|
||||||
|
|
||||||
|
function parseNodeVersion(version) {
|
||||||
|
const [major = "0", minor = "0", patch = "0"] = version.replace(/^v/, "").split(".");
|
||||||
|
return {
|
||||||
|
major: Number.parseInt(major, 10) || 0,
|
||||||
|
minor: Number.parseInt(minor, 10) || 0,
|
||||||
|
patch: Number.parseInt(patch, 10) || 0,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function compareNodeVersions(left, right) {
|
||||||
|
if (left.major !== right.major) return left.major - right.major;
|
||||||
|
if (left.minor !== right.minor) return left.minor - right.minor;
|
||||||
|
return left.patch - right.patch;
|
||||||
|
}
|
||||||
|
|
||||||
|
const parsedNodeVersion = parseNodeVersion(process.versions.node);
|
||||||
|
if (compareNodeVersions(parsedNodeVersion, parseNodeVersion(MIN_NODE_VERSION)) < 0 || parsedNodeVersion.major > MAX_NODE_MAJOR) {
|
||||||
|
const isWindows = process.platform === "win32";
|
||||||
|
console.error(`feynman supports Node.js ${MIN_NODE_VERSION} through ${MAX_NODE_MAJOR}.x (detected ${process.versions.node}).`);
|
||||||
|
console.error(parsedNodeVersion.major > MAX_NODE_MAJOR
|
||||||
|
? "This newer Node release is not supported yet because native Pi packages may fail to build."
|
||||||
|
: isWindows
|
||||||
|
? "Install a supported Node.js release from https://nodejs.org, or use the standalone installer:"
|
||||||
|
: `Switch to a supported Node release with \`nvm install ${PREFERRED_NODE_MAJOR} && nvm use ${PREFERRED_NODE_MAJOR}\`, or use the standalone installer:`);
|
||||||
|
console.error(isWindows
|
||||||
|
? "irm https://feynman.is/install.ps1 | iex"
|
||||||
|
: "curl -fsSL https://feynman.is/install | bash");
|
||||||
process.exit(1);
|
process.exit(1);
|
||||||
}
|
}
|
||||||
import("../dist/index.js");
|
const here = import.meta.dirname;
|
||||||
|
|
||||||
|
await import(pathToFileURL(resolve(here, "..", "scripts", "patch-embedded-pi.mjs")).href);
|
||||||
|
await import(pathToFileURL(resolve(here, "..", "dist", "index.js")).href);
|
||||||
|
|||||||
@@ -1,25 +1,26 @@
|
|||||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||||
|
|
||||||
import { registerAlphaCommands, registerAlphaTools } from "./research-tools/alpha.js";
|
import { registerAlphaTools } from "./research-tools/alpha.js";
|
||||||
|
import { registerDiscoveryCommands } from "./research-tools/discovery.js";
|
||||||
|
import { registerFeynmanModelCommand } from "./research-tools/feynman-model.js";
|
||||||
import { installFeynmanHeader } from "./research-tools/header.js";
|
import { installFeynmanHeader } from "./research-tools/header.js";
|
||||||
import { registerHelpCommand } from "./research-tools/help.js";
|
import { registerHelpCommand } from "./research-tools/help.js";
|
||||||
import { registerInitCommand, registerPreviewTool, registerSessionSearchTool } from "./research-tools/project.js";
|
import { registerInitCommand, registerOutputsCommand } from "./research-tools/project.js";
|
||||||
|
import { registerServiceTierControls } from "./research-tools/service-tier.js";
|
||||||
|
|
||||||
export default function researchTools(pi: ExtensionAPI): void {
|
export default function researchTools(pi: ExtensionAPI): void {
|
||||||
const cache: { agentSummaryPromise?: Promise<{ agents: string[]; chains: string[] }> } = {};
|
const cache: { agentSummaryPromise?: Promise<{ agents: string[]; chains: string[] }> } = {};
|
||||||
|
|
||||||
|
// Pi 0.66.x folds post-switch/resume lifecycle into session_start.
|
||||||
pi.on("session_start", async (_event, ctx) => {
|
pi.on("session_start", async (_event, ctx) => {
|
||||||
await installFeynmanHeader(pi, ctx, cache);
|
await installFeynmanHeader(pi, ctx, cache);
|
||||||
});
|
});
|
||||||
|
|
||||||
pi.on("session_switch", async (_event, ctx) => {
|
registerAlphaTools(pi);
|
||||||
await installFeynmanHeader(pi, ctx, cache);
|
registerDiscoveryCommands(pi);
|
||||||
});
|
registerFeynmanModelCommand(pi);
|
||||||
|
|
||||||
registerAlphaCommands(pi);
|
|
||||||
registerHelpCommand(pi);
|
registerHelpCommand(pi);
|
||||||
registerInitCommand(pi);
|
registerInitCommand(pi);
|
||||||
registerSessionSearchTool(pi);
|
registerOutputsCommand(pi);
|
||||||
registerAlphaTools(pi);
|
registerServiceTierControls(pi);
|
||||||
registerPreviewTool(pi);
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,136 +1,63 @@
|
|||||||
import {
|
import {
|
||||||
annotatePaper,
|
|
||||||
askPaper,
|
askPaper,
|
||||||
|
annotatePaper,
|
||||||
clearPaperAnnotation,
|
clearPaperAnnotation,
|
||||||
disconnect,
|
|
||||||
getPaper,
|
getPaper,
|
||||||
getUserName as getAlphaUserName,
|
|
||||||
isLoggedIn as isAlphaLoggedIn,
|
|
||||||
listPaperAnnotations,
|
listPaperAnnotations,
|
||||||
login as loginAlpha,
|
|
||||||
logout as logoutAlpha,
|
|
||||||
readPaperCode,
|
readPaperCode,
|
||||||
searchPapers,
|
searchPapers,
|
||||||
} from "@companion-ai/alpha-hub/lib";
|
} from "@companion-ai/alpha-hub/lib";
|
||||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||||
import { Type } from "@sinclair/typebox";
|
import { Type } from "@sinclair/typebox";
|
||||||
|
|
||||||
import { getExtensionCommandSpec } from "../../metadata/commands.mjs";
|
function formatText(value: unknown): string {
|
||||||
import { formatToolText } from "./shared.js";
|
if (typeof value === "string") return value;
|
||||||
|
return JSON.stringify(value, null, 2);
|
||||||
export function registerAlphaCommands(pi: ExtensionAPI): void {
|
|
||||||
pi.registerCommand("alpha-login", {
|
|
||||||
description: getExtensionCommandSpec("alpha-login")?.description ?? "Sign in to alphaXiv from inside Feynman.",
|
|
||||||
handler: async (_args, ctx) => {
|
|
||||||
if (isAlphaLoggedIn()) {
|
|
||||||
const name = getAlphaUserName();
|
|
||||||
ctx.ui.notify(name ? `alphaXiv already connected as ${name}` : "alphaXiv already connected", "info");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
await loginAlpha();
|
|
||||||
const name = getAlphaUserName();
|
|
||||||
ctx.ui.notify(name ? `alphaXiv connected as ${name}` : "alphaXiv login complete", "info");
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
pi.registerCommand("alpha-logout", {
|
|
||||||
description: getExtensionCommandSpec("alpha-logout")?.description ?? "Clear alphaXiv auth from inside Feynman.",
|
|
||||||
handler: async (_args, ctx) => {
|
|
||||||
logoutAlpha();
|
|
||||||
ctx.ui.notify("alphaXiv auth cleared", "info");
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
pi.registerCommand("alpha-status", {
|
|
||||||
description: getExtensionCommandSpec("alpha-status")?.description ?? "Show alphaXiv authentication status.",
|
|
||||||
handler: async (_args, ctx) => {
|
|
||||||
if (!isAlphaLoggedIn()) {
|
|
||||||
ctx.ui.notify("alphaXiv not connected", "warning");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const name = getAlphaUserName();
|
|
||||||
ctx.ui.notify(name ? `alphaXiv connected as ${name}` : "alphaXiv connected", "info");
|
|
||||||
},
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export function registerAlphaTools(pi: ExtensionAPI): void {
|
export function registerAlphaTools(pi: ExtensionAPI): void {
|
||||||
pi.registerTool({
|
pi.registerTool({
|
||||||
name: "alpha_search",
|
name: "alpha_search",
|
||||||
label: "Alpha Search",
|
label: "Alpha Search",
|
||||||
description: "Search papers through alphaXiv using semantic, keyword, both, agentic, or all retrieval modes.",
|
description:
|
||||||
|
"Search research papers through alphaXiv. Modes: semantic (default, use 2-3 sentence queries), keyword (exact terms), agentic (broad multi-turn retrieval), both, or all.",
|
||||||
parameters: Type.Object({
|
parameters: Type.Object({
|
||||||
query: Type.String({ description: "Paper search query." }),
|
query: Type.String({ description: "Search query." }),
|
||||||
mode: Type.Optional(
|
mode: Type.Optional(
|
||||||
Type.String({
|
Type.String({ description: "Search mode: semantic, keyword, both, agentic, or all." }),
|
||||||
description: "Search mode: semantic, keyword, both, agentic, or all.",
|
|
||||||
}),
|
|
||||||
),
|
),
|
||||||
}),
|
}),
|
||||||
async execute(_toolCallId, params) {
|
async execute(_toolCallId, params) {
|
||||||
try {
|
const result = await searchPapers(params.query, params.mode?.trim() || "semantic");
|
||||||
const result = await searchPapers(params.query, params.mode?.trim() || "all");
|
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||||
return {
|
|
||||||
content: [{ type: "text", text: formatToolText(result) }],
|
|
||||||
details: result,
|
|
||||||
};
|
|
||||||
} finally {
|
|
||||||
await disconnect();
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
pi.registerTool({
|
pi.registerTool({
|
||||||
name: "alpha_get_paper",
|
name: "alpha_get_paper",
|
||||||
label: "Alpha Get Paper",
|
label: "Alpha Get Paper",
|
||||||
description: "Fetch a paper report or full text, plus any local annotation, using alphaXiv.",
|
description: "Fetch a paper's AI-generated report (or raw full text) plus any local annotation.",
|
||||||
parameters: Type.Object({
|
parameters: Type.Object({
|
||||||
paper: Type.String({
|
paper: Type.String({ description: "arXiv ID, arXiv URL, or alphaXiv URL." }),
|
||||||
description: "arXiv ID, arXiv URL, or alphaXiv URL.",
|
fullText: Type.Optional(Type.Boolean({ description: "Return raw full text instead of AI report." })),
|
||||||
}),
|
|
||||||
fullText: Type.Optional(
|
|
||||||
Type.Boolean({
|
|
||||||
description: "Return raw full text instead of the AI report.",
|
|
||||||
}),
|
|
||||||
),
|
|
||||||
}),
|
}),
|
||||||
async execute(_toolCallId, params) {
|
async execute(_toolCallId, params) {
|
||||||
try {
|
|
||||||
const result = await getPaper(params.paper, { fullText: params.fullText });
|
const result = await getPaper(params.paper, { fullText: params.fullText });
|
||||||
return {
|
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||||
content: [{ type: "text", text: formatToolText(result) }],
|
|
||||||
details: result,
|
|
||||||
};
|
|
||||||
} finally {
|
|
||||||
await disconnect();
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
pi.registerTool({
|
pi.registerTool({
|
||||||
name: "alpha_ask_paper",
|
name: "alpha_ask_paper",
|
||||||
label: "Alpha Ask Paper",
|
label: "Alpha Ask Paper",
|
||||||
description: "Ask a targeted question about a paper using alphaXiv's PDF analysis.",
|
description: "Ask a targeted question about a paper. Uses AI to analyze the PDF and answer.",
|
||||||
parameters: Type.Object({
|
parameters: Type.Object({
|
||||||
paper: Type.String({
|
paper: Type.String({ description: "arXiv ID, arXiv URL, or alphaXiv URL." }),
|
||||||
description: "arXiv ID, arXiv URL, or alphaXiv URL.",
|
question: Type.String({ description: "Question about the paper." }),
|
||||||
}),
|
|
||||||
question: Type.String({
|
|
||||||
description: "Question to ask about the paper.",
|
|
||||||
}),
|
|
||||||
}),
|
}),
|
||||||
async execute(_toolCallId, params) {
|
async execute(_toolCallId, params) {
|
||||||
try {
|
|
||||||
const result = await askPaper(params.paper, params.question);
|
const result = await askPaper(params.paper, params.question);
|
||||||
return {
|
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||||
content: [{ type: "text", text: formatToolText(result) }],
|
|
||||||
details: result,
|
|
||||||
};
|
|
||||||
} finally {
|
|
||||||
await disconnect();
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -139,33 +66,17 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
|||||||
label: "Alpha Annotate Paper",
|
label: "Alpha Annotate Paper",
|
||||||
description: "Write or clear a persistent local annotation for a paper.",
|
description: "Write or clear a persistent local annotation for a paper.",
|
||||||
parameters: Type.Object({
|
parameters: Type.Object({
|
||||||
paper: Type.String({
|
paper: Type.String({ description: "Paper ID (arXiv ID or URL)." }),
|
||||||
description: "Paper ID to annotate.",
|
note: Type.Optional(Type.String({ description: "Annotation text. Omit when clear=true." })),
|
||||||
}),
|
clear: Type.Optional(Type.Boolean({ description: "Clear the existing annotation." })),
|
||||||
note: Type.Optional(
|
|
||||||
Type.String({
|
|
||||||
description: "Annotation text. Omit when clear=true.",
|
|
||||||
}),
|
|
||||||
),
|
|
||||||
clear: Type.Optional(
|
|
||||||
Type.Boolean({
|
|
||||||
description: "Clear the existing annotation instead of writing one.",
|
|
||||||
}),
|
|
||||||
),
|
|
||||||
}),
|
}),
|
||||||
async execute(_toolCallId, params) {
|
async execute(_toolCallId, params) {
|
||||||
const result = params.clear
|
const result = params.clear
|
||||||
? await clearPaperAnnotation(params.paper)
|
? await clearPaperAnnotation(params.paper)
|
||||||
: params.note
|
: params.note
|
||||||
? await annotatePaper(params.paper, params.note)
|
? await annotatePaper(params.paper, params.note)
|
||||||
: (() => {
|
: (() => { throw new Error("Provide either note or clear=true."); })();
|
||||||
throw new Error("Provide either note or clear=true.");
|
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||||
})();
|
|
||||||
|
|
||||||
return {
|
|
||||||
content: [{ type: "text", text: formatToolText(result) }],
|
|
||||||
details: result,
|
|
||||||
};
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -176,37 +87,21 @@ export function registerAlphaTools(pi: ExtensionAPI): void {
|
|||||||
parameters: Type.Object({}),
|
parameters: Type.Object({}),
|
||||||
async execute() {
|
async execute() {
|
||||||
const result = await listPaperAnnotations();
|
const result = await listPaperAnnotations();
|
||||||
return {
|
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||||
content: [{ type: "text", text: formatToolText(result) }],
|
|
||||||
details: result,
|
|
||||||
};
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
pi.registerTool({
|
pi.registerTool({
|
||||||
name: "alpha_read_code",
|
name: "alpha_read_code",
|
||||||
label: "Alpha Read Code",
|
label: "Alpha Read Code",
|
||||||
description: "Read files from a paper's GitHub repository through alphaXiv.",
|
description: "Read files from a paper's GitHub repository. Use '/' for repo overview.",
|
||||||
parameters: Type.Object({
|
parameters: Type.Object({
|
||||||
githubUrl: Type.String({
|
githubUrl: Type.String({ description: "GitHub repository URL." }),
|
||||||
description: "GitHub repository URL for the paper implementation.",
|
path: Type.Optional(Type.String({ description: "File or directory path. Default: '/'" })),
|
||||||
}),
|
|
||||||
path: Type.Optional(
|
|
||||||
Type.String({
|
|
||||||
description: "Repository path to inspect. Use / for the repo overview.",
|
|
||||||
}),
|
|
||||||
),
|
|
||||||
}),
|
}),
|
||||||
async execute(_toolCallId, params) {
|
async execute(_toolCallId, params) {
|
||||||
try {
|
|
||||||
const result = await readPaperCode(params.githubUrl, params.path?.trim() || "/");
|
const result = await readPaperCode(params.githubUrl, params.path?.trim() || "/");
|
||||||
return {
|
return { content: [{ type: "text", text: formatText(result) }], details: result };
|
||||||
content: [{ type: "text", text: formatToolText(result) }],
|
|
||||||
details: result,
|
|
||||||
};
|
|
||||||
} finally {
|
|
||||||
await disconnect();
|
|
||||||
}
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|||||||
130
extensions/research-tools/discovery.ts
Normal file
130
extensions/research-tools/discovery.ts
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
import { existsSync, readFileSync } from "node:fs";
|
||||||
|
import { homedir } from "node:os";
|
||||||
|
import { resolve } from "node:path";
|
||||||
|
|
||||||
|
import type { ExtensionAPI, SlashCommandInfo, ToolInfo } from "@mariozechner/pi-coding-agent";
|
||||||
|
|
||||||
|
function resolveFeynmanSettingsPath(): string {
|
||||||
|
const configured = process.env.PI_CODING_AGENT_DIR?.trim();
|
||||||
|
const agentDir = configured
|
||||||
|
? configured.startsWith("~/")
|
||||||
|
? resolve(homedir(), configured.slice(2))
|
||||||
|
: resolve(configured)
|
||||||
|
: resolve(homedir(), ".feynman", "agent");
|
||||||
|
return resolve(agentDir, "settings.json");
|
||||||
|
}
|
||||||
|
|
||||||
|
function readConfiguredPackages(): string[] {
|
||||||
|
const settingsPath = resolveFeynmanSettingsPath();
|
||||||
|
if (!existsSync(settingsPath)) return [];
|
||||||
|
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(readFileSync(settingsPath, "utf8")) as { packages?: unknown[] };
|
||||||
|
return Array.isArray(parsed.packages)
|
||||||
|
? parsed.packages
|
||||||
|
.map((entry) => {
|
||||||
|
if (typeof entry === "string") return entry;
|
||||||
|
if (!entry || typeof entry !== "object") return undefined;
|
||||||
|
const record = entry as { source?: unknown };
|
||||||
|
return typeof record.source === "string" ? record.source : undefined;
|
||||||
|
})
|
||||||
|
.filter((entry): entry is string => Boolean(entry))
|
||||||
|
: [];
|
||||||
|
} catch {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatSourceLabel(sourceInfo: { source: string; path: string }): string {
|
||||||
|
if (sourceInfo.source === "local") {
|
||||||
|
if (sourceInfo.path.includes("/prompts/")) return "workflow";
|
||||||
|
if (sourceInfo.path.includes("/extensions/")) return "extension";
|
||||||
|
return "local";
|
||||||
|
}
|
||||||
|
return sourceInfo.source.replace(/^npm:/, "").replace(/^git:/, "");
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatCommandLine(command: SlashCommandInfo): string {
|
||||||
|
const source = formatSourceLabel(command.sourceInfo);
|
||||||
|
return `/${command.name} — ${command.description ?? ""} [${source}]`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function summarizeToolParameters(tool: ToolInfo): string {
|
||||||
|
const properties =
|
||||||
|
tool.parameters &&
|
||||||
|
typeof tool.parameters === "object" &&
|
||||||
|
"properties" in tool.parameters &&
|
||||||
|
tool.parameters.properties &&
|
||||||
|
typeof tool.parameters.properties === "object"
|
||||||
|
? Object.keys(tool.parameters.properties as Record<string, unknown>)
|
||||||
|
: [];
|
||||||
|
return properties.length > 0 ? properties.join(", ") : "no parameters";
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatToolLine(tool: ToolInfo): string {
|
||||||
|
const source = formatSourceLabel(tool.sourceInfo);
|
||||||
|
return `${tool.name} — ${tool.description ?? ""} [${source}]`;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function registerDiscoveryCommands(pi: ExtensionAPI): void {
|
||||||
|
pi.registerCommand("commands", {
|
||||||
|
description: "Browse all available slash commands, including package and built-in commands.",
|
||||||
|
handler: async (_args, ctx) => {
|
||||||
|
const commands = pi
|
||||||
|
.getCommands()
|
||||||
|
.slice()
|
||||||
|
.sort((left, right) => left.name.localeCompare(right.name));
|
||||||
|
const items = commands.map((command) => formatCommandLine(command));
|
||||||
|
const selected = await ctx.ui.select("Slash Commands", items);
|
||||||
|
if (!selected) return;
|
||||||
|
ctx.ui.setEditorText(selected.split(" — ")[0] ?? "");
|
||||||
|
ctx.ui.notify(`Prefilled ${selected.split(" — ")[0]}`, "info");
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
pi.registerCommand("tools", {
|
||||||
|
description: "Browse all callable tools with their source and parameter summary.",
|
||||||
|
handler: async (_args, ctx) => {
|
||||||
|
const tools = pi
|
||||||
|
.getAllTools()
|
||||||
|
.slice()
|
||||||
|
.sort((left, right) => left.name.localeCompare(right.name));
|
||||||
|
const selected = await ctx.ui.select("Tools", tools.map((tool) => formatToolLine(tool)));
|
||||||
|
if (!selected) return;
|
||||||
|
|
||||||
|
const toolName = selected.split(" — ")[0] ?? selected;
|
||||||
|
const tool = tools.find((entry) => entry.name === toolName);
|
||||||
|
if (!tool) return;
|
||||||
|
ctx.ui.notify(`${tool.name}: ${summarizeToolParameters(tool)}`, "info");
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
pi.registerCommand("capabilities", {
|
||||||
|
description: "Show installed packages, discovery entrypoints, and high-level runtime capability counts.",
|
||||||
|
handler: async (_args, ctx) => {
|
||||||
|
const commands = pi.getCommands();
|
||||||
|
const tools = pi.getAllTools();
|
||||||
|
const workflows = commands.filter((command) => formatSourceLabel(command.sourceInfo) === "workflow");
|
||||||
|
const packages = readConfiguredPackages();
|
||||||
|
const items = [
|
||||||
|
`Commands: ${commands.length}`,
|
||||||
|
`Workflows: ${workflows.length}`,
|
||||||
|
`Tools: ${tools.length}`,
|
||||||
|
`Packages: ${packages.length}`,
|
||||||
|
"--- Discovery ---",
|
||||||
|
"/commands — browse slash commands",
|
||||||
|
"/tools — inspect callable tools",
|
||||||
|
"/hotkeys — view keyboard shortcuts",
|
||||||
|
"/service-tier — set request tier for supported providers",
|
||||||
|
"--- Installed Packages ---",
|
||||||
|
...packages.map((pkg) => pkg),
|
||||||
|
];
|
||||||
|
const selected = await ctx.ui.select("Capabilities", items);
|
||||||
|
if (!selected || selected.startsWith("---")) return;
|
||||||
|
if (selected.startsWith("/")) {
|
||||||
|
ctx.ui.setEditorText(selected.split(" — ")[0] ?? selected);
|
||||||
|
ctx.ui.notify(`Prefilled ${selected.split(" — ")[0]}`, "info");
|
||||||
|
}
|
||||||
|
},
|
||||||
|
});
|
||||||
|
}
|
||||||
309
extensions/research-tools/feynman-model.ts
Normal file
309
extensions/research-tools/feynman-model.ts
Normal file
@@ -0,0 +1,309 @@
|
|||||||
|
import { type Dirent, existsSync, readdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||||
|
import { homedir } from "node:os";
|
||||||
|
import { basename, join, resolve } from "node:path";
|
||||||
|
|
||||||
|
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||||
|
|
||||||
|
const FRONTMATTER_PATTERN = /^---\n([\s\S]*?)\n---\n?([\s\S]*)$/;
|
||||||
|
const INHERIT_MAIN = "__inherit_main__";
|
||||||
|
|
||||||
|
type FrontmatterDocument = {
|
||||||
|
lines: string[];
|
||||||
|
body: string;
|
||||||
|
eol: string;
|
||||||
|
trailingNewline: boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
type SubagentModelConfig = {
|
||||||
|
agent: string;
|
||||||
|
model?: string;
|
||||||
|
filePath: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
type SelectOption<T> = {
|
||||||
|
label: string;
|
||||||
|
value: T;
|
||||||
|
};
|
||||||
|
|
||||||
|
type CommandContext = Parameters<Parameters<ExtensionAPI["registerCommand"]>[1]["handler"]>[1];
|
||||||
|
|
||||||
|
type TargetChoice =
|
||||||
|
| { type: "main" }
|
||||||
|
| { type: "subagent"; agent: string; model?: string };
|
||||||
|
|
||||||
|
function expandHomePath(value: string): string {
|
||||||
|
if (value === "~") return homedir();
|
||||||
|
if (value.startsWith("~/")) return resolve(homedir(), value.slice(2));
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
function resolveFeynmanAgentDir(): string {
|
||||||
|
const configured = process.env.PI_CODING_AGENT_DIR ?? process.env.FEYNMAN_CODING_AGENT_DIR;
|
||||||
|
if (configured?.trim()) {
|
||||||
|
return resolve(expandHomePath(configured.trim()));
|
||||||
|
}
|
||||||
|
return resolve(homedir(), ".feynman", "agent");
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatModelSpec(model: { provider: string; id: string }): string {
|
||||||
|
return `${model.provider}/${model.id}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function detectEol(text: string): string {
|
||||||
|
return text.includes("\r\n") ? "\r\n" : "\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizeLineEndings(text: string): string {
|
||||||
|
return text.replace(/\r\n/g, "\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseFrontmatterDocument(text: string): FrontmatterDocument | null {
|
||||||
|
const normalized = normalizeLineEndings(text);
|
||||||
|
const match = normalized.match(FRONTMATTER_PATTERN);
|
||||||
|
if (!match) return null;
|
||||||
|
|
||||||
|
return {
|
||||||
|
lines: match[1].split("\n"),
|
||||||
|
body: match[2] ?? "",
|
||||||
|
eol: detectEol(text),
|
||||||
|
trailingNewline: normalized.endsWith("\n"),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function serializeFrontmatterDocument(document: FrontmatterDocument): string {
|
||||||
|
const normalized = `---\n${document.lines.join("\n")}\n---\n${document.body}`;
|
||||||
|
const withTrailingNewline =
|
||||||
|
document.trailingNewline && !normalized.endsWith("\n") ? `${normalized}\n` : normalized;
|
||||||
|
return document.eol === "\n" ? withTrailingNewline : withTrailingNewline.replace(/\n/g, "\r\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseFrontmatterKey(line: string): string | undefined {
|
||||||
|
const match = line.match(/^\s*([A-Za-z0-9_-]+)\s*:/);
|
||||||
|
return match?.[1]?.toLowerCase();
|
||||||
|
}
|
||||||
|
|
||||||
|
function getFrontmatterValue(lines: string[], key: string): string | undefined {
|
||||||
|
const normalizedKey = key.toLowerCase();
|
||||||
|
for (const line of lines) {
|
||||||
|
const parsedKey = parseFrontmatterKey(line);
|
||||||
|
if (parsedKey !== normalizedKey) continue;
|
||||||
|
const separatorIndex = line.indexOf(":");
|
||||||
|
if (separatorIndex === -1) return undefined;
|
||||||
|
const value = line.slice(separatorIndex + 1).trim();
|
||||||
|
return value.length > 0 ? value : undefined;
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
function upsertFrontmatterValue(lines: string[], key: string, value: string): string[] {
|
||||||
|
const normalizedKey = key.toLowerCase();
|
||||||
|
const nextLines = [...lines];
|
||||||
|
const existingIndex = nextLines.findIndex((line) => parseFrontmatterKey(line) === normalizedKey);
|
||||||
|
const serialized = `${key}: ${value}`;
|
||||||
|
|
||||||
|
if (existingIndex !== -1) {
|
||||||
|
nextLines[existingIndex] = serialized;
|
||||||
|
return nextLines;
|
||||||
|
}
|
||||||
|
|
||||||
|
const descriptionIndex = nextLines.findIndex((line) => parseFrontmatterKey(line) === "description");
|
||||||
|
const nameIndex = nextLines.findIndex((line) => parseFrontmatterKey(line) === "name");
|
||||||
|
const insertIndex = descriptionIndex !== -1 ? descriptionIndex + 1 : nameIndex !== -1 ? nameIndex + 1 : nextLines.length;
|
||||||
|
nextLines.splice(insertIndex, 0, serialized);
|
||||||
|
return nextLines;
|
||||||
|
}
|
||||||
|
|
||||||
|
function removeFrontmatterKey(lines: string[], key: string): string[] {
|
||||||
|
const normalizedKey = key.toLowerCase();
|
||||||
|
return lines.filter((line) => parseFrontmatterKey(line) !== normalizedKey);
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizeAgentName(name: string): string {
|
||||||
|
return name.trim().toLowerCase();
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Path of the `agents/` subdirectory inside the agent configuration directory. */
function getAgentsDir(agentDir: string): string {
  return join(agentDir, "agents");
}
|
||||||
|
|
||||||
|
function listAgentFiles(agentsDir: string): string[] {
|
||||||
|
if (!existsSync(agentsDir)) return [];
|
||||||
|
|
||||||
|
return readdirSync(agentsDir, { withFileTypes: true })
|
||||||
|
.filter((entry: Dirent) => (entry.isFile() || entry.isSymbolicLink()) && entry.name.endsWith(".md"))
|
||||||
|
.filter((entry) => !entry.name.endsWith(".chain.md"))
|
||||||
|
.map((entry) => join(agentsDir, entry.name));
|
||||||
|
}
|
||||||
|
|
||||||
|
function readAgentConfig(filePath: string): SubagentModelConfig {
|
||||||
|
const content = readFileSync(filePath, "utf8");
|
||||||
|
const parsed = parseFrontmatterDocument(content);
|
||||||
|
const fallbackName = basename(filePath, ".md");
|
||||||
|
if (!parsed) return { agent: fallbackName, filePath };
|
||||||
|
|
||||||
|
return {
|
||||||
|
agent: getFrontmatterValue(parsed.lines, "name") ?? fallbackName,
|
||||||
|
model: getFrontmatterValue(parsed.lines, "model"),
|
||||||
|
filePath,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function listSubagentModelConfigs(agentDir: string): SubagentModelConfig[] {
|
||||||
|
return listAgentFiles(getAgentsDir(agentDir))
|
||||||
|
.map((filePath) => readAgentConfig(filePath))
|
||||||
|
.sort((left, right) => left.agent.localeCompare(right.agent));
|
||||||
|
}
|
||||||
|
|
||||||
|
function findAgentConfig(configs: SubagentModelConfig[], agentName: string): SubagentModelConfig | undefined {
|
||||||
|
const normalized = normalizeAgentName(agentName);
|
||||||
|
return (
|
||||||
|
configs.find((config) => normalizeAgentName(config.agent) === normalized) ??
|
||||||
|
configs.find((config) => normalizeAgentName(basename(config.filePath, ".md")) === normalized)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function getAgentConfigOrThrow(agentDir: string, agentName: string): SubagentModelConfig {
|
||||||
|
const configs = listSubagentModelConfigs(agentDir);
|
||||||
|
const target = findAgentConfig(configs, agentName);
|
||||||
|
if (target) return target;
|
||||||
|
|
||||||
|
if (configs.length === 0) {
|
||||||
|
throw new Error(`No subagent definitions found in ${getAgentsDir(agentDir)}.`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const availableAgents = configs.map((config) => config.agent).join(", ");
|
||||||
|
throw new Error(`Unknown subagent: ${agentName}. Available agents: ${availableAgents}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Persist a model override for a subagent by editing the `model:` key in its
 * definition file's frontmatter. When the file has no frontmatter at all, a
 * minimal block (`name` + `model`) is prepended instead.
 *
 * Throws when the model spec is blank or the agent is unknown
 * (via getAgentConfigOrThrow).
 */
function setSubagentModel(agentDir: string, agentName: string, modelSpec: string): void {
  const normalizedModelSpec = modelSpec.trim();
  if (!normalizedModelSpec) throw new Error("Model spec cannot be empty.");

  const target = getAgentConfigOrThrow(agentDir, agentName);
  const content = readFileSync(target.filePath, "utf8");
  const parsed = parseFrontmatterDocument(content);

  if (!parsed) {
    // No frontmatter yet: prepend a new block, reusing the file's EOL style.
    const eol = detectEol(content);
    const injected = `---${eol}name: ${target.agent}${eol}model: ${normalizedModelSpec}${eol}---${eol}${content}`;
    writeFileSync(target.filePath, injected, "utf8");
    return;
  }

  const nextLines = upsertFrontmatterValue(parsed.lines, "model", normalizedModelSpec);
  // Only rewrite the file when the frontmatter actually changed.
  if (nextLines.join("\n") !== parsed.lines.join("\n")) {
    writeFileSync(target.filePath, serializeFrontmatterDocument({ ...parsed, lines: nextLines }), "utf8");
  }
}
|
||||||
|
|
||||||
|
function unsetSubagentModel(agentDir: string, agentName: string): void {
|
||||||
|
const target = getAgentConfigOrThrow(agentDir, agentName);
|
||||||
|
const content = readFileSync(target.filePath, "utf8");
|
||||||
|
const parsed = parseFrontmatterDocument(content);
|
||||||
|
if (!parsed) return;
|
||||||
|
|
||||||
|
const nextLines = removeFrontmatterKey(parsed.lines, "model");
|
||||||
|
if (nextLines.join("\n") !== parsed.lines.join("\n")) {
|
||||||
|
writeFileSync(target.filePath, serializeFrontmatterDocument({ ...parsed, lines: nextLines }), "utf8");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function selectOption<T>(
|
||||||
|
ctx: CommandContext,
|
||||||
|
title: string,
|
||||||
|
options: SelectOption<T>[],
|
||||||
|
): Promise<T | undefined> {
|
||||||
|
const selected = await ctx.ui.select(
|
||||||
|
title,
|
||||||
|
options.map((option) => option.label),
|
||||||
|
);
|
||||||
|
if (!selected) return undefined;
|
||||||
|
return options.find((option) => option.label === selected)?.value;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Register the `/feynman-model` command: an interactive two-step menu for
 * setting the main model or a per-subagent model override (overrides are
 * persisted in each agent file's frontmatter). Requires interactive UI.
 */
export function registerFeynmanModelCommand(pi: ExtensionAPI): void {
  pi.registerCommand("feynman-model", {
    description: "Open Feynman model menu (main + per-subagent overrides).",
    handler: async (_args, ctx) => {
      if (!ctx.hasUI) {
        ctx.ui.notify("feynman-model requires interactive mode.", "error");
        return;
      }

      try {
        // Refresh so newly configured providers/models show up in the list.
        ctx.modelRegistry.refresh();
        const availableModels = [...ctx.modelRegistry.getAvailable()].sort((left, right) =>
          formatModelSpec(left).localeCompare(formatModelSpec(right)),
        );
        if (availableModels.length === 0) {
          ctx.ui.notify("No models available.", "error");
          return;
        }

        const agentDir = resolveFeynmanAgentDir();
        const subagentConfigs = listSubagentModelConfigs(agentDir);
        const currentMain = ctx.model ? formatModelSpec(ctx.model) : "(none)";

        // Step 1: choose the target (main model or one of the subagents).
        const targetOptions: SelectOption<TargetChoice>[] = [
          { label: `main (default): ${currentMain}`, value: { type: "main" } },
          ...subagentConfigs.map((config) => ({
            label: `${config.agent}: ${config.model ?? "default"}`,
            value: { type: "subagent" as const, agent: config.agent, model: config.model },
          })),
        ];

        const target = await selectOption(ctx, "Choose target", targetOptions);
        if (!target) return;

        if (target.type === "main") {
          // Step 2a: pick the new main model (current one is annotated).
          const selectedModel = await selectOption(
            ctx,
            "Select main model",
            availableModels.map((model) => {
              const spec = formatModelSpec(model);
              const suffix = spec === currentMain ? " (current)" : "";
              return { label: `${spec}${suffix}`, value: model };
            }),
          );
          if (!selectedModel) return;

          const success = await pi.setModel(selectedModel);
          if (!success) {
            ctx.ui.notify(`No API key found for ${selectedModel.provider}.`, "error");
            return;
          }
          ctx.ui.notify(`Main model set to ${formatModelSpec(selectedModel)}.`, "info");
          return;
        }

        // Step 2b: pick a subagent override, or revert to inheriting main.
        const selectedSubagentModel = await selectOption(
          ctx,
          `Select model for ${target.agent}`,
          [
            {
              label: target.model ? "(inherit main default)" : "(inherit main default) (current)",
              value: INHERIT_MAIN,
            },
            ...availableModels.map((model) => {
              const spec = formatModelSpec(model);
              const suffix = spec === target.model ? " (current)" : "";
              return { label: `${spec}${suffix}`, value: spec };
            }),
          ],
        );
        if (!selectedSubagentModel) return;

        if (selectedSubagentModel === INHERIT_MAIN) {
          unsetSubagentModel(agentDir, target.agent);
          ctx.ui.notify(`${target.agent} now inherits the main model.`, "info");
          return;
        }

        setSubagentModel(agentDir, target.agent, selectedSubagentModel);
        ctx.ui.notify(`${target.agent} model set to ${selectedSubagentModel}.`, "info");
      } catch (error) {
        // Surface all failures (file I/O, unknown agent) in the UI rather than crashing.
        ctx.ui.notify(error instanceof Error ? error.message : String(error), "error");
      }
    },
  });
}
|
||||||
@@ -4,6 +4,7 @@ import { execSync } from "node:child_process";
|
|||||||
import { resolve as resolvePath } from "node:path";
|
import { resolve as resolvePath } from "node:path";
|
||||||
|
|
||||||
import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent";
|
import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent";
|
||||||
|
import { truncateToWidth, visibleWidth } from "@mariozechner/pi-tui";
|
||||||
|
|
||||||
import {
|
import {
|
||||||
APP_ROOT,
|
APP_ROOT,
|
||||||
@@ -11,10 +12,8 @@ import {
|
|||||||
FEYNMAN_VERSION,
|
FEYNMAN_VERSION,
|
||||||
} from "./shared.js";
|
} from "./shared.js";
|
||||||
|
|
||||||
const ANSI_RE = /\x1b\[[0-9;]*m/g;
|
|
||||||
|
|
||||||
function visibleLength(text: string): number {
|
function visibleLength(text: string): number {
|
||||||
return text.replace(ANSI_RE, "").length;
|
return visibleWidth(text);
|
||||||
}
|
}
|
||||||
|
|
||||||
function formatHeaderPath(path: string): string {
|
function formatHeaderPath(path: string): string {
|
||||||
@@ -23,10 +22,8 @@ function formatHeaderPath(path: string): string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function truncateVisible(text: string, maxVisible: number): string {
|
function truncateVisible(text: string, maxVisible: number): string {
|
||||||
const raw = text.replace(ANSI_RE, "");
|
if (visibleWidth(text) <= maxVisible) return text;
|
||||||
if (raw.length <= maxVisible) return text;
|
return truncateToWidth(text, maxVisible, maxVisible <= 3 ? "" : "...");
|
||||||
if (maxVisible <= 3) return ".".repeat(maxVisible);
|
|
||||||
return `${raw.slice(0, maxVisible - 3)}...`;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function wrapWords(text: string, maxW: number): string[] {
|
function wrapWords(text: string, maxW: number): string[] {
|
||||||
@@ -34,12 +31,12 @@ function wrapWords(text: string, maxW: number): string[] {
|
|||||||
const lines: string[] = [];
|
const lines: string[] = [];
|
||||||
let cur = "";
|
let cur = "";
|
||||||
for (let word of words) {
|
for (let word of words) {
|
||||||
if (word.length > maxW) {
|
if (visibleWidth(word) > maxW) {
|
||||||
if (cur) { lines.push(cur); cur = ""; }
|
if (cur) { lines.push(cur); cur = ""; }
|
||||||
word = maxW > 3 ? `${word.slice(0, maxW - 1)}…` : word.slice(0, maxW);
|
word = truncateToWidth(word, maxW, maxW > 3 ? "…" : "");
|
||||||
}
|
}
|
||||||
const test = cur ? `${cur} ${word}` : word;
|
const test = cur ? `${cur} ${word}` : word;
|
||||||
if (cur && test.length > maxW) {
|
if (cur && visibleWidth(test) > maxW) {
|
||||||
lines.push(cur);
|
lines.push(cur);
|
||||||
cur = word;
|
cur = word;
|
||||||
} else {
|
} else {
|
||||||
@@ -56,9 +53,10 @@ function padRight(text: string, width: number): string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function centerText(text: string, width: number): string {
|
function centerText(text: string, width: number): string {
|
||||||
if (text.length >= width) return text.slice(0, width);
|
const textWidth = visibleWidth(text);
|
||||||
const left = Math.floor((width - text.length) / 2);
|
if (textWidth >= width) return truncateToWidth(text, width, "");
|
||||||
const right = width - text.length - left;
|
const left = Math.floor((width - textWidth) / 2);
|
||||||
|
const right = width - textWidth - left;
|
||||||
return `${" ".repeat(left)}${text}${" ".repeat(right)}`;
|
return `${" ".repeat(left)}${text}${" ".repeat(right)}`;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -287,8 +285,8 @@ export function installFeynmanHeader(
|
|||||||
|
|
||||||
if (activity) {
|
if (activity) {
|
||||||
const maxActivityLen = leftW * 2;
|
const maxActivityLen = leftW * 2;
|
||||||
const trimmed = activity.length > maxActivityLen
|
const trimmed = visibleWidth(activity) > maxActivityLen
|
||||||
? `${activity.slice(0, maxActivityLen - 1)}…`
|
? truncateToWidth(activity, maxActivityLen, "…")
|
||||||
: activity;
|
: activity;
|
||||||
leftLines.push("");
|
leftLines.push("");
|
||||||
leftLines.push(theme.fg("accent", theme.bold("Last Activity")));
|
leftLines.push(theme.fg("accent", theme.bold("Last Activity")));
|
||||||
|
|||||||
@@ -1,183 +0,0 @@
|
|||||||
import { execFile, spawn } from "node:child_process";
|
|
||||||
import { mkdir, mkdtemp, readFile, stat, writeFile } from "node:fs/promises";
|
|
||||||
import { tmpdir } from "node:os";
|
|
||||||
import { basename, dirname, extname, join } from "node:path";
|
|
||||||
import { pathToFileURL } from "node:url";
|
|
||||||
import { promisify } from "node:util";
|
|
||||||
|
|
||||||
const execFileAsync = promisify(execFile);
|
|
||||||
|
|
||||||
function isMarkdownPath(path: string): boolean {
|
|
||||||
return [".md", ".markdown", ".txt"].includes(extname(path).toLowerCase());
|
|
||||||
}
|
|
||||||
|
|
||||||
function isLatexPath(path: string): boolean {
|
|
||||||
return extname(path).toLowerCase() === ".tex";
|
|
||||||
}
|
|
||||||
|
|
||||||
function wrapCodeAsMarkdown(source: string, filePath: string): string {
|
|
||||||
const language = extname(filePath).replace(/^\./, "") || "text";
|
|
||||||
return `# ${basename(filePath)}\n\n\`\`\`${language}\n${source}\n\`\`\`\n`;
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function openWithDefaultApp(targetPath: string): Promise<void> {
|
|
||||||
const target = pathToFileURL(targetPath).href;
|
|
||||||
if (process.platform === "darwin") {
|
|
||||||
await execFileAsync("open", [target]);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (process.platform === "win32") {
|
|
||||||
await execFileAsync("cmd", ["/c", "start", "", target]);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
await execFileAsync("xdg-open", [target]);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
 * Run `command` with `args`, feeding `input` on stdin, and capture output.
 * Resolves with UTF-8 decoded stdout/stderr on exit code 0; rejects with a
 * descriptive error (including trimmed stderr, when any) on non-zero exit or
 * spawn failure.
 */
async function runCommandWithInput(
  command: string,
  args: string[],
  input: string,
): Promise<{ stdout: string; stderr: string }> {
  return await new Promise((resolve, reject) => {
    const child = spawn(command, args, { stdio: ["pipe", "pipe", "pipe"] });
    const stdoutChunks: Buffer[] = [];
    const stderrChunks: Buffer[] = [];

    // Accumulate raw chunks and decode once at the end, so multi-byte UTF-8
    // sequences split across chunk boundaries are decoded correctly.
    child.stdout.on("data", (chunk: Buffer | string) => {
      stdoutChunks.push(typeof chunk === "string" ? Buffer.from(chunk) : chunk);
    });
    child.stderr.on("data", (chunk: Buffer | string) => {
      stderrChunks.push(typeof chunk === "string" ? Buffer.from(chunk) : chunk);
    });

    child.once("error", reject);
    child.once("close", (code) => {
      const stdout = Buffer.concat(stdoutChunks).toString("utf8");
      const stderr = Buffer.concat(stderrChunks).toString("utf8");
      if (code === 0) {
        resolve({ stdout, stderr });
        return;
      }
      reject(new Error(`${command} failed with exit code ${code}${stderr ? `: ${stderr.trim()}` : ""}`));
    });

    // Write the entire input and close stdin so the child can finish.
    child.stdin.end(input);
  });
}
|
|
||||||
|
|
||||||
/**
 * Render a markdown/LaTeX/code file to a styled standalone HTML page via
 * pandoc and write it to a temp directory. Returns the path of the HTML file.
 *
 * - PANDOC_PATH env var overrides the pandoc binary.
 * - LaTeX input uses pandoc's `latex` reader; everything else goes through
 *   markdown, wrapping non-markdown sources in a fenced code block first.
 * - A <base> tag pointing at the source's directory lets relative images and
 *   links resolve, together with pandoc's --resource-path.
 */
export async function renderHtmlPreview(filePath: string): Promise<string> {
  const source = await readFile(filePath, "utf8");
  const pandocCommand = process.env.PANDOC_PATH?.trim() || "pandoc";
  const inputFormat = isLatexPath(filePath)
    ? "latex"
    : "markdown+lists_without_preceding_blankline+tex_math_dollars+autolink_bare_uris-raw_html";
  const markdown = isLatexPath(filePath) || isMarkdownPath(filePath) ? source : wrapCodeAsMarkdown(source, filePath);
  const args = ["-f", inputFormat, "-t", "html5", "--mathml", "--wrap=none", `--resource-path=${dirname(filePath)}`];
  const { stdout } = await runCommandWithInput(pandocCommand, args, markdown);
  // Inline CSS theme (light + dark via prefers-color-scheme); pandoc's HTML
  // fragment is embedded in <main>.
  const html = `<!doctype html><html><head><meta charset="utf-8" /><base href="${pathToFileURL(dirname(filePath) + "/").href}" /><title>${basename(filePath)}</title><style>
:root{
--bg:#faf7f2;
--paper:#fffdf9;
--border:#d7cec1;
--text:#1f1c18;
--muted:#6c645a;
--code:#f3eee6;
--link:#0f6d8c;
--quote:#8b7f70;
}
@media (prefers-color-scheme: dark){
:root{
--bg:#161311;
--paper:#1d1916;
--border:#3b342d;
--text:#ebe3d6;
--muted:#b4ab9f;
--code:#221d19;
--link:#8ac6d6;
--quote:#a89d8f;
}
}
body{
font-family:Charter,"Iowan Old Style","Palatino Linotype","Book Antiqua",Palatino,Georgia,serif;
margin:0;
background:var(--bg);
color:var(--text);
line-height:1.7;
}
main{
max-width:900px;
margin:2rem auto 4rem;
padding:2.5rem 3rem;
background:var(--paper);
border:1px solid var(--border);
border-radius:18px;
box-shadow:0 12px 40px rgba(0,0,0,.06);
}
h1,h2,h3,h4,h5,h6{
font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;
line-height:1.2;
margin-top:1.5em;
}
h1{font-size:2.2rem;border-bottom:1px solid var(--border);padding-bottom:.35rem;}
h2{font-size:1.6rem;border-bottom:1px solid var(--border);padding-bottom:.25rem;}
p,ul,ol,blockquote,table{margin:1rem 0;}
pre,code{font-family:ui-monospace,SFMono-Regular,Menlo,monospace}
pre{
background:var(--code);
border:1px solid var(--border);
border-radius:12px;
padding:1rem 1.1rem;
overflow:auto;
}
code{
background:var(--code);
padding:.12rem .28rem;
border-radius:6px;
}
a{color:var(--link);text-decoration:none}
a:hover{text-decoration:underline}
img{max-width:100%}
blockquote{
border-left:4px solid var(--border);
padding-left:1rem;
color:var(--quote);
}
table{border-collapse:collapse;width:100%}
th,td{border:1px solid var(--border);padding:.55rem .7rem;text-align:left}
</style></head><body><main>${stdout}</main></body></html>`;
  const tempDir = await mkdtemp(join(tmpdir(), "feynman-preview-"));
  const htmlPath = join(tempDir, `${basename(filePath)}.html`);
  await writeFile(htmlPath, html, "utf8");
  return htmlPath;
}
|
|
||||||
|
|
||||||
/**
 * Render a markdown/LaTeX/code file to a temporary PDF via pandoc.
 * Returns the generated PDF's path.
 *
 * - PANDOC_PATH overrides the pandoc binary; PANDOC_PDF_ENGINE overrides the
 *   PDF engine (default xelatex).
 * - LaTeX input uses pandoc's `latex` reader; everything else goes through
 *   markdown, wrapping non-markdown sources in a fenced code block first.
 */
export async function renderPdfPreview(filePath: string): Promise<string> {
  const source = await readFile(filePath, "utf8");
  const pandocCommand = process.env.PANDOC_PATH?.trim() || "pandoc";
  const pdfEngine = process.env.PANDOC_PDF_ENGINE?.trim() || "xelatex";
  const inputFormat = isLatexPath(filePath)
    ? "latex"
    : "markdown+lists_without_preceding_blankline+tex_math_dollars+autolink_bare_uris-raw_html";
  const markdown = isLatexPath(filePath) || isMarkdownPath(filePath) ? source : wrapCodeAsMarkdown(source, filePath);
  const tempDir = await mkdtemp(join(tmpdir(), "feynman-preview-"));
  const pdfPath = join(tempDir, `${basename(filePath)}.pdf`);
  const args = [
    "-f",
    inputFormat,
    "-o",
    pdfPath,
    `--pdf-engine=${pdfEngine}`,
    `--resource-path=${dirname(filePath)}`,
  ];
  await runCommandWithInput(pandocCommand, args, markdown);
  return pdfPath;
}
|
|
||||||
|
|
||||||
export async function pathExists(path: string): Promise<boolean> {
|
|
||||||
try {
|
|
||||||
await stat(path);
|
|
||||||
return true;
|
|
||||||
} catch {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,14 +1,70 @@
|
|||||||
import { mkdir, stat, writeFile } from "node:fs/promises";
|
import { mkdir, readdir, readFile, stat, writeFile } from "node:fs/promises";
|
||||||
import { dirname, resolve as resolvePath } from "node:path";
|
import { join, relative, resolve as resolvePath } from "node:path";
|
||||||
|
|
||||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||||
import { Type } from "@sinclair/typebox";
|
|
||||||
|
|
||||||
import { getExtensionCommandSpec } from "../../metadata/commands.mjs";
|
import { getExtensionCommandSpec } from "../../metadata/commands.mjs";
|
||||||
import { renderHtmlPreview, renderPdfPreview, openWithDefaultApp, pathExists } from "./preview.js";
|
|
||||||
import { buildProjectAgentsTemplate, buildSessionLogsReadme } from "./project-scaffold.js";
|
import { buildProjectAgentsTemplate, buildSessionLogsReadme } from "./project-scaffold.js";
|
||||||
import { formatToolText } from "./shared.js";
|
|
||||||
import { searchSessionTranscripts } from "./session-search.js";
|
async function pathExists(path: string): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
await stat(path);
|
||||||
|
return true;
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Top-level directories (relative to cwd) scanned for research artifacts.
const ARTIFACT_DIRS = ["papers", "outputs", "experiments", "notes"];

// File extensions considered artifacts when walking the directories above.
const ARTIFACT_EXTS = new Set([".md", ".tex", ".pdf", ".py", ".csv", ".json", ".html", ".txt", ".log"]);
|
||||||
|
|
||||||
|
async function collectArtifacts(cwd: string): Promise<{ label: string; path: string }[]> {
|
||||||
|
const items: { label: string; path: string; mtime: number }[] = [];
|
||||||
|
|
||||||
|
for (const dir of ARTIFACT_DIRS) {
|
||||||
|
const dirPath = resolvePath(cwd, dir);
|
||||||
|
if (!(await pathExists(dirPath))) continue;
|
||||||
|
|
||||||
|
const walk = async (current: string): Promise<void> => {
|
||||||
|
let entries;
|
||||||
|
try {
|
||||||
|
entries = await readdir(current, { withFileTypes: true });
|
||||||
|
} catch {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
for (const entry of entries) {
|
||||||
|
const full = join(current, entry.name);
|
||||||
|
if (entry.isDirectory()) {
|
||||||
|
await walk(full);
|
||||||
|
} else if (ARTIFACT_EXTS.has(entry.name.slice(entry.name.lastIndexOf(".")))) {
|
||||||
|
const rel = relative(cwd, full);
|
||||||
|
let title = "";
|
||||||
|
try {
|
||||||
|
const head = await readFile(full, "utf8").then((c) => c.slice(0, 200));
|
||||||
|
const match = head.match(/^#\s+(.+)/m);
|
||||||
|
if (match) title = match[1]!.trim();
|
||||||
|
} catch {}
|
||||||
|
const info = await stat(full).catch(() => null);
|
||||||
|
const mtime = info?.mtimeMs ?? 0;
|
||||||
|
const size = info ? formatSize(info.size) : "";
|
||||||
|
const titlePart = title ? ` — ${title}` : "";
|
||||||
|
items.push({ label: `${rel}${titlePart} (${size})`, path: rel, mtime });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
await walk(dirPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
items.sort((a, b) => b.mtime - a.mtime);
|
||||||
|
return items;
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatSize(bytes: number): string {
|
||||||
|
if (bytes < 1024) return `${bytes}B`;
|
||||||
|
if (bytes < 1024 * 1024) return `${Math.round(bytes / 1024)}KB`;
|
||||||
|
return `${(bytes / (1024 * 1024)).toFixed(1)}MB`;
|
||||||
|
}
|
||||||
|
|
||||||
export function registerInitCommand(pi: ExtensionAPI): void {
|
export function registerInitCommand(pi: ExtensionAPI): void {
|
||||||
pi.registerCommand("init", {
|
pi.registerCommand("init", {
|
||||||
@@ -45,73 +101,23 @@ export function registerInitCommand(pi: ExtensionAPI): void {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
export function registerSessionSearchTool(pi: ExtensionAPI): void {
|
export function registerOutputsCommand(pi: ExtensionAPI): void {
|
||||||
pi.registerTool({
|
pi.registerCommand("outputs", {
|
||||||
name: "session_search",
|
description: "Browse all research artifacts (papers, outputs, experiments, notes).",
|
||||||
label: "Session Search",
|
handler: async (_args, ctx) => {
|
||||||
description: "Search prior Feynman session transcripts to recover what was done, said, or written before.",
|
const items = await collectArtifacts(ctx.cwd);
|
||||||
parameters: Type.Object({
|
if (items.length === 0) {
|
||||||
query: Type.String({
|
ctx.ui.notify("No artifacts found. Use /lit, /draft, /review, or /deepresearch to create some.", "info");
|
||||||
description: "Search query to look for in past sessions.",
|
return;
|
||||||
}),
|
}
|
||||||
limit: Type.Optional(
|
|
||||||
Type.Number({
|
const selected = await ctx.ui.select(`Artifacts (${items.length})`, items.map((i) => i.label));
|
||||||
description: "Maximum number of sessions to return. Defaults to 3.",
|
if (!selected) return;
|
||||||
}),
|
|
||||||
),
|
const match = items.find((i) => i.label === selected);
|
||||||
}),
|
if (match) {
|
||||||
async execute(_toolCallId, params) {
|
ctx.ui.setEditorText(`read ${match.path}`);
|
||||||
const result = await searchSessionTranscripts(params.query, Math.max(1, Math.min(params.limit ?? 3, 8)));
|
}
|
||||||
return {
|
|
||||||
content: [{ type: "text", text: formatToolText(result) }],
|
|
||||||
details: result,
|
|
||||||
};
|
|
||||||
},
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
export function registerPreviewTool(pi: ExtensionAPI): void {
|
|
||||||
pi.registerTool({
|
|
||||||
name: "preview_file",
|
|
||||||
label: "Preview File",
|
|
||||||
description: "Open a markdown, LaTeX, PDF, or code artifact in the browser or a PDF viewer for human review. Rendered HTML/PDF previews are temporary and do not replace the source artifact.",
|
|
||||||
parameters: Type.Object({
|
|
||||||
path: Type.String({
|
|
||||||
description: "Path to the file to preview.",
|
|
||||||
}),
|
|
||||||
target: Type.Optional(
|
|
||||||
Type.String({
|
|
||||||
description: "Preview target: browser or pdf. Defaults to browser.",
|
|
||||||
}),
|
|
||||||
),
|
|
||||||
}),
|
|
||||||
async execute(_toolCallId, params, _signal, _onUpdate, ctx) {
|
|
||||||
const target = (params.target?.trim().toLowerCase() || "browser");
|
|
||||||
if (target !== "browser" && target !== "pdf") {
|
|
||||||
throw new Error("target must be browser or pdf");
|
|
||||||
}
|
|
||||||
|
|
||||||
const resolvedPath = resolvePath(ctx.cwd, params.path);
|
|
||||||
const openedPath =
|
|
||||||
resolvePath(resolvedPath).toLowerCase().endsWith(".pdf") && target === "pdf"
|
|
||||||
? resolvedPath
|
|
||||||
: target === "pdf"
|
|
||||||
? await renderPdfPreview(resolvedPath)
|
|
||||||
: await renderHtmlPreview(resolvedPath);
|
|
||||||
|
|
||||||
await mkdir(dirname(openedPath), { recursive: true }).catch(() => {});
|
|
||||||
await openWithDefaultApp(openedPath);
|
|
||||||
|
|
||||||
const result = {
|
|
||||||
sourcePath: resolvedPath,
|
|
||||||
target,
|
|
||||||
openedPath,
|
|
||||||
temporaryPreview: openedPath !== resolvedPath,
|
|
||||||
};
|
|
||||||
return {
|
|
||||||
content: [{ type: "text", text: formatToolText(result) }],
|
|
||||||
details: result,
|
|
||||||
};
|
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|||||||
174
extensions/research-tools/service-tier.ts
Normal file
174
extensions/research-tools/service-tier.ts
Normal file
@@ -0,0 +1,174 @@
|
|||||||
|
import { homedir } from "node:os";
|
||||||
|
import { readFileSync, writeFileSync } from "node:fs";
|
||||||
|
import { resolve } from "node:path";
|
||||||
|
|
||||||
|
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||||
|
|
||||||
|
// All tiers the /service-tier command accepts (union of OpenAI and Anthropic values).
const FEYNMAN_SERVICE_TIERS = [
  "auto",
  "default",
  "flex",
  "priority",
  "standard_only",
] as const;

type FeynmanServiceTier = (typeof FEYNMAN_SERVICE_TIERS)[number];

// Fast membership check used when normalizing user input.
const SERVICE_TIER_SET = new Set<string>(FEYNMAN_SERVICE_TIERS);
// Subsets each provider's API actually understands.
const OPENAI_SERVICE_TIERS = new Set<FeynmanServiceTier>(["auto", "default", "flex", "priority"]);
const ANTHROPIC_SERVICE_TIERS = new Set<FeynmanServiceTier>(["auto", "standard_only"]);

// Command-handler context type, lifted from the ExtensionAPI signature.
type CommandContext = Parameters<Parameters<ExtensionAPI["registerCommand"]>[1]["handler"]>[1];

// One entry in a selection menu: the display label plus the value it maps to.
type SelectOption<T> = {
  label: string;
  value: T;
};
|
||||||
|
|
||||||
|
function resolveFeynmanSettingsPath(): string {
|
||||||
|
const configured = process.env.PI_CODING_AGENT_DIR?.trim();
|
||||||
|
const agentDir = configured
|
||||||
|
? configured.startsWith("~/")
|
||||||
|
? resolve(homedir(), configured.slice(2))
|
||||||
|
: resolve(configured)
|
||||||
|
: resolve(homedir(), ".feynman", "agent");
|
||||||
|
return resolve(agentDir, "settings.json");
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizeServiceTier(value: string | undefined): FeynmanServiceTier | undefined {
|
||||||
|
if (!value) return undefined;
|
||||||
|
const normalized = value.trim().toLowerCase();
|
||||||
|
return SERVICE_TIER_SET.has(normalized) ? (normalized as FeynmanServiceTier) : undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
function getConfiguredServiceTier(settingsPath: string): FeynmanServiceTier | undefined {
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(readFileSync(settingsPath, "utf8")) as { serviceTier?: string };
|
||||||
|
return normalizeServiceTier(parsed.serviceTier);
|
||||||
|
} catch {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function setConfiguredServiceTier(settingsPath: string, tier: FeynmanServiceTier | undefined): void {
|
||||||
|
let settings: Record<string, unknown> = {};
|
||||||
|
try {
|
||||||
|
settings = JSON.parse(readFileSync(settingsPath, "utf8")) as Record<string, unknown>;
|
||||||
|
} catch {}
|
||||||
|
|
||||||
|
if (tier) {
|
||||||
|
settings.serviceTier = tier;
|
||||||
|
} else {
|
||||||
|
delete settings.serviceTier;
|
||||||
|
}
|
||||||
|
|
||||||
|
writeFileSync(settingsPath, JSON.stringify(settings, null, 2) + "\n", "utf8");
|
||||||
|
}
|
||||||
|
|
||||||
|
function resolveActiveServiceTier(settingsPath: string): FeynmanServiceTier | undefined {
|
||||||
|
return normalizeServiceTier(process.env.FEYNMAN_SERVICE_TIER) ?? getConfiguredServiceTier(settingsPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
function resolveProviderServiceTier(
|
||||||
|
provider: string | undefined,
|
||||||
|
tier: FeynmanServiceTier | undefined,
|
||||||
|
): FeynmanServiceTier | undefined {
|
||||||
|
if (!provider || !tier) return undefined;
|
||||||
|
if ((provider === "openai" || provider === "openai-codex") && OPENAI_SERVICE_TIERS.has(tier)) {
|
||||||
|
return tier;
|
||||||
|
}
|
||||||
|
if (provider === "anthropic" && ANTHROPIC_SERVICE_TIERS.has(tier)) {
|
||||||
|
return tier;
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
async function selectOption<T>(
|
||||||
|
ctx: CommandContext,
|
||||||
|
title: string,
|
||||||
|
options: SelectOption<T>[],
|
||||||
|
): Promise<T | undefined> {
|
||||||
|
const selected = await ctx.ui.select(
|
||||||
|
title,
|
||||||
|
options.map((option) => option.label),
|
||||||
|
);
|
||||||
|
if (!selected) return undefined;
|
||||||
|
return options.find((option) => option.label === selected)?.value;
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseRequestedTier(rawArgs: string): FeynmanServiceTier | null | undefined {
|
||||||
|
const trimmed = rawArgs.trim();
|
||||||
|
if (!trimmed) return undefined;
|
||||||
|
if (trimmed === "unset" || trimmed === "clear" || trimmed === "off") return null;
|
||||||
|
return normalizeServiceTier(trimmed);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function registerServiceTierControls(pi: ExtensionAPI): void {
|
||||||
|
pi.on("before_provider_request", (event, ctx) => {
|
||||||
|
if (!ctx.model || !event.payload || typeof event.payload !== "object") {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const activeTier = resolveActiveServiceTier(resolveFeynmanSettingsPath());
|
||||||
|
const providerTier = resolveProviderServiceTier(ctx.model.provider, activeTier);
|
||||||
|
if (!providerTier) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
...(event.payload as Record<string, unknown>),
|
||||||
|
service_tier: providerTier,
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
pi.registerCommand("service-tier", {
|
||||||
|
description: "View or set the provider service tier override used for supported models.",
|
||||||
|
handler: async (args, ctx) => {
|
||||||
|
const settingsPath = resolveFeynmanSettingsPath();
|
||||||
|
const requested = parseRequestedTier(args);
|
||||||
|
|
||||||
|
if (requested === undefined && !args.trim()) {
|
||||||
|
if (!ctx.hasUI) {
|
||||||
|
ctx.ui.notify(getConfiguredServiceTier(settingsPath) ?? "not set", "info");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const current = getConfiguredServiceTier(settingsPath);
|
||||||
|
const selected = await selectOption(
|
||||||
|
ctx,
|
||||||
|
"Select service tier",
|
||||||
|
[
|
||||||
|
{ label: current ? `unset (current: ${current})` : "unset (current)", value: null },
|
||||||
|
...FEYNMAN_SERVICE_TIERS.map((tier) => ({
|
||||||
|
label: tier === current ? `${tier} (current)` : tier,
|
||||||
|
value: tier,
|
||||||
|
})),
|
||||||
|
],
|
||||||
|
);
|
||||||
|
if (selected === undefined) return;
|
||||||
|
if (selected === null) {
|
||||||
|
setConfiguredServiceTier(settingsPath, undefined);
|
||||||
|
ctx.ui.notify("Cleared service tier override.", "info");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
setConfiguredServiceTier(settingsPath, selected);
|
||||||
|
ctx.ui.notify(`Service tier set to ${selected}.`, "info");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (requested === null) {
|
||||||
|
setConfiguredServiceTier(settingsPath, undefined);
|
||||||
|
ctx.ui.notify("Cleared service tier override.", "info");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!requested) {
|
||||||
|
ctx.ui.notify("Use auto, default, flex, priority, standard_only, or unset.", "error");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
setConfiguredServiceTier(settingsPath, requested);
|
||||||
|
ctx.ui.notify(`Service tier set to ${requested}.`, "info");
|
||||||
|
},
|
||||||
|
});
|
||||||
|
}
|
||||||
@@ -1,223 +0,0 @@
|
|||||||
import { readdir, readFile, stat } from "node:fs/promises";
|
|
||||||
import { basename, join } from "node:path";
|
|
||||||
import { pathToFileURL } from "node:url";
|
|
||||||
|
|
||||||
import { getFeynmanHome } from "./shared.js";
|
|
||||||
|
|
||||||
function extractMessageText(message: unknown): string {
|
|
||||||
if (!message || typeof message !== "object") {
|
|
||||||
return "";
|
|
||||||
}
|
|
||||||
|
|
||||||
const content = (message as { content?: unknown }).content;
|
|
||||||
if (typeof content === "string") {
|
|
||||||
return content;
|
|
||||||
}
|
|
||||||
if (!Array.isArray(content)) {
|
|
||||||
return "";
|
|
||||||
}
|
|
||||||
|
|
||||||
return content
|
|
||||||
.map((item) => {
|
|
||||||
if (!item || typeof item !== "object") {
|
|
||||||
return "";
|
|
||||||
}
|
|
||||||
const record = item as { type?: string; text?: unknown; arguments?: unknown; name?: unknown };
|
|
||||||
if (record.type === "text" && typeof record.text === "string") {
|
|
||||||
return record.text;
|
|
||||||
}
|
|
||||||
if (record.type === "toolCall") {
|
|
||||||
const name = typeof record.name === "string" ? record.name : "tool";
|
|
||||||
const args =
|
|
||||||
typeof record.arguments === "string"
|
|
||||||
? record.arguments
|
|
||||||
: record.arguments
|
|
||||||
? JSON.stringify(record.arguments)
|
|
||||||
: "";
|
|
||||||
return `[tool:${name}] ${args}`;
|
|
||||||
}
|
|
||||||
return "";
|
|
||||||
})
|
|
||||||
.filter(Boolean)
|
|
||||||
.join("\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
function buildExcerpt(text: string, query: string, radius = 180): string {
|
|
||||||
const normalizedText = text.replace(/\s+/g, " ").trim();
|
|
||||||
if (!normalizedText) {
|
|
||||||
return "";
|
|
||||||
}
|
|
||||||
|
|
||||||
const lower = normalizedText.toLowerCase();
|
|
||||||
const q = query.toLowerCase();
|
|
||||||
const index = lower.indexOf(q);
|
|
||||||
if (index === -1) {
|
|
||||||
return normalizedText.slice(0, radius * 2) + (normalizedText.length > radius * 2 ? "..." : "");
|
|
||||||
}
|
|
||||||
|
|
||||||
const start = Math.max(0, index - radius);
|
|
||||||
const end = Math.min(normalizedText.length, index + q.length + radius);
|
|
||||||
const prefix = start > 0 ? "..." : "";
|
|
||||||
const suffix = end < normalizedText.length ? "..." : "";
|
|
||||||
return `${prefix}${normalizedText.slice(start, end)}${suffix}`;
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function searchSessionTranscripts(query: string, limit: number): Promise<{
|
|
||||||
query: string;
|
|
||||||
results: Array<{
|
|
||||||
sessionId: string;
|
|
||||||
sessionFile: string;
|
|
||||||
startedAt?: string;
|
|
||||||
cwd?: string;
|
|
||||||
matchCount: number;
|
|
||||||
topMatches: Array<{ role: string; timestamp?: string; excerpt: string }>;
|
|
||||||
}>;
|
|
||||||
}> {
|
|
||||||
const packageRoot = process.env.FEYNMAN_PI_NPM_ROOT;
|
|
||||||
if (packageRoot) {
|
|
||||||
try {
|
|
||||||
const indexerPath = pathToFileURL(
|
|
||||||
join(packageRoot, "@kaiserlich-dev", "pi-session-search", "extensions", "indexer.ts"),
|
|
||||||
).href;
|
|
||||||
const indexer = await import(indexerPath) as {
|
|
||||||
updateIndex?: (onProgress?: (msg: string) => void) => Promise<number>;
|
|
||||||
search?: (query: string, limit?: number) => Array<{
|
|
||||||
sessionPath: string;
|
|
||||||
project: string;
|
|
||||||
timestamp: string;
|
|
||||||
snippet: string;
|
|
||||||
rank: number;
|
|
||||||
title: string | null;
|
|
||||||
}>;
|
|
||||||
getSessionSnippets?: (sessionPath: string, query: string, limit?: number) => string[];
|
|
||||||
};
|
|
||||||
|
|
||||||
await indexer.updateIndex?.();
|
|
||||||
const results = indexer.search?.(query, limit) ?? [];
|
|
||||||
if (results.length > 0) {
|
|
||||||
return {
|
|
||||||
query,
|
|
||||||
results: results.map((result) => ({
|
|
||||||
sessionId: basename(result.sessionPath),
|
|
||||||
sessionFile: result.sessionPath,
|
|
||||||
startedAt: result.timestamp,
|
|
||||||
cwd: result.project,
|
|
||||||
matchCount: 1,
|
|
||||||
topMatches: (indexer.getSessionSnippets?.(result.sessionPath, query, 4) ?? [result.snippet])
|
|
||||||
.filter(Boolean)
|
|
||||||
.map((excerpt) => ({
|
|
||||||
role: "match",
|
|
||||||
excerpt,
|
|
||||||
})),
|
|
||||||
})),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
} catch {
|
|
||||||
// Fall back to direct JSONL scanning below.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const sessionDir = join(getFeynmanHome(), "sessions");
|
|
||||||
const terms = query
|
|
||||||
.toLowerCase()
|
|
||||||
.split(/\s+/)
|
|
||||||
.map((term) => term.trim())
|
|
||||||
.filter((term) => term.length >= 2);
|
|
||||||
const needle = query.toLowerCase();
|
|
||||||
|
|
||||||
let files: string[] = [];
|
|
||||||
try {
|
|
||||||
files = (await readdir(sessionDir))
|
|
||||||
.filter((entry) => entry.endsWith(".jsonl"))
|
|
||||||
.map((entry) => join(sessionDir, entry));
|
|
||||||
} catch {
|
|
||||||
return { query, results: [] };
|
|
||||||
}
|
|
||||||
|
|
||||||
const sessions = [];
|
|
||||||
for (const file of files) {
|
|
||||||
const raw = await readFile(file, "utf8").catch(() => "");
|
|
||||||
if (!raw) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let sessionId = basename(file);
|
|
||||||
let startedAt: string | undefined;
|
|
||||||
let cwd: string | undefined;
|
|
||||||
const matches: Array<{ role: string; timestamp?: string; excerpt: string }> = [];
|
|
||||||
|
|
||||||
for (const line of raw.split("\n")) {
|
|
||||||
if (!line.trim()) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
try {
|
|
||||||
const record = JSON.parse(line) as {
|
|
||||||
type?: string;
|
|
||||||
id?: string;
|
|
||||||
timestamp?: string;
|
|
||||||
cwd?: string;
|
|
||||||
message?: { role?: string; content?: unknown };
|
|
||||||
};
|
|
||||||
if (record.type === "session") {
|
|
||||||
sessionId = record.id ?? sessionId;
|
|
||||||
startedAt = record.timestamp;
|
|
||||||
cwd = record.cwd;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (record.type !== "message" || !record.message) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
const text = extractMessageText(record.message);
|
|
||||||
if (!text) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
const lower = text.toLowerCase();
|
|
||||||
const matched = lower.includes(needle) || terms.some((term) => lower.includes(term));
|
|
||||||
if (!matched) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
matches.push({
|
|
||||||
role: record.message.role ?? "unknown",
|
|
||||||
timestamp: record.timestamp,
|
|
||||||
excerpt: buildExcerpt(text, query),
|
|
||||||
});
|
|
||||||
} catch {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (matches.length === 0) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mtime = 0;
|
|
||||||
try {
|
|
||||||
mtime = (await stat(file)).mtimeMs;
|
|
||||||
} catch {
|
|
||||||
mtime = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
sessions.push({
|
|
||||||
sessionId,
|
|
||||||
sessionFile: file,
|
|
||||||
startedAt,
|
|
||||||
cwd,
|
|
||||||
matchCount: matches.length,
|
|
||||||
topMatches: matches.slice(0, 4),
|
|
||||||
mtime,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
sessions.sort((a, b) => {
|
|
||||||
if (b.matchCount !== a.matchCount) {
|
|
||||||
return b.matchCount - a.matchCount;
|
|
||||||
}
|
|
||||||
return b.mtime - a.mtime;
|
|
||||||
});
|
|
||||||
|
|
||||||
return {
|
|
||||||
query,
|
|
||||||
results: sessions.slice(0, limit).map(({ mtime: _mtime, ...session }) => session),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
import { readFileSync } from "node:fs";
|
import { readFileSync } from "node:fs";
|
||||||
import { homedir } from "node:os";
|
|
||||||
import { dirname, resolve as resolvePath } from "node:path";
|
import { dirname, resolve as resolvePath } from "node:path";
|
||||||
import { fileURLToPath } from "node:url";
|
import { fileURLToPath } from "node:url";
|
||||||
|
|
||||||
@@ -15,25 +14,3 @@ export const FEYNMAN_VERSION = (() => {
|
|||||||
})();
|
})();
|
||||||
|
|
||||||
export { FEYNMAN_ASCII_LOGO as FEYNMAN_AGENT_LOGO } from "../../logo.mjs";
|
export { FEYNMAN_ASCII_LOGO as FEYNMAN_AGENT_LOGO } from "../../logo.mjs";
|
||||||
|
|
||||||
export const FEYNMAN_RESEARCH_TOOLS = [
|
|
||||||
"alpha_search",
|
|
||||||
"alpha_get_paper",
|
|
||||||
"alpha_ask_paper",
|
|
||||||
"alpha_annotate_paper",
|
|
||||||
"alpha_list_annotations",
|
|
||||||
"alpha_read_code",
|
|
||||||
"session_search",
|
|
||||||
"preview_file",
|
|
||||||
];
|
|
||||||
|
|
||||||
export function formatToolText(result: unknown): string {
|
|
||||||
return typeof result === "string" ? result : JSON.stringify(result, null, 2);
|
|
||||||
}
|
|
||||||
|
|
||||||
export function getFeynmanHome(): string {
|
|
||||||
const agentDir = process.env.FEYNMAN_CODING_AGENT_DIR ??
|
|
||||||
process.env.PI_CODING_AGENT_DIR ??
|
|
||||||
resolvePath(homedir(), ".feynman", "agent");
|
|
||||||
return dirname(agentDir);
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -35,11 +35,14 @@ export function readPromptSpecs(appRoot) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
export const extensionCommandSpecs = [
|
export const extensionCommandSpecs = [
|
||||||
|
{ name: "capabilities", args: "", section: "Project & Session", description: "Show installed packages, discovery entrypoints, and runtime capability counts.", publicDocs: true },
|
||||||
|
{ name: "commands", args: "", section: "Project & Session", description: "Browse all available slash commands, including built-in and package commands.", publicDocs: true },
|
||||||
{ name: "help", args: "", section: "Project & Session", description: "Show grouped Feynman commands and prefill the editor with a selected command.", publicDocs: true },
|
{ name: "help", args: "", section: "Project & Session", description: "Show grouped Feynman commands and prefill the editor with a selected command.", publicDocs: true },
|
||||||
|
{ name: "feynman-model", args: "", section: "Project & Session", description: "Open Feynman model menu (main + per-subagent overrides).", publicDocs: true },
|
||||||
{ name: "init", args: "", section: "Project & Session", description: "Bootstrap AGENTS.md and session-log folders for a research project.", publicDocs: true },
|
{ name: "init", args: "", section: "Project & Session", description: "Bootstrap AGENTS.md and session-log folders for a research project.", publicDocs: true },
|
||||||
{ name: "alpha-login", args: "", section: "Setup", description: "Sign in to alphaXiv from inside Feynman.", publicDocs: true },
|
{ name: "outputs", args: "", section: "Project & Session", description: "Browse all research artifacts (papers, outputs, experiments, notes).", publicDocs: true },
|
||||||
{ name: "alpha-status", args: "", section: "Setup", description: "Show alphaXiv authentication status.", publicDocs: true },
|
{ name: "service-tier", args: "", section: "Project & Session", description: "View or set the provider service tier override for supported models.", publicDocs: true },
|
||||||
{ name: "alpha-logout", args: "", section: "Setup", description: "Clear alphaXiv auth from inside Feynman.", publicDocs: true },
|
{ name: "tools", args: "", section: "Project & Session", description: "Browse all callable tools with their source and parameter summary.", publicDocs: true },
|
||||||
];
|
];
|
||||||
|
|
||||||
export const livePackageCommandGroups = [
|
export const livePackageCommandGroups = [
|
||||||
@@ -59,6 +62,7 @@ export const livePackageCommandGroups = [
|
|||||||
{ name: "schedule-prompt", usage: "/schedule-prompt" },
|
{ name: "schedule-prompt", usage: "/schedule-prompt" },
|
||||||
{ name: "search", usage: "/search" },
|
{ name: "search", usage: "/search" },
|
||||||
{ name: "preview", usage: "/preview" },
|
{ name: "preview", usage: "/preview" },
|
||||||
|
{ name: "hotkeys", usage: "/hotkeys" },
|
||||||
{ name: "new", usage: "/new" },
|
{ name: "new", usage: "/new" },
|
||||||
{ name: "quit", usage: "/quit" },
|
{ name: "quit", usage: "/quit" },
|
||||||
{ name: "exit", usage: "/exit" },
|
{ name: "exit", usage: "/exit" },
|
||||||
@@ -82,9 +86,10 @@ export const cliCommandSections = [
|
|||||||
title: "Model Management",
|
title: "Model Management",
|
||||||
commands: [
|
commands: [
|
||||||
{ usage: "feynman model list", description: "List available models in Pi auth storage." },
|
{ usage: "feynman model list", description: "List available models in Pi auth storage." },
|
||||||
{ usage: "feynman model login [id]", description: "Login to a Pi OAuth model provider." },
|
{ usage: "feynman model login [id]", description: "Authenticate a model provider with OAuth or API-key setup." },
|
||||||
{ usage: "feynman model logout [id]", description: "Logout from a Pi OAuth model provider." },
|
{ usage: "feynman model logout [id]", description: "Clear stored auth for a model provider." },
|
||||||
{ usage: "feynman model set <provider/model>", description: "Set the default model." },
|
{ usage: "feynman model set <provider/model>", description: "Set the default model (also accepts provider:model)." },
|
||||||
|
{ usage: "feynman model tier [value]", description: "View or set the request service tier override." },
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -101,6 +106,8 @@ export const cliCommandSections = [
|
|||||||
{ usage: "feynman packages list", description: "Show core and optional Pi package presets." },
|
{ usage: "feynman packages list", description: "Show core and optional Pi package presets." },
|
||||||
{ usage: "feynman packages install <preset>", description: "Install optional package presets on demand." },
|
{ usage: "feynman packages install <preset>", description: "Install optional package presets on demand." },
|
||||||
{ usage: "feynman search status", description: "Show Pi web-access status and config path." },
|
{ usage: "feynman search status", description: "Show Pi web-access status and config path." },
|
||||||
|
{ usage: "feynman search set <provider> [api-key]", description: "Set the web search provider and optionally save its API key." },
|
||||||
|
{ usage: "feynman search clear", description: "Reset web search provider to auto while preserving API keys." },
|
||||||
{ usage: "feynman update [package]", description: "Update installed packages, or a specific package." },
|
{ usage: "feynman update [package]", description: "Update installed packages, or a specific package." },
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
@@ -111,7 +118,8 @@ export const legacyFlags = [
|
|||||||
{ usage: "--alpha-login", description: "Sign in to alphaXiv and exit." },
|
{ usage: "--alpha-login", description: "Sign in to alphaXiv and exit." },
|
||||||
{ usage: "--alpha-logout", description: "Clear alphaXiv auth and exit." },
|
{ usage: "--alpha-logout", description: "Clear alphaXiv auth and exit." },
|
||||||
{ usage: "--alpha-status", description: "Show alphaXiv auth status and exit." },
|
{ usage: "--alpha-status", description: "Show alphaXiv auth status and exit." },
|
||||||
{ usage: "--model <provider:model>", description: "Force a specific model." },
|
{ usage: "--model <provider/model|provider:model>", description: "Force a specific model." },
|
||||||
|
{ usage: "--service-tier <tier>", description: "Override request service tier for this run." },
|
||||||
{ usage: "--thinking <level>", description: "Set thinking level: off | minimal | low | medium | high | xhigh." },
|
{ usage: "--thinking <level>", description: "Set thinking level: off | minimal | low | medium | high | xhigh." },
|
||||||
{ usage: "--cwd <path>", description: "Set the working directory for tools." },
|
{ usage: "--cwd <path>", description: "Set the working directory for tools." },
|
||||||
{ usage: "--session-dir <path>", description: "Set the session storage directory." },
|
{ usage: "--session-dir <path>", description: "Set the session storage directory." },
|
||||||
|
|||||||
141
package-lock.json
generated
141
package-lock.json
generated
@@ -1,17 +1,19 @@
|
|||||||
{
|
{
|
||||||
"name": "@companion-ai/feynman",
|
"name": "@companion-ai/feynman",
|
||||||
"version": "0.2.12",
|
"version": "0.2.19",
|
||||||
"lockfileVersion": 3,
|
"lockfileVersion": 3,
|
||||||
"requires": true,
|
"requires": true,
|
||||||
"packages": {
|
"packages": {
|
||||||
"": {
|
"": {
|
||||||
"name": "@companion-ai/feynman",
|
"name": "@companion-ai/feynman",
|
||||||
"version": "0.2.12",
|
"version": "0.2.19",
|
||||||
"hasInstallScript": true,
|
"hasInstallScript": true,
|
||||||
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@companion-ai/alpha-hub": "^0.1.2",
|
"@clack/prompts": "^1.2.0",
|
||||||
"@mariozechner/pi-ai": "^0.62.0",
|
"@companion-ai/alpha-hub": "^0.1.3",
|
||||||
"@mariozechner/pi-coding-agent": "^0.62.0",
|
"@mariozechner/pi-ai": "^0.66.1",
|
||||||
|
"@mariozechner/pi-coding-agent": "^0.66.1",
|
||||||
"@sinclair/typebox": "^0.34.48",
|
"@sinclair/typebox": "^0.34.48",
|
||||||
"dotenv": "^17.3.1"
|
"dotenv": "^17.3.1"
|
||||||
},
|
},
|
||||||
@@ -24,7 +26,7 @@
|
|||||||
"typescript": "^5.9.3"
|
"typescript": "^5.9.3"
|
||||||
},
|
},
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=20.18.1"
|
"node": ">=20.19.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@anthropic-ai/sdk": {
|
"node_modules/@anthropic-ai/sdk": {
|
||||||
@@ -779,10 +781,32 @@
|
|||||||
"url": "https://github.com/sponsors/Borewit"
|
"url": "https://github.com/sponsors/Borewit"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/@clack/core": {
|
||||||
|
"version": "1.2.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@clack/core/-/core-1.2.0.tgz",
|
||||||
|
"integrity": "sha512-qfxof/3T3t9DPU/Rj3OmcFyZInceqj/NVtO9rwIuJqCUgh32gwPjpFQQp/ben07qKlhpwq7GzfWpST4qdJ5Drg==",
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"fast-wrap-ansi": "^0.1.3",
|
||||||
|
"sisteransi": "^1.0.5"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@clack/prompts": {
|
||||||
|
"version": "1.2.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/@clack/prompts/-/prompts-1.2.0.tgz",
|
||||||
|
"integrity": "sha512-4jmztR9fMqPMjz6H/UZXj0zEmE43ha1euENwkckKKel4XpSfokExPo5AiVStdHSAlHekz4d0CA/r45Ok1E4D3w==",
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"@clack/core": "1.2.0",
|
||||||
|
"fast-string-width": "^1.1.0",
|
||||||
|
"fast-wrap-ansi": "^0.1.3",
|
||||||
|
"sisteransi": "^1.0.5"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/@companion-ai/alpha-hub": {
|
"node_modules/@companion-ai/alpha-hub": {
|
||||||
"version": "0.1.2",
|
"version": "0.1.3",
|
||||||
"resolved": "https://registry.npmjs.org/@companion-ai/alpha-hub/-/alpha-hub-0.1.2.tgz",
|
"resolved": "https://registry.npmjs.org/@companion-ai/alpha-hub/-/alpha-hub-0.1.3.tgz",
|
||||||
"integrity": "sha512-YAFh4B6loo7lKRjW3UFsdoiW3ZRvLdSdP7liDsHhCxY1dzfbxNU8vDAloodiK4ieDVRqMBTmG9NYbnsb4NZUGw==",
|
"integrity": "sha512-g/JoqeGDCoSvkgs1ZSTYJhbTak0zVanQyoYOvf2tDgfqJ09gfkqmSGFDmiP4PkTn1bocPqywZIABgmv25x1uYA==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@modelcontextprotocol/sdk": "^1.27.1",
|
"@modelcontextprotocol/sdk": "^1.27.1",
|
||||||
@@ -1264,9 +1288,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@hono/node-server": {
|
"node_modules/@hono/node-server": {
|
||||||
"version": "1.19.11",
|
"version": "1.19.13",
|
||||||
"resolved": "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.11.tgz",
|
"resolved": "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.13.tgz",
|
||||||
"integrity": "sha512-dr8/3zEaB+p0D2n/IUrlPF1HZm586qgJNXK1a9fhg/PzdtkK7Ksd5l312tJX2yBuALqDYBlG20QEbayqPyxn+g==",
|
"integrity": "sha512-TsQLe4i2gvoTtrHje625ngThGBySOgSK3Xo2XRYOdqGN1teR8+I7vchQC46uLJi8OF62YTYA3AhSpumtkhsaKQ==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=18.14.1"
|
"node": ">=18.14.1"
|
||||||
@@ -1468,21 +1492,21 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@mariozechner/pi-agent-core": {
|
"node_modules/@mariozechner/pi-agent-core": {
|
||||||
"version": "0.62.0",
|
"version": "0.66.1",
|
||||||
"resolved": "https://registry.npmjs.org/@mariozechner/pi-agent-core/-/pi-agent-core-0.62.0.tgz",
|
"resolved": "https://registry.npmjs.org/@mariozechner/pi-agent-core/-/pi-agent-core-0.66.1.tgz",
|
||||||
"integrity": "sha512-SBjqgDrgKOaC+IGzFGB3jXQErv9H1QMYnWFvUg6ra6dG0ZgWFBUZb6unidngWLsmaxSDWes6KeKiVFMsr2VSEQ==",
|
"integrity": "sha512-Nj54A7SuB/EQi8r3Gs+glFOr9wz/a9uxYFf0pCLf2DE7VmzA9O7WSejrvArna17K6auftLSdNyRRe2bIO0qezg==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@mariozechner/pi-ai": "^0.62.0"
|
"@mariozechner/pi-ai": "^0.66.1"
|
||||||
},
|
},
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=20.0.0"
|
"node": ">=20.0.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@mariozechner/pi-ai": {
|
"node_modules/@mariozechner/pi-ai": {
|
||||||
"version": "0.62.0",
|
"version": "0.66.1",
|
||||||
"resolved": "https://registry.npmjs.org/@mariozechner/pi-ai/-/pi-ai-0.62.0.tgz",
|
"resolved": "https://registry.npmjs.org/@mariozechner/pi-ai/-/pi-ai-0.66.1.tgz",
|
||||||
"integrity": "sha512-mJgryZ5RgBQG++tiETMtCQQJoH2MAhKetCfqI98NMvGydu7L9x2qC2JekQlRaAgIlTgv4MRH1UXHMEs4UweE/Q==",
|
"integrity": "sha512-7IZHvpsFdKEBkTmjNrdVL7JLUJVIpha6bwTr12cZ5XyDrxij06wP6Ncpnf4HT5BXAzD5w2JnoqTOSbMEIZj3dg==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@anthropic-ai/sdk": "^0.73.0",
|
"@anthropic-ai/sdk": "^0.73.0",
|
||||||
@@ -1507,16 +1531,17 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@mariozechner/pi-coding-agent": {
|
"node_modules/@mariozechner/pi-coding-agent": {
|
||||||
"version": "0.62.0",
|
"version": "0.66.1",
|
||||||
"resolved": "https://registry.npmjs.org/@mariozechner/pi-coding-agent/-/pi-coding-agent-0.62.0.tgz",
|
"resolved": "https://registry.npmjs.org/@mariozechner/pi-coding-agent/-/pi-coding-agent-0.66.1.tgz",
|
||||||
"integrity": "sha512-f1NnExqsHuA6w8UVlBtPsvTBhdkMc0h1JD9SzGCdWTLou5GHJr2JIP6DlwV9IKWAnM+sAelaoFez+14wLP2zOQ==",
|
"integrity": "sha512-cNmatT+5HvYzQ78cRhRih00wCeUTH/fFx9ecJh5AbN7axgWU+bwiZYy0cjrTsGVgMGF4xMYlPRn/Nze9JEB+/w==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@mariozechner/jiti": "^2.6.2",
|
"@mariozechner/jiti": "^2.6.2",
|
||||||
"@mariozechner/pi-agent-core": "^0.62.0",
|
"@mariozechner/pi-agent-core": "^0.66.1",
|
||||||
"@mariozechner/pi-ai": "^0.62.0",
|
"@mariozechner/pi-ai": "^0.66.1",
|
||||||
"@mariozechner/pi-tui": "^0.62.0",
|
"@mariozechner/pi-tui": "^0.66.1",
|
||||||
"@silvia-odwyer/photon-node": "^0.3.4",
|
"@silvia-odwyer/photon-node": "^0.3.4",
|
||||||
|
"ajv": "^8.17.1",
|
||||||
"chalk": "^5.5.0",
|
"chalk": "^5.5.0",
|
||||||
"cli-highlight": "^2.1.11",
|
"cli-highlight": "^2.1.11",
|
||||||
"diff": "^8.0.2",
|
"diff": "^8.0.2",
|
||||||
@@ -1543,9 +1568,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@mariozechner/pi-tui": {
|
"node_modules/@mariozechner/pi-tui": {
|
||||||
"version": "0.62.0",
|
"version": "0.66.1",
|
||||||
"resolved": "https://registry.npmjs.org/@mariozechner/pi-tui/-/pi-tui-0.62.0.tgz",
|
"resolved": "https://registry.npmjs.org/@mariozechner/pi-tui/-/pi-tui-0.66.1.tgz",
|
||||||
"integrity": "sha512-/At11PPe8l319MnUoK4wN5L/uVCU6bDdiIUzH8Ez0stOkjSF6isRXScZ+RMM+6iCKsD4muBTX8Cmcif+3/UWHA==",
|
"integrity": "sha512-hNFN42ebjwtfGooqoUwM+QaPR1XCyqPuueuP3aLOWS1bZ2nZP/jq8MBuGNrmMw1cgiDcotvOlSNj3BatzEOGsw==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@types/mime-types": "^2.1.4",
|
"@types/mime-types": "^2.1.4",
|
||||||
@@ -2528,9 +2553,9 @@
|
|||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/basic-ftp": {
|
"node_modules/basic-ftp": {
|
||||||
"version": "5.2.0",
|
"version": "5.2.2",
|
||||||
"resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.2.0.tgz",
|
"resolved": "https://registry.npmjs.org/basic-ftp/-/basic-ftp-5.2.2.tgz",
|
||||||
"integrity": "sha512-VoMINM2rqJwJgfdHq6RiUudKt2BV+FY5ZFezP/ypmwayk68+NzzAQy4XXLlqsGD4MCzq3DrmNFD/uUmBJuGoXw==",
|
"integrity": "sha512-1tDrzKsdCg70WGvbFss/ulVAxupNauGnOlgpyjKzeQxzyllBLS0CGLV7tjIXTK3ZQA9/FBEm9qyFFN1bciA6pw==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=10.0.0"
|
"node": ">=10.0.0"
|
||||||
@@ -2576,9 +2601,9 @@
|
|||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/brace-expansion": {
|
"node_modules/brace-expansion": {
|
||||||
"version": "5.0.4",
|
"version": "5.0.5",
|
||||||
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.4.tgz",
|
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz",
|
||||||
"integrity": "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==",
|
"integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"balanced-match": "^4.0.2"
|
"balanced-match": "^4.0.2"
|
||||||
@@ -3204,6 +3229,21 @@
|
|||||||
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
|
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
|
"node_modules/fast-string-truncated-width": {
|
||||||
|
"version": "1.2.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/fast-string-truncated-width/-/fast-string-truncated-width-1.2.1.tgz",
|
||||||
|
"integrity": "sha512-Q9acT/+Uu3GwGj+5w/zsGuQjh9O1TyywhIwAxHudtWrgF09nHOPrvTLhQevPbttcxjr/SNN7mJmfOw/B1bXgow==",
|
||||||
|
"license": "MIT"
|
||||||
|
},
|
||||||
|
"node_modules/fast-string-width": {
|
||||||
|
"version": "1.1.0",
|
||||||
|
"resolved": "https://registry.npmjs.org/fast-string-width/-/fast-string-width-1.1.0.tgz",
|
||||||
|
"integrity": "sha512-O3fwIVIH5gKB38QNbdg+3760ZmGz0SZMgvwJbA1b2TGXceKE6A2cOlfogh1iw8lr049zPyd7YADHy+B7U4W9bQ==",
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"fast-string-truncated-width": "^1.2.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/fast-uri": {
|
"node_modules/fast-uri": {
|
||||||
"version": "3.1.0",
|
"version": "3.1.0",
|
||||||
"resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz",
|
"resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz",
|
||||||
@@ -3220,6 +3260,15 @@
|
|||||||
],
|
],
|
||||||
"license": "BSD-3-Clause"
|
"license": "BSD-3-Clause"
|
||||||
},
|
},
|
||||||
|
"node_modules/fast-wrap-ansi": {
|
||||||
|
"version": "0.1.6",
|
||||||
|
"resolved": "https://registry.npmjs.org/fast-wrap-ansi/-/fast-wrap-ansi-0.1.6.tgz",
|
||||||
|
"integrity": "sha512-HlUwET7a5gqjURj70D5jl7aC3Zmy4weA1SHUfM0JFI0Ptq987NH2TwbBFLoERhfwk+E+eaq4EK3jXoT+R3yp3w==",
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"fast-string-width": "^1.1.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/fast-xml-builder": {
|
"node_modules/fast-xml-builder": {
|
||||||
"version": "1.1.4",
|
"version": "1.1.4",
|
||||||
"resolved": "https://registry.npmjs.org/fast-xml-builder/-/fast-xml-builder-1.1.4.tgz",
|
"resolved": "https://registry.npmjs.org/fast-xml-builder/-/fast-xml-builder-1.1.4.tgz",
|
||||||
@@ -3621,9 +3670,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/hono": {
|
"node_modules/hono": {
|
||||||
"version": "4.12.9",
|
"version": "4.12.12",
|
||||||
"resolved": "https://registry.npmjs.org/hono/-/hono-4.12.9.tgz",
|
"resolved": "https://registry.npmjs.org/hono/-/hono-4.12.12.tgz",
|
||||||
"integrity": "sha512-wy3T8Zm2bsEvxKZM5w21VdHDDcwVS1yUFFY6i8UobSsKfFceT7TOwhbhfKsDyx7tYQlmRM5FLpIuYvNFyjctiA==",
|
"integrity": "sha512-p1JfQMKaceuCbpJKAPKVqyqviZdS0eUxH9v82oWo1kb9xjQ5wA6iP3FNVAPDFlz5/p7d45lO+BpSk1tuSZMF4Q==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=16.9.0"
|
"node": ">=16.9.0"
|
||||||
@@ -3842,9 +3891,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/koffi": {
|
"node_modules/koffi": {
|
||||||
"version": "2.15.2",
|
"version": "2.15.6",
|
||||||
"resolved": "https://registry.npmjs.org/koffi/-/koffi-2.15.2.tgz",
|
"resolved": "https://registry.npmjs.org/koffi/-/koffi-2.15.6.tgz",
|
||||||
"integrity": "sha512-r9tjJLVRSOhCRWdVyQlF3/Ugzeg13jlzS4czS82MAgLff4W+BcYOW7g8Y62t9O5JYjYOLAjAovAZDNlDfZNu+g==",
|
"integrity": "sha512-WQBpM5uo74UQ17UpsFN+PUOrQQg4/nYdey4SGVluQun2drYYfePziLLWdSmFb4wSdWlJC1aimXQnjhPCheRKuw==",
|
||||||
"hasInstallScript": true,
|
"hasInstallScript": true,
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"optional": true,
|
"optional": true,
|
||||||
@@ -4216,9 +4265,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/path-to-regexp": {
|
"node_modules/path-to-regexp": {
|
||||||
"version": "8.3.0",
|
"version": "8.4.2",
|
||||||
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.3.0.tgz",
|
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.4.2.tgz",
|
||||||
"integrity": "sha512-7jdwVIRtsP8MYpdXSwOS0YdD0Du+qOoF/AEPIt88PcCFrZCzx41oxku1jD88hZBwbNUIEfpqvuhjFaMAqMTWnA==",
|
"integrity": "sha512-qRcuIdP69NPm4qbACK+aDogI5CBDMi1jKe0ry5rSQJz8JVLsC7jV8XpiJjGRLLol3N+R5ihGYcrPLTno6pAdBA==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"funding": {
|
"funding": {
|
||||||
"type": "opencollective",
|
"type": "opencollective",
|
||||||
@@ -4609,6 +4658,12 @@
|
|||||||
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
|
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
|
||||||
"license": "ISC"
|
"license": "ISC"
|
||||||
},
|
},
|
||||||
|
"node_modules/sisteransi": {
|
||||||
|
"version": "1.0.5",
|
||||||
|
"resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
|
||||||
|
"integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==",
|
||||||
|
"license": "MIT"
|
||||||
|
},
|
||||||
"node_modules/smart-buffer": {
|
"node_modules/smart-buffer": {
|
||||||
"version": "4.2.0",
|
"version": "4.2.0",
|
||||||
"resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz",
|
"resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz",
|
||||||
|
|||||||
41
package.json
41
package.json
@@ -1,11 +1,11 @@
|
|||||||
{
|
{
|
||||||
"name": "@companion-ai/feynman",
|
"name": "@companion-ai/feynman",
|
||||||
"version": "0.2.12",
|
"version": "0.2.19",
|
||||||
"description": "Research-first CLI agent built on Pi and alphaXiv",
|
"description": "Research-first CLI agent built on Pi and alphaXiv",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=20.18.1"
|
"node": ">=20.19.0 <25"
|
||||||
},
|
},
|
||||||
"bin": {
|
"bin": {
|
||||||
"feynman": "bin/feynman.js"
|
"feynman": "bin/feynman.js"
|
||||||
@@ -26,15 +26,16 @@
|
|||||||
"scripts/",
|
"scripts/",
|
||||||
"skills/",
|
"skills/",
|
||||||
"AGENTS.md",
|
"AGENTS.md",
|
||||||
|
"CONTRIBUTING.md",
|
||||||
"README.md",
|
"README.md",
|
||||||
".env.example"
|
".env.example"
|
||||||
],
|
],
|
||||||
"scripts": {
|
"scripts": {
|
||||||
|
"preinstall": "node ./scripts/check-node-version.mjs",
|
||||||
"build": "tsc -p tsconfig.build.json",
|
"build": "tsc -p tsconfig.build.json",
|
||||||
"build:native-bundle": "node ./scripts/build-native-bundle.mjs",
|
"build:native-bundle": "node ./scripts/build-native-bundle.mjs",
|
||||||
"dev": "tsx src/index.ts",
|
"dev": "tsx src/index.ts",
|
||||||
"prepack": "node ./scripts/prepare-runtime-workspace.mjs",
|
"prepack": "node ./scripts/clean-publish-artifacts.mjs && npm run build && node ./scripts/prepare-runtime-workspace.mjs",
|
||||||
"postinstall": "node ./scripts/patch-embedded-pi.mjs",
|
|
||||||
"start": "tsx src/index.ts",
|
"start": "tsx src/index.ts",
|
||||||
"start:dist": "node ./bin/feynman.js",
|
"start:dist": "node ./bin/feynman.js",
|
||||||
"test": "node --import tsx --test --test-concurrency=1 tests/*.test.ts",
|
"test": "node --import tsx --test --test-concurrency=1 tests/*.test.ts",
|
||||||
@@ -52,15 +53,41 @@
|
|||||||
],
|
],
|
||||||
"prompts": [
|
"prompts": [
|
||||||
"./prompts"
|
"./prompts"
|
||||||
|
],
|
||||||
|
"skills": [
|
||||||
|
"./skills"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@companion-ai/alpha-hub": "^0.1.2",
|
"@clack/prompts": "^1.2.0",
|
||||||
"@mariozechner/pi-ai": "^0.62.0",
|
"@companion-ai/alpha-hub": "^0.1.3",
|
||||||
"@mariozechner/pi-coding-agent": "^0.62.0",
|
"@mariozechner/pi-ai": "^0.66.1",
|
||||||
|
"@mariozechner/pi-coding-agent": "^0.66.1",
|
||||||
"@sinclair/typebox": "^0.34.48",
|
"@sinclair/typebox": "^0.34.48",
|
||||||
"dotenv": "^17.3.1"
|
"dotenv": "^17.3.1"
|
||||||
},
|
},
|
||||||
|
"overrides": {
|
||||||
|
"basic-ftp": "5.2.2",
|
||||||
|
"@modelcontextprotocol/sdk": {
|
||||||
|
"@hono/node-server": "1.19.13",
|
||||||
|
"hono": "4.12.12"
|
||||||
|
},
|
||||||
|
"express": {
|
||||||
|
"router": {
|
||||||
|
"path-to-regexp": "8.4.2"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"proxy-agent": {
|
||||||
|
"pac-proxy-agent": {
|
||||||
|
"get-uri": {
|
||||||
|
"basic-ftp": "5.2.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"minimatch": {
|
||||||
|
"brace-expansion": "5.0.5"
|
||||||
|
}
|
||||||
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@types/node": "^25.5.0",
|
"@types/node": "^25.5.0",
|
||||||
"tsx": "^4.21.0",
|
"tsx": "^4.21.0",
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Audit the paper and codebase for: $@
|
|||||||
Derive a short slug from the audit target (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
Derive a short slug from the audit target (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||||
|
|
||||||
Requirements:
|
Requirements:
|
||||||
- Before starting, outline the audit plan: which paper, which repo, which claims to check. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user and confirm before proceeding.
|
- Before starting, outline the audit plan: which paper, which repo, which claims to check. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||||
- Use the `researcher` subagent for evidence gathering and the `verifier` subagent to verify sources and add inline citations when the audit is non-trivial.
|
- Use the `researcher` subagent for evidence gathering and the `verifier` subagent to verify sources and add inline citations when the audit is non-trivial.
|
||||||
- Compare claimed methods, defaults, metrics, and data handling against the actual code.
|
- Compare claimed methods, defaults, metrics, and data handling against the actual code.
|
||||||
- Call out missing code, mismatches, ambiguous defaults, and reproduction risks.
|
- Call out missing code, mismatches, ambiguous defaults, and reproduction risks.
|
||||||
|
|||||||
@@ -27,6 +27,8 @@ Ask the user where to run:
|
|||||||
- **New git branch** — create a branch so main stays clean
|
- **New git branch** — create a branch so main stays clean
|
||||||
- **Virtual environment** — create an isolated venv/conda env first
|
- **Virtual environment** — create an isolated venv/conda env first
|
||||||
- **Docker** — run experiment code inside an isolated Docker container
|
- **Docker** — run experiment code inside an isolated Docker container
|
||||||
|
- **Modal** — run on Modal's serverless GPU infrastructure. Write Modal-decorated scripts and execute with `modal run`. Best for GPU-heavy benchmarks with no persistent state between iterations. Requires `modal` CLI.
|
||||||
|
- **RunPod** — provision a GPU pod via `runpodctl` and run iterations there over SSH. Best for experiments needing persistent state, large datasets, or SSH access between iterations. Requires `runpodctl` CLI.
|
||||||
|
|
||||||
Do not proceed without a clear answer.
|
Do not proceed without a clear answer.
|
||||||
|
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Compare sources for: $@
|
|||||||
Derive a short slug from the comparison topic (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
Derive a short slug from the comparison topic (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||||
|
|
||||||
Requirements:
|
Requirements:
|
||||||
- Before starting, outline the comparison plan: which sources to compare, which dimensions to evaluate, expected output structure. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user and confirm before proceeding.
|
- Before starting, outline the comparison plan: which sources to compare, which dimensions to evaluate, expected output structure. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||||
- Use the `researcher` subagent to gather source material when the comparison set is broad, and the `verifier` subagent to verify sources and add inline citations to the final matrix.
|
- Use the `researcher` subagent to gather source material when the comparison set is broad, and the `verifier` subagent to verify sources and add inline citations to the final matrix.
|
||||||
- Build a comparison matrix covering: source, key claim, evidence type, caveats, confidence.
|
- Build a comparison matrix covering: source, key claim, evidence type, caveats, confidence.
|
||||||
- Generate charts with `pi-charts` when the comparison involves quantitative metrics. Use Mermaid for method or architecture comparisons.
|
- Generate charts with `pi-charts` when the comparison involves quantitative metrics. Use Mermaid for method or architecture comparisons.
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ If `CHANGELOG.md` exists, read the most recent relevant entries before finalizin
|
|||||||
|
|
||||||
Also save the plan with `memory_remember` (type: `fact`, key: `deepresearch.<slug>.plan`) so it survives context truncation.
|
Also save the plan with `memory_remember` (type: `fact`, key: `deepresearch.<slug>.plan`) so it survives context truncation.
|
||||||
|
|
||||||
Present the plan to the user and ask them to confirm before proceeding. If the user wants changes, revise the plan first.
|
Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting in the terminal, give them a brief chance to request plan changes before proceeding.
|
||||||
|
|
||||||
## 2. Scale decision
|
## 2. Scale decision
|
||||||
|
|
||||||
@@ -182,6 +182,15 @@ Write a provenance record alongside it as `<slug>.provenance.md`:
|
|||||||
- **Research files:** [list of intermediate <slug>-research-*.md files]
|
- **Research files:** [list of intermediate <slug>-research-*.md files]
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Before you stop, verify on disk that all of these exist:
|
||||||
|
- `outputs/.plans/<slug>.md`
|
||||||
|
- `outputs/.drafts/<slug>-draft.md`
|
||||||
|
- `<slug>-brief.md` intermediate cited brief
|
||||||
|
- `outputs/<slug>.md` or `papers/<slug>.md` final promoted deliverable
|
||||||
|
- `outputs/<slug>.provenance.md` or `papers/<slug>.provenance.md` provenance sidecar
|
||||||
|
|
||||||
|
Do not stop at `<slug>-brief.md` alone. If the cited brief exists but the promoted final output or provenance sidecar does not, create them before responding.
|
||||||
|
|
||||||
## Background execution
|
## Background execution
|
||||||
|
|
||||||
If the user wants unattended execution or the sweep will clearly take a while:
|
If the user wants unattended execution or the sweep will clearly take a while:
|
||||||
|
|||||||
@@ -9,11 +9,17 @@ Write a paper-style draft for: $@
|
|||||||
Derive a short slug from the topic (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
Derive a short slug from the topic (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||||
|
|
||||||
Requirements:
|
Requirements:
|
||||||
- Before writing, outline the draft structure: proposed title, sections, key claims to make, source material to draw from, and a verification log for the critical claims, figures, and calculations. Write the outline to `outputs/.plans/<slug>.md`. Present the outline to the user and confirm before proceeding.
|
- Before writing, outline the draft structure: proposed title, sections, key claims to make, source material to draw from, and a verification log for the critical claims, figures, and calculations. Write the outline to `outputs/.plans/<slug>.md`. Present the outline to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||||
- Use the `writer` subagent when the draft should be produced from already-collected notes, then use the `verifier` subagent to add inline citations and verify sources.
|
- Use the `writer` subagent when the draft should be produced from already-collected notes, then use the `verifier` subagent to add inline citations and verify sources.
|
||||||
- Include at minimum: title, abstract, problem statement, related work, method or synthesis, evidence or experiments, limitations, conclusion.
|
- Include at minimum: title, abstract, problem statement, related work, method or synthesis, evidence or experiments, limitations, conclusion.
|
||||||
|
- **Never invent experimental results, scores, figures, images, charts, tables, datasets, or benchmarks.** If no raw artifact, cited source, or prior research note provides the value, write a clearly labeled placeholder such as `TODO: run experiment` or `No experimental results are available yet` instead of fabricating plausible numbers.
|
||||||
|
- The `evidence or experiments` section must contain only one of:
|
||||||
|
- cited results from primary sources,
|
||||||
|
- results computed from explicit raw artifacts/scripts already present in the workspace,
|
||||||
|
- a proposed experimental plan with no claimed outcomes.
|
||||||
|
- Every figure, chart, image, or table must have provenance in its caption: source URL, research-file reference, raw artifact path, or script path. If provenance is missing, omit the figure.
|
||||||
- Use clean Markdown with LaTeX where equations materially help.
|
- Use clean Markdown with LaTeX where equations materially help.
|
||||||
- Generate charts with `pi-charts` for quantitative data, benchmarks, and comparisons. Use Mermaid for architectures and pipelines. Every figure needs a caption.
|
- Generate charts with `pi-charts` only for quantitative data, benchmarks, and comparisons that already exist in the source material or raw artifacts. Use Mermaid for architectures and pipelines only when the structure is supported by sources. Every figure needs a provenance-bearing caption.
|
||||||
- Before delivery, sweep the draft for any claim that sounds stronger than its support. Mark tentative results as tentative and remove unsupported numerics instead of letting the verifier discover them later.
|
- Before delivery, sweep the draft for any claim that sounds stronger than its support. Mark tentative results as tentative and remove unsupported numerics instead of letting the verifier discover them later.
|
||||||
- Save exactly one draft to `papers/<slug>.md`.
|
- Save exactly one draft to `papers/<slug>.md`.
|
||||||
- End with a `Sources` appendix with direct URLs for all primary references.
|
- End with a `Sources` appendix with direct URLs for all primary references.
|
||||||
|
|||||||
@@ -10,9 +10,9 @@ Derive a short slug from the topic (lowercase, hyphens, no filler words, ≤5 wo
|
|||||||
|
|
||||||
## Workflow
|
## Workflow
|
||||||
|
|
||||||
1. **Plan** — Outline the scope: key questions, source types to search (papers, web, repos), time period, expected sections, and a small task ledger plus verification log. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user and confirm before proceeding.
|
1. **Plan** — Outline the scope: key questions, source types to search (papers, web, repos), time period, expected sections, and a small task ledger plus verification log. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||||
2. **Gather** — Use the `researcher` subagent when the sweep is wide enough to benefit from delegated paper triage before synthesis. For narrow topics, search directly. Researcher outputs go to `<slug>-research-*.md`. Do not silently skip assigned questions; mark them `done`, `blocked`, or `superseded`.
|
2. **Gather** — Use the `researcher` subagent when the sweep is wide enough to benefit from delegated paper triage before synthesis. For narrow topics, search directly. Researcher outputs go to `<slug>-research-*.md`. Do not silently skip assigned questions; mark them `done`, `blocked`, or `superseded`.
|
||||||
3. **Synthesize** — Separate consensus, disagreements, and open questions. When useful, propose concrete next experiments or follow-up reading. Generate charts with `pi-charts` for quantitative comparisons across papers and Mermaid diagrams for taxonomies or method pipelines. Before finishing the draft, sweep every strong claim against the verification log and downgrade anything that is inferred or single-source critical.
|
3. **Synthesize** — Separate consensus, disagreements, and open questions. When useful, propose concrete next experiments or follow-up reading. Generate charts with `pi-charts` for quantitative comparisons across papers and Mermaid diagrams for taxonomies or method pipelines. Before finishing the draft, sweep every strong claim against the verification log and downgrade anything that is inferred or single-source critical.
|
||||||
4. **Cite** — Spawn the `verifier` agent to add inline citations and verify every source URL in the draft.
|
4. **Cite** — Spawn the `verifier` agent to add inline citations and verify every source URL in the draft.
|
||||||
5. **Verify** — Spawn the `reviewer` agent to check the cited draft for unsupported claims, logical gaps, zombie sections, and single-source critical findings. Fix FATAL issues before delivering. Note MAJOR issues in Open Questions. If FATAL issues were found, run one more verification pass after the fixes.
|
5. **Verify** — Spawn the `reviewer` agent to check the cited draft for unsupported claims, logical gaps, zombie sections, and single-source critical findings. Fix FATAL issues before delivering. Note MAJOR issues in Open Questions. If FATAL issues were found, run one more verification pass after the fixes.
|
||||||
6. **Deliver** — Save the final literature review to `outputs/<slug>.md`. Write a provenance record alongside it as `outputs/<slug>.provenance.md` listing: date, sources consulted vs. accepted vs. rejected, verification status, and intermediate research files used.
|
6. **Deliver** — Save the final literature review to `outputs/<slug>.md`. Write a provenance record alongside it as `outputs/<slug>.provenance.md` listing: date, sources consulted vs. accepted vs. rejected, verification status, and intermediate research files used. Before you stop, verify on disk that both files exist; do not stop at an intermediate cited draft alone.
|
||||||
|
|||||||
@@ -14,6 +14,8 @@ Design a replication plan for: $@
|
|||||||
- **Local** — run in the current working directory
|
- **Local** — run in the current working directory
|
||||||
- **Virtual environment** — create an isolated venv/conda env first
|
- **Virtual environment** — create an isolated venv/conda env first
|
||||||
- **Docker** — run experiment code inside an isolated Docker container
|
- **Docker** — run experiment code inside an isolated Docker container
|
||||||
|
- **Modal** — run on Modal's serverless GPU infrastructure. Write a Modal-decorated Python script and execute with `modal run <script.py>`. Best for burst GPU jobs that don't need persistent state. Requires `modal` CLI (`pip install modal && modal setup`).
|
||||||
|
- **RunPod** — provision a GPU pod on RunPod and SSH in for execution. Use `runpodctl` to create pods, transfer files, and manage lifecycle. Best for long-running experiments or when you need SSH access and persistent storage. Requires `runpodctl` CLI and `RUNPOD_API_KEY`.
|
||||||
- **Plan only** — produce the replication plan without executing
|
- **Plan only** — produce the replication plan without executing
|
||||||
4. **Execute** — If the user chose an execution environment, implement and run the replication steps there. Save notes, scripts, raw outputs, and results to disk in a reproducible layout. Do not call the outcome replicated unless the planned checks actually passed.
|
4. **Execute** — If the user chose an execution environment, implement and run the replication steps there. Save notes, scripts, raw outputs, and results to disk in a reproducible layout. Do not call the outcome replicated unless the planned checks actually passed.
|
||||||
5. **Log** — For multi-step or resumable replication work, append concise entries to `CHANGELOG.md` after meaningful progress, failed attempts, major verification outcomes, and before stopping. Record the active objective, what changed, what was checked, and the next step.
|
5. **Log** — For multi-step or resumable replication work, append concise entries to `CHANGELOG.md` after meaningful progress, failed attempts, major verification outcomes, and before stopping. Record the active objective, what changed, what was checked, and the next step.
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ Review this AI research artifact: $@
|
|||||||
Derive a short slug from the artifact name (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
Derive a short slug from the artifact name (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||||
|
|
||||||
Requirements:
|
Requirements:
|
||||||
- Before starting, outline what will be reviewed, the review criteria (novelty, empirical rigor, baselines, reproducibility, etc.), and any verification-specific checks needed for claims, figures, and reported metrics. Present the plan to the user and confirm before proceeding.
|
- Before starting, outline what will be reviewed, the review criteria (novelty, empirical rigor, baselines, reproducibility, etc.), and any verification-specific checks needed for claims, figures, and reported metrics. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||||
- Spawn a `researcher` subagent to gather evidence on the artifact — inspect the paper, code, cited work, and any linked experimental artifacts. Save to `<slug>-research.md`.
|
- Spawn a `researcher` subagent to gather evidence on the artifact — inspect the paper, code, cited work, and any linked experimental artifacts. Save to `<slug>-research.md`.
|
||||||
- Spawn a `reviewer` subagent with `<slug>-research.md` to produce the final peer review with inline annotations.
|
- Spawn a `reviewer` subagent with `<slug>-research.md` to produce the final peer review with inline annotations.
|
||||||
- For small or simple artifacts where evidence gathering is overkill, run the `reviewer` subagent directly instead.
|
- For small or simple artifacts where evidence gathering is overkill, run the `reviewer` subagent directly instead.
|
||||||
|
|||||||
165
prompts/summarize.md
Normal file
165
prompts/summarize.md
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
---
|
||||||
|
description: Summarize any URL, local file, or PDF using the RLM pattern — source stored on disk, never injected raw into context.
|
||||||
|
args: <source>
|
||||||
|
section: Research Workflows
|
||||||
|
topLevelCli: true
|
||||||
|
---
|
||||||
|
Summarize the following source: $@
|
||||||
|
|
||||||
|
Derive a short slug from the source filename or URL domain (lowercase, hyphens, no filler words, ≤5 words — e.g. `attention-is-all-you-need`). Use this slug for all files in this run.
|
||||||
|
|
||||||
|
## Why this uses the RLM pattern
|
||||||
|
|
||||||
|
Standard summarization injects the full document into context. Above ~15k tokens, early content degrades as the window fills (context rot). This workflow keeps the document on disk as an external variable and reads only bounded windows — so context pressure is proportional to the window size, not the document size.
|
||||||
|
|
||||||
|
Tier 1 (< 8k chars) is a deliberate exception: direct injection is safe at ~2k tokens and windowed reading would add unnecessary friction.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Step 1 — Fetch, validate, measure
|
||||||
|
|
||||||
|
Run all guards before any tier logic. A failure here is cheap; a failure mid-Tier-3 is not.
|
||||||
|
|
||||||
|
- **GitHub repo URL** (`https://github.com/owner/repo` — exactly 4 slashes): fetch the raw README instead. Try `https://raw.githubusercontent.com/{owner}/{repo}/main/README.md`, then `/master/README.md`. A repo HTML page is not the document the user wants to summarize.
|
||||||
|
- **Remote URL**: fetch to disk with `curl -sL -o outputs/.notes/<slug>-raw.txt <url>`. Do NOT use fetch_content — its return value enters context directly, bypassing the RLM external-variable principle.
|
||||||
|
- **Local file or PDF**: copy or extract to `outputs/.notes/<slug>-raw.txt`. For PDFs, extract text via `pdftotext` or equivalent before measuring.
|
||||||
|
- **Empty or failed fetch**: if the file is < 50 bytes after fetching, stop and surface the error to the user — do not proceed to tier selection.
|
||||||
|
- **Binary content**: if the file is > 1 KB but contains < 100 readable text characters, stop and tell the user the content appears binary or unextracted.
|
||||||
|
- **Existing output**: if `outputs/<slug>-summary.md` already exists, ask the user whether to overwrite or use a different slug. Do not proceed until confirmed.
|
||||||
|
|
||||||
|
Measure decoded text characters (not bytes — UTF-8 multi-byte chars would overcount). Log: `[summarize] source=<source> slug=<slug> chars=<count>`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Step 2 — Choose tier
|
||||||
|
|
||||||
|
| Chars | Tier | Strategy |
|
||||||
|
|---|---|---|
|
||||||
|
| < 8 000 | 1 | Direct read — full content enters context (safe at ~2k tokens) |
|
||||||
|
| 8 000 – 60 000 | 2 | RLM-lite — windowed bash extraction, progressive notes to disk |
|
||||||
|
| > 60 000 | 3 | Full RLM — bash chunking + parallel researcher subagents |
|
||||||
|
|
||||||
|
Log: `[summarize] tier=<N> chars=<count>`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Tier 1 — Direct read
|
||||||
|
|
||||||
|
Read `outputs/.notes/<slug>-raw.txt` in full. Summarize directly using the output format. Write to `outputs/<slug>-summary.md`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Tier 2 — RLM-lite windowed read
|
||||||
|
|
||||||
|
The document stays on disk. Extract 6 000-char windows via bash:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# WHY f.seek/f.read: the read tool uses line offsets, not char offsets.
|
||||||
|
# For exact char-boundary windowing across arbitrary text, bash is required.
|
||||||
|
with open("outputs/.notes/<slug>-raw.txt", encoding="utf-8") as f:
|
||||||
|
f.seek(n * 6000)
|
||||||
|
window = f.read(6000)
|
||||||
|
```
|
||||||
|
|
||||||
|
For each window:
|
||||||
|
1. Extract key claims and evidence.
|
||||||
|
2. Append to `outputs/.notes/<slug>-notes.md` before reading the next window. This is the checkpoint: if the session is interrupted, processed windows survive.
|
||||||
|
3. Log: `[summarize] window <N>/<total> done`
|
||||||
|
|
||||||
|
Synthesize `outputs/.notes/<slug>-notes.md` into `outputs/<slug>-summary.md`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Tier 3 — Full RLM parallel chunks
|
||||||
|
|
||||||
|
Each chunk gets a fresh researcher subagent context window — context rot is impossible because no subagent sees more than 6 000 chars.
|
||||||
|
|
||||||
|
WHY 500-char overlap: academic papers contain multi-sentence arguments that span chunk boundaries. 500 chars (~80 words) ensures a cross-boundary claim appears fully in at least one adjacent chunk.
|
||||||
|
|
||||||
|
### 3a. Chunk the document
|
||||||
|
|
||||||
|
```python
|
||||||
|
import os
|
||||||
|
os.makedirs("outputs/.notes", exist_ok=True)
|
||||||
|
|
||||||
|
with open("outputs/.notes/<slug>-raw.txt", encoding="utf-8") as f:
|
||||||
|
text = f.read()
|
||||||
|
|
||||||
|
chunk_size, overlap = 6000, 500
|
||||||
|
chunks, i = [], 0
|
||||||
|
while i < len(text):
|
||||||
|
chunks.append(text[i : i + chunk_size])
|
||||||
|
i += chunk_size - overlap
|
||||||
|
|
||||||
|
for n, chunk in enumerate(chunks):
|
||||||
|
# Zero-pad index so files sort correctly (chunk-002 before chunk-010)
|
||||||
|
with open(f"outputs/.notes/<slug>-chunk-{n:03d}.txt", "w", encoding="utf-8") as f:
|
||||||
|
f.write(chunk)
|
||||||
|
|
||||||
|
print(f"[summarize] chunks={len(chunks)} chunk_size={chunk_size} overlap={overlap}")
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3b. Confirm before spawning
|
||||||
|
|
||||||
|
If this is an unattended or one-shot run, continue automatically. Otherwise tell the user: "Source is ~<chars> chars -> <N> chunks -> <N> researcher subagents. This may take several minutes. Proceed?" Wait for confirmation before launching Tier 3.
|
||||||
|
|
||||||
|
### 3c. Dispatch researcher subagents
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"tasks": [{
|
||||||
|
"agent": "researcher",
|
||||||
|
"task": "Read ONLY `outputs/.notes/<slug>-chunk-NNN.txt`. Extract: (1) key claims, (2) methodology or technical approach, (3) cited evidence. Do NOT use web_search or fetch external URLs — this is single-source summarization. If a claim appears to start or end mid-sentence at the file boundary, mark it BOUNDARY PARTIAL. Write to `outputs/.notes/<slug>-summary-chunk-NNN.md`.",
|
||||||
|
"output": "outputs/.notes/<slug>-summary-chunk-NNN.md"
|
||||||
|
}],
|
||||||
|
"concurrency": 4,
|
||||||
|
"failFast": false
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3d. Aggregate
|
||||||
|
|
||||||
|
After all subagents return, verify every expected `outputs/.notes/<slug>-summary-chunk-NNN.md` exists. Note any missing chunk indices — they will appear in the Coverage gaps section of the output. Do not abort on partial coverage; a partial summary with gaps noted is more useful than no summary.
|
||||||
|
|
||||||
|
When synthesizing:
|
||||||
|
- **Deduplicate**: a claim in multiple chunks is one claim — keep the most complete formulation.
|
||||||
|
- **Resolve boundary conflicts**: for adjacent-chunk contradictions, prefer the version with more supporting context.
|
||||||
|
- **Remove BOUNDARY PARTIAL markers** where a complete version exists in a neighbouring chunk.
|
||||||
|
|
||||||
|
Write to `outputs/<slug>-summary.md`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Output format
|
||||||
|
|
||||||
|
All tiers produce the same artifact at `outputs/<slug>-summary.md`:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
# Summary: [document title or source filename]
|
||||||
|
|
||||||
|
**Source:** [URL or file path]
|
||||||
|
**Date:** [YYYY-MM-DD]
|
||||||
|
**Tier:** [1 / 2 (N windows) / 3 (N chunks)]
|
||||||
|
|
||||||
|
## Key Claims
|
||||||
|
[3-7 most important assertions, each as a bullet]
|
||||||
|
|
||||||
|
## Methodology
|
||||||
|
[Approach, dataset, evaluation, baselines — omit for non-research documents]
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
[What the source explicitly flags as weak, incomplete, or out of scope]
|
||||||
|
|
||||||
|
## Verdict
|
||||||
|
[One paragraph: what this document establishes, its credibility, who should read it]
|
||||||
|
|
||||||
|
## Sources
|
||||||
|
1. [Title or filename] — [URL or file path]
|
||||||
|
|
||||||
|
## Coverage gaps *(Tier 3 only — omit if all chunks succeeded)*
|
||||||
|
[Missing chunk indices and their approximate byte ranges]
|
||||||
|
```
|
||||||
|
|
||||||
|
Before you stop, verify on disk that `outputs/<slug>-summary.md` exists.
|
||||||
|
|
||||||
|
Sources contains only the single source confirmed reachable in Step 1. No verifier subagent is needed — there are no URLs constructed from memory to verify.
|
||||||
@@ -9,7 +9,7 @@ Create a research watch for: $@
|
|||||||
Derive a short slug from the watch topic (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
Derive a short slug from the watch topic (lowercase, hyphens, no filler words, ≤5 words). Use this slug for all files in this run.
|
||||||
|
|
||||||
Requirements:
|
Requirements:
|
||||||
- Before starting, outline the watch plan: what to monitor, what signals matter, what counts as a meaningful change, and the check frequency. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user and confirm before proceeding.
|
- Before starting, outline the watch plan: what to monitor, what signals matter, what counts as a meaningful change, and the check frequency. Write the plan to `outputs/.plans/<slug>.md`. Present the plan to the user. If this is an unattended or one-shot run, continue automatically. If the user is actively interacting, give them a brief chance to request changes before proceeding.
|
||||||
- Start with a baseline sweep of the topic.
|
- Start with a baseline sweep of the topic.
|
||||||
- Use `schedule_prompt` to create the recurring or delayed follow-up instead of merely promising to check later.
|
- Use `schedule_prompt` to create the recurring or delayed follow-up instead of merely promising to check later.
|
||||||
- Save exactly one baseline artifact to `outputs/<slug>-baseline.md`.
|
- Save exactly one baseline artifact to `outputs/<slug>-baseline.md`.
|
||||||
|
|||||||
@@ -6,13 +6,45 @@ import { spawnSync } from "node:child_process";
|
|||||||
const appRoot = resolve(import.meta.dirname, "..");
|
const appRoot = resolve(import.meta.dirname, "..");
|
||||||
const packageJson = JSON.parse(readFileSync(resolve(appRoot, "package.json"), "utf8"));
|
const packageJson = JSON.parse(readFileSync(resolve(appRoot, "package.json"), "utf8"));
|
||||||
const packageLockPath = resolve(appRoot, "package-lock.json");
|
const packageLockPath = resolve(appRoot, "package-lock.json");
|
||||||
const bundledNodeVersion = process.env.FEYNMAN_BUNDLED_NODE_VERSION ?? process.version.slice(1);
|
const minBundledNodeVersion = packageJson.engines?.node?.replace(/^>=/, "").trim() || process.version.slice(1);
|
||||||
|
|
||||||
|
function parseSemver(version) {
|
||||||
|
const [major = "0", minor = "0", patch = "0"] = version.split(".");
|
||||||
|
return [Number.parseInt(major, 10) || 0, Number.parseInt(minor, 10) || 0, Number.parseInt(patch, 10) || 0];
|
||||||
|
}
|
||||||
|
|
||||||
|
function compareSemver(left, right) {
|
||||||
|
for (let index = 0; index < 3; index += 1) {
|
||||||
|
const diff = left[index] - right[index];
|
||||||
|
if (diff !== 0) return diff;
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
function fail(message) {
|
function fail(message) {
|
||||||
console.error(`[feynman] ${message}`);
|
console.error(`[feynman] ${message}`);
|
||||||
process.exit(1);
|
process.exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function resolveBundledNodeVersion() {
|
||||||
|
const requestedNodeVersion = process.env.FEYNMAN_BUNDLED_NODE_VERSION?.trim();
|
||||||
|
if (requestedNodeVersion) {
|
||||||
|
if (compareSemver(parseSemver(requestedNodeVersion), parseSemver(minBundledNodeVersion)) < 0) {
|
||||||
|
fail(
|
||||||
|
`FEYNMAN_BUNDLED_NODE_VERSION=${requestedNodeVersion} is below the supported floor ${minBundledNodeVersion}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return requestedNodeVersion;
|
||||||
|
}
|
||||||
|
|
||||||
|
const currentNodeVersion = process.version.slice(1);
|
||||||
|
return compareSemver(parseSemver(currentNodeVersion), parseSemver(minBundledNodeVersion)) < 0
|
||||||
|
? minBundledNodeVersion
|
||||||
|
: currentNodeVersion;
|
||||||
|
}
|
||||||
|
|
||||||
|
const bundledNodeVersion = resolveBundledNodeVersion();
|
||||||
|
|
||||||
function resolveCommand(command) {
|
function resolveCommand(command) {
|
||||||
if (process.platform === "win32" && command === "npm") {
|
if (process.platform === "win32" && command === "npm") {
|
||||||
return "npm.cmd";
|
return "npm.cmd";
|
||||||
@@ -136,6 +168,7 @@ function ensureBundledWorkspace() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function copyPackageFiles(appDir) {
|
function copyPackageFiles(appDir) {
|
||||||
|
const releaseDir = resolve(appRoot, "dist", "release");
|
||||||
cpSync(resolve(appRoot, "package.json"), resolve(appDir, "package.json"));
|
cpSync(resolve(appRoot, "package.json"), resolve(appDir, "package.json"));
|
||||||
for (const entry of packageJson.files) {
|
for (const entry of packageJson.files) {
|
||||||
const normalized = entry.endsWith("/") ? entry.slice(0, -1) : entry;
|
const normalized = entry.endsWith("/") ? entry.slice(0, -1) : entry;
|
||||||
@@ -143,7 +176,10 @@ function copyPackageFiles(appDir) {
|
|||||||
if (!existsSync(source)) continue;
|
if (!existsSync(source)) continue;
|
||||||
const destination = resolve(appDir, normalized);
|
const destination = resolve(appDir, normalized);
|
||||||
mkdirSync(dirname(destination), { recursive: true });
|
mkdirSync(dirname(destination), { recursive: true });
|
||||||
cpSync(source, destination, { recursive: true });
|
cpSync(source, destination, {
|
||||||
|
recursive: true,
|
||||||
|
filter: (path) => path !== releaseDir && !path.startsWith(`${releaseDir}/`),
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
cpSync(packageLockPath, resolve(appDir, "package-lock.json"));
|
cpSync(packageLockPath, resolve(appDir, "package-lock.json"));
|
||||||
@@ -160,6 +196,9 @@ function installAppDependencies(appDir, stagingRoot) {
|
|||||||
run("npm", ["ci", "--omit=dev", "--ignore-scripts", "--no-audit", "--no-fund", "--loglevel", "error"], {
|
run("npm", ["ci", "--omit=dev", "--ignore-scripts", "--no-audit", "--no-fund", "--loglevel", "error"], {
|
||||||
cwd: depsDir,
|
cwd: depsDir,
|
||||||
});
|
});
|
||||||
|
run(process.execPath, [resolve(appRoot, "scripts", "prune-runtime-deps.mjs"), depsDir], {
|
||||||
|
cwd: appRoot,
|
||||||
|
});
|
||||||
|
|
||||||
cpSync(resolve(depsDir, "node_modules"), resolve(appDir, "node_modules"), { recursive: true });
|
cpSync(resolve(depsDir, "node_modules"), resolve(appDir, "node_modules"), { recursive: true });
|
||||||
}
|
}
|
||||||
@@ -236,7 +275,8 @@ function writeLauncher(bundleRoot, target) {
|
|||||||
"@echo off",
|
"@echo off",
|
||||||
"setlocal",
|
"setlocal",
|
||||||
'set "ROOT=%~dp0"',
|
'set "ROOT=%~dp0"',
|
||||||
'"%ROOT%node\\node.exe" "%ROOT%app\\bin\\feynman.js" %*',
|
'if "%ROOT:~-1%"=="\\" set "ROOT=%ROOT:~0,-1%"',
|
||||||
|
'"%ROOT%\\node\\node.exe" "%ROOT%\\app\\bin\\feynman.js" %*',
|
||||||
"",
|
"",
|
||||||
].join("\r\n"),
|
].join("\r\n"),
|
||||||
"utf8",
|
"utf8",
|
||||||
@@ -270,10 +310,12 @@ function packBundle(bundleRoot, target, outDir) {
|
|||||||
|
|
||||||
if (target.bundleExtension === "zip") {
|
if (target.bundleExtension === "zip") {
|
||||||
if (process.platform === "win32") {
|
if (process.platform === "win32") {
|
||||||
|
const bundleDir = dirname(bundleRoot).replace(/'/g, "''");
|
||||||
|
const bundleName = basename(bundleRoot).replace(/'/g, "''");
|
||||||
run("powershell", [
|
run("powershell", [
|
||||||
"-NoProfile",
|
"-NoProfile",
|
||||||
"-Command",
|
"-Command",
|
||||||
`Compress-Archive -Path '${bundleRoot.replace(/'/g, "''")}\\*' -DestinationPath '${archivePath.replace(/'/g, "''")}' -Force`,
|
`Push-Location '${bundleDir}'; Compress-Archive -Path '${bundleName}' -DestinationPath '${archivePath.replace(/'/g, "''")}' -Force; Pop-Location`,
|
||||||
]);
|
]);
|
||||||
} else {
|
} else {
|
||||||
run("zip", ["-qr", archivePath, basename(bundleRoot)], { cwd: resolve(bundleRoot, "..") });
|
run("zip", ["-qr", archivePath, basename(bundleRoot)], { cwd: resolve(bundleRoot, "..") });
|
||||||
|
|||||||
46
scripts/check-node-version.mjs
Normal file
46
scripts/check-node-version.mjs
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
const MIN_NODE_VERSION = "20.19.0";
|
||||||
|
const MAX_NODE_MAJOR = 24;
|
||||||
|
const PREFERRED_NODE_MAJOR = 22;
|
||||||
|
|
||||||
|
function parseNodeVersion(version) {
|
||||||
|
const [major = "0", minor = "0", patch = "0"] = version.replace(/^v/, "").split(".");
|
||||||
|
return {
|
||||||
|
major: Number.parseInt(major, 10) || 0,
|
||||||
|
minor: Number.parseInt(minor, 10) || 0,
|
||||||
|
patch: Number.parseInt(patch, 10) || 0,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function compareNodeVersions(left, right) {
|
||||||
|
if (left.major !== right.major) return left.major - right.major;
|
||||||
|
if (left.minor !== right.minor) return left.minor - right.minor;
|
||||||
|
return left.patch - right.patch;
|
||||||
|
}
|
||||||
|
|
||||||
|
function isSupportedNodeVersion(version = process.versions.node) {
|
||||||
|
const parsed = parseNodeVersion(version);
|
||||||
|
return compareNodeVersions(parsed, parseNodeVersion(MIN_NODE_VERSION)) >= 0 && parsed.major <= MAX_NODE_MAJOR;
|
||||||
|
}
|
||||||
|
|
||||||
|
function getUnsupportedNodeVersionLines(version = process.versions.node) {
|
||||||
|
const isWindows = process.platform === "win32";
|
||||||
|
const parsed = parseNodeVersion(version);
|
||||||
|
return [
|
||||||
|
`feynman supports Node.js ${MIN_NODE_VERSION} through ${MAX_NODE_MAJOR}.x (detected ${version}).`,
|
||||||
|
parsed.major > MAX_NODE_MAJOR
|
||||||
|
? "This newer Node release is not supported yet because native Pi packages may fail to build."
|
||||||
|
: isWindows
|
||||||
|
? "Install a supported Node.js release from https://nodejs.org, or use the standalone installer:"
|
||||||
|
: `Switch to a supported Node release with \`nvm install ${PREFERRED_NODE_MAJOR} && nvm use ${PREFERRED_NODE_MAJOR}\`, or use the standalone installer:`,
|
||||||
|
isWindows
|
||||||
|
? "irm https://feynman.is/install.ps1 | iex"
|
||||||
|
: "curl -fsSL https://feynman.is/install | bash",
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!isSupportedNodeVersion()) {
|
||||||
|
for (const line of getUnsupportedNodeVersionLines()) {
|
||||||
|
console.error(line);
|
||||||
|
}
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
8
scripts/clean-publish-artifacts.mjs
Normal file
8
scripts/clean-publish-artifacts.mjs
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
import { rmSync } from "node:fs";
|
||||||
|
import { resolve } from "node:path";
|
||||||
|
|
||||||
|
const appRoot = resolve(import.meta.dirname, "..");
|
||||||
|
const releaseDir = resolve(appRoot, "dist", "release");
|
||||||
|
|
||||||
|
rmSync(releaseDir, { recursive: true, force: true });
|
||||||
|
console.log("[feynman] removed dist/release before npm pack/publish");
|
||||||
128
scripts/install/install-skills.ps1
Normal file
128
scripts/install/install-skills.ps1
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
param(
|
||||||
|
[string]$Version = "latest",
|
||||||
|
[ValidateSet("User", "Repo")]
|
||||||
|
[string]$Scope = "User",
|
||||||
|
[string]$TargetDir = ""
|
||||||
|
)
|
||||||
|
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
|
||||||
|
function Normalize-Version {
|
||||||
|
param([string]$RequestedVersion)
|
||||||
|
|
||||||
|
if (-not $RequestedVersion) {
|
||||||
|
return "latest"
|
||||||
|
}
|
||||||
|
|
||||||
|
switch ($RequestedVersion.ToLowerInvariant()) {
|
||||||
|
"latest" { return "latest" }
|
||||||
|
"stable" { return "latest" }
|
||||||
|
"edge" { throw "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." }
|
||||||
|
default { return $RequestedVersion.TrimStart("v") }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function Resolve-LatestReleaseVersion {
|
||||||
|
$page = Invoke-WebRequest -Uri "https://github.com/getcompanion-ai/feynman/releases/latest"
|
||||||
|
$match = [regex]::Match($page.Content, 'releases/tag/v([0-9][^"''<>\s]*)')
|
||||||
|
if (-not $match.Success) {
|
||||||
|
throw "Failed to resolve the latest Feynman release version."
|
||||||
|
}
|
||||||
|
|
||||||
|
return $match.Groups[1].Value
|
||||||
|
}
|
||||||
|
|
||||||
|
function Resolve-VersionMetadata {
|
||||||
|
param([string]$RequestedVersion)
|
||||||
|
|
||||||
|
$normalizedVersion = Normalize-Version -RequestedVersion $RequestedVersion
|
||||||
|
|
||||||
|
if ($normalizedVersion -eq "latest") {
|
||||||
|
$resolvedVersion = Resolve-LatestReleaseVersion
|
||||||
|
} else {
|
||||||
|
$resolvedVersion = $normalizedVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
return [PSCustomObject]@{
|
||||||
|
ResolvedVersion = $resolvedVersion
|
||||||
|
GitRef = "v$resolvedVersion"
|
||||||
|
DownloadUrl = if ($env:FEYNMAN_INSTALL_SKILLS_ARCHIVE_URL) { $env:FEYNMAN_INSTALL_SKILLS_ARCHIVE_URL } else { "https://github.com/getcompanion-ai/feynman/archive/refs/tags/v$resolvedVersion.zip" }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function Resolve-InstallDir {
|
||||||
|
param(
|
||||||
|
[string]$ResolvedScope,
|
||||||
|
[string]$ResolvedTargetDir
|
||||||
|
)
|
||||||
|
|
||||||
|
if ($ResolvedTargetDir) {
|
||||||
|
return $ResolvedTargetDir
|
||||||
|
}
|
||||||
|
|
||||||
|
if ($ResolvedScope -eq "Repo") {
|
||||||
|
return Join-Path (Get-Location) ".agents\skills\feynman"
|
||||||
|
}
|
||||||
|
|
||||||
|
$codexHome = if ($env:CODEX_HOME) { $env:CODEX_HOME } else { Join-Path $HOME ".codex" }
|
||||||
|
return Join-Path $codexHome "skills\feynman"
|
||||||
|
}
|
||||||
|
|
||||||
|
$metadata = Resolve-VersionMetadata -RequestedVersion $Version
|
||||||
|
$resolvedVersion = $metadata.ResolvedVersion
|
||||||
|
$downloadUrl = $metadata.DownloadUrl
|
||||||
|
$installDir = Resolve-InstallDir -ResolvedScope $Scope -ResolvedTargetDir $TargetDir
|
||||||
|
|
||||||
|
$tmpDir = Join-Path ([System.IO.Path]::GetTempPath()) ("feynman-skills-install-" + [System.Guid]::NewGuid().ToString("N"))
|
||||||
|
New-Item -ItemType Directory -Path $tmpDir | Out-Null
|
||||||
|
|
||||||
|
try {
|
||||||
|
$archivePath = Join-Path $tmpDir "feynman-skills.zip"
|
||||||
|
$extractDir = Join-Path $tmpDir "extract"
|
||||||
|
|
||||||
|
Write-Host "==> Downloading Feynman skills $resolvedVersion"
|
||||||
|
Invoke-WebRequest -Uri $downloadUrl -OutFile $archivePath
|
||||||
|
|
||||||
|
Write-Host "==> Extracting skills"
|
||||||
|
Expand-Archive -LiteralPath $archivePath -DestinationPath $extractDir -Force
|
||||||
|
|
||||||
|
$sourceRoot = Get-ChildItem -Path $extractDir -Directory | Select-Object -First 1
|
||||||
|
if (-not $sourceRoot) {
|
||||||
|
throw "Could not find extracted Feynman archive."
|
||||||
|
}
|
||||||
|
|
||||||
|
$skillsSource = Join-Path $sourceRoot.FullName "skills"
|
||||||
|
$promptsSource = Join-Path $sourceRoot.FullName "prompts"
|
||||||
|
if (-not (Test-Path $skillsSource) -or -not (Test-Path $promptsSource)) {
|
||||||
|
throw "Could not find the bundled skills resources in the downloaded archive."
|
||||||
|
}
|
||||||
|
|
||||||
|
$installParent = Split-Path $installDir -Parent
|
||||||
|
if ($installParent) {
|
||||||
|
New-Item -ItemType Directory -Path $installParent -Force | Out-Null
|
||||||
|
}
|
||||||
|
|
||||||
|
if (Test-Path $installDir) {
|
||||||
|
Remove-Item -Recurse -Force $installDir
|
||||||
|
}
|
||||||
|
|
||||||
|
New-Item -ItemType Directory -Path $installDir -Force | Out-Null
|
||||||
|
Copy-Item -Path (Join-Path $skillsSource "*") -Destination $installDir -Recurse -Force
|
||||||
|
New-Item -ItemType Directory -Path (Join-Path $installDir "prompts") -Force | Out-Null
|
||||||
|
Copy-Item -Path (Join-Path $promptsSource "*") -Destination (Join-Path $installDir "prompts") -Recurse -Force
|
||||||
|
Copy-Item -Path (Join-Path $sourceRoot.FullName "AGENTS.md") -Destination (Join-Path $installDir "AGENTS.md") -Force
|
||||||
|
Copy-Item -Path (Join-Path $sourceRoot.FullName "CONTRIBUTING.md") -Destination (Join-Path $installDir "CONTRIBUTING.md") -Force
|
||||||
|
|
||||||
|
Write-Host "==> Installed skills to $installDir"
|
||||||
|
if ($Scope -eq "Repo") {
|
||||||
|
Write-Host "Repo-local skills will be discovered automatically from .agents/skills."
|
||||||
|
} else {
|
||||||
|
Write-Host "User-level skills will be discovered from `$CODEX_HOME/skills."
|
||||||
|
}
|
||||||
|
|
||||||
|
Write-Host "Feynman skills $resolvedVersion installed successfully."
|
||||||
|
} finally {
|
||||||
|
if (Test-Path $tmpDir) {
|
||||||
|
Remove-Item -Recurse -Force $tmpDir
|
||||||
|
}
|
||||||
|
}
|
||||||
210
scripts/install/install-skills.sh
Normal file
210
scripts/install/install-skills.sh
Normal file
@@ -0,0 +1,210 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
VERSION="latest"
|
||||||
|
SCOPE="${FEYNMAN_SKILLS_SCOPE:-user}"
|
||||||
|
TARGET_DIR="${FEYNMAN_SKILLS_DIR:-}"
|
||||||
|
|
||||||
|
step() {
|
||||||
|
printf '==> %s\n' "$1"
|
||||||
|
}
|
||||||
|
|
||||||
|
normalize_version() {
|
||||||
|
case "$1" in
|
||||||
|
"")
|
||||||
|
printf 'latest\n'
|
||||||
|
;;
|
||||||
|
latest | stable)
|
||||||
|
printf 'latest\n'
|
||||||
|
;;
|
||||||
|
edge)
|
||||||
|
echo "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
v*)
|
||||||
|
printf '%s\n' "${1#v}"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
printf '%s\n' "$1"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
download_file() {
|
||||||
|
url="$1"
|
||||||
|
output="$2"
|
||||||
|
|
||||||
|
if command -v curl >/dev/null 2>&1; then
|
||||||
|
if [ -t 2 ]; then
|
||||||
|
curl -fL --progress-bar "$url" -o "$output"
|
||||||
|
else
|
||||||
|
curl -fsSL "$url" -o "$output"
|
||||||
|
fi
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
if command -v wget >/dev/null 2>&1; then
|
||||||
|
if [ -t 2 ]; then
|
||||||
|
wget --show-progress -O "$output" "$url"
|
||||||
|
else
|
||||||
|
wget -q -O "$output" "$url"
|
||||||
|
fi
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "curl or wget is required to install Feynman skills." >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
download_text() {
|
||||||
|
url="$1"
|
||||||
|
|
||||||
|
if command -v curl >/dev/null 2>&1; then
|
||||||
|
curl -fsSL "$url"
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
if command -v wget >/dev/null 2>&1; then
|
||||||
|
wget -q -O - "$url"
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "curl or wget is required to install Feynman skills." >&2
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
resolve_version() {
|
||||||
|
normalized_version="$(normalize_version "$VERSION")"
|
||||||
|
|
||||||
|
if [ "$normalized_version" = "latest" ]; then
|
||||||
|
release_page="$(download_text "https://github.com/getcompanion-ai/feynman/releases/latest")"
|
||||||
|
resolved_version="$(printf '%s\n' "$release_page" | sed -n 's@.*releases/tag/v\([0-9][^"<>[:space:]]*\).*@\1@p' | head -n 1)"
|
||||||
|
|
||||||
|
if [ -z "$resolved_version" ]; then
|
||||||
|
echo "Failed to resolve the latest Feynman release version." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf '%s\nv%s\n' "$resolved_version" "$resolved_version"
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf '%s\nv%s\n' "$normalized_version" "$normalized_version"
|
||||||
|
}
|
||||||
|
|
||||||
|
resolve_target_dir() {
|
||||||
|
if [ -n "$TARGET_DIR" ]; then
|
||||||
|
printf '%s\n' "$TARGET_DIR"
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
case "$SCOPE" in
|
||||||
|
repo)
|
||||||
|
printf '%s/.agents/skills/feynman\n' "$PWD"
|
||||||
|
;;
|
||||||
|
user)
|
||||||
|
codex_home="${CODEX_HOME:-$HOME/.codex}"
|
||||||
|
printf '%s/skills/feynman\n' "$codex_home"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unknown scope: $SCOPE (expected --user or --repo)" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
}
|
||||||
|
|
||||||
|
while [ $# -gt 0 ]; do
|
||||||
|
case "$1" in
|
||||||
|
--repo)
|
||||||
|
SCOPE="repo"
|
||||||
|
;;
|
||||||
|
--user)
|
||||||
|
SCOPE="user"
|
||||||
|
;;
|
||||||
|
--dir)
|
||||||
|
if [ $# -lt 2 ]; then
|
||||||
|
echo "Usage: install-skills.sh [stable|latest|<version>] [--user|--repo] [--dir <path>]" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
TARGET_DIR="$2"
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
edge|stable|latest|v*|[0-9]*)
|
||||||
|
VERSION="$1"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unknown argument: $1" >&2
|
||||||
|
echo "Usage: install-skills.sh [stable|latest|<version>] [--user|--repo] [--dir <path>]" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
shift
|
||||||
|
done
|
||||||
|
|
||||||
|
archive_metadata="$(resolve_version)"
|
||||||
|
resolved_version="$(printf '%s\n' "$archive_metadata" | sed -n '1p')"
|
||||||
|
git_ref="$(printf '%s\n' "$archive_metadata" | sed -n '2p')"
|
||||||
|
|
||||||
|
archive_url="${FEYNMAN_INSTALL_SKILLS_ARCHIVE_URL:-}"
|
||||||
|
if [ -z "$archive_url" ]; then
|
||||||
|
case "$git_ref" in
|
||||||
|
main)
|
||||||
|
archive_url="https://github.com/getcompanion-ai/feynman/archive/refs/heads/main.tar.gz"
|
||||||
|
;;
|
||||||
|
v*)
|
||||||
|
archive_url="https://github.com/getcompanion-ai/feynman/archive/refs/tags/${git_ref}.tar.gz"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$archive_url" ]; then
|
||||||
|
echo "Could not resolve a download URL for ref: $git_ref" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
install_dir="$(resolve_target_dir)"
|
||||||
|
|
||||||
|
step "Installing Feynman skills ${resolved_version} (${SCOPE})"
|
||||||
|
|
||||||
|
tmp_dir="$(mktemp -d)"
|
||||||
|
cleanup() {
|
||||||
|
rm -rf "$tmp_dir"
|
||||||
|
}
|
||||||
|
trap cleanup EXIT INT TERM
|
||||||
|
|
||||||
|
archive_path="$tmp_dir/feynman-skills.tar.gz"
|
||||||
|
step "Downloading skills archive"
|
||||||
|
download_file "$archive_url" "$archive_path"
|
||||||
|
|
||||||
|
extract_dir="$tmp_dir/extract"
|
||||||
|
mkdir -p "$extract_dir"
|
||||||
|
step "Extracting skills"
|
||||||
|
tar -xzf "$archive_path" -C "$extract_dir"
|
||||||
|
|
||||||
|
source_root="$(find "$extract_dir" -mindepth 1 -maxdepth 1 -type d | head -n 1)"
|
||||||
|
if [ -z "$source_root" ] || [ ! -d "$source_root/skills" ] || [ ! -d "$source_root/prompts" ]; then
|
||||||
|
echo "Could not find the bundled skills resources in the downloaded archive." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p "$(dirname "$install_dir")"
|
||||||
|
rm -rf "$install_dir"
|
||||||
|
mkdir -p "$install_dir"
|
||||||
|
cp -R "$source_root/skills/." "$install_dir/"
|
||||||
|
mkdir -p "$install_dir/prompts"
|
||||||
|
cp -R "$source_root/prompts/." "$install_dir/prompts/"
|
||||||
|
cp "$source_root/AGENTS.md" "$install_dir/AGENTS.md"
|
||||||
|
cp "$source_root/CONTRIBUTING.md" "$install_dir/CONTRIBUTING.md"
|
||||||
|
|
||||||
|
step "Installed skills to $install_dir"
|
||||||
|
case "$SCOPE" in
|
||||||
|
repo)
|
||||||
|
step "Repo-local skills will be discovered automatically from .agents/skills"
|
||||||
|
;;
|
||||||
|
user)
|
||||||
|
step "User-level skills will be discovered from \$CODEX_HOME/skills"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
printf 'Feynman skills %s installed successfully.\n' "$resolved_version"
|
||||||
@@ -4,36 +4,88 @@ param(
|
|||||||
|
|
||||||
$ErrorActionPreference = "Stop"
|
$ErrorActionPreference = "Stop"
|
||||||
|
|
||||||
function Resolve-Version {
|
function Normalize-Version {
|
||||||
param([string]$RequestedVersion)
|
param([string]$RequestedVersion)
|
||||||
|
|
||||||
if ($RequestedVersion -and $RequestedVersion -ne "latest") {
|
if (-not $RequestedVersion) {
|
||||||
return $RequestedVersion.TrimStart("v")
|
return "latest"
|
||||||
}
|
}
|
||||||
|
|
||||||
$release = Invoke-RestMethod -Uri "https://api.github.com/repos/getcompanion-ai/feynman/releases/latest"
|
switch ($RequestedVersion.ToLowerInvariant()) {
|
||||||
if (-not $release.tag_name) {
|
"latest" { return "latest" }
|
||||||
|
"stable" { return "latest" }
|
||||||
|
"edge" { throw "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." }
|
||||||
|
default { return $RequestedVersion.TrimStart("v") }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function Resolve-LatestReleaseVersion {
|
||||||
|
$page = Invoke-WebRequest -Uri "https://github.com/getcompanion-ai/feynman/releases/latest"
|
||||||
|
$match = [regex]::Match($page.Content, 'releases/tag/v([0-9][^"''<>\s]*)')
|
||||||
|
if (-not $match.Success) {
|
||||||
throw "Failed to resolve the latest Feynman release version."
|
throw "Failed to resolve the latest Feynman release version."
|
||||||
}
|
}
|
||||||
|
|
||||||
return $release.tag_name.TrimStart("v")
|
return $match.Groups[1].Value
|
||||||
|
}
|
||||||
|
|
||||||
|
function Resolve-ReleaseMetadata {
|
||||||
|
param(
|
||||||
|
[string]$RequestedVersion,
|
||||||
|
[string]$AssetTarget,
|
||||||
|
[string]$BundleExtension
|
||||||
|
)
|
||||||
|
|
||||||
|
$normalizedVersion = Normalize-Version -RequestedVersion $RequestedVersion
|
||||||
|
|
||||||
|
if ($normalizedVersion -eq "latest") {
|
||||||
|
$resolvedVersion = Resolve-LatestReleaseVersion
|
||||||
|
} else {
|
||||||
|
$resolvedVersion = $normalizedVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
$bundleName = "feynman-$resolvedVersion-$AssetTarget"
|
||||||
|
$archiveName = "$bundleName.$BundleExtension"
|
||||||
|
$baseUrl = if ($env:FEYNMAN_INSTALL_BASE_URL) { $env:FEYNMAN_INSTALL_BASE_URL } else { "https://github.com/getcompanion-ai/feynman/releases/download/v$resolvedVersion" }
|
||||||
|
|
||||||
|
return [PSCustomObject]@{
|
||||||
|
ResolvedVersion = $resolvedVersion
|
||||||
|
BundleName = $bundleName
|
||||||
|
ArchiveName = $archiveName
|
||||||
|
DownloadUrl = "$baseUrl/$archiveName"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
function Get-ArchSuffix {
|
function Get-ArchSuffix {
|
||||||
|
# Prefer PROCESSOR_ARCHITECTURE which is always available on Windows.
|
||||||
|
# RuntimeInformation::OSArchitecture requires .NET 4.7.1+ and may not
|
||||||
|
# be loaded in every Windows PowerShell 5.1 session.
|
||||||
|
$envArch = $env:PROCESSOR_ARCHITECTURE
|
||||||
|
if ($envArch) {
|
||||||
|
switch ($envArch) {
|
||||||
|
"AMD64" { return "x64" }
|
||||||
|
"ARM64" { return "arm64" }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
$arch = [System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture
|
$arch = [System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture
|
||||||
switch ($arch.ToString()) {
|
switch ($arch.ToString()) {
|
||||||
"X64" { return "x64" }
|
"X64" { return "x64" }
|
||||||
"Arm64" { return "arm64" }
|
"Arm64" { return "arm64" }
|
||||||
default { throw "Unsupported architecture: $arch" }
|
|
||||||
}
|
}
|
||||||
|
} catch {}
|
||||||
|
|
||||||
|
throw "Unsupported architecture: $envArch"
|
||||||
}
|
}
|
||||||
|
|
||||||
$resolvedVersion = Resolve-Version -RequestedVersion $Version
|
|
||||||
$archSuffix = Get-ArchSuffix
|
$archSuffix = Get-ArchSuffix
|
||||||
$bundleName = "feynman-$resolvedVersion-win32-$archSuffix"
|
$assetTarget = "win32-$archSuffix"
|
||||||
$archiveName = "$bundleName.zip"
|
$release = Resolve-ReleaseMetadata -RequestedVersion $Version -AssetTarget $assetTarget -BundleExtension "zip"
|
||||||
$baseUrl = if ($env:FEYNMAN_INSTALL_BASE_URL) { $env:FEYNMAN_INSTALL_BASE_URL } else { "https://github.com/getcompanion-ai/feynman/releases/download/v$resolvedVersion" }
|
$resolvedVersion = $release.ResolvedVersion
|
||||||
$downloadUrl = "$baseUrl/$archiveName"
|
$bundleName = $release.BundleName
|
||||||
|
$archiveName = $release.ArchiveName
|
||||||
|
$downloadUrl = $release.DownloadUrl
|
||||||
|
|
||||||
$installRoot = Join-Path $env:LOCALAPPDATA "Programs\feynman"
|
$installRoot = Join-Path $env:LOCALAPPDATA "Programs\feynman"
|
||||||
$installBinDir = Join-Path $installRoot "bin"
|
$installBinDir = Join-Path $installRoot "bin"
|
||||||
@@ -44,25 +96,53 @@ New-Item -ItemType Directory -Path $tmpDir | Out-Null
|
|||||||
|
|
||||||
try {
|
try {
|
||||||
$archivePath = Join-Path $tmpDir $archiveName
|
$archivePath = Join-Path $tmpDir $archiveName
|
||||||
|
Write-Host "==> Downloading $archiveName"
|
||||||
|
try {
|
||||||
Invoke-WebRequest -Uri $downloadUrl -OutFile $archivePath
|
Invoke-WebRequest -Uri $downloadUrl -OutFile $archivePath
|
||||||
|
} catch {
|
||||||
|
throw @"
|
||||||
|
Failed to download $archiveName from:
|
||||||
|
$downloadUrl
|
||||||
|
|
||||||
|
The win32-$archSuffix bundle is missing from the GitHub release.
|
||||||
|
This usually means the release exists, but not all platform bundles were uploaded.
|
||||||
|
|
||||||
|
Workarounds:
|
||||||
|
- try again after the release finishes publishing
|
||||||
|
- pass the latest published version explicitly, e.g.:
|
||||||
|
& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.19
|
||||||
|
"@
|
||||||
|
}
|
||||||
|
|
||||||
New-Item -ItemType Directory -Path $installRoot -Force | Out-Null
|
New-Item -ItemType Directory -Path $installRoot -Force | Out-Null
|
||||||
if (Test-Path $bundleDir) {
|
if (Test-Path $bundleDir) {
|
||||||
Remove-Item -Recurse -Force $bundleDir
|
Remove-Item -Recurse -Force $bundleDir
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Write-Host "==> Extracting $archiveName"
|
||||||
Expand-Archive -LiteralPath $archivePath -DestinationPath $installRoot -Force
|
Expand-Archive -LiteralPath $archivePath -DestinationPath $installRoot -Force
|
||||||
|
|
||||||
New-Item -ItemType Directory -Path $installBinDir -Force | Out-Null
|
New-Item -ItemType Directory -Path $installBinDir -Force | Out-Null
|
||||||
|
|
||||||
$shimPath = Join-Path $installBinDir "feynman.cmd"
|
$shimPath = Join-Path $installBinDir "feynman.cmd"
|
||||||
|
$shimPs1Path = Join-Path $installBinDir "feynman.ps1"
|
||||||
|
Write-Host "==> Linking feynman into $installBinDir"
|
||||||
@"
|
@"
|
||||||
@echo off
|
@echo off
|
||||||
"$bundleDir\feynman.cmd" %*
|
CALL "$bundleDir\feynman.cmd" %*
|
||||||
"@ | Set-Content -Path $shimPath -Encoding ASCII
|
"@ | Set-Content -Path $shimPath -Encoding ASCII
|
||||||
|
|
||||||
|
@"
|
||||||
|
`$BundleDir = "$bundleDir"
|
||||||
|
& "`$BundleDir\node\node.exe" "`$BundleDir\app\bin\feynman.js" @args
|
||||||
|
"@ | Set-Content -Path $shimPs1Path -Encoding UTF8
|
||||||
|
|
||||||
$currentUserPath = [Environment]::GetEnvironmentVariable("Path", "User")
|
$currentUserPath = [Environment]::GetEnvironmentVariable("Path", "User")
|
||||||
if (-not $currentUserPath.Split(';').Contains($installBinDir)) {
|
$alreadyOnPath = $false
|
||||||
|
if ($currentUserPath) {
|
||||||
|
$alreadyOnPath = $currentUserPath.Split(';') -contains $installBinDir
|
||||||
|
}
|
||||||
|
if (-not $alreadyOnPath) {
|
||||||
$updatedPath = if ([string]::IsNullOrWhiteSpace($currentUserPath)) {
|
$updatedPath = if ([string]::IsNullOrWhiteSpace($currentUserPath)) {
|
||||||
$installBinDir
|
$installBinDir
|
||||||
} else {
|
} else {
|
||||||
@@ -74,6 +154,14 @@ try {
|
|||||||
Write-Host "$installBinDir is already on PATH."
|
Write-Host "$installBinDir is already on PATH."
|
||||||
}
|
}
|
||||||
|
|
||||||
|
$resolvedCommand = Get-Command feynman -ErrorAction SilentlyContinue
|
||||||
|
if ($resolvedCommand -and $resolvedCommand.Source -ne $shimPath) {
|
||||||
|
Write-Warning "Current shell resolves feynman to $($resolvedCommand.Source)"
|
||||||
|
Write-Host "Run in a new shell, or run: `$env:Path = '$installBinDir;' + `$env:Path"
|
||||||
|
Write-Host "Then run: feynman"
|
||||||
|
Write-Host "If that path is an old package-manager install, remove it or put $installBinDir first on PATH."
|
||||||
|
}
|
||||||
|
|
||||||
Write-Host "Feynman $resolvedVersion installed successfully."
|
Write-Host "Feynman $resolvedVersion installed successfully."
|
||||||
} finally {
|
} finally {
|
||||||
if (Test-Path $tmpDir) {
|
if (Test-Path $tmpDir) {
|
||||||
|
|||||||
@@ -13,11 +13,57 @@ step() {
|
|||||||
printf '==> %s\n' "$1"
|
printf '==> %s\n' "$1"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
run_with_spinner() {
|
||||||
|
label="$1"
|
||||||
|
shift
|
||||||
|
|
||||||
|
if [ ! -t 2 ]; then
|
||||||
|
step "$label"
|
||||||
|
"$@"
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
"$@" &
|
||||||
|
pid=$!
|
||||||
|
frame=0
|
||||||
|
|
||||||
|
set +e
|
||||||
|
while kill -0 "$pid" 2>/dev/null; do
|
||||||
|
case "$frame" in
|
||||||
|
0) spinner='|' ;;
|
||||||
|
1) spinner='/' ;;
|
||||||
|
2) spinner='-' ;;
|
||||||
|
*) spinner='\\' ;;
|
||||||
|
esac
|
||||||
|
printf '\r==> %s %s' "$label" "$spinner" >&2
|
||||||
|
frame=$(( (frame + 1) % 4 ))
|
||||||
|
sleep 0.1
|
||||||
|
done
|
||||||
|
wait "$pid"
|
||||||
|
status=$?
|
||||||
|
set -e
|
||||||
|
|
||||||
|
printf '\r\033[2K' >&2
|
||||||
|
if [ "$status" -ne 0 ]; then
|
||||||
|
printf '==> %s failed\n' "$label" >&2
|
||||||
|
return "$status"
|
||||||
|
fi
|
||||||
|
|
||||||
|
step "$label"
|
||||||
|
}
|
||||||
|
|
||||||
normalize_version() {
|
normalize_version() {
|
||||||
case "$1" in
|
case "$1" in
|
||||||
"" | latest)
|
"")
|
||||||
printf 'latest\n'
|
printf 'latest\n'
|
||||||
;;
|
;;
|
||||||
|
latest | stable)
|
||||||
|
printf 'latest\n'
|
||||||
|
;;
|
||||||
|
edge)
|
||||||
|
echo "The edge channel has been removed. Use the default installer for the latest tagged release or pass an exact version." >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
v*)
|
v*)
|
||||||
printf '%s\n' "${1#v}"
|
printf '%s\n' "${1#v}"
|
||||||
;;
|
;;
|
||||||
@@ -32,12 +78,20 @@ download_file() {
|
|||||||
output="$2"
|
output="$2"
|
||||||
|
|
||||||
if command -v curl >/dev/null 2>&1; then
|
if command -v curl >/dev/null 2>&1; then
|
||||||
|
if [ -t 2 ]; then
|
||||||
|
curl -fL --progress-bar "$url" -o "$output"
|
||||||
|
else
|
||||||
curl -fsSL "$url" -o "$output"
|
curl -fsSL "$url" -o "$output"
|
||||||
|
fi
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if command -v wget >/dev/null 2>&1; then
|
if command -v wget >/dev/null 2>&1; then
|
||||||
|
if [ -t 2 ]; then
|
||||||
|
wget --show-progress -O "$output" "$url"
|
||||||
|
else
|
||||||
wget -q -O "$output" "$url"
|
wget -q -O "$output" "$url"
|
||||||
|
fi
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -110,23 +164,43 @@ require_command() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
resolve_version() {
|
warn_command_conflict() {
|
||||||
normalized_version="$(normalize_version "$VERSION")"
|
expected_path="$INSTALL_BIN_DIR/feynman"
|
||||||
|
resolved_path="$(command -v feynman 2>/dev/null || true)"
|
||||||
|
|
||||||
if [ "$normalized_version" != "latest" ]; then
|
if [ -z "$resolved_path" ]; then
|
||||||
printf '%s\n' "$normalized_version"
|
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
|
|
||||||
release_json="$(download_text "https://api.github.com/repos/getcompanion-ai/feynman/releases/latest")"
|
if [ "$resolved_path" != "$expected_path" ]; then
|
||||||
resolved="$(printf '%s\n' "$release_json" | sed -n 's/.*"tag_name":[[:space:]]*"v\([^"]*\)".*/\1/p' | head -n 1)"
|
step "Warning: current shell resolves feynman to $resolved_path"
|
||||||
|
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||||
|
step "Or launch directly: $expected_path"
|
||||||
|
|
||||||
if [ -z "$resolved" ]; then
|
step "If that path is an old package-manager install, remove it or put $INSTALL_BIN_DIR first on PATH."
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
resolve_release_metadata() {
|
||||||
|
normalized_version="$(normalize_version "$VERSION")"
|
||||||
|
|
||||||
|
if [ "$normalized_version" = "latest" ]; then
|
||||||
|
release_page="$(download_text "https://github.com/getcompanion-ai/feynman/releases/latest")"
|
||||||
|
resolved_version="$(printf '%s\n' "$release_page" | sed -n 's@.*releases/tag/v\([0-9][^"<>[:space:]]*\).*@\1@p' | head -n 1)"
|
||||||
|
|
||||||
|
if [ -z "$resolved_version" ]; then
|
||||||
echo "Failed to resolve the latest Feynman release version." >&2
|
echo "Failed to resolve the latest Feynman release version." >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
else
|
||||||
|
resolved_version="$normalized_version"
|
||||||
|
fi
|
||||||
|
|
||||||
printf '%s\n' "$resolved"
|
bundle_name="feynman-${resolved_version}-${asset_target}"
|
||||||
|
archive_name="${bundle_name}.${archive_extension}"
|
||||||
|
download_url="${FEYNMAN_INSTALL_BASE_URL:-https://github.com/getcompanion-ai/feynman/releases/download/v${resolved_version}}/${archive_name}"
|
||||||
|
|
||||||
|
printf '%s\n%s\n%s\n%s\n' "$resolved_version" "$bundle_name" "$archive_name" "$download_url"
|
||||||
}
|
}
|
||||||
|
|
||||||
case "$(uname -s)" in
|
case "$(uname -s)" in
|
||||||
@@ -158,12 +232,13 @@ esac
|
|||||||
require_command mktemp
|
require_command mktemp
|
||||||
require_command tar
|
require_command tar
|
||||||
|
|
||||||
resolved_version="$(resolve_version)"
|
|
||||||
asset_target="$os-$arch"
|
asset_target="$os-$arch"
|
||||||
bundle_name="feynman-${resolved_version}-${asset_target}"
|
archive_extension="tar.gz"
|
||||||
archive_name="${bundle_name}.tar.gz"
|
release_metadata="$(resolve_release_metadata)"
|
||||||
base_url="${FEYNMAN_INSTALL_BASE_URL:-https://github.com/getcompanion-ai/feynman/releases/download/v${resolved_version}}"
|
resolved_version="$(printf '%s\n' "$release_metadata" | sed -n '1p')"
|
||||||
download_url="${base_url}/${archive_name}"
|
bundle_name="$(printf '%s\n' "$release_metadata" | sed -n '2p')"
|
||||||
|
archive_name="$(printf '%s\n' "$release_metadata" | sed -n '3p')"
|
||||||
|
download_url="$(printf '%s\n' "$release_metadata" | sed -n '4p')"
|
||||||
|
|
||||||
step "Installing Feynman ${resolved_version} for ${asset_target}"
|
step "Installing Feynman ${resolved_version} for ${asset_target}"
|
||||||
|
|
||||||
@@ -174,13 +249,29 @@ cleanup() {
|
|||||||
trap cleanup EXIT INT TERM
|
trap cleanup EXIT INT TERM
|
||||||
|
|
||||||
archive_path="$tmp_dir/$archive_name"
|
archive_path="$tmp_dir/$archive_name"
|
||||||
download_file "$download_url" "$archive_path"
|
step "Downloading ${archive_name}"
|
||||||
|
if ! download_file "$download_url" "$archive_path"; then
|
||||||
|
cat >&2 <<EOF
|
||||||
|
Failed to download ${archive_name} from:
|
||||||
|
${download_url}
|
||||||
|
|
||||||
|
The ${asset_target} bundle is missing from the GitHub release.
|
||||||
|
This usually means the release exists, but not all platform bundles were uploaded.
|
||||||
|
|
||||||
|
Workarounds:
|
||||||
|
- try again after the release finishes publishing
|
||||||
|
- pass the latest published version explicitly, e.g.:
|
||||||
|
curl -fsSL https://feynman.is/install | bash -s -- 0.2.19
|
||||||
|
EOF
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
mkdir -p "$INSTALL_APP_DIR"
|
mkdir -p "$INSTALL_APP_DIR"
|
||||||
rm -rf "$INSTALL_APP_DIR/$bundle_name"
|
rm -rf "$INSTALL_APP_DIR/$bundle_name"
|
||||||
tar -xzf "$archive_path" -C "$INSTALL_APP_DIR"
|
run_with_spinner "Extracting ${archive_name}" tar -xzf "$archive_path" -C "$INSTALL_APP_DIR"
|
||||||
|
|
||||||
mkdir -p "$INSTALL_BIN_DIR"
|
mkdir -p "$INSTALL_BIN_DIR"
|
||||||
|
step "Linking feynman into $INSTALL_BIN_DIR"
|
||||||
cat >"$INSTALL_BIN_DIR/feynman" <<EOF
|
cat >"$INSTALL_BIN_DIR/feynman" <<EOF
|
||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
set -eu
|
set -eu
|
||||||
@@ -193,20 +284,22 @@ add_to_path
|
|||||||
case "$path_action" in
|
case "$path_action" in
|
||||||
added)
|
added)
|
||||||
step "PATH updated for future shells in $path_profile"
|
step "PATH updated for future shells in $path_profile"
|
||||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
|
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||||
;;
|
;;
|
||||||
configured)
|
configured)
|
||||||
step "PATH is already configured for future shells in $path_profile"
|
step "PATH is already configured for future shells in $path_profile"
|
||||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
|
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||||
;;
|
;;
|
||||||
skipped)
|
skipped)
|
||||||
step "PATH update skipped"
|
step "PATH update skipped"
|
||||||
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && feynman"
|
step "Run now: export PATH=\"$INSTALL_BIN_DIR:\$PATH\" && hash -r && feynman"
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
step "$INSTALL_BIN_DIR is already on PATH"
|
step "$INSTALL_BIN_DIR is already on PATH"
|
||||||
step "Run: feynman"
|
step "Run: hash -r && feynman"
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
|
warn_command_conflict
|
||||||
|
|
||||||
printf 'Feynman %s installed successfully.\n' "$resolved_version"
|
printf 'Feynman %s installed successfully.\n' "$resolved_version"
|
||||||
|
|||||||
1
scripts/lib/alpha-hub-auth-patch.d.mts
Normal file
1
scripts/lib/alpha-hub-auth-patch.d.mts
Normal file
@@ -0,0 +1 @@
|
|||||||
|
export declare function patchAlphaHubAuthSource(source: string): string;
|
||||||
66
scripts/lib/alpha-hub-auth-patch.mjs
Normal file
66
scripts/lib/alpha-hub-auth-patch.mjs
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
const LEGACY_SUCCESS_HTML = "'<html><body><h2>Logged in to Alpha Hub</h2><p>You can close this tab.</p></body></html>'";
|
||||||
|
const LEGACY_ERROR_HTML = "'<html><body><h2>Login failed</h2><p>You can close this tab.</p></body></html>'";
|
||||||
|
|
||||||
|
const bodyAttr = 'style="font-family:system-ui,sans-serif;text-align:center;padding-top:20vh;background:#050a08;color:#f0f5f2"';
|
||||||
|
const logo = '<h1 style="font-family:monospace;font-size:48px;color:#34d399;margin:0">feynman</h1>';
|
||||||
|
|
||||||
|
const FEYNMAN_SUCCESS_HTML = `'<html><body ${bodyAttr}>${logo}<h2 style="color:#34d399;margin-top:16px">Logged in</h2><p style="color:#8aaa9a">You can close this tab.</p></body></html>'`;
|
||||||
|
const FEYNMAN_ERROR_HTML = `'<html><body ${bodyAttr}>${logo}<h2 style="color:#ef4444;margin-top:16px">Login failed</h2><p style="color:#8aaa9a">You can close this tab.</p></body></html>'`;
|
||||||
|
|
||||||
|
const CURRENT_OPEN_BROWSER = [
|
||||||
|
"function openBrowser(url) {",
|
||||||
|
" try {",
|
||||||
|
" const plat = platform();",
|
||||||
|
" if (plat === 'darwin') execSync(`open \"${url}\"`);",
|
||||||
|
" else if (plat === 'linux') execSync(`xdg-open \"${url}\"`);",
|
||||||
|
" else if (plat === 'win32') execSync(`start \"\" \"${url}\"`);",
|
||||||
|
" } catch {}",
|
||||||
|
"}",
|
||||||
|
].join("\n");
|
||||||
|
|
||||||
|
const PATCHED_OPEN_BROWSER = [
|
||||||
|
"function openBrowser(url) {",
|
||||||
|
" try {",
|
||||||
|
" const plat = platform();",
|
||||||
|
" const isWsl = plat === 'linux' && (Boolean(process.env.WSL_DISTRO_NAME) || Boolean(process.env.WSL_INTEROP));",
|
||||||
|
" if (plat === 'darwin') execSync(`open \"${url}\"`);",
|
||||||
|
" else if (isWsl) {",
|
||||||
|
" try {",
|
||||||
|
" execSync(`wslview \"${url}\"`);",
|
||||||
|
" } catch {",
|
||||||
|
" execSync(`cmd.exe /c start \"\" \"${url}\"`);",
|
||||||
|
" }",
|
||||||
|
" }",
|
||||||
|
" else if (plat === 'linux') execSync(`xdg-open \"${url}\"`);",
|
||||||
|
" else if (plat === 'win32') execSync(`cmd /c start \"\" \"${url}\"`);",
|
||||||
|
" } catch {}",
|
||||||
|
"}",
|
||||||
|
].join("\n");
|
||||||
|
|
||||||
|
const LEGACY_WIN_OPEN = "else if (plat === 'win32') execSync(`start \"${url}\"`);";
|
||||||
|
const FIXED_WIN_OPEN = "else if (plat === 'win32') execSync(`cmd /c start \"\" \"${url}\"`);";
|
||||||
|
|
||||||
|
const OPEN_BROWSER_LOG = "process.stderr.write('Opening browser for alphaXiv login...\\n');";
|
||||||
|
const OPEN_BROWSER_LOG_WITH_URL = "process.stderr.write(`Opening browser for alphaXiv login...\\nAuth URL: ${authUrl.toString()}\\n`);";
|
||||||
|
|
||||||
|
export function patchAlphaHubAuthSource(source) {
|
||||||
|
let patched = source;
|
||||||
|
|
||||||
|
if (patched.includes(LEGACY_SUCCESS_HTML)) {
|
||||||
|
patched = patched.replace(LEGACY_SUCCESS_HTML, FEYNMAN_SUCCESS_HTML);
|
||||||
|
}
|
||||||
|
if (patched.includes(LEGACY_ERROR_HTML)) {
|
||||||
|
patched = patched.replace(LEGACY_ERROR_HTML, FEYNMAN_ERROR_HTML);
|
||||||
|
}
|
||||||
|
if (patched.includes(CURRENT_OPEN_BROWSER)) {
|
||||||
|
patched = patched.replace(CURRENT_OPEN_BROWSER, PATCHED_OPEN_BROWSER);
|
||||||
|
}
|
||||||
|
if (patched.includes(LEGACY_WIN_OPEN)) {
|
||||||
|
patched = patched.replace(LEGACY_WIN_OPEN, FIXED_WIN_OPEN);
|
||||||
|
}
|
||||||
|
if (patched.includes(OPEN_BROWSER_LOG)) {
|
||||||
|
patched = patched.replace(OPEN_BROWSER_LOG, OPEN_BROWSER_LOG_WITH_URL);
|
||||||
|
}
|
||||||
|
|
||||||
|
return patched;
|
||||||
|
}
|
||||||
1
scripts/lib/pi-extension-loader-patch.d.mts
Normal file
1
scripts/lib/pi-extension-loader-patch.d.mts
Normal file
@@ -0,0 +1 @@
|
|||||||
|
export function patchPiExtensionLoaderSource(source: string): string;
|
||||||
32
scripts/lib/pi-extension-loader-patch.mjs
Normal file
32
scripts/lib/pi-extension-loader-patch.mjs
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
const PATH_TO_FILE_URL_IMPORT = 'import { fileURLToPath, pathToFileURL } from "node:url";';
|
||||||
|
const FILE_URL_TO_PATH_IMPORT = 'import { fileURLToPath } from "node:url";';
|
||||||
|
|
||||||
|
const IMPORT_CALL = 'const module = await jiti.import(extensionPath, { default: true });';
|
||||||
|
const PATCHED_IMPORT_CALL = [
|
||||||
|
' const extensionSpecifier = process.platform === "win32" && path.isAbsolute(extensionPath)',
|
||||||
|
' ? pathToFileURL(extensionPath).href',
|
||||||
|
' : extensionPath;',
|
||||||
|
' const module = await jiti.import(extensionSpecifier, { default: true });',
|
||||||
|
].join("\n");
|
||||||
|
|
||||||
|
export function patchPiExtensionLoaderSource(source) {
|
||||||
|
let patched = source;
|
||||||
|
|
||||||
|
if (patched.includes(PATH_TO_FILE_URL_IMPORT) || patched.includes(PATCHED_IMPORT_CALL)) {
|
||||||
|
return patched;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (patched.includes(FILE_URL_TO_PATH_IMPORT)) {
|
||||||
|
patched = patched.replace(FILE_URL_TO_PATH_IMPORT, PATH_TO_FILE_URL_IMPORT);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!patched.includes(PATH_TO_FILE_URL_IMPORT)) {
|
||||||
|
return source;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!patched.includes(IMPORT_CALL)) {
|
||||||
|
return source;
|
||||||
|
}
|
||||||
|
|
||||||
|
return patched.replace(IMPORT_CALL, PATCHED_IMPORT_CALL);
|
||||||
|
}
|
||||||
1
scripts/lib/pi-google-legacy-schema-patch.d.mts
Normal file
1
scripts/lib/pi-google-legacy-schema-patch.d.mts
Normal file
@@ -0,0 +1 @@
|
|||||||
|
export function patchPiGoogleLegacySchemaSource(source: string): string;
|
||||||
44
scripts/lib/pi-google-legacy-schema-patch.mjs
Normal file
44
scripts/lib/pi-google-legacy-schema-patch.mjs
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
const HELPER = [
|
||||||
|
"function normalizeLegacyToolSchema(schema) {",
|
||||||
|
" if (Array.isArray(schema)) return schema.map((item) => normalizeLegacyToolSchema(item));",
|
||||||
|
' if (!schema || typeof schema !== "object") return schema;',
|
||||||
|
" const normalized = {};",
|
||||||
|
" for (const [key, value] of Object.entries(schema)) {",
|
||||||
|
' if (key === "const") {',
|
||||||
|
" normalized.enum = [value];",
|
||||||
|
" continue;",
|
||||||
|
" }",
|
||||||
|
" normalized[key] = normalizeLegacyToolSchema(value);",
|
||||||
|
" }",
|
||||||
|
" return normalized;",
|
||||||
|
"}",
|
||||||
|
].join("\n");
|
||||||
|
|
||||||
|
const ORIGINAL =
|
||||||
|
' ...(useParameters ? { parameters: tool.parameters } : { parametersJsonSchema: tool.parameters }),';
|
||||||
|
const PATCHED = [
|
||||||
|
" ...(useParameters",
|
||||||
|
" ? { parameters: normalizeLegacyToolSchema(tool.parameters) }",
|
||||||
|
" : { parametersJsonSchema: tool.parameters }),",
|
||||||
|
].join("\n");
|
||||||
|
|
||||||
|
export function patchPiGoogleLegacySchemaSource(source) {
|
||||||
|
let patched = source;
|
||||||
|
|
||||||
|
if (patched.includes("function normalizeLegacyToolSchema(schema) {")) {
|
||||||
|
return patched;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!patched.includes(ORIGINAL)) {
|
||||||
|
return source;
|
||||||
|
}
|
||||||
|
|
||||||
|
patched = patched.replace(ORIGINAL, PATCHED);
|
||||||
|
const marker = "export function convertTools(tools, useParameters = false) {";
|
||||||
|
const markerIndex = patched.indexOf(marker);
|
||||||
|
if (markerIndex === -1) {
|
||||||
|
return source;
|
||||||
|
}
|
||||||
|
|
||||||
|
return `${patched.slice(0, markerIndex)}${HELPER}\n\n${patched.slice(markerIndex)}`;
|
||||||
|
}
|
||||||
2
scripts/lib/pi-subagents-patch.d.mts
Normal file
2
scripts/lib/pi-subagents-patch.d.mts
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
export const PI_SUBAGENTS_PATCH_TARGETS: string[];
|
||||||
|
export function patchPiSubagentsSource(relativePath: string, source: string): string;
|
||||||
184
scripts/lib/pi-subagents-patch.mjs
Normal file
184
scripts/lib/pi-subagents-patch.mjs
Normal file
@@ -0,0 +1,184 @@
|
|||||||
|
export const PI_SUBAGENTS_PATCH_TARGETS = [
|
||||||
|
"index.ts",
|
||||||
|
"agents.ts",
|
||||||
|
"artifacts.ts",
|
||||||
|
"run-history.ts",
|
||||||
|
"skills.ts",
|
||||||
|
"chain-clarify.ts",
|
||||||
|
];
|
||||||
|
|
||||||
|
const RESOLVE_PI_AGENT_DIR_HELPER = [
|
||||||
|
"function resolvePiAgentDir(): string {",
|
||||||
|
' const configured = process.env.PI_CODING_AGENT_DIR?.trim();',
|
||||||
|
' if (!configured) return path.join(os.homedir(), ".pi", "agent");',
|
||||||
|
' return configured.startsWith("~/") ? path.join(os.homedir(), configured.slice(2)) : configured;',
|
||||||
|
"}",
|
||||||
|
].join("\n");
|
||||||
|
|
||||||
|
function injectResolvePiAgentDirHelper(source) {
|
||||||
|
if (source.includes("function resolvePiAgentDir(): string {")) {
|
||||||
|
return source;
|
||||||
|
}
|
||||||
|
|
||||||
|
const lines = source.split("\n");
|
||||||
|
let insertAt = 0;
|
||||||
|
let importSeen = false;
|
||||||
|
let importOpen = false;
|
||||||
|
|
||||||
|
for (let index = 0; index < lines.length; index += 1) {
|
||||||
|
const trimmed = lines[index].trim();
|
||||||
|
if (!importSeen) {
|
||||||
|
if (trimmed === "" || trimmed.startsWith("/**") || trimmed.startsWith("*") || trimmed.startsWith("*/")) {
|
||||||
|
insertAt = index + 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (trimmed.startsWith("import ")) {
|
||||||
|
importSeen = true;
|
||||||
|
importOpen = !trimmed.endsWith(";");
|
||||||
|
insertAt = index + 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (trimmed.startsWith("import ")) {
|
||||||
|
importOpen = !trimmed.endsWith(";");
|
||||||
|
insertAt = index + 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (importOpen) {
|
||||||
|
if (trimmed.endsWith(";")) importOpen = false;
|
||||||
|
insertAt = index + 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (trimmed === "") {
|
||||||
|
insertAt = index + 1;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
insertAt = index;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
return [...lines.slice(0, insertAt), "", RESOLVE_PI_AGENT_DIR_HELPER, "", ...lines.slice(insertAt)].join("\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
function replaceAll(source, from, to) {
|
||||||
|
return source.split(from).join(to);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function patchPiSubagentsSource(relativePath, source) {
|
||||||
|
let patched = source;
|
||||||
|
|
||||||
|
switch (relativePath) {
|
||||||
|
case "index.ts":
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
'const configPath = path.join(os.homedir(), ".pi", "agent", "extensions", "subagent", "config.json");',
|
||||||
|
'const configPath = path.join(resolvePiAgentDir(), "extensions", "subagent", "config.json");',
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
case "agents.ts":
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
'const userDir = path.join(os.homedir(), ".pi", "agent", "agents");',
|
||||||
|
'const userDir = path.join(resolvePiAgentDir(), "agents");',
|
||||||
|
);
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
[
|
||||||
|
'export function discoverAgents(cwd: string, scope: AgentScope): AgentDiscoveryResult {',
|
||||||
|
'\tconst userDirOld = path.join(os.homedir(), ".pi", "agent", "agents");',
|
||||||
|
'\tconst userDirNew = path.join(os.homedir(), ".agents");',
|
||||||
|
].join("\n"),
|
||||||
|
[
|
||||||
|
'export function discoverAgents(cwd: string, scope: AgentScope): AgentDiscoveryResult {',
|
||||||
|
'\tconst userDir = path.join(resolvePiAgentDir(), "agents");',
|
||||||
|
].join("\n"),
|
||||||
|
);
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
[
|
||||||
|
'\tconst userAgentsOld = scope === "project" ? [] : loadAgentsFromDir(userDirOld, "user");',
|
||||||
|
'\tconst userAgentsNew = scope === "project" ? [] : loadAgentsFromDir(userDirNew, "user");',
|
||||||
|
'\tconst userAgents = [...userAgentsOld, ...userAgentsNew];',
|
||||||
|
].join("\n"),
|
||||||
|
'\tconst userAgents = scope === "project" ? [] : loadAgentsFromDir(userDir, "user");',
|
||||||
|
);
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
[
|
||||||
|
'const userDirOld = path.join(os.homedir(), ".pi", "agent", "agents");',
|
||||||
|
'const userDirNew = path.join(os.homedir(), ".agents");',
|
||||||
|
].join("\n"),
|
||||||
|
'const userDir = path.join(resolvePiAgentDir(), "agents");',
|
||||||
|
);
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
[
|
||||||
|
'\tconst user = [',
|
||||||
|
'\t\t...loadAgentsFromDir(userDirOld, "user"),',
|
||||||
|
'\t\t...loadAgentsFromDir(userDirNew, "user"),',
|
||||||
|
'\t];',
|
||||||
|
].join("\n"),
|
||||||
|
'\tconst user = loadAgentsFromDir(userDir, "user");',
|
||||||
|
);
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
[
|
||||||
|
'\tconst chains = [',
|
||||||
|
'\t\t...loadChainsFromDir(userDirOld, "user"),',
|
||||||
|
'\t\t...loadChainsFromDir(userDirNew, "user"),',
|
||||||
|
'\t\t...(projectDir ? loadChainsFromDir(projectDir, "project") : []),',
|
||||||
|
'\t];',
|
||||||
|
].join("\n"),
|
||||||
|
[
|
||||||
|
'\tconst chains = [',
|
||||||
|
'\t\t...loadChainsFromDir(userDir, "user"),',
|
||||||
|
'\t\t...(projectDir ? loadChainsFromDir(projectDir, "project") : []),',
|
||||||
|
'\t];',
|
||||||
|
].join("\n"),
|
||||||
|
);
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
'\tconst userDir = fs.existsSync(userDirNew) ? userDirNew : userDirOld;',
|
||||||
|
'\tconst userDir = path.join(resolvePiAgentDir(), "agents");',
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
case "artifacts.ts":
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
'const sessionsBase = path.join(os.homedir(), ".pi", "agent", "sessions");',
|
||||||
|
'const sessionsBase = path.join(resolvePiAgentDir(), "sessions");',
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
case "run-history.ts":
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
'const HISTORY_PATH = path.join(os.homedir(), ".pi", "agent", "run-history.jsonl");',
|
||||||
|
'const HISTORY_PATH = path.join(resolvePiAgentDir(), "run-history.jsonl");',
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
case "skills.ts":
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
'const AGENT_DIR = path.join(os.homedir(), ".pi", "agent");',
|
||||||
|
"const AGENT_DIR = resolvePiAgentDir();",
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
case "chain-clarify.ts":
|
||||||
|
patched = replaceAll(
|
||||||
|
patched,
|
||||||
|
'const dir = path.join(os.homedir(), ".pi", "agent", "agents");',
|
||||||
|
'const dir = path.join(resolvePiAgentDir(), "agents");',
|
||||||
|
);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
return source;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (patched === source) {
|
||||||
|
return source;
|
||||||
|
}
|
||||||
|
|
||||||
|
return injectResolvePiAgentDirHelper(patched);
|
||||||
|
}
|
||||||
2
scripts/lib/pi-web-access-patch.d.mts
Normal file
2
scripts/lib/pi-web-access-patch.d.mts
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
export const PI_WEB_ACCESS_PATCH_TARGETS: string[];
|
||||||
|
export function patchPiWebAccessSource(relativePath: string, source: string): string;
|
||||||
48
scripts/lib/pi-web-access-patch.mjs
Normal file
48
scripts/lib/pi-web-access-patch.mjs
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
export const PI_WEB_ACCESS_PATCH_TARGETS = [
|
||||||
|
"index.ts",
|
||||||
|
"exa.ts",
|
||||||
|
"gemini-api.ts",
|
||||||
|
"gemini-search.ts",
|
||||||
|
"gemini-web.ts",
|
||||||
|
"github-extract.ts",
|
||||||
|
"perplexity.ts",
|
||||||
|
"video-extract.ts",
|
||||||
|
"youtube-extract.ts",
|
||||||
|
];
|
||||||
|
|
||||||
|
const LEGACY_CONFIG_EXPR = 'join(homedir(), ".pi", "web-search.json")';
|
||||||
|
const PATCHED_CONFIG_EXPR =
|
||||||
|
'process.env.FEYNMAN_WEB_SEARCH_CONFIG ?? process.env.PI_WEB_SEARCH_CONFIG ?? join(homedir(), ".pi", "web-search.json")';
|
||||||
|
|
||||||
|
export function patchPiWebAccessSource(relativePath, source) {
|
||||||
|
let patched = source;
|
||||||
|
let changed = false;
|
||||||
|
|
||||||
|
if (!patched.includes(PATCHED_CONFIG_EXPR)) {
|
||||||
|
patched = patched.split(LEGACY_CONFIG_EXPR).join(PATCHED_CONFIG_EXPR);
|
||||||
|
changed = patched !== source;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (relativePath === "index.ts") {
|
||||||
|
const workflowDefaultOriginal = 'const workflow = resolveWorkflow(params.workflow ?? configWorkflow, ctx?.hasUI !== false);';
|
||||||
|
const workflowDefaultPatched = 'const workflow = resolveWorkflow(params.workflow ?? configWorkflow ?? "none", ctx?.hasUI !== false);';
|
||||||
|
if (patched.includes(workflowDefaultOriginal)) {
|
||||||
|
patched = patched.replace(workflowDefaultOriginal, workflowDefaultPatched);
|
||||||
|
changed = true;
|
||||||
|
}
|
||||||
|
if (patched.includes('summary-review = open curator with auto summary draft (default)')) {
|
||||||
|
patched = patched.replace(
|
||||||
|
'summary-review = open curator with auto summary draft (default)',
|
||||||
|
'summary-review = open curator with auto summary draft (opt-in)',
|
||||||
|
);
|
||||||
|
changed = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (relativePath === "index.ts" && changed) {
|
||||||
|
patched = patched.replace('import { join } from "node:path";', 'import { dirname, join } from "node:path";');
|
||||||
|
patched = patched.replace('const dir = join(homedir(), ".pi");', "const dir = dirname(WEB_SEARCH_CONFIG_PATH);");
|
||||||
|
}
|
||||||
|
|
||||||
|
return patched;
|
||||||
|
}
|
||||||
@@ -1,28 +1,51 @@
|
|||||||
import { spawnSync } from "node:child_process";
|
import { spawnSync } from "node:child_process";
|
||||||
import { existsSync, mkdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
|
import { existsSync, lstatSync, mkdirSync, readFileSync, readlinkSync, rmSync, symlinkSync, writeFileSync } from "node:fs";
|
||||||
import { dirname, resolve } from "node:path";
|
import { createRequire } from "node:module";
|
||||||
|
import { homedir } from "node:os";
|
||||||
|
import { delimiter, dirname, resolve } from "node:path";
|
||||||
import { fileURLToPath } from "node:url";
|
import { fileURLToPath } from "node:url";
|
||||||
import { FEYNMAN_LOGO_HTML } from "../logo.mjs";
|
import { FEYNMAN_LOGO_HTML } from "../logo.mjs";
|
||||||
|
import { patchAlphaHubAuthSource } from "./lib/alpha-hub-auth-patch.mjs";
|
||||||
|
import { patchPiExtensionLoaderSource } from "./lib/pi-extension-loader-patch.mjs";
|
||||||
|
import { patchPiGoogleLegacySchemaSource } from "./lib/pi-google-legacy-schema-patch.mjs";
|
||||||
|
import { PI_WEB_ACCESS_PATCH_TARGETS, patchPiWebAccessSource } from "./lib/pi-web-access-patch.mjs";
|
||||||
|
import { PI_SUBAGENTS_PATCH_TARGETS, patchPiSubagentsSource } from "./lib/pi-subagents-patch.mjs";
|
||||||
|
|
||||||
const here = dirname(fileURLToPath(import.meta.url));
|
const here = dirname(fileURLToPath(import.meta.url));
|
||||||
const appRoot = resolve(here, "..");
|
const appRoot = resolve(here, "..");
|
||||||
|
const feynmanHome = resolve(process.env.FEYNMAN_HOME ?? homedir(), ".feynman");
|
||||||
|
const feynmanNpmPrefix = resolve(feynmanHome, "npm-global");
|
||||||
|
process.env.FEYNMAN_NPM_PREFIX = feynmanNpmPrefix;
|
||||||
|
process.env.NPM_CONFIG_PREFIX = feynmanNpmPrefix;
|
||||||
|
process.env.npm_config_prefix = feynmanNpmPrefix;
|
||||||
|
const appRequire = createRequire(resolve(appRoot, "package.json"));
|
||||||
const isGlobalInstall = process.env.npm_config_global === "true" || process.env.npm_config_location === "global";
|
const isGlobalInstall = process.env.npm_config_global === "true" || process.env.npm_config_location === "global";
|
||||||
|
|
||||||
function findNodeModules() {
|
|
||||||
let dir = appRoot;
|
|
||||||
while (dir !== dirname(dir)) {
|
|
||||||
const nm = resolve(dir, "node_modules");
|
|
||||||
if (existsSync(nm)) return nm;
|
|
||||||
dir = dirname(dir);
|
|
||||||
}
|
|
||||||
return resolve(appRoot, "node_modules");
|
|
||||||
}
|
|
||||||
|
|
||||||
const nodeModules = findNodeModules();
|
|
||||||
|
|
||||||
function findPackageRoot(packageName) {
|
function findPackageRoot(packageName) {
|
||||||
const candidate = resolve(nodeModules, packageName);
|
const segments = packageName.split("/");
|
||||||
if (existsSync(resolve(candidate, "package.json"))) return candidate;
|
let current = appRoot;
|
||||||
|
while (current !== dirname(current)) {
|
||||||
|
for (const candidate of [resolve(current, "node_modules", ...segments), resolve(current, ...segments)]) {
|
||||||
|
if (existsSync(resolve(candidate, "package.json"))) {
|
||||||
|
return candidate;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
current = dirname(current);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const spec of [`${packageName}/dist/index.js`, `${packageName}/dist/cli.js`, packageName]) {
|
||||||
|
try {
|
||||||
|
let current = dirname(appRequire.resolve(spec));
|
||||||
|
while (current !== dirname(current)) {
|
||||||
|
if (existsSync(resolve(current, "package.json"))) {
|
||||||
|
return current;
|
||||||
|
}
|
||||||
|
current = dirname(current);
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -31,17 +54,28 @@ const piTuiRoot = findPackageRoot("@mariozechner/pi-tui");
|
|||||||
const piAiRoot = findPackageRoot("@mariozechner/pi-ai");
|
const piAiRoot = findPackageRoot("@mariozechner/pi-ai");
|
||||||
|
|
||||||
if (!piPackageRoot) {
|
if (!piPackageRoot) {
|
||||||
console.warn("[feynman] pi-coding-agent not found, skipping patches");
|
console.warn("[feynman] pi-coding-agent not found, skipping Pi patches");
|
||||||
process.exit(0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const packageJsonPath = resolve(piPackageRoot, "package.json");
|
const packageJsonPath = piPackageRoot ? resolve(piPackageRoot, "package.json") : null;
|
||||||
const cliPath = resolve(piPackageRoot, "dist", "cli.js");
|
const cliPath = piPackageRoot ? resolve(piPackageRoot, "dist", "cli.js") : null;
|
||||||
const bunCliPath = resolve(piPackageRoot, "dist", "bun", "cli.js");
|
const bunCliPath = piPackageRoot ? resolve(piPackageRoot, "dist", "bun", "cli.js") : null;
|
||||||
const interactiveModePath = resolve(piPackageRoot, "dist", "modes", "interactive", "interactive-mode.js");
|
const interactiveModePath = piPackageRoot ? resolve(piPackageRoot, "dist", "modes", "interactive", "interactive-mode.js") : null;
|
||||||
const interactiveThemePath = resolve(piPackageRoot, "dist", "modes", "interactive", "theme", "theme.js");
|
const interactiveThemePath = piPackageRoot ? resolve(piPackageRoot, "dist", "modes", "interactive", "theme", "theme.js") : null;
|
||||||
|
const extensionLoaderPath = piPackageRoot ? resolve(piPackageRoot, "dist", "core", "extensions", "loader.js") : null;
|
||||||
|
const terminalPath = piTuiRoot ? resolve(piTuiRoot, "dist", "terminal.js") : null;
|
||||||
const editorPath = piTuiRoot ? resolve(piTuiRoot, "dist", "components", "editor.js") : null;
|
const editorPath = piTuiRoot ? resolve(piTuiRoot, "dist", "components", "editor.js") : null;
|
||||||
const workspaceRoot = resolve(appRoot, ".feynman", "npm", "node_modules");
|
const workspaceRoot = resolve(appRoot, ".feynman", "npm", "node_modules");
|
||||||
|
const workspaceExtensionLoaderPath = resolve(
|
||||||
|
workspaceRoot,
|
||||||
|
"@mariozechner",
|
||||||
|
"pi-coding-agent",
|
||||||
|
"dist",
|
||||||
|
"core",
|
||||||
|
"extensions",
|
||||||
|
"loader.js",
|
||||||
|
);
|
||||||
|
const piSubagentsRoot = resolve(workspaceRoot, "pi-subagents");
|
||||||
const webAccessPath = resolve(workspaceRoot, "pi-web-access", "index.ts");
|
const webAccessPath = resolve(workspaceRoot, "pi-web-access", "index.ts");
|
||||||
const sessionSearchIndexerPath = resolve(
|
const sessionSearchIndexerPath = resolve(
|
||||||
workspaceRoot,
|
workspaceRoot,
|
||||||
@@ -54,13 +88,210 @@ const piMemoryPath = resolve(workspaceRoot, "@samfp", "pi-memory", "src", "index
|
|||||||
const settingsPath = resolve(appRoot, ".feynman", "settings.json");
|
const settingsPath = resolve(appRoot, ".feynman", "settings.json");
|
||||||
const workspaceDir = resolve(appRoot, ".feynman", "npm");
|
const workspaceDir = resolve(appRoot, ".feynman", "npm");
|
||||||
const workspacePackageJsonPath = resolve(workspaceDir, "package.json");
|
const workspacePackageJsonPath = resolve(workspaceDir, "package.json");
|
||||||
|
const workspaceManifestPath = resolve(workspaceDir, ".runtime-manifest.json");
|
||||||
const workspaceArchivePath = resolve(appRoot, ".feynman", "runtime-workspace.tgz");
|
const workspaceArchivePath = resolve(appRoot, ".feynman", "runtime-workspace.tgz");
|
||||||
|
const globalNodeModulesRoot = resolve(feynmanNpmPrefix, "lib", "node_modules");
|
||||||
|
const PRUNE_VERSION = 3;
|
||||||
|
const NATIVE_PACKAGE_SPECS = new Set([
|
||||||
|
"@kaiserlich-dev/pi-session-search",
|
||||||
|
"@samfp/pi-memory",
|
||||||
|
]);
|
||||||
|
const FILTERED_INSTALL_OUTPUT_PATTERNS = [
|
||||||
|
/npm warn deprecated node-domexception@1\.0\.0/i,
|
||||||
|
/npm notice/i,
|
||||||
|
/^(added|removed|changed) \d+ packages?( in .+)?$/i,
|
||||||
|
/^\d+ packages are looking for funding$/i,
|
||||||
|
/^run `npm fund` for details$/i,
|
||||||
|
];
|
||||||
|
|
||||||
|
function arraysMatch(left, right) {
|
||||||
|
return left.length === right.length && left.every((value, index) => value === right[index]);
|
||||||
|
}
|
||||||
|
|
||||||
|
function supportsNativePackageSources(version = process.versions.node) {
|
||||||
|
const [major = "0"] = version.replace(/^v/, "").split(".");
|
||||||
|
return (Number.parseInt(major, 10) || 0) <= 24;
|
||||||
|
}
|
||||||
|
|
||||||
|
function createInstallCommand(packageManager, packageSpecs) {
|
||||||
|
switch (packageManager) {
|
||||||
|
case "npm":
|
||||||
|
return [
|
||||||
|
"install",
|
||||||
|
"--global=false",
|
||||||
|
"--location=project",
|
||||||
|
"--prefer-offline",
|
||||||
|
"--no-audit",
|
||||||
|
"--no-fund",
|
||||||
|
"--legacy-peer-deps",
|
||||||
|
"--loglevel",
|
||||||
|
"error",
|
||||||
|
...packageSpecs,
|
||||||
|
];
|
||||||
|
case "pnpm":
|
||||||
|
return ["add", "--prefer-offline", "--reporter", "silent", ...packageSpecs];
|
||||||
|
case "bun":
|
||||||
|
return ["add", "--silent", ...packageSpecs];
|
||||||
|
default:
|
||||||
|
throw new Error(`Unsupported package manager: ${packageManager}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let cachedPackageManager = undefined;
|
||||||
|
|
||||||
|
function resolvePackageManager() {
|
||||||
|
if (cachedPackageManager !== undefined) return cachedPackageManager;
|
||||||
|
|
||||||
|
const requested = process.env.FEYNMAN_PACKAGE_MANAGER?.trim();
|
||||||
|
const candidates = requested ? [requested] : ["npm", "pnpm", "bun"];
|
||||||
|
for (const candidate of candidates) {
|
||||||
|
if (resolveExecutable(candidate)) {
|
||||||
|
cachedPackageManager = candidate;
|
||||||
|
return candidate;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cachedPackageManager = null;
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
function installWorkspacePackages(packageSpecs) {
|
||||||
|
const packageManager = resolvePackageManager();
|
||||||
|
if (!packageManager) {
|
||||||
|
process.stderr.write(
|
||||||
|
"[feynman] no supported package manager found; install npm, pnpm, or bun, or set FEYNMAN_PACKAGE_MANAGER.\n",
|
||||||
|
);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = spawnSync(packageManager, createInstallCommand(packageManager, packageSpecs), {
|
||||||
|
cwd: workspaceDir,
|
||||||
|
stdio: ["ignore", "pipe", "pipe"],
|
||||||
|
timeout: 300000,
|
||||||
|
env: {
|
||||||
|
...process.env,
|
||||||
|
PATH: getPathWithCurrentNode(process.env.PATH),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
for (const stream of [result.stdout, result.stderr]) {
|
||||||
|
if (!stream?.length) continue;
|
||||||
|
for (const line of stream.toString().split(/\r?\n/)) {
|
||||||
|
if (!line.trim()) continue;
|
||||||
|
if (FILTERED_INSTALL_OUTPUT_PATTERNS.some((pattern) => pattern.test(line.trim()))) continue;
|
||||||
|
process.stderr.write(`${line}\n`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result.status !== 0) {
|
||||||
|
process.stderr.write(`[feynman] ${packageManager} failed while setting up bundled packages.\n`);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
function parsePackageName(spec) {
|
function parsePackageName(spec) {
|
||||||
const match = spec.match(/^(@?[^@]+(?:\/[^@]+)?)(?:@.+)?$/);
|
const match = spec.match(/^(@?[^@]+(?:\/[^@]+)?)(?:@.+)?$/);
|
||||||
return match?.[1] ?? spec;
|
return match?.[1] ?? spec;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function filterUnsupportedPackageSpecs(packageSpecs) {
|
||||||
|
if (supportsNativePackageSources()) return packageSpecs;
|
||||||
|
return packageSpecs.filter((spec) => !NATIVE_PACKAGE_SPECS.has(parsePackageName(spec)));
|
||||||
|
}
|
||||||
|
|
||||||
|
function workspaceContainsPackages(packageSpecs) {
|
||||||
|
return packageSpecs.every((spec) => existsSync(resolve(workspaceRoot, parsePackageName(spec))));
|
||||||
|
}
|
||||||
|
|
||||||
|
function workspaceMatchesRuntime(packageSpecs) {
|
||||||
|
if (!existsSync(workspaceManifestPath)) return false;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const manifest = JSON.parse(readFileSync(workspaceManifestPath, "utf8"));
|
||||||
|
if (!Array.isArray(manifest.packageSpecs)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (!arraysMatch(manifest.packageSpecs, packageSpecs)) {
|
||||||
|
if (!(workspaceContainsPackages(packageSpecs) && packageSpecs.every((spec) => manifest.packageSpecs.includes(spec)))) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!supportsNativePackageSources() && workspaceContainsPackages(packageSpecs)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (
|
||||||
|
manifest.nodeAbi !== process.versions.modules ||
|
||||||
|
manifest.platform !== process.platform ||
|
||||||
|
manifest.arch !== process.arch ||
|
||||||
|
manifest.pruneVersion !== PRUNE_VERSION
|
||||||
|
) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return packageSpecs.every((spec) => existsSync(resolve(workspaceRoot, parsePackageName(spec))));
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function writeWorkspaceManifest(packageSpecs) {
|
||||||
|
writeFileSync(
|
||||||
|
workspaceManifestPath,
|
||||||
|
JSON.stringify(
|
||||||
|
{
|
||||||
|
packageSpecs,
|
||||||
|
generatedAt: new Date().toISOString(),
|
||||||
|
nodeAbi: process.versions.modules,
|
||||||
|
nodeVersion: process.version,
|
||||||
|
platform: process.platform,
|
||||||
|
arch: process.arch,
|
||||||
|
pruneVersion: PRUNE_VERSION,
|
||||||
|
},
|
||||||
|
null,
|
||||||
|
2,
|
||||||
|
) + "\n",
|
||||||
|
"utf8",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function ensureParentDir(path) {
|
||||||
|
mkdirSync(dirname(path), { recursive: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
function linkPointsTo(linkPath, targetPath) {
|
||||||
|
try {
|
||||||
|
if (!lstatSync(linkPath).isSymbolicLink()) return false;
|
||||||
|
return resolve(dirname(linkPath), readlinkSync(linkPath)) === targetPath;
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function ensureBundledPackageLinks(packageSpecs) {
|
||||||
|
if (!workspaceMatchesRuntime(packageSpecs)) return;
|
||||||
|
|
||||||
|
for (const spec of packageSpecs) {
|
||||||
|
const packageName = parsePackageName(spec);
|
||||||
|
const sourcePath = resolve(workspaceRoot, packageName);
|
||||||
|
const targetPath = resolve(globalNodeModulesRoot, packageName);
|
||||||
|
if (!existsSync(sourcePath)) continue;
|
||||||
|
if (linkPointsTo(targetPath, sourcePath)) continue;
|
||||||
|
try {
|
||||||
|
if (lstatSync(targetPath).isSymbolicLink()) {
|
||||||
|
rmSync(targetPath, { force: true });
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
if (existsSync(targetPath)) continue;
|
||||||
|
|
||||||
|
ensureParentDir(targetPath);
|
||||||
|
try {
|
||||||
|
symlinkSync(sourcePath, targetPath, process.platform === "win32" ? "junction" : "dir");
|
||||||
|
} catch {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function restorePackagedWorkspace(packageSpecs) {
|
function restorePackagedWorkspace(packageSpecs) {
|
||||||
if (!existsSync(workspaceArchivePath)) return false;
|
if (!existsSync(workspaceArchivePath)) return false;
|
||||||
|
|
||||||
@@ -72,26 +303,18 @@ function restorePackagedWorkspace(packageSpecs) {
|
|||||||
timeout: 300000,
|
timeout: 300000,
|
||||||
});
|
});
|
||||||
|
|
||||||
if (result.status !== 0) {
|
// On Windows, tar may exit non-zero due to symlink creation failures in
|
||||||
if (result.stderr?.length) process.stderr.write(result.stderr);
|
// .bin/ directories. These are non-fatal — check whether the actual
|
||||||
return false;
|
// package directories were extracted successfully.
|
||||||
}
|
const packagesPresent = packageSpecs.every((spec) => existsSync(resolve(workspaceRoot, parsePackageName(spec))));
|
||||||
|
if (packagesPresent) return true;
|
||||||
return packageSpecs.every((spec) => existsSync(resolve(workspaceRoot, parsePackageName(spec))));
|
|
||||||
}
|
|
||||||
|
|
||||||
function refreshPackagedWorkspace(packageSpecs) {
|
|
||||||
const result = spawnSync("npm", ["install", "--prefer-offline", "--no-audit", "--no-fund", "--loglevel", "error", "--prefix", workspaceDir, ...packageSpecs], {
|
|
||||||
stdio: ["ignore", "ignore", "pipe"],
|
|
||||||
timeout: 300000,
|
|
||||||
});
|
|
||||||
|
|
||||||
if (result.status !== 0) {
|
if (result.status !== 0) {
|
||||||
if (result.stderr?.length) process.stderr.write(result.stderr);
|
if (result.stderr?.length) process.stderr.write(result.stderr);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
function resolveExecutable(name, fallbackPaths = []) {
|
function resolveExecutable(name, fallbackPaths = []) {
|
||||||
@@ -99,17 +322,35 @@ function resolveExecutable(name, fallbackPaths = []) {
|
|||||||
if (existsSync(candidate)) return candidate;
|
if (existsSync(candidate)) return candidate;
|
||||||
}
|
}
|
||||||
|
|
||||||
const result = spawnSync("sh", ["-lc", `command -v ${name}`], {
|
const isWindows = process.platform === "win32";
|
||||||
|
const env = {
|
||||||
|
...process.env,
|
||||||
|
PATH: process.env.PATH ?? "",
|
||||||
|
};
|
||||||
|
const result = isWindows
|
||||||
|
? spawnSync("cmd", ["/c", `where ${name}`], {
|
||||||
encoding: "utf8",
|
encoding: "utf8",
|
||||||
stdio: ["ignore", "pipe", "ignore"],
|
stdio: ["ignore", "pipe", "ignore"],
|
||||||
|
env,
|
||||||
|
})
|
||||||
|
: spawnSync("sh", ["-c", `command -v ${name}`], {
|
||||||
|
encoding: "utf8",
|
||||||
|
stdio: ["ignore", "pipe", "ignore"],
|
||||||
|
env,
|
||||||
});
|
});
|
||||||
if (result.status === 0) {
|
if (result.status === 0) {
|
||||||
const resolved = result.stdout.trim();
|
const resolved = result.stdout.trim().split(/\r?\n/)[0];
|
||||||
if (resolved) return resolved;
|
if (resolved) return resolved;
|
||||||
}
|
}
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function getPathWithCurrentNode(pathValue = process.env.PATH ?? "") {
|
||||||
|
const nodeDir = dirname(process.execPath);
|
||||||
|
const parts = pathValue.split(delimiter).filter(Boolean);
|
||||||
|
return parts.includes(nodeDir) ? pathValue : `${nodeDir}${delimiter}${pathValue}`;
|
||||||
|
}
|
||||||
|
|
||||||
function ensurePackageWorkspace() {
|
function ensurePackageWorkspace() {
|
||||||
if (!existsSync(settingsPath)) return;
|
if (!existsSync(settingsPath)) return;
|
||||||
|
|
||||||
@@ -119,10 +360,17 @@ function ensurePackageWorkspace() {
|
|||||||
.filter((v) => typeof v === "string" && v.startsWith("npm:"))
|
.filter((v) => typeof v === "string" && v.startsWith("npm:"))
|
||||||
.map((v) => v.slice(4))
|
.map((v) => v.slice(4))
|
||||||
: [];
|
: [];
|
||||||
|
const supportedPackageSpecs = filterUnsupportedPackageSpecs(packageSpecs);
|
||||||
|
|
||||||
if (packageSpecs.length === 0) return;
|
if (supportedPackageSpecs.length === 0) return;
|
||||||
if (existsSync(resolve(workspaceRoot, parsePackageName(packageSpecs[0])))) return;
|
if (workspaceMatchesRuntime(supportedPackageSpecs)) {
|
||||||
if (restorePackagedWorkspace(packageSpecs) && refreshPackagedWorkspace(packageSpecs)) return;
|
ensureBundledPackageLinks(supportedPackageSpecs);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (restorePackagedWorkspace(packageSpecs) && workspaceMatchesRuntime(supportedPackageSpecs)) {
|
||||||
|
ensureBundledPackageLinks(supportedPackageSpecs);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
mkdirSync(workspaceDir, { recursive: true });
|
mkdirSync(workspaceDir, { recursive: true });
|
||||||
writeFileSync(
|
writeFileSync(
|
||||||
@@ -139,19 +387,17 @@ function ensurePackageWorkspace() {
|
|||||||
process.stderr.write(`\r${frames[frame++ % frames.length]} setting up feynman... ${elapsed}s`);
|
process.stderr.write(`\r${frames[frame++ % frames.length]} setting up feynman... ${elapsed}s`);
|
||||||
}, 80);
|
}, 80);
|
||||||
|
|
||||||
const result = spawnSync("npm", ["install", "--prefer-offline", "--no-audit", "--no-fund", "--loglevel", "error", "--prefix", workspaceDir, ...packageSpecs], {
|
const result = installWorkspacePackages(supportedPackageSpecs);
|
||||||
stdio: ["ignore", "ignore", "pipe"],
|
|
||||||
timeout: 300000,
|
|
||||||
});
|
|
||||||
|
|
||||||
clearInterval(spinner);
|
clearInterval(spinner);
|
||||||
const elapsed = Math.round((Date.now() - start) / 1000);
|
const elapsed = Math.round((Date.now() - start) / 1000);
|
||||||
|
|
||||||
if (result.status !== 0) {
|
if (!result) {
|
||||||
process.stderr.write(`\r✗ setup failed (${elapsed}s)\n`);
|
process.stderr.write(`\r✗ setup failed (${elapsed}s)\n`);
|
||||||
if (result.stderr?.length) process.stderr.write(result.stderr);
|
|
||||||
} else {
|
} else {
|
||||||
process.stderr.write(`\r✓ feynman ready (${elapsed}s)\n`);
|
process.stderr.write("\r\x1b[2K");
|
||||||
|
writeWorkspaceManifest(supportedPackageSpecs);
|
||||||
|
ensureBundledPackageLinks(supportedPackageSpecs);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -178,7 +424,20 @@ function ensurePandoc() {
|
|||||||
|
|
||||||
ensurePandoc();
|
ensurePandoc();
|
||||||
|
|
||||||
if (existsSync(packageJsonPath)) {
|
if (existsSync(piSubagentsRoot)) {
|
||||||
|
for (const relativePath of PI_SUBAGENTS_PATCH_TARGETS) {
|
||||||
|
const entryPath = resolve(piSubagentsRoot, relativePath);
|
||||||
|
if (!existsSync(entryPath)) continue;
|
||||||
|
|
||||||
|
const source = readFileSync(entryPath, "utf8");
|
||||||
|
const patched = patchPiSubagentsSource(relativePath, source);
|
||||||
|
if (patched !== source) {
|
||||||
|
writeFileSync(entryPath, patched, "utf8");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (packageJsonPath && existsSync(packageJsonPath)) {
|
||||||
const pkg = JSON.parse(readFileSync(packageJsonPath, "utf8"));
|
const pkg = JSON.parse(readFileSync(packageJsonPath, "utf8"));
|
||||||
if (pkg.piConfig?.name !== "feynman" || pkg.piConfig?.configDir !== ".feynman") {
|
if (pkg.piConfig?.name !== "feynman" || pkg.piConfig?.configDir !== ".feynman") {
|
||||||
pkg.piConfig = {
|
pkg.piConfig = {
|
||||||
@@ -190,18 +449,76 @@ if (existsSync(packageJsonPath)) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for (const entryPath of [cliPath, bunCliPath]) {
|
for (const entryPath of [cliPath, bunCliPath].filter(Boolean)) {
|
||||||
if (!existsSync(entryPath)) {
|
if (!existsSync(entryPath)) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
const cliSource = readFileSync(entryPath, "utf8");
|
let cliSource = readFileSync(entryPath, "utf8");
|
||||||
if (cliSource.includes('process.title = "pi";')) {
|
if (cliSource.includes('process.title = "pi";')) {
|
||||||
writeFileSync(entryPath, cliSource.replace('process.title = "pi";', 'process.title = "feynman";'), "utf8");
|
cliSource = cliSource.replace('process.title = "pi";', 'process.title = "feynman";');
|
||||||
}
|
}
|
||||||
|
const stdinErrorGuard = [
|
||||||
|
"const feynmanHandleStdinError = (error) => {",
|
||||||
|
' if (error && typeof error === "object") {',
|
||||||
|
' const code = "code" in error ? error.code : undefined;',
|
||||||
|
' const syscall = "syscall" in error ? error.syscall : undefined;',
|
||||||
|
' if ((code === "EIO" || code === "EBADF") && syscall === "read") {',
|
||||||
|
" return;",
|
||||||
|
" }",
|
||||||
|
" }",
|
||||||
|
"};",
|
||||||
|
'process.stdin?.on?.("error", feynmanHandleStdinError);',
|
||||||
|
].join("\n");
|
||||||
|
if (!cliSource.includes('process.stdin?.on?.("error", feynmanHandleStdinError);')) {
|
||||||
|
cliSource = cliSource.replace(
|
||||||
|
'process.emitWarning = (() => { });',
|
||||||
|
`process.emitWarning = (() => { });\n${stdinErrorGuard}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
writeFileSync(entryPath, cliSource, "utf8");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (existsSync(interactiveModePath)) {
|
if (terminalPath && existsSync(terminalPath)) {
|
||||||
|
let terminalSource = readFileSync(terminalPath, "utf8");
|
||||||
|
if (!terminalSource.includes("stdinErrorHandler;")) {
|
||||||
|
terminalSource = terminalSource.replace(
|
||||||
|
" stdinBuffer;\n stdinDataHandler;\n",
|
||||||
|
[
|
||||||
|
" stdinBuffer;",
|
||||||
|
" stdinDataHandler;",
|
||||||
|
" stdinErrorHandler = (error) => {",
|
||||||
|
' if ((error?.code === "EIO" || error?.code === "EBADF") && error?.syscall === "read") {',
|
||||||
|
" return;",
|
||||||
|
" }",
|
||||||
|
" };",
|
||||||
|
].join("\n") + "\n",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if (!terminalSource.includes('process.stdin.on("error", this.stdinErrorHandler);')) {
|
||||||
|
terminalSource = terminalSource.replace(
|
||||||
|
' process.stdin.resume();\n',
|
||||||
|
' process.stdin.resume();\n process.stdin.on("error", this.stdinErrorHandler);\n',
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if (!terminalSource.includes(' process.stdin.removeListener("error", this.stdinErrorHandler);')) {
|
||||||
|
terminalSource = terminalSource.replace(
|
||||||
|
' process.stdin.removeListener("data", onData);\n this.inputHandler = previousHandler;\n',
|
||||||
|
[
|
||||||
|
' process.stdin.removeListener("data", onData);',
|
||||||
|
' process.stdin.removeListener("error", this.stdinErrorHandler);',
|
||||||
|
' this.inputHandler = previousHandler;',
|
||||||
|
].join("\n"),
|
||||||
|
);
|
||||||
|
terminalSource = terminalSource.replace(
|
||||||
|
' process.stdin.pause();\n',
|
||||||
|
' process.stdin.removeListener("error", this.stdinErrorHandler);\n process.stdin.pause();\n',
|
||||||
|
);
|
||||||
|
}
|
||||||
|
writeFileSync(terminalPath, terminalSource, "utf8");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (interactiveModePath && existsSync(interactiveModePath)) {
|
||||||
const interactiveModeSource = readFileSync(interactiveModePath, "utf8");
|
const interactiveModeSource = readFileSync(interactiveModePath, "utf8");
|
||||||
if (interactiveModeSource.includes("`π - ${sessionName} - ${cwdBasename}`")) {
|
if (interactiveModeSource.includes("`π - ${sessionName} - ${cwdBasename}`")) {
|
||||||
writeFileSync(
|
writeFileSync(
|
||||||
@@ -214,7 +531,19 @@ if (existsSync(interactiveModePath)) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (existsSync(interactiveThemePath)) {
|
for (const loaderPath of [extensionLoaderPath, workspaceExtensionLoaderPath].filter(Boolean)) {
|
||||||
|
if (!existsSync(loaderPath)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const source = readFileSync(loaderPath, "utf8");
|
||||||
|
const patched = patchPiExtensionLoaderSource(source);
|
||||||
|
if (patched !== source) {
|
||||||
|
writeFileSync(loaderPath, patched, "utf8");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (interactiveThemePath && existsSync(interactiveThemePath)) {
|
||||||
let themeSource = readFileSync(interactiveThemePath, "utf8");
|
let themeSource = readFileSync(interactiveThemePath, "utf8");
|
||||||
const desiredGetEditorTheme = [
|
const desiredGetEditorTheme = [
|
||||||
"export function getEditorTheme() {",
|
"export function getEditorTheme() {",
|
||||||
@@ -389,6 +718,21 @@ if (existsSync(webAccessPath)) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const piWebAccessRoot = resolve(workspaceRoot, "pi-web-access");
|
||||||
|
|
||||||
|
if (existsSync(piWebAccessRoot)) {
|
||||||
|
for (const relativePath of PI_WEB_ACCESS_PATCH_TARGETS) {
|
||||||
|
const entryPath = resolve(piWebAccessRoot, relativePath);
|
||||||
|
if (!existsSync(entryPath)) continue;
|
||||||
|
|
||||||
|
const source = readFileSync(entryPath, "utf8");
|
||||||
|
const patched = patchPiWebAccessSource(relativePath, source);
|
||||||
|
if (patched !== source) {
|
||||||
|
writeFileSync(entryPath, patched, "utf8");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (existsSync(sessionSearchIndexerPath)) {
|
if (existsSync(sessionSearchIndexerPath)) {
|
||||||
const source = readFileSync(sessionSearchIndexerPath, "utf8");
|
const source = readFileSync(sessionSearchIndexerPath, "utf8");
|
||||||
const original = 'const sessionsDir = path.join(os.homedir(), ".pi", "agent", "sessions");';
|
const original = 'const sessionsDir = path.join(os.homedir(), ".pi", "agent", "sessions");';
|
||||||
@@ -400,6 +744,7 @@ if (existsSync(sessionSearchIndexerPath)) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const oauthPagePath = piAiRoot ? resolve(piAiRoot, "dist", "utils", "oauth", "oauth-page.js") : null;
|
const oauthPagePath = piAiRoot ? resolve(piAiRoot, "dist", "utils", "oauth", "oauth-page.js") : null;
|
||||||
|
const googleSharedPath = piAiRoot ? resolve(piAiRoot, "dist", "providers", "google-shared.js") : null;
|
||||||
|
|
||||||
if (oauthPagePath && existsSync(oauthPagePath)) {
|
if (oauthPagePath && existsSync(oauthPagePath)) {
|
||||||
let source = readFileSync(oauthPagePath, "utf8");
|
let source = readFileSync(oauthPagePath, "utf8");
|
||||||
@@ -412,25 +757,24 @@ if (oauthPagePath && existsSync(oauthPagePath)) {
|
|||||||
if (changed) writeFileSync(oauthPagePath, source, "utf8");
|
if (changed) writeFileSync(oauthPagePath, source, "utf8");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (googleSharedPath && existsSync(googleSharedPath)) {
|
||||||
|
const source = readFileSync(googleSharedPath, "utf8");
|
||||||
|
const patched = patchPiGoogleLegacySchemaSource(source);
|
||||||
|
if (patched !== source) {
|
||||||
|
writeFileSync(googleSharedPath, patched, "utf8");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const alphaHubAuthPath = findPackageRoot("@companion-ai/alpha-hub")
|
const alphaHubAuthPath = findPackageRoot("@companion-ai/alpha-hub")
|
||||||
? resolve(findPackageRoot("@companion-ai/alpha-hub"), "src", "lib", "auth.js")
|
? resolve(findPackageRoot("@companion-ai/alpha-hub"), "src", "lib", "auth.js")
|
||||||
: null;
|
: null;
|
||||||
|
|
||||||
if (alphaHubAuthPath && existsSync(alphaHubAuthPath)) {
|
if (alphaHubAuthPath && existsSync(alphaHubAuthPath)) {
|
||||||
let source = readFileSync(alphaHubAuthPath, "utf8");
|
const source = readFileSync(alphaHubAuthPath, "utf8");
|
||||||
const oldSuccess = "'<html><body><h2>Logged in to Alpha Hub</h2><p>You can close this tab.</p></body></html>'";
|
const patched = patchAlphaHubAuthSource(source);
|
||||||
const oldError = "'<html><body><h2>Login failed</h2><p>You can close this tab.</p></body></html>'";
|
if (patched !== source) {
|
||||||
const bodyAttr = `style="font-family:system-ui,sans-serif;text-align:center;padding-top:20vh;background:#050a08;color:#f0f5f2"`;
|
writeFileSync(alphaHubAuthPath, patched, "utf8");
|
||||||
const logo = `<h1 style="font-family:monospace;font-size:48px;color:#34d399;margin:0">feynman</h1>`;
|
|
||||||
const newSuccess = `'<html><body ${bodyAttr}>${logo}<h2 style="color:#34d399;margin-top:16px">Logged in</h2><p style="color:#8aaa9a">You can close this tab.</p></body></html>'`;
|
|
||||||
const newError = `'<html><body ${bodyAttr}>${logo}<h2 style="color:#ef4444;margin-top:16px">Login failed</h2><p style="color:#8aaa9a">You can close this tab.</p></body></html>'`;
|
|
||||||
if (source.includes(oldSuccess)) {
|
|
||||||
source = source.replace(oldSuccess, newSuccess);
|
|
||||||
}
|
}
|
||||||
if (source.includes(oldError)) {
|
|
||||||
source = source.replace(oldError, newError);
|
|
||||||
}
|
|
||||||
writeFileSync(alphaHubAuthPath, source, "utf8");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (existsSync(piMemoryPath)) {
|
if (existsSync(piMemoryPath)) {
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ const workspaceNodeModulesDir = resolve(workspaceDir, "node_modules");
|
|||||||
const manifestPath = resolve(workspaceDir, ".runtime-manifest.json");
|
const manifestPath = resolve(workspaceDir, ".runtime-manifest.json");
|
||||||
const workspacePackageJsonPath = resolve(workspaceDir, "package.json");
|
const workspacePackageJsonPath = resolve(workspaceDir, "package.json");
|
||||||
const workspaceArchivePath = resolve(feynmanDir, "runtime-workspace.tgz");
|
const workspaceArchivePath = resolve(feynmanDir, "runtime-workspace.tgz");
|
||||||
|
const PRUNE_VERSION = 3;
|
||||||
|
|
||||||
function readPackageSpecs() {
|
function readPackageSpecs() {
|
||||||
const settings = JSON.parse(readFileSync(settingsPath, "utf8"));
|
const settings = JSON.parse(readFileSync(settingsPath, "utf8"));
|
||||||
@@ -44,7 +45,8 @@ function workspaceIsCurrent(packageSpecs) {
|
|||||||
if (
|
if (
|
||||||
manifest.nodeAbi !== process.versions.modules ||
|
manifest.nodeAbi !== process.versions.modules ||
|
||||||
manifest.platform !== process.platform ||
|
manifest.platform !== process.platform ||
|
||||||
manifest.arch !== process.arch
|
manifest.arch !== process.arch ||
|
||||||
|
manifest.pruneVersion !== PRUNE_VERSION
|
||||||
) {
|
) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@@ -102,6 +104,7 @@ function writeManifest(packageSpecs) {
|
|||||||
nodeVersion: process.version,
|
nodeVersion: process.version,
|
||||||
platform: process.platform,
|
platform: process.platform,
|
||||||
arch: process.arch,
|
arch: process.arch,
|
||||||
|
pruneVersion: PRUNE_VERSION,
|
||||||
},
|
},
|
||||||
null,
|
null,
|
||||||
2,
|
2,
|
||||||
@@ -110,6 +113,15 @@ function writeManifest(packageSpecs) {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function pruneWorkspace() {
|
||||||
|
const result = spawnSync(process.execPath, [resolve(appRoot, "scripts", "prune-runtime-deps.mjs"), workspaceDir], {
|
||||||
|
stdio: "inherit",
|
||||||
|
});
|
||||||
|
if (result.status !== 0) {
|
||||||
|
process.exit(result.status ?? 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function archiveIsCurrent() {
|
function archiveIsCurrent() {
|
||||||
if (!existsSync(workspaceArchivePath) || !existsSync(manifestPath)) {
|
if (!existsSync(workspaceArchivePath) || !existsSync(manifestPath)) {
|
||||||
return false;
|
return false;
|
||||||
@@ -144,6 +156,7 @@ if (workspaceIsCurrent(packageSpecs)) {
|
|||||||
|
|
||||||
console.log("[feynman] preparing vendored runtime workspace...");
|
console.log("[feynman] preparing vendored runtime workspace...");
|
||||||
prepareWorkspace(packageSpecs);
|
prepareWorkspace(packageSpecs);
|
||||||
|
pruneWorkspace();
|
||||||
writeManifest(packageSpecs);
|
writeManifest(packageSpecs);
|
||||||
createWorkspaceArchive();
|
createWorkspaceArchive();
|
||||||
console.log("[feynman] vendored runtime workspace ready");
|
console.log("[feynman] vendored runtime workspace ready");
|
||||||
|
|||||||
131
scripts/prune-runtime-deps.mjs
Normal file
131
scripts/prune-runtime-deps.mjs
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
import { existsSync, readdirSync, rmSync, statSync } from "node:fs";
|
||||||
|
import { basename, join, resolve } from "node:path";
|
||||||
|
|
||||||
|
const root = resolve(process.argv[2] ?? ".");
|
||||||
|
const nodeModulesDir = resolve(root, "node_modules");
|
||||||
|
|
||||||
|
const STRIP_FILE_PATTERNS = [
|
||||||
|
/\.map$/i,
|
||||||
|
/\.d\.cts$/i,
|
||||||
|
/\.d\.ts$/i,
|
||||||
|
/^README(\..+)?\.md$/i,
|
||||||
|
/^CHANGELOG(\..+)?\.md$/i,
|
||||||
|
];
|
||||||
|
|
||||||
|
function safeStat(path) {
|
||||||
|
try {
|
||||||
|
return statSync(path);
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function removePath(path) {
|
||||||
|
rmSync(path, { recursive: true, force: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
function walkAndPrune(dir) {
|
||||||
|
if (!existsSync(dir)) return;
|
||||||
|
|
||||||
|
for (const entry of readdirSync(dir, { withFileTypes: true })) {
|
||||||
|
const path = join(dir, entry.name);
|
||||||
|
const stats = entry.isSymbolicLink() ? safeStat(path) : null;
|
||||||
|
const isDirectory = entry.isDirectory() || stats?.isDirectory();
|
||||||
|
const isFile = entry.isFile() || stats?.isFile();
|
||||||
|
|
||||||
|
if (isDirectory) {
|
||||||
|
walkAndPrune(path);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (isFile && STRIP_FILE_PATTERNS.some((pattern) => pattern.test(entry.name))) {
|
||||||
|
removePath(path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function currentKoffiVariant() {
|
||||||
|
if (process.platform === "darwin" && process.arch === "arm64") return "darwin_arm64";
|
||||||
|
if (process.platform === "darwin" && process.arch === "x64") return "darwin_x64";
|
||||||
|
if (process.platform === "linux" && process.arch === "arm64") return "linux_arm64";
|
||||||
|
if (process.platform === "linux" && process.arch === "x64") return "linux_x64";
|
||||||
|
if (process.platform === "win32" && process.arch === "arm64") return "win32_arm64";
|
||||||
|
if (process.platform === "win32" && process.arch === "x64") return "win32_x64";
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
function pruneKoffi(nodeModulesRoot) {
|
||||||
|
const koffiRoot = join(nodeModulesRoot, "koffi");
|
||||||
|
if (!existsSync(koffiRoot)) return;
|
||||||
|
|
||||||
|
for (const dirName of ["doc", "src", "vendor"]) {
|
||||||
|
removePath(join(koffiRoot, dirName));
|
||||||
|
}
|
||||||
|
|
||||||
|
const buildRoot = join(koffiRoot, "build", "koffi");
|
||||||
|
if (!existsSync(buildRoot)) return;
|
||||||
|
|
||||||
|
const keep = currentKoffiVariant();
|
||||||
|
for (const entry of readdirSync(buildRoot, { withFileTypes: true })) {
|
||||||
|
if (entry.name === keep) continue;
|
||||||
|
removePath(join(buildRoot, entry.name));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function pruneBetterSqlite3(nodeModulesRoot) {
|
||||||
|
const pkgRoot = join(nodeModulesRoot, "better-sqlite3");
|
||||||
|
if (!existsSync(pkgRoot)) return;
|
||||||
|
|
||||||
|
removePath(join(pkgRoot, "deps"));
|
||||||
|
removePath(join(pkgRoot, "src"));
|
||||||
|
removePath(join(pkgRoot, "binding.gyp"));
|
||||||
|
|
||||||
|
const buildRoot = join(pkgRoot, "build");
|
||||||
|
const releaseRoot = join(buildRoot, "Release");
|
||||||
|
if (existsSync(releaseRoot)) {
|
||||||
|
for (const entry of readdirSync(releaseRoot, { withFileTypes: true })) {
|
||||||
|
if (entry.name === "better_sqlite3.node") continue;
|
||||||
|
removePath(join(releaseRoot, entry.name));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const entry of ["Makefile", "binding.Makefile", "config.gypi", "deps", "gyp-mac-tool", "test_extension.target.mk", "better_sqlite3.target.mk"]) {
|
||||||
|
removePath(join(buildRoot, entry));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function pruneLiteparse(nodeModulesRoot) {
|
||||||
|
const pkgRoot = join(nodeModulesRoot, "@llamaindex", "liteparse");
|
||||||
|
if (!existsSync(pkgRoot)) return;
|
||||||
|
if (existsSync(join(pkgRoot, "dist"))) {
|
||||||
|
removePath(join(pkgRoot, "src"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function prunePiCodingAgent(nodeModulesRoot) {
|
||||||
|
const pkgRoot = join(nodeModulesRoot, "@mariozechner", "pi-coding-agent");
|
||||||
|
if (!existsSync(pkgRoot)) return;
|
||||||
|
removePath(join(pkgRoot, "docs"));
|
||||||
|
removePath(join(pkgRoot, "examples"));
|
||||||
|
}
|
||||||
|
|
||||||
|
function pruneMermaid(nodeModulesRoot) {
|
||||||
|
const pkgRoot = join(nodeModulesRoot, "mermaid", "dist");
|
||||||
|
if (!existsSync(pkgRoot)) return;
|
||||||
|
removePath(join(pkgRoot, "docs"));
|
||||||
|
removePath(join(pkgRoot, "tests"));
|
||||||
|
removePath(join(pkgRoot, "__mocks__"));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!existsSync(nodeModulesDir)) {
|
||||||
|
process.exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
walkAndPrune(nodeModulesDir);
|
||||||
|
pruneKoffi(nodeModulesDir);
|
||||||
|
pruneBetterSqlite3(nodeModulesDir);
|
||||||
|
pruneLiteparse(nodeModulesDir);
|
||||||
|
prunePiCodingAgent(nodeModulesDir);
|
||||||
|
pruneMermaid(nodeModulesDir);
|
||||||
|
|
||||||
|
console.log(`[feynman] pruned runtime deps in ${basename(root)}`);
|
||||||
@@ -7,5 +7,7 @@ const websitePublicDir = resolve(appRoot, "website", "public");
|
|||||||
mkdirSync(websitePublicDir, { recursive: true });
|
mkdirSync(websitePublicDir, { recursive: true });
|
||||||
cpSync(resolve(appRoot, "scripts", "install", "install.sh"), resolve(websitePublicDir, "install"));
|
cpSync(resolve(appRoot, "scripts", "install", "install.sh"), resolve(websitePublicDir, "install"));
|
||||||
cpSync(resolve(appRoot, "scripts", "install", "install.ps1"), resolve(websitePublicDir, "install.ps1"));
|
cpSync(resolve(appRoot, "scripts", "install", "install.ps1"), resolve(websitePublicDir, "install.ps1"));
|
||||||
|
cpSync(resolve(appRoot, "scripts", "install", "install-skills.sh"), resolve(websitePublicDir, "install-skills"));
|
||||||
|
cpSync(resolve(appRoot, "scripts", "install", "install-skills.ps1"), resolve(websitePublicDir, "install-skills.ps1"));
|
||||||
|
|
||||||
console.log("[feynman] synced website installers");
|
console.log("[feynman] synced website installers");
|
||||||
|
|||||||
42
skills/alpha-research/SKILL.md
Normal file
42
skills/alpha-research/SKILL.md
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
---
|
||||||
|
name: alpha-research
|
||||||
|
description: Search, read, and query research papers via the `alpha` CLI (alphaXiv-backed). Use when the user asks about academic papers, wants to find research on a topic, needs to read a specific paper, ask questions about a paper, inspect a paper's code repository, or manage paper annotations.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Alpha Research CLI
|
||||||
|
|
||||||
|
Use the `alpha` CLI via bash for all paper research operations.
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
|
| Command | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `alpha search "<query>"` | Search papers. Prefer `--mode semantic` by default; use `--mode keyword` only for exact-term lookup and `--mode agentic` for broader retrieval. |
|
||||||
|
| `alpha get <arxiv-id-or-url>` | Fetch paper content and any local annotation |
|
||||||
|
| `alpha get --full-text <arxiv-id>` | Get raw full text instead of AI report |
|
||||||
|
| `alpha ask <arxiv-id> "<question>"` | Ask a question about a paper's PDF |
|
||||||
|
| `alpha code <github-url> [path]` | Read files from a paper's GitHub repo. Use `/` for overview |
|
||||||
|
| `alpha annotate <paper-id> "<note>"` | Save a persistent annotation on a paper |
|
||||||
|
| `alpha annotate --clear <paper-id>` | Remove an annotation |
|
||||||
|
| `alpha annotate --list` | List all annotations |
|
||||||
|
|
||||||
|
## Auth
|
||||||
|
|
||||||
|
Run `alpha login` to authenticate with alphaXiv. Check status with `feynman alpha status`, or `alpha status` once your installed `alpha-hub` version includes it.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
```bash
|
||||||
|
alpha search "transformer scaling laws"
|
||||||
|
alpha search --mode agentic "efficient attention mechanisms for long context"
|
||||||
|
alpha get 2106.09685
|
||||||
|
alpha ask 2106.09685 "What optimizer did they use?"
|
||||||
|
alpha code https://github.com/karpathy/nanoGPT src/model.py
|
||||||
|
alpha annotate 2106.09685 "Key paper on LoRA - revisit for adapter comparison"
|
||||||
|
```
|
||||||
|
|
||||||
|
## When to use
|
||||||
|
|
||||||
|
- Academic paper search, reading, Q&A → `alpha`
|
||||||
|
- Current topics (products, releases, docs) → web search tools
|
||||||
|
- Mixed topics → combine both
|
||||||
@@ -5,7 +5,7 @@ description: Autonomous experiment loop that tries ideas, measures results, keep
|
|||||||
|
|
||||||
# Autoresearch
|
# Autoresearch
|
||||||
|
|
||||||
Run the `/autoresearch` workflow. Read the prompt template at `prompts/autoresearch.md` for the full procedure.
|
Run the `/autoresearch` workflow. Read the prompt template at `../prompts/autoresearch.md` for the full procedure.
|
||||||
|
|
||||||
Tools used: `init_experiment`, `run_experiment`, `log_experiment` (from pi-autoresearch)
|
Tools used: `init_experiment`, `run_experiment`, `log_experiment` (from pi-autoresearch)
|
||||||
|
|
||||||
|
|||||||
28
skills/contributing/SKILL.md
Normal file
28
skills/contributing/SKILL.md
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
name: contributing
|
||||||
|
description: Contribute changes to the Feynman repository itself. Use when the task is to add features, fix bugs, update prompts or skills, change install or release behavior, improve docs, or prepare a focused PR against this repo.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Contributing
|
||||||
|
|
||||||
|
Read `../CONTRIBUTING.md` first, then `../AGENTS.md` for repo-level agent conventions.
|
||||||
|
|
||||||
|
Use this skill when working on Feynman itself, especially for:
|
||||||
|
|
||||||
|
- CLI or runtime changes in `src/`
|
||||||
|
- prompt changes in `prompts/`
|
||||||
|
- bundled skill changes in `skills/`
|
||||||
|
- subagent behavior changes in `.feynman/agents/`
|
||||||
|
- install, packaging, or release changes in `scripts/`, `README.md`, or website docs
|
||||||
|
|
||||||
|
Minimum local checks before claiming the repo change is done:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm test
|
||||||
|
npm run typecheck
|
||||||
|
npm run build
|
||||||
|
```
|
||||||
|
|
||||||
|
If the docs site changed, also validate `website/`.
|
||||||
|
|
||||||
|
When changing release-sensitive behavior, verify that `.nvmrc`, package `engines`, runtime guards, and install docs stay aligned.
|
||||||
@@ -5,7 +5,7 @@ description: Run a thorough, source-heavy investigation on any topic. Use when t
|
|||||||
|
|
||||||
# Deep Research
|
# Deep Research
|
||||||
|
|
||||||
Run the `/deepresearch` workflow. Read the prompt template at `prompts/deepresearch.md` for the full procedure.
|
Run the `/deepresearch` workflow. Read the prompt template at `../prompts/deepresearch.md` for the full procedure.
|
||||||
|
|
||||||
Agents used: `researcher`, `verifier`, `reviewer`
|
Agents used: `researcher`, `verifier`, `reviewer`
|
||||||
|
|
||||||
|
|||||||
25
skills/eli5/SKILL.md
Normal file
25
skills/eli5/SKILL.md
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
---
|
||||||
|
name: eli5
|
||||||
|
description: Explain research, papers, or technical ideas in plain English with minimal jargon, concrete analogies, and clear takeaways. Use when the user says "ELI5 this", asks for a simple explanation of a paper or research result, wants jargon removed, or asks what something technically dense actually means.
|
||||||
|
---
|
||||||
|
|
||||||
|
# ELI5
|
||||||
|
|
||||||
|
Use `alpha` first when the user names a specific paper, arXiv id, DOI, or paper URL.
|
||||||
|
|
||||||
|
If the user gives only a topic, identify 1-3 representative papers and anchor the explanation around the clearest or most important one.
|
||||||
|
|
||||||
|
Structure the answer with:
|
||||||
|
- `One-Sentence Summary`
|
||||||
|
- `Big Idea`
|
||||||
|
- `How It Works`
|
||||||
|
- `Why It Matters`
|
||||||
|
- `What To Be Skeptical Of`
|
||||||
|
- `If You Remember 3 Things`
|
||||||
|
|
||||||
|
Guidelines:
|
||||||
|
- Use short sentences and concrete words.
|
||||||
|
- Define jargon immediately or remove it.
|
||||||
|
- Prefer one good analogy over several weak ones.
|
||||||
|
- Separate what the paper actually shows from speculation or interpretation.
|
||||||
|
- Keep the explanation inline unless the user explicitly asks to save it as an artifact.
|
||||||
@@ -5,6 +5,6 @@ description: Inspect active background research work including running processes
|
|||||||
|
|
||||||
# Jobs
|
# Jobs
|
||||||
|
|
||||||
Run the `/jobs` workflow. Read the prompt template at `prompts/jobs.md` for the full procedure.
|
Run the `/jobs` workflow. Read the prompt template at `../prompts/jobs.md` for the full procedure.
|
||||||
|
|
||||||
Shows active `pi-processes`, scheduled `pi-schedule-prompt` entries, and running subagent tasks.
|
Shows active `pi-processes`, scheduled `pi-schedule-prompt` entries, and running subagent tasks.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ description: Run a literature review using paper search and primary-source synth
|
|||||||
|
|
||||||
# Literature Review
|
# Literature Review
|
||||||
|
|
||||||
Run the `/lit` workflow. Read the prompt template at `prompts/lit.md` for the full procedure.
|
Run the `/lit` workflow. Read the prompt template at `../prompts/lit.md` for the full procedure.
|
||||||
|
|
||||||
Agents used: `researcher`, `verifier`, `reviewer`
|
Agents used: `researcher`, `verifier`, `reviewer`
|
||||||
|
|
||||||
|
|||||||
56
skills/modal-compute/SKILL.md
Normal file
56
skills/modal-compute/SKILL.md
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
---
|
||||||
|
name: modal-compute
|
||||||
|
description: Run GPU workloads on Modal's serverless infrastructure. Use when the user needs remote GPU compute for training, inference, benchmarks, or batch processing and Modal CLI is available.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Modal Compute
|
||||||
|
|
||||||
|
Use the `modal` CLI for serverless GPU workloads. No pod lifecycle to manage — write a decorated Python script and run it.
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install modal
|
||||||
|
modal setup
|
||||||
|
```
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
|
| Command | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `modal run script.py` | Run a script on Modal (ephemeral) |
|
||||||
|
| `modal run --detach script.py` | Run detached (background) |
|
||||||
|
| `modal deploy script.py` | Deploy persistently |
|
||||||
|
| `modal serve script.py` | Serve with hot-reload (dev) |
|
||||||
|
| `modal shell --gpu a100` | Interactive shell with GPU |
|
||||||
|
| `modal app list` | List deployed apps |
|
||||||
|
|
||||||
|
## GPU types
|
||||||
|
|
||||||
|
`T4`, `L4`, `A10G`, `L40S`, `A100`, `A100-80GB`, `H100`, `H200`, `B200`
|
||||||
|
|
||||||
|
Multi-GPU: `"H100:4"` for 4x H100s.
|
||||||
|
|
||||||
|
## Script pattern
|
||||||
|
|
||||||
|
```python
|
||||||
|
import modal
|
||||||
|
|
||||||
|
app = modal.App("experiment")
|
||||||
|
image = modal.Image.debian_slim(python_version="3.11").pip_install("torch==2.8.0")
|
||||||
|
|
||||||
|
@app.function(gpu="A100", image=image, timeout=600)
|
||||||
|
def train():
|
||||||
|
import torch
|
||||||
|
# training code here
|
||||||
|
|
||||||
|
@app.local_entrypoint()
|
||||||
|
def main():
|
||||||
|
train.remote()
|
||||||
|
```
|
||||||
|
|
||||||
|
## When to use
|
||||||
|
|
||||||
|
- Stateless burst GPU jobs (training, inference, benchmarks)
|
||||||
|
- No persistent state needed between runs
|
||||||
|
- Check availability: `command -v modal`
|
||||||
@@ -5,7 +5,7 @@ description: Compare a paper's claims against its public codebase. Use when the
|
|||||||
|
|
||||||
# Paper-Code Audit
|
# Paper-Code Audit
|
||||||
|
|
||||||
Run the `/audit` workflow. Read the prompt template at `prompts/audit.md` for the full procedure.
|
Run the `/audit` workflow. Read the prompt template at `../prompts/audit.md` for the full procedure.
|
||||||
|
|
||||||
Agents used: `researcher`, `verifier`
|
Agents used: `researcher`, `verifier`
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ description: Turn research findings into a polished paper-style draft with secti
|
|||||||
|
|
||||||
# Paper Writing
|
# Paper Writing
|
||||||
|
|
||||||
Run the `/draft` workflow. Read the prompt template at `prompts/draft.md` for the full procedure.
|
Run the `/draft` workflow. Read the prompt template at `../prompts/draft.md` for the full procedure.
|
||||||
|
|
||||||
Agents used: `writer`, `verifier`
|
Agents used: `writer`, `verifier`
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ description: Simulate a tough but constructive peer review of an AI research art
|
|||||||
|
|
||||||
# Peer Review
|
# Peer Review
|
||||||
|
|
||||||
Run the `/review` workflow. Read the prompt template at `prompts/review.md` for the full procedure.
|
Run the `/review` workflow. Read the prompt template at `../prompts/review.md` for the full procedure.
|
||||||
|
|
||||||
Agents used: `researcher`, `reviewer`
|
Agents used: `researcher`, `reviewer`
|
||||||
|
|
||||||
|
|||||||
27
skills/preview/SKILL.md
Normal file
27
skills/preview/SKILL.md
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
name: preview
|
||||||
|
description: Preview Markdown, LaTeX, PDF, or code artifacts in the browser or as PDF. Use when the user wants to review a written artifact, export a report, or view a rendered document.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Preview
|
||||||
|
|
||||||
|
Use the `/preview` command to render and open artifacts.
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
|
| Command | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `/preview` | Preview the most recent artifact in the browser |
|
||||||
|
| `/preview --file <path>` | Preview a specific file |
|
||||||
|
| `/preview-browser` | Force browser preview |
|
||||||
|
| `/preview-pdf` | Export to PDF via pandoc + LaTeX |
|
||||||
|
| `/preview-clear-cache` | Clear rendered preview cache |
|
||||||
|
|
||||||
|
## Fallback
|
||||||
|
|
||||||
|
If the preview commands are not available, use bash:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
open <file.md> # macOS — opens in default app
|
||||||
|
open <file.pdf> # macOS — opens in Preview
|
||||||
|
```
|
||||||
@@ -5,7 +5,7 @@ description: Plan or execute a replication of a paper, claim, or benchmark. Use
|
|||||||
|
|
||||||
# Replication
|
# Replication
|
||||||
|
|
||||||
Run the `/replicate` workflow. Read the prompt template at `prompts/replicate.md` for the full procedure.
|
Run the `/replicate` workflow. Read the prompt template at `../prompts/replicate.md` for the full procedure.
|
||||||
|
|
||||||
Agents used: `researcher`
|
Agents used: `researcher`
|
||||||
|
|
||||||
|
|||||||
48
skills/runpod-compute/SKILL.md
Normal file
48
skills/runpod-compute/SKILL.md
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
---
|
||||||
|
name: runpod-compute
|
||||||
|
description: Provision and manage GPU pods on RunPod for long-running experiments. Use when the user needs persistent GPU compute with SSH access, large datasets, or multi-step experiments.
|
||||||
|
---
|
||||||
|
|
||||||
|
# RunPod Compute
|
||||||
|
|
||||||
|
Use `runpodctl` CLI for persistent GPU pods with SSH access.
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
```bash
|
||||||
|
brew install runpod/runpodctl/runpodctl # macOS
|
||||||
|
runpodctl config --apiKey=YOUR_KEY
|
||||||
|
```
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
|
| Command | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `runpodctl create pod --gpuType "NVIDIA A100 80GB PCIe" --imageName "runpod/pytorch:2.4.0-py3.11-cuda12.4.1-devel-ubuntu22.04" --name experiment` | Create a pod |
|
||||||
|
| `runpodctl get pod` | List all pods |
|
||||||
|
| `runpodctl stop pod <id>` | Stop (preserves volume) |
|
||||||
|
| `runpodctl start pod <id>` | Resume a stopped pod |
|
||||||
|
| `runpodctl remove pod <id>` | Terminate and delete |
|
||||||
|
| `runpodctl gpu list` | List available GPU types and prices |
|
||||||
|
| `runpodctl send <file>` | Transfer files to/from pods |
|
||||||
|
| `runpodctl receive <code>` | Receive transferred files |
|
||||||
|
|
||||||
|
## SSH access
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh root@<IP> -p <PORT> -i ~/.ssh/id_ed25519
|
||||||
|
```
|
||||||
|
|
||||||
|
Get connection details from `runpodctl get pod <id>`. Pods must expose port `22/tcp`.
|
||||||
|
|
||||||
|
## GPU types
|
||||||
|
|
||||||
|
`NVIDIA GeForce RTX 4090`, `NVIDIA RTX A6000`, `NVIDIA A40`, `NVIDIA A100 80GB PCIe`, `NVIDIA H100 80GB HBM3`
|
||||||
|
|
||||||
|
## When to use
|
||||||
|
|
||||||
|
- Long-running experiments needing persistent state
|
||||||
|
- Large dataset processing
|
||||||
|
- Multi-step work with SSH access between iterations
|
||||||
|
- Always stop or remove pods after experiments
|
||||||
|
- Check availability: `command -v runpodctl`
|
||||||
@@ -5,6 +5,6 @@ description: Write a durable session log capturing completed work, findings, ope
|
|||||||
|
|
||||||
# Session Log
|
# Session Log
|
||||||
|
|
||||||
Run the `/log` workflow. Read the prompt template at `prompts/log.md` for the full procedure.
|
Run the `/log` workflow. Read the prompt template at `../prompts/log.md` for the full procedure.
|
||||||
|
|
||||||
Output: session log in `notes/session-logs/`.
|
Output: session log in `notes/session-logs/`.
|
||||||
|
|||||||
26
skills/session-search/SKILL.md
Normal file
26
skills/session-search/SKILL.md
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
---
|
||||||
|
name: session-search
|
||||||
|
description: Search past Feynman session transcripts to recover prior work, conversations, and research context. Use when the user references something from a previous session, asks "what did we do before", or when you suspect relevant past context exists.
|
||||||
|
---
|
||||||
|
|
||||||
|
# Session Search
|
||||||
|
|
||||||
|
Use the `/search` command to search prior Feynman sessions interactively, or search session JSONL files directly via bash.
|
||||||
|
|
||||||
|
## Interactive search
|
||||||
|
|
||||||
|
```
|
||||||
|
/search <query>
|
||||||
|
```
|
||||||
|
|
||||||
|
Opens the session search UI. Supports `resume <sessionPath>` to continue a found session.
|
||||||
|
|
||||||
|
## Direct file search
|
||||||
|
|
||||||
|
Session transcripts are stored as JSONL files in `~/.feynman/sessions/`. Each line is a JSON record with `type` (session, message, model_change) and `message.content` fields.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
grep -ril "scaling laws" ~/.feynman/sessions/
|
||||||
|
```
|
||||||
|
|
||||||
|
For structured search across sessions, use the interactive `/search` command.
|
||||||
@@ -5,7 +5,7 @@ description: Compare multiple sources on a topic and produce a grounded comparis
|
|||||||
|
|
||||||
# Source Comparison
|
# Source Comparison
|
||||||
|
|
||||||
Run the `/compare` workflow. Read the prompt template at `prompts/compare.md` for the full procedure.
|
Run the `/compare` workflow. Read the prompt template at `../prompts/compare.md` for the full procedure.
|
||||||
|
|
||||||
Agents used: `researcher`, `verifier`
|
Agents used: `researcher`, `verifier`
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ description: Set up a recurring research watch on a topic, company, paper area,
|
|||||||
|
|
||||||
# Watch
|
# Watch
|
||||||
|
|
||||||
Run the `/watch` workflow. Read the prompt template at `prompts/watch.md` for the full procedure.
|
Run the `/watch` workflow. Read the prompt template at `../prompts/watch.md` for the full procedure.
|
||||||
|
|
||||||
Agents used: `researcher`
|
Agents used: `researcher`
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { createHash } from "node:crypto";
|
import { createHash } from "node:crypto";
|
||||||
import { existsSync, mkdirSync, readdirSync, readFileSync, writeFileSync } from "node:fs";
|
import { existsSync, mkdirSync, readdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
|
||||||
import { dirname, relative, resolve } from "node:path";
|
import { dirname, relative, resolve } from "node:path";
|
||||||
|
|
||||||
import { getBootstrapStatePath } from "../config/paths.js";
|
import { getBootstrapStatePath } from "../config/paths.js";
|
||||||
@@ -64,27 +64,76 @@ function listFiles(root: string): string[] {
|
|||||||
return files.sort();
|
return files.sort();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function removeEmptyParentDirectories(path: string, stopAt: string): void {
|
||||||
|
let current = dirname(path);
|
||||||
|
while (current.startsWith(stopAt) && current !== stopAt) {
|
||||||
|
if (!existsSync(current)) {
|
||||||
|
current = dirname(current);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (readdirSync(current).length > 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
rmSync(current, { recursive: true, force: true });
|
||||||
|
current = dirname(current);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function syncManagedFiles(
|
function syncManagedFiles(
|
||||||
sourceRoot: string,
|
sourceRoot: string,
|
||||||
targetRoot: string,
|
targetRoot: string,
|
||||||
|
scope: string,
|
||||||
state: BootstrapState,
|
state: BootstrapState,
|
||||||
result: BootstrapSyncResult,
|
result: BootstrapSyncResult,
|
||||||
): void {
|
): void {
|
||||||
|
const sourcePaths = new Set(listFiles(sourceRoot).map((sourcePath) => relative(sourceRoot, sourcePath)));
|
||||||
|
|
||||||
|
for (const targetPath of listFiles(targetRoot)) {
|
||||||
|
const key = relative(targetRoot, targetPath);
|
||||||
|
if (sourcePaths.has(key)) continue;
|
||||||
|
|
||||||
|
const scopedKey = `${scope}:${key}`;
|
||||||
|
const previous = state.files[scopedKey] ?? state.files[key];
|
||||||
|
if (!previous) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!existsSync(targetPath)) {
|
||||||
|
delete state.files[scopedKey];
|
||||||
|
delete state.files[key];
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const currentTargetText = readFileSync(targetPath, "utf8");
|
||||||
|
const currentTargetHash = sha256(currentTargetText);
|
||||||
|
if (currentTargetHash !== previous.lastAppliedTargetHash) {
|
||||||
|
result.skipped.push(key);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
rmSync(targetPath, { force: true });
|
||||||
|
removeEmptyParentDirectories(targetPath, targetRoot);
|
||||||
|
delete state.files[scopedKey];
|
||||||
|
delete state.files[key];
|
||||||
|
}
|
||||||
|
|
||||||
for (const sourcePath of listFiles(sourceRoot)) {
|
for (const sourcePath of listFiles(sourceRoot)) {
|
||||||
const key = relative(sourceRoot, sourcePath);
|
const key = relative(sourceRoot, sourcePath);
|
||||||
const targetPath = resolve(targetRoot, key);
|
const targetPath = resolve(targetRoot, key);
|
||||||
const sourceText = readFileSync(sourcePath, "utf8");
|
const sourceText = readFileSync(sourcePath, "utf8");
|
||||||
const sourceHash = sha256(sourceText);
|
const sourceHash = sha256(sourceText);
|
||||||
const previous = state.files[key];
|
const scopedKey = `${scope}:${key}`;
|
||||||
|
const previous = state.files[scopedKey] ?? state.files[key];
|
||||||
|
|
||||||
mkdirSync(dirname(targetPath), { recursive: true });
|
mkdirSync(dirname(targetPath), { recursive: true });
|
||||||
|
|
||||||
if (!existsSync(targetPath)) {
|
if (!existsSync(targetPath)) {
|
||||||
writeFileSync(targetPath, sourceText, "utf8");
|
writeFileSync(targetPath, sourceText, "utf8");
|
||||||
state.files[key] = {
|
state.files[scopedKey] = {
|
||||||
lastAppliedSourceHash: sourceHash,
|
lastAppliedSourceHash: sourceHash,
|
||||||
lastAppliedTargetHash: sourceHash,
|
lastAppliedTargetHash: sourceHash,
|
||||||
};
|
};
|
||||||
|
delete state.files[key];
|
||||||
result.copied.push(key);
|
result.copied.push(key);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
@@ -93,10 +142,11 @@ function syncManagedFiles(
|
|||||||
const currentTargetHash = sha256(currentTargetText);
|
const currentTargetHash = sha256(currentTargetText);
|
||||||
|
|
||||||
if (currentTargetHash === sourceHash) {
|
if (currentTargetHash === sourceHash) {
|
||||||
state.files[key] = {
|
state.files[scopedKey] = {
|
||||||
lastAppliedSourceHash: sourceHash,
|
lastAppliedSourceHash: sourceHash,
|
||||||
lastAppliedTargetHash: currentTargetHash,
|
lastAppliedTargetHash: currentTargetHash,
|
||||||
};
|
};
|
||||||
|
delete state.files[key];
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -111,10 +161,11 @@ function syncManagedFiles(
|
|||||||
}
|
}
|
||||||
|
|
||||||
writeFileSync(targetPath, sourceText, "utf8");
|
writeFileSync(targetPath, sourceText, "utf8");
|
||||||
state.files[key] = {
|
state.files[scopedKey] = {
|
||||||
lastAppliedSourceHash: sourceHash,
|
lastAppliedSourceHash: sourceHash,
|
||||||
lastAppliedTargetHash: sourceHash,
|
lastAppliedTargetHash: sourceHash,
|
||||||
};
|
};
|
||||||
|
delete state.files[key];
|
||||||
result.updated.push(key);
|
result.updated.push(key);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -128,8 +179,9 @@ export function syncBundledAssets(appRoot: string, agentDir: string): BootstrapS
|
|||||||
skipped: [],
|
skipped: [],
|
||||||
};
|
};
|
||||||
|
|
||||||
syncManagedFiles(resolve(appRoot, ".feynman", "themes"), resolve(agentDir, "themes"), state, result);
|
syncManagedFiles(resolve(appRoot, ".feynman", "themes"), resolve(agentDir, "themes"), "themes", state, result);
|
||||||
syncManagedFiles(resolve(appRoot, ".feynman", "agents"), resolve(agentDir, "agents"), state, result);
|
syncManagedFiles(resolve(appRoot, ".feynman", "agents"), resolve(agentDir, "agents"), "agents", state, result);
|
||||||
|
syncManagedFiles(resolve(appRoot, "skills"), resolve(agentDir, "skills"), "skills", state, result);
|
||||||
|
|
||||||
writeBootstrapState(statePath, state);
|
writeBootstrapState(statePath, state);
|
||||||
return result;
|
return result;
|
||||||
|
|||||||
206
src/cli.ts
206
src/cli.ts
@@ -1,6 +1,6 @@
|
|||||||
import "dotenv/config";
|
import "dotenv/config";
|
||||||
|
|
||||||
import { readFileSync } from "node:fs";
|
import { existsSync, readFileSync } from "node:fs";
|
||||||
import { dirname, resolve } from "node:path";
|
import { dirname, resolve } from "node:path";
|
||||||
import { parseArgs } from "node:util";
|
import { parseArgs } from "node:util";
|
||||||
import { fileURLToPath } from "node:url";
|
import { fileURLToPath } from "node:url";
|
||||||
@@ -11,25 +11,33 @@ import {
|
|||||||
login as loginAlpha,
|
login as loginAlpha,
|
||||||
logout as logoutAlpha,
|
logout as logoutAlpha,
|
||||||
} from "@companion-ai/alpha-hub/lib";
|
} from "@companion-ai/alpha-hub/lib";
|
||||||
import { AuthStorage, DefaultPackageManager, ModelRegistry, SettingsManager } from "@mariozechner/pi-coding-agent";
|
import { SettingsManager } from "@mariozechner/pi-coding-agent";
|
||||||
|
|
||||||
import { syncBundledAssets } from "./bootstrap/sync.js";
|
import { syncBundledAssets } from "./bootstrap/sync.js";
|
||||||
import { ensureFeynmanHome, getDefaultSessionDir, getFeynmanAgentDir, getFeynmanHome } from "./config/paths.js";
|
import { ensureFeynmanHome, getDefaultSessionDir, getFeynmanAgentDir, getFeynmanHome } from "./config/paths.js";
|
||||||
import { launchPiChat } from "./pi/launch.js";
|
import { launchPiChat } from "./pi/launch.js";
|
||||||
|
import { installPackageSources, updateConfiguredPackages } from "./pi/package-ops.js";
|
||||||
|
import { MAX_NATIVE_PACKAGE_NODE_MAJOR } from "./pi/package-presets.js";
|
||||||
import { CORE_PACKAGE_SOURCES, getOptionalPackagePresetSources, listOptionalPackagePresets } from "./pi/package-presets.js";
|
import { CORE_PACKAGE_SOURCES, getOptionalPackagePresetSources, listOptionalPackagePresets } from "./pi/package-presets.js";
|
||||||
import { normalizeFeynmanSettings, normalizeThinkingLevel, parseModelSpec } from "./pi/settings.js";
|
import { normalizeFeynmanSettings, normalizeThinkingLevel, parseModelSpec } from "./pi/settings.js";
|
||||||
|
import { applyFeynmanPackageManagerEnv } from "./pi/runtime.js";
|
||||||
|
import { getConfiguredServiceTier, normalizeServiceTier, setConfiguredServiceTier } from "./model/service-tier.js";
|
||||||
import {
|
import {
|
||||||
|
authenticateModelProvider,
|
||||||
getCurrentModelSpec,
|
getCurrentModelSpec,
|
||||||
loginModelProvider,
|
loginModelProvider,
|
||||||
logoutModelProvider,
|
logoutModelProvider,
|
||||||
printModelList,
|
printModelList,
|
||||||
setDefaultModelSpec,
|
setDefaultModelSpec,
|
||||||
} from "./model/commands.js";
|
} from "./model/commands.js";
|
||||||
import { printSearchStatus } from "./search/commands.js";
|
import { buildModelStatusSnapshotFromRecords, getAvailableModelRecords, getSupportedModelRecords } from "./model/catalog.js";
|
||||||
|
import { clearSearchConfig, printSearchStatus, setSearchProvider } from "./search/commands.js";
|
||||||
|
import type { PiWebSearchProvider } from "./pi/web-access.js";
|
||||||
import { runDoctor, runStatus } from "./setup/doctor.js";
|
import { runDoctor, runStatus } from "./setup/doctor.js";
|
||||||
import { setupPreviewDependencies } from "./setup/preview.js";
|
import { setupPreviewDependencies } from "./setup/preview.js";
|
||||||
import { runSetup } from "./setup/setup.js";
|
import { runSetup } from "./setup/setup.js";
|
||||||
import { printAsciiHeader, printInfo, printPanel, printSection } from "./ui/terminal.js";
|
import { ASH, printAsciiHeader, printInfo, printPanel, printSection, RESET, SAGE } from "./ui/terminal.js";
|
||||||
|
import { createModelRegistry } from "./model/registry.js";
|
||||||
import {
|
import {
|
||||||
cliCommandSections,
|
cliCommandSections,
|
||||||
formatCliWorkflowUsage,
|
formatCliWorkflowUsage,
|
||||||
@@ -43,7 +51,7 @@ const TOP_LEVEL_COMMANDS = new Set(topLevelCommandNames);
|
|||||||
function printHelpLine(usage: string, description: string): void {
|
function printHelpLine(usage: string, description: string): void {
|
||||||
const width = 30;
|
const width = 30;
|
||||||
const padding = Math.max(1, width - usage.length);
|
const padding = Math.max(1, width - usage.length);
|
||||||
printInfo(`${usage}${" ".repeat(padding)}${description}`);
|
console.log(` ${SAGE}${usage}${RESET}${" ".repeat(padding)}${ASH}${description}${RESET}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
function printHelp(appRoot: string): void {
|
function printHelp(appRoot: string): void {
|
||||||
@@ -124,7 +132,13 @@ async function handleModelCommand(subcommand: string | undefined, args: string[]
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (subcommand === "login") {
|
if (subcommand === "login") {
|
||||||
|
if (args[0]) {
|
||||||
|
// Specific provider given - resolve OAuth vs API-key setup automatically
|
||||||
await loginModelProvider(feynmanAuthPath, args[0], feynmanSettingsPath);
|
await loginModelProvider(feynmanAuthPath, args[0], feynmanSettingsPath);
|
||||||
|
} else {
|
||||||
|
// No provider specified - show auth method choice
|
||||||
|
await authenticateModelProvider(feynmanAuthPath, feynmanSettingsPath);
|
||||||
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -136,39 +150,67 @@ async function handleModelCommand(subcommand: string | undefined, args: string[]
|
|||||||
if (subcommand === "set") {
|
if (subcommand === "set") {
|
||||||
const spec = args[0];
|
const spec = args[0];
|
||||||
if (!spec) {
|
if (!spec) {
|
||||||
throw new Error("Usage: feynman model set <provider/model>");
|
throw new Error("Usage: feynman model set <provider/model|provider:model>");
|
||||||
}
|
}
|
||||||
setDefaultModelSpec(feynmanSettingsPath, feynmanAuthPath, spec);
|
setDefaultModelSpec(feynmanSettingsPath, feynmanAuthPath, spec);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (subcommand === "tier") {
|
||||||
|
const requested = args[0];
|
||||||
|
if (!requested) {
|
||||||
|
console.log(getConfiguredServiceTier(feynmanSettingsPath) ?? "not set");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (requested === "unset" || requested === "clear" || requested === "off") {
|
||||||
|
setConfiguredServiceTier(feynmanSettingsPath, undefined);
|
||||||
|
console.log("Cleared service tier override");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const tier = normalizeServiceTier(requested);
|
||||||
|
if (!tier) {
|
||||||
|
throw new Error("Usage: feynman model tier <auto|default|flex|priority|standard_only|unset>");
|
||||||
|
}
|
||||||
|
|
||||||
|
setConfiguredServiceTier(feynmanSettingsPath, tier);
|
||||||
|
console.log(`Service tier set to ${tier}`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
throw new Error(`Unknown model command: ${subcommand}`);
|
throw new Error(`Unknown model command: ${subcommand}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
async function handleUpdateCommand(workingDir: string, feynmanAgentDir: string, source?: string): Promise<void> {
|
async function handleUpdateCommand(workingDir: string, feynmanAgentDir: string, source?: string): Promise<void> {
|
||||||
const settingsManager = SettingsManager.create(workingDir, feynmanAgentDir);
|
try {
|
||||||
const packageManager = new DefaultPackageManager({
|
const result = await updateConfiguredPackages(workingDir, feynmanAgentDir, source);
|
||||||
cwd: workingDir,
|
if (result.updated.length === 0) {
|
||||||
agentDir: feynmanAgentDir,
|
|
||||||
settingsManager,
|
|
||||||
});
|
|
||||||
|
|
||||||
packageManager.setProgressCallback((event) => {
|
|
||||||
if (event.type === "start") {
|
|
||||||
console.log(`Updating ${event.source}...`);
|
|
||||||
} else if (event.type === "complete") {
|
|
||||||
console.log(`Updated ${event.source}`);
|
|
||||||
} else if (event.type === "error") {
|
|
||||||
console.error(`Failed to update ${event.source}: ${event.message ?? "unknown error"}`);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
await packageManager.update(source);
|
|
||||||
await settingsManager.flush();
|
|
||||||
console.log("All packages up to date.");
|
console.log("All packages up to date.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const updatedSource of result.updated) {
|
||||||
|
console.log(`Updated ${updatedSource}`);
|
||||||
|
}
|
||||||
|
for (const skippedSource of result.skipped) {
|
||||||
|
console.log(`Skipped ${skippedSource} on Node ${process.versions.node} (native packages are only supported through Node ${MAX_NATIVE_PACKAGE_NODE_MAJOR}.x).`);
|
||||||
|
}
|
||||||
|
console.log("All packages up to date.");
|
||||||
|
} catch (error) {
|
||||||
|
const message = error instanceof Error ? error.message : String(error);
|
||||||
|
if (message.includes("No supported package manager found")) {
|
||||||
|
console.log("No package manager is available for live package updates.");
|
||||||
|
console.log("If you installed the standalone app, rerun the installer to get newer bundled packages.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async function handlePackagesCommand(subcommand: string | undefined, args: string[], workingDir: string, feynmanAgentDir: string): Promise<void> {
|
async function handlePackagesCommand(subcommand: string | undefined, args: string[], workingDir: string, feynmanAgentDir: string): Promise<void> {
|
||||||
|
applyFeynmanPackageManagerEnv(feynmanAgentDir);
|
||||||
const settingsManager = SettingsManager.create(workingDir, feynmanAgentDir);
|
const settingsManager = SettingsManager.create(workingDir, feynmanAgentDir);
|
||||||
const configuredSources = new Set(
|
const configuredSources = new Set(
|
||||||
settingsManager
|
settingsManager
|
||||||
@@ -208,38 +250,67 @@ async function handlePackagesCommand(subcommand: string | undefined, args: strin
|
|||||||
throw new Error(`Unknown package preset: ${target}`);
|
throw new Error(`Unknown package preset: ${target}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
const packageManager = new DefaultPackageManager({
|
const appRoot = resolve(dirname(fileURLToPath(import.meta.url)), "..");
|
||||||
cwd: workingDir,
|
const isStandaloneBundle = !existsSync(resolve(appRoot, ".feynman", "runtime-workspace.tgz")) && existsSync(resolve(appRoot, ".feynman", "npm"));
|
||||||
agentDir: feynmanAgentDir,
|
if (target === "generative-ui" && process.platform === "darwin" && isStandaloneBundle) {
|
||||||
settingsManager,
|
console.log("The generative-ui preset is currently unavailable in the standalone macOS bundle.");
|
||||||
});
|
console.log("Its native glimpseui dependency fails to compile reliably in that environment.");
|
||||||
packageManager.setProgressCallback((event) => {
|
console.log("If you need generative-ui, install Feynman through npm instead of the standalone bundle.");
|
||||||
if (event.type === "start") {
|
return;
|
||||||
console.log(`Installing ${event.source}...`);
|
|
||||||
} else if (event.type === "complete") {
|
|
||||||
console.log(`Installed ${event.source}`);
|
|
||||||
} else if (event.type === "error") {
|
|
||||||
console.error(`Failed to install ${event.source}: ${event.message ?? "unknown error"}`);
|
|
||||||
}
|
}
|
||||||
});
|
|
||||||
|
|
||||||
|
const pendingSources = sources.filter((source) => !configuredSources.has(source));
|
||||||
for (const source of sources) {
|
for (const source of sources) {
|
||||||
if (configuredSources.has(source)) {
|
if (configuredSources.has(source)) {
|
||||||
console.log(`${source} already installed`);
|
console.log(`${source} already installed`);
|
||||||
continue;
|
|
||||||
}
|
}
|
||||||
await packageManager.install(source);
|
}
|
||||||
|
|
||||||
|
if (pendingSources.length === 0) {
|
||||||
|
console.log("Optional packages installed.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const result = await installPackageSources(workingDir, feynmanAgentDir, pendingSources, { persist: true });
|
||||||
|
for (const skippedSource of result.skipped) {
|
||||||
|
console.log(`Skipped ${skippedSource} on Node ${process.versions.node} (native packages are only supported through Node ${MAX_NATIVE_PACKAGE_NODE_MAJOR}.x).`);
|
||||||
}
|
}
|
||||||
await settingsManager.flush();
|
await settingsManager.flush();
|
||||||
console.log("Optional packages installed.");
|
console.log("Optional packages installed.");
|
||||||
|
} catch (error) {
|
||||||
|
const message = error instanceof Error ? error.message : String(error);
|
||||||
|
if (message.includes("No supported package manager found")) {
|
||||||
|
console.log("No package manager is available for optional package installs.");
|
||||||
|
console.log("Install npm, pnpm, or bun, or rerun the standalone installer for bundled package updates.");
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
function handleSearchCommand(subcommand: string | undefined): void {
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function handleSearchCommand(subcommand: string | undefined, args: string[]): void {
|
||||||
if (!subcommand || subcommand === "status") {
|
if (!subcommand || subcommand === "status") {
|
||||||
printSearchStatus();
|
printSearchStatus();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (subcommand === "set") {
|
||||||
|
const provider = args[0] as PiWebSearchProvider | undefined;
|
||||||
|
const validProviders: PiWebSearchProvider[] = ["auto", "perplexity", "exa", "gemini"];
|
||||||
|
if (!provider || !validProviders.includes(provider)) {
|
||||||
|
throw new Error("Usage: feynman search set <auto|perplexity|exa|gemini> [api-key]");
|
||||||
|
}
|
||||||
|
setSearchProvider(provider, args[1]);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (subcommand === "clear") {
|
||||||
|
clearSearchConfig();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
throw new Error(`Unknown search command: ${subcommand}`);
|
throw new Error(`Unknown search command: ${subcommand}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -275,6 +346,24 @@ export function resolveInitialPrompt(
|
|||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function shouldRunInteractiveSetup(
|
||||||
|
explicitModelSpec: string | undefined,
|
||||||
|
currentModelSpec: string | undefined,
|
||||||
|
isInteractiveTerminal: boolean,
|
||||||
|
authPath: string,
|
||||||
|
): boolean {
|
||||||
|
if (explicitModelSpec || !isInteractiveTerminal) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
const status = buildModelStatusSnapshotFromRecords(
|
||||||
|
getSupportedModelRecords(authPath),
|
||||||
|
getAvailableModelRecords(authPath),
|
||||||
|
currentModelSpec,
|
||||||
|
);
|
||||||
|
return !status.currentValid;
|
||||||
|
}
|
||||||
|
|
||||||
export async function main(): Promise<void> {
|
export async function main(): Promise<void> {
|
||||||
const here = dirname(fileURLToPath(import.meta.url));
|
const here = dirname(fileURLToPath(import.meta.url));
|
||||||
const appRoot = resolve(here, "..");
|
const appRoot = resolve(here, "..");
|
||||||
@@ -293,12 +382,15 @@ export async function main(): Promise<void> {
|
|||||||
cwd: { type: "string" },
|
cwd: { type: "string" },
|
||||||
doctor: { type: "boolean" },
|
doctor: { type: "boolean" },
|
||||||
help: { type: "boolean" },
|
help: { type: "boolean" },
|
||||||
|
version: { type: "boolean" },
|
||||||
"alpha-login": { type: "boolean" },
|
"alpha-login": { type: "boolean" },
|
||||||
"alpha-logout": { type: "boolean" },
|
"alpha-logout": { type: "boolean" },
|
||||||
"alpha-status": { type: "boolean" },
|
"alpha-status": { type: "boolean" },
|
||||||
|
mode: { type: "string" },
|
||||||
model: { type: "string" },
|
model: { type: "string" },
|
||||||
"new-session": { type: "boolean" },
|
"new-session": { type: "boolean" },
|
||||||
prompt: { type: "string" },
|
prompt: { type: "string" },
|
||||||
|
"service-tier": { type: "string" },
|
||||||
"session-dir": { type: "string" },
|
"session-dir": { type: "string" },
|
||||||
"setup-preview": { type: "boolean" },
|
"setup-preview": { type: "boolean" },
|
||||||
thinking: { type: "string" },
|
thinking: { type: "string" },
|
||||||
@@ -310,6 +402,14 @@ export async function main(): Promise<void> {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (values.version) {
|
||||||
|
if (feynmanVersion) {
|
||||||
|
console.log(feynmanVersion);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
throw new Error("Unable to determine the installed Feynman version.");
|
||||||
|
}
|
||||||
|
|
||||||
const workingDir = resolve(values.cwd ?? process.cwd());
|
const workingDir = resolve(values.cwd ?? process.cwd());
|
||||||
const sessionDir = resolve(values["session-dir"] ?? getDefaultSessionDir(feynmanHome));
|
const sessionDir = resolve(values["session-dir"] ?? getDefaultSessionDir(feynmanHome));
|
||||||
const feynmanSettingsPath = resolve(feynmanAgentDir, "settings.json");
|
const feynmanSettingsPath = resolve(feynmanAgentDir, "settings.json");
|
||||||
@@ -397,7 +497,7 @@ export async function main(): Promise<void> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (command === "search") {
|
if (command === "search") {
|
||||||
handleSearchCommand(rest[0]);
|
handleSearchCommand(rest[0], rest.slice(1));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -417,15 +517,32 @@ export async function main(): Promise<void> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const explicitModelSpec = values.model ?? process.env.FEYNMAN_MODEL;
|
const explicitModelSpec = values.model ?? process.env.FEYNMAN_MODEL;
|
||||||
|
const explicitServiceTier = normalizeServiceTier(values["service-tier"] ?? process.env.FEYNMAN_SERVICE_TIER);
|
||||||
|
const mode = values.mode;
|
||||||
|
if (mode !== undefined && mode !== "text" && mode !== "json" && mode !== "rpc") {
|
||||||
|
throw new Error("Unknown mode. Use text, json, or rpc.");
|
||||||
|
}
|
||||||
|
if ((values["service-tier"] ?? process.env.FEYNMAN_SERVICE_TIER) && !explicitServiceTier) {
|
||||||
|
throw new Error("Unknown service tier. Use auto, default, flex, priority, or standard_only.");
|
||||||
|
}
|
||||||
|
if (explicitServiceTier) {
|
||||||
|
process.env.FEYNMAN_SERVICE_TIER = explicitServiceTier;
|
||||||
|
}
|
||||||
if (explicitModelSpec) {
|
if (explicitModelSpec) {
|
||||||
const modelRegistry = new ModelRegistry(AuthStorage.create(feynmanAuthPath));
|
const modelRegistry = createModelRegistry(feynmanAuthPath);
|
||||||
const explicitModel = parseModelSpec(explicitModelSpec, modelRegistry);
|
const explicitModel = parseModelSpec(explicitModelSpec, modelRegistry);
|
||||||
if (!explicitModel) {
|
if (!explicitModel) {
|
||||||
throw new Error(`Unknown model: ${explicitModelSpec}`);
|
throw new Error(`Unknown model: ${explicitModelSpec}`);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!explicitModelSpec && !getCurrentModelSpec(feynmanSettingsPath) && process.stdin.isTTY && process.stdout.isTTY) {
|
const currentModelSpec = getCurrentModelSpec(feynmanSettingsPath);
|
||||||
|
if (shouldRunInteractiveSetup(
|
||||||
|
explicitModelSpec,
|
||||||
|
currentModelSpec,
|
||||||
|
Boolean(process.stdin.isTTY && process.stdout.isTTY),
|
||||||
|
feynmanAuthPath,
|
||||||
|
)) {
|
||||||
await runSetup({
|
await runSetup({
|
||||||
settingsPath: feynmanSettingsPath,
|
settingsPath: feynmanSettingsPath,
|
||||||
bundledSettingsPath,
|
bundledSettingsPath,
|
||||||
@@ -447,6 +564,7 @@ export async function main(): Promise<void> {
|
|||||||
sessionDir,
|
sessionDir,
|
||||||
feynmanAgentDir,
|
feynmanAgentDir,
|
||||||
feynmanVersion,
|
feynmanVersion,
|
||||||
|
mode,
|
||||||
thinkingLevel,
|
thinkingLevel,
|
||||||
explicitModelSpec,
|
explicitModelSpec,
|
||||||
oneShotPrompt: values.prompt,
|
oneShotPrompt: values.prompt,
|
||||||
|
|||||||
10
src/index.ts
10
src/index.ts
@@ -1,6 +1,12 @@
|
|||||||
import { main } from "./cli.js";
|
import { ensureSupportedNodeVersion } from "./system/node-version.js";
|
||||||
|
|
||||||
main().catch((error) => {
|
async function run(): Promise<void> {
|
||||||
|
ensureSupportedNodeVersion();
|
||||||
|
const { main } = await import("./cli.js");
|
||||||
|
await main();
|
||||||
|
}
|
||||||
|
|
||||||
|
run().catch((error) => {
|
||||||
console.error(error instanceof Error ? error.message : String(error));
|
console.error(error instanceof Error ? error.message : String(error));
|
||||||
process.exitCode = 1;
|
process.exitCode = 1;
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent";
|
import { createModelRegistry } from "./registry.js";
|
||||||
|
|
||||||
type ModelRecord = {
|
type ModelRecord = {
|
||||||
provider: string;
|
provider: string;
|
||||||
@@ -95,6 +95,14 @@ const RESEARCH_MODEL_PREFERENCES = [
|
|||||||
spec: "zai/glm-5",
|
spec: "zai/glm-5",
|
||||||
reason: "good fallback when GLM is the available research model",
|
reason: "good fallback when GLM is the available research model",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
spec: "minimax/minimax-m2.7",
|
||||||
|
reason: "good fallback when MiniMax is the available research model",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
spec: "minimax/minimax-m2.7-highspeed",
|
||||||
|
reason: "good fallback when MiniMax is the available research model",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
spec: "kimi-coding/kimi-k2-thinking",
|
spec: "kimi-coding/kimi-k2-thinking",
|
||||||
reason: "good fallback when Kimi is the available research model",
|
reason: "good fallback when Kimi is the available research model",
|
||||||
@@ -166,10 +174,6 @@ function sortProviders(left: ProviderStatus, right: ProviderStatus): number {
|
|||||||
return left.label.localeCompare(right.label);
|
return left.label.localeCompare(right.label);
|
||||||
}
|
}
|
||||||
|
|
||||||
function createModelRegistry(authPath: string): ModelRegistry {
|
|
||||||
return new ModelRegistry(AuthStorage.create(authPath));
|
|
||||||
}
|
|
||||||
|
|
||||||
export function getAvailableModelRecords(authPath: string): ModelRecord[] {
|
export function getAvailableModelRecords(authPath: string): ModelRecord[] {
|
||||||
return createModelRegistry(authPath)
|
return createModelRegistry(authPath)
|
||||||
.getAvailable()
|
.getAvailable()
|
||||||
@@ -258,7 +262,9 @@ export function buildModelStatusSnapshotFromRecords(
|
|||||||
const guidance: string[] = [];
|
const guidance: string[] = [];
|
||||||
if (available.length === 0) {
|
if (available.length === 0) {
|
||||||
guidance.push("No authenticated Pi models are available yet.");
|
guidance.push("No authenticated Pi models are available yet.");
|
||||||
guidance.push("Run `feynman model login <provider>` or add provider credentials that Pi can see.");
|
guidance.push(
|
||||||
|
"Run `feynman model login <provider>` (OAuth) or configure an API key (env var, auth.json, or models.json for custom providers).",
|
||||||
|
);
|
||||||
guidance.push("After auth is in place, rerun `feynman model list` or `feynman setup model`.");
|
guidance.push("After auth is in place, rerun `feynman model list` or `feynman setup model`.");
|
||||||
} else if (!current) {
|
} else if (!current) {
|
||||||
guidance.push(`No default research model is set. Recommended: ${recommended?.spec}.`);
|
guidance.push(`No default research model is set. Recommended: ${recommended?.spec}.`);
|
||||||
|
|||||||
@@ -1,8 +1,11 @@
|
|||||||
import { AuthStorage } from "@mariozechner/pi-coding-agent";
|
import { AuthStorage } from "@mariozechner/pi-coding-agent";
|
||||||
import { writeFileSync } from "node:fs";
|
import { writeFileSync } from "node:fs";
|
||||||
|
import { exec as execCallback } from "node:child_process";
|
||||||
|
import { promisify } from "node:util";
|
||||||
|
|
||||||
import { readJson } from "../pi/settings.js";
|
import { readJson } from "../pi/settings.js";
|
||||||
import { promptChoice, promptText } from "../setup/prompts.js";
|
import { promptChoice, promptSelect, promptText, type PromptSelectOption } from "../setup/prompts.js";
|
||||||
|
import { openUrl } from "../system/open-url.js";
|
||||||
import { printInfo, printSection, printSuccess, printWarning } from "../ui/terminal.js";
|
import { printInfo, printSection, printSuccess, printWarning } from "../ui/terminal.js";
|
||||||
import {
|
import {
|
||||||
buildModelStatusSnapshotFromRecords,
|
buildModelStatusSnapshotFromRecords,
|
||||||
@@ -11,6 +14,10 @@ import {
|
|||||||
getSupportedModelRecords,
|
getSupportedModelRecords,
|
||||||
type ModelStatusSnapshot,
|
type ModelStatusSnapshot,
|
||||||
} from "./catalog.js";
|
} from "./catalog.js";
|
||||||
|
import { createModelRegistry, getModelsJsonPath } from "./registry.js";
|
||||||
|
import { upsertProviderBaseUrl, upsertProviderConfig } from "./models-json.js";
|
||||||
|
|
||||||
|
const exec = promisify(execCallback);
|
||||||
|
|
||||||
function collectModelStatus(settingsPath: string, authPath: string): ModelStatusSnapshot {
|
function collectModelStatus(settingsPath: string, authPath: string): ModelStatusSnapshot {
|
||||||
return buildModelStatusSnapshotFromRecords(
|
return buildModelStatusSnapshotFromRecords(
|
||||||
@@ -48,17 +55,558 @@ async function selectOAuthProvider(authPath: string, action: "login" | "logout")
|
|||||||
return providers[0];
|
return providers[0];
|
||||||
}
|
}
|
||||||
|
|
||||||
const choices = providers.map((provider) => `${provider.id} — ${provider.name ?? provider.id}`);
|
const selection = await promptSelect<OAuthProviderInfo | "cancel">(
|
||||||
choices.push("Cancel");
|
`Choose an OAuth provider to ${action}:`,
|
||||||
const selection = await promptChoice(`Choose an OAuth provider to ${action}:`, choices, 0);
|
[
|
||||||
if (selection >= providers.length) {
|
...providers.map((provider) => ({
|
||||||
|
value: provider,
|
||||||
|
label: provider.name ?? provider.id,
|
||||||
|
hint: provider.id,
|
||||||
|
})),
|
||||||
|
{ value: "cancel", label: "Cancel" },
|
||||||
|
],
|
||||||
|
providers[0],
|
||||||
|
);
|
||||||
|
if (selection === "cancel") {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
return providers[selection];
|
return selection;
|
||||||
|
}
|
||||||
|
|
||||||
|
type ApiKeyProviderInfo = {
|
||||||
|
id: string;
|
||||||
|
label: string;
|
||||||
|
envVar?: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
const API_KEY_PROVIDERS: ApiKeyProviderInfo[] = [
|
||||||
|
{ id: "openai", label: "OpenAI Platform API", envVar: "OPENAI_API_KEY" },
|
||||||
|
{ id: "anthropic", label: "Anthropic API", envVar: "ANTHROPIC_API_KEY" },
|
||||||
|
{ id: "google", label: "Google Gemini API", envVar: "GEMINI_API_KEY" },
|
||||||
|
{ id: "__custom__", label: "Custom provider (local/self-hosted/proxy)" },
|
||||||
|
{ id: "amazon-bedrock", label: "Amazon Bedrock (AWS credential chain)" },
|
||||||
|
{ id: "openrouter", label: "OpenRouter", envVar: "OPENROUTER_API_KEY" },
|
||||||
|
{ id: "zai", label: "Z.AI / GLM", envVar: "ZAI_API_KEY" },
|
||||||
|
{ id: "kimi-coding", label: "Kimi / Moonshot", envVar: "KIMI_API_KEY" },
|
||||||
|
{ id: "minimax", label: "MiniMax", envVar: "MINIMAX_API_KEY" },
|
||||||
|
{ id: "minimax-cn", label: "MiniMax (China)", envVar: "MINIMAX_CN_API_KEY" },
|
||||||
|
{ id: "mistral", label: "Mistral", envVar: "MISTRAL_API_KEY" },
|
||||||
|
{ id: "groq", label: "Groq", envVar: "GROQ_API_KEY" },
|
||||||
|
{ id: "xai", label: "xAI", envVar: "XAI_API_KEY" },
|
||||||
|
{ id: "cerebras", label: "Cerebras", envVar: "CEREBRAS_API_KEY" },
|
||||||
|
{ id: "vercel-ai-gateway", label: "Vercel AI Gateway", envVar: "AI_GATEWAY_API_KEY" },
|
||||||
|
{ id: "huggingface", label: "Hugging Face", envVar: "HF_TOKEN" },
|
||||||
|
{ id: "opencode", label: "OpenCode Zen", envVar: "OPENCODE_API_KEY" },
|
||||||
|
{ id: "opencode-go", label: "OpenCode Go", envVar: "OPENCODE_API_KEY" },
|
||||||
|
{ id: "azure-openai-responses", label: "Azure OpenAI (Responses)", envVar: "AZURE_OPENAI_API_KEY" },
|
||||||
|
];
|
||||||
|
|
||||||
|
function resolveApiKeyProvider(input: string): ApiKeyProviderInfo | undefined {
|
||||||
|
const normalizedInput = normalizeProviderId(input);
|
||||||
|
if (!normalizedInput) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
return API_KEY_PROVIDERS.find((provider) => provider.id === normalizedInput);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function resolveModelProviderForCommand(
|
||||||
|
authPath: string,
|
||||||
|
input: string,
|
||||||
|
): { kind: "oauth" | "api-key"; id: string } | undefined {
|
||||||
|
const oauthProvider = resolveOAuthProvider(authPath, input);
|
||||||
|
if (oauthProvider) {
|
||||||
|
return { kind: "oauth", id: oauthProvider.id };
|
||||||
|
}
|
||||||
|
|
||||||
|
const apiKeyProvider = resolveApiKeyProvider(input);
|
||||||
|
if (apiKeyProvider) {
|
||||||
|
return { kind: "api-key", id: apiKeyProvider.id };
|
||||||
|
}
|
||||||
|
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
async function selectApiKeyProvider(): Promise<ApiKeyProviderInfo | undefined> {
|
||||||
|
const options: PromptSelectOption<ApiKeyProviderInfo | "cancel">[] = API_KEY_PROVIDERS.map((provider) => ({
|
||||||
|
value: provider,
|
||||||
|
label: provider.label,
|
||||||
|
hint: provider.id === "__custom__"
|
||||||
|
? "Ollama, vLLM, LM Studio, proxies"
|
||||||
|
: provider.envVar ?? provider.id,
|
||||||
|
}));
|
||||||
|
options.push({ value: "cancel", label: "Cancel" });
|
||||||
|
|
||||||
|
const defaultProvider = API_KEY_PROVIDERS.find((provider) => provider.id === "openai") ?? API_KEY_PROVIDERS[0];
|
||||||
|
const selection = await promptSelect("Choose an API-key provider:", options, defaultProvider);
|
||||||
|
if (selection === "cancel") {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
return selection;
|
||||||
|
}
|
||||||
|
|
||||||
|
type CustomProviderSetup = {
|
||||||
|
providerId: string;
|
||||||
|
modelIds: string[];
|
||||||
|
baseUrl: string;
|
||||||
|
api: "openai-completions" | "openai-responses" | "anthropic-messages" | "google-generative-ai";
|
||||||
|
apiKeyConfig: string;
|
||||||
|
/**
|
||||||
|
* If true, add `Authorization: Bearer <apiKey>` to requests in addition to
|
||||||
|
* whatever the API mode uses (useful for proxies that implement /v1/messages
|
||||||
|
* but expect Bearer auth instead of x-api-key).
|
||||||
|
*/
|
||||||
|
authHeader: boolean;
|
||||||
|
};
|
||||||
|
|
||||||
|
function normalizeProviderId(value: string): string {
|
||||||
|
return value.trim().toLowerCase().replace(/\s+/g, "-");
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizeModelIds(value: string): string[] {
|
||||||
|
const items = value
|
||||||
|
.split(",")
|
||||||
|
.map((entry) => entry.trim())
|
||||||
|
.filter(Boolean);
|
||||||
|
return Array.from(new Set(items));
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizeBaseUrl(value: string): string {
|
||||||
|
return value.trim().replace(/\/+$/, "");
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizeCustomProviderBaseUrl(
|
||||||
|
api: CustomProviderSetup["api"],
|
||||||
|
baseUrl: string,
|
||||||
|
): { baseUrl: string; note?: string } {
|
||||||
|
const normalized = normalizeBaseUrl(baseUrl);
|
||||||
|
if (!normalized) {
|
||||||
|
return { baseUrl: normalized };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pi expects Anthropic baseUrl without `/v1` (it appends `/v1/messages` internally).
|
||||||
|
if (api === "anthropic-messages" && /\/v1$/i.test(normalized)) {
|
||||||
|
return { baseUrl: normalized.replace(/\/v1$/i, ""), note: "Stripped trailing /v1 for Anthropic mode." };
|
||||||
|
}
|
||||||
|
|
||||||
|
return { baseUrl: normalized };
|
||||||
|
}
|
||||||
|
|
||||||
|
function isLocalBaseUrl(baseUrl: string): boolean {
|
||||||
|
return /^(https?:\/\/)?(localhost|127\.0\.0\.1|0\.0\.0\.0)(:|\/|$)/i.test(baseUrl);
|
||||||
|
}
|
||||||
|
|
||||||
|
async function resolveApiKeyConfig(apiKeyConfig: string): Promise<string | undefined> {
|
||||||
|
const trimmed = apiKeyConfig.trim();
|
||||||
|
if (!trimmed) return undefined;
|
||||||
|
|
||||||
|
if (trimmed.startsWith("!")) {
|
||||||
|
const command = trimmed.slice(1).trim();
|
||||||
|
if (!command) return undefined;
|
||||||
|
const shell = process.platform === "win32" ? process.env.ComSpec || "cmd.exe" : process.env.SHELL || "/bin/sh";
|
||||||
|
try {
|
||||||
|
const { stdout } = await exec(command, { shell, maxBuffer: 1024 * 1024 });
|
||||||
|
const value = stdout.trim();
|
||||||
|
return value || undefined;
|
||||||
|
} catch {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const envValue = process.env[trimmed];
|
||||||
|
if (typeof envValue === "string" && envValue.trim()) {
|
||||||
|
return envValue.trim();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to literal value.
|
||||||
|
return trimmed;
|
||||||
|
}
|
||||||
|
|
||||||
|
async function bestEffortFetchOpenAiModelIds(
|
||||||
|
baseUrl: string,
|
||||||
|
apiKey: string,
|
||||||
|
authHeader: boolean,
|
||||||
|
): Promise<string[] | undefined> {
|
||||||
|
const url = `${baseUrl}/models`;
|
||||||
|
const controller = new AbortController();
|
||||||
|
const timer = setTimeout(() => controller.abort(), 5000);
|
||||||
|
try {
|
||||||
|
const response = await fetch(url, {
|
||||||
|
method: "GET",
|
||||||
|
headers: authHeader ? { Authorization: `Bearer ${apiKey}` } : undefined,
|
||||||
|
signal: controller.signal,
|
||||||
|
});
|
||||||
|
if (!response.ok) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
const json = (await response.json()) as any;
|
||||||
|
if (!Array.isArray(json?.data)) return undefined;
|
||||||
|
return json.data
|
||||||
|
.map((entry: any) => (typeof entry?.id === "string" ? entry.id : undefined))
|
||||||
|
.filter(Boolean);
|
||||||
|
} catch {
|
||||||
|
return undefined;
|
||||||
|
} finally {
|
||||||
|
clearTimeout(timer);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function promptCustomProviderSetup(): Promise<CustomProviderSetup | undefined> {
|
||||||
|
printSection("Custom Provider");
|
||||||
|
const providerIdInput = await promptText("Provider id (e.g. my-proxy)", "custom");
|
||||||
|
const providerId = normalizeProviderId(providerIdInput);
|
||||||
|
if (!providerId || providerId === "__custom__") {
|
||||||
|
printWarning("Invalid provider id.");
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
const apiChoices = [
|
||||||
|
"openai-completions — OpenAI Chat Completions compatible (e.g. /v1/chat/completions)",
|
||||||
|
"openai-responses — OpenAI Responses compatible (e.g. /v1/responses)",
|
||||||
|
"anthropic-messages — Anthropic Messages compatible (e.g. /v1/messages)",
|
||||||
|
"google-generative-ai — Google Generative AI compatible (generativelanguage.googleapis.com)",
|
||||||
|
"Cancel",
|
||||||
|
];
|
||||||
|
const apiSelection = await promptChoice("API mode:", apiChoices, 0);
|
||||||
|
if (apiSelection >= 4) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
const api = ["openai-completions", "openai-responses", "anthropic-messages", "google-generative-ai"][apiSelection] as CustomProviderSetup["api"];
|
||||||
|
|
||||||
|
const baseUrlDefault = ((): string => {
|
||||||
|
if (api === "openai-completions" || api === "openai-responses") return "http://localhost:11434/v1";
|
||||||
|
if (api === "anthropic-messages") return "https://api.anthropic.com";
|
||||||
|
if (api === "google-generative-ai") return "https://generativelanguage.googleapis.com";
|
||||||
|
return "http://localhost:11434/v1";
|
||||||
|
})();
|
||||||
|
const baseUrlPrompt =
|
||||||
|
api === "openai-completions" || api === "openai-responses"
|
||||||
|
? "Base URL (include /v1 for OpenAI-compatible endpoints)"
|
||||||
|
: api === "anthropic-messages"
|
||||||
|
? "Base URL (no trailing /, no /v1)"
|
||||||
|
: "Base URL (no trailing /)";
|
||||||
|
const baseUrlRaw = await promptText(baseUrlPrompt, baseUrlDefault);
|
||||||
|
const { baseUrl, note: baseUrlNote } = normalizeCustomProviderBaseUrl(api, baseUrlRaw);
|
||||||
|
if (!baseUrl) {
|
||||||
|
printWarning("Base URL is required.");
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
if (baseUrlNote) {
|
||||||
|
printInfo(baseUrlNote);
|
||||||
|
}
|
||||||
|
|
||||||
|
let authHeader = false;
|
||||||
|
if (api === "openai-completions" || api === "openai-responses") {
|
||||||
|
const defaultAuthHeader = !isLocalBaseUrl(baseUrl);
|
||||||
|
const authHeaderChoices = [
|
||||||
|
"Yes (send Authorization: Bearer <apiKey>)",
|
||||||
|
"No (common for local Ollama/vLLM/LM Studio)",
|
||||||
|
"Cancel",
|
||||||
|
];
|
||||||
|
const authHeaderSelection = await promptChoice(
|
||||||
|
"Send Authorization header?",
|
||||||
|
authHeaderChoices,
|
||||||
|
defaultAuthHeader ? 0 : 1,
|
||||||
|
);
|
||||||
|
if (authHeaderSelection >= 2) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
authHeader = authHeaderSelection === 0;
|
||||||
|
}
|
||||||
|
if (api === "anthropic-messages") {
|
||||||
|
const defaultAuthHeader = isLocalBaseUrl(baseUrl);
|
||||||
|
const authHeaderChoices = [
|
||||||
|
"Yes (also send Authorization: Bearer <apiKey>)",
|
||||||
|
"No (standard Anthropic uses x-api-key only)",
|
||||||
|
"Cancel",
|
||||||
|
];
|
||||||
|
const authHeaderSelection = await promptChoice(
|
||||||
|
"Also send Authorization header?",
|
||||||
|
authHeaderChoices,
|
||||||
|
defaultAuthHeader ? 0 : 1,
|
||||||
|
);
|
||||||
|
if (authHeaderSelection >= 2) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
authHeader = authHeaderSelection === 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
printInfo("API key value supports:");
|
||||||
|
printInfo(" - literal secret (stored in models.json)");
|
||||||
|
printInfo(" - env var name (resolved at runtime)");
|
||||||
|
printInfo(" - !command (executes and uses stdout)");
|
||||||
|
const apiKeyConfigRaw = (await promptText("API key / resolver", "")).trim();
|
||||||
|
const apiKeyConfig = apiKeyConfigRaw || "local";
|
||||||
|
if (!apiKeyConfigRaw) {
|
||||||
|
printInfo("Using placeholder apiKey value (required by Pi for custom providers).");
|
||||||
|
}
|
||||||
|
|
||||||
|
let modelIdsDefault = "my-model";
|
||||||
|
if (api === "openai-completions" || api === "openai-responses") {
|
||||||
|
// Best-effort: hit /models so users can pick correct ids (especially for proxies).
|
||||||
|
const resolvedKey = await resolveApiKeyConfig(apiKeyConfig);
|
||||||
|
const modelIds = resolvedKey ? await bestEffortFetchOpenAiModelIds(baseUrl, resolvedKey, authHeader) : undefined;
|
||||||
|
if (modelIds && modelIds.length > 0) {
|
||||||
|
const sample = modelIds.slice(0, 10).join(", ");
|
||||||
|
printInfo(`Detected models: ${sample}${modelIds.length > 10 ? ", ..." : ""}`);
|
||||||
|
modelIdsDefault = modelIds.includes("sonnet") ? "sonnet" : modelIds[0]!;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const modelIdsRaw = await promptText("Model id(s) (comma-separated)", modelIdsDefault);
|
||||||
|
const modelIds = normalizeModelIds(modelIdsRaw);
|
||||||
|
if (modelIds.length === 0) {
|
||||||
|
printWarning("At least one model id is required.");
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
return { providerId, modelIds, baseUrl, api, apiKeyConfig, authHeader };
|
||||||
|
}
|
||||||
|
|
||||||
|
async function verifyCustomProvider(setup: CustomProviderSetup, authPath: string): Promise<void> {
|
||||||
|
const registry = createModelRegistry(authPath);
|
||||||
|
const modelsError = registry.getError();
|
||||||
|
if (modelsError) {
|
||||||
|
printWarning("Verification: models.json failed to load.");
|
||||||
|
for (const line of modelsError.split("\n")) {
|
||||||
|
printInfo(` ${line}`);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const all = registry.getAll();
|
||||||
|
const hasModel = setup.modelIds.some((id) => all.some((model) => model.provider === setup.providerId && model.id === id));
|
||||||
|
if (!hasModel) {
|
||||||
|
printWarning("Verification: model registry does not contain the configured provider/model ids.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const available = registry.getAvailable();
|
||||||
|
const hasAvailable = setup.modelIds.some((id) =>
|
||||||
|
available.some((model) => model.provider === setup.providerId && model.id === id),
|
||||||
|
);
|
||||||
|
if (!hasAvailable) {
|
||||||
|
printWarning("Verification: provider is not considered authenticated/available.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const apiKey = await registry.getApiKeyForProvider(setup.providerId);
|
||||||
|
if (!apiKey) {
|
||||||
|
printWarning("Verification: API key could not be resolved (check env var name / !command).");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const timeoutMs = 8000;
|
||||||
|
|
||||||
|
// Best-effort network check for OpenAI-compatible endpoints
|
||||||
|
if (setup.api === "openai-completions" || setup.api === "openai-responses") {
|
||||||
|
const url = `${setup.baseUrl}/models`;
|
||||||
|
const controller = new AbortController();
|
||||||
|
const timer = setTimeout(() => controller.abort(), timeoutMs);
|
||||||
|
try {
|
||||||
|
const response = await fetch(url, {
|
||||||
|
method: "GET",
|
||||||
|
headers: setup.authHeader ? { Authorization: `Bearer ${apiKey}` } : undefined,
|
||||||
|
signal: controller.signal,
|
||||||
|
});
|
||||||
|
if (!response.ok) {
|
||||||
|
printWarning(`Verification: ${url} returned ${response.status} ${response.statusText}`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const json = (await response.json()) as unknown;
|
||||||
|
const modelIds = Array.isArray((json as any)?.data)
|
||||||
|
? (json as any).data.map((entry: any) => (typeof entry?.id === "string" ? entry.id : undefined)).filter(Boolean)
|
||||||
|
: [];
|
||||||
|
const missing = setup.modelIds.filter((id) => modelIds.length > 0 && !modelIds.includes(id));
|
||||||
|
if (modelIds.length > 0 && missing.length > 0) {
|
||||||
|
printWarning(`Verification: /models does not list configured model id(s): ${missing.join(", ")}`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
printSuccess("Verification: endpoint reachable and authorized.");
|
||||||
|
} catch (error) {
|
||||||
|
printWarning(`Verification: failed to reach ${url}: ${error instanceof Error ? error.message : String(error)}`);
|
||||||
|
} finally {
|
||||||
|
clearTimeout(timer);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (setup.api === "anthropic-messages") {
|
||||||
|
const url = `${setup.baseUrl}/v1/models?limit=1`;
|
||||||
|
const controller = new AbortController();
|
||||||
|
const timer = setTimeout(() => controller.abort(), timeoutMs);
|
||||||
|
try {
|
||||||
|
const headers: Record<string, string> = {
|
||||||
|
"x-api-key": apiKey,
|
||||||
|
"anthropic-version": "2023-06-01",
|
||||||
|
};
|
||||||
|
if (setup.authHeader) {
|
||||||
|
headers.Authorization = `Bearer ${apiKey}`;
|
||||||
|
}
|
||||||
|
const response = await fetch(url, {
|
||||||
|
method: "GET",
|
||||||
|
headers,
|
||||||
|
signal: controller.signal,
|
||||||
|
});
|
||||||
|
if (!response.ok) {
|
||||||
|
printWarning(`Verification: ${url} returned ${response.status} ${response.statusText}`);
|
||||||
|
if (response.status === 404) {
|
||||||
|
printInfo(" Tip: For Anthropic mode, use a base URL without /v1 (e.g. https://api.anthropic.com).");
|
||||||
|
}
|
||||||
|
if ((response.status === 401 || response.status === 403) && !setup.authHeader) {
|
||||||
|
printInfo(" Tip: Some proxies require `Authorization: Bearer <apiKey>` even in Anthropic mode.");
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
printSuccess("Verification: endpoint reachable and authorized.");
|
||||||
|
} catch (error) {
|
||||||
|
printWarning(`Verification: failed to reach ${url}: ${error instanceof Error ? error.message : String(error)}`);
|
||||||
|
} finally {
|
||||||
|
clearTimeout(timer);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (setup.api === "google-generative-ai") {
|
||||||
|
const url = `${setup.baseUrl}/v1beta/models?key=${encodeURIComponent(apiKey)}`;
|
||||||
|
const controller = new AbortController();
|
||||||
|
const timer = setTimeout(() => controller.abort(), timeoutMs);
|
||||||
|
try {
|
||||||
|
const response = await fetch(url, { method: "GET", signal: controller.signal });
|
||||||
|
if (!response.ok) {
|
||||||
|
printWarning(`Verification: ${url} returned ${response.status} ${response.statusText}`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
printSuccess("Verification: endpoint reachable and authorized.");
|
||||||
|
} catch (error) {
|
||||||
|
printWarning(`Verification: failed to reach ${url}: ${error instanceof Error ? error.message : String(error)}`);
|
||||||
|
} finally {
|
||||||
|
clearTimeout(timer);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
printInfo("Verification: skipped network probe for this API mode.");
|
||||||
|
}
|
||||||
|
|
||||||
|
async function verifyBedrockCredentialChain(): Promise<void> {
|
||||||
|
const { defaultProvider } = await import("@aws-sdk/credential-provider-node");
|
||||||
|
const credentials = await defaultProvider({})();
|
||||||
|
if (!credentials?.accessKeyId || !credentials?.secretAccessKey) {
|
||||||
|
throw new Error("AWS credential chain resolved without usable Bedrock credentials.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function configureBedrockProvider(authPath: string): Promise<boolean> {
|
||||||
|
printSection("AWS Credentials: Amazon Bedrock");
|
||||||
|
printInfo("Feynman will verify the AWS SDK credential chain used by Pi's Bedrock provider.");
|
||||||
|
printInfo("Supported sources include AWS_PROFILE, ~/.aws credentials/config, SSO, ECS/IRSA, and EC2 instance roles.");
|
||||||
|
|
||||||
|
try {
|
||||||
|
await verifyBedrockCredentialChain();
|
||||||
|
AuthStorage.create(authPath).set("amazon-bedrock", { type: "api_key", key: "<authenticated>" });
|
||||||
|
printSuccess("Verified AWS credential chain and marked Amazon Bedrock as configured.");
|
||||||
|
printInfo("Use `feynman model list` to see available Bedrock models.");
|
||||||
|
return true;
|
||||||
|
} catch (error) {
|
||||||
|
printWarning(`AWS credential verification failed: ${error instanceof Error ? error.message : String(error)}`);
|
||||||
|
printInfo("Configure AWS credentials first, for example:");
|
||||||
|
printInfo(" export AWS_PROFILE=default");
|
||||||
|
printInfo(" # or set AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY");
|
||||||
|
printInfo(" # or use an EC2/ECS/IRSA role with valid Bedrock access");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function maybeSetRecommendedDefaultModel(settingsPath: string | undefined, authPath: string): void {
|
||||||
|
if (!settingsPath) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const currentSpec = getCurrentModelSpec(settingsPath);
|
||||||
|
const available = getAvailableModelRecords(authPath);
|
||||||
|
const currentValid = currentSpec ? available.some((m) => `${m.provider}/${m.id}` === currentSpec) : false;
|
||||||
|
|
||||||
|
if ((!currentSpec || !currentValid) && available.length > 0) {
|
||||||
|
const recommended = chooseRecommendedModel(authPath);
|
||||||
|
if (recommended) {
|
||||||
|
setDefaultModelSpec(settingsPath, authPath, recommended.spec);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function configureApiKeyProvider(authPath: string, providerId?: string): Promise<boolean> {
|
||||||
|
const provider = providerId ? resolveApiKeyProvider(providerId) : await selectApiKeyProvider();
|
||||||
|
if (!provider) {
|
||||||
|
if (providerId) {
|
||||||
|
throw new Error(`Unknown API-key model provider: ${providerId}`);
|
||||||
|
}
|
||||||
|
printInfo("API key setup cancelled.");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (provider.id === "amazon-bedrock") {
|
||||||
|
return configureBedrockProvider(authPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (provider.id === "__custom__") {
|
||||||
|
const setup = await promptCustomProviderSetup();
|
||||||
|
if (!setup) {
|
||||||
|
printInfo("Custom provider setup cancelled.");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
const modelsJsonPath = getModelsJsonPath(authPath);
|
||||||
|
const result = upsertProviderConfig(modelsJsonPath, setup.providerId, {
|
||||||
|
baseUrl: setup.baseUrl,
|
||||||
|
apiKey: setup.apiKeyConfig,
|
||||||
|
api: setup.api,
|
||||||
|
authHeader: setup.authHeader,
|
||||||
|
models: setup.modelIds.map((id) => ({ id })),
|
||||||
|
});
|
||||||
|
if (!result.ok) {
|
||||||
|
printWarning(result.error);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
printSuccess(`Saved custom provider: ${setup.providerId}`);
|
||||||
|
await verifyCustomProvider(setup, authPath);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
printSection(`API Key: ${provider.label}`);
|
||||||
|
if (provider.envVar) {
|
||||||
|
printInfo(`Tip: to avoid writing secrets to disk, set ${provider.envVar} in your shell or .env.`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const apiKey = await promptText("Paste API key (leave empty to use env var instead)", "");
|
||||||
|
if (!apiKey) {
|
||||||
|
if (provider.envVar) {
|
||||||
|
printInfo(`Set ${provider.envVar} and rerun setup (or run \`feynman model list\`).`);
|
||||||
|
} else {
|
||||||
|
printInfo("No API key provided.");
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
AuthStorage.create(authPath).set(provider.id, { type: "api_key", key: apiKey });
|
||||||
|
printSuccess(`Saved API key for ${provider.id} in auth storage.`);
|
||||||
|
|
||||||
|
const baseUrl = await promptText("Base URL override (optional, include /v1 for OpenAI-compatible endpoints)", "");
|
||||||
|
if (baseUrl) {
|
||||||
|
const modelsJsonPath = getModelsJsonPath(authPath);
|
||||||
|
const result = upsertProviderBaseUrl(modelsJsonPath, provider.id, baseUrl);
|
||||||
|
if (result.ok) {
|
||||||
|
printSuccess(`Saved baseUrl override for ${provider.id} in models.json.`);
|
||||||
|
} else {
|
||||||
|
printWarning(result.error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
function resolveAvailableModelSpec(authPath: string, input: string): string | undefined {
|
function resolveAvailableModelSpec(authPath: string, input: string): string | undefined {
|
||||||
const normalizedInput = input.trim().toLowerCase();
|
const normalizedInput = input.trim().replace(/^([^/:]+):(.+)$/, "$1/$2").toLowerCase();
|
||||||
if (!normalizedInput) {
|
if (!normalizedInput) {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
@@ -74,6 +622,17 @@ function resolveAvailableModelSpec(authPath: string, input: string): string | un
|
|||||||
return `${exactIdMatches[0]!.provider}/${exactIdMatches[0]!.id}`;
|
return `${exactIdMatches[0]!.provider}/${exactIdMatches[0]!.id}`;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// When multiple providers expose the same bare model ID, prefer providers the
|
||||||
|
// user explicitly configured in auth storage.
|
||||||
|
if (exactIdMatches.length > 1) {
|
||||||
|
const authData = readJson(authPath) as Record<string, unknown>;
|
||||||
|
const configuredProviders = new Set(Object.keys(authData));
|
||||||
|
const configuredMatches = exactIdMatches.filter((model) => configuredProviders.has(model.provider));
|
||||||
|
if (configuredMatches.length === 1) {
|
||||||
|
return `${configuredMatches[0]!.provider}/${configuredMatches[0]!.id}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -110,14 +669,52 @@ export function printModelList(settingsPath: string, authPath: string): void {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
export async function loginModelProvider(authPath: string, providerId?: string, settingsPath?: string): Promise<void> {
|
export async function authenticateModelProvider(authPath: string, settingsPath?: string): Promise<boolean> {
|
||||||
|
const choices = [
|
||||||
|
"OAuth login (recommended: ChatGPT Plus/Pro, Claude Pro/Max, Copilot, ...)",
|
||||||
|
"API key or custom provider (OpenAI, Anthropic, Google, local/self-hosted, ...)",
|
||||||
|
"Cancel",
|
||||||
|
];
|
||||||
|
const selection = await promptChoice("How do you want to authenticate?", choices, 0);
|
||||||
|
|
||||||
|
if (selection === 0) {
|
||||||
|
return loginModelProvider(authPath, undefined, settingsPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (selection === 1) {
|
||||||
|
const configured = await configureApiKeyProvider(authPath);
|
||||||
|
if (configured) {
|
||||||
|
maybeSetRecommendedDefaultModel(settingsPath, authPath);
|
||||||
|
}
|
||||||
|
return configured;
|
||||||
|
}
|
||||||
|
|
||||||
|
printInfo("Authentication cancelled.");
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function loginModelProvider(authPath: string, providerId?: string, settingsPath?: string): Promise<boolean> {
|
||||||
|
if (providerId) {
|
||||||
|
const resolvedProvider = resolveModelProviderForCommand(authPath, providerId);
|
||||||
|
if (!resolvedProvider) {
|
||||||
|
throw new Error(`Unknown model provider: ${providerId}`);
|
||||||
|
}
|
||||||
|
if (resolvedProvider.kind === "api-key") {
|
||||||
|
const configured = await configureApiKeyProvider(authPath, resolvedProvider.id);
|
||||||
|
if (configured) {
|
||||||
|
maybeSetRecommendedDefaultModel(settingsPath, authPath);
|
||||||
|
}
|
||||||
|
return configured;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const provider = providerId ? resolveOAuthProvider(authPath, providerId) : await selectOAuthProvider(authPath, "login");
|
const provider = providerId ? resolveOAuthProvider(authPath, providerId) : await selectOAuthProvider(authPath, "login");
|
||||||
if (!provider) {
|
if (!provider) {
|
||||||
if (providerId) {
|
if (providerId) {
|
||||||
throw new Error(`Unknown OAuth model provider: ${providerId}`);
|
throw new Error(`Unknown model provider: ${providerId}`);
|
||||||
}
|
}
|
||||||
printInfo("Login cancelled.");
|
printInfo("Login cancelled.");
|
||||||
return;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
const authStorage = AuthStorage.create(authPath);
|
const authStorage = AuthStorage.create(authPath);
|
||||||
@@ -126,7 +723,13 @@ export async function loginModelProvider(authPath: string, providerId?: string,
|
|||||||
await authStorage.login(provider.id, {
|
await authStorage.login(provider.id, {
|
||||||
onAuth: (info: { url: string; instructions?: string }) => {
|
onAuth: (info: { url: string; instructions?: string }) => {
|
||||||
printSection(`Login: ${provider.name ?? provider.id}`);
|
printSection(`Login: ${provider.name ?? provider.id}`);
|
||||||
printInfo(`Open this URL: ${info.url}`);
|
const opened = openUrl(info.url);
|
||||||
|
if (opened) {
|
||||||
|
printInfo("Opened the login URL in your browser.");
|
||||||
|
} else {
|
||||||
|
printWarning("Couldn't open your browser automatically.");
|
||||||
|
}
|
||||||
|
printInfo(`Auth URL: ${info.url}`);
|
||||||
if (info.instructions) {
|
if (info.instructions) {
|
||||||
printInfo(info.instructions);
|
printInfo(info.instructions);
|
||||||
}
|
}
|
||||||
@@ -145,33 +748,38 @@ export async function loginModelProvider(authPath: string, providerId?: string,
|
|||||||
|
|
||||||
printSuccess(`Model provider login complete: ${provider.id}`);
|
printSuccess(`Model provider login complete: ${provider.id}`);
|
||||||
|
|
||||||
if (settingsPath) {
|
maybeSetRecommendedDefaultModel(settingsPath, authPath);
|
||||||
const currentSpec = getCurrentModelSpec(settingsPath);
|
|
||||||
const available = getAvailableModelRecords(authPath);
|
|
||||||
const currentValid = currentSpec
|
|
||||||
? available.some((m) => `${m.provider}/${m.id}` === currentSpec)
|
|
||||||
: false;
|
|
||||||
|
|
||||||
if ((!currentSpec || !currentValid) && available.length > 0) {
|
return true;
|
||||||
const recommended = chooseRecommendedModel(authPath);
|
|
||||||
if (recommended) {
|
|
||||||
setDefaultModelSpec(settingsPath, authPath, recommended.spec);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export async function logoutModelProvider(authPath: string, providerId?: string): Promise<void> {
|
export async function logoutModelProvider(authPath: string, providerId?: string): Promise<void> {
|
||||||
const provider = providerId ? resolveOAuthProvider(authPath, providerId) : await selectOAuthProvider(authPath, "logout");
|
const authStorage = AuthStorage.create(authPath);
|
||||||
if (!provider) {
|
|
||||||
if (providerId) {
|
if (providerId) {
|
||||||
throw new Error(`Unknown OAuth model provider: ${providerId}`);
|
const resolvedProvider = resolveModelProviderForCommand(authPath, providerId);
|
||||||
|
if (resolvedProvider) {
|
||||||
|
authStorage.logout(resolvedProvider.id);
|
||||||
|
printSuccess(`Model provider logout complete: ${resolvedProvider.id}`);
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const normalizedProviderId = normalizeProviderId(providerId);
|
||||||
|
if (authStorage.has(normalizedProviderId)) {
|
||||||
|
authStorage.logout(normalizedProviderId);
|
||||||
|
printSuccess(`Model provider logout complete: ${normalizedProviderId}`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
throw new Error(`Unknown model provider: ${providerId}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const provider = await selectOAuthProvider(authPath, "logout");
|
||||||
|
if (!provider) {
|
||||||
printInfo("Logout cancelled.");
|
printInfo("Logout cancelled.");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
AuthStorage.create(authPath).logout(provider.id);
|
authStorage.logout(provider.id);
|
||||||
printSuccess(`Model provider logout complete: ${provider.id}`);
|
printSuccess(`Model provider logout complete: ${provider.id}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -193,11 +801,34 @@ export function setDefaultModelSpec(settingsPath: string, authPath: string, spec
|
|||||||
export async function runModelSetup(settingsPath: string, authPath: string): Promise<void> {
|
export async function runModelSetup(settingsPath: string, authPath: string): Promise<void> {
|
||||||
let status = collectModelStatus(settingsPath, authPath);
|
let status = collectModelStatus(settingsPath, authPath);
|
||||||
|
|
||||||
if (status.availableModels.length === 0) {
|
while (status.availableModels.length === 0) {
|
||||||
await loginModelProvider(authPath, undefined, settingsPath);
|
const choices = [
|
||||||
|
"OAuth login (recommended: ChatGPT Plus/Pro, Claude Pro/Max, Copilot, ...)",
|
||||||
|
"API key or custom provider (OpenAI, Anthropic, ZAI, Kimi, MiniMax, ...)",
|
||||||
|
"Cancel",
|
||||||
|
];
|
||||||
|
const selection = await promptChoice("Choose how to configure model access:", choices, 0);
|
||||||
|
if (selection === 0) {
|
||||||
|
const loggedIn = await loginModelProvider(authPath, undefined, settingsPath);
|
||||||
|
if (!loggedIn) {
|
||||||
|
status = collectModelStatus(settingsPath, authPath);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
} else if (selection === 1) {
|
||||||
|
const configured = await configureApiKeyProvider(authPath);
|
||||||
|
if (!configured) {
|
||||||
|
status = collectModelStatus(settingsPath, authPath);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
printInfo("Setup cancelled.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
status = collectModelStatus(settingsPath, authPath);
|
status = collectModelStatus(settingsPath, authPath);
|
||||||
if (status.availableModels.length === 0) {
|
if (status.availableModels.length === 0) {
|
||||||
return;
|
printWarning("No authenticated models are available yet.");
|
||||||
|
printInfo("If you configured a custom provider, ensure it has `apiKey` set in models.json.");
|
||||||
|
printInfo("Tip: run `feynman doctor` to see models.json path + load errors.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
91
src/model/models-json.ts
Normal file
91
src/model/models-json.ts
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
import { chmodSync, existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||||
|
import { dirname } from "node:path";
|
||||||
|
|
||||||
|
type ModelsJson = {
|
||||||
|
providers?: Record<string, Record<string, unknown>>;
|
||||||
|
};
|
||||||
|
|
||||||
|
function readModelsJson(modelsJsonPath: string): { ok: true; value: ModelsJson } | { ok: false; error: string } {
|
||||||
|
if (!existsSync(modelsJsonPath)) {
|
||||||
|
return { ok: true, value: { providers: {} } };
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const raw = readFileSync(modelsJsonPath, "utf8").trim();
|
||||||
|
if (!raw) {
|
||||||
|
return { ok: true, value: { providers: {} } };
|
||||||
|
}
|
||||||
|
const parsed = JSON.parse(raw) as unknown;
|
||||||
|
if (!parsed || typeof parsed !== "object") {
|
||||||
|
return { ok: false, error: `Invalid models.json (expected an object): ${modelsJsonPath}` };
|
||||||
|
}
|
||||||
|
return { ok: true, value: parsed as ModelsJson };
|
||||||
|
} catch (error) {
|
||||||
|
return {
|
||||||
|
ok: false,
|
||||||
|
error: `Failed to read models.json: ${error instanceof Error ? error.message : String(error)}`,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function upsertProviderBaseUrl(
|
||||||
|
modelsJsonPath: string,
|
||||||
|
providerId: string,
|
||||||
|
baseUrl: string,
|
||||||
|
): { ok: true } | { ok: false; error: string } {
|
||||||
|
return upsertProviderConfig(modelsJsonPath, providerId, { baseUrl });
|
||||||
|
}
|
||||||
|
|
||||||
|
export type ProviderConfigPatch = {
|
||||||
|
baseUrl?: string;
|
||||||
|
apiKey?: string;
|
||||||
|
api?: string;
|
||||||
|
authHeader?: boolean;
|
||||||
|
headers?: Record<string, string>;
|
||||||
|
models?: Array<{ id: string }>;
|
||||||
|
};
|
||||||
|
|
||||||
|
export function upsertProviderConfig(
|
||||||
|
modelsJsonPath: string,
|
||||||
|
providerId: string,
|
||||||
|
patch: ProviderConfigPatch,
|
||||||
|
): { ok: true } | { ok: false; error: string } {
|
||||||
|
const loaded = readModelsJson(modelsJsonPath);
|
||||||
|
if (!loaded.ok) {
|
||||||
|
return loaded;
|
||||||
|
}
|
||||||
|
|
||||||
|
const value: ModelsJson = loaded.value;
|
||||||
|
const providers: Record<string, Record<string, unknown>> = {
|
||||||
|
...(value.providers && typeof value.providers === "object" ? value.providers : {}),
|
||||||
|
};
|
||||||
|
|
||||||
|
const currentProvider =
|
||||||
|
providers[providerId] && typeof providers[providerId] === "object" ? providers[providerId] : {};
|
||||||
|
|
||||||
|
const nextProvider: Record<string, unknown> = { ...currentProvider };
|
||||||
|
if (patch.baseUrl !== undefined) nextProvider.baseUrl = patch.baseUrl;
|
||||||
|
if (patch.apiKey !== undefined) nextProvider.apiKey = patch.apiKey;
|
||||||
|
if (patch.api !== undefined) nextProvider.api = patch.api;
|
||||||
|
if (patch.authHeader !== undefined) nextProvider.authHeader = patch.authHeader;
|
||||||
|
if (patch.headers !== undefined) nextProvider.headers = patch.headers;
|
||||||
|
if (patch.models !== undefined) nextProvider.models = patch.models;
|
||||||
|
|
||||||
|
providers[providerId] = nextProvider;
|
||||||
|
|
||||||
|
const next: ModelsJson = { ...value, providers };
|
||||||
|
|
||||||
|
try {
|
||||||
|
mkdirSync(dirname(modelsJsonPath), { recursive: true });
|
||||||
|
writeFileSync(modelsJsonPath, JSON.stringify(next, null, 2) + "\n", "utf8");
|
||||||
|
// models.json can contain API keys/headers; default to user-only permissions.
|
||||||
|
try {
|
||||||
|
chmodSync(modelsJsonPath, 0o600);
|
||||||
|
} catch {
|
||||||
|
// ignore permission errors (best-effort)
|
||||||
|
}
|
||||||
|
return { ok: true };
|
||||||
|
} catch (error) {
|
||||||
|
return { ok: false, error: `Failed to write models.json: ${error instanceof Error ? error.message : String(error)}` };
|
||||||
|
}
|
||||||
|
}
|
||||||
11
src/model/registry.ts
Normal file
11
src/model/registry.ts
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
import { dirname, resolve } from "node:path";
|
||||||
|
|
||||||
|
import { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent";
|
||||||
|
|
||||||
|
export function getModelsJsonPath(authPath: string): string {
|
||||||
|
return resolve(dirname(authPath), "models.json");
|
||||||
|
}
|
||||||
|
|
||||||
|
export function createModelRegistry(authPath: string): ModelRegistry {
|
||||||
|
return ModelRegistry.create(AuthStorage.create(authPath), getModelsJsonPath(authPath));
|
||||||
|
}
|
||||||
65
src/model/service-tier.ts
Normal file
65
src/model/service-tier.ts
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
import { mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||||
|
import { dirname } from "node:path";
|
||||||
|
|
||||||
|
export const FEYNMAN_SERVICE_TIERS = [
|
||||||
|
"auto",
|
||||||
|
"default",
|
||||||
|
"flex",
|
||||||
|
"priority",
|
||||||
|
"standard_only",
|
||||||
|
] as const;
|
||||||
|
|
||||||
|
export type FeynmanServiceTier = (typeof FEYNMAN_SERVICE_TIERS)[number];
|
||||||
|
|
||||||
|
const SERVICE_TIER_SET = new Set<string>(FEYNMAN_SERVICE_TIERS);
|
||||||
|
const OPENAI_SERVICE_TIERS = new Set<FeynmanServiceTier>(["auto", "default", "flex", "priority"]);
|
||||||
|
const ANTHROPIC_SERVICE_TIERS = new Set<FeynmanServiceTier>(["auto", "standard_only"]);
|
||||||
|
|
||||||
|
function readSettings(settingsPath: string): Record<string, unknown> {
|
||||||
|
try {
|
||||||
|
return JSON.parse(readFileSync(settingsPath, "utf8")) as Record<string, unknown>;
|
||||||
|
} catch {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export function normalizeServiceTier(value: string | undefined): FeynmanServiceTier | undefined {
|
||||||
|
if (!value) return undefined;
|
||||||
|
const normalized = value.trim().toLowerCase();
|
||||||
|
return SERVICE_TIER_SET.has(normalized) ? (normalized as FeynmanServiceTier) : undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getConfiguredServiceTier(settingsPath: string): FeynmanServiceTier | undefined {
|
||||||
|
const settings = readSettings(settingsPath);
|
||||||
|
return normalizeServiceTier(typeof settings.serviceTier === "string" ? settings.serviceTier : undefined);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function setConfiguredServiceTier(settingsPath: string, tier: FeynmanServiceTier | undefined): void {
|
||||||
|
const settings = readSettings(settingsPath);
|
||||||
|
if (tier) {
|
||||||
|
settings.serviceTier = tier;
|
||||||
|
} else {
|
||||||
|
delete settings.serviceTier;
|
||||||
|
}
|
||||||
|
|
||||||
|
mkdirSync(dirname(settingsPath), { recursive: true });
|
||||||
|
writeFileSync(settingsPath, JSON.stringify(settings, null, 2) + "\n", "utf8");
|
||||||
|
}
|
||||||
|
|
||||||
|
export function resolveActiveServiceTier(settingsPath: string): FeynmanServiceTier | undefined {
|
||||||
|
return normalizeServiceTier(process.env.FEYNMAN_SERVICE_TIER) ?? getConfiguredServiceTier(settingsPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function resolveProviderServiceTier(
|
||||||
|
provider: string | undefined,
|
||||||
|
tier: FeynmanServiceTier | undefined,
|
||||||
|
): FeynmanServiceTier | undefined {
|
||||||
|
if (!provider || !tier) return undefined;
|
||||||
|
if ((provider === "openai" || provider === "openai-codex") && OPENAI_SERVICE_TIERS.has(tier)) {
|
||||||
|
return tier;
|
||||||
|
}
|
||||||
|
if (provider === "anthropic" && ANTHROPIC_SERVICE_TIERS.has(tier)) {
|
||||||
|
return tier;
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
@@ -1,22 +1,38 @@
|
|||||||
import { spawn } from "node:child_process";
|
import { spawn } from "node:child_process";
|
||||||
import { existsSync } from "node:fs";
|
import { existsSync } from "node:fs";
|
||||||
|
import { constants } from "node:os";
|
||||||
|
|
||||||
import { buildPiArgs, buildPiEnv, type PiRuntimeOptions, resolvePiPaths } from "./runtime.js";
|
import { buildPiArgs, buildPiEnv, type PiRuntimeOptions, resolvePiPaths, toNodeImportSpecifier } from "./runtime.js";
|
||||||
|
import { ensureSupportedNodeVersion } from "../system/node-version.js";
|
||||||
|
|
||||||
|
export function exitCodeFromSignal(signal: NodeJS.Signals): number {
|
||||||
|
const signalNumber = constants.signals[signal];
|
||||||
|
return typeof signalNumber === "number" ? 128 + signalNumber : 1;
|
||||||
|
}
|
||||||
|
|
||||||
export async function launchPiChat(options: PiRuntimeOptions): Promise<void> {
|
export async function launchPiChat(options: PiRuntimeOptions): Promise<void> {
|
||||||
const { piCliPath, promisePolyfillPath } = resolvePiPaths(options.appRoot);
|
ensureSupportedNodeVersion();
|
||||||
|
|
||||||
|
const { piCliPath, promisePolyfillPath, promisePolyfillSourcePath, tsxLoaderPath } = resolvePiPaths(options.appRoot);
|
||||||
if (!existsSync(piCliPath)) {
|
if (!existsSync(piCliPath)) {
|
||||||
throw new Error(`Pi CLI not found: ${piCliPath}`);
|
throw new Error(`Pi CLI not found: ${piCliPath}`);
|
||||||
}
|
}
|
||||||
if (!existsSync(promisePolyfillPath)) {
|
|
||||||
|
const useBuiltPolyfill = existsSync(promisePolyfillPath);
|
||||||
|
const useDevPolyfill = !useBuiltPolyfill && existsSync(promisePolyfillSourcePath) && existsSync(tsxLoaderPath);
|
||||||
|
if (!useBuiltPolyfill && !useDevPolyfill) {
|
||||||
throw new Error(`Promise polyfill not found: ${promisePolyfillPath}`);
|
throw new Error(`Promise polyfill not found: ${promisePolyfillPath}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (process.stdout.isTTY) {
|
if (process.stdout.isTTY && options.mode !== "rpc") {
|
||||||
process.stdout.write("\x1b[2J\x1b[3J\x1b[H");
|
process.stdout.write("\x1b[2J\x1b[3J\x1b[H");
|
||||||
}
|
}
|
||||||
|
|
||||||
const child = spawn(process.execPath, ["--import", promisePolyfillPath, piCliPath, ...buildPiArgs(options)], {
|
const importArgs = useDevPolyfill
|
||||||
|
? ["--import", toNodeImportSpecifier(tsxLoaderPath), "--import", toNodeImportSpecifier(promisePolyfillSourcePath)]
|
||||||
|
: ["--import", toNodeImportSpecifier(promisePolyfillPath)];
|
||||||
|
|
||||||
|
const child = spawn(process.execPath, [...importArgs, piCliPath, ...buildPiArgs(options)], {
|
||||||
cwd: options.workingDir,
|
cwd: options.workingDir,
|
||||||
stdio: "inherit",
|
stdio: "inherit",
|
||||||
env: buildPiEnv(options),
|
env: buildPiEnv(options),
|
||||||
@@ -26,7 +42,9 @@ export async function launchPiChat(options: PiRuntimeOptions): Promise<void> {
|
|||||||
child.on("error", reject);
|
child.on("error", reject);
|
||||||
child.on("exit", (code, signal) => {
|
child.on("exit", (code, signal) => {
|
||||||
if (signal) {
|
if (signal) {
|
||||||
process.kill(process.pid, signal);
|
console.error(`feynman terminated because the Pi child exited with ${signal}.`);
|
||||||
|
process.exitCode = exitCodeFromSignal(signal);
|
||||||
|
resolvePromise();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
process.exitCode = code ?? 0;
|
process.exitCode = code ?? 0;
|
||||||
|
|||||||
456
src/pi/package-ops.ts
Normal file
456
src/pi/package-ops.ts
Normal file
@@ -0,0 +1,456 @@
|
|||||||
|
import { spawn } from "node:child_process";
import { cpSync, existsSync, lstatSync, mkdirSync, readlinkSync, rmSync, symlinkSync, writeFileSync } from "node:fs";
import { dirname, join, resolve, sep } from "node:path";
import { fileURLToPath } from "node:url";

import { DefaultPackageManager, SettingsManager } from "@mariozechner/pi-coding-agent";

import { NATIVE_PACKAGE_SOURCES, supportsNativePackageSources } from "./package-presets.js";
import { applyFeynmanPackageManagerEnv, getFeynmanNpmPrefixPath } from "./runtime.js";
import { getPathWithCurrentNode, resolveExecutable } from "../system/executables.js";
|
||||||
|
|
||||||
|
// Where a configured package is installed: "user" = Feynman's global npm
// prefix next to the agent dir; "project" = <project>/.feynman/npm.
type PackageScope = "user" | "project";

// A package entry as reported by the package manager's configuration.
type ConfiguredPackage = {
  source: string; // e.g. "npm:pi-subagents" or a non-npm (git) source
  scope: PackageScope;
  filtered: boolean; // NOTE(review): presumably marks sources filtered out elsewhere — confirm against producer
  installedPath?: string; // absent when the package is not installed on disk
};

// Parsed form of an "npm:<name>[@version]" source string.
type NpmSource = {
  name: string; // package name, scoped names included ("@scope/pkg")
  source: string; // the original "npm:..." source string
  spec: string; // install spec handed to npm ("name" or "name@version")
  pinned: boolean; // true when an explicit version was present
};

// Result shape for getMissingConfiguredPackages().
export type MissingConfiguredPackageSummary = {
  missing: ConfiguredPackage[]; // configured but not installed at all
  bundled: ConfiguredPackage[]; // installed, but only via the app's bundled workspace copy
};

// Result shape for installPackageSources().
export type InstallPackageSourcesResult = {
  installed: string[];
  skipped: string[];
};

// Result shape for updateConfiguredPackages().
export type UpdateConfiguredPackagesResult = {
  updated: string[];
  skipped: string[];
};

// npm output lines suppressed when relaying install output to the user
// (deprecation noise, notices, and the added/funding summary lines).
const FILTERED_INSTALL_OUTPUT_PATTERNS = [
  /npm warn deprecated node-domexception@1\.0\.0/i,
  /npm notice/i,
  /^(added|removed|changed) \d+ packages?( in .+)?$/i,
  /^(\d+ )?packages are looking for funding$/i,
  /^run `npm fund` for details$/i,
];

// App root: two directory levels above this module's location.
const APP_ROOT = resolve(dirname(fileURLToPath(import.meta.url)), "..", "..");
|
||||||
|
|
||||||
|
// Build the settings/package-manager pair shared by every package operation.
// NOTE: mutates process.env as a side effect — npm prefix variables (via
// applyFeynmanPackageManagerEnv) and PATH (so spawned tooling resolves the
// currently running Node first).
function createPackageContext(workingDir: string, agentDir: string) {
  applyFeynmanPackageManagerEnv(agentDir);
  process.env.PATH = getPathWithCurrentNode(process.env.PATH);
  const settingsManager = SettingsManager.create(workingDir, agentDir);
  const packageManager = new DefaultPackageManager({
    cwd: workingDir,
    agentDir,
    settingsManager,
  });

  return {
    settingsManager,
    packageManager,
  };
}
|
||||||
|
|
||||||
|
function shouldSkipNativeSource(source: string, version = process.versions.node): boolean {
|
||||||
|
return !supportsNativePackageSources(version) && NATIVE_PACKAGE_SOURCES.includes(source as (typeof NATIVE_PACKAGE_SOURCES)[number]);
|
||||||
|
}
|
||||||
|
|
||||||
|
function filterUnsupportedSources(sources: string[], version = process.versions.node): { supported: string[]; skipped: string[] } {
|
||||||
|
const supported: string[] = [];
|
||||||
|
const skipped: string[] = [];
|
||||||
|
|
||||||
|
for (const source of sources) {
|
||||||
|
if (shouldSkipNativeSource(source, version)) {
|
||||||
|
skipped.push(source);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
supported.push(source);
|
||||||
|
}
|
||||||
|
|
||||||
|
return { supported, skipped };
|
||||||
|
}
|
||||||
|
|
||||||
|
function relayFilteredOutput(chunk: Buffer | string, writer: NodeJS.WriteStream): void {
|
||||||
|
const text = chunk.toString();
|
||||||
|
for (const line of text.split(/\r?\n/)) {
|
||||||
|
if (!line.trim()) continue;
|
||||||
|
if (FILTERED_INSTALL_OUTPUT_PATTERNS.some((pattern) => pattern.test(line.trim()))) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
writer.write(`${line}\n`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseNpmSource(source: string): NpmSource | undefined {
|
||||||
|
if (!source.startsWith("npm:")) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
const spec = source.slice("npm:".length).trim();
|
||||||
|
const match = spec.match(/^(@?[^@]+(?:\/[^@]+)?)(?:@(.+))?$/);
|
||||||
|
const name = match?.[1] ?? spec;
|
||||||
|
const version = match?.[2];
|
||||||
|
|
||||||
|
return {
|
||||||
|
name,
|
||||||
|
source,
|
||||||
|
spec,
|
||||||
|
pinned: Boolean(version),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function dedupeNpmSources(sources: string[], updateToLatest: boolean): string[] {
|
||||||
|
const specs = new Map<string, string>();
|
||||||
|
|
||||||
|
for (const source of sources) {
|
||||||
|
const parsed = parseNpmSource(source);
|
||||||
|
if (!parsed) continue;
|
||||||
|
|
||||||
|
specs.set(parsed.name, updateToLatest && !parsed.pinned ? `${parsed.name}@latest` : parsed.spec);
|
||||||
|
}
|
||||||
|
|
||||||
|
return [...specs.values()];
|
||||||
|
}
|
||||||
|
|
||||||
|
function ensureProjectInstallRoot(workingDir: string): string {
|
||||||
|
const installRoot = resolve(workingDir, ".feynman", "npm");
|
||||||
|
mkdirSync(installRoot, { recursive: true });
|
||||||
|
|
||||||
|
const ignorePath = join(installRoot, ".gitignore");
|
||||||
|
if (!existsSync(ignorePath)) {
|
||||||
|
writeFileSync(ignorePath, "*\n!.gitignore\n", "utf8");
|
||||||
|
}
|
||||||
|
|
||||||
|
const packageJsonPath = join(installRoot, "package.json");
|
||||||
|
if (!existsSync(packageJsonPath)) {
|
||||||
|
writeFileSync(packageJsonPath, JSON.stringify({ name: "feynman-packages", private: true }, null, 2) + "\n", "utf8");
|
||||||
|
}
|
||||||
|
|
||||||
|
return installRoot;
|
||||||
|
}
|
||||||
|
|
||||||
|
function resolveAdjacentNpmExecutable(): string | undefined {
|
||||||
|
const executableName = process.platform === "win32" ? "npm.cmd" : "npm";
|
||||||
|
const candidate = resolve(dirname(process.execPath), executableName);
|
||||||
|
return existsSync(candidate) ? candidate : undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
function resolvePackageManagerCommand(settingsManager: SettingsManager): { command: string; args: string[] } | undefined {
|
||||||
|
const configured = settingsManager.getNpmCommand();
|
||||||
|
if (!configured || configured.length === 0) {
|
||||||
|
const adjacentNpm = resolveAdjacentNpmExecutable() ?? resolveExecutable("npm");
|
||||||
|
return adjacentNpm ? { command: adjacentNpm, args: [] } : undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
const [command = "npm", ...args] = configured;
|
||||||
|
if (!command) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
const executable = resolveExecutable(command);
|
||||||
|
if (!executable) {
|
||||||
|
return undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
return { command: executable, args };
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run one batched `install` through the resolved package manager.
//
// `scope` selects the install target: "user" installs globally into Feynman's
// own npm prefix, "project" into <workingDir>/.feynman/npm. Output is relayed
// with known-noise lines filtered. Rejects when no package manager can be
// resolved or the child exits non-zero (with a dedicated message for the
// pi-generative-ui native-build failure on macOS).
async function runPackageManagerInstall(
  settingsManager: SettingsManager,
  workingDir: string,
  agentDir: string,
  scope: PackageScope,
  specs: string[],
): Promise<void> {
  if (specs.length === 0) {
    return;
  }

  const packageManagerCommand = resolvePackageManagerCommand(settingsManager);
  if (!packageManagerCommand) {
    throw new Error("No supported package manager found. Install npm, pnpm, or bun, or configure `npmCommand`.");
  }

  // Quiet, deterministic install flags; --legacy-peer-deps avoids peer
  // resolution failures across heterogeneous extension packages.
  const args = [
    ...packageManagerCommand.args,
    "install",
    "--no-audit",
    "--no-fund",
    "--legacy-peer-deps",
    "--loglevel",
    "error",
  ];

  if (scope === "user") {
    // Global install pinned to Feynman's own prefix, never the user's.
    args.push("-g", "--prefix", getFeynmanNpmPrefixPath(agentDir));
  } else {
    args.push("--prefix", ensureProjectInstallRoot(workingDir));
  }

  args.push(...specs);

  await new Promise<void>((resolvePromise, reject) => {
    const child = spawn(packageManagerCommand.command, args, {
      cwd: scope === "user" ? agentDir : workingDir,
      stdio: ["ignore", "pipe", "pipe"],
      env: {
        ...process.env,
        // Ensure the child resolves the currently running Node first.
        PATH: getPathWithCurrentNode(process.env.PATH),
      },
    });

    child.stdout?.on("data", (chunk) => relayFilteredOutput(chunk, process.stdout));
    child.stderr?.on("data", (chunk) => relayFilteredOutput(chunk, process.stderr));

    child.on("error", reject);
    child.on("exit", (code) => {
      // Treat a null code (killed by signal) as failure.
      if ((code ?? 1) !== 0) {
        const installingGenerativeUi = specs.some((spec) => spec.startsWith("pi-generative-ui"));
        if (installingGenerativeUi && process.platform === "darwin") {
          reject(
            new Error(
              "Installing pi-generative-ui failed. Its native glimpseui dependency did not compile against the current macOS/Xcode toolchain. Try the npm-installed Feynman path with your local Node toolchain or skip this optional preset for now.",
            ),
          );
          return;
        }
        reject(new Error(`${packageManagerCommand.command} install failed with code ${code ?? 1}`));
        return;
      }

      resolvePromise();
    });
  });
}
|
||||||
|
|
||||||
|
function groupConfiguredNpmSources(packages: ConfiguredPackage[]): Record<PackageScope, string[]> {
|
||||||
|
return {
|
||||||
|
user: packages.filter((entry) => entry.scope === "user").map((entry) => entry.source),
|
||||||
|
project: packages.filter((entry) => entry.scope === "project").map((entry) => entry.source),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function isBundledWorkspacePackagePath(installedPath: string | undefined, appRoot: string): boolean {
|
||||||
|
if (!installedPath) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
const bundledRoot = resolve(appRoot, ".feynman", "npm", "node_modules");
|
||||||
|
return installedPath.startsWith(bundledRoot);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getMissingConfiguredPackages(
|
||||||
|
workingDir: string,
|
||||||
|
agentDir: string,
|
||||||
|
appRoot: string,
|
||||||
|
): MissingConfiguredPackageSummary {
|
||||||
|
const { packageManager } = createPackageContext(workingDir, agentDir);
|
||||||
|
const configured = packageManager.listConfiguredPackages();
|
||||||
|
|
||||||
|
return configured.reduce<MissingConfiguredPackageSummary>(
|
||||||
|
(summary, entry) => {
|
||||||
|
if (entry.installedPath) {
|
||||||
|
if (isBundledWorkspacePackagePath(entry.installedPath, appRoot)) {
|
||||||
|
summary.bundled.push(entry);
|
||||||
|
}
|
||||||
|
return summary;
|
||||||
|
}
|
||||||
|
|
||||||
|
summary.missing.push(entry);
|
||||||
|
return summary;
|
||||||
|
},
|
||||||
|
{ missing: [], bundled: [] },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Install the given package sources.
 *
 * Flow: for user scope, first seed any copies bundled with the app; then
 * batch-install the remaining npm sources through the package-manager CLI
 * (dropping native-only sources on unsupported Node versions); then install
 * non-npm sources one at a time via the Pi package manager. With
 * `options.persist`, every installed source is also recorded in settings.
 *
 * Returns the sources actually installed and those skipped (unsupported
 * native sources, plus sources that could not be persisted).
 */
export async function installPackageSources(
  workingDir: string,
  agentDir: string,
  sources: string[],
  options?: { local?: boolean; persist?: boolean },
): Promise<InstallPackageSourcesResult> {
  const { settingsManager, packageManager } = createPackageContext(workingDir, agentDir);
  const scope: PackageScope = options?.local ? "project" : "user";
  const installed: string[] = [];

  // Bundled workspace copies satisfy user-scope installs without touching npm.
  const bundledSeeded = scope === "user" ? seedBundledWorkspacePackages(agentDir, APP_ROOT, sources) : [];
  installed.push(...bundledSeeded);
  const remainingSources = sources.filter((source) => !bundledSeeded.includes(source));
  const grouped = groupConfiguredNpmSources(
    remainingSources.map((source) => ({
      source,
      scope,
      filtered: false,
    })),
  );
  const { supported: supportedUserSources, skipped } = filterUnsupportedSources(grouped.user);
  const { supported: supportedProjectSources, skipped: skippedProject } = filterUnsupportedSources(grouped.project);
  skipped.push(...skippedProject);

  // Since every entry carries the same scope, exactly one bucket is non-empty.
  const supportedNpmSources = scope === "user" ? supportedUserSources : supportedProjectSources;
  if (supportedNpmSources.length > 0) {
    await runPackageManagerInstall(settingsManager, workingDir, agentDir, scope, dedupeNpmSources(supportedNpmSources, false));
    installed.push(...supportedNpmSources);
  }

  // Non-npm sources (e.g. git) go through the Pi package manager directly.
  for (const source of sources) {
    if (parseNpmSource(source)) {
      continue;
    }

    await packageManager.install(source, { local: options?.local });
    installed.push(source);
  }

  if (options?.persist) {
    for (const source of installed) {
      if (packageManager.addSourceToSettings(source, { local: options?.local })) {
        continue;
      }
      // Could not be recorded in settings; surface it as skipped.
      skipped.push(source);
    }
    await settingsManager.flush();
  }

  return { installed, skipped };
}
|
||||||
|
|
||||||
|
/**
 * Update configured packages.
 *
 * With an explicit `source`, only that package is updated via the Pi package
 * manager. Otherwise every available update is applied: npm updates are
 * batched per scope through the package-manager CLI (native-only sources are
 * skipped on unsupported Node versions, unpinned ones bumped to @latest),
 * and git sources are updated one at a time.
 */
export async function updateConfiguredPackages(
  workingDir: string,
  agentDir: string,
  source?: string,
): Promise<UpdateConfiguredPackagesResult> {
  const { settingsManager, packageManager } = createPackageContext(workingDir, agentDir);

  if (source) {
    await packageManager.update(source);
    return { updated: [source], skipped: [] };
  }

  const availableUpdates = await packageManager.checkForAvailableUpdates();
  if (availableUpdates.length === 0) {
    return { updated: [], skipped: [] };
  }

  const npmUpdatesByScope: Record<PackageScope, string[]> = { user: [], project: [] };
  const gitUpdates: string[] = [];
  const skipped: string[] = [];

  for (const entry of availableUpdates) {
    if (entry.type === "npm") {
      if (shouldSkipNativeSource(entry.source)) {
        skipped.push(entry.source);
        continue;
      }
      npmUpdatesByScope[entry.scope].push(entry.source);
      continue;
    }

    // Non-npm entries are treated as git sources and updated individually.
    gitUpdates.push(entry.source);
  }

  for (const scope of ["user", "project"] as const) {
    const sources = npmUpdatesByScope[scope];
    if (sources.length === 0) continue;

    // One batched install per scope; unpinned packages go to @latest.
    await runPackageManagerInstall(settingsManager, workingDir, agentDir, scope, dedupeNpmSources(sources, true));
  }

  for (const gitSource of gitUpdates) {
    await packageManager.update(gitSource);
  }

  return {
    // Everything with an available update counts as updated, minus skips.
    updated: availableUpdates
      .map((entry) => entry.source)
      .filter((source) => !skipped.includes(source)),
    skipped,
  };
}
|
||||||
|
|
||||||
|
function ensureParentDir(path: string): void {
|
||||||
|
mkdirSync(dirname(path), { recursive: true });
|
||||||
|
}
|
||||||
|
|
||||||
|
function pathsMatchSymlinkTarget(linkPath: string, targetPath: string): boolean {
|
||||||
|
try {
|
||||||
|
if (!lstatSync(linkPath).isSymbolicLink()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return resolve(dirname(linkPath), readlinkSync(linkPath)) === targetPath;
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure `linkPath` points at `targetPath`, preferring a symlink (junction on
// Windows) and falling back to a recursive copy where symlinks are not
// permitted. Existing non-symlink entries at linkPath are left untouched.
function linkDirectory(linkPath: string, targetPath: string): void {
  // Already a symlink to the right target: nothing to do.
  if (pathsMatchSymlinkTarget(linkPath, targetPath)) {
    return;
  }

  // Remove a stale symlink (pointing elsewhere); ignore removal failures.
  try {
    if (existsSync(linkPath) && lstatSync(linkPath).isSymbolicLink()) {
      rmSync(linkPath, { force: true });
    }
  } catch {}

  // A real directory/file at linkPath is deliberately preserved.
  if (existsSync(linkPath)) {
    return;
  }

  ensureParentDir(linkPath);
  try {
    symlinkSync(targetPath, linkPath, process.platform === "win32" ? "junction" : "dir");
  } catch {
    // Fallback for filesystems that do not allow symlinks.
    if (!existsSync(linkPath)) {
      cpSync(targetPath, linkPath, { recursive: true });
    }
  }
}
|
||||||
|
|
||||||
|
/**
 * Seed Feynman's global node_modules with npm packages bundled under
 * <appRoot>/.feynman/npm/node_modules, linking (or copying) each bundled
 * package that is requested, supported on this Node version, and not already
 * present in the global tree.
 *
 * Returns the subset of `sources` that was seeded.
 */
export function seedBundledWorkspacePackages(
  agentDir: string,
  appRoot: string,
  sources: string[],
): string[] {
  const bundledNodeModulesRoot = resolve(appRoot, ".feynman", "npm", "node_modules");
  // No bundled tree shipped with this install: nothing to seed.
  if (!existsSync(bundledNodeModulesRoot)) {
    return [];
  }

  const globalNodeModulesRoot = resolve(getFeynmanNpmPrefixPath(agentDir), "lib", "node_modules");
  const seeded: string[] = [];

  for (const source of sources) {
    // Native-only packages are skipped on unsupported Node versions.
    if (shouldSkipNativeSource(source)) continue;

    const parsed = parseNpmSource(source);
    if (!parsed) continue;

    const bundledPackagePath = resolve(bundledNodeModulesRoot, parsed.name);
    if (!existsSync(bundledPackagePath)) continue;

    const targetPath = resolve(globalNodeModulesRoot, parsed.name);
    // Never overwrite an existing global install.
    if (!existsSync(targetPath)) {
      linkDirectory(targetPath, bundledPackagePath);
      seeded.push(source);
    }
  }

  return seeded;
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
import type { PackageSource } from "@mariozechner/pi-coding-agent";
|
import type { PackageSource } from "@mariozechner/pi-coding-agent";
|
||||||
|
|
||||||
export const CORE_PACKAGE_SOURCES = [
|
export const CORE_PACKAGE_SOURCES = [
|
||||||
|
"npm:@companion-ai/alpha-hub",
|
||||||
"npm:pi-subagents",
|
"npm:pi-subagents",
|
||||||
"npm:pi-btw",
|
"npm:pi-btw",
|
||||||
"npm:pi-docparser",
|
"npm:pi-docparser",
|
||||||
@@ -16,6 +17,13 @@ export const CORE_PACKAGE_SOURCES = [
|
|||||||
"npm:@tmustier/pi-ralph-wiggum",
|
"npm:@tmustier/pi-ralph-wiggum",
|
||||||
] as const;
|
] as const;
|
||||||
|
|
||||||
|
// Package sources whose installs build native addons; they are skipped on
// Node majors newer than MAX_NATIVE_PACKAGE_NODE_MAJOR.
export const NATIVE_PACKAGE_SOURCES = [
  "npm:@kaiserlich-dev/pi-session-search",
  "npm:@samfp/pi-memory",
] as const;

// Newest Node major the native packages above are supported on.
export const MAX_NATIVE_PACKAGE_NODE_MAJOR = 24;
|
||||||
|
|
||||||
export const OPTIONAL_PACKAGE_PRESETS = {
|
export const OPTIONAL_PACKAGE_PRESETS = {
|
||||||
"generative-ui": {
|
"generative-ui": {
|
||||||
description: "Interactive Glimpse UI widgets.",
|
description: "Interactive Glimpse UI widgets.",
|
||||||
@@ -23,13 +31,13 @@ export const OPTIONAL_PACKAGE_PRESETS = {
|
|||||||
},
|
},
|
||||||
} as const;
|
} as const;
|
||||||
|
|
||||||
|
export type OptionalPackagePresetName = keyof typeof OPTIONAL_PACKAGE_PRESETS;
|
||||||
|
|
||||||
// The historical default package set (core plus pi-generative-ui), used to
// detect settings that still carry the old defaults verbatim.
const LEGACY_DEFAULT_PACKAGE_SOURCES = [
  ...CORE_PACKAGE_SOURCES,
  "npm:pi-generative-ui",
] as const;
|
||||||
|
|
||||||
export type OptionalPackagePresetName = keyof typeof OPTIONAL_PACKAGE_PRESETS;
|
|
||||||
|
|
||||||
function arraysMatchAsSets(left: readonly string[], right: readonly string[]): boolean {
|
function arraysMatchAsSets(left: readonly string[], right: readonly string[]): boolean {
|
||||||
if (left.length !== right.length) {
|
if (left.length !== right.length) {
|
||||||
return false;
|
return false;
|
||||||
@@ -49,6 +57,24 @@ export function shouldPruneLegacyDefaultPackages(packages: PackageSource[] | und
|
|||||||
return arraysMatchAsSets(packages as string[], LEGACY_DEFAULT_PACKAGE_SOURCES);
|
return arraysMatchAsSets(packages as string[], LEGACY_DEFAULT_PACKAGE_SOURCES);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function parseNodeMajor(version: string): number {
|
||||||
|
const [major = "0"] = version.replace(/^v/, "").split(".");
|
||||||
|
return Number.parseInt(major, 10) || 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function supportsNativePackageSources(version = process.versions.node): boolean {
|
||||||
|
return parseNodeMajor(version) <= MAX_NATIVE_PACKAGE_NODE_MAJOR;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function filterPackageSourcesForCurrentNode<T extends string>(sources: readonly T[], version = process.versions.node): T[] {
|
||||||
|
if (supportsNativePackageSources(version)) {
|
||||||
|
return [...sources];
|
||||||
|
}
|
||||||
|
|
||||||
|
const blocked = new Set<string>(NATIVE_PACKAGE_SOURCES);
|
||||||
|
return sources.filter((source) => !blocked.has(source));
|
||||||
|
}
|
||||||
|
|
||||||
export function getOptionalPackagePresetSources(name: string): string[] | undefined {
|
export function getOptionalPackagePresetSources(name: string): string[] | undefined {
|
||||||
const normalized = name.trim().toLowerCase();
|
const normalized = name.trim().toLowerCase();
|
||||||
if (normalized === "ui") {
|
if (normalized === "ui") {
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
import { existsSync, readFileSync } from "node:fs";
|
import { existsSync, readFileSync } from "node:fs";
|
||||||
import { dirname, resolve } from "node:path";
|
import { delimiter, dirname, isAbsolute, resolve } from "node:path";
|
||||||
|
import { pathToFileURL } from "node:url";
|
||||||
|
|
||||||
import {
|
import {
|
||||||
BROWSER_FALLBACK_PATHS,
|
BROWSER_FALLBACK_PATHS,
|
||||||
@@ -14,30 +15,54 @@ export type PiRuntimeOptions = {
|
|||||||
sessionDir: string;
|
sessionDir: string;
|
||||||
feynmanAgentDir: string;
|
feynmanAgentDir: string;
|
||||||
feynmanVersion?: string;
|
feynmanVersion?: string;
|
||||||
|
mode?: "text" | "json" | "rpc";
|
||||||
thinkingLevel?: string;
|
thinkingLevel?: string;
|
||||||
explicitModelSpec?: string;
|
explicitModelSpec?: string;
|
||||||
oneShotPrompt?: string;
|
oneShotPrompt?: string;
|
||||||
initialPrompt?: string;
|
initialPrompt?: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
export function getFeynmanNpmPrefixPath(feynmanAgentDir: string): string {
|
||||||
|
return resolve(dirname(feynmanAgentDir), "npm-global");
|
||||||
|
}
|
||||||
|
|
||||||
|
export function applyFeynmanPackageManagerEnv(feynmanAgentDir: string): string {
|
||||||
|
const feynmanNpmPrefixPath = getFeynmanNpmPrefixPath(feynmanAgentDir);
|
||||||
|
process.env.FEYNMAN_NPM_PREFIX = feynmanNpmPrefixPath;
|
||||||
|
process.env.NPM_CONFIG_PREFIX = feynmanNpmPrefixPath;
|
||||||
|
process.env.npm_config_prefix = feynmanNpmPrefixPath;
|
||||||
|
return feynmanNpmPrefixPath;
|
||||||
|
}
|
||||||
|
|
||||||
// Central map of every filesystem location the Pi integration depends on,
// all derived from the app root.
export function resolvePiPaths(appRoot: string) {
  return {
    piPackageRoot: resolve(appRoot, "node_modules", "@mariozechner", "pi-coding-agent"),
    piCliPath: resolve(appRoot, "node_modules", "@mariozechner", "pi-coding-agent", "dist", "cli.js"),
    // Built polyfill, plus its TS source and the tsx loader for the
    // run-from-source (no dist/) development fallback.
    promisePolyfillPath: resolve(appRoot, "dist", "system", "promise-polyfill.js"),
    promisePolyfillSourcePath: resolve(appRoot, "src", "system", "promise-polyfill.ts"),
    tsxLoaderPath: resolve(appRoot, "node_modules", "tsx", "dist", "loader.mjs"),
    researchToolsPath: resolve(appRoot, "extensions", "research-tools.ts"),
    promptTemplatePath: resolve(appRoot, "prompts"),
    systemPromptPath: resolve(appRoot, ".feynman", "SYSTEM.md"),
    piWorkspaceNodeModulesPath: resolve(appRoot, ".feynman", "npm", "node_modules"),
    nodeModulesBinPath: resolve(appRoot, "node_modules", ".bin"),
  };
}
|
||||||
|
|
||||||
|
export function toNodeImportSpecifier(modulePath: string): string {
|
||||||
|
return isAbsolute(modulePath) ? pathToFileURL(modulePath).href : modulePath;
|
||||||
|
}
|
||||||
|
|
||||||
export function validatePiInstallation(appRoot: string): string[] {
|
export function validatePiInstallation(appRoot: string): string[] {
|
||||||
const paths = resolvePiPaths(appRoot);
|
const paths = resolvePiPaths(appRoot);
|
||||||
const missing: string[] = [];
|
const missing: string[] = [];
|
||||||
|
|
||||||
if (!existsSync(paths.piCliPath)) missing.push(paths.piCliPath);
|
if (!existsSync(paths.piCliPath)) missing.push(paths.piCliPath);
|
||||||
if (!existsSync(paths.promisePolyfillPath)) missing.push(paths.promisePolyfillPath);
|
if (!existsSync(paths.promisePolyfillPath)) {
|
||||||
|
// Dev fallback: allow running from source without `dist/` build artifacts.
|
||||||
|
const hasDevPolyfill = existsSync(paths.promisePolyfillSourcePath) && existsSync(paths.tsxLoaderPath);
|
||||||
|
if (!hasDevPolyfill) missing.push(paths.promisePolyfillPath);
|
||||||
|
}
|
||||||
if (!existsSync(paths.researchToolsPath)) missing.push(paths.researchToolsPath);
|
if (!existsSync(paths.researchToolsPath)) missing.push(paths.researchToolsPath);
|
||||||
if (!existsSync(paths.promptTemplatePath)) missing.push(paths.promptTemplatePath);
|
if (!existsSync(paths.promptTemplatePath)) missing.push(paths.promptTemplatePath);
|
||||||
|
|
||||||
@@ -59,6 +84,9 @@ export function buildPiArgs(options: PiRuntimeOptions): string[] {
|
|||||||
args.push("--system-prompt", readFileSync(paths.systemPromptPath, "utf8"));
|
args.push("--system-prompt", readFileSync(paths.systemPromptPath, "utf8"));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (options.mode) {
|
||||||
|
args.push("--mode", options.mode);
|
||||||
|
}
|
||||||
if (options.explicitModelSpec) {
|
if (options.explicitModelSpec) {
|
||||||
args.push("--model", options.explicitModelSpec);
|
args.push("--model", options.explicitModelSpec);
|
||||||
}
|
}
|
||||||
@@ -76,19 +104,36 @@ export function buildPiArgs(options: PiRuntimeOptions): string[] {
|
|||||||
|
|
||||||
// Assemble the environment for the spawned Pi process: Feynman bin dirs are
// prepended to PATH, FEYNMAN_* variables describe this run, and external tool
// paths (pandoc, mermaid, Chrome) are resolved with fallbacks unless already
// provided by the caller's environment.
export function buildPiEnv(options: PiRuntimeOptions): NodeJS.ProcessEnv {
  const paths = resolvePiPaths(options.appRoot);
  const feynmanNpmPrefixPath = getFeynmanNpmPrefixPath(options.feynmanAgentDir);
  const feynmanNpmBinPath = resolve(feynmanNpmPrefixPath, "bin");
  const feynmanWebSearchConfigPath = resolve(dirname(options.feynmanAgentDir), "web-search.json");

  const currentPath = process.env.PATH ?? "";
  // Feynman-managed bin dirs take precedence over the inherited PATH.
  const binEntries = [paths.nodeModulesBinPath, resolve(paths.piWorkspaceNodeModulesPath, ".bin"), feynmanNpmBinPath];
  const binPath = binEntries.join(delimiter);

  return {
    ...process.env,
    PATH: `${binPath}${delimiter}${currentPath}`,
    FEYNMAN_VERSION: options.feynmanVersion,
    FEYNMAN_SESSION_DIR: options.sessionDir,
    FEYNMAN_MEMORY_DIR: resolve(dirname(options.feynmanAgentDir), "memory"),
    FEYNMAN_WEB_SEARCH_CONFIG: feynmanWebSearchConfigPath,
    FEYNMAN_NODE_EXECUTABLE: process.execPath,
    FEYNMAN_BIN_PATH: resolve(options.appRoot, "bin", "feynman.js"),
    FEYNMAN_NPM_PREFIX: feynmanNpmPrefixPath,
    // Ensure the Pi child process uses Feynman's agent dir for auth/models/settings.
    PI_CODING_AGENT_DIR: options.feynmanAgentDir,
    PANDOC_PATH: process.env.PANDOC_PATH ?? resolveExecutable("pandoc", PANDOC_FALLBACK_PATHS),
    PI_HARDWARE_CURSOR: process.env.PI_HARDWARE_CURSOR ?? "1",
    PI_SKIP_VERSION_CHECK: process.env.PI_SKIP_VERSION_CHECK ?? "1",
    MERMAID_CLI_PATH: process.env.MERMAID_CLI_PATH ?? resolveExecutable("mmdc", MERMAID_FALLBACK_PATHS),
    PUPPETEER_EXECUTABLE_PATH:
      process.env.PUPPETEER_EXECUTABLE_PATH ?? resolveExecutable("google-chrome", BROWSER_FALLBACK_PATHS),
    // Always pin npm's global prefix to the Feynman workspace. npm injects
    // lowercase config vars into child processes, which would otherwise leak
    // the caller's global prefix into Pi.
    NPM_CONFIG_PREFIX: feynmanNpmPrefixPath,
    npm_config_prefix: feynmanNpmPrefixPath,
  };
}
|
||||||
|
|||||||
@@ -1,9 +1,10 @@
|
|||||||
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||||
import { dirname } from "node:path";
|
import { dirname } from "node:path";
|
||||||
|
|
||||||
import { AuthStorage, ModelRegistry, type PackageSource } from "@mariozechner/pi-coding-agent";
|
import { ModelRegistry, type PackageSource } from "@mariozechner/pi-coding-agent";
|
||||||
|
|
||||||
import { CORE_PACKAGE_SOURCES, shouldPruneLegacyDefaultPackages } from "./package-presets.js";
|
import { CORE_PACKAGE_SOURCES, filterPackageSourcesForCurrentNode, shouldPruneLegacyDefaultPackages } from "./package-presets.js";
|
||||||
|
import { createModelRegistry } from "../model/registry.js";
|
||||||
|
|
||||||
export type ThinkingLevel = "off" | "minimal" | "low" | "medium" | "high" | "xhigh";
|
export type ThinkingLevel = "off" | "minimal" | "low" | "medium" | "high" | "xhigh";
|
||||||
|
|
||||||
@@ -66,6 +67,23 @@ function choosePreferredModel(
|
|||||||
return availableModels[0];
|
return availableModels[0];
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function filterConfiguredPackagesForCurrentNode(packages: PackageSource[] | undefined): PackageSource[] {
|
||||||
|
if (!Array.isArray(packages)) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
|
||||||
|
const filteredStringSources = new Set(filterPackageSourcesForCurrentNode(
|
||||||
|
packages
|
||||||
|
.map((entry) => (typeof entry === "string" ? entry : entry.source))
|
||||||
|
.filter((entry): entry is string => typeof entry === "string"),
|
||||||
|
));
|
||||||
|
|
||||||
|
return packages.filter((entry) => {
|
||||||
|
const source = typeof entry === "string" ? entry : entry.source;
|
||||||
|
return filteredStringSources.has(source);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
export function readJson(path: string): Record<string, unknown> {
|
export function readJson(path: string): Record<string, unknown> {
|
||||||
if (!existsSync(path)) {
|
if (!existsSync(path)) {
|
||||||
return {};
|
return {};
|
||||||
@@ -109,14 +127,16 @@ export function normalizeFeynmanSettings(
|
|||||||
settings.theme = "feynman";
|
settings.theme = "feynman";
|
||||||
settings.quietStartup = true;
|
settings.quietStartup = true;
|
||||||
settings.collapseChangelog = true;
|
settings.collapseChangelog = true;
|
||||||
|
const supportedCorePackages = filterPackageSourcesForCurrentNode(CORE_PACKAGE_SOURCES);
|
||||||
if (!Array.isArray(settings.packages) || settings.packages.length === 0) {
|
if (!Array.isArray(settings.packages) || settings.packages.length === 0) {
|
||||||
settings.packages = [...CORE_PACKAGE_SOURCES];
|
settings.packages = supportedCorePackages;
|
||||||
} else if (shouldPruneLegacyDefaultPackages(settings.packages as PackageSource[])) {
|
} else if (shouldPruneLegacyDefaultPackages(settings.packages as PackageSource[])) {
|
||||||
settings.packages = [...CORE_PACKAGE_SOURCES];
|
settings.packages = supportedCorePackages;
|
||||||
|
} else {
|
||||||
|
settings.packages = filterConfiguredPackagesForCurrentNode(settings.packages as PackageSource[]);
|
||||||
}
|
}
|
||||||
|
|
||||||
const authStorage = AuthStorage.create(authPath);
|
const modelRegistry = createModelRegistry(authPath);
|
||||||
const modelRegistry = new ModelRegistry(authStorage);
|
|
||||||
const availableModels = modelRegistry.getAvailable().map((model) => ({
|
const availableModels = modelRegistry.getAvailable().map((model) => ({
|
||||||
provider: model.provider,
|
provider: model.provider,
|
||||||
id: model.id,
|
id: model.id,
|
||||||
|
|||||||
@@ -1,13 +1,17 @@
|
|||||||
import { existsSync, readFileSync } from "node:fs";
|
import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||||
import { homedir } from "node:os";
|
import { dirname, resolve } from "node:path";
|
||||||
import { resolve } from "node:path";
|
import { getFeynmanHome } from "../config/paths.js";
|
||||||
|
|
||||||
export type PiWebSearchProvider = "auto" | "perplexity" | "gemini";
|
export type PiWebSearchProvider = "auto" | "perplexity" | "exa" | "gemini";
|
||||||
|
export type PiWebSearchWorkflow = "none" | "summary-review";
|
||||||
|
|
||||||
export type PiWebAccessConfig = Record<string, unknown> & {
|
export type PiWebAccessConfig = Record<string, unknown> & {
|
||||||
|
route?: PiWebSearchProvider;
|
||||||
provider?: PiWebSearchProvider;
|
provider?: PiWebSearchProvider;
|
||||||
searchProvider?: PiWebSearchProvider;
|
searchProvider?: PiWebSearchProvider;
|
||||||
|
workflow?: PiWebSearchWorkflow;
|
||||||
perplexityApiKey?: string;
|
perplexityApiKey?: string;
|
||||||
|
exaApiKey?: string;
|
||||||
geminiApiKey?: string;
|
geminiApiKey?: string;
|
||||||
chromeProfile?: string;
|
chromeProfile?: string;
|
||||||
};
|
};
|
||||||
@@ -16,19 +20,26 @@ export type PiWebAccessStatus = {
|
|||||||
configPath: string;
|
configPath: string;
|
||||||
searchProvider: PiWebSearchProvider;
|
searchProvider: PiWebSearchProvider;
|
||||||
requestProvider: PiWebSearchProvider;
|
requestProvider: PiWebSearchProvider;
|
||||||
|
workflow: PiWebSearchWorkflow;
|
||||||
perplexityConfigured: boolean;
|
perplexityConfigured: boolean;
|
||||||
|
exaConfigured: boolean;
|
||||||
geminiApiConfigured: boolean;
|
geminiApiConfigured: boolean;
|
||||||
chromeProfile?: string;
|
chromeProfile?: string;
|
||||||
routeLabel: string;
|
routeLabel: string;
|
||||||
note: string;
|
note: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
export function getPiWebSearchConfigPath(home = process.env.HOME ?? homedir()): string {
|
export function getPiWebSearchConfigPath(home?: string): string {
|
||||||
return resolve(home, ".feynman", "web-search.json");
|
const feynmanHome = home ? resolve(home, ".feynman") : getFeynmanHome();
|
||||||
|
return resolve(feynmanHome, "web-search.json");
|
||||||
}
|
}
|
||||||
|
|
||||||
function normalizeProvider(value: unknown): PiWebSearchProvider | undefined {
|
function normalizeProvider(value: unknown): PiWebSearchProvider | undefined {
|
||||||
return value === "auto" || value === "perplexity" || value === "gemini" ? value : undefined;
|
return value === "auto" || value === "perplexity" || value === "exa" || value === "gemini" ? value : undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizeWorkflow(value: unknown): PiWebSearchWorkflow | undefined {
|
||||||
|
return value === "none" || value === "summary-review" ? value : undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
function normalizeNonEmptyString(value: unknown): string | undefined {
|
function normalizeNonEmptyString(value: unknown): string | undefined {
|
||||||
@@ -48,10 +59,29 @@ export function loadPiWebAccessConfig(configPath = getPiWebSearchConfigPath()):
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function savePiWebAccessConfig(
|
||||||
|
updates: Partial<Record<keyof PiWebAccessConfig, unknown>>,
|
||||||
|
configPath = getPiWebSearchConfigPath(),
|
||||||
|
): void {
|
||||||
|
const merged: Record<string, unknown> = { ...loadPiWebAccessConfig(configPath) };
|
||||||
|
for (const [key, value] of Object.entries(updates)) {
|
||||||
|
if (value === undefined) {
|
||||||
|
delete merged[key];
|
||||||
|
} else {
|
||||||
|
merged[key] = value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mkdirSync(dirname(configPath), { recursive: true });
|
||||||
|
writeFileSync(configPath, JSON.stringify(merged, null, 2) + "\n", "utf8");
|
||||||
|
}
|
||||||
|
|
||||||
function formatRouteLabel(provider: PiWebSearchProvider): string {
|
function formatRouteLabel(provider: PiWebSearchProvider): string {
|
||||||
switch (provider) {
|
switch (provider) {
|
||||||
case "perplexity":
|
case "perplexity":
|
||||||
return "Perplexity";
|
return "Perplexity";
|
||||||
|
case "exa":
|
||||||
|
return "Exa";
|
||||||
case "gemini":
|
case "gemini":
|
||||||
return "Gemini";
|
return "Gemini";
|
||||||
default:
|
default:
|
||||||
@@ -63,10 +93,12 @@ function formatRouteNote(provider: PiWebSearchProvider): string {
|
|||||||
switch (provider) {
|
switch (provider) {
|
||||||
case "perplexity":
|
case "perplexity":
|
||||||
return "Pi web-access will use Perplexity for search.";
|
return "Pi web-access will use Perplexity for search.";
|
||||||
|
case "exa":
|
||||||
|
return "Pi web-access will use Exa for search.";
|
||||||
case "gemini":
|
case "gemini":
|
||||||
return "Pi web-access will use Gemini API or Gemini Browser.";
|
return "Pi web-access will use Gemini API or Gemini Browser.";
|
||||||
default:
|
default:
|
||||||
return "Pi web-access will try Perplexity, then Gemini API, then Gemini Browser.";
|
return "Pi web-access will try Perplexity, then Exa, then Gemini API, then Gemini Browser.";
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -74,9 +106,12 @@ export function getPiWebAccessStatus(
|
|||||||
config: PiWebAccessConfig = loadPiWebAccessConfig(),
|
config: PiWebAccessConfig = loadPiWebAccessConfig(),
|
||||||
configPath = getPiWebSearchConfigPath(),
|
configPath = getPiWebSearchConfigPath(),
|
||||||
): PiWebAccessStatus {
|
): PiWebAccessStatus {
|
||||||
const searchProvider = normalizeProvider(config.searchProvider) ?? "auto";
|
const searchProvider =
|
||||||
const requestProvider = normalizeProvider(config.provider) ?? searchProvider;
|
normalizeProvider(config.searchProvider) ?? normalizeProvider(config.route) ?? normalizeProvider(config.provider) ?? "auto";
|
||||||
|
const requestProvider = normalizeProvider(config.provider) ?? normalizeProvider(config.route) ?? searchProvider;
|
||||||
|
const workflow = normalizeWorkflow(config.workflow) ?? "none";
|
||||||
const perplexityConfigured = Boolean(normalizeNonEmptyString(config.perplexityApiKey));
|
const perplexityConfigured = Boolean(normalizeNonEmptyString(config.perplexityApiKey));
|
||||||
|
const exaConfigured = Boolean(normalizeNonEmptyString(config.exaApiKey));
|
||||||
const geminiApiConfigured = Boolean(normalizeNonEmptyString(config.geminiApiKey));
|
const geminiApiConfigured = Boolean(normalizeNonEmptyString(config.geminiApiKey));
|
||||||
const chromeProfile = normalizeNonEmptyString(config.chromeProfile);
|
const chromeProfile = normalizeNonEmptyString(config.chromeProfile);
|
||||||
const effectiveProvider = searchProvider;
|
const effectiveProvider = searchProvider;
|
||||||
@@ -85,7 +120,9 @@ export function getPiWebAccessStatus(
|
|||||||
configPath,
|
configPath,
|
||||||
searchProvider,
|
searchProvider,
|
||||||
requestProvider,
|
requestProvider,
|
||||||
|
workflow,
|
||||||
perplexityConfigured,
|
perplexityConfigured,
|
||||||
|
exaConfigured,
|
||||||
geminiApiConfigured,
|
geminiApiConfigured,
|
||||||
chromeProfile,
|
chromeProfile,
|
||||||
routeLabel: formatRouteLabel(effectiveProvider),
|
routeLabel: formatRouteLabel(effectiveProvider),
|
||||||
@@ -100,7 +137,9 @@ export function formatPiWebAccessDoctorLines(
|
|||||||
"web access: pi-web-access",
|
"web access: pi-web-access",
|
||||||
` search route: ${status.routeLabel}`,
|
` search route: ${status.routeLabel}`,
|
||||||
` request route: ${status.requestProvider}`,
|
` request route: ${status.requestProvider}`,
|
||||||
|
` search workflow: ${status.workflow}`,
|
||||||
` perplexity api: ${status.perplexityConfigured ? "configured" : "not configured"}`,
|
` perplexity api: ${status.perplexityConfigured ? "configured" : "not configured"}`,
|
||||||
|
` exa api: ${status.exaConfigured ? "configured" : "not configured"}`,
|
||||||
` gemini api: ${status.geminiApiConfigured ? "configured" : "not configured"}`,
|
` gemini api: ${status.geminiApiConfigured ? "configured" : "not configured"}`,
|
||||||
` browser profile: ${status.chromeProfile ?? "default Chromium profile"}`,
|
` browser profile: ${status.chromeProfile ?? "default Chromium profile"}`,
|
||||||
` config path: ${status.configPath}`,
|
` config path: ${status.configPath}`,
|
||||||
|
|||||||
@@ -1,13 +1,60 @@
|
|||||||
import { getPiWebAccessStatus } from "../pi/web-access.js";
|
import {
|
||||||
|
getPiWebAccessStatus,
|
||||||
|
savePiWebAccessConfig,
|
||||||
|
type PiWebAccessConfig,
|
||||||
|
type PiWebSearchProvider,
|
||||||
|
} from "../pi/web-access.js";
|
||||||
import { printInfo } from "../ui/terminal.js";
|
import { printInfo } from "../ui/terminal.js";
|
||||||
|
|
||||||
|
const SEARCH_PROVIDERS: PiWebSearchProvider[] = ["auto", "perplexity", "exa", "gemini"];
|
||||||
|
const PROVIDER_API_KEY_FIELDS: Partial<Record<PiWebSearchProvider, keyof PiWebAccessConfig>> = {
|
||||||
|
perplexity: "perplexityApiKey",
|
||||||
|
exa: "exaApiKey",
|
||||||
|
gemini: "geminiApiKey",
|
||||||
|
};
|
||||||
|
|
||||||
export function printSearchStatus(): void {
|
export function printSearchStatus(): void {
|
||||||
const status = getPiWebAccessStatus();
|
const status = getPiWebAccessStatus();
|
||||||
printInfo("Managed by: pi-web-access");
|
printInfo("Managed by: pi-web-access");
|
||||||
printInfo(`Search route: ${status.routeLabel}`);
|
printInfo(`Search route: ${status.routeLabel}`);
|
||||||
printInfo(`Request route: ${status.requestProvider}`);
|
printInfo(`Request route: ${status.requestProvider}`);
|
||||||
|
printInfo(`Search workflow: ${status.workflow}`);
|
||||||
printInfo(`Perplexity API configured: ${status.perplexityConfigured ? "yes" : "no"}`);
|
printInfo(`Perplexity API configured: ${status.perplexityConfigured ? "yes" : "no"}`);
|
||||||
|
printInfo(`Exa API configured: ${status.exaConfigured ? "yes" : "no"}`);
|
||||||
printInfo(`Gemini API configured: ${status.geminiApiConfigured ? "yes" : "no"}`);
|
printInfo(`Gemini API configured: ${status.geminiApiConfigured ? "yes" : "no"}`);
|
||||||
printInfo(`Browser profile: ${status.chromeProfile ?? "default Chromium profile"}`);
|
printInfo(`Browser profile: ${status.chromeProfile ?? "default Chromium profile"}`);
|
||||||
printInfo(`Config path: ${status.configPath}`);
|
printInfo(`Config path: ${status.configPath}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function setSearchProvider(provider: PiWebSearchProvider, apiKey?: string): void {
|
||||||
|
if (!SEARCH_PROVIDERS.includes(provider)) {
|
||||||
|
throw new Error(`Usage: feynman search set <${SEARCH_PROVIDERS.join("|")}> [api-key]`);
|
||||||
|
}
|
||||||
|
if (apiKey !== undefined && provider === "auto") {
|
||||||
|
throw new Error("The auto provider does not use an API key. Usage: feynman search set auto");
|
||||||
|
}
|
||||||
|
|
||||||
|
const updates: Partial<Record<keyof PiWebAccessConfig, unknown>> = {
|
||||||
|
provider,
|
||||||
|
searchProvider: provider,
|
||||||
|
workflow: "none",
|
||||||
|
route: undefined,
|
||||||
|
};
|
||||||
|
const apiKeyField = PROVIDER_API_KEY_FIELDS[provider];
|
||||||
|
if (apiKeyField && apiKey !== undefined) {
|
||||||
|
updates[apiKeyField] = apiKey;
|
||||||
|
}
|
||||||
|
savePiWebAccessConfig(updates);
|
||||||
|
|
||||||
|
const status = getPiWebAccessStatus();
|
||||||
|
console.log(`Web search provider set to ${status.routeLabel}.`);
|
||||||
|
console.log(`Config path: ${status.configPath}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
export function clearSearchConfig(): void {
|
||||||
|
savePiWebAccessConfig({ provider: undefined, searchProvider: undefined, route: undefined, workflow: "none" });
|
||||||
|
|
||||||
|
const status = getPiWebAccessStatus();
|
||||||
|
console.log(`Web search provider reset to ${status.routeLabel}.`);
|
||||||
|
console.log(`Config path: ${status.configPath}`);
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
import { AuthStorage, ModelRegistry } from "@mariozechner/pi-coding-agent";
|
|
||||||
import { getUserName as getAlphaUserName, isLoggedIn as isAlphaLoggedIn } from "@companion-ai/alpha-hub/lib";
|
import { getUserName as getAlphaUserName, isLoggedIn as isAlphaLoggedIn } from "@companion-ai/alpha-hub/lib";
|
||||||
|
|
||||||
|
import { readFileSync } from "node:fs";
|
||||||
|
|
||||||
import { formatPiWebAccessDoctorLines, getPiWebAccessStatus } from "../pi/web-access.js";
|
import { formatPiWebAccessDoctorLines, getPiWebAccessStatus } from "../pi/web-access.js";
|
||||||
import { BROWSER_FALLBACK_PATHS, PANDOC_FALLBACK_PATHS, resolveExecutable } from "../system/executables.js";
|
import { BROWSER_FALLBACK_PATHS, PANDOC_FALLBACK_PATHS, resolveExecutable } from "../system/executables.js";
|
||||||
import { readJson } from "../pi/settings.js";
|
import { readJson } from "../pi/settings.js";
|
||||||
@@ -8,6 +9,31 @@ import { validatePiInstallation } from "../pi/runtime.js";
|
|||||||
import { printInfo, printPanel, printSection } from "../ui/terminal.js";
|
import { printInfo, printPanel, printSection } from "../ui/terminal.js";
|
||||||
import { getCurrentModelSpec } from "../model/commands.js";
|
import { getCurrentModelSpec } from "../model/commands.js";
|
||||||
import { buildModelStatusSnapshotFromRecords, getAvailableModelRecords, getSupportedModelRecords } from "../model/catalog.js";
|
import { buildModelStatusSnapshotFromRecords, getAvailableModelRecords, getSupportedModelRecords } from "../model/catalog.js";
|
||||||
|
import { createModelRegistry, getModelsJsonPath } from "../model/registry.js";
|
||||||
|
import { getConfiguredServiceTier } from "../model/service-tier.js";
|
||||||
|
|
||||||
|
function findProvidersMissingApiKey(modelsJsonPath: string): string[] {
|
||||||
|
try {
|
||||||
|
const raw = readFileSync(modelsJsonPath, "utf8").trim();
|
||||||
|
if (!raw) return [];
|
||||||
|
const parsed = JSON.parse(raw) as any;
|
||||||
|
const providers = parsed?.providers;
|
||||||
|
if (!providers || typeof providers !== "object") return [];
|
||||||
|
const missing: string[] = [];
|
||||||
|
for (const [providerId, config] of Object.entries(providers as Record<string, unknown>)) {
|
||||||
|
if (!config || typeof config !== "object") continue;
|
||||||
|
const models = (config as any).models;
|
||||||
|
if (!Array.isArray(models) || models.length === 0) continue;
|
||||||
|
const apiKey = (config as any).apiKey;
|
||||||
|
if (typeof apiKey !== "string" || apiKey.trim().length === 0) {
|
||||||
|
missing.push(providerId);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return missing;
|
||||||
|
} catch {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
export type DoctorOptions = {
|
export type DoctorOptions = {
|
||||||
settingsPath: string;
|
settingsPath: string;
|
||||||
@@ -80,6 +106,7 @@ export function runStatus(options: DoctorOptions): void {
|
|||||||
printInfo(`Recommended model: ${snapshot.recommendedModel ?? "not available"}`);
|
printInfo(`Recommended model: ${snapshot.recommendedModel ?? "not available"}`);
|
||||||
printInfo(`alphaXiv: ${snapshot.alphaLoggedIn ? snapshot.alphaUser ?? "configured" : "not configured"}`);
|
printInfo(`alphaXiv: ${snapshot.alphaLoggedIn ? snapshot.alphaUser ?? "configured" : "not configured"}`);
|
||||||
printInfo(`Web access: pi-web-access (${snapshot.webRouteLabel})`);
|
printInfo(`Web access: pi-web-access (${snapshot.webRouteLabel})`);
|
||||||
|
printInfo(`Service tier: ${getConfiguredServiceTier(options.settingsPath) ?? "not set"}`);
|
||||||
printInfo(`Preview: ${snapshot.previewConfigured ? "configured" : "not configured"}`);
|
printInfo(`Preview: ${snapshot.previewConfigured ? "configured" : "not configured"}`);
|
||||||
|
|
||||||
printSection("Paths");
|
printSection("Paths");
|
||||||
@@ -104,7 +131,7 @@ export function runStatus(options: DoctorOptions): void {
|
|||||||
|
|
||||||
export function runDoctor(options: DoctorOptions): void {
|
export function runDoctor(options: DoctorOptions): void {
|
||||||
const settings = readJson(options.settingsPath);
|
const settings = readJson(options.settingsPath);
|
||||||
const modelRegistry = new ModelRegistry(AuthStorage.create(options.authPath));
|
const modelRegistry = createModelRegistry(options.authPath);
|
||||||
const availableModels = modelRegistry.getAvailable();
|
const availableModels = modelRegistry.getAvailable();
|
||||||
const pandocPath = resolveExecutable("pandoc", PANDOC_FALLBACK_PATHS);
|
const pandocPath = resolveExecutable("pandoc", PANDOC_FALLBACK_PATHS);
|
||||||
const browserPath = process.env.PUPPETEER_EXECUTABLE_PATH ?? resolveExecutable("google-chrome", BROWSER_FALLBACK_PATHS);
|
const browserPath = process.env.PUPPETEER_EXECUTABLE_PATH ?? resolveExecutable("google-chrome", BROWSER_FALLBACK_PATHS);
|
||||||
@@ -140,10 +167,26 @@ export function runDoctor(options: DoctorOptions): void {
|
|||||||
console.log(`default model valid: ${modelStatus.modelValid ? "yes" : "no"}`);
|
console.log(`default model valid: ${modelStatus.modelValid ? "yes" : "no"}`);
|
||||||
console.log(`authenticated providers: ${modelStatus.authenticatedProviderCount}`);
|
console.log(`authenticated providers: ${modelStatus.authenticatedProviderCount}`);
|
||||||
console.log(`authenticated models: ${modelStatus.authenticatedModelCount}`);
|
console.log(`authenticated models: ${modelStatus.authenticatedModelCount}`);
|
||||||
|
console.log(`service tier: ${getConfiguredServiceTier(options.settingsPath) ?? "not set"}`);
|
||||||
console.log(`recommended model: ${modelStatus.recommendedModel ?? "not available"}`);
|
console.log(`recommended model: ${modelStatus.recommendedModel ?? "not available"}`);
|
||||||
if (modelStatus.recommendedModelReason) {
|
if (modelStatus.recommendedModelReason) {
|
||||||
console.log(` why: ${modelStatus.recommendedModelReason}`);
|
console.log(` why: ${modelStatus.recommendedModelReason}`);
|
||||||
}
|
}
|
||||||
|
const modelsError = modelRegistry.getError();
|
||||||
|
if (modelsError) {
|
||||||
|
console.log("models.json: error");
|
||||||
|
for (const line of modelsError.split("\n")) {
|
||||||
|
console.log(` ${line}`);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
const modelsJsonPath = getModelsJsonPath(options.authPath);
|
||||||
|
console.log(`models.json: ${modelsJsonPath}`);
|
||||||
|
const missingApiKeyProviders = findProvidersMissingApiKey(modelsJsonPath);
|
||||||
|
if (missingApiKeyProviders.length > 0) {
|
||||||
|
console.log(` warning: provider(s) missing apiKey: ${missingApiKeyProviders.join(", ")}`);
|
||||||
|
console.log(" note: custom providers with a models[] list need apiKey in models.json to be available.");
|
||||||
|
}
|
||||||
|
}
|
||||||
console.log(`pandoc: ${pandocPath ?? "missing"}`);
|
console.log(`pandoc: ${pandocPath ?? "missing"}`);
|
||||||
console.log(`browser preview runtime: ${browserPath ?? "missing"}`);
|
console.log(`browser preview runtime: ${browserPath ?? "missing"}`);
|
||||||
for (const line of formatPiWebAccessDoctorLines()) {
|
for (const line of formatPiWebAccessDoctorLines()) {
|
||||||
|
|||||||
@@ -13,14 +13,36 @@ export function setupPreviewDependencies(): PreviewSetupResult {
|
|||||||
return { status: "ready", message: `pandoc already installed at ${pandocPath}` };
|
return { status: "ready", message: `pandoc already installed at ${pandocPath}` };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (process.platform === "darwin") {
|
||||||
const brewPath = resolveExecutable("brew", BREW_FALLBACK_PATHS);
|
const brewPath = resolveExecutable("brew", BREW_FALLBACK_PATHS);
|
||||||
if (process.platform === "darwin" && brewPath) {
|
if (brewPath) {
|
||||||
const result = spawnSync(brewPath, ["install", "pandoc"], { stdio: "inherit" });
|
const result = spawnSync(brewPath, ["install", "pandoc"], { stdio: "inherit" });
|
||||||
if (result.status !== 0) {
|
if (result.status !== 0) {
|
||||||
throw new Error("Failed to install pandoc via Homebrew.");
|
throw new Error("Failed to install pandoc via Homebrew.");
|
||||||
}
|
}
|
||||||
return { status: "installed", message: "Preview dependency installed: pandoc" };
|
return { status: "installed", message: "Preview dependency installed: pandoc" };
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (process.platform === "win32") {
|
||||||
|
const wingetPath = resolveExecutable("winget");
|
||||||
|
if (wingetPath) {
|
||||||
|
const result = spawnSync(wingetPath, ["install", "--id", "JohnMacFarlane.Pandoc", "-e"], { stdio: "inherit" });
|
||||||
|
if (result.status === 0) {
|
||||||
|
return { status: "installed", message: "Preview dependency installed: pandoc (via winget)" };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (process.platform === "linux") {
|
||||||
|
const aptPath = resolveExecutable("apt-get");
|
||||||
|
if (aptPath) {
|
||||||
|
const result = spawnSync(aptPath, ["install", "-y", "pandoc"], { stdio: "inherit" });
|
||||||
|
if (result.status === 0) {
|
||||||
|
return { status: "installed", message: "Preview dependency installed: pandoc (via apt)" };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return {
|
return {
|
||||||
status: "manual",
|
status: "manual",
|
||||||
|
|||||||
@@ -1,30 +1,130 @@
|
|||||||
import { stdin as input, stdout as output } from "node:process";
|
import {
|
||||||
import { createInterface } from "node:readline/promises";
|
confirm as clackConfirm,
|
||||||
|
intro as clackIntro,
|
||||||
|
isCancel,
|
||||||
|
multiselect as clackMultiselect,
|
||||||
|
outro as clackOutro,
|
||||||
|
select as clackSelect,
|
||||||
|
text as clackText,
|
||||||
|
type Option,
|
||||||
|
} from "@clack/prompts";
|
||||||
|
|
||||||
export async function promptText(question: string, defaultValue = ""): Promise<string> {
|
export class SetupCancelledError extends Error {
|
||||||
if (!input.isTTY || !output.isTTY) {
|
constructor(message = "setup cancelled") {
|
||||||
|
super(message);
|
||||||
|
this.name = "SetupCancelledError";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export type PromptSelectOption<T = string> = {
|
||||||
|
value: T;
|
||||||
|
label: string;
|
||||||
|
hint?: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
function ensureInteractiveTerminal(): void {
|
||||||
|
if (!process.stdin.isTTY || !process.stdout.isTTY) {
|
||||||
throw new Error("feynman setup requires an interactive terminal.");
|
throw new Error("feynman setup requires an interactive terminal.");
|
||||||
}
|
}
|
||||||
const rl = createInterface({ input, output });
|
|
||||||
try {
|
|
||||||
const suffix = defaultValue ? ` [${defaultValue}]` : "";
|
|
||||||
const value = (await rl.question(`${question}${suffix}: `)).trim();
|
|
||||||
return value || defaultValue;
|
|
||||||
} finally {
|
|
||||||
rl.close();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function guardCancelled<T>(value: T | symbol): T {
|
||||||
|
if (isCancel(value)) {
|
||||||
|
throw new SetupCancelledError();
|
||||||
|
}
|
||||||
|
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function isInteractiveTerminal(): boolean {
|
||||||
|
return Boolean(process.stdin.isTTY && process.stdout.isTTY);
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function promptIntro(title: string): Promise<void> {
|
||||||
|
ensureInteractiveTerminal();
|
||||||
|
clackIntro(title);
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function promptOutro(message: string): Promise<void> {
|
||||||
|
ensureInteractiveTerminal();
|
||||||
|
clackOutro(message);
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function promptText(question: string, defaultValue = "", placeholder?: string): Promise<string> {
|
||||||
|
ensureInteractiveTerminal();
|
||||||
|
|
||||||
|
const value = guardCancelled(
|
||||||
|
await clackText({
|
||||||
|
message: question,
|
||||||
|
initialValue: defaultValue || undefined,
|
||||||
|
placeholder: placeholder ?? (defaultValue || undefined),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
const normalized = String(value ?? "").trim();
|
||||||
|
return normalized || defaultValue;
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function promptSelect<T>(
|
||||||
|
question: string,
|
||||||
|
options: PromptSelectOption<T>[],
|
||||||
|
initialValue?: T,
|
||||||
|
): Promise<T> {
|
||||||
|
ensureInteractiveTerminal();
|
||||||
|
|
||||||
|
const selection = guardCancelled(
|
||||||
|
await clackSelect({
|
||||||
|
message: question,
|
||||||
|
options: options.map((option) => ({
|
||||||
|
value: option.value,
|
||||||
|
label: option.label,
|
||||||
|
hint: option.hint,
|
||||||
|
})) as Option<T>[],
|
||||||
|
initialValue,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
return selection;
|
||||||
}
|
}
|
||||||
|
|
||||||
export async function promptChoice(question: string, choices: string[], defaultIndex = 0): Promise<number> {
|
export async function promptChoice(question: string, choices: string[], defaultIndex = 0): Promise<number> {
|
||||||
console.log(question);
|
const options = choices.map((choice, index) => ({
|
||||||
for (const [index, choice] of choices.entries()) {
|
value: index,
|
||||||
const marker = index === defaultIndex ? "*" : " ";
|
label: choice,
|
||||||
console.log(` ${marker} ${index + 1}. ${choice}`);
|
}));
|
||||||
|
return promptSelect(question, options, Math.max(0, Math.min(defaultIndex, choices.length - 1)));
|
||||||
}
|
}
|
||||||
const answer = await promptText("Select", String(defaultIndex + 1));
|
|
||||||
const parsed = Number(answer);
|
export async function promptConfirm(question: string, initialValue = true): Promise<boolean> {
|
||||||
if (!Number.isFinite(parsed) || parsed < 1 || parsed > choices.length) {
|
ensureInteractiveTerminal();
|
||||||
return defaultIndex;
|
|
||||||
|
return guardCancelled(
|
||||||
|
await clackConfirm({
|
||||||
|
message: question,
|
||||||
|
initialValue,
|
||||||
|
}),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
return parsed - 1;
|
|
||||||
|
export async function promptMultiSelect<T>(
|
||||||
|
question: string,
|
||||||
|
options: PromptSelectOption<T>[],
|
||||||
|
initialValues: T[] = [],
|
||||||
|
): Promise<T[]> {
|
||||||
|
ensureInteractiveTerminal();
|
||||||
|
|
||||||
|
const selection = guardCancelled(
|
||||||
|
await clackMultiselect({
|
||||||
|
message: question,
|
||||||
|
options: options.map((option) => ({
|
||||||
|
value: option.value,
|
||||||
|
label: option.label,
|
||||||
|
hint: option.hint,
|
||||||
|
})) as Option<T>[],
|
||||||
|
initialValues,
|
||||||
|
required: false,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
return selection;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,15 +1,24 @@
|
|||||||
import { isLoggedIn as isAlphaLoggedIn, login as loginAlpha } from "@companion-ai/alpha-hub/lib";
|
import { isLoggedIn as isAlphaLoggedIn, login as loginAlpha } from "@companion-ai/alpha-hub/lib";
|
||||||
|
import { dirname } from "node:path";
|
||||||
|
|
||||||
import { getDefaultSessionDir, getFeynmanHome } from "../config/paths.js";
|
import { getPiWebAccessStatus } from "../pi/web-access.js";
|
||||||
import { getPiWebAccessStatus, getPiWebSearchConfigPath } from "../pi/web-access.js";
|
|
||||||
import { normalizeFeynmanSettings } from "../pi/settings.js";
|
import { normalizeFeynmanSettings } from "../pi/settings.js";
|
||||||
import type { ThinkingLevel } from "../pi/settings.js";
|
import type { ThinkingLevel } from "../pi/settings.js";
|
||||||
|
import { getMissingConfiguredPackages, installPackageSources } from "../pi/package-ops.js";
|
||||||
|
import { listOptionalPackagePresets } from "../pi/package-presets.js";
|
||||||
import { getCurrentModelSpec, runModelSetup } from "../model/commands.js";
|
import { getCurrentModelSpec, runModelSetup } from "../model/commands.js";
|
||||||
import { buildModelStatusSnapshotFromRecords, getAvailableModelRecords, getSupportedModelRecords } from "../model/catalog.js";
|
import { buildModelStatusSnapshotFromRecords, getAvailableModelRecords, getSupportedModelRecords } from "../model/catalog.js";
|
||||||
import { PANDOC_FALLBACK_PATHS, resolveExecutable } from "../system/executables.js";
|
import { PANDOC_FALLBACK_PATHS, resolveExecutable } from "../system/executables.js";
|
||||||
import { setupPreviewDependencies } from "./preview.js";
|
import { setupPreviewDependencies } from "./preview.js";
|
||||||
import { runDoctor } from "./doctor.js";
|
|
||||||
import { printInfo, printSection, printSuccess } from "../ui/terminal.js";
|
import { printInfo, printSection, printSuccess } from "../ui/terminal.js";
|
||||||
|
import {
|
||||||
|
isInteractiveTerminal,
|
||||||
|
promptConfirm,
|
||||||
|
promptIntro,
|
||||||
|
promptMultiSelect,
|
||||||
|
promptOutro,
|
||||||
|
SetupCancelledError,
|
||||||
|
} from "./prompts.js";
|
||||||
|
|
||||||
type SetupOptions = {
|
type SetupOptions = {
|
||||||
settingsPath: string;
|
settingsPath: string;
|
||||||
@@ -21,33 +30,161 @@ type SetupOptions = {
|
|||||||
defaultThinkingLevel?: ThinkingLevel;
|
defaultThinkingLevel?: ThinkingLevel;
|
||||||
};
|
};
|
||||||
|
|
||||||
function isInteractiveTerminal(): boolean {
|
|
||||||
return Boolean(process.stdin.isTTY && process.stdout.isTTY);
|
|
||||||
}
|
|
||||||
|
|
||||||
function printNonInteractiveSetupGuidance(): void {
|
function printNonInteractiveSetupGuidance(): void {
|
||||||
printInfo("Non-interactive terminal. Use explicit commands:");
|
printInfo("Non-interactive terminal. Use explicit commands:");
|
||||||
printInfo(" feynman model login <provider>");
|
printInfo(" feynman model login <provider>");
|
||||||
printInfo(" feynman model set <provider/model>");
|
printInfo(" feynman model set <provider/model>");
|
||||||
|
printInfo(" # or configure API keys via env vars/auth.json and rerun `feynman model list`");
|
||||||
printInfo(" feynman alpha login");
|
printInfo(" feynman alpha login");
|
||||||
printInfo(" feynman doctor");
|
printInfo(" feynman doctor");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function summarizePackageSources(sources: string[]): string {
|
||||||
|
if (sources.length <= 3) {
|
||||||
|
return sources.join(", ");
|
||||||
|
}
|
||||||
|
|
||||||
|
return `${sources.slice(0, 3).join(", ")} +${sources.length - 3} more`;
|
||||||
|
}
|
||||||
|
|
||||||
|
async function maybeInstallBundledPackages(options: SetupOptions): Promise<void> {
|
||||||
|
const agentDir = dirname(options.authPath);
|
||||||
|
const { missing, bundled } = getMissingConfiguredPackages(options.workingDir, agentDir, options.appRoot);
|
||||||
|
const userMissing = missing.filter((entry) => entry.scope === "user").map((entry) => entry.source);
|
||||||
|
const projectMissing = missing.filter((entry) => entry.scope === "project").map((entry) => entry.source);
|
||||||
|
|
||||||
|
printSection("Packages");
|
||||||
|
if (bundled.length > 0) {
|
||||||
|
printInfo(`Bundled research packages ready: ${summarizePackageSources(bundled.map((entry) => entry.source))}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (missing.length === 0) {
|
||||||
|
printInfo("No additional package install required.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
printInfo(`Missing packages: ${summarizePackageSources(missing.map((entry) => entry.source))}`);
|
||||||
|
const shouldInstall = await promptConfirm("Install missing Feynman packages now?", true);
|
||||||
|
if (!shouldInstall) {
|
||||||
|
printInfo("Skipping package install. Feynman may install missing packages later if needed.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (userMissing.length > 0) {
|
||||||
|
try {
|
||||||
|
await installPackageSources(options.workingDir, agentDir, userMissing);
|
||||||
|
printSuccess(`Installed bundled packages: ${summarizePackageSources(userMissing)}`);
|
||||||
|
} catch (error) {
|
||||||
|
const message = error instanceof Error ? error.message : String(error);
|
||||||
|
printInfo(message.includes("No supported package manager found")
|
||||||
|
? "No package manager available for additional installs. The standalone bundle can still run with its shipped packages."
|
||||||
|
: `Package install skipped: ${message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (projectMissing.length > 0) {
|
||||||
|
try {
|
||||||
|
await installPackageSources(options.workingDir, agentDir, projectMissing, { local: true });
|
||||||
|
printSuccess(`Installed project packages: ${summarizePackageSources(projectMissing)}`);
|
||||||
|
} catch (error) {
|
||||||
|
const message = error instanceof Error ? error.message : String(error);
|
||||||
|
printInfo(`Project package install skipped: ${message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function maybeInstallOptionalPackages(options: SetupOptions): Promise<void> {
|
||||||
|
const agentDir = dirname(options.authPath);
|
||||||
|
const presets = listOptionalPackagePresets();
|
||||||
|
if (presets.length === 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const selectedPresets = await promptMultiSelect(
|
||||||
|
"Optional packages",
|
||||||
|
presets.map((preset) => ({
|
||||||
|
value: preset.name,
|
||||||
|
label: preset.name,
|
||||||
|
hint: preset.description,
|
||||||
|
})),
|
||||||
|
[],
|
||||||
|
);
|
||||||
|
|
||||||
|
if (selectedPresets.length === 0) {
|
||||||
|
printInfo("No optional packages selected.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const presetName of selectedPresets) {
|
||||||
|
const preset = presets.find((entry) => entry.name === presetName);
|
||||||
|
if (!preset) continue;
|
||||||
|
try {
|
||||||
|
await installPackageSources(options.workingDir, agentDir, preset.sources, {
|
||||||
|
persist: true,
|
||||||
|
});
|
||||||
|
printSuccess(`Installed optional preset: ${preset.name}`);
|
||||||
|
} catch (error) {
|
||||||
|
const message = error instanceof Error ? error.message : String(error);
|
||||||
|
printInfo(message.includes("No supported package manager found")
|
||||||
|
? `Skipped optional preset ${preset.name}: no package manager available.`
|
||||||
|
: `Skipped optional preset ${preset.name}: ${message}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function maybeLoginAlpha(): Promise<void> {
|
||||||
|
if (isAlphaLoggedIn()) {
|
||||||
|
printInfo("alphaXiv already configured.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const shouldLogin = await promptConfirm("Connect alphaXiv now?", true);
|
||||||
|
if (!shouldLogin) {
|
||||||
|
printInfo("Skipping alphaXiv login for now.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
await loginAlpha();
|
||||||
|
printSuccess("alphaXiv login complete");
|
||||||
|
} catch (error) {
|
||||||
|
printInfo(`alphaXiv login skipped: ${error instanceof Error ? error.message : String(error)}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function maybeInstallPreviewDependencies(): Promise<void> {
|
||||||
|
if (resolveExecutable("pandoc", PANDOC_FALLBACK_PATHS)) {
|
||||||
|
printInfo("Preview support already configured.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const shouldInstall = await promptConfirm("Install pandoc for preview/export support?", false);
|
||||||
|
if (!shouldInstall) {
|
||||||
|
printInfo("Skipping preview dependency install.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const result = setupPreviewDependencies();
|
||||||
|
printSuccess(result.message);
|
||||||
|
} catch (error) {
|
||||||
|
printInfo(`Preview setup skipped: ${error instanceof Error ? error.message : String(error)}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
export async function runSetup(options: SetupOptions): Promise<void> {
|
export async function runSetup(options: SetupOptions): Promise<void> {
|
||||||
if (!isInteractiveTerminal()) {
|
if (!isInteractiveTerminal()) {
|
||||||
printNonInteractiveSetupGuidance();
|
printNonInteractiveSetupGuidance();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
await promptIntro("Feynman setup");
|
||||||
await runModelSetup(options.settingsPath, options.authPath);
|
await runModelSetup(options.settingsPath, options.authPath);
|
||||||
|
await maybeInstallBundledPackages(options);
|
||||||
if (!isAlphaLoggedIn()) {
|
await maybeInstallOptionalPackages(options);
|
||||||
await loginAlpha();
|
await maybeLoginAlpha();
|
||||||
printSuccess("alphaXiv login complete");
|
await maybeInstallPreviewDependencies();
|
||||||
}
|
|
||||||
|
|
||||||
const result = setupPreviewDependencies();
|
|
||||||
printSuccess(result.message);
|
|
||||||
|
|
||||||
normalizeFeynmanSettings(
|
normalizeFeynmanSettings(
|
||||||
options.settingsPath,
|
options.settingsPath,
|
||||||
@@ -66,4 +203,17 @@ export async function runSetup(options: SetupOptions): Promise<void> {
|
|||||||
printInfo(`alphaXiv: ${isAlphaLoggedIn() ? "configured" : "not configured"}`);
|
printInfo(`alphaXiv: ${isAlphaLoggedIn() ? "configured" : "not configured"}`);
|
||||||
printInfo(`Preview: ${resolveExecutable("pandoc", PANDOC_FALLBACK_PATHS) ? "configured" : "not configured"}`);
|
printInfo(`Preview: ${resolveExecutable("pandoc", PANDOC_FALLBACK_PATHS) ? "configured" : "not configured"}`);
|
||||||
printInfo(`Web: ${getPiWebAccessStatus().routeLabel}`);
|
printInfo(`Web: ${getPiWebAccessStatus().routeLabel}`);
|
||||||
|
if (modelStatus.recommended && !modelStatus.currentValid) {
|
||||||
|
printInfo(`Recommended model: ${modelStatus.recommended}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
await promptOutro("Feynman is ready");
|
||||||
|
} catch (error) {
|
||||||
|
if (error instanceof SetupCancelledError) {
|
||||||
|
printInfo("Setup cancelled.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,27 +1,37 @@
|
|||||||
import { spawnSync } from "node:child_process";
|
import { spawnSync } from "node:child_process";
|
||||||
import { existsSync } from "node:fs";
|
import { existsSync } from "node:fs";
|
||||||
|
import { dirname, delimiter } from "node:path";
|
||||||
|
|
||||||
export const PANDOC_FALLBACK_PATHS = [
|
const isWindows = process.platform === "win32";
|
||||||
"/opt/homebrew/bin/pandoc",
|
const programFiles = process.env.PROGRAMFILES ?? "C:\\Program Files";
|
||||||
"/usr/local/bin/pandoc",
|
const localAppData = process.env.LOCALAPPDATA ?? "";
|
||||||
];
|
|
||||||
|
|
||||||
export const BREW_FALLBACK_PATHS = [
|
export const PANDOC_FALLBACK_PATHS = isWindows
|
||||||
"/opt/homebrew/bin/brew",
|
? [`${programFiles}\\Pandoc\\pandoc.exe`]
|
||||||
"/usr/local/bin/brew",
|
: ["/opt/homebrew/bin/pandoc", "/usr/local/bin/pandoc"];
|
||||||
];
|
|
||||||
|
|
||||||
export const BROWSER_FALLBACK_PATHS = [
|
export const BREW_FALLBACK_PATHS = isWindows
|
||||||
|
? []
|
||||||
|
: ["/opt/homebrew/bin/brew", "/usr/local/bin/brew"];
|
||||||
|
|
||||||
|
export const BROWSER_FALLBACK_PATHS = isWindows
|
||||||
|
? [
|
||||||
|
`${programFiles}\\Google\\Chrome\\Application\\chrome.exe`,
|
||||||
|
`${programFiles} (x86)\\Google\\Chrome\\Application\\chrome.exe`,
|
||||||
|
`${localAppData}\\Google\\Chrome\\Application\\chrome.exe`,
|
||||||
|
`${programFiles}\\Microsoft\\Edge\\Application\\msedge.exe`,
|
||||||
|
`${programFiles}\\BraveSoftware\\Brave-Browser\\Application\\brave.exe`,
|
||||||
|
]
|
||||||
|
: [
|
||||||
"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
|
"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
|
||||||
"/Applications/Chromium.app/Contents/MacOS/Chromium",
|
"/Applications/Chromium.app/Contents/MacOS/Chromium",
|
||||||
"/Applications/Brave Browser.app/Contents/MacOS/Brave Browser",
|
"/Applications/Brave Browser.app/Contents/MacOS/Brave Browser",
|
||||||
"/Applications/Microsoft Edge.app/Contents/MacOS/Microsoft Edge",
|
"/Applications/Microsoft Edge.app/Contents/MacOS/Microsoft Edge",
|
||||||
];
|
];
|
||||||
|
|
||||||
export const MERMAID_FALLBACK_PATHS = [
|
export const MERMAID_FALLBACK_PATHS = isWindows
|
||||||
"/opt/homebrew/bin/mmdc",
|
? []
|
||||||
"/usr/local/bin/mmdc",
|
: ["/opt/homebrew/bin/mmdc", "/usr/local/bin/mmdc"];
|
||||||
];
|
|
||||||
|
|
||||||
export function resolveExecutable(name: string, fallbackPaths: string[] = []): string | undefined {
|
export function resolveExecutable(name: string, fallbackPaths: string[] = []): string | undefined {
|
||||||
for (const candidate of fallbackPaths) {
|
for (const candidate of fallbackPaths) {
|
||||||
@@ -30,13 +40,25 @@ export function resolveExecutable(name: string, fallbackPaths: string[] = []): s
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const result = spawnSync("sh", ["-lc", `command -v ${name}`], {
|
const isWindows = process.platform === "win32";
|
||||||
|
const env = {
|
||||||
|
...process.env,
|
||||||
|
PATH: process.env.PATH ?? "",
|
||||||
|
};
|
||||||
|
const result = isWindows
|
||||||
|
? spawnSync("cmd", ["/c", `where ${name}`], {
|
||||||
encoding: "utf8",
|
encoding: "utf8",
|
||||||
stdio: ["ignore", "pipe", "ignore"],
|
stdio: ["ignore", "pipe", "ignore"],
|
||||||
|
env,
|
||||||
|
})
|
||||||
|
: spawnSync("sh", ["-c", `command -v ${name}`], {
|
||||||
|
encoding: "utf8",
|
||||||
|
stdio: ["ignore", "pipe", "ignore"],
|
||||||
|
env,
|
||||||
});
|
});
|
||||||
|
|
||||||
if (result.status === 0) {
|
if (result.status === 0) {
|
||||||
const resolved = result.stdout.trim();
|
const resolved = result.stdout.trim().split(/\r?\n/)[0];
|
||||||
if (resolved) {
|
if (resolved) {
|
||||||
return resolved;
|
return resolved;
|
||||||
}
|
}
|
||||||
@@ -44,3 +66,9 @@ export function resolveExecutable(name: string, fallbackPaths: string[] = []): s
|
|||||||
|
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function getPathWithCurrentNode(pathValue = process.env.PATH ?? ""): string {
|
||||||
|
const nodeDir = dirname(process.execPath);
|
||||||
|
const parts = pathValue.split(delimiter).filter(Boolean);
|
||||||
|
return parts.includes(nodeDir) ? pathValue : `${nodeDir}${delimiter}${pathValue}`;
|
||||||
|
}
|
||||||
|
|||||||
52
src/system/node-version.ts
Normal file
52
src/system/node-version.ts
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
export const MIN_NODE_VERSION = "20.19.0";
|
||||||
|
export const MAX_NODE_MAJOR = 24;
|
||||||
|
export const PREFERRED_NODE_MAJOR = 22;
|
||||||
|
|
||||||
|
type ParsedNodeVersion = {
|
||||||
|
major: number;
|
||||||
|
minor: number;
|
||||||
|
patch: number;
|
||||||
|
};
|
||||||
|
|
||||||
|
function parseNodeVersion(version: string): ParsedNodeVersion {
|
||||||
|
const [major = "0", minor = "0", patch = "0"] = version.replace(/^v/, "").split(".");
|
||||||
|
return {
|
||||||
|
major: Number.parseInt(major, 10) || 0,
|
||||||
|
minor: Number.parseInt(minor, 10) || 0,
|
||||||
|
patch: Number.parseInt(patch, 10) || 0,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function compareNodeVersions(left: ParsedNodeVersion, right: ParsedNodeVersion): number {
|
||||||
|
if (left.major !== right.major) return left.major - right.major;
|
||||||
|
if (left.minor !== right.minor) return left.minor - right.minor;
|
||||||
|
return left.patch - right.patch;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function isSupportedNodeVersion(version = process.versions.node): boolean {
|
||||||
|
const parsed = parseNodeVersion(version);
|
||||||
|
return compareNodeVersions(parsed, parseNodeVersion(MIN_NODE_VERSION)) >= 0 && parsed.major <= MAX_NODE_MAJOR;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getUnsupportedNodeVersionLines(version = process.versions.node): string[] {
|
||||||
|
const isWindows = process.platform === "win32";
|
||||||
|
const parsed = parseNodeVersion(version);
|
||||||
|
const rangeText = `Node.js ${MIN_NODE_VERSION} through ${MAX_NODE_MAJOR}.x`;
|
||||||
|
return [
|
||||||
|
`feynman supports ${rangeText} (detected ${version}).`,
|
||||||
|
parsed.major > MAX_NODE_MAJOR
|
||||||
|
? "This newer Node release is not supported yet because native Pi packages may fail to build."
|
||||||
|
: isWindows
|
||||||
|
? "Install a supported Node.js release from https://nodejs.org, or use the standalone installer:"
|
||||||
|
: `Switch to a supported Node release with \`nvm install ${PREFERRED_NODE_MAJOR} && nvm use ${PREFERRED_NODE_MAJOR}\`, or use the standalone installer:`,
|
||||||
|
isWindows
|
||||||
|
? "irm https://feynman.is/install.ps1 | iex"
|
||||||
|
: "curl -fsSL https://feynman.is/install | bash",
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
||||||
|
export function ensureSupportedNodeVersion(version = process.versions.node): void {
|
||||||
|
if (!isSupportedNodeVersion(version)) {
|
||||||
|
throw new Error(getUnsupportedNodeVersionLines(version).join("\n"));
|
||||||
|
}
|
||||||
|
}
|
||||||
51
src/system/open-url.ts
Normal file
51
src/system/open-url.ts
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
import { spawn } from "node:child_process";
|
||||||
|
|
||||||
|
import { resolveExecutable } from "./executables.js";
|
||||||
|
|
||||||
|
type ResolveExecutableFn = (name: string, fallbackPaths?: string[]) => string | undefined;
|
||||||
|
|
||||||
|
type OpenUrlCommand = {
|
||||||
|
command: string;
|
||||||
|
args: string[];
|
||||||
|
};
|
||||||
|
|
||||||
|
export function getOpenUrlCommand(
|
||||||
|
url: string,
|
||||||
|
platform = process.platform,
|
||||||
|
resolveCommand: ResolveExecutableFn = resolveExecutable,
|
||||||
|
): OpenUrlCommand | undefined {
|
||||||
|
if (platform === "win32") {
|
||||||
|
return {
|
||||||
|
command: "cmd",
|
||||||
|
args: ["/c", "start", "", url],
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (platform === "darwin") {
|
||||||
|
const command = resolveCommand("open");
|
||||||
|
return command ? { command, args: [url] } : undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
const command = resolveCommand("xdg-open");
|
||||||
|
return command ? { command, args: [url] } : undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function openUrl(url: string): boolean {
|
||||||
|
const command = getOpenUrlCommand(url);
|
||||||
|
if (!command) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
const child = spawn(command.command, command.args, {
|
||||||
|
detached: true,
|
||||||
|
stdio: "ignore",
|
||||||
|
windowsHide: true,
|
||||||
|
});
|
||||||
|
child.on("error", () => {});
|
||||||
|
child.unref();
|
||||||
|
return true;
|
||||||
|
} catch {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user