Add first-class LM Studio setup
@@ -25,7 +25,7 @@ curl -fsSL https://feynman.is/install | bash
 irm https://feynman.is/install.ps1 | iex
 ```
 
-The one-line installer fetches the latest tagged release. To pin a version, pass it explicitly, for example `curl -fsSL https://feynman.is/install | bash -s -- 0.2.21`.
+The one-line installer fetches the latest tagged release. To pin a version, pass it explicitly, for example `curl -fsSL https://feynman.is/install | bash -s -- 0.2.22`.
 
 The installer downloads a standalone native bundle with its own Node.js runtime.
 
@@ -33,7 +33,7 @@ To upgrade the standalone app later, rerun the installer. `feynman update` only
 
 To uninstall the standalone app, remove the launcher and runtime bundle, then optionally remove `~/.feynman` if you also want to delete settings, sessions, and installed package state. If you also want to delete alphaXiv login state, remove `~/.ahub`. See the installation guide for platform-specific paths.
 
-Local models are supported through the custom-provider flow. For Ollama, run `feynman setup`, choose `Custom provider (baseUrl + API key)`, use `openai-completions`, and point it at `http://localhost:11434/v1`.
+Local models are supported through the setup flow. For LM Studio, run `feynman setup`, choose `LM Studio`, and keep the default `http://localhost:1234/v1` unless you changed the server port. For Ollama or vLLM, choose `Custom provider (baseUrl + API key)`, use `openai-completions`, and point it at the local `/v1` endpoint.
 
 ### Skills Only
 
package-lock.json (generated, 4 changes)
@@ -1,12 +1,12 @@
 {
   "name": "@companion-ai/feynman",
-  "version": "0.2.21",
+  "version": "0.2.22",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@companion-ai/feynman",
-      "version": "0.2.21",
+      "version": "0.2.22",
       "hasInstallScript": true,
       "license": "MIT",
       "dependencies": {
@@ -1,6 +1,6 @@
 {
   "name": "@companion-ai/feynman",
-  "version": "0.2.21",
+  "version": "0.2.22",
   "description": "Research-first CLI agent built on Pi and alphaXiv",
   "license": "MIT",
   "type": "module",
@@ -110,7 +110,7 @@ This usually means the release exists, but not all platform bundles were uploade
 Workarounds:
 - try again after the release finishes publishing
 - pass the latest published version explicitly, e.g.:
-& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.21
+& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.22
 "@
 }
 
@@ -261,7 +261,7 @@ This usually means the release exists, but not all platform bundles were uploade
 Workarounds:
 - try again after the release finishes publishing
 - pass the latest published version explicitly, e.g.:
-curl -fsSL https://feynman.is/install | bash -s -- 0.2.21
+curl -fsSL https://feynman.is/install | bash -s -- 0.2.22
 EOF
 exit 1
 fi
@@ -83,6 +83,7 @@ const API_KEY_PROVIDERS: ApiKeyProviderInfo[] = [
   { id: "openai", label: "OpenAI Platform API", envVar: "OPENAI_API_KEY" },
   { id: "anthropic", label: "Anthropic API", envVar: "ANTHROPIC_API_KEY" },
   { id: "google", label: "Google Gemini API", envVar: "GEMINI_API_KEY" },
+  { id: "lm-studio", label: "LM Studio (local OpenAI-compatible server)" },
   { id: "__custom__", label: "Custom provider (local/self-hosted/proxy)" },
   { id: "amazon-bedrock", label: "Amazon Bedrock (AWS credential chain)" },
   { id: "openrouter", label: "OpenRouter", envVar: "OPENROUTER_API_KEY" },
@@ -132,6 +133,8 @@ async function selectApiKeyProvider(): Promise<ApiKeyProviderInfo | undefined> {
     label: provider.label,
     hint: provider.id === "__custom__"
       ? "Ollama, vLLM, LM Studio, proxies"
+      : provider.id === "lm-studio"
+      ? "http://localhost:1234/v1"
       : provider.envVar ?? provider.id,
   }));
   options.push({ value: "cancel", label: "Cancel" });
@@ -362,6 +365,44 @@ async function promptCustomProviderSetup(): Promise<CustomProviderSetup | undefi
   return { providerId, modelIds, baseUrl, api, apiKeyConfig, authHeader };
 }
 
+async function promptLmStudioProviderSetup(): Promise<CustomProviderSetup | undefined> {
+  printSection("LM Studio");
+  printInfo("Start the LM Studio local server first, then load a model.");
+
+  const baseUrlRaw = await promptText("Base URL", "http://localhost:1234/v1");
+  const { baseUrl } = normalizeCustomProviderBaseUrl("openai-completions", baseUrlRaw);
+  if (!baseUrl) {
+    printWarning("Base URL is required.");
+    return undefined;
+  }
+
+  const detectedModelIds = await bestEffortFetchOpenAiModelIds(baseUrl, "lm-studio", false);
+  let modelIdsDefault = "local-model";
+  if (detectedModelIds && detectedModelIds.length > 0) {
+    const sample = detectedModelIds.slice(0, 10).join(", ");
+    printInfo(`Detected LM Studio models: ${sample}${detectedModelIds.length > 10 ? ", ..." : ""}`);
+    modelIdsDefault = detectedModelIds[0]!;
+  } else {
+    printInfo("No models detected from /models. Enter the exact model id shown in LM Studio.");
+  }
+
+  const modelIdsRaw = await promptText("Model id(s) (comma-separated)", modelIdsDefault);
+  const modelIds = normalizeModelIds(modelIdsRaw);
+  if (modelIds.length === 0) {
+    printWarning("At least one model id is required.");
+    return undefined;
+  }
+
+  return {
+    providerId: "lm-studio",
+    modelIds,
+    baseUrl,
+    api: "openai-completions",
+    apiKeyConfig: "lm-studio",
+    authHeader: false,
+  };
+}
+
 async function verifyCustomProvider(setup: CustomProviderSetup, authPath: string): Promise<void> {
   const registry = createModelRegistry(authPath);
   const modelsError = registry.getError();
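Note: the model detection above delegates to `bestEffortFetchOpenAiModelIds`, whose body is not part of this diff. Below is a minimal sketch of what such a probe can look like against an OpenAI-compatible server; the `GET {baseUrl}/models` endpoint and the `data[].id` response shape are standard OpenAI conventions that LM Studio mirrors, while the function name and error handling here are illustrative, not the actual implementation.

```ts
// Illustrative sketch only: queries an OpenAI-compatible `GET {baseUrl}/models`
// endpoint and returns the model ids, or undefined on any failure.
// The real bestEffortFetchOpenAiModelIds in this commit may differ.
async function sketchFetchOpenAiModelIds(baseUrl: string): Promise<string[] | undefined> {
  try {
    const res = await fetch(`${baseUrl.replace(/\/$/, "")}/models`);
    if (!res.ok) return undefined;
    const body = (await res.json()) as { data?: Array<{ id?: string }> };
    return body.data
      ?.map((m) => m.id)
      .filter((id): id is string => typeof id === "string");
  } catch {
    // Server not running or unreachable; treat as "no models detected".
    return undefined;
  }
}
```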
@@ -548,6 +589,31 @@ async function configureApiKeyProvider(authPath: string, providerId?: string): P
     return configureBedrockProvider(authPath);
   }
 
+  if (provider.id === "lm-studio") {
+    const setup = await promptLmStudioProviderSetup();
+    if (!setup) {
+      printInfo("LM Studio setup cancelled.");
+      return false;
+    }
+
+    const modelsJsonPath = getModelsJsonPath(authPath);
+    const result = upsertProviderConfig(modelsJsonPath, setup.providerId, {
+      baseUrl: setup.baseUrl,
+      apiKey: setup.apiKeyConfig,
+      api: setup.api,
+      authHeader: setup.authHeader,
+      models: setup.modelIds.map((id) => ({ id })),
+    });
+    if (!result.ok) {
+      printWarning(result.error);
+      return false;
+    }
+
+    printSuccess("Saved LM Studio provider.");
+    await verifyCustomProvider(setup, authPath);
+    return true;
+  }
+
   if (provider.id === "__custom__") {
     const setup = await promptCustomProviderSetup();
     if (!setup) {
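For reference, the `upsertProviderConfig` call above persists the LM Studio provider into `models.json` using the fields passed in this hunk. A sketch of the resulting entry, assuming the defaults are accepted and a single model id of `local-model`; the exact on-disk layout around the entry is not shown in this commit.

```ts
// Sketch of the provider entry written by the flow above, assuming defaults.
// Field names mirror the upsertProviderConfig call in this diff; the
// surrounding models.json structure is an assumption, not shown in the commit.
const lmStudioProviderEntry = {
  baseUrl: "http://localhost:1234/v1",
  apiKey: "lm-studio",
  api: "openai-completions",
  authHeader: false,
  models: [{ id: "local-model" }],
};
```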
@@ -79,6 +79,15 @@ test("resolveModelProviderForCommand falls back to API-key providers when OAuth
   assert.equal(resolved?.id, "google");
 });
 
+test("resolveModelProviderForCommand supports LM Studio as a first-class local provider", () => {
+  const authPath = createAuthPath({});
+
+  const resolved = resolveModelProviderForCommand(authPath, "lm-studio");
+
+  assert.equal(resolved?.kind, "api-key");
+  assert.equal(resolved?.id, "lm-studio");
+});
+
 test("resolveModelProviderForCommand prefers OAuth when a provider supports both auth modes", () => {
   const authPath = createAuthPath({});
 
@@ -261,7 +261,7 @@ This usually means the release exists, but not all platform bundles were uploade
 Workarounds:
 - try again after the release finishes publishing
 - pass the latest published version explicitly, e.g.:
-curl -fsSL https://feynman.is/install | bash -s -- 0.2.21
+curl -fsSL https://feynman.is/install | bash -s -- 0.2.22
 EOF
 exit 1
 fi
@@ -110,7 +110,7 @@ This usually means the release exists, but not all platform bundles were uploade
 Workarounds:
 - try again after the release finishes publishing
 - pass the latest published version explicitly, e.g.:
-& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.21
+& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.22
 "@
 }
 
@@ -117,13 +117,13 @@ These installers download the bundled `skills/` and `prompts/` trees plus the re
 The one-line installer already targets the latest tagged release. To pin an exact version, pass it explicitly:
 
 ```bash
-curl -fsSL https://feynman.is/install | bash -s -- 0.2.21
+curl -fsSL https://feynman.is/install | bash -s -- 0.2.22
 ```
 
 On Windows:
 
 ```powershell
-& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.21
+& ([scriptblock]::Create((irm https://feynman.is/install.ps1))) -Version 0.2.22
 ```
 
 ## Post-install setup
@@ -52,9 +52,25 @@ Amazon Bedrock (AWS credential chain)
 
 Feynman verifies the same AWS credential chain Pi uses at runtime, including `AWS_PROFILE`, `~/.aws` credentials/config, SSO, ECS/IRSA, and EC2 instance roles. Once that check passes, Bedrock models become available in `feynman model list` without needing a traditional API key.
 
-### Local models: Ollama, LM Studio, vLLM
+### Local models: LM Studio, Ollama, vLLM
 
-If you want to use a model running locally, choose the API-key flow and then select:
+If you want to use LM Studio, start the LM Studio local server, load a model, choose the API-key flow, and then select:
+
+```text
+LM Studio (local OpenAI-compatible server)
+```
+
+The default settings are:
+
+```text
+Base URL: http://localhost:1234/v1
+Authorization header: No
+API key: lm-studio
+```
+
+Feynman attempts to read LM Studio's `/models` endpoint and prefill the loaded model id.
+
+For Ollama, vLLM, or another OpenAI-compatible local server, choose:
 
 ```text
 Custom provider (baseUrl + API key)
@@ -70,7 +86,7 @@ Model ids: llama3.1:8b
 API key: local
 ```
 
-That same custom-provider flow also works for other OpenAI-compatible local servers such as LM Studio or vLLM. After saving the provider, run:
+After saving the provider, run:
 
 ```bash
 feynman model list
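Because LM Studio and the other local servers mentioned above speak the standard OpenAI-compatible API, you can also sanity-check the endpoint directly, outside Feynman. A minimal sketch, assuming LM Studio's default port and a loaded model id of `local-model`; both are assumptions, so substitute the values your server actually reports.

```ts
// Sends one chat completion to a local OpenAI-compatible server.
// Base URL and model id are assumptions; use what LM Studio actually shows.
const baseUrl = "http://localhost:1234/v1";

const res = await fetch(`${baseUrl}/chat/completions`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "local-model",
    messages: [{ role: "user", content: "Reply with one word." }],
  }),
});

const data = (await res.json()) as {
  choices?: Array<{ message?: { content?: string } }>;
};
console.log(data.choices?.[0]?.message?.content);
```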