diff --git a/api/package.json b/api/package.json
index 12a7c34d9..7451e8bef 100644
--- a/api/package.json
+++ b/api/package.json
@@ -42,20 +42,16 @@
     "@ai-sdk/anthropic": "^0.0.51",
     "@ai-sdk/mistral": "^0.0.42",
     "@ai-sdk/openai": "^0.0.66",
+    "@anthropic-ai/sdk": "^0.24.3",
     "@clack/core": "^0.3.4",
     "@clack/prompts": "^0.7.0",
-    "@neondatabase/api-client": "^1.10.3",
-    "giget": "^1.2.3",
-    "open": "^10.1.0",
-    "oslo": "^1.2.1",
-    "picocolors": "^1.0.1",
-    "@anthropic-ai/sdk": "^0.24.3",
     "@fiberplane/fpx-types": "workspace:*",
     "@hono/node-server": "^1.11.1",
     "@hono/zod-validator": "^0.2.2",
     "@iarna/toml": "^2.2.5",
     "@langchain/core": "^0.2.15",
     "@libsql/client": "^0.6.2",
+    "@neondatabase/api-client": "^1.10.3",
     "acorn": "^8.11.3",
     "acorn-walk": "^8.3.2",
     "ai": "^3.4.10",
@@ -65,9 +61,14 @@
     "drizzle-orm": "^0.33.0",
     "drizzle-zod": "^0.5.1",
     "figlet": "^1.7.0",
+    "giget": "^1.2.3",
     "hono": "^4.6.2",
     "minimatch": "^10.0.1",
+    "ollama-ai-provider": "^0.15.2",
+    "open": "^10.1.0",
     "openai": "^4.47.1",
+    "oslo": "^1.2.1",
+    "picocolors": "^1.0.1",
     "source-map": "^0.7.4",
     "typescript": "^5.5.4",
     "typescript-language-server": "^4.3.3",
diff --git a/api/src/lib/ai/index.ts b/api/src/lib/ai/index.ts
index 3eb99ead9..b977b80a3 100644
--- a/api/src/lib/ai/index.ts
+++ b/api/src/lib/ai/index.ts
@@ -2,10 +2,11 @@ import { createAnthropic } from "@ai-sdk/anthropic";
 import { createMistral } from "@ai-sdk/mistral";
 import { createOpenAI } from "@ai-sdk/openai";
 import type { Settings } from "@fiberplane/fpx-types";
-import { generateObject } from "ai";
+import { type APICallError, generateObject } from "ai";
+import { createOllama } from "ollama-ai-provider";
 import logger from "../../logger.js";
-import { invokeRequestGenerationPrompt } from "./prompts.js";
-import { requestSchema } from "./tools.js";
+import { getSystemPrompt, invokeRequestGenerationPrompt } from "./prompts.js";
+import { makeRequestTool, requestSchema } from "./tools.js";
 
 function configureProvider(
   aiProvider: string,
@@ -38,6 +39,14 @@ function configureProvider(
     return mistral(providerConfig.model);
   }
 
+  if (aiProvider === "ollama") {
+    const ollama = createOllama({
+      baseURL: providerConfig.baseUrl ?? undefined,
+    });
+
+    return ollama(providerConfig.model);
+  }
+
   throw new Error("Unknown AI provider");
 }
 
@@ -95,6 +104,47 @@ export async function generateRequestWithAiProvider({
   });
 
   try {
+    const samplePrompt = `
+I need to make a request to one of my Hono api handlers.
+
+Here are some recent requests/responses, which you can use as inspiration for future requests.
+E.g., if we recently created a resource, you can look that resource up.
+
+
+
+The request you make should be a GET request to route: /api/geese/:id
+
+Here is the OpenAPI spec for the handler:
+
+Here is the middleware that will be applied to the request:
+
+Here is some additional context for the middleware that will be applied to the request:
+
+Here is the code for the handler:
+
+Here is some additional context for the handler source code, if you need it:
+`;
+
+    const userPrompt = await invokeRequestGenerationPrompt({
+      persona,
+      method,
+      path,
+      handler,
+      handlerContext,
+      history,
+      openApiSpec,
+      middleware,
+      middlewareContext,
+    });
+
+    const systemPrompt = getSystemPrompt(persona, aiProvider);
+
     const {
       object: generatedObject,
       warnings,
@@ -102,17 +152,46 @@
     } = await generateObject({
       model: provider,
       schema: requestSchema,
-      prompt: await invokeRequestGenerationPrompt({
-        handler,
-        handlerContext,
-        history,
-        openApiSpec,
-        middleware,
-        middlewareContext,
-        persona,
-        method,
-        path,
-      }),
+      messages: [
+        { role: "system", content: systemPrompt },
+        { role: "user", content: samplePrompt },
+        {
+          role: "assistant",
+          content: [
+            {
+              type: "tool-call",
+              toolCallId: "call_1",
+              toolName: "make_request",
+              args: makeRequestTool,
+            },
+          ],
+        },
+        {
+          role: "tool",
+          content: [
+            {
+              type: "tool-result",
+              toolCallId: "call_1",
+              toolName: "make_request",
+              result: JSON.stringify({
+                path: "/api/users/123",
+                pathParams: [{ key: ":id", value: "123" }],
+                queryParams: [
+                  { key: "include", value: "profile" },
+                  { key: "fields", value: "name,email" },
+                ],
+                body: JSON.stringify({
+                  name: "John Doe",
+                  email: "john@example.com",
+                }),
+                bodyType: { type: "json", isMultipart: false },
+                headers: [],
+              }),
+            },
+          ],
+        },
+        { role: "user", content: userPrompt },
+      ],
     });
 
     logger.debug("Generated object, warnings, usage", {
@@ -134,10 +213,8 @@
     logger.error("Error generating request with AI provider", {
       error,
     });
-    const errorMessage =
-      error instanceof Error
-        ? error.message
-        : "Error generating request with AI provider";
+    const errorMessage = createErrorMessage(error);
+    logger.debug("Error message", { errorMessage });
     return {
       data: null,
       error: { message: errorMessage },
@@ -145,6 +222,18 @@
   }
 }
 
+function createErrorMessage(error: unknown) {
+  if (typeof error === "object" && error !== null && "responseBody" in error) {
+    return `${(error as APICallError).message}: ${(error as APICallError).responseBody}`;
+  }
+
+  if (error instanceof Error) {
+    return error.message;
+  }
+
+  return "Error generating request with AI provider";
+}
+
 // NOTE - Copy-pasted from frontend
 function hasValidAiConfig(settings: Settings) {
   const provider = settings.aiProvider;
@@ -170,6 +259,11 @@
       const model = mistral?.model;
       return !!apiKey && !!model;
     }
+    case "ollama": {
+      const ollama = settings.aiProviderConfigurations?.ollama;
+      const model = ollama?.model;
+      return !!model;
+    }
     default:
       return false;
   }
diff --git a/api/src/lib/ai/prompts.ts b/api/src/lib/ai/prompts.ts
index 375826b69..a9e2f678b 100644
--- a/api/src/lib/ai/prompts.ts
+++ b/api/src/lib/ai/prompts.ts
@@ -1,6 +1,11 @@
 import { PromptTemplate } from "@langchain/core/prompts";
 
-export const getSystemPrompt = (persona: string) => {
+export const getSystemPrompt = (persona: string, provider?: string) => {
+  if (provider === "ollama") {
+    return persona === "QA"
+      ? LLAMA_3_8B_QA_PARAMETER_GENERATION_SYSTEM_PROMPT
+      : LLAMA_3_8B_FRIENDLY_PARAMETER_GENERATION_SYSTEM_PROMPT;
+  }
   return persona === "QA"
     ? QA_PARAMETER_GENERATION_SYSTEM_PROMPT
     : FRIENDLY_PARAMETER_GENERATION_SYSTEM_PROMPT;
@@ -228,7 +233,171 @@
 You are an expert QA Engineer, a thorough API tester, and a code debugging assistant for web APIs that use Hono,
 a typescript web framework similar to express. You have a generally hostile disposition.
 
-You need to help craft requests to route handlers. 
+You need to help craft requests to route handlers.
+
+You will be provided the source code of a route handler for an API route, and you should generate
+query parameters, a request body, and headers that will test the request.
+
+Be clever and creative with test data. Avoid just writing things like "test".
+
+For example, if you get a route like \`/users/:id\`, you should return a filled-in "path" field,
+like \`/users/1234567890\` and a "pathParams" field like:
+
+{ "path": "/users/1234567890", "pathParams": { "key": ":id", "value": "1234567890" } }
+
+*Remember to keep the colon in the pathParam key!*
+
+If you get a route like \`POST /users/:id\` with a handler like:
+
+\`\`\`ts
+async (c) => {
+  const token = c.req.headers.get("authorization")?.split(" ")[1]
+
+  const auth = c.get("authService");
+  const isAuthorized = await auth.isAuthorized(token)
+  if (!isAuthorized) {
+    return c.json({ message: "Unauthorized" }, 401)
+  }
+
+  const db = c.get("db");
+
+  const id = c.req.param('id');
+  const { email } = await c.req.json()
+
+  const user = (await db.update(user).set({ email }).where(eq(user.id, +id)).returning())?.[0];
+
+  if (!user) {
+    return c.json({ message: 'User not found' }, 404);
+  }
+
+  return c.json(user);
+}
+\`\`\`
+
+You should return a filled-in "path" field like \`/users/1234567890\` and a "pathParams" field like:
+
+{ "path": "/users/1234567890", "pathParams": { "key": ":id", "value": "1234567890" } }
+
+and a header like:
+
+{ "headers": { "key": "authorization", "value": "Bearer admin" } }
+
+and a body like:
+
+{ "body": { "email": "" } }
+
+It is possible that the body type is JSON, text, or form data. You can use the wrong body type to see what happens.
+But if the body type is a file stream, just return an empty body.
+
+For form data, you can return a body type of "form-data". You can still return a JSON object like above,
+I will handle converting it to form data.
+
+You should focus on trying to break things. You are a QA.
+
+You are the enemy of bugs. To protect quality, you must find bugs.
+
+Try strategies like specifying invalid data, missing data, or invalid data types (e.g., using strings instead of numbers).
+
+Try to break the system. But do not break yourself!
+Keep your responses to a reasonable length. Including your random data.
+
+Never add the x-fpx-trace-id header to the request.
+
+Use the tool "make_request". Always respond in valid JSON.
+***Don't make your responses too long, otherwise we cannot parse your JSON response.***
+`);
+
+export const LLAMA_3_8B_FRIENDLY_PARAMETER_GENERATION_SYSTEM_PROMPT =
+  cleanPrompt(`
+You are a friendly, expert full-stack engineer and an API testing assistant for apps that use Hono,
+a typescript web framework similar to express.
+
+You need to help craft requests to JSON API route handlers.
+
+You will be provided the source code of a route handler for an API route, and you should generate
+query parameters, a request body, and headers that will test the request.
+
+Be clever and creative with test data. Avoid just writing things like "test" or directly copying over the things from this sample (this is just for reference). Make sure to closely follow the code of the handler
+
+For example, if you get a route like \`/users/:id\`, you should return a URL like
+\`/users/10\` and a pathParams parameter like this:
+
+{ "path": "/users/10", "pathParams": { "key": ":id", "value": "10" } }
+
+*Remember to keep the colon in the pathParam key!*
+
+If you get a route like \`POST /users/:id\` with a handler like:
+
+\`\`\`ts
+async (c) => {
+  const token = c.req.headers.get("authorization")?.split(" ")[1]
+
+  const auth = c.get("authService");
+  const isAuthorized = await auth.isAuthorized(token)
+  if (!isAuthorized) {
+    return c.json({ message: "Unauthorized" }, 401)
+  }
+
+  const { returnOnCreated } = c.req.query()
+
+  const db = c.get("db");
+
+  const id = c.req.param('id');
+  const { email } = await c.req.json()
+
+  const user = (await db.update(user).set({ email }).where(eq(user.id, +id)).returning())?.[0];
+
+  if (!user) {
+    return c.json({ message: 'User not found' }, 404);
+  }
+
+  return c.json(returnOnCreated ? user : { updated: true });
+}
+\`\`\`
+
+You should return a URL like:
+
+\`/users/64\` and a pathParams like:
+
+{ "path": "/users/64", "pathParams": { "key": ":id", "value": "64" } }
+
+and query params like:
+
+{ "queryParams": { "key": "returnOnCreated", "value": "true" } }
+
+and a header like:
+
+{ "headers": { "key": "authorization", "value": "Bearer " } }
+
+and a body like:
+
+"{\\"email\\": \\"paul@beatles.music\\"}"
+
+with a body type of "json"
+
+*Never add the x-fpx-trace-id header to the request.*
+
+===
+
+Help the user test the happy path.
+
+Only return valid JSON response.
+`);
+
+/**
+ * A QA (hostile) tester prompt.
+ *
+ * This prompt is used to generate requests for the API.
+ * It is a QA tester, who tries to break your api.
+ *
+ * NOTE - I had to stop instructing the AI to create very long data in this prompt.
+ * It would end up repeating 9999999 ad infinitum and break JSON responses.
+ */
+export const LLAMA_3_8B_QA_PARAMETER_GENERATION_SYSTEM_PROMPT = cleanPrompt(`
+You are an expert QA Engineer, a thorough API tester, and a code debugging assistant for web APIs that use Hono,
+a typescript web framework similar to express. You have a generally hostile disposition.
+
+You need to help craft requests to route handlers.
 
 You will be provided the source code of a route handler for an API route, and you should generate
 query parameters, a request body, and headers that will test the request.
@@ -287,13 +456,13 @@ But if the body type is a file stream, just return an empty body.
 For form data, you can return a body type of "form-data". You can still return a JSON object like above,
 I will handle converting it to form data.
 
-You should focus on trying to break things. You are a QA. 
+You should focus on trying to break things. You are a QA.
 
 You are the enemy of bugs. To protect quality, you must find bugs.
 
-Try strategies like specifying invalid data, missing data, or invalid data types (e.g., using strings instead of numbers). 
+Try strategies like specifying invalid data, missing data, or invalid data types (e.g., using strings instead of numbers).
 
-Try to break the system. 
+Try to break the system. But do not break yourself!
 Keep your responses to a reasonable length. Including your random data.
 
 Even if you might see it in history - never add the x-fpx-trace-id header to the request.
diff --git a/api/src/routes/inference/inference.ts b/api/src/routes/inference/inference.ts
index 139572558..166b8f621 100644
--- a/api/src/routes/inference/inference.ts
+++ b/api/src/routes/inference/inference.ts
@@ -61,6 +61,8 @@ app.post(
     );
   }
 
+  const provider = inferenceConfig.aiProvider;
+
   // Expand out of scope identifiers in the handler function, to add as additional context
   //
   // Uncomment console.time to see how long this takes
@@ -68,10 +70,13 @@
   //
   // console.time("Handler and Middleware Expansion");
   const [handlerContextPerformant, middlewareContextPerformant] =
-    await expandHandler(handler, middleware ?? []).catch((error) => {
-      logger.error(`Error expanding handler and middleware: ${error}`);
-      return [null, null];
-    });
+    // HACK - Ditch the expand handler for ollama for now, it overwhelms llama 3.1-8b
+    provider !== "ollama"
+      ? await expandHandler(handler, middleware ?? []).catch((error) => {
+          logger.error(`Error expanding handler and middleware: ${error}`);
+          return [null, null];
+        })
+      : [null, null];
   // console.timeEnd("Handler and Middleware Expansion");
 
   // Generate the request
diff --git a/packages/types/package.json b/packages/types/package.json
index 0dbf8ce72..c4840631b 100644
--- a/packages/types/package.json
+++ b/packages/types/package.json
@@ -20,6 +20,7 @@
   "dependencies": {
     "@ai-sdk/anthropic": "^0.0.51",
     "@ai-sdk/mistral": "^0.0.42",
-    "@ai-sdk/openai": "^0.0.66"
+    "@ai-sdk/openai": "^0.0.66",
+    "ollama-ai-provider": "^0.15.2"
   }
 }
diff --git a/packages/types/src/settings.ts b/packages/types/src/settings.ts
index a8f5b21f3..8ec046ff7 100644
--- a/packages/types/src/settings.ts
+++ b/packages/types/src/settings.ts
@@ -1,14 +1,10 @@
 import type { AnthropicProvider } from "@ai-sdk/anthropic";
 import type { MistralProvider } from "@ai-sdk/mistral";
 import type { OpenAIProvider } from "@ai-sdk/openai";
+import type { OllamaProvider } from "ollama-ai-provider";
 import { z } from "zod";
 
-export type MistralModelOptionsType = MistralProvider extends (
-  modelId: infer T,
-  ...args: unknown[]
-) => unknown
-  ? T
-  : never;
+export type MistralModelOptionsType = Parameters<MistralProvider>[0];
 
 export const MistralModelOptions: Partial<
   Record<MistralModelOptionsType, string>
@@ -30,12 +26,7 @@ export const MistralModelSchema = z.union([
   z.literal("mistral-large-latest"),
 ]);
 
-export type AnthropicModelOptionsType = AnthropicProvider extends (
-  modelId: infer T,
-  ...args: unknown[]
-) => unknown
-  ? T
-  : never;
+export type AnthropicModelOptionsType = Parameters<AnthropicProvider>[0];
 
 export const AnthropicModelOptions: Partial<
   Record<AnthropicModelOptionsType, string>
@@ -55,12 +46,20 @@ export const AnthropicModelSchema = z.union([
 
 export type AnthropicModel = z.infer<typeof AnthropicModelSchema>;
 
-export type OpenAIModelOptionsType = OpenAIProvider extends (
-  modelId: infer T,
-  ...args: unknown[]
-) => unknown
-  ? T
-  : never;
+export type OllamaModelOptionsType = Parameters<OllamaProvider>[0];
+
+export const OllamaModelOptions: Partial<
+  Record<OllamaModelOptionsType, string>
+> = {
+  "llama3.1": "LLAMA 3.1",
+  "llama3.1:8b": "LLAMA 3.1 8B",
+  "llama3.1:70b": "LLAMA 3.1 70B",
+  "llama3.2": "LLAMA 3.2",
+  "llama3.2:1b": "LLAMA 3.2 1B",
+  "llama3.2:3b": "LLAMA 3.2 3B",
+};
+
+export type OpenAIModelOptionsType = Parameters<OpenAIProvider>[0];
 
 export const OpenAIModelOptions: Partial<
   Record<OpenAIModelOptionsType, string>
@@ -85,12 +84,14 @@ export type OpenAIModel = z.infer<typeof OpenAIModelSchema>;
 
 export const ProviderOptions = {
   openai: "OpenAI",
   anthropic: "Anthropic",
+  ollama: "Ollama",
   mistral: "Mistral",
 } as const;
 
 export const AiProviderTypeSchema = z.union([
   z.literal("openai"),
   z.literal("anthropic"),
+  z.literal("ollama"),
   z.literal("mistral"),
 ]);
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 5a5aa363b..d2fdda80e 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -101,6 +101,9 @@ importers:
       minimatch:
         specifier: ^10.0.1
         version: 10.0.1
+      ollama-ai-provider:
+        specifier: ^0.15.2
+        version: 0.15.2(zod@3.23.8)
       open:
         specifier: ^10.1.0
         version: 10.1.0
@@ -440,6 +443,9 @@ importers:
       '@ai-sdk/openai':
         specifier: ^0.0.66
         version: 0.0.66(zod@3.23.8)
+      ollama-ai-provider:
+        specifier: ^0.15.2
+        version: 0.15.2(zod@3.23.8)
     devDependencies:
       zod:
         specifier: ^3.23.8
@@ -5875,6 +5881,7 @@
 
   libsql@0.3.19:
     resolution: {integrity: sha512-Aj5cQ5uk/6fHdmeW0TiXK42FqUlwx7ytmMLPSaUQPin5HKKKuUPD62MAbN4OEweGBBI7q1BekoEN4gPUEL6MZA==}
+    cpu: [x64, arm64, wasm32]
     os: [darwin, linux, win32]
 
   lilconfig@2.1.0:
@@ -6456,6 +6463,15 @@
   ohash@1.1.4:
     resolution: {integrity: sha512-FlDryZAahJmEF3VR3w1KogSEdWX3WhA5GPakFx4J81kEAiHyLMpdLLElS8n8dfNadMgAne/MywcvmogzscVt4g==}
 
+  ollama-ai-provider@0.15.2:
+    resolution: {integrity: sha512-bMDUlYmohulD87Xrv6meuftQdmFTygtrQywy6/gqdf1bTsJFP1VCx3MrisLFBzb4mMOj02NER7yZhiGIlAx30w==}
+    engines: {node: '>=18'}
+    peerDependencies:
+      zod: ^3.0.0
+    peerDependenciesMeta:
+      zod:
+        optional: true
+
   once@1.4.0:
     resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
 
@@ -6623,6 +6639,9 @@
   parse5@7.1.2:
     resolution: {integrity: sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==}
 
+  partial-json@0.1.7:
+    resolution: {integrity: sha512-Njv/59hHaokb/hRUjce3Hdv12wd60MtM9Z5Olmn+nehe0QDAsRtRbJPvJ0Z91TusF0SuZRIvnM+S4l6EIP8leA==}
+
   path-browserify@1.0.1:
     resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==}
 
@@ -14564,6 +14583,14 @@
 
   ohash@1.1.4: {}
 
+  ollama-ai-provider@0.15.2(zod@3.23.8):
+    dependencies:
+      '@ai-sdk/provider': 0.0.24
+      '@ai-sdk/provider-utils': 1.0.20(zod@3.23.8)
+      partial-json: 0.1.7
+    optionalDependencies:
+      zod: 3.23.8
+
   once@1.4.0:
     dependencies:
       wrappy: 1.0.2
@@ -14787,6 +14814,8 @@
       dependencies:
         entities: 4.5.0
 
+  partial-json@0.1.7: {}
+
   path-browserify@1.0.1: {}
 
   path-exists@4.0.0: {}
diff --git a/studio/src/components/CodeMirrorEditor/CodeMirrorEditorCssOverrides.css b/studio/src/components/CodeMirrorEditor/CodeMirrorEditorCssOverrides.css
index 0ec5de6ac..d002cb99f 100644
--- a/studio/src/components/CodeMirrorEditor/CodeMirrorEditorCssOverrides.css
+++ b/studio/src/components/CodeMirrorEditor/CodeMirrorEditorCssOverrides.css
@@ -7,4 +7,4 @@
   white-space: pre;
   width: 100%; /* Ensure the editor takes the full width of its container */
   max-width: 900px;
-}
\ No newline at end of file
+}
diff --git a/studio/src/hooks/useAiEnabled.ts b/studio/src/hooks/useAiEnabled.ts
index de385c9de..0a3335d48 100644
--- a/studio/src/hooks/useAiEnabled.ts
+++ b/studio/src/hooks/useAiEnabled.ts
@@ -30,6 +30,11 @@ function hasValidAiConfig(settings: Settings) {
       const model = mistral?.model;
       return !!apiKey && !!model;
     }
+    case "ollama": {
+      const ollama = settings.aiProviderConfigurations?.ollama;
+      const model = ollama?.model;
+      return !!model;
+    }
     default:
       return false;
   }
diff --git a/studio/src/pages/RequestorPage/RequestPanel/styles.css b/studio/src/pages/RequestorPage/RequestPanel/styles.css
index e8971f9e2..68b219deb 100644
--- a/studio/src/pages/RequestorPage/RequestPanel/styles.css
+++ b/studio/src/pages/RequestorPage/RequestPanel/styles.css
@@ -1,6 +1,7 @@
 /* Define the pulse keyframes */
 @keyframes fpxpulse {
-  0%, 100% {
+  0%,
+  100% {
     transform: scale(1);
     opacity: 1;
   }
@@ -13,4 +14,4 @@
 /* Apply the pulse animation */
 .fpx-pulse {
   animation: fpxpulse .85s infinite;
-}
\ No newline at end of file
+}
diff --git a/studio/src/pages/SettingsPage/AISettingsForm.tsx b/studio/src/pages/SettingsPage/AISettingsForm.tsx
index d7ef0ea43..4330d1f02 100644
--- a/studio/src/pages/SettingsPage/AISettingsForm.tsx
+++ b/studio/src/pages/SettingsPage/AISettingsForm.tsx
@@ -26,6 +26,7 @@ import {
   type AiProviderType,
   AnthropicModelOptions,
   MistralModelOptions,
+  OllamaModelOptions,
   OpenAIModelOptions,
   ProviderOptions,
   type Settings,
@@ -53,7 +54,9 @@ function useModelOptions(provider: AiProviderType) {
       ? AnthropicModelOptions
       : provider === "mistral"
         ? MistralModelOptions
-        : {},
+        : provider === "ollama"
+          ? OllamaModelOptions
+          : {},
   );
 
   // HACK - Anthropic models end in their date of release, so we sort by release date descending
@@ -229,26 +232,28 @@
         )}
       />
-      (
-          API Key
-          
-            field.onChange(e.target.value)}
-            placeholder={`Enter ${ProviderOptions[provider as AiProviderType]} API Key`}
-            // HACK - Prevent clipping of focus ring
-            className="mx-[2px]"
-          />
-          
-        )}
-      />
+      {provider !== "ollama" && (
+        (
+            API Key
+            
+              field.onChange(e.target.value)}
+              placeholder={`Enter ${ProviderOptions[provider as AiProviderType]} API Key`}
+              // HACK - Prevent clipping of focus ring
+              className="mx-[2px]"
+            />
+            
+          )}
+        />
+      )}

-As of writing, Studio supports both OpenAI and Anthropic as LLM providers, and allows selection of the following models:
+As of writing, Studio supports OpenAI, Anthropic, Mistral, and Ollama as LLM providers, and allows selection of the following models:
 
-  - Claude 3.5 Sonnet
-  - Claude 3 Opus
-  - Claude 3 Sonnet
-  - Claude 3 Haiku
+  - Claude 3.5 Sonnet
+  - Claude 3 Opus
+  - Claude 3 Haiku
+
+
+  - GPT-4o
+  - GPT-4o Mini
+  - GPT-4 Turbo
+
+
+  - Open Mistral 7B
+  - Open Mistral 8x7B
+  - Open Mistral 8x22B
+  - Mistral Nemo
+  - Mistral Small (Latest)
+  - Mistral Large (Latest)
+
+
+  - Llama 3.1
+  - Llama 3.1 8B
+  - Llama 3.1 70B
+  - Llama 3.2
+  - Llama 3.2 1B
+  - Llama 3.2 3B
 
-  - GPT-4o
-  - GPT-4o Mini
-  - GPT-4 Turbo
 
 You can select the provider and model once you've enabled AI Request Autofill.
 
 [Image: Settings Page - Select AI Provider]
 
 Once you've selected the provider and model, fill in your API key and click "Save". You're ready to go!
 
 The API key is stored locally in your project, and ignored by git by default. (Specifically, it's stored in a local database, which you can find in `.fpxconfig/fpx.db`.)
 
-### Using a different LLM provider
-
-If your LLM provider exposes an API that is compatible with OpenAI or Anthropic, you can use it. You just need to set the base URL in your provider configuration.
-
-[Image: Settings Page - Set Base URL]
-
-This is a way to use a local AI, if you're so inclined! There are [instructions in our GitHub repo](https://github.com/fiberplane/fpx/tree/main/local-inference) on how to do this.
-Just be warned: in our experiments with running local models, request parameters would often take 10+ seconds to generate, which is a little slow 🐢.
-
 ## Example: Testing like a QA Engineer
 
 By default, Studio will generate request data that takes inspiration from the most recent requests you've made to your API, and it will try to help you test the happy path.
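One detail worth calling out from `api/src/lib/ai/index.ts` above: the request is no longer generated from a single prompt. The conversation passed to `generateObject` is seeded with one canned `make_request` tool round-trip before the real prompt, so smaller models (the `// HACK` in `inference.ts` singles out Llama 3.1 8B) see a concrete example of the output shape they must produce. Here is a minimal, self-contained sketch of that few-shot pattern, under stated assumptions: the function name, the inlined schema, the prompt strings, and the model choice are illustrative stand-ins, not the real `requestSchema` and `makeRequestTool` from `api/src/lib/ai/tools.ts`.

```ts
import { generateObject } from "ai";
import { createOllama } from "ollama-ai-provider";
import { z } from "zod";

// Stand-in for the real requestSchema from api/src/lib/ai/tools.ts
const requestSchema = z.object({
  path: z.string(),
  headers: z.array(z.object({ key: z.string(), value: z.string() })),
  body: z.string().optional(),
});

export async function generateRequest(systemPrompt: string, userPrompt: string) {
  // With no options, ollama-ai-provider targets the local Ollama default endpoint
  const ollama = createOllama();
  const model = ollama("llama3.1:8b");

  const { object } = await generateObject({
    model,
    schema: requestSchema,
    messages: [
      { role: "system", content: systemPrompt },
      // A canned exchange that shows the model the exact tool-call shape...
      { role: "user", content: "Sample prompt describing a GET route" },
      {
        role: "assistant",
        content: [
          {
            type: "tool-call",
            toolCallId: "call_1",
            toolName: "make_request",
            args: { path: "/api/users/123", headers: [] },
          },
        ],
      },
      {
        role: "tool",
        content: [
          {
            type: "tool-result",
            toolCallId: "call_1",
            toolName: "make_request",
            result: JSON.stringify({ path: "/api/users/123", headers: [] }),
          },
        ],
      },
      // ...followed by the real request-generation prompt
      { role: "user", content: userPrompt },
    ],
  });

  return object;
}
```

Priming the history this way is much cheaper than fine-tuning, and per the diff it was enough to keep Llama 3.1 8B emitting parseable JSON, which is also why the Ollama-specific system prompts warn the model not to make its responses too long.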
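The "different LLM provider" section removed from the docs above is superseded by first-class Ollama support, but the underlying `baseUrl` mechanism is still how `configureProvider` points at a local endpoint. The sketch below shows both options under stated assumptions: `resolveModel` and the `ProviderConfig` shape are invented for illustration, while `createOllama`, `createOpenAI`, and their `baseURL` option are the actual APIs used in the diff.

```ts
import { createOpenAI } from "@ai-sdk/openai";
import { createOllama } from "ollama-ai-provider";

// Illustrative config shape; the real one lives in @fiberplane/fpx-types
type ProviderConfig = { model: string; apiKey?: string; baseUrl?: string };

function resolveModel(aiProvider: string, config: ProviderConfig) {
  if (aiProvider === "ollama") {
    // No API key needed; when baseUrl is undefined the provider falls back
    // to the local Ollama default endpoint.
    const ollama = createOllama({ baseURL: config.baseUrl ?? undefined });
    return ollama(config.model);
  }

  // Any OpenAI-compatible server (a local inference proxy, for example)
  // can be targeted by overriding baseURL on the OpenAI provider.
  const openai = createOpenAI({
    apiKey: config.apiKey,
    baseURL: config.baseUrl,
  });
  return openai(config.model);
}
```

This mirrors the validation change in `hasValidAiConfig`: Ollama counts as configured with just a model name, while the hosted providers also require an API key, which is why the settings form hides the API key field when the Ollama provider is selected.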