-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
* initial soup of commits to add ollama support * formatting * Add mechanism to invoke ollama-specific prompts * Add some tweaks to ollama prompting and response parsing * Remove excessive logging * Reinforce schema in prompt * Add handler context - 3.1b has a big context window * Do not use static analysis when ai provider is ollama * Remove console.log * Make ollamaBaseUrl optional * Remove default value from the ollamaBaseUrl type * split up the json schema and the tool into separate things so they can be both reused * tweak prompts * add a few shot to improve the likelihood that the tool gets called including a sample prompt * make sure the prefs get pulled * formatting * formatting * system prompt * add ollama to the settings * add ollama flow to the request * formatting * get the correct provider property * ollama default * update docs for AI settings * improve error message * fix the list and remove format checker for the website from CI * formatting --------- Co-authored-by: Brett Beutell <[email protected]>
- Loading branch information
1 parent
a4bccd9
commit a82be2f
Showing
16 changed files
with
418 additions
and
96 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2,10 +2,11 @@ import { createAnthropic } from "@ai-sdk/anthropic"; | |
import { createMistral } from "@ai-sdk/mistral"; | ||
import { createOpenAI } from "@ai-sdk/openai"; | ||
import type { Settings } from "@fiberplane/fpx-types"; | ||
import { generateObject } from "ai"; | ||
import { type APICallError, generateObject } from "ai"; | ||
import { createOllama } from "ollama-ai-provider"; | ||
import logger from "../../logger.js"; | ||
import { invokeRequestGenerationPrompt } from "./prompts.js"; | ||
import { requestSchema } from "./tools.js"; | ||
import { getSystemPrompt, invokeRequestGenerationPrompt } from "./prompts.js"; | ||
import { makeRequestTool, requestSchema } from "./tools.js"; | ||
|
||
function configureProvider( | ||
aiProvider: string, | ||
|
@@ -38,6 +39,14 @@ function configureProvider( | |
return mistral(providerConfig.model); | ||
} | ||
|
||
if (aiProvider === "ollama") { | ||
const ollama = createOllama({ | ||
baseURL: providerConfig.baseUrl ?? undefined, | ||
}); | ||
|
||
return ollama(providerConfig.model); | ||
} | ||
|
||
throw new Error("Unknown AI provider"); | ||
} | ||
|
||
|
@@ -95,24 +104,94 @@ export async function generateRequestWithAiProvider({ | |
}); | ||
|
||
try { | ||
const samplePrompt = ` | ||
I need to make a request to one of my Hono api handlers. | ||
Here are some recent requests/responses, which you can use as inspiration for future requests. | ||
E.g., if we recently created a resource, you can look that resource up. | ||
<history> | ||
</history> | ||
The request you make should be a GET request to route: /api/geese/:id | ||
Here is the OpenAPI spec for the handler: | ||
<openapi/> | ||
Here is the middleware that will be applied to the request: | ||
<middleware/> | ||
Here is some additional context for the middleware that will be applied to the request: | ||
<middlewareContext/> | ||
Here is the code for the handler: | ||
<code/> | ||
Here is some additional context for the handler source code, if you need it: | ||
<context/> | ||
`; | ||
|
||
const userPrompt = await invokeRequestGenerationPrompt({ | ||
persona, | ||
method, | ||
path, | ||
handler, | ||
handlerContext, | ||
history, | ||
openApiSpec, | ||
middleware, | ||
middlewareContext, | ||
}); | ||
|
||
const systemPrompt = getSystemPrompt(persona, aiProvider); | ||
|
||
const { | ||
object: generatedObject, | ||
warnings, | ||
usage, | ||
} = await generateObject({ | ||
model: provider, | ||
schema: requestSchema, | ||
prompt: await invokeRequestGenerationPrompt({ | ||
handler, | ||
handlerContext, | ||
history, | ||
openApiSpec, | ||
middleware, | ||
middlewareContext, | ||
persona, | ||
method, | ||
path, | ||
}), | ||
messages: [ | ||
{ role: "system", content: systemPrompt }, | ||
{ role: "user", content: samplePrompt }, | ||
{ | ||
role: "assistant", | ||
content: [ | ||
{ | ||
type: "tool-call", | ||
toolCallId: "call_1", | ||
toolName: "make_request", | ||
args: makeRequestTool, | ||
}, | ||
], | ||
}, | ||
{ | ||
role: "tool", | ||
content: [ | ||
{ | ||
type: "tool-result", | ||
toolCallId: "call_1", | ||
toolName: "make_request", | ||
result: JSON.stringify({ | ||
path: "/api/users/123", | ||
pathParams: [{ key: ":id", value: "123" }], | ||
queryParams: [ | ||
{ key: "include", value: "profile" }, | ||
{ key: "fields", value: "name,email" }, | ||
], | ||
body: JSON.stringify({ | ||
name: "John Doe", | ||
email: "[email protected]", | ||
}), | ||
bodyType: { type: "json", isMultipart: false }, | ||
headers: [], | ||
}), | ||
}, | ||
], | ||
}, | ||
{ role: "user", content: userPrompt }, | ||
], | ||
}); | ||
|
||
logger.debug("Generated object, warnings, usage", { | ||
|
@@ -134,17 +213,27 @@ export async function generateRequestWithAiProvider({ | |
logger.error("Error generating request with AI provider", { | ||
error, | ||
}); | ||
const errorMessage = | ||
error instanceof Error | ||
? error.message | ||
: "Error generating request with AI provider"; | ||
const errorMessage = createErrorMessage(error); | ||
logger.debug("Error message", { errorMessage }); | ||
return { | ||
data: null, | ||
error: { message: errorMessage }, | ||
}; | ||
} | ||
} | ||
|
||
function createErrorMessage(error: unknown) { | ||
if (typeof error === "object" && error !== null && "responseBody" in error) { | ||
return `${(error as APICallError).message}: ${(error as APICallError).responseBody}`; | ||
} | ||
|
||
if (error instanceof Error) { | ||
return error.message; | ||
} | ||
|
||
return "Error generating request with AI provider"; | ||
} | ||
|
||
// NOTE - Copy-pasted from frontend | ||
function hasValidAiConfig(settings: Settings) { | ||
const provider = settings.aiProvider; | ||
|
@@ -170,6 +259,11 @@ function hasValidAiConfig(settings: Settings) { | |
const model = mistral?.model; | ||
return !!apiKey && !!model; | ||
} | ||
case "ollama": { | ||
const ollama = settings.aiProviderConfigurations?.ollama; | ||
const model = ollama?.model; | ||
return !!model; | ||
} | ||
default: | ||
return false; | ||
} | ||
|
Oops, something went wrong.