generated from kyoh86/denops-boilerplate.vim
-
Notifications
You must be signed in to change notification settings - Fork 2
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing 3 changed files with 32 additions and 38 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2,7 +2,7 @@ import { | |
is, | ||
type PredicateType, | ||
} from "https://deno.land/x/[email protected]/mod.ts"; | ||
import { isErrorResponse, isFormat, type RequestOptions } from "./types.ts"; | ||
import { isErrorResponse, type RequestOptions } from "./types.ts"; | ||
import { parseJSONStream } from "./base.ts"; | ||
import { doPost } from "./base.ts"; | ||
|
||
|
@@ -11,32 +11,6 @@ import { doPost } from "./base.ts"; | |
// Endpoint: /api/generate | ||
// Usage: https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion | ||
|
||
export const isGenerateCompletionParam = is.ObjectOf({ | ||
// The model name | ||
model: is.String, | ||
// The prompt to generate a response for | ||
prompt: is.String, | ||
// (optional) A list of base64-encoded images (for multimodal models such as llava) | ||
images: is.OptionalOf(is.ArrayOf(is.String)), | ||
// (optional) The format to return a response in. Currently the only accepted value is json | ||
format: isFormat, | ||
// (optional) Additional model parameters listed in the documentation for the Modelfile such as temperature | ||
options: is.OptionalOf(is.Record), | ||
// (optional) System message to (overrides what is defined in the Modelfile) | ||
system: is.OptionalOf(is.String), | ||
// (optional) The full prompt or prompt template (overrides what is defined in the Modelfile) | ||
template: is.OptionalOf(is.String), | ||
// (optional) The context parameter returned from a previous request to /generate, this can be used to keep a short conversational memory | ||
context: is.OptionalOf(is.ArrayOf(is.Number)), | ||
// (optional) If false the response will be returned as a single response object, rather than a stream of objects | ||
stream: is.OptionalOf(is.Boolean), | ||
// (optional) If true no formatting will be applied to the prompt. You may choose to use the raw parameter if you are specifying a full templated prompt in your request to the API. | ||
raw: is.OptionalOf(is.Boolean), | ||
}); | ||
export type GenerateCompletionParam = PredicateType< | ||
typeof isGenerateCompletionParam | ||
>; | ||
|
||
export const isGenerateCompletionResponse = is.OneOf([ | ||
isErrorResponse, | ||
is.ObjectOf({ | ||
|
@@ -68,16 +42,37 @@ export type GenerateCompletionResponse = PredicateType< | |
typeof isGenerateCompletionResponse | ||
>; | ||
|
||
export type GenerateCompletionParam = { | ||
// A list of base64-encoded images (for multimodal models such as llava) | ||
images?: string[] | undefined; | ||
// The format to return a response in. Currently the only accepted value is json | ||
format?: "json" | undefined; | ||
// Additional model parameters listed in the documentation for the Modelfile such as temperature | ||
options?: Record<PropertyKey, unknown> | undefined; | ||
// System message to (overrides what is defined in the Modelfile) | ||
system?: string | undefined; | ||
// The full prompt or prompt template (overrides what is defined in the Modelfile) | ||
template?: string | undefined; | ||
// The context parameter returned from a previous request to /generate, this can be used to keep a short conversational memory | ||
context?: number[] | undefined; | ||
// If true no formatting will be applied to the prompt. You may choose to use the raw parameter if you are specifying a full templated prompt in your request to the API. | ||
raw?: boolean | undefined; | ||
}; | ||
|
||
/** Generate a response for a given prompt with a provided model. | ||
* This is a streaming endpoint, so there will be a series of responses. | ||
* The final response object will include statistics and additional data from the request. | ||
*/ | ||
export async function generateCompletion( | ||
param: GenerateCompletionParam, | ||
// The model name | ||
model: string, | ||
// The prompt to generate a response for | ||
prompt: string, | ||
param?: GenerateCompletionParam, | ||
options?: RequestOptions, | ||
) { | ||
return parseJSONStream( | ||
await doPost("/api/generate", param, options), | ||
await doPost("/api/generate", { model, prompt, ...param }, options), | ||
isGenerateCompletionResponse, | ||
); | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters