Skip to content

Commit

Permalink
plumb through span tools to playground
Browse files Browse the repository at this point in the history
  • Loading branch information
Parker-Stafford committed Oct 25, 2024
1 parent abc14cf commit e40ee2c
Show file tree
Hide file tree
Showing 6 changed files with 130 additions and 6 deletions.
1 change: 1 addition & 0 deletions app/src/pages/playground/PlaygroundOutput.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ interface PlaygroundOutputProps extends PlaygroundInstanceProps {}

function PlaygroundOutputMessage({ message }: { message: ChatMessage }) {
const { role, content, toolCalls } = message;
console.log("test--", message);

Check failure on line 37 in app/src/pages/playground/PlaygroundOutput.tsx

View workflow job for this annotation

GitHub Actions / CI Typescript

Unexpected console statement
const styles = useChatMessageStyles(role);

return (
Expand Down
3 changes: 3 additions & 0 deletions app/src/pages/playground/constants.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,9 @@ export const MODEL_CONFIG_PARSING_ERROR =
"Unable to parse model config, expected llm.model_name to be present.";
export const MODEL_CONFIG_WITH_INVOCATION_PARAMETERS_PARSING_ERROR =
"Unable to parse model config, expected llm.invocation_parameters json string to be present.";
// TODO(parker / apowell) - adjust this error message with anthropic support https://github.com/Arize-ai/phoenix/issues/5100
// Surfaced by getToolsFromAttributes when the span's llm.tools attributes fail
// llmToolSchema validation; only OpenAI-format tool schemas are supported today.
export const TOOLS_PARSING_ERROR =
"Unable to parse tools, expected tools to be an array of valid OpenAI tools.";

export const modelProviderToModelPrefixMap: Record<ModelProvider, string[]> = {
AZURE_OPENAI: [],
Expand Down
56 changes: 54 additions & 2 deletions app/src/pages/playground/playgroundUtils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,14 @@ import {
DEFAULT_CHAT_ROLE,
DEFAULT_MODEL_PROVIDER,
} from "@phoenix/constants/generativeConstants";
import { ModelConfig, PlaygroundInstance } from "@phoenix/store";
import {
ChatMessage,
createPlaygroundInstance,
generateMessageId,
generateToolId,
ModelConfig,
OpenAITool,
PlaygroundInstance,
} from "@phoenix/store";
import { assertUnreachable } from "@phoenix/typeUtils";
import { safelyParseJSON } from "@phoenix/utils/jsonUtils";
Expand All @@ -22,12 +25,15 @@ import {
OUTPUT_MESSAGES_PARSING_ERROR,
OUTPUT_VALUE_PARSING_ERROR,
SPAN_ATTRIBUTES_PARSING_ERROR,
TOOLS_PARSING_ERROR,
} from "./constants";
import {
chatMessageRolesSchema,
chatMessagesSchema,
llmInputMessageSchema,
llmOutputMessageSchema,
LlmToolSchema,
llmToolSchema,
MessageSchema,
modelConfigSchema,
modelConfigWithInvocationParametersSchema,
Expand Down Expand Up @@ -229,6 +235,47 @@ export function getModelConfigFromAttributes(parsedAttributes: unknown): {
return { modelConfig: null, parsingErrors: [MODEL_CONFIG_PARSING_ERROR] };
}

/**
* Processes the tools from the span attributes into OpenAI tools to be used in the playground
* @param tools tools from the span attributes
* @returns playground OpenAI tools
*/
/**
 * Converts the llm.tools span attributes into OpenAI tools usable by the playground.
 * Sparse entries (array slots without a nested `tool` object) are skipped.
 * @param tools the parsed llm.tools span attributes
 * @returns the playground OpenAI tools, one per valid attribute entry
 */
function processAttributeTools(tools: LlmToolSchema): OpenAITool[] {
  const attributeTools = tools?.llm?.tools ?? [];
  const playgroundTools: OpenAITool[] = [];
  for (const entry of attributeTools) {
    const toolAttribute = entry?.tool;
    if (toolAttribute != null) {
      playgroundTools.push({
        id: generateToolId(),
        definition: toolAttribute.json_schema,
      });
    }
  }
  return playgroundTools;
}

/**
* Attempts to get llm.tools from the span attributes.
* @param parsedAttributes the JSON parsed span attributes
* @returns the tools from the span attributes
*/
/**
 * Attempts to pull llm.tools off of the JSON parsed span attributes.
 * A span without llm / tools attributes is not treated as an error — it
 * simply did not record any tools.
 * @param parsedAttributes the JSON parsed span attributes
 * @returns the playground tools and parsing errors; tools is null when
 * validation fails or when the span recorded no tools
 */
export function getToolsFromAttributes(
  parsedAttributes: unknown
):
  | { tools: OpenAITool[]; parsingErrors: never[] }
  | { tools: null; parsingErrors: string[] } {
  const result = llmToolSchema.safeParse(parsedAttributes);

  if (!result.success) {
    return { tools: null, parsingErrors: [TOOLS_PARSING_ERROR] };
  }
  // Missing llm / tools attributes are not a parsing failure — the span just has no tools
  if (result.data?.llm?.tools == null) {
    return { tools: null, parsingErrors: [] };
  }
  return { tools: processAttributeTools(result.data), parsingErrors: [] };
}

/**
* Takes a {@link PlaygroundSpan|Span} and attempts to transform it's attributes into various fields on a {@link PlaygroundInstance}.
* @param span the {@link PlaygroundSpan|Span} to transform into a playground instance
Expand Down Expand Up @@ -264,7 +311,10 @@ export function transformSpanAttributesToPlaygroundInstance(
const { modelConfig, parsingErrors: modelConfigParsingErrors } =
getModelConfigFromAttributes(parsedAttributes);

// TODO(parker): add support for tools, variables, and input / output variants
const { tools, parsingErrors: toolsParsingErrors } =
getToolsFromAttributes(parsedAttributes);

// TODO(parker): add support for prompt template variables
// https://github.com/Arize-ai/phoenix/issues/4886
return {
playgroundInstance: {
Expand All @@ -278,11 +328,13 @@ export function transformSpanAttributesToPlaygroundInstance(
}
: basePlaygroundInstance.template,
output,
tools: tools ?? basePlaygroundInstance.tools,
},
parsingErrors: [
...messageParsingErrors,
...outputParsingErrors,
...modelConfigParsingErrors,
...toolsParsingErrors,
],
};
}
Expand Down
73 changes: 70 additions & 3 deletions app/src/pages/playground/schemas.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,13 @@ import {
LLMAttributePostfixes,
MessageAttributePostfixes,
SemanticAttributePrefixes,
SemanticConventions,

Check failure on line 7 in app/src/pages/playground/schemas.ts

View workflow job for this annotation

GitHub Actions / CI Typescript

'SemanticConventions' is defined but never used
ToolAttributePostfixes,
} from "@arizeai/openinference-semantic-conventions";

import { openAIToolCallSchema, openAIToolSchema } from "@phoenix/schemas";
import { ChatMessage } from "@phoenix/store";
import { Mutable, schemaForType } from "@phoenix/typeUtils";
import { isObject, Mutable, schemaForType } from "@phoenix/typeUtils";
import { safelyParseJSON } from "@phoenix/utils/jsonUtils";

import { InvocationParameters } from "./__generated__/PlaygroundOutputSubscription.graphql";
Expand Down Expand Up @@ -39,7 +42,7 @@ const toolCallSchema = z
/**
 * The contents of a single span message attribute.
 * Content is optional because tool-call messages may carry no content.
 */
const messageContentsSchema = z.object({
  [MessageAttributePostfixes.role]: z.string(),
  [MessageAttributePostfixes.content]: z.string().optional(),
  [MessageAttributePostfixes.tool_calls]: z.array(toolCallSchema).optional(),
});

// A span message attribute: the contents nested under the `message` prefix
const messageSchema = z.object({
  [SemanticAttributePrefixes.message]: messageContentsSchema,
});
Expand Down Expand Up @@ -91,7 +94,10 @@ const chatMessageSchema = schemaForType<ChatMessage>()(
z.object({
id: z.number(),
role: chatMessageRolesSchema,
content: z.string(),
// Tool call messages may not have content
content: z.string().optional(),
toolCallId: z.string().optional(),
toolCalls: z.array(openAIToolCallSchema).optional(),
})
);

Expand Down Expand Up @@ -182,6 +188,67 @@ export const modelConfigWithInvocationParametersSchema = z.object({
}),
});

/**
* The zod schema for llm.tools.{i}.tool.json_schema attribute
* This will be a json string but should be parsed as an object
*/
/**
 * The zod schema for llm.tools.{i}.tool.json_schema attribute.
 * The attribute arrives as a JSON string; it is parsed into an object and
 * then validated as an OpenAI tool definition.
 */
// TODO(parker / apowell) - adjust this transformation with anthropic tool support https://github.com/Arize-ai/phoenix/issues/5100
export const toolJSONSchemaSchema = z
  .string()
  .transform((jsonString, ctx) => {
    const { json: parsed } = safelyParseJSON(jsonString);

    // Reject non-JSON strings and JSON scalars/arrays — only objects are valid here
    if (parsed == null || !isObject(parsed)) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message: "The tool JSON schema must be a valid JSON object",
      });
      return z.NEVER;
    }
    return parsed;
  })
  .transform((parsedObject, ctx) => {
    const result = openAIToolSchema.safeParse(parsedObject);

    if (result.success) {
      return result.data;
    }
    ctx.addIssue({
      code: z.ZodIssueCode.custom,
      message: "The tool JSON schema must be a valid OpenAI tool schema",
    });
    return z.NEVER;
  });

// A single llm.tools.{i} entry; array slots may be sparse, so each entry is optional
const llmToolEntrySchema = z
  .object({
    [SemanticAttributePrefixes.tool]: z.object({
      [ToolAttributePostfixes.json_schema]: toolJSONSchemaSchema,
    }),
  })
  .optional();

/**
 * The zod schema for llm.tools
 * @see {@link https://github.com/Arize-ai/openinference/blob/main/spec/semantic_conventions.md|Semantic Conventions}
 * Note there are other attributes that can be on llm.tools.{i}.tool, namely description, name, and parameters
 * however, these are encompassed by the json schema in some cases and calls to api's using tools in a non json format
 * is not supported in the playground yet
 */
export const llmToolSchema = z
  .object({
    [SemanticAttributePrefixes.llm]: z
      .object({
        [LLMAttributePostfixes.tools]: z.array(llmToolEntrySchema).optional(),
      })
      .optional(),
  })
  .optional();

export type LlmToolSchema = z.infer<typeof llmToolSchema>;

/**
* Default set of invocation parameters for all providers and models.
*/
Expand Down
1 change: 1 addition & 0 deletions app/src/store/playground/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ export type PlaygroundInputMode = "manual" | "dataset";
export type ChatMessage = {
id: number;
role: ChatMessageRole;
// Tool call messages may not have content
content?: string;
toolCalls?: OpenAIToolCall[];
toolCallId?: string;
Expand Down
2 changes: 1 addition & 1 deletion src/phoenix/server/api/subscriptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -624,7 +624,7 @@ def _llm_output_messages(
yield f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_ROLE}", "assistant"
if content := "".join(chunk.content for chunk in text_chunks):
yield f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_CONTENT}", content
for tool_call_index, tool_call_chunks_ in tool_call_chunks.items():
for tool_call_index, (_tool_call_id, tool_call_chunks_) in enumerate(tool_call_chunks.items()):
if tool_call_chunks_ and (name := tool_call_chunks_[0].function.name):
yield (
f"{LLM_OUTPUT_MESSAGES}.0.{MESSAGE_TOOL_CALLS}.{tool_call_index}.{TOOL_CALL_FUNCTION_NAME}",
Expand Down

0 comments on commit e40ee2c

Please sign in to comment.