-
Notifications
You must be signed in to change notification settings - Fork 39
/
bedrock_converse.py
109 lines (96 loc) · 3.63 KB
/
bedrock_converse.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import json
import boto3
from opentelemetry import trace as trace_api
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from openinference.instrumentation import using_attributes
from openinference.instrumentation.bedrock import BedrockInstrumentor
# OTLP/HTTP trace collector endpoint (a locally running collector, e.g. Phoenix
# on its default port — confirm against your collector's config).
endpoint = "http://127.0.0.1:6006/v1/traces"
resource = Resource(attributes={})
tracer_provider = trace_sdk.TracerProvider(resource=resource)
# Each finished span is exported twice: to the OTLP endpoint and to stdout.
# SimpleSpanProcessor exports synchronously — fine for a demo script.
tracer_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter(endpoint)))
tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace_api.set_tracer_provider(tracer_provider=tracer_provider)
# Patch boto3's Bedrock runtime calls so spans are emitted automatically.
# Must run before the client below is created so its calls are instrumented.
BedrockInstrumentor().instrument()
session = boto3.session.Session()
# Shared Bedrock runtime client used by both example functions below.
client = session.client("bedrock-runtime", "us-east-1")
def invoke_example():
    """Demo: call Bedrock's legacy InvokeModel API inside an OpenInference
    attribute context, so the emitted span carries session/user/template data."""
    # Raw JSON body in Anthropic's text-completion format.
    request_body = (
        b'{"prompt": "Human: Hello there, how are you? Assistant:", "max_tokens_to_sample": 1024}'
    )
    demo_metadata = {
        "test-int": 1,
        "test-str": "string",
        "test-list": [1, 2, 3],
        "test-dict": {
            "key-1": "val-1",
            "key-2": "val-2",
        },
    }
    template_vars = {
        "city": "Johannesburg",
        "date": "July 11th",
    }
    # Every Bedrock call made inside this context manager is tagged with the
    # attributes below on its trace span.
    with using_attributes(
        session_id="my-test-session",
        user_id="my-test-user",
        metadata=demo_metadata,
        tags=["tag-1", "tag-2"],
        prompt_template="Who won the soccer match in {city} on {date}",
        prompt_template_version="v1.0",
        prompt_template_variables=template_vars,
    ):
        response = client.invoke_model(modelId="anthropic.claude-v2", body=request_body)
        payload = json.loads(response.get("body").read())
        print(payload["completion"])
def converse_example():
    """Demo: a two-turn chat via Bedrock's Converse API, instrumented with
    OpenInference context attributes (session, user, tags, prompt template)."""
    system_prompt = [{"text": "You are an expert at creating music playlists"}]
    opening_turn = {"role": "user", "content": [{"text": "Create a list of 3 pop songs."}]}
    follow_up_turn = {
        "role": "user",
        "content": [{"text": "Make sure the songs are by artists from the United Kingdom."}],
    }
    inference_config = {"maxTokens": 1024, "temperature": 0.0}
    conversation = []
    with using_attributes(
        session_id="my-test-session",
        user_id="my-test-user",
        metadata={
            "test-int": 1,
            "test-str": "string",
            "test-list": [1, 2, 3],
            "test-dict": {
                "key-1": "val-1",
                "key-2": "val-2",
            },
        },
        tags=["tag-1", "tag-2"],
        prompt_template="Who won the soccer match in {city} on {date}",
        prompt_template_version="v1.0",
        prompt_template_variables={
            "city": "Johannesburg",
            "date": "July 11th",
        },
    ):
        # Turn 1: ask for the playlist and fold the assistant reply back into
        # the running conversation history.
        conversation.append(opening_turn)
        first_reply = client.converse(
            modelId="anthropic.claude-3-5-sonnet-20240620-v1:0",
            system=system_prompt,
            messages=conversation,
            inferenceConfig=inference_config,
        )["output"]["message"]
        conversation.append(first_reply)
        print(first_reply.get("content")[-1].get("text"))
        # Turn 2: send the clarification.
        # NOTE(review): this call switches to a different modelId mid-conversation
        # ("anthropic.claude-v2" vs the 3.5 Sonnet used above) — confirm that is
        # the demo's intent and not a leftover.
        conversation.append(follow_up_turn)
        second_reply = client.converse(
            modelId="anthropic.claude-v2",
            system=system_prompt,
            messages=conversation,
            inferenceConfig=inference_config,
        )["output"]["message"]
        print(second_reply.get("content")[-1].get("text"))
def main():
    """Run both demo flows in sequence."""
    invoke_example()
    converse_example()


if __name__ == "__main__":
    main()