#%%
import boto3
import json
import logging

# Placeholder credentials: never hardcode real keys in source code.
aws_access_key_id = 'YOUR_AWS_ACCESS_KEY_ID'
aws_secret_access_key = 'YOUR_AWS_SECRET_ACCESS_KEY'
aws_region = "eu-north-1"
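# A safer alternative (a minimal sketch): read the keys from the standard
# environment variables, which boto3 also picks up on its own.
import os
aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", aws_access_key_id)
aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", aws_secret_access_key)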
#%%
from anthropic import AnthropicBedrock

client = AnthropicBedrock(
    # Authenticate either by providing the keys below or via the default AWS credential
    # providers, e.g. ~/.aws/credentials or the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
    # environment variables.
    aws_access_key=aws_access_key_id,
    aws_secret_key=aws_secret_access_key,
    # Temporary credentials can be used with aws_session_token.
    # Read more at https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html.
    aws_session_token=None,
    # aws_region changes the AWS region to which the request is made. By default the SDK
    # reads AWS_REGION and falls back to us-east-1; it does not read ~/.aws/config.
    aws_region=aws_region,
)

# Two candidate model IDs; the Claude v2.1 ID in `modelid` is the one used below.
modelid = 'anthropic.claude-v2:1'
model_id = 'eu.anthropic.claude-3-7-sonnet-20250219-v1:0'
message = client.messages.create(
    model=modelid,
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello, world"}],
)
print(message.content)
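#%%
# Streaming with the same AnthropicBedrock client (a minimal sketch, assuming the
# anthropic SDK's messages.stream() helper): text deltas are printed as they arrive.
with client.messages.stream(
    model=modelid,
    max_tokens=256,
    messages=[{"role": "user", "content": "Hello, world"}],
) as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)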
#%%
from botocore.exceptions import ClientError

session = boto3.Session(
    aws_access_key_id=aws_access_key_id,
    aws_secret_access_key=aws_secret_access_key,
    region_name=aws_region,
)
bedrock_runtime = session.client("bedrock-runtime")

model_id = "anthropic.claude-v2:1"
payload = {
    "anthropic_version": "bedrock-2023-05-31",
    "max_tokens": 100,
    "messages": [{"role": "user", "content": "Hello!"}],
}
try:
    response = bedrock_runtime.invoke_model_with_response_stream(
        modelId=model_id,
        body=json.dumps(payload),
        contentType="application/json",
        accept="application/json",
    )
    print("✅ Streaming appears to be enabled for Claude v2.1.")
    for event in response['body']:
        chunk = event['chunk']['bytes']
        print(chunk.decode(), end="")
except ClientError as e:
    code = e.response['Error']['Code']
    if code == "AccessDeniedException":
        print("❌ Streaming is NOT enabled for Claude v2.1: Access denied.")
    elif code == "ValidationException":
        print("⚠️ Model does not support streaming or bad payload.")
    else:
        print(f"❌ Unexpected error: {e}")
except Exception as e:
    print(f"❌ General error: {e}")
#%%
messages = [
    {"role": "user", "content": "Can you tell me a fun fact about llamas?"}
]
payload = {
    "anthropic_version": "bedrock-2023-05-31",
    "max_tokens": 256,
    "messages": messages,
}

# ✅ 1. Test NON-streaming (invoke_model)
print("🧪 Testing invoke_model (non-streaming)...")
try:
    # Use the boto3 bedrock-runtime client here; the AnthropicBedrock `client`
    # created above does not expose invoke_model.
    response = bedrock_runtime.invoke_model(
        modelId=model_id,
        body=json.dumps(payload),
        contentType="application/json",
        accept="application/json",
    )
    result = json.loads(response["body"].read().decode("utf-8"))
    print("✅ invoke_model succeeded.")
    print("🧠 Claude's reply:", result["content"][0]["text"])
except ClientError as e:
    print("❌ invoke_model failed:", e)
# ❌ 2. Test Streaming (invoke_model_with_response_stream)
print("\n🧪 Testing invoke_model_with_response_stream (streaming)...")
try:
    stream_response = bedrock_runtime.invoke_model_with_response_stream(
        modelId=model_id,
        body=json.dumps(payload),
        contentType="application/json",
        accept="application/json",
    )
    print("✅ Streaming supported. Response:")
    for event in stream_response["body"]:
        chunk = event.get("chunk", {}).get("bytes", b"")
        if chunk:
            decoded = json.loads(chunk.decode("utf-8"))
            # Text arrives in content_block_delta events under delta["text"].
            delta = decoded.get("delta", {}).get("text", "")
            print(delta, end="", flush=True)
except ClientError as e:
    code = e.response["Error"]["Code"]
    if code == "AccessDeniedException":
        print("❌ AccessDeniedException: Streaming is not enabled for your role.")
    elif code == "ValidationException":
        print("⚠️ ValidationException: Model might not support streaming or payload is malformed.")
    else:
        print(f"❌ Unexpected error: {e}")
except Exception as e:
    print(f"❌ General error: {e}")
#%%
# Placeholder credentials again: never hardcode real keys in source code.
AWS_ACCESS_KEY_ID = 'YOUR_AWS_ACCESS_KEY_ID'
AWS_SECRET_ACCESS_KEY = 'YOUR_AWS_SECRET_ACCESS_KEY'
aws_region = "eu-west-1"
from langchain_aws import BedrockLLM

# modelid = 'anthropic.claude-v2:1'  # Claude alternative (use provider="anthropic")
modelid = "mistral.mistral-large-2402-v1:0"  # the model actually used below
# model_id = 'eu.anthropic.claude-3-7-sonnet-20250219-v1:0'
custom_llm = BedrockLLM(
    aws_access_key_id=AWS_ACCESS_KEY_ID,
    aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
    region_name=aws_region,
    # Which Bedrock "provider" you're talking to:
    # - "anthropic" for Claude models
    # - "cohere" for Cohere models
    # - "mistral" for Mistral models
    provider="mistral",
    model_id=modelid,
    model_kwargs={"temperature": 0.7},
    streaming=True,
)
print(custom_llm.invoke("What's the recipe for mayonnaise?"))
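#%%
# Because streaming=True, LangChain's .stream() interface yields text chunks as they
# arrive (a minimal sketch of token-by-token output):
for chunk in custom_llm.stream("What's the recipe for mayonnaise?"):
    print(chunk, end="", flush=True)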
#%%
from langchain_aws import ChatBedrockConverse
import os

# German system prompt. In English: "You are DevalBot, a conversational assistant of
# the German Institute for Development Evaluation (DEval). DEval provides governmental
# and civil-society organisations in development cooperation with independent,
# scientifically grounded evaluations. Your main language is German, so reply in German
# by default. You can also help with statistical analysis and programming in Stata and R."
system_prompt = (
    "Du bist DevalBot, ein konversationeller Assistent des Deutschen Evaluierungsinstituts "
    "für Entwicklungsbewertung (DEval). DEval bietet staatlichen und zivilgesellschaftlichen "
    "Organisationen in der Entwicklungszusammenarbeit unabhängige und wissenschaftlich fundierte "
    "Evaluierungen. Deine Hauptsprache ist Deutsch; antworte daher standardmäßig auf Deutsch. "
    "Du kannst zudem bei statistischen Analysen und Programmierung in Stata und R unterstützen."
)
#%%
# Initialize the streaming Bedrock chat model
bedrock_llm = ChatBedrockConverse(
    aws_access_key_id=AWS_ACCESS_KEY_ID,
    aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
    region_name=os.environ.get("AWS_DEFAULT_REGION", "eu-west-1"),
    model_id="mistral.mistral-large-2402-v1:0",  # or your preferred Bedrock model
    temperature=0.7,
)
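#%%
# A quick smoke test (a minimal sketch): send the system prompt plus one user turn.
from langchain_core.messages import SystemMessage, HumanMessage
reply = bedrock_llm.invoke([
    SystemMessage(content=system_prompt),
    HumanMessage(content="Hallo, wer bist du?"),
])
print(reply.content)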
#%%
# This cell assumes the objects defined above live in a local module `bedrock_client.py`;
# when running inside this notebook, the first import can be skipped.
from bedrock_client import bedrock_llm, system_prompt
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage

def build_messages(
        user_message: str,
        history: list[dict]) -> list:
    messages: list = []
    # 1) Add the system prompt first
    messages.append(SystemMessage(content=system_prompt))
    # 2) Walk the history and map each entry to a HumanMessage or AIMessage
    for msg in history:
        if msg["role"] == "user":
            messages.append(HumanMessage(content=msg["content"]))
        elif msg["role"] == "assistant":
            messages.append(AIMessage(content=msg["content"]))
        else:
            # Ignore (or log) unexpected roles
            continue
    # 3) Finally, append the new user message
    messages.append(HumanMessage(content=user_message))
    return messages

build_messages('hi', [])
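#%%
# Wiring it together (a minimal sketch): `history` is a hypothetical prior-turn list
# in the {"role": ..., "content": ...} format that build_messages expects.
history = [
    {"role": "user", "content": "Hallo!"},
    {"role": "assistant", "content": "Hallo! Wie kann ich helfen?"},
]
reply = bedrock_llm.invoke(build_messages("Was macht DEval?", history))
print(reply.content)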
#%% |