File size: 1,455 Bytes
d65b1bc 275b80a d65b1bc 275b80a d65b1bc 444a3b4 d65b1bc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
import os
import instructor
from groq import Groq
from pydantic import BaseModel, Field
from typing import List, Dict
from pydantic import BaseModel
class Scene(BaseModel):
    """A single scene of the generated video.

    Pairs one narration segment with the image prompts used to
    illustrate it.
    """

    # Text narrated/displayed during this scene.
    narration: str
    # Text-to-image prompts that visualize the narration.
    image_prompts: List[str]
class VideoOutput(BaseModel):
    """Structured response schema for the chatbot: an ordered list of
    scenes making up the whole video."""

    # Scenes in playback order.
    scenes: List[Scene]
# SECURITY: the API key was previously hard-coded here. A key committed to
# source control is leaked and must be revoked; read it from the
# environment instead.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
# instructor.from_groq patches the client's chat.completions.create so it
# accepts a `response_model` parameter and returns a parsed Pydantic model
# (JSON mode forces the model to emit structured JSON) rather than a raw
# completion object.
client = instructor.from_groq(client, mode=instructor.Mode.JSON)
def chatbot(prompt: str, model: str = "llama3-70b-8192") -> dict:
    """Send *prompt* to the Groq chat API and return a structured result.

    Args:
        prompt: User message forwarded verbatim to the model.
        model: Groq model identifier (default: "llama3-70b-8192").

    Returns:
        A plain dict matching the VideoOutput schema: a "scenes" list
        whose items carry "narration" and "image_prompts".
    """
    response: VideoOutput = client.chat.completions.create(
        model=model,
        max_tokens=5000,
        # instructor validates (and retries) the reply until it parses
        # into a VideoOutput instance.
        response_model=VideoOutput,
        messages=[
            {
                "role": "user",
                "content": prompt,
            },
        ],
    )
    # model_dump() is the Pydantic v2 API; .dict() is deprecated there
    # (instructor requires Pydantic v2, so v2 is what runs here).
    return response.model_dump()
|