Update app.py
app.py CHANGED
@@ -1,13 +1,9 @@
 import os
 from groq import Groq
 import gradio as gr
-from transformers import AutoModel, AutoConfig
 
 client = Groq(api_key = os.environ.get("GROQ_API_KEY"), )
 
-hf_token = os.environ.get("HF_TOKEN")
-model = AutoModel.from_pretrained("HusseinEid/marian-finetuned-kde4-en-to-fr", token = hf_token)
-
 system_prompt = {
     "role": "system",
     "content":
@@ -27,7 +23,7 @@ async def chat_groq(message, history):
     response_content = ''
 
     stream = client.chat.completions.create(
-        model=
+        model="llama3-8b-8192",
         messages=messages,
         max_tokens=1024,
         temperature=1.2,
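
In effect, this commit drops the unused transformers model download (and the HF_TOKEN it required) and pins the Groq completion call to llama3-8b-8192. The rest of app.py is not shown in the diff, so the sketch below is only one plausible shape for the surrounding chat_groq function and the Gradio wiring: the history handling, the streaming loop, and the gr.ChatInterface call are assumptions, while the create(...) arguments are taken from the diff.

# Minimal sketch, not the repo's full app.py. Only the create(...) call is
# confirmed by the diff; everything else here is an assumed reconstruction.
import os
import gradio as gr
from groq import Groq

client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Placeholder system prompt; the real content string is truncated in the diff.
system_prompt = {"role": "system", "content": "You are a helpful assistant."}

async def chat_groq(message, history):
    # Rebuild the conversation for the API from Gradio's (user, assistant) pairs.
    messages = [system_prompt]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response_content = ''
    stream = client.chat.completions.create(
        model="llama3-8b-8192",   # the model pinned by this commit
        messages=messages,
        max_tokens=1024,
        temperature=1.2,
        stream=True,
    )
    # Accumulate streamed deltas and yield partial text so the chat UI updates live.
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            response_content += delta
            yield response_content

demo = gr.ChatInterface(chat_groq)
demo.launch()

Since the app now only calls the Groq API at runtime, nothing is downloaded from the Hub on startup, which is presumably why the transformers import, the HF_TOKEN lookup, and the marian-finetuned-kde4-en-to-fr model load were removed.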
|