Update app.py
app.py
CHANGED
@@ -11,10 +11,11 @@ import os
 
 api_key = os.environ.get('groq')
 read_key = os.environ.get('HF_TOKEN', None)
-
+client = groq.Client(api_key=api_key)
 # Use Llama 3 70B powered by Groq for answering
 def ask_llm(ort):
-
+    return "hallo"
+
     try:
         completion = client.chat.completions.create(
             model="llama3-70b-8192",
@@ -106,13 +107,13 @@ with gr.Blocks() as demo:
     gr.Markdown("# ")
     with gr.Row():
         ort_input = gr.Textbox(label="Ort", placeholder="Gib den Namen des Ortes ein")
-        links_output = gr.Textbox(label="
+        links_output = gr.Textbox(label="Antwort")
     #links_output = gr.DataFrame(label="Ergebnisse")
     #json_output = gr.JSON(label="Ergebnisse")
 
     def process_ort(ort):
         antwort = ask_llm(ort)
-        antwort=gr.Markdown()
+        #antwort=gr.Markdown()
         return antwort
         links = parse_links_and_content(ort)
         return links
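
For reference, a minimal sketch of how the pieces touched by this commit fit together end to end. Only the client setup, the ask_llm signature, and the Gradio widgets mirror the lines above; everything else (the imports, the message payload sent to Groq, the exception handler, the submit hook-up, and demo.launch()) is not visible in the diff and is filled in here as a plausible placeholder, not as the Space's actual code.

import os
import groq
import gradio as gr

api_key = os.environ.get('groq')
read_key = os.environ.get('HF_TOKEN', None)   # unused in this sketch; kept to mirror the diff
client = groq.Client(api_key=api_key)

# Use Llama 3 70B powered by Groq for answering
def ask_llm(ort):
    # In the committed file a debug stub (return "hallo") sits here and makes the
    # rest of the function unreachable; it is omitted so the Groq call is visible.
    try:
        completion = client.chat.completions.create(
            model="llama3-70b-8192",
            # Assumption: the real message construction sits outside the hunk shown above.
            messages=[{"role": "user", "content": ort}],
        )
        return completion.choices[0].message.content
    except Exception as e:
        # Assumption: the original except clause is not shown in the diff.
        return f"Fehler: {e}"

with gr.Blocks() as demo:
    gr.Markdown("# ")
    with gr.Row():
        ort_input = gr.Textbox(label="Ort", placeholder="Gib den Namen des Ortes ein")
        links_output = gr.Textbox(label="Antwort")

    def process_ort(ort):
        antwort = ask_llm(ort)
        # In the committed file two more lines follow the return below
        # (links = parse_links_and_content(ort); return links); they are
        # unreachable and therefore omitted here.
        return antwort

    # Assumption: the diff does not show how process_ort is wired to the UI;
    # a submit listener on the input box is one plausible hook-up.
    ort_input.submit(fn=process_ort, inputs=ort_input, outputs=links_output)

demo.launch()

A gr.Button with a click handler would serve equally well as the trigger; the submit listener is simply the smallest addition that makes the sketch self-contained.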