Spaces:
Sleeping
Sleeping
Commit
·
bb92a95
1
Parent(s):
1031ad9
Parse conv_prefix from JSON before classification
Browse files
app.py
CHANGED
@@ -2,19 +2,20 @@ import gradio as gr
|
|
2 |
from jinja2 import Template
|
3 |
import openai
|
4 |
import os
|
|
|
5 |
# Dedicated HF inference endpoint for the primary classifier; the key is a
# placeholder ("NA") — presumably the endpoint does not require auth. TODO confirm.
API_ENDPOINT = "https://txl0ptjvttfogwt9.us-east-1.aws.endpoints.huggingface.cloud/v1/"
API_KEY = "NA"

# Llama Guard endpoint/key are read from the environment; os.getenv returns
# None when unset, so downstream client construction will fail loudly if missing.
LLAMA_API_ENDPOINT=os.getenv("LLAMA_API_ENDPOINT")
LLAMA_API_KEY=os.getenv("LLAMA_API_KEY")
|
10 |
-
def llama_guard_classify(conv_prefix,
|
11 |
-
response = {"role": "assistant", "content": response_content}
|
12 |
model_name = 'meta-llama/Meta-Llama-Guard-3-8B'
|
13 |
client = openai.OpenAI(
|
14 |
base_url=LLAMA_API_ENDPOINT,
|
15 |
api_key=LLAMA_API_KEY
|
16 |
)
|
17 |
conv = conv_prefix
|
|
|
18 |
conv.append(response)
|
19 |
output = client.chat.completions.create(
|
20 |
model=model_name,
|
@@ -63,6 +64,7 @@ Now, please output the following as a JSON object:
|
|
63 |
|
64 |
def process_inputs(conv_prefix, response_content):
    """Wrap *response_content* as an assistant turn and run both classifiers.

    Returns a 2-tuple: (classify_prompt result, llama_guard_classify result).
    """
    assistant_turn = {"role": "assistant", "content": response_content}
    # Evaluation order matters only for side effects; keep the original order.
    primary_verdict = classify_prompt(conv_prefix, assistant_turn)
    guard_verdict = llama_guard_classify(conv_prefix, assistant_turn)
    return primary_verdict, guard_verdict
|
|
|
2 |
from jinja2 import Template
|
3 |
import openai
|
4 |
import os
|
5 |
+
import json
|
6 |
API_ENDPOINT = "https://txl0ptjvttfogwt9.us-east-1.aws.endpoints.huggingface.cloud/v1/"
|
7 |
API_KEY = "NA"
|
8 |
|
9 |
LLAMA_API_ENDPOINT=os.getenv("LLAMA_API_ENDPOINT")
|
10 |
LLAMA_API_KEY=os.getenv("LLAMA_API_KEY")
|
11 |
+
def llama_guard_classify(conv_prefix, response):
|
|
|
12 |
model_name = 'meta-llama/Meta-Llama-Guard-3-8B'
|
13 |
client = openai.OpenAI(
|
14 |
base_url=LLAMA_API_ENDPOINT,
|
15 |
api_key=LLAMA_API_KEY
|
16 |
)
|
17 |
conv = conv_prefix
|
18 |
+
print(conv)
|
19 |
conv.append(response)
|
20 |
output = client.chat.completions.create(
|
21 |
model=model_name,
|
|
|
64 |
|
65 |
def process_inputs(conv_prefix, response_content):
    """Parse the conversation prefix, wrap the response, and run both classifiers.

    Args:
        conv_prefix: Conversation prefix, either a JSON-encoded string (as
            delivered by the UI textbox) or an already-parsed list of
            message dicts.
        response_content: Raw assistant reply text to classify.

    Returns:
        Tuple of (classify_prompt output, llama_guard_classify output).

    Raises:
        json.JSONDecodeError: If *conv_prefix* is a string but not valid JSON.
    """
    response = {"role": "assistant", "content": response_content}
    # Fix: only decode when given a string — the unconditional json.loads()
    # raised TypeError when callers passed an already-parsed list.
    if isinstance(conv_prefix, str):
        conv_prefix = json.loads(conv_prefix)
    output = classify_prompt(conv_prefix, response)
    llama_output = llama_guard_classify(conv_prefix, response)
    return output, llama_output
|