Runtime error
Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ import re
 import base64
 import os
 import copy
+from openai import OpenAI
 
 SUS_PROMPT = f"""
 
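This hunk pulls the OpenAI client in alongside the existing imports. For the import to resolve when the Space builds, the openai package also has to be listed in the Space's requirements.txt (not shown in this commit); if it is missing, app.py fails at startup with ModuleNotFoundError, which would itself surface as a runtime error.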
@@ -155,7 +156,11 @@ def getbool(text):
         return match.group(1)
     return None
 
-llm_client = InferenceClient(token=os.environ['HF_KEY'])
+#llm_client = InferenceClient(token=os.environ['HF_KEY'])
+llm_client = OpenAI(
+    api_key=os.environ.get("XWV1ST04C0QLWNVAUSJWI6VJMR7YDJCKJSAR6TPA"),
+    base_url="https://api.runpod.ai/v2/vllm-xwks6ov5k2ugw5/openai/v1",
+)
 clasif_client = Client("ChavinloSocialRise/BotRejectTest")
 
 AGENT_NAME = "Maddie"
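This is the substantive change: the Hugging Face InferenceClient is commented out and replaced with an OpenAI client pointed at an OpenAI-compatible vLLM endpoint on RunPod. Two things in the new block look suspect. First, os.environ.get() is called with what appears to be the API key itself as the variable name; unless an environment variable literally named XWV1ST04C0QLWNVAUSJWI6VJMR7YDJCKJSAR6TPA exists, api_key comes back as None and every request fails authentication. Second, if that string is the actual RunPod key, committing it to the repository exposes it. A minimal sketch of the usual pattern, assuming the key is stored as a Space secret under the hypothetical name RUNPOD_API_KEY:

import os
from openai import OpenAI

# Read the key from a named secret; the secret name here is an
# assumption, not something that appears in this commit.
llm_client = OpenAI(
    api_key=os.environ["RUNPOD_API_KEY"],  # hypothetical secret name
    base_url="https://api.runpod.ai/v2/vllm-xwks6ov5k2ugw5/openai/v1",
)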
@@ -296,7 +301,7 @@ def attack_sus(message, max_tokens, temperature, top_p, available_pic_str):
     sus_copy = copy.copy(SUS_PROMPT)
     sus_copy = sus_copy.replace("AVAILABLE_PICTURES_REPLACE_TEXT", available_pic_str)
 
-    output = llm_client.chat_completion(
+    output = llm_client.chat.completions.create(
         model="meta-llama/Llama-3.2-11B-Vision-Instruct",
         messages=[
             {
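The hunk cuts off inside the messages list, so only the shape of the call is visible. Both InferenceClient.chat_completion and the OpenAI client return ChatCompletion-style objects, so downstream code reading output.choices[0].message.content should be unaffected by the swap. A sketch of how the call plausibly continues, with the message payload and sampling parameters assumed from the function's signature rather than taken from the commit:

output = llm_client.chat.completions.create(
    model="meta-llama/Llama-3.2-11B-Vision-Instruct",
    messages=[
        {"role": "system", "content": sus_copy},  # assumed placement of the prompt
        {"role": "user", "content": message},
    ],
    max_tokens=max_tokens,      # assumed continuation, not shown in the hunk
    temperature=temperature,
    top_p=top_p,
)
reply = output.choices[0].message.content  # same shape for both clients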
@@ -357,7 +362,7 @@ def respond(
 
     messages.append({"role": "user", "content": message})
 
-    message = llm_client.chat_completion(
+    message = llm_client.chat.completions.create(
         messages,
         model="meta-llama/Llama-3.2-11B-Vision-Instruct",
         max_tokens=max_tokens,
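This hunk is the most likely culprit for the Space's runtime error. InferenceClient.chat_completion accepts the message list as its first positional argument, but chat.completions.create on the OpenAI v1 client is keyword-only, so create(messages, model=...) raises a TypeError the first time respond() executes. The fix is to pass messages by keyword; a sketch, with the parameters after max_tokens assumed to continue as in the surrounding code:

message = llm_client.chat.completions.create(
    messages=messages,  # keyword-only in the OpenAI v1 client
    model="meta-llama/Llama-3.2-11B-Vision-Instruct",
    max_tokens=max_tokens,
    temperature=temperature,  # assumed continuation, not shown in the hunk
    top_p=top_p,
)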