Spaces: Runtime error
Update app.py
app.py CHANGED
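This commit extends attack_sus to accept max_tokens, temperature, and top_p, forwards those settings from respond(), and fills in both chat_completion calls with explicit sampling parameters and stream=False: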
@@ -141,7 +141,7 @@ message contents
 
 """
 
-def attack_sus(message):
+def attack_sus(message, max_tokens, temperature, top_p):
     output = llm_client.chat_completion(
         model="meta-llama/Llama-3.2-11B-Vision-Instruct",
         messages=[
@@ -154,8 +154,10 @@ def attack_sus(message):
                 "content": message
             }
         ],
-
-
+        max_tokens=max_tokens,
+        stream=False,
+        temperature=temperature,
+        top_p=top_p,
     )
 
     return output.choices[0].delta.content
@@ -173,7 +175,7 @@ def respond(
     suspicious = getbool(clasif_client.predict(text=message, api_name="/predict"))
 
     if suspicious == "POSITIVE":
-        for message in attack_sus(message):
+        for message in attack_sus(message, max_tokens, temperature, top_p):
             response += token
             yield response
     elif suspicious == "NEGATIVE":
@@ -187,10 +189,11 @@ def respond(
 
     messages.append({"role": "user", "content": message})
 
-    for message in
+    for message in llm_client.chat_completion(
+        model="meta-llama/Llama-3.2-11B-Vision-Instruct",
         messages,
         max_tokens=max_tokens,
-        stream=
+        stream=False,
         temperature=temperature,
         top_p=top_p,
     ):
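One thing worth flagging: both calls now pass stream=False, yet the code still iterates over the results and reads output.choices[0].delta.content. Assuming llm_client is a huggingface_hub.InferenceClient (the diff never shows its construction), a non-streaming chat_completion returns a single ChatCompletionOutput whose text lives at choices[0].message.content; the .delta field only exists on the chunks yielded when stream=True, so this mismatch is a plausible cause of the Space's "Runtime error" status. A minimal sketch of a streaming attack_sus under that assumption, plus a hypothetical respond_positive helper mirroring the POSITIVE branch:

from huggingface_hub import InferenceClient

llm_client = InferenceClient()  # assumption: token/endpoint are configured elsewhere

def attack_sus(message, max_tokens, temperature, top_p):
    # With stream=True, chat_completion yields ChatCompletionStreamOutput
    # chunks whose text is at choices[0].delta.content, which is what the
    # calling loop in respond() appears to expect.
    for chunk in llm_client.chat_completion(
        model="meta-llama/Llama-3.2-11B-Vision-Instruct",
        messages=[{"role": "user", "content": message}],
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # the final chunk's delta can be empty
            yield token

def respond_positive(message, max_tokens, temperature, top_p):
    # Hypothetical helper mirroring the POSITIVE branch of respond(): naming
    # the loop variable token (rather than message) makes response += token
    # accumulate actual text, which the committed code never binds.
    response = ""
    for token in attack_sus(message, max_tokens, temperature, top_p):
        response += token
        yield response

If the non-streaming call is kept instead, the fix runs the other way: read output.choices[0].message.content once and yield the full string, rather than looping over the return value.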