Update app.py
app.py CHANGED
@@ -1,41 +1,26 @@
-import requests
-import json
 import gradio as gr
+from transformers import pipeline


-
-
-
-headers = {
-    'Content-Type': 'application/json'
-}
+model_name = "curiouscurrent/omnicode"
+text_generator = pipeline("text-generation", model=model_name)

 history = []

 def generate_response(prompt):
-    global history
     history.append(prompt)
     final_prompt = "\n".join(history)
-
-
-
-
-
-    response = requests.post(url, headers=headers, data=json.dumps(data))
-
-    if response.status_code == 200:
-        response = response.json()
-        actual_response = response['response']
-        return actual_response
-    else:
-        print("error:", response.text)
-
+
+    # Generate response
+    response = text_generator(final_prompt, max_length=100)[0]['generated_text']
+
+    return response

 interface = gr.Interface(
     fn=generate_response,
-    inputs=gr.
+    inputs=gr.inputs.Textarea(lines=4, placeholder="Enter your Prompt"),
     outputs="text",
-    title="
-    description="
+    title="Text Generation App",
+    description="Generate text based on the input prompt."
 )
 interface.launch()
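
The new version generates text through a local Transformers pipeline instead of an HTTP request, but two details are worth flagging: gr.inputs.Textarea comes from the legacy gradio.inputs namespace, which is deprecated and has been removed in current Gradio releases, and max_length=100 counts the prompt tokens as well as the generated ones, so the output budget shrinks as the accumulated history grows. Below is a minimal sketch of the same app against current Gradio and Transformers APIs; the switch to gr.Textbox and max_new_tokens is a substitution, not part of the commit, and the model name is carried over from the diff as-is.

import gradio as gr
from transformers import pipeline

# Model name taken from the commit; pipeline() downloads it on first run.
model_name = "curiouscurrent/omnicode"
text_generator = pipeline("text-generation", model=model_name)

history = []

def generate_response(prompt):
    # Accumulate the conversation and resubmit it as a single prompt.
    history.append(prompt)
    final_prompt = "\n".join(history)

    # max_new_tokens bounds only the continuation, whereas max_length
    # also counts the (growing) prompt.
    result = text_generator(final_prompt, max_new_tokens=100)
    return result[0]["generated_text"]

interface = gr.Interface(
    fn=generate_response,
    # gr.Textbox replaces the removed gr.inputs.* components (Gradio 3+).
    inputs=gr.Textbox(lines=4, placeholder="Enter your Prompt"),
    outputs="text",
    title="Text Generation App",
    description="Generate text based on the input prompt.",
)

interface.launch()

Note that generated_text from the text-generation pipeline includes the prompt by default, so the UI echoes the accumulated history along with the new completion; passing return_full_text=False to the pipeline call should return only the continuation.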