import gradio as gr
import spaces
from transformers import pipeline
from diffusers import DiffusionPipeline
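
# Load the diffusion pipeline once at startup; '...' is a placeholder for a model id.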
pipe = DiffusionPipeline.from_pretrained(...)
pipe.to('cuda')
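
# ZeroGPU: allocate a GPU only while this function runs.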
@spaces.GPU
def generate(prompt):
    return pipe(prompt).images

gr.Interface(
    fn=generate,
    inputs=gr.Text(),
    outputs=gr.Gallery(),
).launch()
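
# NOTE: launch() blocks when this file runs as a script, so the chat demo below
# only starts after the interface above is shut down; gr.TabbedInterface can
# serve both demos from a single launch() if that is the intent.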

# GPT-2 text-generation pipeline for the chat demo (defaults to CPU).
generator = pipeline('text-generation', model='gpt2')
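
# Run GPT-2 on a prompt and return the generated text.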
def generate_text(prompt):
    response = generator(prompt, max_length=100, num_return_sequences=1, truncation=True)
    return response[0]['generated_text']

def chatbot(message, history):
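    # Coerce the incoming message to a plain string before generating.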
    message = str(message)
    return generate_text(message)

gr.ChatInterface(chatbot).launch()