File size: 824 Bytes
27d5bfa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Model checkpoint: a German-language 7B causal LM from DiscoResearch.
model_name = "DiscoResearch/DiscoLM_German_7b_v1"
# NOTE(review): trust_remote_code=True executes Python shipped with the
# checkpoint — acceptable only because the repo owner is trusted; confirm.
# Loading happens at import time and downloads several GB on first run.
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

def generate_answer(question: str) -> str:
    """Generate a free-form answer to *question* with the loaded LM.

    The prompt is the literal prefix ``"Question: "`` plus the user text;
    the decoded output therefore still contains the prompt itself.

    Args:
        question: User-supplied question text (expected German, per the UI).

    Returns:
        The decoded generation (prompt + sampled continuation) with
        special tokens stripped.
    """
    # tokenizer(...) (not .encode) also yields the attention_mask, which
    # generate() needs to distinguish real tokens from padding; .to(...)
    # places the tensors on the model's device so GPU setups don't crash.
    encoded = tokenizer("Question: " + question, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **encoded,
        max_length=2000,                       # hard cap incl. prompt tokens
        num_return_sequences=1,
        do_sample=True,                        # stochastic: answers vary per call
        pad_token_id=tokenizer.eos_token_id,   # silence "no pad token" warning
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Wire the generator into a minimal text-in / text-out Gradio UI.
APP_TITLE = "The Art of Prompt Engineering"
APP_DESCRIPTION = "Definiere deine Prompt, am besten auf Deutsch"

iface = gr.Interface(
    fn=generate_answer,
    inputs="text",
    outputs="text",
    title=APP_TITLE,
    description=APP_DESCRIPTION,
)

# share=True publishes a temporary public gradio.live URL in addition to
# the local server — anyone with the link can query the model.
iface.launch(share=True)