hugging2021 commited on
Commit
593284a
·
verified ·
1 Parent(s): fb9a3a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -1
app.py CHANGED
@@ -1,3 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
 
3
- gr.load("models/cognitivecomputations/Wizard-Vicuna-30B-Uncensored").launch()
 
 
 
 
 
 
 
 
 
 
1
import os

# Set the cache directory BEFORE importing transformers: the library reads
# TRANSFORMERS_CACHE when it is imported, so assigning the variable after the
# import (as the original code did) has no effect on where weights are stored.
# NOTE(review): '/path/to/custom/cache/directory' is a placeholder — replace it
# with a real writable path, or delete this line to use the default HF cache.
os.environ['TRANSFORMERS_CACHE'] = '/path/to/custom/cache/directory'

from transformers import AutoModelForCausalLM, AutoTokenizer

# Model checkpoint to serve; tokenizer and weights are downloaded on first run
# (this is a 30B model — expect a very large download and high RAM usage).
model_name = "Monero/WizardLM-Uncensored-SuperCOT-StoryTelling-30b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
10
+
11
# Text-generation function wired into the Gradio interface below.
def generate_text(prompt):
    """Return a model continuation of *prompt* (capped at 100 total tokens).

    Args:
        prompt: User-supplied input text.

    Returns:
        The decoded generation, special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # Pass the attention mask explicitly: calling generate() with only
    # input_ids leaves padding handling ambiguous and makes transformers
    # emit "attention mask is not set" warnings.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs.get("attention_mask"),
        max_length=100,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
16
+
17
import gradio as gr

# Build the Gradio UI: one text box in, the generated continuation out.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="WizardLM Uncensored SuperCOT StoryTelling",
)

# Start the app with a public share link so it is reachable externally.
iface.launch(share=True)