sotosbarl committed on
Commit
a49b72a
·
verified ·
1 Parent(s): b52c842

Update app.py

Files changed (1)
  1. app.py +95 -13
app.py CHANGED
@@ -1,23 +1,105 @@
- # Install transformers from source - only needed for versions <= v4.34
- # pip install git+https://github.com/huggingface/transformers.git
- # pip install accelerate
-
  import torch
- from transformers import pipeline

  pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")

- # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
- messages = [
      {
          "role": "system",
-         "content": "You are a friendly chatbot",
      },
-     {"role": "user", "content": "I'm 29 years old and I live in Greece. Tell me how old I am"},
- ]
- prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
- outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
- print(outputs[0]["generated_text"])
  # <|system|>
  # You are a friendly chatbot who always responds in the style of a pirate.</s>
  # <|user|>
 
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
  import torch
+ import pickle
+ import streamlit as st
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+ from translate import Translator
+
+ def init_session_state():
+     if 'history' not in st.session_state:
+         st.session_state.history = ""

+ # Initialize session state
+ init_session_state()
+ # model_name = "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli"
+ # tokenizer = AutoTokenizer.from_pretrained(model_name)
+ # model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
+ classifier = pipeline("zero-shot-classification", model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli")
  pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")

+ # with open('chapter_titles.pkl', 'rb') as file:
+ #     titles_astiko = pickle.load(file)
+ # labels1 = ["κληρονομικό", "ακίνητα", "διαζύγιο"]  # inheritance, real estate, divorce
+ # # labels2 = ["αποδοχή κληρονομιάς", "αποποίηση", "διαθήκη"]  # acceptance of inheritance, renunciation, will
+ # # labels3 = ["μίσθωση", "κυριότητα", "έξωση", "απλήρωτα νοίκια"]  # lease, ownership, eviction, unpaid rent
+
+
+ # titles_astiko = ["γάμος", "αλλοδαπός", "φορολογία", "κληρονομικά", "στέγη", "οικογενειακό", "εμπορικό", "κλοπή", "απάτη"]  # marriage, foreign national, taxation, inheritance, housing, family, commercial, theft, fraud
+ # Load the topic dictionary from the file using pickle
+ with open('my_dict.pickle', 'rb') as file:
+     dictionary = pickle.load(file)
+
+ def classify(text, labels):
+     output = classifier(text, labels, multi_label=False)
+
+     return output
+
+
+ text = st.text_input('Enter some text:')  # Input field for new text
+
+ if text:
+
+     labels = list(dictionary)
+
+     output = classify(text, labels)  # first pass: classify against the top-level topics
+
+     output = output["labels"][0]
+
+     labels = list(dictionary[output])
+
+     output2 = classify(text, labels)  # second pass: classify against the sub-topics of the winner
+
+     output2 = output2["labels"][0]
+
+
+     answer = dictionary[output][output2]
+
+     # Create translator objects with specified source and target languages
+     translator = Translator(from_lang='el', to_lang='en')
+     translator2 = Translator(from_lang='en', to_lang='el')
+
+
+
+     # Translate the reference text and the question from Greek to English
+     answer = translator.translate(answer)
+     text = translator.translate(text)
+
+     # text_to_translate2 = text[499:999]
+     # translated_text2 = translator.translate(text_to_translate2)
+
+
+
+     st.session_state.history += "Based on the following information: " + answer + ", answer this question: " + text + " by reasoning step by step"  # Add new text to history
+     # out = pipe(st.session_state.history)  # Generate output based on history
+
+     messages = [
      {
          "role": "system",
+         "content": "You are a friendly chatbot who answers questions based on the info that I give you: " + answer,
      },
+     {"role": "user", "content": text},
+     ]
+     prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+
+
+
+     st.text(st.session_state.history)
+
+     translated_text2 = translator2.translate(outputs[0]["generated_text"])  # translate the generated string, not the output list
+
+     st.text(translated_text2)
+     # st.text("History: " + st.session_state.history)
+
+     # st.text(output)
+     # st.text(output2)
+
+     # st.text(answer)
+
+ # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
+
  # <|system|>
  # You are a friendly chatbot who always responds in the style of a pirate.</s>
  # <|user|>
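
The new app.py routes each question through two rounds of zero-shot classification over a pickled nested dictionary: first against the top-level topics, then against the sub-topics of the winning topic, and the matching leaf text is handed to TinyLlama as context. A minimal standalone sketch of that routing step; the nested dictionary below is a made-up stand-in for my_dict.pickle, whose real contents are not shown in the diff:

from transformers import pipeline

# Same multilingual NLI model the commit uses for zero-shot classification.
classifier = pipeline("zero-shot-classification", model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli")

# Hypothetical stand-in for the pickled dict: topic -> sub-topic -> reference text.
dictionary = {
    "inheritance": {
        "acceptance of inheritance": "Reference text on accepting an estate ...",
        "renunciation": "Reference text on renouncing an estate ...",
    },
    "real estate": {
        "lease": "Reference text on leases ...",
        "eviction": "Reference text on evictions ...",
    },
}

def best_label(text, labels):
    # multi_label=False scores the candidate labels as mutually exclusive,
    # so the first entry of the returned "labels" list is the single best match.
    return classifier(text, labels, multi_label=False)["labels"][0]

question = "My landlord wants to evict me over unpaid rent."
topic = best_label(question, list(dictionary))            # first pass
subtopic = best_label(question, list(dictionary[topic]))  # second pass
context = dictionary[topic][subtopic]                     # text fed to the chat model
print(topic, subtopic, context)

One caveat the commented-out slice text[499:999] seems to anticipate: the translate package's default MyMemory backend rejects long requests (roughly 500 characters per call), so translating a long answer in one call can fail. If that limit is the concern, a chunking helper along these lines would work; translate_long is a hypothetical name, not part of the package:

from translate import Translator

def translate_long(translator, text, chunk=500):
    # Split into <=500-character pieces, translate each, and rejoin.
    pieces = [text[i:i + chunk] for i in range(0, len(text), chunk)]
    return " ".join(translator.translate(p) for p in pieces)

translator = Translator(from_lang='el', to_lang='en')
# english = translate_long(translator, long_greek_text)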