Spaces: Runtime error
# Standard library
import pickle

# Third-party
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
from translate import Translator

# Prefer the GPU when one is available. NOTE(review): the text-generation
# pipeline below uses device_map="auto", so `device` is not passed to it;
# it only matters for any manual tensor placement — confirm it is needed.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def init_session_state():
    """Initialise per-session state: create an empty chat-history string on first run."""
    history_missing = 'history' not in st.session_state
    if history_missing:
        st.session_state['history'] = ""
# Initialize session state
init_session_state()


@st.cache_resource
def _load_classifier():
    """Load the multilingual zero-shot classifier once per server process.

    Streamlit reruns module-level code on every user interaction; without
    caching, the model would be re-instantiated on each rerun.
    """
    return pipeline(
        "zero-shot-classification",
        model="MoritzLaurer/mDeBERTa-v3-base-mnli-xnli",
    )


@st.cache_resource
def _load_generator():
    """Load the TinyLlama chat model once per server process."""
    return pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )


@st.cache_resource
def _load_dictionary():
    """Load the nested {category: {subcategory: answer-text}} lookup table.

    SECURITY NOTE: pickle.load can execute arbitrary code from the file —
    only load pickle files you created and ship with this app yourself.
    """
    with open('my_dict.pickle', 'rb') as file:
        return pickle.load(file)


classifier = _load_classifier()
pipe = _load_generator()
dictionary = _load_dictionary()
def classify(text, labels):
    """Zero-shot classify `text` against the candidate `labels`.

    Returns the raw pipeline output dict; callers read result["labels"][0]
    as the best-matching label.
    """
    return classifier(text, labels, multi_label=False)
# Main interaction: route the question through two zero-shot classification
# stages, look up the prepared answer, then let the chat model phrase a
# reply grounded only in that answer.
text = st.text_input('Enter some text:')  # Input field for new text
if text:
    # Stage 1: pick a top-level category; stage 2: pick a subcategory
    # within it. The nested dict maps category -> subcategory -> answer.
    labels = list(dictionary)
    output = classify(text, labels)["labels"][0]
    labels = list(dictionary[output])
    output2 = classify(text, labels)["labels"][0]
    answer = dictionary[output][output2]

    # Translate the stored answer and the user's question from Greek to
    # English before prompting the English-centric TinyLlama model.
    # NOTE(review): the `translate` package seems to cap input length
    # (around 500 chars per call) — confirm, and chunk long texts if so.
    translator = Translator(from_lang='el', to_lang='en')
    answer = translator.translate(answer)
    text = translator.translate(text)

    # Append to the running transcript. (Bug fix: the fragments were
    # previously concatenated without separating spaces, producing e.g.
    # "...informationanswer this question:..." in the stored prompt.)
    st.session_state.history += (
        "Based on the following information " + answer
        + " answer this question: " + text
        + " by reasoning step by step"
    )

    # The system prompt constrains the model to the retrieved answer only.
    content = "You are a friendly chatbot who answers question based only on this info:" + answer
    messages = [
        {
            "role": "system",
            "content": content,
        },
        {"role": "user", "content": text},
    ]
    # Render the chat turns with the model's own chat template, then sample
    # a completion.
    prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    outputs = pipe(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    # generated_text contains the prompt followed by the model's reply.
    st.text(outputs[0]['generated_text'])
    st.text(st.session_state.history)
# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
# <|system|>
# You are a friendly chatbot who always responds in the style of a pirate.</s>
# <|user|>
# How many helicopters can a human eat in one sitting?</s>
# <|assistant|>
# ...