DevBM committed on
Commit
d67da2f
·
verified ·
1 Parent(s): 6a69891

Upload 2 files

Browse files
Files changed (2) hide show
  1. a.py +42 -0
  2. requirements.txt +13 -0
a.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import T5Tokenizer, T5ForConditionalGeneration
# Removed unused imports: `pipeline` and `AutoModelForSequenceClassification`
# were never referenced in this script.

st.subheader('Pipe5: Text-To-Text Generation -> Que. Generation', divider='orange')


@st.cache_resource
def _load(model_name: str):
    """Return (tokenizer, model) for *model_name*, cached across reruns.

    Streamlit re-executes the whole script on every widget interaction;
    without `st.cache_resource` the checkpoint (up to 8B params) would be
    reloaded on every click. flan-t5 is an encoder-decoder and needs the
    T5 classes; the other options are decoder-only causal LMs.
    NOTE(review): the meta-llama checkpoints are gated on the Hub and
    require an access token — confirm the deployment is authenticated.
    """
    if model_name == 'google/flan-t5-base':
        tokenizer = T5Tokenizer.from_pretrained(model_name)
        model = T5ForConditionalGeneration.from_pretrained(model_name)
    else:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model


if st.toggle(label='Show Pipe5'):
    models = [
        'google/flan-t5-base',
        'meta-llama/Meta-Llama-3-8B',
        'meta-llama/Meta-Llama-3-8B-Instruct',
    ]
    model_name = st.selectbox(
        label='Select Model',
        options=models,
        # Fixed: placeholder was 'google/vit-base-patch16-224', an image
        # model unrelated to this text pipeline and absent from `options`.
        placeholder=models[0],
    )
    tokenizer, model = _load(model_name)

    input_text = st.text_area(
        label='Enter the text from which question is to be generated:',
        value='Bruce Wayne is the Batman.',
    )
    prompt = 'Generate a question from this: ' + input_text
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids

    outputs = model.generate(input_ids)
    # Fixed: was outputs[0][1:len(outputs[0])-1], which blindly dropped the
    # first and last token to hide T5's <pad>/</s> markers — that corrupts
    # output from the Llama branch (different special-token layout).
    # skip_special_tokens handles both model families correctly.
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    if st.checkbox(label='Show Tokenized output'):
        st.write(outputs)
    st.write("Output is:")
    st.write(f"{output_text}")

    # Free-form access to the same model, without the question-generation
    # prompt prefix. Distinct local names avoid shadowing the widgets above.
    if st.toggle(label='Access model unrestricted'):
        free_text = st.text_area('Enter text')
        free_ids = tokenizer(free_text, return_tensors='pt').input_ids
        free_out = model.generate(free_ids)
        st.write(tokenizer.decode(free_out[0]))
        st.write(free_out)
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain==0.0.184
2
+ PyPDF2==3.0.1
3
+ python-dotenv==1.0.0
4
+ streamlit==1.18.1  # NOTE(review): a.py uses st.toggle and st.subheader(divider=...), which were added in much later streamlit releases — confirm and bump this pin
5
+ faiss-cpu==1.7.4
6
+ altair==4
7
+ tiktoken==0.4.0
8
+ # huggingface llms (already enabled below — the old "uncomment to use" note was stale)
+ # NOTE(review): a.py also imports transformers (plus torch and sentencepiece for T5), which are not pinned here — add them
9
+ huggingface-hub==0.14.1
10
+
11
+ # instructor embeddings (already enabled below — the old "uncomment to use" note was stale)
12
+ InstructorEmbedding==1.0.1
13
+ sentence-transformers==2.2.2