Pippoz committed on
Commit 0caba13 · 1 parent: 2446aa4

Update app.py

Files changed (1): app.py (+0, -20)
app.py CHANGED
@@ -1,39 +1,19 @@
  import streamlit as st
  import time
- import tokenizers
  from transformers import pipeline
  import torch
- #from transformers import AutoModelForCausalLM, AutoTokenizer
 
- #@st.cache(allow_output_mutation=True)
- #def define_model():
- #    model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b", torch_dtype=torch.float16).cuda()
- #    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
- #    return model, tokenizer
  st.markdown('## OPT-1.3 Billion parameter (Meta)')
 
  with st.spinner('Loading Model... (This may take a while)'):
      generator = pipeline('text-generation', model="facebook/opt-1.3b", skip_special_tokens=True)
  st.success('Model loaded correctly!')
 
- #@st.cache(allow_output_mutation=True)
- #def opt_model(prompt, model, tokenizer, num_sequences=1, max_length=50):
- #    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
- #    generated_ids = model.generate(input_ids, num_return_sequences=num_sequences, max_length=max_length)
- #    answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
- #    return answer
-
-
- #model, tokenizer = define_model()
-
  prompt = st.text_area('Your prompt here',
      '''Hello, I'm conscious and''')
 
  answer = generator(prompt, max_length=100, no_repeat_ngram_size=3, early_stopping=True, num_beams=5)
 
- #answer = opt_model(prompt, model, tokenizer)
- #lst = ['ciao come stai sjfsbd dfhsdf fuahfuf feuhfu wefwu ']
- #answer = define_model(prompt)
  lst = answer[0]['generated_text']
 
  t = st.empty()
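
Note: the commit removes the commented-out manual AutoModelForCausalLM path, but it also drops that path's @st.cache idea, so the pipeline is rebuilt on every Streamlit rerun. A minimal sketch of combining the two, using the same @st.cache(allow_output_mutation=True) decorator the removed code experimented with (load_generator is a hypothetical helper name, not part of this commit):

import streamlit as st
from transformers import pipeline

@st.cache(allow_output_mutation=True)   # same decorator as in the removed code
def load_generator():
    # Hypothetical helper: build the text-generation pipeline once,
    # then reuse the cached object across Streamlit reruns.
    return pipeline('text-generation', model="facebook/opt-1.3b")

generator = load_generator()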
 
app.py after this commit:

import streamlit as st
import time  # not used in the lines shown here
from transformers import pipeline
import torch

st.markdown('## OPT-1.3 Billion parameter (Meta)')

# Build the Hugging Face text-generation pipeline; the spinner keeps the UI
# responsive while the 1.3B-parameter checkpoint downloads and loads.
with st.spinner('Loading Model... (This may take a while)'):
    generator = pipeline('text-generation', model="facebook/opt-1.3b", skip_special_tokens=True)
st.success('Model loaded correctly!')

# Prompt box pre-filled with a default seed text.
prompt = st.text_area('Your prompt here',
    '''Hello, I'm conscious and''')

# Beam search with 5 beams, no repeated trigrams, up to 100 tokens.
answer = generator(prompt, max_length=100, no_repeat_ngram_size=3, early_stopping=True, num_beams=5)

# The pipeline returns a list of dicts; keep only the generated string.
lst = answer[0]['generated_text']

# Placeholder element that later code can overwrite in place.
t = st.empty()
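
The trailing t = st.empty() together with the otherwise-unused time import suggests a typewriter-style reveal of the generated text; the file shown here ends at the placeholder. A hedged sketch of how such a loop is commonly written in Streamlit (the loop itself is an assumption, not part of this commit):

import time

t = st.empty()
shown = ''
for word in lst.split():    # lst holds answer[0]['generated_text']
    shown += word + ' '
    t.markdown(shown)       # overwrite the placeholder in place
    time.sleep(0.05)        # small delay for the streaming effect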