File size: 1,253 Bytes
7d5081f
 
4946d76
7d5081f
 
8f074bc
c9ee852
8f074bc
 
 
8aa5b8d
8f074bc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b1afa65
5c4eafa
f11888f
7d5081f
8f074bc
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import streamlit as st
import time
from transformers import pipeline
import torch

# Page title: demo front-end for Meta's OPT text-generation models.
st.markdown('## Text-generation OPT from Meta ')

@st.cache(allow_output_mutation=True)
def get_model(model_name=None):
    """Load and cache a Hugging Face text-generation pipeline.

    Args:
        model_name: Hub model id (e.g. 'facebook/opt-125m'). Defaults to
            the module-level `model` chosen via the radio widget, so the
            existing zero-argument call site keeps working.

    Returns:
        A `transformers` text-generation pipeline.

    Bug fixed: the original took no arguments while reading the global
    `model`, so `st.cache` keyed on nothing — switching models in the UI
    kept returning the first pipeline ever loaded. Passing the name as an
    argument makes it part of the cache key.
    NOTE(review): `st.cache` is deprecated in modern Streamlit in favour
    of `st.cache_resource`; kept as-is to match the file's API vintage
    (it still uses `st.beta_columns`).
    """
    if model_name is None:
        # Backward-compatible fallback to the globally selected model.
        model_name = model
    return pipeline('text-generation', model=model_name)

# Two-column layout: wide prompt area (col1), model picker (col2).
# NOTE(review): `st.beta_columns` is removed in modern Streamlit
# (use `st.columns`); kept to match the Streamlit version this file targets.
col1, col2 = st.beta_columns([2, 1])

with col2:
    select_model = st.radio(
        "Select the model to use:",
        ('OPT-125m', 'OPT-350m', 'OPT-1.3b'))

    # Map the UI label to the Hugging Face Hub model id.
    if select_model == 'OPT-1.3b':
        model = 'facebook/opt-1.3b'
    elif select_model == 'OPT-350m':
        model = 'facebook/opt-350m'
    elif select_model == 'OPT-125m':
        model = 'facebook/opt-125m'

    if select_model:
        with st.spinner('Loading Model... (This may take a while)'):
            generator = get_model()
            st.success('Model loaded correctly!')

with col1:
    prompt = st.text_area('Your prompt here',
        '''AI will help humanity?''')

# Bug fixed: in the original, the two lines below sat at column 0 while the
# typewriter loop after them was indented with no enclosing block — an
# IndentationError that prevented the script from parsing at all. The whole
# generate-and-stream sequence now lives at top level in a consistent block.
answer = generator(prompt, max_length=60, no_repeat_ngram_size=2,
                   early_stopping=True, num_beams=5)
lst = answer[0]['generated_text']

# Typewriter effect: reveal the generated text one character at a time.
# Bug fixed: the original iterated range(len(lst)), whose last slice was
# lst[0:len-1] — the final character was never displayed. Iterating up to
# len(lst) inclusive shows the complete text on the last frame.
t = st.empty()
for i in range(len(lst) + 1):
    t.markdown(" %s..." % lst[0:i])
    time.sleep(0.04)