# Make a local Petals checkout importable (needed only for the distributed model below)
import sys
sys.path.insert(0, './petals/')
 
import transformers
import streamlit as st

# Import a Petals model
# from src.client.remote_model import DistributedBloomForCausalLM


MODEL_NAME = "bigscience/test-bloomd-6b3"  # choose the BLOOM checkpoint to load
# Bootstrap peers for joining a Petals swarm (used only with the distributed model)
# INITIAL_PEERS = ["/ip4/193.106.95.184/tcp/31000/p2p/QmSg7izCDtowVTACbUmWvEiQZNY4wgCQ9T9Doo66K59X6q"]
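
# Optional: run inference over a Petals swarm instead of a local copy.
# A commented-out sketch, assuming the client API from the commented import above;
# the exact signature may differ across Petals versions.
# model = DistributedBloomForCausalLM.from_pretrained(
#     MODEL_NAME, initial_peers=INITIAL_PEERS
# )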

# Load the tokenizer and a local (non-distributed) copy of the model
tokenizer = transformers.BloomTokenizerFast.from_pretrained(MODEL_NAME)
model = transformers.BloomForCausalLM.from_pretrained(MODEL_NAME)

text = st.text_input('Enter some text')
max_new_tokens = st.slider('Max new tokens', min_value=1, max_value=100)

if text:
    # Tokenize the prompt, generate a continuation, and display the decoded output
    input_ids = tokenizer([text], return_tensors="pt").input_ids
    output = model.generate(input_ids, max_new_tokens=max_new_tokens)
    output_text = tokenizer.batch_decode(output)
    st.write(output_text)
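
# To try the demo locally (assuming this script is saved as app.py):
#   streamlit run app.py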