Update app.py
app.py
CHANGED
@@ -22,9 +22,17 @@ inputs = tokenizer(prompt, return_tensors="pt")
 outputs = model(**inputs) #, labels=inputs["input_ids"])
 
 
-last_hidden_states = outputs.last_hidden_state
+# last_hidden_states = outputs.last_hidden_state
+
+# output = last_hidden_states #['last_hidden_states']
+
+
+# input_text = "The theory of special relativity states "
+# input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
+
+outputs = model.generate(**inputs)
+output = tokenizer.batch_decode(outputs, skip_special_tokens=True)
 
-output = last_hidden_states #['last_hidden_states']
 
 # decoded = tokenizer.decode(output)
 
@@ -43,8 +51,8 @@ output = last_hidden_states #['last_hidden_states']
 with st.container():
     st.write('\n\n')
     st.write('LLM-LANAChat\n\n')
+    st.write(outputs)
     st.write(output)
-    st.write(type(output))
 
 print('\nsaida gerada.')
 print('\n\n')
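For context, here is a minimal sketch of the generate-and-decode flow this commit switches to. The checkpoint name, prompt widget, and max_new_tokens value are illustrative assumptions and are not taken from this Space's app.py:

# Minimal sketch of the new flow (assumed: checkpoint, prompt source, max_new_tokens).
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder checkpoint; the Space may load a different model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = st.text_input("Prompt", "The theory of special relativity states ")
inputs = tokenizer(prompt, return_tensors="pt")

# generate() produces new token ids; batch_decode() turns them back into strings
outputs = model.generate(**inputs, max_new_tokens=50)
output = tokenizer.batch_decode(outputs, skip_special_tokens=True)

with st.container():
    st.write('LLM-LANAChat\n\n')
    st.write(output)  # a list with one decoded string per generated sequence

Unlike reading outputs.last_hidden_state from a plain forward pass, generate() actually produces new tokens, so the decoded text can be written straight to the page.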