sahilmayekar committed on
Commit
c9d2ea5
·
1 Parent(s): fc952fa

Code Updated

Browse files
Files changed (1) hide show
  1. app.py +38 -2
app.py CHANGED
@@ -1,4 +1,40 @@
1
  import streamlit as st
 
 
 
 
 
2
 
3
- x = st.slider('Select a value')
4
- st.write(x, 'squared is', x * x)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
3
+ import pandas as pd
4
+ import numpy as np
5
+ from datasets import load_dataset
6
+ import re
7
 
8
+ # Load the dataset
9
+ ds = load_dataset("Vezora/Open-Critic-GPT")
10
+
11
+ st.write("Dataset")
12
+
13
+ # Load the model and tokenizer
14
+ model_name = "shareAI/llama3.1-8b-instruct-dpo-zh"
15
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
16
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
17
+
18
+ # Function to generate a response from the model
19
+ def generate_response(human_text):
20
+ inputs = tokenizer.encode(human_text, return_tensors='pt')
21
+ outputs = model.generate(inputs, max_length=50, num_beams=5, early_stopping=True)
22
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
23
+ return response
24
+
25
+ # Iterate over the first few examples in the dataset and display them with model responses
26
+ for i, x in enumerate(ds["train"]):
27
+ col1, col2, col3 = st.columns(3)
28
+ if i < 3:
29
+ with col1:
30
+ st.code(x["Human"])
31
+
32
+ with col2:
33
+ st.write(x["Assistant"])
34
+
35
+ with col3:
36
+ # Generate and display the model's response
37
+ response = generate_response(x["Human"])
38
+ st.write(response)
39
+ else:
40
+ break