Update app.py
app.py CHANGED
@@ -60,4 +60,36 @@ demo = gr.ChatInterface(
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
+
+
+# import gradio as gr
+
+# gr.load("models/meta-llama/Meta-Llama-3.1-70B-Instruct").launch()
+
+# import streamlit as st
+# from transformers import AutoTokenizer, AutoModelForCausalLM
+
+# # Load model directly
+# tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
+# model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
+
+# # Initialize chat history
+# if "chat_history" not in st.session_state:
+#     st.session_state.chat_history = []
+
+# # Display chat history
+# for chat in st.session_state.chat_history:
+#     st.write(f"User: {chat['user']}")
+#     st.write(f"Response: {chat['response']}")
+
+# # Get user input
+# user_input = st.text_input("Enter your message:")
+
+# # Generate response
+# if st.button("Send"):
+#     inputs = tokenizer(user_input, return_tensors="pt")
+#     outputs = model.generate(**inputs)
+#     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+#     st.session_state.chat_history.append({"user": user_input, "response": response})
+#     st.write(f"Response: {response}")
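For reference, the commented-out Streamlit sketch in this commit would not quite work if uncommented: tokenizer(user_input, ...) feeds raw text to an instruction-tuned chat model without its chat template, and decoding outputs[0] echoes the prompt back along with the reply. Below is a minimal runnable version of the same idea; the model ID is taken from the diff, while the chat-template handling, max_new_tokens, dtype, and resource caching are illustrative assumptions and not part of this commit.

# Sketch of the Streamlit chat loop from the diff, made runnable.
# Assumes streamlit, torch, and transformers are installed and that the
# gated meta-llama/Meta-Llama-3.1-8B-Instruct weights are accessible.
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "meta-llama/Meta-Llama-3.1-8B-Instruct"  # model ID from the diff above

@st.cache_resource  # load the model once, not on every Streamlit rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
    return tokenizer, model

tokenizer, model = load_model()

# Initialize chat history
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Display chat history
for chat in st.session_state.chat_history:
    st.write(f"User: {chat['user']}")
    st.write(f"Response: {chat['response']}")

# Get user input
user_input = st.text_input("Enter your message:")

# Generate response
if st.button("Send") and user_input:
    # An instruct model expects its chat template, not raw text.
    messages = [{"role": "user", "content": user_input}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    outputs = model.generate(input_ids, max_new_tokens=256)
    # Decode only the newly generated tokens, not the echoed prompt.
    response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
    st.session_state.chat_history.append({"user": user_input, "response": response})
    st.write(f"Response: {response}")

Loading an 8B model this way needs a GPU-backed Space (or a lot of CPU RAM and patience), which is presumably why the commit keeps the existing gr.ChatInterface path live and leaves this variant commented out.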