Upload 3 files
- app.py +5 -1
- getans.py +14 -0
- requirements.txt +4 -2
app.py
CHANGED
@@ -1,21 +1,25 @@
 from flask import Flask, render_template, request, jsonify
+from getans import get_response
 
 app = Flask(__name__)
 
 # Store chat history
 chat_history = []
 
+
 @app.route('/')
 def index():
     return render_template('index.html', chat_history=chat_history)
 
+
 @app.route('/send_message', methods=['POST'])
 def send_message():
     user_message = request.form['message']
     # Here you can process the user message and generate a response
-    bot_response =
+    bot_response = get_response(user_message, max_new_tokens=100)
     chat_history.append({'user': user_message, 'bot': bot_response})
     return jsonify(chat_history=chat_history)
 
+
 if __name__ == '__main__':
     app.run(debug=True)
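The updated route reads the message field from a form-encoded POST and returns the whole accumulated history as JSON. A minimal sketch of exercising it from a client, assuming the Flask dev server default of 127.0.0.1:5000 and the requests library (both assumptions, not part of the commit):

import requests

# POST a form field named "message", matching request.form['message'] above.
# The host/port assume the Flask dev server default; adjust for the Space URL.
resp = requests.post("http://127.0.0.1:5000/send_message", data={"message": "Hello!"})
print(resp.json()["chat_history"])  # the full history returned by jsonify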
getans.py
ADDED
@@ -0,0 +1,14 @@
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+
+
+def get_response(prompt, max_new_tokens=50):
+    inputs = tokenizer(prompt, return_tensors="pt")
+    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens, temperature=0.0001, do_sample=True)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)  # Use indexing instead of calling
+    return response
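Two notes on getans.py as committed: device is computed but never used, so the model and inputs stay on CPU; and temperature=0.0001 with do_sample=True is sampling that is effectively greedy decoding. Also, meta-llama/Llama-2-7b-chat-hf is a gated checkpoint, so from_pretrained needs granted access and an auth token in the Space. A sketch of a variant, under the assumption that greedy decoding on the selected device was the intent (not part of the commit):

model.to(device)  # actually move the weights to the selected device

def get_response(prompt, max_new_tokens=50):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)  # move the input ids too
    # do_sample=False is plain greedy decoding, which near-zero temperature approximates
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)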
requirements.txt
CHANGED
@@ -1,2 +1,4 @@
-Flask~=3.0.3
-gunicorn~=22.0.0
+Flask~=3.0.3
+gunicorn~=22.0.0
+torch
+transformers
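Unlike Flask and gunicorn, the two new entries are unpinned, so each rebuild of the Space may pull different torch and transformers versions. If reproducible builds matter, they can be pinned in the same ~= style; the versions below are placeholders, not tested values:

torch~=2.3
transformers~=4.41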