Rename app.py to the_best_app.py
app.py → the_best_app.py (RENAMED, +3 -3)
@@ -46,7 +46,7 @@ def generate_response(text):
         print(f"Messages: {messages}, ", f"current time: {current_time_gmt()}")

         completion = client.chat.completions.create(
-            model="microsoft/Phi-3.5-mini-instruct",
+            model="microsoft/Phi-3.5-mini-instruct", # good in english. not so good in hebrew # i need to change it!!!
             messages=messages,
             max_tokens=2048,
             temperature=0.5,
@@ -55,7 +55,7 @@ def generate_response(text):
         #print("\ncompletion: ", completion.choices[0].message.content, f"\ncurrent time: {current_time_gmt()}")
         return completion.choices[0].message.content
     elif language == "english":
-        content = "keep it short but tell your decision making process, " + text
+        content = "keep it short but tell your decision making process, " + text # good
         print("content: ", content)
         messages = [
             { "role": "user", "content": content }
@@ -63,7 +63,7 @@ def generate_response(text):
         print(f"Messages: {messages}, ", f"current time: {current_time_gmt()}")

         completion = client.chat.completions.create(
-            model="mistralai/Mistral-Nemo-Instruct-2407",
+            model="mistralai/Mistral-Nemo-Instruct-2407", # good
             messages=messages,
             max_tokens=2048,
             temperature=0.5,
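
The three hunks only add comments next to the model arguments of client.chat.completions.create. For context, here is a minimal, self-contained sketch of how the renamed the_best_app.py appears to route requests. It assumes `client` is a huggingface_hub.InferenceClient (its OpenAI-style chat.completions.create matches the calls in the diff), that the first branch handles Hebrew (inferred from the new comment on the Phi-3.5 line), and that an HF_TOKEN environment variable and a `language` value are available; none of these details are confirmed by the diff itself.

# Hypothetical sketch, not the actual the_best_app.py.
import os
from huggingface_hub import InferenceClient

# Assumption: the app authenticates with a token from the environment.
client = InferenceClient(token=os.environ.get("HF_TOKEN"))

def generate_response(text, language="english"):
    if language == "hebrew":
        # Assumed Hebrew branch: the commit comment flags Phi-3.5 as weak in Hebrew.
        model = "microsoft/Phi-3.5-mini-instruct"
        content = text
    elif language == "english":
        # English branch: the commit marks Mistral-Nemo as "good".
        model = "mistralai/Mistral-Nemo-Instruct-2407"
        content = "keep it short but tell your decision making process, " + text
    else:
        raise ValueError(f"unsupported language: {language}")

    messages = [{"role": "user", "content": content}]
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=2048,
        temperature=0.5,
    )
    return completion.choices[0].message.content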