Update utils/mistral.py
utils/mistral.py (+12, -9)
@@ -65,12 +65,13 @@ def Model_ProfessionalDetails_Output(resume, client):
     '''
     }
 
-    response = ""
-    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True):#, temperature=0.35):
-        response += message.choices[0].delta.content
+    #response = ""
+    #for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True):#, temperature=0.35):
+    #    response += message.choices[0].delta.content
 
     data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False)#, temperature=0.35)
     print("This is without stream data ",data.choices[0].message.content)
+    response = data.choices[0].message.content
 
     try:
         clean_response = Data_Cleaner(response)
@@ -104,11 +105,12 @@ def Model_EducationalDetails_Output(resume, client):
     '''
     }
 
-    response = ""
-    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True):#, temperature=0.35):
-        response += message.choices[0].delta.content
+    #response = ""
+    #for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True):#, temperature=0.35):
+    #    response += message.choices[0].delta.content
     data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False)#, temperature=0.35)
     print("This is without stream data ",data.choices[0].message.content)
+    response = data.choices[0].message.content
 
     try:
         clean_response = Data_Cleaner(response)
@@ -143,12 +145,13 @@ def Model_PersonalDetails_Output(resume, client):
     }
 
     # Response
-    response = ""
-    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=True):#, temperature=0.35):
-        response += message.choices[0].delta.content
+    #response = ""
+    #for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=True):#, temperature=0.35):
+    #    response += message.choices[0].delta.content
 
     data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False)#, temperature=0.35)
     print("This is without stream data ",data.choices[0].message.content)
+    response = data.choices[0].message.content
 
     # Handle cases where the response might have formatting issues
     try:
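The commit makes the same change in all three Model_*_Output helpers: the token-by-token streaming loop is commented out in favor of a single non-streaming call, and response is assigned from the completed message so the existing Data_Cleaner(response) path keeps working. A minimal sketch of the two patterns against huggingface_hub's InferenceClient follows; the model ID and message contents are placeholders for illustration, not taken from this repo (the real helpers receive client and build system_role/user_prompt from the resume text):

from huggingface_hub import InferenceClient

# Placeholder model and prompts; swap in the repo's own client and messages.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
system_role = {"role": "system", "content": "Extract the requested resume fields as JSON."}
user_prompt = {"role": "user", "content": "<resume text>"}

# Old pattern (removed): stream incremental deltas and accumulate them.
response = ""
for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=True):
    delta = message.choices[0].delta.content
    if delta:  # a chunk's delta.content can be empty/None, e.g. on the final chunk
        response += delta

# New pattern: one blocking call returns the whole completion at once.
data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False)
response = data.choices[0].message.content

Since these helpers only parse the finished JSON via Data_Cleaner and never display partial output, the non-streaming call is the simpler fit; it also sidesteps the None-delta edge case that the original streaming loop did not guard against.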