WebashalarForML committed on
Commit 4456e93 · verified · 1 Parent(s): 0a8adb5

Update utils/mistral.py

Files changed (1)
  1. utils/mistral.py +9 -9
utils/mistral.py CHANGED

@@ -66,11 +66,11 @@ def Model_ProfessionalDetails_Output(resume, client):
         }
 
     response = ""
-    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True, temperature=0.35, response_format={"type": "json"}):
+    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True, temperature=0.35):
         response += message.choices[0].delta.content
 
-    #data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False, temperature=0.35, response_format={"type": "json"})
-    #print("This is without stream data ",data)
+    data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False, temperature=0.35)
+    print("This is without stream data ",data)
 
     try:
         clean_response = Data_Cleaner(response)
@@ -105,10 +105,10 @@ def Model_EducationalDetails_Output(resume, client):
         }
 
     response = ""
-    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True, temperature=0.35, response_format={"type": "json"}):
+    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=4096, stream=True, temperature=0.35):
         response += message.choices[0].delta.content
-    #data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False, temperature=0.35, response_format={"type": "json"})
-    #print("This is without stream data ",data)
+    data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False, temperature=0.35)
+    print("This is without stream data ",data)
 
     try:
         clean_response = Data_Cleaner(response)
@@ -144,11 +144,11 @@ def Model_PersonalDetails_Output(resume, client):
 
     # Response
     response = ""
-    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=True, temperature=0.35, response_format={"type": "json"}):
+    for message in client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=True, temperature=0.35):
         response += message.choices[0].delta.content
 
-    #data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False, temperature=0.35, response_format={"type": "json"})
-    #print("This is without stream data ",data)
+    data = client.chat_completion(messages=[system_role, user_prompt], max_tokens=3000, stream=False, temperature=0.35)
+    print("This is without stream data ",data)
 
     # Handle cases where the response might have formatting issues
     try:
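
For context, the call being edited in all three hunks is huggingface_hub's InferenceClient.chat_completion used in streaming mode. Below is a minimal, self-contained sketch of that pattern; the model ID and the prompt contents are placeholder assumptions (only the variable names system_role, user_prompt, and response come from the diff):

    # Sketch of the streaming accumulation pattern from utils/mistral.py.
    # Assumptions: the model ID is hypothetical, and prompt contents are
    # placeholders; only the variable names mirror the code in the diff.
    from huggingface_hub import InferenceClient

    client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")  # hypothetical model choice

    system_role = {"role": "system", "content": "Extract the resume fields and reply in JSON."}
    user_prompt = {"role": "user", "content": "John Doe, Python developer, 5 years experience ..."}

    response = ""
    for message in client.chat_completion(
        messages=[system_role, user_prompt],
        max_tokens=4096,
        stream=True,
        temperature=0.35,
    ):
        # Depending on the backend, delta.content may be None or empty on
        # some chunks (e.g. the final one), so guard before concatenating;
        # the code in the diff concatenates it directly.
        response += message.choices[0].delta.content or ""

    print(response)

Note that with response_format={"type": "json"} dropped from the calls, the accumulated text is no longer constrained to valid JSON, which is presumably why the surrounding code still runs it through Data_Cleaner inside a try block ("Handle cases where the response might have formatting issues").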