Hatman committed on
Commit
241dc23
·
verified ·
1 Parent(s): 7baf531

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +12 -34
main.py CHANGED
@@ -93,38 +93,21 @@ async def core():
93
  def getPrompt(prompt, modelID, attempts=1):
94
  response = {}
95
  print(modelID)
96
- if modelID != magic_prompt_model:
97
- chat = [
98
- {"role": "user", "content": prompt_base},
99
- {"role": "assistant", "content": prompt_assistant},
100
- {"role": "user", "content": prompt},
101
- ]
102
- try:
103
  response = groqClient.chat.completions.create(messages=chat, temperature=1, max_tokens=2048, top_p=1, stream=False, stop=None, model=modelID)
104
- except Exception as e:
105
- print(f"An error occurred: {e}")
106
- if attempts < 3:
107
- getPrompt(prompt, modelID, attempts + 1)
108
- else:
109
- try:
110
  apiData={"inputs":prompt, "parameters": parameters, "options": options, "timeout": 45}
111
  response = requests.post(API_URL + modelID, headers=headers, data=json.dumps(apiData))
112
- print(response)
113
- if response.status_code == 200:
114
- try:
115
- responseData = response.json()
116
- return responseData
117
- except ValueError as e:
118
- print(f"Error parsing JSON: {e}")
119
- else:
120
- print(f"Error from API: {response.status_code} - {response.text}")
121
- if attempts < 3:
122
- getPrompt(prompt, modelID, attempts + 1)
123
- except Exception as e:
124
- print(f"An error occurred: {e}")
125
- if attempts < 3:
126
- getPrompt(prompt, modelID, attempts + 1)
127
- print(str(response.json()))
128
  return response.json()
129
 
130
  @app.post("/inferencePrompt")
@@ -133,12 +116,7 @@ def inferencePrompt(item: Core):
133
  try:
134
  plain_response_data = getPrompt(item.itemString, prompt_model)
135
  magic_response_data = getPrompt(item.itemString, magic_prompt_model)
136
- print(plain_response_data['choices'])
137
- print(plain_response_data['choices'][0])
138
- print(plain_response_data['choices'][0]['message'])
139
  plain_response = plain_response_data['choices'][0]['message']['content']
140
- print(plain_response)
141
- print(magic_response_data[0]["generated_text"])
142
  returnJson = {"plain": plain_response, "magic": item.itemString + magic_response_data[0]["generated_text"]}
143
  print(f'Return Json {returnJson}')
144
  return returnJson
 
93
  def getPrompt(prompt, modelID, attempts=1):
94
  response = {}
95
  print(modelID)
96
+ try:
97
+ if modelID != magic_prompt_model:
98
+ chat = [
99
+ {"role": "user", "content": prompt_base},
100
+ {"role": "assistant", "content": prompt_assistant},
101
+ {"role": "user", "content": prompt},
102
+ ]
103
  response = groqClient.chat.completions.create(messages=chat, temperature=1, max_tokens=2048, top_p=1, stream=False, stop=None, model=modelID)
104
+ else:
 
 
 
 
 
105
  apiData={"inputs":prompt, "parameters": parameters, "options": options, "timeout": 45}
106
  response = requests.post(API_URL + modelID, headers=headers, data=json.dumps(apiData))
107
+ except Exception as e:
108
+ print(f"An error occurred: {e}")
109
+ if attempts < 3:
110
+ getPrompt(prompt, modelID, attempts + 1)
 
 
 
 
 
 
 
 
 
 
 
 
111
  return response.json()
112
 
113
  @app.post("/inferencePrompt")
 
116
  try:
117
  plain_response_data = getPrompt(item.itemString, prompt_model)
118
  magic_response_data = getPrompt(item.itemString, magic_prompt_model)
 
 
 
119
  plain_response = plain_response_data['choices'][0]['message']['content']
 
 
120
  returnJson = {"plain": plain_response, "magic": item.itemString + magic_response_data[0]["generated_text"]}
121
  print(f'Return Json {returnJson}')
122
  return returnJson