Upload 29 files
main.py CHANGED
@@ -119,6 +119,7 @@ def getPrompt(prompt, modelID, attempts=1):
 
 @app.post("/inferencePrompt")
 def inferencePrompt(item: Core):
+    print("Start API Inference Prompt")
     try:
         plain_response_data = getPrompt(item.itemString, prompt_model)
         magic_response_data = getPrompt(item.itemString, magic_prompt_model)
@@ -240,6 +241,7 @@ def inferenceAPI(model, item, attempts = 1):
     response = requests.request("POST", API_URL + model, headers=headers, data=api_data)
     if response is None:
         inferenceAPI(get_random_model(activeModels['text-to-image']), item, attempts+1)
+    print(response.content[0:200])
     image_stream = BytesIO(response.content)
     image = Image.open(image_stream)
     image.save("response.png")
@@ -304,6 +306,7 @@ def nsfw_check(attempts = 1):
 
 @app.post("/api")
 async def inference(item: Item):
+    print("Start API Inference")
     activeModels = InferenceClient().list_deployed_models()
     base64_img = ""
     model = item.modelID