Nerva1228 committed
Commit d872a4a · verified · 1 Parent(s): 3ef185b

Update app.py

Files changed (1):
  1. app.py +5 -131
app.py CHANGED
@@ -1,121 +1,3 @@
-# import gradio as gr
-# import spaces
-# from PIL import Image
-# import torch
-# from transformers import AutoModelForCausalLM, AutoProcessor
-# import requests
-# import json
-
-# device = "cuda" if torch.cuda.is_available() else "cpu"
-
-# model = AutoModelForCausalLM.from_pretrained("MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True).to(device)
-# processor = AutoProcessor.from_pretrained("MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True)
-
-
-# SERVER_URL = 'http://43.156.72.113:8188'
-# FETCH_TASKS_URL = SERVER_URL + '/fetch/'
-# UPDATE_TASK_STATUS_URL = SERVER_URL + '/update/'
-
-# def fetch_task(category, fetch_all=False):
-#     params = {'fetch_all': 'true' if fetch_all else 'false'}
-#     response = requests.post(FETCH_TASKS_URL + category, params=params)
-#     if response.status_code == 200:
-#         return response.json()
-#     else:
-#         print(f"Failed to fetch tasks: {response.status_code} - {response.text}")
-#         return None
-
-# def update_task_status(category, task_id, status, result=None):
-#     data = {'status': status}
-#     if result:
-#         data['result'] = result
-
-#     response = requests.post(UPDATE_TASK_STATUS_URL + category + f'/{task_id}', json=data)
-#     if response.status_code == 200:
-#         print(f"Task {task_id} updated successfully: {json.dumps(response.json(), indent=4)}")
-#     else:
-#         print(f"Failed to update task {task_id}: {response.status_code} - {response.text}")
-
-
-# @spaces.GPU(duration=200)
-# def infer(prompt, image, request: gr.Request):
-
-#     if request:
-#         print("Request headers:", request.headers)
-#         print("IP address:", request.client.host)
-#         print("Query params:", dict(request.query_params))
-#         print("Session hash:", request.session_hash)
-
-#     max_size = 256
-#     width, height = image.size
-#     if width > height:
-#         new_width = max_size
-#         new_height = int((new_width / width) * height)
-#     else:
-#         new_height = max_size
-#         new_width = int((new_height / height) * width)
-
-#     image = image.resize((new_width, new_height), Image.LANCZOS)
-
-#     inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
-
-#     generated_ids = model.generate(
-#         input_ids=inputs["input_ids"],
-#         pixel_values=inputs["pixel_values"],
-#         max_new_tokens=1024,
-#         do_sample=False,
-#         num_beams=3
-#     )
-
-#     generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
-
-#     parsed_answer = processor.post_process_generation(generated_text, task=prompt, image_size=(image.width, image.height))
-
-#     return parsed_answer
-
-# css = """
-# #col-container {
-#     margin: 0 auto;
-#     max-width: 800px;
-# }
-# """
-
-# with gr.Blocks(css=css) as app:
-#     with gr.Column(elem_id="col-container"):
-#         gr.Markdown(f"""# Tag The Image
-#         Get tag based on images using the Florence-2-base-PromptGen-v1.5 model.
-#         """)
-
-#         with gr.Row():
-#             prompt = gr.Text(
-#                 label="Prompt",
-#                 show_label=False,
-#                 max_lines=1,
-#                 placeholder="Enter your prompt or blank here.",
-#                 container=False,
-#             )
-#             image_input = gr.Image(
-#                 label="Image",
-#                 type="pil",
-#                 show_label=False,
-#                 container=False,
-#             )
-#             run_button = gr.Button("Run", scale=0)
-
-#         result = gr.Textbox(label="Generated Text", show_label=False)
-
-
-#         gr.on(
-#             triggers=[run_button.click, prompt.submit],
-#             fn=infer,
-#             inputs=[prompt, image_input],
-#             outputs=[result]
-#         )
-
-# app.queue()
-# app.launch(show_error=True)
-
-
 import gradio as gr
 import spaces
 from PIL import Image
@@ -123,15 +5,13 @@ import torch
 from transformers import AutoModelForCausalLM, AutoProcessor
 import requests
 import json
-from io import BytesIO
-
+# from io import BytesIO
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 model = AutoModelForCausalLM.from_pretrained("MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True).to(device)
 processor = AutoProcessor.from_pretrained("MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True)
 
-
 SERVER_URL = 'http://43.156.72.113:8188'
 FETCH_TASKS_URL = SERVER_URL + '/fetch/'
 UPDATE_TASK_STATUS_URL = SERVER_URL + '/update/'
@@ -158,14 +38,8 @@ def update_task_status(category, task_id, status, result=None):
 
 
 @spaces.GPU(duration=150)
-def infer(request: gr.Request):
-    # if request:
-    #     print("Request headers:", request.headers)
-    #     print("IP address:", request.client.host)
-    #     print("Query params:", dict(request.query_params))
-    #     print("Session hash:", request.session_hash)
-
-    # Fetch tasks
+def infer():
+
     img2text_tasks = fetch_task('img2text', fetch_all=True)
 
     if not img2text_tasks:
@@ -212,10 +86,10 @@ def infer(request: gr.Request):
         except Exception as e:
             print(f"Error processing task {task['id']}: {e}")
             # If error occurs, update the task status to Failed
-            update_task_status('img2text', task['id'], 'Failed')
+            update_task_status('img2text', task['id'], 'Failed', e)
             return f"Error processing task {task['id']}: {e}"
 
-    return "No pending tasks found."
+    return "Successed! No pending tasks found."
 
 
 css = """
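For context, below is a minimal standalone sketch of the fetch → process → update round-trip that infer() drives against the task server defined in app.py. It is an illustration, not the app's exact code: the 'Completed' status value and the shape of the fetched JSON (a list of task objects with an 'id' field) are assumptions inferred from fetch_task and update_task_status in the diff, not confirmed server API behavior.

# Sketch of the task worker loop (endpoints from app.py; JSON shapes assumed).
import requests

SERVER_URL = 'http://43.156.72.113:8188'

def poll_once(category='img2text'):
    # fetch_all=true asks the server for every pending task in the category
    resp = requests.post(f"{SERVER_URL}/fetch/{category}", params={'fetch_all': 'true'})
    if resp.status_code != 200:
        print(f"Failed to fetch tasks: {resp.status_code} - {resp.text}")
        return
    for task in resp.json() or []:  # assumed: a list of {'id': ...} task objects
        try:
            result = f"caption for task {task['id']}"  # stand-in for the model call
            requests.post(f"{SERVER_URL}/update/{category}/{task['id']}",
                          json={'status': 'Completed', 'result': result})
        except Exception as e:
            # As in this commit, a failure reports the exception back to the server;
            # str(e) keeps the payload JSON-serializable.
            requests.post(f"{SERVER_URL}/update/{category}/{task['id']}",
                          json={'status': 'Failed', 'result': str(e)})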
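The model call inside infer() is untouched by this commit; for local testing, here is a self-contained sketch of the same Florence-2-base-PromptGen-v1.5 captioning step, mirroring the code in this file. The <MORE_DETAILED_CAPTION> task token and the image path are illustrative assumptions (see the model card for the supported prompt tokens).

import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(
    "MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True).to(device)
processor = AutoProcessor.from_pretrained(
    "MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True)

prompt = "<MORE_DETAILED_CAPTION>"  # assumed PromptGen task token
image = Image.open("example.jpg")   # placeholder image path

inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
generated_ids = model.generate(
    input_ids=inputs["input_ids"],
    pixel_values=inputs["pixel_values"],
    max_new_tokens=1024,
    do_sample=False,
    num_beams=3,
)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
# Convert the raw decoder output into the task's structured answer
parsed_answer = processor.post_process_generation(
    generated_text, task=prompt, image_size=(image.width, image.height))
print(parsed_answer)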