Update run.py
run.py
CHANGED
@@ -7,16 +7,16 @@ from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
 
 
 
-class Count:
-    def __init__(self):
-        self.n = 0
-        self.imout = np.zeros((1000, 1000))
+# class Count:
+#     def __init__(self):
+#         self.n = 0
+#         self.imout = np.zeros((1000, 1000))
 
-    def step(self):
-        self.n += 1
+#     def step(self):
+#         self.n += 1
 
 
-cnt = 0
+# cnt = 0
 weights2load = 'segformer_ep15_loss0.00.pth'
 id2label = {0: 'seal', 255: 'bck'}
 label2id = {'seal': 0, 'bck': 255}
@@ -31,23 +31,25 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.load_state_dict(torch.load(weights2load, weights_only=True, map_location=device))
 model.to(device).eval()
 
-counter = Count()
+# counter = Count()
 
-    if (counter.imout.sum() == 0) or ((cnt % 100) == 0):
-        logits = outputs.logits.cpu().detach().numpy() ** 2
-        counter.imout = (logits[0, 0] - logits[0, 0].min()) / (logits[0, 0].max() - logits[0, 0].min())
-    return counter.imout, cnt #np.flipud(im)
+def segment(im, interval_s=2):
+    # if (counter.imout.sum() == 0) or ((cnt % 100) == 0):
+    pixel_values = image_processor(im, return_tensors="pt").pixel_values.to(device)
+    outputs = model(pixel_values=pixel_values)
+    logits = outputs.logits.cpu().detach().numpy() ** 2
+    imout = (logits[0, 0] - logits[0, 0].min()) / (logits[0, 0].max() - logits[0, 0].min())
+    return imout #, cnt #np.flipud(im)
 
-with gr.Blocks() as demo:
-    inp = gr.Image(sources=["webcam"], streaming=True)
-    inp.stream(segment, inputs=inp, outputs=[gr.Image(), gr.Number()])
+# with gr.Blocks() as demo:
+#     inp = gr.Image(sources=["webcam"], streaming=True)
+#     inp.stream(segment, inputs=inp, outputs=[gr.Image(), gr.Number()])
+
+demo = gr.Interface(
+    segment,
+    [gr.Image(source="webcam", tool=None)],
+    ["image"],
+)
 
 if __name__ == "__main__":
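The rewritten `segment()` no longer caches anything in `Count`: it squares the raw SegFormer logits, takes the 'seal' channel (index 0 per `id2label`), and min-max normalizes it to [0, 1] so Gradio can render it as a grayscale image. A minimal sketch of that post-processing on a dummy logits array (the array shape and contents are stand-ins; real SegFormer logits come from the model at a reduced spatial resolution):

```python
# Post-processing from segment(), run on a dummy (batch, classes, H, W) array
# instead of a real model forward pass.
import numpy as np

logits = np.random.randn(1, 2, 128, 128) ** 2            # stand-in for outputs.logits ** 2
seal = logits[0, 0]                                       # channel 0 = 'seal' per id2label
imout = (seal - seal.min()) / (seal.max() - seal.min())   # min-max normalize to [0, 1]
print(imout.shape, float(imout.min()), float(imout.max()))  # (128, 128) 0.0 1.0
```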
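One thing to note: the commented-out Blocks version uses the Gradio 4.x-style `sources=["webcam"]` keyword, while the new `gr.Interface` call passes `source="webcam"` and `tool=None`, which are Gradio 3.x parameters. If the Space pins Gradio 4.x, the input component would likely need the newer keyword instead. A hedged, self-contained sketch, with a hypothetical `fake_segment` standing in for the real model so it runs without the checkpoint:

```python
# Sketch assuming Gradio 4.x, where gr.Image takes sources=[...] and the
# `source`/`tool` keywords no longer exist. fake_segment is a placeholder so
# the demo runs without segformer_ep15_loss0.00.pth.
import numpy as np
import gradio as gr

def fake_segment(im):
    # Stand-in for segment(): min-max normalize a grayscale version of the frame.
    gray = np.asarray(im, dtype=np.float32).mean(axis=-1)
    return (gray - gray.min()) / (gray.max() - gray.min() + 1e-8)

demo = gr.Interface(
    fake_segment,
    gr.Image(sources=["webcam"]),  # 4.x keyword; 3.x used source="webcam", tool=None
    "image",
)

if __name__ == "__main__":
    demo.launch()
```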