import gradio as gr
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation
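
# Load the SegFormer-B0 checkpoint fine-tuned on ADE20K (150 classes) from the
# Hugging Face Hub. Newer transformers releases also provide SegformerImageProcessor,
# which supersedes SegformerFeatureExtractor; the original class is kept here.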
feature_extractor = SegformerFeatureExtractor.from_pretrained(
    "nvidia/segformer-b0-finetuned-ade-512-512"
)
model = TFSegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b0-finetuned-ade-512-512"
)

def ade_palette():
    """ADE20K palette that maps each class to RGB values."""
    return [
        [22, 122, 213],
        [240, 3, 156],
        [87, 176, 33],
        [154, 88, 111],
        [63, 54, 244],
        [201, 235, 59],
        [102, 66, 183],
        [94, 147, 5],
        [39, 198, 247],
        [17, 149, 92],
        [130, 78, 184],
        [246, 119, 107],
        [225, 23, 68],
        [52, 189, 140],
        [142, 10, 22],
        [114, 161, 251],
        [168, 55, 34],
        [75, 203, 89],
        [32, 45, 235],
        [74, 1, 129],
        [31, 166, 96],
        [223, 51, 202],
        [57, 72, 27],
        [143, 191, 176],
        [111, 33, 244],
        [20, 155, 62],
        [128, 99, 209],
        [254, 120, 14],
        [229, 67, 175],
        [53, 206, 40],
        [198, 77, 10],
        [8, 166, 142],
        [133, 45, 111],
        [222, 199, 239],
        [56, 18, 90],
        [164, 98, 206],
        [239, 135, 60],
        [106, 28, 139],
        [49, 172, 224],
        [179, 109, 34],
        [12, 191, 157],
        [121, 64, 88],
        [243, 214, 127],
        [82, 11, 165],
        [158, 37, 192],
        [31, 144, 55],
        [176, 220, 252],
        [68, 5, 123],
        [220, 157, 73],
        [41, 183, 210],
        [173, 85, 14],
        [16, 131, 99],
        [135, 50, 177],
        [227, 202, 244],
        [47, 175, 217],
        [181, 112, 28],
        [15, 190, 160],
        [124, 66, 91],
        [241, 217, 130],
        [80, 13, 168],
        [157, 40, 195],
        [30, 147, 52],
        [175, 223, 249],
        [67, 7, 126],
        [218, 160, 76],
        [235, 141, 45],
        [101, 33, 149],
        [46, 178, 220],
        [182, 114, 31],
        [14, 193, 163],
        [122, 69, 94],
        [240, 219, 133],
        [79, 16, 171],
        [156, 43, 198],
        [29, 150, 58],
        [225, 207, 243],
        [51, 27, 121],
        [159, 107, 229],
        [234, 143, 48],
        [100, 35, 152],
        [239, 221, 136],
        [78, 19, 174],
        [155, 46, 201],
        [28, 152, 61],
        [173, 227, 243],
        [20, 127, 108],
        [138, 59, 179],
        [224, 209, 245],
        [50, 29, 124],
        [161, 109, 232],
        [233, 145, 51],
        [99, 37, 155],
        [44, 174, 226],
        [184, 118, 20],
        [12, 195, 169],
        [125, 73, 100],
        [238, 223, 139],
        [77, 22, 177],
        [154, 49, 204],
        [27, 154, 64],
        [51, 86, 205]
    ]
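
# Class names for the model's outputs, read from labels.txt (one name per line,
# in the same order as the class indices the model predicts).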
labels_list = []

with open("labels.txt", "r") as fp:
    for line in fp:
        labels_list.append(line.rstrip("\n"))

colormap = np.asarray(ade_palette())
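
# Map a 2-D array of class indices to an RGB image using the palette above.
# Note: this raises if the label map contains an index with no palette entry,
# so ade_palette() must cover every class listed in labels.txt.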
def label_to_color_image(label):
    if label.ndim != 2:
        raise ValueError("Expect 2-D input label")
    if np.max(label) >= len(colormap):
        raise ValueError("label value too large.")
    return colormap[label]
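
# Render the blended prediction alongside a color legend that lists only the
# classes actually present in the predicted segmentation map.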
def draw_plot(pred_img, seg):
    fig = plt.figure(figsize=(20, 15))
    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])

    plt.subplot(grid_spec[0])
    plt.imshow(pred_img)
    plt.axis("off")

    LABEL_NAMES = np.asarray(labels_list)
    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)

    unique_labels = np.unique(seg.numpy().astype("uint8"))
    ax = plt.subplot(grid_spec[1])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0, labelsize=25)
    return fig
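
# Inference pipeline: preprocess the input image, run the TF SegFormer model,
# upsample the logits back to the input resolution, take the per-pixel argmax,
# colorize the segmentation map, and blend it with the original image.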
def sepia(input_img):
    input_img = Image.fromarray(input_img)

    inputs = feature_extractor(images=input_img, return_tensors="tf")
    outputs = model(**inputs)
    logits = outputs.logits

    # Logits come out channels-first at a reduced resolution; move channels last
    # and resize them to the original image size. `input_img.size` is
    # (width, height), hence the reversal to (height, width).
    logits = tf.transpose(logits, [0, 2, 3, 1])
    logits = tf.image.resize(logits, input_img.size[::-1])
    seg = tf.math.argmax(logits, axis=-1)[0]

    color_seg = np.zeros(
        (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
    )  # height, width, 3
    for label, color in enumerate(colormap):
        color_seg[seg.numpy() == label, :] = color

    # Show image + mask
    pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
    pred_img = pred_img.astype(np.uint8)

    fig = draw_plot(pred_img, seg)
    return fig
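
# Gradio UI: an image input, a matplotlib figure output, and the example images
# bundled with the Space.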
demo = gr.Interface(
    fn=sepia,
    inputs=gr.Image(shape=(800, 600)),
    outputs=["plot"],
    examples=["indoor.jpg", "indoor1.jpg", "indoor2.jpg", "indoor3.jpg"],
    allow_flagging="never",
)

demo.launch()