import gradio as gr
import numpy as np
from math import ceil
from huggingface_hub import from_pretrained_keras

# Load the pretrained Keras autoencoder used for upsampling from the Hugging Face Hub.
model = from_pretrained_keras("GIanlucaRub/autoencoder_model_d_0")
def double_res(input_image):
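    """Double an image's resolution.

    The image is split into 128x128 tiles, each tile is upsampled to 256x256 by the
    autoencoder, and the results are stitched back together. For interior tiles,
    overlapping windows around the seams are re-predicted to hide tile borders.
    """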
    input_height = input_image.shape[0] 
    input_width = input_image.shape[1] 
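    # Number of 128x128 tiles needed to cover the image; the input is zero-padded
    # up to a multiple of 128 so it tiles exactly.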
    height = ceil(input_height/128)
    width = ceil(input_width/128)
    expanded_input_image = np.zeros((128*height, 128*width, 3), dtype=np.uint8)
    np.copyto(expanded_input_image[0:input_height, 0:input_width], input_image)
    
    # The output canvas is twice the padded size: the model maps each 128x128 tile to 256x256.
    output_image = np.zeros((128*height*2, 128*width*2, 3), dtype=np.float32)

    for i in range(height):
        for j in range(width):
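            # Upsample the base 128x128 tile to 256x256 and place it on the output canvas.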
            temp_slice = expanded_input_image[i*128:(i+1)*128, j*128:(j+1)*128]/255
            upsampled_slice = model.predict(temp_slice[np.newaxis, ...]) 
            np.copyto(output_image[i*256:(i+1)*256, j*256:(j+1)*256], upsampled_slice[0])
            if i != 0 and j != 0 and i != height - 1 and j != width - 1:
                # Remove inner borders: for interior tiles, re-predict 128x128 windows
                # centered on each seam and overwrite the seam region in the output.
                right_slice = expanded_input_image[i*128:(i+1)*128, (j+1)*128-64:(j+1)*128+64]/255
                right_upsampled_slice = model.predict(right_slice[np.newaxis, ...]) 
                resized_right_slice = right_upsampled_slice[0][64:192,64:192]
                np.copyto(output_image[i*256+64:(i+1)*256-64, (j+1)*256-64:(j+1)*256+64], resized_right_slice)
                
                left_slice = expanded_input_image[i*128:(i+1)*128, j*128-64:j*128+64]/255
                left_upsampled_slice = model.predict(left_slice[np.newaxis, ...]) 
                resized_left_slice = left_upsampled_slice[0][64:192,64:192]
                np.copyto(output_image[i*256+64:(i+1)*256-64, j*256-64:j*256+64], resized_left_slice)
                
                # Note: row indices grow downward, so the "upper" window straddles the seam
                # below the tile and the "lower" window the seam above it.
                upper_slice = expanded_input_image[(i+1)*128-64:(i+1)*128+64, j*128:(j+1)*128]/255
                upper_upsampled_slice = model.predict(upper_slice[np.newaxis, ...]) 
                resized_upper_slice = upper_upsampled_slice[0][64:192,64:192]
                np.copyto(output_image[(i+1)*256-64:(i+1)*256+64, j*256+64:(j+1)*256-64], resized_upper_slice)
                
                lower_slice = expanded_input_image[i*128-64:i*128+64, j*128:(j+1)*128]/255
                lower_upsampled_slice = model.predict(lower_slice[np.newaxis, ...]) 
                resized_lower_slice = lower_upsampled_slice[0][64:192,64:192]
                np.copyto(output_image[i*256-64:i*256+64, j*256+64:(j+1)*256-64], resized_lower_slice)

                # Remove corner seams: re-predict windows centered on the two corners along
                # the seam above this tile (its top-left and top-right corners in image coordinates).
                lower_right_slice = expanded_input_image[i*128-64:i*128+64, (j+1)*128-64:(j+1)*128+64]/255
                lower_right_upsampled_slice = model.predict(lower_right_slice[np.newaxis, ...]) 
                resized_lower_right_slice = lower_right_upsampled_slice[0][64:192,64:192]
                np.copyto(output_image[i*256-64:i*256+64,  (j+1)*256-64:(j+1)*256+64], resized_lower_right_slice)
                
                lower_left_slice = expanded_input_image[i*128-64:i*128+64, j*128-64:j*128+64]/255
                lower_left_upsampled_slice = model.predict(lower_left_slice[np.newaxis, ...]) 
                resized_lower_left_slice = lower_left_upsampled_slice[0][64:192,64:192]
                np.copyto(output_image[i*256-64:i*256+64, j*256-64:j*256+64], resized_lower_left_slice)

    resized_output_image = output_image[0:input_height*2, 0:input_width*2]
    return resized_output_image

# Gradio UI: a single image in, the upsampled image out.
demo = gr.Interface(
    fn=double_res,
    title="Double picture resolution",
    description="Upload a picture and get it back with doubled horizontal and vertical resolution (4x the pixels).",
    allow_flagging="never",
    inputs=gr.Image(type="numpy"),
    outputs=gr.Image(type="numpy"),
)

demo.launch()