import os
import json
import numpy as np
import torch
from PIL import Image, ImageDraw
import gradio as gr
from openai import OpenAI
from geopy.geocoders import Nominatim
from staticmap import StaticMap, CircleMarker, Polygon
from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline
import spaces

# Initialize APIs
openai_client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
geolocator = Nominatim(user_agent="geoapi")
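# Note: OPENAI_API_KEY must be provided via the environment (e.g. as a Space
# secret); os.environ['OPENAI_API_KEY'] raises a KeyError at import time if it
# is not set.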

# Function to fetch coordinates
@spaces.GPU
def get_geo_coordinates(location_name):
    try:
        location = geolocator.geocode(location_name)
        if location:
            return [location.longitude, location.latitude]
        return None
    except Exception as e:
        print(f"Error fetching coordinates for {location_name}: {e}")
        return None

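# Illustrative example (values are approximate and not from the original code):
#   get_geo_coordinates("Paris") -> [2.35, 48.85]   # [longitude, latitude]
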
# Function to process OpenAI chat response
@spaces.GPU
def process_openai_response(query):
    response = openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
    {
      "role": "system",
      "content": [
        {
          "type": "text",
          "text": "\"input\": \"\"\"You are a skilled assistant answering geographical and historical questions. For each question, generate a structured output in JSON format, based on city names without coordinates. The response should include:\
Answer: A concise response to the question.\
Feature Representation: A feature type based on city names (Point, LineString, Polygon, MultiPoint, MultiLineString, MultiPolygon, GeometryCollection).\
Description: A prompt for a diffusion model describing what should be drawn for the feature.\
\
Handle the following cases:\
\
1. **Single or Multiple Points**: Create a point or a list of points for multiple cities.\
2. **LineString**: Create a line between two cities.\
3. **Polygon**: Represent an area formed by three or more cities (closed). Example: Cities forming a triangle (A, B, C).\
4. **MultiPoint, MultiLineString, MultiPolygon, GeometryCollection**: Use as needed based on the question.\
\
For example, if asked about cities forming a polygon, create a feature like this:\
\
Input: Mark an area with three cities.\
Output: {\"input\": \"Mark an area with three cities.\", \"output\": {\"answer\": \"The cities A, B, and C form a triangle.\", \"feature_representation\": {\"type\": \"Polygon\", \"cities\": [\"A\", \"B\", \"C\"], \"properties\": {\"description\": \"satelite image of a plantation, green fill, 4k, map, detailed, greenary, plants, vegitation, high contrast\"}}}}\
\
Ensure all responses are descriptive and relevant to city names only, without coordinates."
        }
      ]
    },
    {
      "role": "user",
      "content": [
        {
          "type": "text",
          "text": query
        }
      ]
    }
  ],
        temperature=1,
        max_tokens=2048,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        response_format={"type": "json_object"}
    )
    return json.loads(response.choices[0].message.content)

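# Shape of the JSON the model is asked to return, taken from the example in the
# system prompt above:
# {
#   "input": "Mark an area with three cities.",
#   "output": {
#     "answer": "The cities A, B, and C form a triangle.",
#     "feature_representation": {
#       "type": "Polygon",
#       "cities": ["A", "B", "C"],
#       "properties": {"description": "satellite image of a plantation, ..."}
#     }
#   }
# }
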
# Generate GeoJSON from OpenAI response
@spaces.GPU
def generate_geojson(response):
    feature_type = response['output']['feature_representation']['type']
    city_names = response['output']['feature_representation']['cities']
    properties = response['output']['feature_representation']['properties']

    coordinates = []
    for city in city_names:
        coord = get_geo_coordinates(city)
        if coord:
            coordinates.append(coord)

    if feature_type == "Polygon":
        coordinates.append(coordinates[0])  # Close the polygon

    return {
        "type": "FeatureCollection",
        "features": [{
            "type": "Feature",
            "properties": properties,
            "geometry": {
                "type": feature_type,
                "coordinates": [coordinates] if feature_type == "Polygon" else coordinates
            }
        }]
    }

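# For the Polygon example above, generate_geojson returns a FeatureCollection of
# roughly this shape (actual coordinates depend on what Nominatim resolves for
# each city; the first coordinate is repeated to close the ring):
# {
#   "type": "FeatureCollection",
#   "features": [{
#     "type": "Feature",
#     "properties": {"description": "..."},
#     "geometry": {
#       "type": "Polygon",
#       "coordinates": [[[lonA, latA], [lonB, latB], [lonC, latC], [lonA, latA]]]
#     }
#   }]
# }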

# ControlNet pipeline setup
controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16)
pipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16
)
# ZeroGPU compatibility
pipeline.to('cuda')

@spaces.GPU
def make_inpaint_condition(init_image, mask_image):
    init_image = np.array(init_image.convert("RGB")).astype(np.float32) / 255.0
    mask_image = np.array(mask_image.convert("L")).astype(np.float32) / 255.0

    assert init_image.shape[:2] == mask_image.shape[:2], "init_image and mask_image must have the same width and height"
    init_image[mask_image > 0.5] = -1.0  # mark masked pixels for the inpaint ControlNet
    init_image = np.expand_dims(init_image, 0).transpose(0, 3, 1, 2)
    init_image = torch.from_numpy(init_image)
    return init_image

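# make_inpaint_condition returns a float32 tensor of shape (1, 3, H, W) in which
# masked pixels are set to -1.0, which is the conditioning format expected by the
# control_v11p_sd15_inpaint ControlNet loaded above.
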
@spaces.GPU
def generate_satellite_image(init_image, mask_image, prompt):
    control_image = make_inpaint_condition(init_image, mask_image)
    result = pipeline(
        prompt=prompt,
        image=init_image,
        mask_image=mask_image,
        control_image=control_image,
        strength=0.45,
        guidance_scale=62,
    )
    return result.images[0]

def get_bounds(geojson):
    coordinates = []
    for feature in geojson["features"]:
        geom_type = feature["geometry"]["type"]
        coords = feature["geometry"]["coordinates"]
        if geom_type == "Point":
            coordinates.append(coords)
        elif geom_type in ["MultiPoint", "LineString"]:
            coordinates.extend(coords)
        elif geom_type in ["MultiLineString", "Polygon"]:
            for part in coords:
                coordinates.extend(part)
        elif geom_type == "MultiPolygon":
            for polygon in coords:
                for part in polygon:
                    coordinates.extend(part)
    lats = [coord[1] for coord in coordinates]
    lngs = [coord[0] for coord in coordinates]
    return [[min(lats), min(lngs)], [max(lats), max(lngs)]]

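# get_bounds returns [[min_lat, min_lng], [max_lat, max_lng]], i.e. the
# south-west and north-east corners of the bounding box of all features.
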
@spaces.GPU
def generate_static_map(geojson_data, bounds=None):
    # Create a static map object with specified dimensions
    m = StaticMap(600, 600)

    # If bounds are given, center the render on the midpoint of the bounding box
    # so that maps drawn with and without features line up pixel-for-pixel.
    center = None
    if bounds:
        center_lat = (bounds[0][0] + bounds[1][0]) / 2
        center_lng = (bounds[0][1] + bounds[1][1]) / 2
        center = (center_lng, center_lat)  # staticmap expects (lon, lat)

    # Process each feature in the GeoJSON
    for feature in geojson_data["features"]:
        geom_type = feature["geometry"]["type"]
        coords = feature["geometry"]["coordinates"]

        if geom_type == "Point":
            m.add_marker(CircleMarker((coords[0], coords[1]), 'blue', 10))
        elif geom_type in ["MultiPoint", "LineString"]:
            for coord in coords:
                m.add_marker(CircleMarker((coord[0], coord[1]), 'blue', 10))
        elif geom_type in ["Polygon", "MultiPolygon"]:
            for polygon in coords:
                m.add_polygon(Polygon([(c[0], c[1]) for c in polygon], 'blue', 3))

    # Passing an explicit center also allows rendering an empty FeatureCollection,
    # which staticmap otherwise refuses to do.
    return m.render(zoom=10, center=center)


# Main handler wired to the Gradio UI
@spaces.GPU
def handle_query(query):
    # Process OpenAI response
    response = process_openai_response(query)
    geojson_data = generate_geojson(response)

    # Use the same bounds for both renders so the two images line up pixel-for-pixel
    bounds = get_bounds(geojson_data)

    # Generate the main map image
    map_image = generate_static_map(geojson_data, bounds)

    # Generate an empty map covering the same area
    empty_geojson = {
        "type": "FeatureCollection",
        "features": []  # Empty map contains no features
    }
    empty_map_image = generate_static_map(empty_geojson, bounds)

    # Create the mask: pixels where the two renders differ are the drawn features.
    # Cast to a signed type before subtracting to avoid uint8 wrap-around.
    difference = np.abs(
        np.array(map_image.convert("RGB")).astype(np.int16)
        - np.array(empty_map_image.convert("RGB")).astype(np.int16)
    )
    threshold = 10  # Tolerance for tile-rendering noise
    mask = (np.sum(difference, axis=-1) > threshold).astype(np.uint8) * 255

    # Convert the mask to a PIL image
    mask_image = Image.fromarray(mask, mode="L")

    # Generate the satellite image
    satellite_image = generate_satellite_image(
        map_image, mask_image, response['output']['feature_representation']['properties']['description']
    )

    return map_image, satellite_image, mask_image, response


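# Hypothetical direct invocation, bypassing the UI (the query text is only an example):
#   map_img, satellite_img, mask_img, resp = handle_query("Mark an area with three cities")
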
# Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        query_input = gr.Textbox(label="Enter Query")
        submit_btn = gr.Button("Submit")
    with gr.Row():
        map_output = gr.Image(label="Map Visualization")
        satellite_output = gr.Image(label="Generated Satellite Image")
        mask_output = gr.Image(label="Mask")
        image_prompt = gr.Textbox(label="Image Prompt Used")
    submit_btn.click(handle_query, inputs=[query_input], outputs=[map_output, satellite_output, mask_output, image_prompt])

if __name__ == "__main__":
    demo.launch()