import os
import json

import numpy as np
import torch
from PIL import Image
import gradio as gr
from openai import OpenAI
from geopy.geocoders import Nominatim
from staticmap import StaticMap, CircleMarker, Polygon
from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline
import spaces

# Initialize API clients
openai_client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
geolocator = Nominatim(user_agent="geoapi")

# Fetch [longitude, latitude] for a place name via Nominatim
def get_geo_coordinates(location_name):
    try:
        location = geolocator.geocode(location_name)
        if location:
            return [location.longitude, location.latitude]
        return None
    except Exception as e:
        print(f"Error fetching coordinates for {location_name}: {e}")
        return None

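# Usage sketch (hypothetical place name; actual values depend on Nominatim):
#   get_geo_coordinates("Trincomalee")  # -> [longitude, latitude], or None if the lookup fails
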
# Query the OpenAI chat API for a structured geographic answer in JSON
def process_openai_response(query):
    system_prompt = (
        "You are a skilled assistant answering geographical and historical questions. "
        "For each question, generate a structured output in JSON format, based on city names "
        "without coordinates. The response should include:\n"
        "Answer: A concise response to the question.\n"
        "Feature Representation: A feature type based on city names (Point, LineString, Polygon, "
        "MultiPoint, MultiLineString, MultiPolygon, GeometryCollection).\n"
        "Description: A prompt for a diffusion model describing what should be drawn for this feature.\n"
        "\n"
        "Handle the following cases:\n"
        "1. **Single or Multiple Points**: Create a point or a list of points for multiple cities.\n"
        "2. **LineString**: Create a line between two cities.\n"
        "3. **Polygon**: Represent an area formed by three or more cities (closed). "
        "Example: cities forming a triangle (A, B, C).\n"
        "4. **MultiPoint, MultiLineString, MultiPolygon, GeometryCollection**: Use as needed based on the question.\n"
        "\n"
        "For example, if asked about cities forming a polygon, create a feature like this:\n"
        "Input: Mark an area with three cities.\n"
        'Output: {"input": "Mark an area with three cities.", "output": {"answer": "The cities A, B, and C '
        'form a triangle.", "feature_representation": {"type": "Polygon", "cities": ["A", "B", "C"], '
        '"properties": {"description": "satellite image of a plantation, green fill, 4k, map, detailed, '
        'greenery, plants, vegetation, high contrast"}}}}\n'
        "\n"
        "Ensure all responses are descriptive and relevant to city names only, without coordinates."
    )
    response = openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": [{"type": "text", "text": system_prompt}]},
            {"role": "user", "content": [{"type": "text", "text": query}]},
        ],
        temperature=1,
        max_tokens=2048,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        response_format={"type": "json_object"},
    )
    return json.loads(response.choices[0].message.content)

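# Illustrative sketch only (hypothetical values, not used by the app): the structured
# output requested in the system prompt above is expected to look roughly like this.
_EXAMPLE_LLM_RESPONSE = {
    "input": "Mark an area with three cities.",
    "output": {
        "answer": "The cities A, B, and C form a triangle.",
        "feature_representation": {
            "type": "Polygon",
            "cities": ["A", "B", "C"],
            "properties": {"description": "satellite image of a plantation, green fill, 4k, map, detailed"},
        },
    },
}
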
# Build a GeoJSON FeatureCollection from the OpenAI response
def generate_geojson(response):
    feature_type = response['output']['feature_representation']['type']
    city_names = response['output']['feature_representation']['cities']
    properties = response['output']['feature_representation']['properties']

    coordinates = []
    for city in city_names:
        coord = get_geo_coordinates(city)
        if coord:
            coordinates.append(coord)

    if feature_type == "Polygon" and coordinates:
        coordinates.append(coordinates[0])  # Close the polygon ring

    return {
        "type": "FeatureCollection",
        "features": [{
            "type": "Feature",
            "properties": properties,
            "geometry": {
                "type": feature_type,
                # GeoJSON polygons wrap the ring in an extra list
                "coordinates": [coordinates] if feature_type == "Polygon" else coordinates
            }
        }]
    }

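# Sketch of the FeatureCollection produced above for a Polygon (hypothetical coordinates;
# GeoJSON uses [longitude, latitude] order, and the closed ring sits inside an extra list):
# {
#     "type": "FeatureCollection",
#     "features": [{
#         "type": "Feature",
#         "properties": {"description": "..."},
#         "geometry": {
#             "type": "Polygon",
#             "coordinates": [[[80.0, 7.0], [81.0, 7.5], [80.5, 8.0], [80.0, 7.0]]]
#         }
#     }]
# }
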
# Render the GeoJSON features onto a static map image
def generate_static_map(geojson_data, bounds=None):
    # Create a static map object with the desired output size
    m = StaticMap(600, 600)

    center = None
    if bounds:
        # bounds is [[min_lat, min_lng], [max_lat, max_lng]]; staticmap expects (lng, lat)
        center_lat = (bounds[0][0] + bounds[1][0]) / 2
        center_lng = (bounds[0][1] + bounds[1][1]) / 2
        center = (center_lng, center_lat)

    # Add a tiny white marker when there are no features, so rendering does not fail
    if not geojson_data["features"]:
        m.add_marker(CircleMarker((0.0, 0.0), '#FFFFFF', 0))

    # Draw each feature; staticmap expects (longitude, latitude) tuples
    for feature in geojson_data["features"]:
        geom_type = feature["geometry"]["type"]
        coords = feature["geometry"]["coordinates"]
        if geom_type == "Point":
            m.add_marker(CircleMarker((coords[0], coords[1]), 'blue', 10))
        elif geom_type in ["MultiPoint", "LineString"]:
            for coord in coords:
                m.add_marker(CircleMarker((coord[0], coord[1]), 'blue', 10))
        elif geom_type in ["Polygon", "MultiPolygon"]:
            for polygon in coords:
                m.add_polygon(Polygon([(c[0], c[1]) for c in polygon], 'blue', 3))

    return m.render(zoom=10, center=center)

# ControlNet inpainting pipeline setup
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
)
pipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16
)
# ZeroGPU compatibility: moving the pipeline to CUDA at import time is allowed; the GPU
# itself is only attached while a function decorated with @spaces.GPU is running.
pipeline.to('cuda')

# Build the ControlNet inpaint conditioning tensor: masked pixels are set to -1.0
def make_inpaint_condition(init_image, mask_image):
    init_image = np.array(init_image.convert("RGB")).astype(np.float32) / 255.0
    mask_image = np.array(mask_image.convert("L")).astype(np.float32) / 255.0
    assert init_image.shape[:2] == mask_image.shape[:2], "image and image_mask must have the same size"
    init_image[mask_image > 0.5] = -1.0  # mark masked pixels
    init_image = np.expand_dims(init_image, 0).transpose(0, 3, 1, 2)  # (1, 3, H, W)
    return torch.from_numpy(init_image)

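# Shape sketch (hypothetical 512x512 inputs): the conditioning tensor returned above has
# shape (1, 3, H, W), with masked pixels set to -1.0 as the inpaint ControlNet expects.
#   init = Image.new("RGB", (512, 512), "white")
#   mask = Image.new("L", (512, 512), 0)
#   make_inpaint_condition(init, mask).shape  # -> torch.Size([1, 3, 512, 512])
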
@spaces.GPU  # ZeroGPU: request a GPU for the duration of this call
def generate_satellite_image(init_image, mask_image, prompt):
    control_image = make_inpaint_condition(init_image, mask_image)
    result = pipeline(
        prompt=prompt,
        image=init_image,
        mask_image=mask_image,
        control_image=control_image,
        strength=0.45,
        guidance_scale=62
    )
    return result.images[0]

# Compute [[min_lat, min_lng], [max_lat, max_lng]] over all feature coordinates
def get_bounds(geojson):
    coordinates = []
    for feature in geojson["features"]:
        geom_type = feature["geometry"]["type"]
        coords = feature["geometry"]["coordinates"]
        if geom_type == "Point":
            coordinates.append(coords)
        elif geom_type in ["MultiPoint", "LineString"]:
            coordinates.extend(coords)
        elif geom_type in ["MultiLineString", "Polygon"]:
            for part in coords:
                coordinates.extend(part)
        elif geom_type == "MultiPolygon":
            for polygon in coords:
                for part in polygon:
                    coordinates.extend(part)
    lats = [coord[1] for coord in coordinates]
    lngs = [coord[0] for coord in coordinates]
    return [[min(lats), min(lngs)], [max(lats), max(lngs)]]

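# Usage sketch (hypothetical point): for a single Point at [80.0, 7.0] the result is
#   [[7.0, 80.0], [7.0, 80.0]]  # [[min_lat, min_lng], [max_lat, max_lng]]
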
# Main callback: build the maps, mask, and satellite image for a query
def handle_query(query):
    # Ask OpenAI for the structured answer and build GeoJSON from it
    response = process_openai_response(query)
    geojson_data = generate_geojson(response)

    # Render the feature map and an empty map over the same bounds, so the two
    # images align and their pixel difference isolates the drawn features
    bounds = get_bounds(geojson_data)
    map_image = generate_static_map(geojson_data, bounds)
    empty_geojson = {
        "type": "FeatureCollection",
        "features": []  # no features, background tiles only
    }
    empty_map_image = generate_static_map(empty_geojson, bounds)

    # Create the mask from the pixel difference (cast to int to avoid uint8 wrap-around)
    difference = np.abs(
        np.array(map_image.convert("RGB")).astype(np.int16)
        - np.array(empty_map_image.convert("RGB")).astype(np.int16)
    )
    threshold = 10  # tolerance for tile-rendering noise
    mask = (np.sum(difference, axis=-1) > threshold).astype(np.uint8) * 255

    # Convert the mask to a PIL image
    mask_image = Image.fromarray(mask, mode="L")

    # Inpaint the masked area with a satellite-style image
    description = response['output']['feature_representation']['properties']['description']
    satellite_image = generate_satellite_image(map_image, mask_image, description)

    return map_image, empty_map_image, satellite_image, mask_image, description

def update_query(selected_query):
    return selected_query

query_options = [
    "“US $ 10.5 Million has been allocated to recommence the construction of Kurinchakerny Bridge facilitating the transport and business needs of approximately one hundred thousand residents in the Divisional Secretariat Division of Kinniya in Trincomalee District,” the ministry said in a statement.",
    "Due to considerable rainfall in the up- and mid-stream areas of Kala Oya, the Rajanganaya reservoir is now spilling at a rate of 17,000 cubic feet per second, the department said."
]

# Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        selected_query = gr.Dropdown(label="Select Query", choices=query_options, value=query_options[-1])
        query_input = gr.Textbox(label="Enter Query", value=query_options[-1])
    selected_query.change(update_query, inputs=selected_query, outputs=query_input)
    submit_btn = gr.Button("Submit")
    with gr.Row():
        map_output = gr.Image(label="Map Visualization")
        empty_map_output = gr.Image(label="Empty Map Visualization")
    with gr.Row():
        satellite_output = gr.Image(label="Generated Satellite Image")
        mask_output = gr.Image(label="Mask")
    image_prompt = gr.Textbox(label="Image Prompt Used")
    submit_btn.click(
        handle_query,
        inputs=[query_input],
        outputs=[map_output, empty_map_output, satellite_output, mask_output, image_prompt]
    )

if __name__ == "__main__":
    demo.launch()