async def get_stream(self, *command_args, conn_type="I", offset=0):
    """
    :py:func:`asyncio.coroutine`

    Create :py:class:`aioftp.DataConnectionThrottleStreamIO` for straight
    read/write io.

    :param command_args: arguments for :py:meth:`aioftp.Client.command`

    :param conn_type: connection type ("I", "A", "E", "L")
    :type conn_type: :py:class:`str`

    :param offset: byte offset for stream start position
    :type offset: :py:class:`int`

    :rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`
    """
    reader, writer = await self.get_passive_connection(conn_type)
    if offset:
        await self.command("REST " + str(offset), "350")
    await self.command(*command_args)
    stream = DataConnectionThrottleStreamIO(
        self,
        reader,
        writer,
        throttles={"_": self.throttle},
        timeout=self.socket_timeout,
    )
    return stream

def jenks_breaks(values, nb_class):
    """
    Compute jenks natural breaks on a sequence of `values`, given `nb_class`,
    the desired number of classes.

    Parameters
    ----------
    values : array-like
        The iterable sequence of numbers (integer/float) to be used.
    nb_class : int
        The desired number of classes (where some other implementations
        request a `k` value, `nb_class` corresponds to `k` + 1). It has to be
        smaller than the length of `values` and greater than or equal to 2.

    Returns
    -------
    breaks : tuple of floats
        The computed break values, including minimum and maximum, in order
        to have all the bounds for building `nb_class` classes, so the
        returned tuple has a length of `nb_class` + 1.

    Examples
    --------
    Using nb_class = 3, expecting 4 break values, including min and max:

    >>> jenks_breaks(
    ...     [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3],
    ...     nb_class=3)  # Should output (1.2, 2.3, 5.0, 7.8)
    """
    if not isinstance(values, Iterable) or isinstance(values, (str, bytes)):
        raise TypeError("A sequence of numbers is expected")
    if isinstance(nb_class, float) and int(nb_class) == nb_class:
        nb_class = int(nb_class)
    if not isinstance(nb_class, int):
        raise TypeError(
            "Number of classes has to be a positive integer: "
            "expected an instance of 'int' but found {}"
            .format(type(nb_class)))

    nb_values = len(values)
    if np and isinstance(values, np.ndarray):
        values = values[np.argwhere(np.isfinite(values)).reshape(-1)]
    else:
        values = [i for i in values if isfinite(i)]

    if len(values) != nb_values:
        warnings.warn('Invalid values encountered (NaN or Inf) were ignored')
        nb_values = len(values)

    if nb_class >= nb_values or nb_class < 2:
        raise ValueError("Number of classes has to be an integer "
                         "greater than or equal to 2 and "
                         "smaller than the number of values to use")

    return jenks._jenks_breaks(values, nb_class)

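# A minimal usage sketch, assuming the jenkspy package (from which the
# function above is taken) is installed; the values are arbitrary examples.
import jenkspy

values = [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3]
breaks = jenkspy.jenks_breaks(values, 3)  # 3 classes -> 4 break values
print(breaks)  # e.g. (1.2, 2.3, 5.0, 7.8): min, two inner breaks, max
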
def grab_to_file(self, filename, bbox=None):
    """http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html.

    only "jpeg" or "png"
    """
    w = self.gtk.gdk.get_default_root_window()
    # Capture the whole screen.
    if bbox is None:
        sz = w.get_size()
        pb = self.gtk.gdk.Pixbuf(
            self.gtk.gdk.COLORSPACE_RGB, False, 8, sz[0], sz[1])  # 24bit RGB
        pb = pb.get_from_drawable(
            w, w.get_colormap(), 0, 0, 0, 0, sz[0], sz[1])
    # Only capture what we need. The smaller the capture, the faster.
    else:
        sz = [bbox[2] - bbox[0], bbox[3] - bbox[1]]
        pb = self.gtk.gdk.Pixbuf(
            self.gtk.gdk.COLORSPACE_RGB, False, 8, sz[0], sz[1])
        pb = pb.get_from_drawable(
            w, w.get_colormap(), bbox[0], bbox[1], 0, 0, sz[0], sz[1])
    assert pb
    ftype = 'png'
    if filename.endswith('.jpeg'):
        ftype = 'jpeg'
    pb.save(filename, ftype)

def grab(self, bbox=None):
    """Grabs an image directly to a buffer.

    :param bbox: Optional tuple or list containing (x1, y1, x2, y2)
        coordinates of sub-region to capture.
    :return: PIL RGB image
    :raises: ValueError, if image data does not have 3 channels (RGB), each
        with 8 bits.
    :rtype: Image
    """
    w = Gdk.get_default_root_window()
    if bbox is not None:
        g = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
    else:
        g = w.get_geometry()
    pb = Gdk.pixbuf_get_from_window(w, *g)
    if pb.get_bits_per_sample() != 8:
        raise ValueError('Expected 8 bits per pixel.')
    elif pb.get_n_channels() != 3:
        raise ValueError('Expected RGB image.')

    # Read the entire buffer into a python bytes object.
    # read_pixel_bytes: New in version 2.32.
    pixel_bytes = pb.read_pixel_bytes().get_data()  # type: bytes
    width, height = g[2], g[3]

    # Probably for SSE alignment reasons, the pixbuf has extra data in
    # each line. The args after "raw" help handle this; see
    # http://effbot.org/imagingbook/decoder.htm#the-raw-decoder
    return Image.frombytes(
        'RGB', (width, height), pixel_bytes, 'raw', 'RGB', pb.get_rowstride(), 1)

def grab(bbox=None, childprocess=None, backend=None):
    """Copy the contents of the screen to PIL image memory.

    :param bbox: optional bounding box (x1, y1, x2, y2)
    :param childprocess: pyscreenshot can cause an error if it is used on
                         multiple different virtual displays and the back-end
                         does not run in a separate process.
                         Some back-ends always run as separate processes:
                         scrot, imagemagick.
                         The default is False if the program was started
                         inside IDLE, otherwise it is True.
    :param backend: back-end can be forced if set (examples: scrot, wx, ..),
                    otherwise the back-end is chosen automatically
    """
    if childprocess is None:
        childprocess = childprocess_default_value()
    return _grab(
        to_file=False, childprocess=childprocess, backend=backend, bbox=bbox)

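# A minimal usage sketch, assuming the pyscreenshot package is installed and
# a display is available; the bbox values are arbitrary examples.
import pyscreenshot

# Full-screen grab returned as a PIL image.
im = pyscreenshot.grab()

# Grab only a sub-region, forcing a specific back-end.
region = pyscreenshot.grab(bbox=(10, 10, 410, 310), backend="scrot")
region.save("region.png")
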
def grab_to_file(filename, childprocess=None, backend=None):
    """Copy the contents of the screen to a file.

    Internal function! Use PIL.Image.save() for saving image to file.

    :param filename: file for saving
    :param childprocess: see :py:func:`grab`
    :param backend: see :py:func:`grab`
    """
    if childprocess is None:
        childprocess = childprocess_default_value()
    return _grab(to_file=True, childprocess=childprocess, backend=backend,
                 filename=filename)

def backend_version(backend, childprocess=None):
    """Back-end version.

    :param backend: back-end (examples: scrot, wx, ..)
    :param childprocess: see :py:func:`grab`
    :return: version as string
    """
    if childprocess is None:
        childprocess = childprocess_default_value()
    if not childprocess:
        return _backend_version(backend)
    else:
        return run_in_childprocess(_backend_version, None, backend)

def write(self, process_tile, data):
    """
    Write data from process tiles into PNG file(s).

    Parameters
    ----------
    process_tile : ``BufferedTile``
        must be member of process ``TilePyramid``
    """
    data = self._prepare_array(data)

    if data.mask.all():
        logger.debug("data empty, nothing to write")
    else:
        # in case of S3 output, create a boto3 resource
        bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None

        # Convert from process_tile to output_tiles and write
        for tile in self.pyramid.intersecting(process_tile):
            out_path = self.get_path(tile)
            self.prepare_path(tile)
            out_tile = BufferedTile(tile, self.pixelbuffer)
            write_raster_window(
                in_tile=process_tile,
                in_data=data,
                out_profile=self.profile(out_tile),
                out_tile=out_tile,
                out_path=out_path,
                bucket_resource=bucket_resource
            )

def read(self, output_tile, **kwargs):
    """
    Read existing process output.

    Parameters
    ----------
    output_tile : ``BufferedTile``
        must be member of output ``TilePyramid``

    Returns
    -------
    process output : ``BufferedTile`` with appended data
    """
    try:
        return ma.masked_values(
            read_raster_no_crs(
                self.get_path(output_tile),
                indexes=(4 if self.old_band_num else 2)
            ),
            0
        )
    except FileNotFoundError:
        return self.empty(output_tile)

def profile(self, tile=None):
    """
    Create a metadata dictionary for rasterio.

    Parameters
    ----------
    tile : ``BufferedTile``

    Returns
    -------
    metadata : dictionary
        output profile dictionary used for rasterio.
    """
    dst_metadata = dict(self._profile)
    if tile is not None:
        dst_metadata.update(
            width=tile.width,
            height=tile.height,
            affine=tile.affine,
            driver="PNG",
            crs=tile.crs
        )
    return dst_metadata

def open(
    config, mode="continue", zoom=None, bounds=None, single_input_file=None,
    with_cache=False, debug=False
):
    """
    Open a Mapchete process.

    Parameters
    ----------
    config : MapcheteConfig object, config dict or path to mapchete file
        Mapchete process configuration
    mode : string
        * ``memory``: Generate process output on demand without reading
          pre-existing data or writing new data.
        * ``readonly``: Just read data without processing new data.
        * ``continue``: (default) Don't overwrite existing output.
        * ``overwrite``: Overwrite existing output.
    zoom : list or integer
        process zoom level or a pair of minimum and maximum zoom level
    bounds : tuple
        left, bottom, right, top process boundaries in output pyramid
    single_input_file : string
        single input file if supported by process
    with_cache : bool
        process output data cached in memory

    Returns
    -------
    Mapchete
        a Mapchete process object
    """
    return Mapchete(
        MapcheteConfig(
            config, mode=mode, zoom=zoom, bounds=bounds,
            single_input_file=single_input_file, debug=debug),
        with_cache=with_cache)

def count_tiles(geometry, pyramid, minzoom, maxzoom, init_zoom=0):
    """
    Count number of tiles intersecting with geometry.

    Parameters
    ----------
    geometry : shapely geometry
    pyramid : TilePyramid
    minzoom : int
    maxzoom : int
    init_zoom : int

    Returns
    -------
    number of tiles
    """
    if not 0 <= init_zoom <= minzoom <= maxzoom:
        raise ValueError("invalid zoom levels given")
    # tile buffers are not being taken into account
    unbuffered_pyramid = TilePyramid(
        pyramid.grid, tile_size=pyramid.tile_size,
        metatiling=pyramid.metatiling
    )
    # make sure no rounding errors occur
    geometry = geometry.buffer(-0.000000001)
    return _count_tiles(
        [
            unbuffered_pyramid.tile(*tile_id)
            for tile_id in product(
                [init_zoom],
                range(pyramid.matrix_height(init_zoom)),
                range(pyramid.matrix_width(init_zoom))
            )
        ],
        geometry, minzoom, maxzoom
    )

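# A minimal usage sketch, assuming mapchete, tilematrix and shapely are
# installed; the import path of count_tiles and the bbox are assumptions.
from shapely.geometry import box
from tilematrix import TilePyramid
from mapchete import count_tiles

# Count all geodetic-grid tiles between zoom 0 and 5 touching this bbox.
print(count_tiles(box(0.0, 0.0, 45.0, 45.0), TilePyramid("geodetic"), 0, 5))
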
def _get_zoom_level(zoom, process):
    """Determine zoom levels."""
    if zoom is None:
        return reversed(process.config.zoom_levels)
    if isinstance(zoom, int):
        return [zoom]
    elif len(zoom) == 2:
        return reversed(range(min(zoom), max(zoom) + 1))
    elif len(zoom) == 1:
        return zoom

def _process_worker(process, process_tile):
    """Worker function running the process."""
    logger.debug((process_tile.id, "running on %s" % current_process().name))

    # skip execution if overwrite is disabled and tile exists
    if (
        process.config.mode == "continue" and
        process.config.output.tiles_exist(process_tile)
    ):
        logger.debug((process_tile.id, "tile exists, skipping"))
        return ProcessInfo(
            tile=process_tile,
            processed=False,
            process_msg="output already exists",
            written=False,
            write_msg="nothing written"
        )
    # execute on process tile
    else:
        with Timer() as t:
            try:
                output = process.execute(process_tile, raise_nodata=True)
            except MapcheteNodataTile:
                output = None
        processor_message = "processed in %s" % t
        logger.debug((process_tile.id, processor_message))
        writer_info = process.write(process_tile, output)
        return ProcessInfo(
            tile=process_tile,
            processed=True,
            process_msg=processor_message,
            written=writer_info.written,
            write_msg=writer_info.write_msg
        )

def get_process_tiles(self, zoom=None):
    """
    Yield process tiles.

    Tiles intersecting with the input data bounding boxes as well as
    process bounds, if provided, are considered process tiles. This is to
    avoid iterating through empty tiles.

    Parameters
    ----------
    zoom : integer
        zoom level process tiles should be returned from; if none is given,
        return all process tiles

    yields
    ------
    BufferedTile objects
    """
    if zoom or zoom == 0:
        for tile in self.config.process_pyramid.tiles_from_geom(
            self.config.area_at_zoom(zoom), zoom
        ):
            yield tile
    else:
        for zoom in reversed(self.config.zoom_levels):
            for tile in self.config.process_pyramid.tiles_from_geom(
                self.config.area_at_zoom(zoom), zoom
            ):
                yield tile

def batch_process(
    self, zoom=None, tile=None, multi=cpu_count(), max_chunksize=1
):
    """
    Process a large batch of tiles.

    Parameters
    ----------
    zoom : list or int
        either single zoom level or list of minimum and maximum zoom level;
        None processes all (default: None)
    tile : tuple
        zoom, row and column of tile to be processed (cannot be used with
        zoom)
    multi : int
        number of workers (default: number of CPU cores)
    max_chunksize : int
        maximum number of process tiles to be queued for each worker
        (default: 1)
    """
    list(self.batch_processor(zoom, tile, multi, max_chunksize))

def batch_processor(
    self, zoom=None, tile=None, multi=cpu_count(), max_chunksize=1
):
    """
    Process a large batch of tiles and yield report messages per tile.

    Parameters
    ----------
    zoom : list or int
        either single zoom level or list of minimum and maximum zoom level;
        None processes all (default: None)
    tile : tuple
        zoom, row and column of tile to be processed (cannot be used with
        zoom)
    multi : int
        number of workers (default: number of CPU cores)
    max_chunksize : int
        maximum number of process tiles to be queued for each worker
        (default: 1)
    """
    if zoom and tile:
        raise ValueError("use either zoom or tile")

    # run single tile
    if tile:
        yield _run_on_single_tile(self, tile)
    # run concurrently
    elif multi > 1:
        for process_info in _run_with_multiprocessing(
            self, list(_get_zoom_level(zoom, self)), multi, max_chunksize
        ):
            yield process_info
    # run sequentially
    elif multi == 1:
        for process_info in _run_without_multiprocessing(
            self, list(_get_zoom_level(zoom, self))
        ):
            yield process_info

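# A minimal usage sketch, assuming mapchete is installed; the .mapchete file
# path, zoom range and worker count are arbitrary examples.
import mapchete

with mapchete.open("my_process.mapchete", mode="continue") as mp:
    # batch_processor() yields one ProcessInfo per finished tile.
    for process_info in mp.batch_processor(zoom=[0, 5], multi=4):
        print(process_info.tile.id, process_info.process_msg)
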
def count_tiles(self, minzoom, maxzoom, init_zoom=0):
    """
    Count number of tiles intersecting with process area.

    Parameters
    ----------
    minzoom : int
    maxzoom : int
    init_zoom : int

    Returns
    -------
    number of tiles
    """
    if (minzoom, maxzoom) not in self._count_tiles_cache:
        self._count_tiles_cache[(minzoom, maxzoom)] = count_tiles(
            self.config.area_at_zoom(),
            self.config.process_pyramid,
            minzoom,
            maxzoom,
            init_zoom=init_zoom
        )
    return self._count_tiles_cache[(minzoom, maxzoom)]

def execute(self, process_tile, raise_nodata=False):
    """
    Run the Mapchete process.

    Execute, write and return data.

    Parameters
    ----------
    process_tile : Tile or tile index tuple
        Member of the process tile pyramid (not necessarily the output
        pyramid, if output has a different metatiling setting)

    Returns
    -------
    data : NumPy array or features
        process output
    """
    if self.config.mode not in ["memory", "continue", "overwrite"]:
        raise ValueError("process mode must be memory, continue or overwrite")

    if isinstance(process_tile, tuple):
        process_tile = self.config.process_pyramid.tile(*process_tile)
    elif isinstance(process_tile, BufferedTile):
        pass
    else:
        raise TypeError("process_tile must be tuple or BufferedTile")

    if process_tile.zoom not in self.config.zoom_levels:
        return self.config.output.empty(process_tile)

    return self._execute(process_tile, raise_nodata=raise_nodata)

def read(self, output_tile):
    """
    Read from written process output.

    Parameters
    ----------
    output_tile : BufferedTile or tile index tuple
        Member of the output tile pyramid (not necessarily the process
        pyramid, if output has a different metatiling setting)

    Returns
    -------
    data : NumPy array or features
        process output
    """
    if self.config.mode not in ["readonly", "continue", "overwrite"]:
        raise ValueError("process mode must be readonly, continue or overwrite")

    if isinstance(output_tile, tuple):
        output_tile = self.config.output_pyramid.tile(*output_tile)
    elif isinstance(output_tile, BufferedTile):
        pass
    else:
        raise TypeError("output_tile must be tuple or BufferedTile")

    return self.config.output.read(output_tile)

def write(self, process_tile, data):
    """
    Write data into output format.

    Parameters
    ----------
    process_tile : BufferedTile or tile index tuple
        process tile
    data : NumPy array or features
        data to be written
    """
    if isinstance(process_tile, tuple):
        process_tile = self.config.process_pyramid.tile(*process_tile)
    elif not isinstance(process_tile, BufferedTile):
        raise ValueError("invalid process_tile type: %s" % type(process_tile))
    if self.config.mode not in ["continue", "overwrite"]:
        raise ValueError("cannot write output in current process mode")

    if self.config.mode == "continue" and (
        self.config.output.tiles_exist(process_tile)
    ):
        message = "output exists, not overwritten"
        logger.debug((process_tile.id, message))
        return ProcessInfo(
            tile=process_tile,
            processed=False,
            process_msg=None,
            written=False,
            write_msg=message
        )
    elif data is None:
        message = "output empty, nothing written"
        logger.debug((process_tile.id, message))
        return ProcessInfo(
            tile=process_tile,
            processed=False,
            process_msg=None,
            written=False,
            write_msg=message
        )
    else:
        with Timer() as t:
            self.config.output.write(process_tile=process_tile, data=data)
        message = "output written in %s" % t
        logger.debug((process_tile.id, message))
        return ProcessInfo(
            tile=process_tile,
            processed=False,
            process_msg=None,
            written=True,
            write_msg=message
        )

def get_raw_output(self, tile, _baselevel_readonly=False):
    """
    Get output raw data.

    This function won't work with multiprocessing, as it uses the
    ``threading.Lock()`` class.

    Parameters
    ----------
    tile : tuple, Tile or BufferedTile
        If a tile index is given, a tile from the output pyramid will be
        assumed. Tile cannot be bigger than process tile!

    Returns
    -------
    data : NumPy array or features
        process output
    """
    if not isinstance(tile, (BufferedTile, tuple)):
        raise TypeError("'tile' must be a tuple or BufferedTile")
    if isinstance(tile, tuple):
        tile = self.config.output_pyramid.tile(*tile)
    if _baselevel_readonly:
        tile = self.config.baselevels["tile_pyramid"].tile(*tile.id)

    # Return empty data if zoom level is outside of process zoom levels.
    if tile.zoom not in self.config.zoom_levels:
        return self.config.output.empty(tile)

    # TODO implement reprojection
    if tile.crs != self.config.process_pyramid.crs:
        raise NotImplementedError(
            "reprojection between processes not yet implemented"
        )

    if self.config.mode == "memory":
        # Determine affected process Tile and check whether it is already
        # cached.
        process_tile = self.config.process_pyramid.intersecting(tile)[0]
        return self._extract(
            in_tile=process_tile,
            in_data=self._execute_using_cache(process_tile),
            out_tile=tile
        )

    # TODO: cases where tile intersects with multiple process tiles
    process_tile = self.config.process_pyramid.intersecting(tile)[0]

    # get output_tiles that intersect with current tile
    if tile.pixelbuffer > self.config.output.pixelbuffer:
        output_tiles = list(self.config.output_pyramid.tiles_from_bounds(
            tile.bounds, tile.zoom
        ))
    else:
        output_tiles = self.config.output_pyramid.intersecting(tile)

    if self.config.mode == "readonly" or _baselevel_readonly:
        if self.config.output.tiles_exist(process_tile):
            return self._read_existing_output(tile, output_tiles)
        else:
            return self.config.output.empty(tile)
    elif self.config.mode == "continue" and not _baselevel_readonly:
        if self.config.output.tiles_exist(process_tile):
            return self._read_existing_output(tile, output_tiles)
        else:
            return self._process_and_overwrite_output(tile, process_tile)
    elif self.config.mode == "overwrite" and not _baselevel_readonly:
        return self._process_and_overwrite_output(tile, process_tile)

def _extract(self, in_tile=None, in_data=None, out_tile=None):
    """Extract data from tile."""
    return self.config.output.extract_subset(
        input_data_tiles=[(in_tile, in_data)],
        out_tile=out_tile
    )

def read(self, **kwargs):
    """
    Read existing output data from a previous run.

    Returns
    -------
    process output : NumPy array (raster) or feature iterator (vector)
    """
    if self.tile.pixelbuffer > self.config.output.pixelbuffer:
        output_tiles = list(self.config.output_pyramid.tiles_from_bounds(
            self.tile.bounds, self.tile.zoom
        ))
    else:
        output_tiles = self.config.output_pyramid.intersecting(self.tile)
    return self.config.output.extract_subset(
        input_data_tiles=[
            (output_tile, self.config.output.read(output_tile))
            for output_tile in output_tiles
        ],
        out_tile=self.tile,
    )

def open(self, input_id, **kwargs):
    """
    Open input data.

    Parameters
    ----------
    input_id : string
        input identifier from configuration file or file path
    kwargs : driver specific parameters (e.g. resampling)

    Returns
    -------
    tiled input data : InputTile
        reprojected input data within tile
    """
    if not isinstance(input_id, str):
        return input_id.open(self.tile, **kwargs)
    if input_id not in self.params["input"]:
        raise ValueError("%s not found in config as input file" % input_id)
    return self.params["input"][input_id].open(self.tile, **kwargs)

def hillshade(
    self, elevation, azimuth=315.0, altitude=45.0, z=1.0, scale=1.0
):
    """
    Calculate hillshading from elevation data.

    Parameters
    ----------
    elevation : array
        input elevation data
    azimuth : float
        horizontal angle of light source (315: North-West)
    altitude : float
        vertical angle of light source (90 would result in slope shading)
    z : float
        vertical exaggeration factor
    scale : float
        scale factor of pixel size units versus height units (insert 112000
        when having elevation values in meters in a geodetic projection)

    Returns
    -------
    hillshade : array
    """
    return commons_hillshade.hillshade(
        elevation, self, azimuth, altitude, z, scale)

def contours(
    self, elevation, interval=100, field='elev', base=0
):
    """
    Extract contour lines from elevation data.

    Parameters
    ----------
    elevation : array
        input elevation data
    interval : integer
        elevation value interval when drawing contour lines
    field : string
        output field name containing elevation value
    base : integer
        elevation base value the intervals are computed from

    Returns
    -------
    contours : iterable
        contours as GeoJSON-like pairs of properties and geometry
    """
    return commons_contours.extract_contours(
        elevation, self.tile, interval=interval, field=field, base=base)

def clip(
    self, array, geometries, inverted=False, clip_buffer=0
):
    """
    Clip array by geometry.

    Parameters
    ----------
    array : array
        raster data to be clipped
    geometries : iterable
        geometries used to clip source array
    inverted : bool
        invert clipping (default: False)
    clip_buffer : int
        buffer (in pixels) geometries before applying clip

    Returns
    -------
    clipped array : array
    """
    return commons_clip.clip_array_with_vector(
        array, self.tile.affine, geometries,
        inverted=inverted, clip_buffer=clip_buffer * self.tile.pixel_x_size)

def clip_array_with_vector(
    array, array_affine, geometries, inverted=False, clip_buffer=0
):
    """
    Clip input array with a vector list.

    Parameters
    ----------
    array : array
        input raster data
    array_affine : Affine
        Affine object describing the raster's geolocation
    geometries : iterable
        iterable of dictionaries, where every entry has a 'geometry' and
        'properties' key.
    inverted : bool
        invert clip (default: False)
    clip_buffer : integer
        buffer (in pixels) geometries before clipping

    Returns
    -------
    clipped array : array
    """
    # buffer input geometries and clean up
    buffered_geometries = []
    for feature in geometries:
        feature_geom = to_shape(feature["geometry"])
        if feature_geom.is_empty:
            continue
        if feature_geom.geom_type == "GeometryCollection":
            # for GeometryCollections apply buffer to every subgeometry
            # and make union
            buffered_geom = unary_union([
                g.buffer(clip_buffer) for g in feature_geom])
        else:
            buffered_geom = feature_geom.buffer(clip_buffer)
        if not buffered_geom.is_empty:
            buffered_geometries.append(buffered_geom)

    # mask raster by buffered geometries
    if buffered_geometries:
        if array.ndim == 2:
            return ma.masked_array(
                array, geometry_mask(
                    buffered_geometries, array.shape, array_affine,
                    invert=inverted))
        elif array.ndim == 3:
            mask = geometry_mask(
                buffered_geometries, (array.shape[1], array.shape[2]),
                array_affine, invert=inverted)
            # np.stack requires a sequence, not a generator
            return ma.masked_array(
                array, mask=np.stack([mask for band in array]))

    # if no geometries, return unmasked array
    else:
        fill = False if inverted else True
        return ma.masked_array(
            array, mask=np.full(array.shape, fill, dtype=bool))

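# A minimal usage sketch, assuming numpy, affine, shapely and mapchete are
# installed; the import path of clip_array_with_vector is an assumption.
import numpy as np
from affine import Affine
from shapely.geometry import box, mapping
from mapchete.commons.clip import clip_array_with_vector

# 10x10 single-band raster anchored at (0, 10) with 1x1 unit pixels.
array = np.ones((10, 10))
array_affine = Affine(1.0, 0.0, 0.0, 0.0, -1.0, 10.0)

# One GeoJSON-like feature covering the lower-left quarter of the raster.
features = [dict(geometry=mapping(box(0, 0, 5, 5)), properties={})]

clipped = clip_array_with_vector(array, array_affine, features)
print(clipped.mask.sum())  # pixels outside the geometry are masked
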
def pyramid(
    input_raster, output_dir, pyramid_type=None, output_format=None,
    resampling_method=None, scale_method=None, zoom=None, bounds=None,
    overwrite=False, debug=False
):
    """Create tile pyramid out of input raster."""
    bounds = bounds if bounds else None
    options = dict(
        pyramid_type=pyramid_type,
        scale_method=scale_method,
        output_format=output_format,
        resampling=resampling_method,
        zoom=zoom,
        bounds=bounds,
        overwrite=overwrite
    )
    raster2pyramid(input_raster, output_dir, options)

def raster2pyramid(input_file, output_dir, options):
    """Create a tile pyramid out of an input raster dataset."""
    pyramid_type = options["pyramid_type"]
    scale_method = options["scale_method"]
    output_format = options["output_format"]
    resampling = options["resampling"]
    zoom = options["zoom"]
    bounds = options["bounds"]
    mode = "overwrite" if options["overwrite"] else "continue"

    # Prepare process parameters
    minzoom, maxzoom = _get_zoom(zoom, input_file, pyramid_type)
    with rasterio.open(input_file, "r") as input_raster:
        output_bands = input_raster.count
        input_dtype = input_raster.dtypes[0]
        output_dtype = input_raster.dtypes[0]
        nodataval = input_raster.nodatavals[0]
        nodataval = nodataval if nodataval else 0
        if output_format == "PNG" and output_bands > 3:
            output_bands = 3
            output_dtype = 'uint8'
        scales_minmax = ()
        if scale_method == "dtype_scale":
            for index in range(1, output_bands + 1):
                scales_minmax += (DTYPE_RANGES[input_dtype], )
        elif scale_method == "minmax_scale":
            for index in range(1, output_bands + 1):
                band = input_raster.read(index)
                scales_minmax += ((band.min(), band.max()), )
        elif scale_method == "crop":
            for index in range(1, output_bands + 1):
                scales_minmax += ((0, 255), )
        if input_dtype == "uint8":
            scale_method = None
            scales_minmax = ()
            for index in range(1, output_bands + 1):
                scales_minmax += ((None, None), )

    # Create configuration
    config = dict(
        process="mapchete.processes.pyramid.tilify",
        output={
            "path": output_dir,
            "format": output_format,
            "bands": output_bands,
            "dtype": output_dtype
        },
        pyramid=dict(pixelbuffer=5, grid=pyramid_type),
        scale_method=scale_method,
        scales_minmax=scales_minmax,
        input={"raster": input_file},
        config_dir=os.getcwd(),
        zoom_levels=dict(min=minzoom, max=maxzoom),
        nodataval=nodataval,
        resampling=resampling,
        bounds=bounds,
        baselevel={"zoom": maxzoom, "resampling": resampling},
        mode=mode
    )

    # create process
    with mapchete.open(config, zoom=zoom, bounds=bounds) as mp:
        # prepare output directory
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # run process
        mp.batch_process(zoom=[minzoom, maxzoom])

def _get_zoom(zoom, input_raster, pyramid_type):
    """Determine minimum and maximum zoom level."""
    if not zoom:
        minzoom = 1
        maxzoom = get_best_zoom_level(input_raster, pyramid_type)
    elif len(zoom) == 1:
        minzoom = zoom[0]
        maxzoom = zoom[0]
    elif len(zoom) == 2:
        if zoom[0] < zoom[1]:
            minzoom = zoom[0]
            maxzoom = zoom[1]
        else:
            minzoom = zoom[1]
            maxzoom = zoom[0]
    return minzoom, maxzoom

def validate_values(config, values):
    """
    Validate whether value is found in config and has the right type.

    Parameters
    ----------
    config : dict
        configuration dictionary
    values : list
        list of (str, type) tuples of values and value types expected in
        config

    Returns
    -------
    True if config is valid.

    Raises
    ------
    Exception if value is not found or has the wrong type.
    """
    if not isinstance(config, dict):
        raise TypeError("config must be a dictionary")
    for value, vtype in values:
        if value not in config:
            raise ValueError("%s not given" % value)
        if not isinstance(config[value], vtype):
            raise TypeError("%s must be %s" % (value, vtype))
    return True

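# A minimal usage sketch; the config dictionary is an arbitrary example and
# validate_values is assumed to be importable from the surrounding module.
config = {"schema": {"properties": {}}, "path": "/tmp/out"}

validate_values(config, [("schema", dict), ("path", str)])  # returns True
# validate_values(config, [("bands", int)])  # would raise ValueError
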
def get_hash(x):
    """Return hash of x."""
    if isinstance(x, str):
        return hash(x)
    elif isinstance(x, dict):
        return hash(yaml.dump(x))

def get_zoom_levels(process_zoom_levels=None, init_zoom_levels=None):
    """Validate and return zoom levels."""
    process_zoom_levels = _validate_zooms(process_zoom_levels)
    if init_zoom_levels is None:
        return process_zoom_levels
    else:
        init_zoom_levels = _validate_zooms(init_zoom_levels)
        if not set(init_zoom_levels).issubset(set(process_zoom_levels)):
            raise MapcheteConfigError(
                "init zooms must be a subset of process zoom")
        return init_zoom_levels

def snap_bounds(bounds=None, pyramid=None, zoom=None):
    """
    Snap bounds to tile boundaries of a specific zoom level.

    Parameters
    ----------
    bounds : bounds to be snapped
    pyramid : TilePyramid
    zoom : int

    Returns
    -------
    Bounds(left, bottom, right, top)
    """
    if not isinstance(bounds, (tuple, list)):
        raise TypeError("bounds must be either a tuple or a list")
    if len(bounds) != 4:
        raise ValueError("bounds has to have exactly four values")
    if not isinstance(pyramid, BufferedTilePyramid):
        raise TypeError("pyramid has to be a BufferedTilePyramid")

    bounds = Bounds(*bounds)
    lb = pyramid.tile_from_xy(bounds.left, bounds.bottom, zoom, on_edge_use="rt").bounds
    rt = pyramid.tile_from_xy(bounds.right, bounds.top, zoom, on_edge_use="lb").bounds
    return Bounds(lb.left, lb.bottom, rt.right, rt.top)

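# A minimal usage sketch, assuming mapchete is installed; the import paths,
# bounds and zoom level are assumptions for illustration.
from mapchete.config import snap_bounds
from mapchete.tile import BufferedTilePyramid

# Grow arbitrary bounds outwards to the zoom 3 tile grid of a geodetic pyramid.
print(snap_bounds(
    bounds=(12.1, 48.3, 12.9, 49.7),
    pyramid=BufferedTilePyramid("geodetic"),
    zoom=3
))
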
def clip_bounds(bounds=None, clip=None):
    """
    Clip bounds by clip.

    Parameters
    ----------
    bounds : bounds to be clipped
    clip : clip bounds

    Returns
    -------
    Bounds(left, bottom, right, top)
    """
    bounds = Bounds(*bounds)
    clip = Bounds(*clip)
    return Bounds(
        max(bounds.left, clip.left),
        max(bounds.bottom, clip.bottom),
        min(bounds.right, clip.right),
        min(bounds.top, clip.top)
    )

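# A minimal usage sketch; clip_bounds is assumed to be importable from the
# surrounding module, and the values are arbitrary examples.
# Intersect process bounds with valid geodetic bounds:
print(clip_bounds(bounds=(-200.0, -10.0, 30.0, 95.0), clip=(-180.0, -90.0, 180.0, 90.0)))
# -> Bounds(left=-180.0, bottom=-10.0, right=30.0, top=90.0)
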
def raw_conf_process_pyramid(raw_conf):
    """
    Load the process pyramid of a raw configuration.

    Parameters
    ----------
    raw_conf : dict
        Raw mapchete configuration as dictionary.

    Returns
    -------
    BufferedTilePyramid
    """
    return BufferedTilePyramid(
        raw_conf["pyramid"]["grid"],
        metatiling=raw_conf["pyramid"].get("metatiling", 1),
        pixelbuffer=raw_conf["pyramid"].get("pixelbuffer", 0)
    )

def bounds_from_opts(
    wkt_geometry=None, point=None, bounds=None, zoom=None, raw_conf=None
):
    """
    Derive process bounds from command line options.

    Parameters
    ----------
    wkt_geometry : str
        WKT geometry whose bounds are used if given.
    point : tuple
        x, y coordinates; the bounds of the tile covering this point at the
        maximum initialized zoom level are used.
    bounds : tuple
        left, bottom, right, top bounds used as fallback.
    zoom : list or int
        zoom levels to initialize.
    raw_conf : dict
        Raw mapchete configuration as dictionary.

    Returns
    -------
    bounds : tuple
    """
    if wkt_geometry:
        return wkt.loads(wkt_geometry).bounds
    elif point:
        x, y = point
        zoom_levels = get_zoom_levels(
            process_zoom_levels=raw_conf["zoom_levels"],
            init_zoom_levels=zoom
        )
        tp = raw_conf_process_pyramid(raw_conf)
        return tp.tile_from_xy(x, y, max(zoom_levels)).bounds
    else:
        return bounds

def _validate_zooms(zooms):
    """
    Return a list of zoom levels.

    Following inputs are converted:
    - int --> [int]
    - dict{min, max} --> range(min, max + 1)
    - [int] --> [int]
    - [int, int] --> range(smaller int, bigger int + 1)
    """
    if isinstance(zooms, dict):
        if any([a not in zooms for a in ["min", "max"]]):
            raise MapcheteConfigError("min and max zoom required")
        zmin = _validate_zoom(zooms["min"])
        zmax = _validate_zoom(zooms["max"])
        if zmin > zmax:
            raise MapcheteConfigError(
                "max zoom must not be smaller than min zoom")
        return list(range(zmin, zmax + 1))
    elif isinstance(zooms, list):
        if len(zooms) == 1:
            return zooms
        elif len(zooms) == 2:
            zmin, zmax = sorted([_validate_zoom(z) for z in zooms])
            return list(range(zmin, zmax + 1))
        else:
            return zooms
    else:
        return [_validate_zoom(zooms)]

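# A minimal sketch of the conversions listed above; calling the private
# helper directly is for illustration only.
assert _validate_zooms(5) == [5]
assert _validate_zooms([5]) == [5]
assert _validate_zooms([7, 5]) == [5, 6, 7]
assert _validate_zooms({"min": 3, "max": 5}) == [3, 4, 5]
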
def _raw_at_zoom(config, zooms):
    """Return parameter dictionary per zoom level."""
    params_per_zoom = {}
    for zoom in zooms:
        params = {}
        for name, element in config.items():
            if name not in _RESERVED_PARAMETERS:
                out_element = _element_at_zoom(name, element, zoom)
                if out_element is not None:
                    params[name] = out_element
        params_per_zoom[zoom] = params
    return params_per_zoom

def _element_at_zoom(name, element, zoom):
    """
    Return the element filtered by zoom level.

    - An input integer or float gets returned as is.
    - An input string is checked whether it starts with "zoom". Then, the
      provided zoom level gets parsed and compared with the actual zoom
      level. If zoom levels match, the element gets returned.

    TODOs/gotchas:
    - Elements are unordered, which can lead to unexpected results when
      defining the YAML config.
    - Provided zoom levels for one element in config file are not allowed
      to "overlap", i.e. there is not yet a decision mechanism implemented
      which handles this case.
    """
    # If element is a dictionary, analyze subitems.
    if isinstance(element, dict):
        if "format" in element:
            # we have an input or output driver here
            return element
        out_elements = {}
        for sub_name, sub_element in element.items():
            out_element = _element_at_zoom(sub_name, sub_element, zoom)
            if name == "input":
                out_elements[sub_name] = out_element
            elif out_element is not None:
                out_elements[sub_name] = out_element
        # If there is only one subelement, collapse unless it is
        # input. In such case, return a dictionary.
        if len(out_elements) == 1 and name != "input":
            return next(iter(out_elements.values()))
        # If subelement is empty, return None
        if len(out_elements) == 0:
            return None
        return out_elements
    # If element is a zoom level statement, filter element.
    elif isinstance(name, str):
        if name.startswith("zoom"):
            return _filter_by_zoom(
                conf_string=name.strip("zoom").strip(), zoom=zoom,
                element=element)
        # If element is a string but not a zoom level statement, return
        # element.
        else:
            return element
    # Return all other types as they are.
    else:
        return element

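# A minimal sketch of zoom-dependent filtering: a configuration parameter can
# carry per-zoom values which _element_at_zoom() resolves for a given zoom.
# Calling the private helper directly is for illustration only.
element = {"zoom<=10": 0.5, "zoom>10": 0.1}
assert _element_at_zoom("some_param", element, 8) == 0.5
assert _element_at_zoom("some_param", element, 12) == 0.1
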
def _filter_by_zoom(element=None, conf_string=None, zoom=None):
    """Return element only if zoom condition matches with config string."""
    for op_str, op_func in [
        # order of operators is important:
        # prematurely return in cases of "<=" or ">=", otherwise
        # _strip_zoom() cannot parse config strings starting with "<"
        # or ">"
        ("=", operator.eq),
        ("<=", operator.le),
        (">=", operator.ge),
        ("<", operator.lt),
        (">", operator.gt),
    ]:
        if conf_string.startswith(op_str):
            return element if op_func(zoom, _strip_zoom(conf_string, op_str)) else None

def _strip_zoom(input_string, strip_string):
    """Return zoom level as integer or throw error."""
    try:
        return int(input_string.strip(strip_string))
    except Exception as e:
        raise MapcheteConfigError("zoom level could not be determined: %s" % e)

def _flatten_tree(tree, old_path=None):
    """Flatten dict tree into dictionary where keys are paths of old dict."""
    flat_tree = []
    for key, value in tree.items():
        new_path = "/".join([old_path, key]) if old_path else key
        if isinstance(value, dict) and "format" not in value:
            flat_tree.extend(_flatten_tree(value, old_path=new_path))
        else:
            flat_tree.append((new_path, value))
    return flat_tree

def _unflatten_tree(flat):
    """Reverse tree flattening."""
    tree = {}
    for key, value in flat.items():
        path = key.split("/")
        # we are at the end of a branch
        if len(path) == 1:
            tree[key] = value
        # there are more branches
        else:
            # create new dict
            if not path[0] in tree:
                tree[path[0]] = _unflatten_tree({"/".join(path[1:]): value})
            # add keys to existing dict
            else:
                branch = _unflatten_tree({"/".join(path[1:]): value})
                if not path[1] in tree[path[0]]:
                    tree[path[0]][path[1]] = branch[path[1]]
                else:
                    tree[path[0]][path[1]].update(branch[path[1]])
    return tree

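# A minimal sketch of the flatten/unflatten round trip on the two private
# helpers above; the nested input dictionary is an arbitrary example.
tree = {"group": {"file1": "a.tif", "file2": "b.tif"}, "single": "c.tif"}
flat = dict(_flatten_tree(tree))
# {'group/file1': 'a.tif', 'group/file2': 'b.tif', 'single': 'c.tif'}
assert _unflatten_tree(flat) == tree
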
def bounds(self):
    """Process bounds as defined in the configuration."""
    if self._raw["bounds"] is None:
        return self.process_pyramid.bounds
    else:
        return Bounds(*_validate_bounds(self._raw["bounds"]))

def init_bounds(self):
    """
    Process bounds this process is currently initialized with.

    This gets triggered by using the ``init_bounds`` kwarg. If not set, it
    will be equal to self.bounds.
    """
    if self._raw["init_bounds"] is None:
        return self.bounds
    else:
        return Bounds(*_validate_bounds(self._raw["init_bounds"]))

def effective_bounds(self):
    """
    Effective process bounds required to initialize inputs.

    Process bounds sometimes have to be larger, because all intersecting
    process tiles have to be covered as well.
    """
    return snap_bounds(
        bounds=clip_bounds(bounds=self.init_bounds, clip=self.process_pyramid.bounds),
        pyramid=self.process_pyramid,
        zoom=min(
            self.baselevels["zooms"]
        ) if self.baselevels else min(
            self.init_zoom_levels
        )
    )

def output(self):
    """Output object of driver."""
    output_params = dict(
        self._raw["output"],
        grid=self.output_pyramid.grid,
        pixelbuffer=self.output_pyramid.pixelbuffer,
        metatiling=self.output_pyramid.metatiling
    )
    if "path" in output_params:
        output_params.update(
            path=absolute_path(path=output_params["path"], base_dir=self.config_dir)
        )

    if "format" not in output_params:
        raise MapcheteConfigError("output format not specified")

    if output_params["format"] not in available_output_formats():
        raise MapcheteConfigError(
            "format %s not available in %s" % (
                output_params["format"], str(available_output_formats())
            )
        )

    writer = load_output_writer(output_params)
    try:
        writer.is_valid_with_config(output_params)
    except Exception as e:
        logger.exception(e)
        raise MapcheteConfigError(
            "driver %s not compatible with configuration: %s" % (
                writer.METADATA["driver_name"], e
            )
        )
    return writer

def input(self):
    """
    Input items used for process stored in a dictionary.

    Keys are the hashes of the input parameters, values the respective
    InputData classes.
    """
    # the delimiters are used by some input drivers
    delimiters = dict(
        zoom=self.init_zoom_levels,
        bounds=self.init_bounds,
        process_bounds=self.bounds,
        effective_bounds=self.effective_bounds
    )

    # get input items only of initialized zoom levels
    raw_inputs = {
        # convert input definition to hash
        get_hash(v): v
        for zoom in self.init_zoom_levels
        if "input" in self._params_at_zoom[zoom]
        # to preserve file groups, "flatten" the input tree and use
        # the tree paths as keys
        for key, v in _flatten_tree(self._params_at_zoom[zoom]["input"])
        if v is not None
    }

    initialized_inputs = {}
    for k, v in raw_inputs.items():
        # for files and tile directories
        if isinstance(v, str):
            logger.debug("load input reader for simple input %s", v)
            try:
                reader = load_input_reader(
                    dict(
                        path=absolute_path(path=v, base_dir=self.config_dir),
                        pyramid=self.process_pyramid,
                        pixelbuffer=self.process_pyramid.pixelbuffer,
                        delimiters=delimiters
                    ),
                    readonly=self.mode == "readonly")
            except Exception as e:
                logger.exception(e)
                raise MapcheteDriverError("error when loading input %s: %s" % (v, e))
            logger.debug("input reader for simple input %s is %s", v, reader)

        # for abstract inputs
        elif isinstance(v, dict):
            logger.debug("load input reader for abstract input %s", v)
            try:
                reader = load_input_reader(
                    dict(
                        abstract=deepcopy(v),
                        pyramid=self.process_pyramid,
                        pixelbuffer=self.process_pyramid.pixelbuffer,
                        delimiters=delimiters,
                        conf_dir=self.config_dir
                    ),
                    readonly=self.mode == "readonly")
            except Exception as e:
                logger.exception(e)
                raise MapcheteDriverError("error when loading input %s: %s" % (v, e))
            logger.debug("input reader for abstract input %s is %s", v, reader)
        else:
            raise MapcheteConfigError("invalid input type %s", type(v))

        # trigger bbox creation
        reader.bbox(out_crs=self.process_pyramid.crs)
        initialized_inputs[k] = reader

    return initialized_inputs

def baselevels(self):
    """
    Optional baselevels configuration.

    baselevels:
        min: <zoom>
        max: <zoom>
        lower: <resampling method>
        higher: <resampling method>
    """
    if "baselevels" not in self._raw:
        return {}
    baselevels = self._raw["baselevels"]
    minmax = {k: v for k, v in baselevels.items() if k in ["min", "max"]}

    if not minmax:
        raise MapcheteConfigError("no min and max values given for baselevels")
    for v in minmax.values():
        if not isinstance(v, int) or v < 0:
            raise MapcheteConfigError(
                "invalid baselevel zoom parameter given: %s" % minmax.values()
            )

    zooms = list(range(
        minmax.get("min", min(self.zoom_levels)),
        minmax.get("max", max(self.zoom_levels)) + 1)
    )

    if not set(self.zoom_levels).difference(set(zooms)):
        raise MapcheteConfigError("baselevels zooms fully cover process zooms")

    return dict(
        zooms=zooms,
        lower=baselevels.get("lower", "nearest"),
        higher=baselevels.get("higher", "nearest"),
        tile_pyramid=BufferedTilePyramid(
            self.output_pyramid.grid,
            pixelbuffer=self.output_pyramid.pixelbuffer,
            metatiling=self.process_pyramid.metatiling
        )
    )

def params_at_zoom(self, zoom):
    """
    Return configuration parameters snapshot for zoom as dictionary.

    Parameters
    ----------
    zoom : int
        zoom level

    Returns
    -------
    configuration snapshot : dictionary
        zoom level dependent process configuration
    """
    if zoom not in self.init_zoom_levels:
        raise ValueError(
            "zoom level not available with current configuration")
    out = dict(self._params_at_zoom[zoom], input={}, output=self.output)
    if "input" in self._params_at_zoom[zoom]:
        flat_inputs = {}
        for k, v in _flatten_tree(self._params_at_zoom[zoom]["input"]):
            if v is None:
                flat_inputs[k] = None
            else:
                flat_inputs[k] = self.input[get_hash(v)]
        out["input"] = _unflatten_tree(flat_inputs)
    else:
        out["input"] = {}
    return out

def area_at_zoom(self, zoom=None):
    """
    Return process bounding box for zoom level.

    Parameters
    ----------
    zoom : int or None
        if None, the union of all zoom level areas is returned

    Returns
    -------
    process area : shapely geometry
    """
    if zoom is None:
        if not self._cache_full_process_area:
            logger.debug("calculate process area ...")
            self._cache_full_process_area = cascaded_union([
                self._area_at_zoom(z) for z in self.init_zoom_levels]
            ).buffer(0)
        return self._cache_full_process_area
    else:
        if zoom not in self.init_zoom_levels:
            raise ValueError(
                "zoom level not available with current configuration")
        return self._area_at_zoom(zoom)

def bounds_at_zoom(self, zoom=None):
    """
    Return process bounds for zoom level.

    Parameters
    ----------
    zoom : integer or list

    Returns
    -------
    process bounds : tuple
        left, bottom, right, top
    """
    return () if self.area_at_zoom(zoom).is_empty else Bounds(
        *self.area_at_zoom(zoom).bounds)

def process_file(self):
    """Deprecated."""
    warnings.warn(DeprecationWarning("'self.process_file' is deprecated"))
    return os.path.join(self._raw["config_dir"], self._raw["process"])

def zoom_index_gen(
    mp=None,
    out_dir=None,
    zoom=None,
    geojson=False,
    gpkg=False,
    shapefile=False,
    txt=False,
    vrt=False,
    fieldname="location",
    basepath=None,
    for_gdal=True,
    threading=False,
):
    """
    Generate indexes for given zoom level.

    Parameters
    ----------
    mp : Mapchete object
        process output to be indexed
    out_dir : path
        optionally override process output directory
    zoom : int
        zoom level to be processed
    geojson : bool
        generate GeoJSON index (default: False)
    gpkg : bool
        generate GeoPackage index (default: False)
    shapefile : bool
        generate Shapefile index (default: False)
    txt : bool
        generate tile path list textfile (default: False)
    vrt : bool
        GDAL-style VRT file (default: False)
    fieldname : str
        field name which contains paths of tiles (default: "location")
    basepath : str
        if set, use custom base path instead of output path
    for_gdal : bool
        use GDAL compatible remote paths, i.e. add "/vsicurl/" before path
        (default: True)
    """
    for zoom in get_zoom_levels(process_zoom_levels=zoom):
        with ExitStack() as es:
            # get index writers for all enabled formats
            index_writers = []
            if geojson:
                index_writers.append(
                    es.enter_context(
                        VectorFileWriter(
                            driver="GeoJSON",
                            out_path=_index_file_path(out_dir, zoom, "geojson"),
                            crs=mp.config.output_pyramid.crs,
                            fieldname=fieldname
                        )
                    )
                )
            if gpkg:
                index_writers.append(
                    es.enter_context(
                        VectorFileWriter(
                            driver="GPKG",
                            out_path=_index_file_path(out_dir, zoom, "gpkg"),
                            crs=mp.config.output_pyramid.crs,
                            fieldname=fieldname
                        )
                    )
                )
            if shapefile:
                index_writers.append(
                    es.enter_context(
                        VectorFileWriter(
                            driver="ESRI Shapefile",
                            out_path=_index_file_path(out_dir, zoom, "shp"),
                            crs=mp.config.output_pyramid.crs,
                            fieldname=fieldname
                        )
                    )
                )
            if txt:
                index_writers.append(
                    es.enter_context(
                        TextFileWriter(out_path=_index_file_path(out_dir, zoom, "txt"))
                    )
                )
            if vrt:
                index_writers.append(
                    es.enter_context(
                        VRTFileWriter(
                            out_path=_index_file_path(out_dir, zoom, "vrt"),
                            output=mp.config.output,
                            out_pyramid=mp.config.output_pyramid
                        )
                    )
                )

            logger.debug("use the following index writers: %s", index_writers)

            def _worker(tile):
                # if there are indexes to write to, check if output exists
                tile_path = _tile_path(
                    orig_path=mp.config.output.get_path(tile),
                    basepath=basepath,
                    for_gdal=for_gdal
                )
                indexes = [
                    i for i in index_writers
                    if not i.entry_exists(tile=tile, path=tile_path)
                ]
                if indexes:
                    output_exists = mp.config.output.tiles_exist(output_tile=tile)
                else:
                    output_exists = None
                return tile, tile_path, indexes, output_exists

            with concurrent.futures.ThreadPoolExecutor() as executor:
                for task in concurrent.futures.as_completed(
                    (
                        executor.submit(_worker, i)
                        for i in mp.config.output_pyramid.tiles_from_geom(
                            mp.config.area_at_zoom(zoom), zoom
                        )
                    )
                ):
                    tile, tile_path, indexes, output_exists = task.result()
                    # only write entries if there are indexes to write to and
                    # output exists
                    if indexes and output_exists:
                        logger.debug("%s exists", tile_path)
                        logger.debug("write to %s indexes" % len(indexes))
                        for index in indexes:
                            index.write(tile, tile_path)
                    # yield tile for progress information
                    yield tile

def get_segmentize_value(input_file=None, tile_pyramid=None):
    """
    Return the recommended segmentation value in input file units.

    It is calculated by multiplying the raster pixel size with the tile
    shape in pixels.

    Parameters
    ----------
    input_file : str
        location of a file readable by rasterio
    tile_pyramid : ``TilePyramid`` or ``BufferedTilePyramid``
        tile pyramid to estimate target tile size

    Returns
    -------
    segmentize value : float
        suggested length of line segmentation to reproject file bounds
    """
    warnings.warn(DeprecationWarning("get_segmentize_value() has moved to mapchete.io"))
    return io.get_segmentize_value(input_file, tile_pyramid)

def profile(self):
    """Return raster metadata."""
    with rasterio.open(self.path, "r") as src:
        return deepcopy(src.meta)

def bbox(self, out_crs=None):
    """
    Return data bounding box.

    Parameters
    ----------
    out_crs : ``rasterio.crs.CRS``
        rasterio CRS object (default: CRS of process pyramid)

    Returns
    -------
    bounding box : geometry
        Shapely geometry object
    """
    out_crs = self.pyramid.crs if out_crs is None else out_crs
    with rasterio.open(self.path) as inp:
        inp_crs = inp.crs
        out_bbox = bbox = box(*inp.bounds)
    # If source and target CRSes differ, segmentize and reproject
    if inp_crs != out_crs:
        # estimate segmentize value (raster pixel size * tile size)
        # and get reprojected bounding box
        return reproject_geometry(
            segmentize_geometry(
                bbox, inp.transform[0] * self.pyramid.tile_size
            ),
            src_crs=inp_crs, dst_crs=out_crs
        )
    else:
        return out_bbox

def read(self, indexes=None, **kwargs):
    """
    Read reprojected & resampled input data.

    Returns
    -------
    data : array
    """
    return read_raster_window(
        self.raster_file.path,
        self.tile,
        indexes=self._get_band_indexes(indexes),
        resampling=self.resampling,
        gdal_opts=self.gdal_opts
    )

def is_empty(self, indexes=None):
    """
    Check if there is data within this tile.

    Returns
    -------
    is empty : bool
    """
    # empty if tile does not intersect with file bounding box
    return not self.tile.bbox.intersects(
        self.raster_file.bbox(out_crs=self.tile.crs)
    )

def _get_band_indexes(self, indexes=None):
    """Return valid band indexes."""
    if indexes:
        if isinstance(indexes, list):
            return indexes
        else:
            return [indexes]
    else:
        return range(1, self.raster_file.profile["count"] + 1)

def execute(mp):
    """
    Example process for testing.

    Inputs:
    -------
    file1
        raster file

    Parameters:
    -----------

    Output:
    -------
    np.ndarray
    """
    # Reading and writing data works like this:
    with mp.open("file1", resampling="bilinear") as raster_file:
        if raster_file.is_empty():
            return "empty"
            # This assures a transparent tile instead of a pink error tile
            # is returned when using mapchete serve.
        dem = raster_file.read()
    return dem

def read(self, output_tile, **kwargs):
    """
    Read existing process output.

    Parameters
    ----------
    output_tile : ``BufferedTile``
        must be member of output ``TilePyramid``

    Returns
    -------
    process output : list
    """
    path = self.get_path(output_tile)
    try:
        with fiona.open(path, "r") as src:
            return list(src)
    except DriverError as e:
        for i in ("does not exist in the file system", "No such file or directory"):
            if i in str(e):
                return self.empty(output_tile)
        else:
            raise

def write(self, process_tile, data):
    """
    Write data from process tiles into GeoJSON file(s).

    Parameters
    ----------
    process_tile : ``BufferedTile``
        must be member of process ``TilePyramid``
    """
    if data is None or len(data) == 0:
        return
    if not isinstance(data, (list, types.GeneratorType)):
        raise TypeError(
            "GeoJSON driver data has to be a list or generator of GeoJSON objects"
        )

    data = list(data)
    if not len(data):
        logger.debug("no features to write")
    else:
        # in case of S3 output, create a boto3 resource
        bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None

        # Convert from process_tile to output_tiles
        for tile in self.pyramid.intersecting(process_tile):
            out_path = self.get_path(tile)
            self.prepare_path(tile)
            out_tile = BufferedTile(tile, self.pixelbuffer)
            write_vector_window(
                in_data=data,
                out_schema=self.output_params["schema"],
                out_tile=out_tile,
                out_path=out_path,
                bucket_resource=bucket_resource
            )

def is_valid_with_config(self, config):
    """
    Check if output format is valid with other process parameters.

    Parameters
    ----------
    config : dictionary
        output configuration parameters

    Returns
    -------
    is_valid : bool
    """
    validate_values(config, [("schema", dict), ("path", str)])
    validate_values(config["schema"], [("properties", dict), ("geometry", str)])
    if config["schema"]["geometry"] not in [
        "Geometry", "Point", "MultiPoint", "Line", "MultiLine",
        "Polygon", "MultiPolygon"
    ]:
        raise TypeError("invalid geometry type")
    return True

def read(self, validity_check=True, no_neighbors=False, **kwargs):
    """
    Read data from process output.

    Parameters
    ----------
    validity_check : bool
        run geometry validity check (default: True)
    no_neighbors : bool
        don't include neighbor tiles if there is a pixelbuffer (default:
        False)

    Returns
    -------
    features : list
        GeoJSON-like list of features
    """
    if no_neighbors:
        raise NotImplementedError()
    return self._from_cache(validity_check=validity_check)

def execute(
    mapchete_files,
    zoom=None,
    bounds=None,
    point=None,
    wkt_geometry=None,
    tile=None,
    overwrite=False,
    multi=None,
    input_file=None,
    logfile=None,
    verbose=False,
    no_pbar=False,
    debug=False,
    max_chunksize=None,
    vrt=False,
    idx_out_dir=None
):
    """Execute a Mapchete process."""
    multi = multi if multi else cpu_count()
    mode = "overwrite" if overwrite else "continue"

    # send verbose messages to /dev/null if not activated
    if debug or not verbose:
        verbose_dst = open(os.devnull, 'w')
    else:
        verbose_dst = sys.stdout

    for mapchete_file in mapchete_files:
        tqdm.tqdm.write("preparing to process %s" % mapchete_file, file=verbose_dst)

        with click_spinner.spinner(disable=debug) as spinner:
            # process single tile
            if tile:
                tile = raw_conf_process_pyramid(raw_conf(mapchete_file)).tile(*tile)
                with mapchete.open(
                    mapchete_file, mode=mode, bounds=tile.bounds,
                    zoom=tile.zoom, single_input_file=input_file
                ) as mp:
                    spinner.stop()
                    tqdm.tqdm.write("processing 1 tile", file=verbose_dst)

                    # run process on tile
                    for result in mp.batch_processor(tile=tile):
                        utils.write_verbose_msg(result, dst=verbose_dst)

                tqdm.tqdm.write(
                    "processing %s finished" % mapchete_file, file=verbose_dst
                )

                # write VRT index
                if vrt:
                    tqdm.tqdm.write("creating VRT", file=verbose_dst)
                    for tile in tqdm.tqdm(
                        zoom_index_gen(
                            mp=mp,
                            zoom=tile.zoom,
                            out_dir=(
                                idx_out_dir if idx_out_dir
                                else mp.config.output.path
                            ),
                            vrt=vrt,
                        ),
                        total=mp.count_tiles(tile.zoom, tile.zoom),
                        unit="tile",
                        disable=debug or no_pbar
                    ):
                        logger.debug("%s indexed", tile)

                    tqdm.tqdm.write(
                        "VRT(s) creation for %s finished" % mapchete_file,
                        file=verbose_dst
                    )

            # process area
            else:
                with mapchete.open(
                    mapchete_file,
                    mode=mode,
                    zoom=zoom,
                    bounds=bounds_from_opts(
                        wkt_geometry=wkt_geometry,
                        point=point,
                        bounds=bounds,
                        raw_conf=raw_conf(mapchete_file)
                    ),
                    single_input_file=input_file
                ) as mp:
                    spinner.stop()
                    tiles_count = mp.count_tiles(
                        min(mp.config.init_zoom_levels),
                        max(mp.config.init_zoom_levels)
                    )

                    tqdm.tqdm.write(
                        "processing %s tile(s) on %s worker(s)" % (tiles_count, multi),
                        file=verbose_dst
                    )

                    # run process on tiles
                    for process_info in tqdm.tqdm(
                        mp.batch_processor(
                            multi=multi, zoom=zoom, max_chunksize=max_chunksize
                        ),
                        total=tiles_count,
                        unit="tile",
                        disable=debug or no_pbar
                    ):
                        utils.write_verbose_msg(process_info, dst=verbose_dst)

                tqdm.tqdm.write(
                    "processing %s finished" % mapchete_file, file=verbose_dst
                )

                # write VRT index
                if vrt:
                    tqdm.tqdm.write("creating VRT(s)", file=verbose_dst)
                    for tile in tqdm.tqdm(
                        zoom_index_gen(
                            mp=mp,
                            zoom=mp.config.init_zoom_levels,
                            out_dir=(
                                idx_out_dir if idx_out_dir
                                else mp.config.output.path
                            ),
                            vrt=vrt
                        ),
                        total=mp.count_tiles(
                            min(mp.config.init_zoom_levels),
                            max(mp.config.init_zoom_levels)
                        ),
                        unit="tile",
                        disable=debug or no_pbar
                    ):
                        logger.debug("%s indexed", tile)

                    tqdm.tqdm.write(
                        "VRT(s) creation for %s finished" % mapchete_file,
                        file=verbose_dst
                    )

def available_output_formats():
    """
    Return all available output formats.

    Returns
    -------
    formats : list
        all available output formats
    """
    output_formats = []
    for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
        driver_ = v.load()
        if hasattr(driver_, "METADATA") and (
            driver_.METADATA["mode"] in ["w", "rw"]
        ):
            output_formats.append(driver_.METADATA["driver_name"])
    return output_formats

def available_input_formats():
    """
    Return all available input formats.

    Returns
    -------
    formats : list
        all available input formats
    """
    input_formats = []
    for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
        logger.debug("driver found: %s", v)
        driver_ = v.load()
        if hasattr(driver_, "METADATA") and (driver_.METADATA["mode"] in ["r", "rw"]):
            input_formats.append(driver_.METADATA["driver_name"])
    return input_formats

def load_output_writer(output_params, readonly=False):
    """
    Return output class of driver.

    Returns
    -------
    output : ``OutputData``
        output writer object
    """
    if not isinstance(output_params, dict):
        raise TypeError("output_params must be a dictionary")
    driver_name = output_params["format"]
    for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
        _driver = v.load()
        if all(
            [hasattr(_driver, attr) for attr in ["OutputData", "METADATA"]]
        ) and (
            _driver.METADATA["driver_name"] == driver_name
        ):
            return _driver.OutputData(output_params, readonly=readonly)
    raise MapcheteDriverError("no loader for driver '%s' could be found." % driver_name)

def load_input_reader(input_params, readonly=False):
    """
    Return input class of driver.

    Returns
    -------
    input_params : ``InputData``
        input parameters
    """
    logger.debug("find input reader with params %s", input_params)
    if not isinstance(input_params, dict):
        raise TypeError("input_params must be a dictionary")
    if "abstract" in input_params:
        driver_name = input_params["abstract"]["format"]
    elif "path" in input_params:
        if os.path.splitext(input_params["path"])[1]:
            input_file = input_params["path"]
            driver_name = driver_from_file(input_file)
        else:
            logger.debug("%s is a directory", input_params["path"])
            driver_name = "TileDirectory"
    else:
        raise MapcheteDriverError("invalid input parameters %s" % input_params)
    for v in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
        driver_ = v.load()
        if hasattr(driver_, "METADATA") and (
            driver_.METADATA["driver_name"] == driver_name
        ):
            # reuse the already loaded entry point instead of loading twice
            return driver_.InputData(input_params, readonly=readonly)
    raise MapcheteDriverError("no loader for driver '%s' could be found." % driver_name)

def driver_from_file(input_file):
    """
    Guess driver from file extension.

    Returns
    -------
    driver : string
        driver name
    """
    file_ext = os.path.splitext(input_file)[1].split(".")[1]
    if file_ext not in _file_ext_to_driver():
        raise MapcheteDriverError(
            "no driver could be found for file extension %s" % file_ext
        )
    driver = _file_ext_to_driver()[file_ext]
    if len(driver) > 1:
        warnings.warn(
            DeprecationWarning(
                "more than one driver for file found, taking %s" % driver[0]
            )
        )
    return driver[0]

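# A minimal usage sketch, assuming mapchete is installed; the import path and
# the reported driver names are assumptions based on mapchete's built-ins.
from mapchete.formats import driver_from_file

print(driver_from_file("dem.tif"))       # e.g. "raster_file"
print(driver_from_file("area.geojson"))  # e.g. "vector_file"
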
def write_output_metadata(output_params):
    """Dump output JSON and verify parameters if output metadata exist."""
    if "path" in output_params:
        metadata_path = os.path.join(output_params["path"], "metadata.json")
        logger.debug("check for output %s", metadata_path)
        try:
            existing_params = read_output_metadata(metadata_path)
            logger.debug("%s exists", metadata_path)
            logger.debug("existing output parameters: %s", pformat(existing_params))
            existing_tp = existing_params["pyramid"]
            current_params = params_to_dump(output_params)
            logger.debug("current output parameters: %s", pformat(current_params))
            current_tp = BufferedTilePyramid(**current_params["pyramid"])
            if existing_tp != current_tp:
                raise MapcheteConfigError(
                    "pyramid definitions between existing and new output "
                    "do not match: %s != %s" % (existing_tp, current_tp)
                )
            existing_format = existing_params["driver"]["format"]
            current_format = current_params["driver"]["format"]
            if existing_format != current_format:
                raise MapcheteConfigError(
                    "existing output format does not match new output format: "
                    "%s != %s" % (existing_format, current_format)
                )
        except FileNotFoundError:
            logger.debug("%s does not exist", metadata_path)
            dump_params = params_to_dump(output_params)
            # dump output metadata
            write_json(metadata_path, dump_params)
    else:
        logger.debug("no path parameter found")

def formats(input_formats, output_formats, debug=False):
    """List input and/or output formats."""
    if input_formats == output_formats:
        show_inputs, show_outputs = True, True
    else:
        show_inputs, show_outputs = input_formats, output_formats

    if show_inputs:
        click.echo("input formats:")
        for driver in available_input_formats():
            click.echo("- %s" % driver)
    if show_outputs:
        click.echo("output formats:")
        for driver in available_output_formats():
            click.echo("- %s" % driver)

def open(
    self,
    tile,
    tile_directory_zoom=None,
    matching_method="gdal",
    matching_max_zoom=None,
    matching_precision=8,
    fallback_to_higher_zoom=False,
    resampling="nearest",
    **kwargs
):
    """
    Return InputTile object.

    Parameters
    ----------
    tile : ``Tile``
    tile_directory_zoom : None
        If set, data will be read from exactly this zoom level
    matching_method : str ('gdal' or 'min') (default: 'gdal')
        gdal: Uses GDAL's standard method. Here, the target resolution is
            calculated by averaging the extent's pixel sizes over both x and
            y axes. This approach returns a zoom level which may not have
            the best quality but will speed up reading significantly.
        min: Returns the zoom level which matches the minimum resolution of
            the extent's four corner pixels. This approach returns the zoom
            level with the best possible quality but with low performance.
            If the tile extent is outside of the destination pyramid, a
            TopologicalError will be raised.
    matching_max_zoom : int (default: None)
        If set, it will prevent reading from zoom levels above the maximum.
    matching_precision : int
        Round resolutions to n digits before comparing.
    fallback_to_higher_zoom : bool (default: False)
        In case no data is found at zoom level, try to read data from higher
        zoom levels. Enabling this setting can lead to many IO requests in
        areas with no data.
    resampling : string
        raster file: one of "nearest", "average", "bilinear" or "lanczos"

    Returns
    -------
    input tile : ``InputTile``
        tile view of input data
    """
    # determine tile bounds in TileDirectory CRS
    td_bounds = reproject_geometry(
        tile.bbox,
        src_crs=tile.tp.crs,
        dst_crs=self.td_pyramid.crs
    ).bounds

    # find target zoom level
    if tile_directory_zoom is not None:
        zoom = tile_directory_zoom
    else:
        zoom = tile_to_zoom_level(
            tile, dst_pyramid=self.td_pyramid, matching_method=matching_method,
            precision=matching_precision
        )
        if matching_max_zoom is not None:
            zoom = min([zoom, matching_max_zoom])

    if fallback_to_higher_zoom:
        tiles_paths = []
        # check if tiles exist otherwise try higher zoom level
        while len(tiles_paths) == 0 and zoom >= 0:
            tiles_paths = _get_tiles_paths(
                basepath=self.path,
                ext=self._ext,
                pyramid=self.td_pyramid,
                bounds=td_bounds,
                zoom=zoom
            )
            logger.debug("%s existing tiles found at zoom %s", len(tiles_paths), zoom)
            zoom -= 1
    else:
        tiles_paths = _get_tiles_paths(
            basepath=self.path,
            ext=self._ext,
            pyramid=self.td_pyramid,
            bounds=td_bounds,
            zoom=zoom
        )
        logger.debug("%s existing tiles found at zoom %s", len(tiles_paths), zoom)

    return InputTile(
        tile,
        tiles_paths=tiles_paths,
        file_type=self._file_type,
        profile=self._profile,
        td_crs=self.td_pyramid.crs,
        resampling=resampling,
        read_as_tiledir_func=self._read_as_tiledir_func,
        **kwargs
    )

def bbox(self, out_crs=None): """ Return data bounding box. Parameters ---------- out_crs : ``rasterio.crs.CRS`` rasterio CRS object (default: CRS of process pyramid) Returns ------- bounding box : geometry Shapely geometry object """ return reproject_geometry( box(*self._bounds), src_crs=self.td_pyramid.crs, dst_crs=self.pyramid.crs if out_crs is None else out_crs )
def read(
    self,
    validity_check=False,
    indexes=None,
    resampling=None,
    dst_nodata=None,
    gdal_opts=None,
    **kwargs
):
    """
    Read reprojected & resampled input data.

    Parameters
    ----------
    validity_check : bool
        vector file: also run checks if reprojected geometry is valid,
        otherwise throw RuntimeError (default: False)
    indexes : list or int
        raster file: a list of band numbers; None will read all.
    dst_nodata : int or float, optional
        raster file: if not set, the nodata value from the source dataset
        will be used
    gdal_opts : dict
        raster file: GDAL options passed on to rasterio.Env()

    Returns
    -------
    data : list for vector files or numpy array for raster files
    """
    return self._read_as_tiledir(
        data_type=self._file_type,
        out_tile=self.tile,
        td_crs=self._td_crs,
        tiles_paths=self._tiles_paths,
        profile=self._profile,
        validity_check=validity_check,
        indexes=indexes,
        resampling=resampling if resampling else self._resampling,
        dst_nodata=dst_nodata,
        gdal_opts=gdal_opts,
        **{k: v for k, v in kwargs.items() if k != "data_type"}
    )
def extract_contours(array, tile, interval=100, field='elev', base=0):
    """
    Extract contour lines from an array.

    Parameters
    ----------
    array : array
        input elevation data
    tile : Tile
        tile covering the array
    interval : integer
        elevation value interval when drawing contour lines
    field : string
        output field name containing elevation value
    base : integer
        elevation base value the intervals are computed from

    Returns
    -------
    contours : iterable
        contours as GeoJSON-like pairs of properties and geometry
    """
    import matplotlib.pyplot as plt
    levels = _get_contour_values(
        array.min(), array.max(), interval=interval, base=base)
    if not levels:
        return []
    contours = plt.contour(array, levels)
    out_contours = []
    # plt.contour() returns one line collection per requested level, so
    # levels and collections can be paired directly
    for elevation, collection in zip(levels, contours.collections):
        for path in collection.get_paths():
            out_coords = [
                (
                    tile.left + (y * tile.pixel_x_size),
                    tile.top - (x * tile.pixel_y_size),
                )
                for x, y in zip(path.vertices[:, 1], path.vertices[:, 0])
            ]
            if len(out_coords) >= 2:
                out_contours.append(
                    dict(
                        properties={field: elevation},
                        geometry=mapping(LineString(out_coords))
                    )
                )
    return out_contours
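A hedged usage sketch, assuming extract_contours() and its shapely/matplotlib dependencies are in scope; SimpleNamespace stands in for a Tile object with just the attributes the function reads (left, top, pixel_x_size, pixel_y_size).

import numpy as np
from types import SimpleNamespace

# a plane tilted along the x axis yields straight contour lines
dem = np.tile(np.linspace(0.0, 250.0, 64), (64, 1))
tile = SimpleNamespace(left=0.0, top=64.0, pixel_x_size=1.0, pixel_y_size=1.0)
for contour in extract_contours(dem, tile, interval=100):
    print(contour["properties"]["elev"], contour["geometry"]["type"])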
def _get_contour_values(min_val, max_val, base=0, interval=100):
    """Return contour values between min_val and max_val, anchored at base and spaced by interval."""
    i = base
    out = []
    if min_val < base:
        while i >= min_val:
            i -= interval
    while i <= max_val:
        if i >= min_val:
            out.append(i)
        i += interval
    return out
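Two worked examples, assuming _get_contour_values() is importable from this module: only multiples of the interval (shifted by base) that fall inside [min_val, max_val] are returned.

# base=0, interval=100: candidates 0, 100, 200, ... clipped to [12, 367]
assert _get_contour_values(12, 367) == [100, 200, 300]
# a minimum below base extends the candidate values below base as well
assert _get_contour_values(-150, 150) == [-100, 0, 100]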
def create(
    mapchete_file,
    process_file,
    out_format,
    out_path=None,
    pyramid_type=None,
    force=False
):
    """Create an empty Mapchete and process file in a given directory."""
    if os.path.isfile(process_file) or os.path.isfile(mapchete_file):
        if not force:
            raise IOError("file(s) already exist")
    out_path = out_path if out_path else os.path.join(os.getcwd(), "output")
    # copy file template to target directory
    process_template = pkg_resources.resource_filename(
        "mapchete.static", "process_template.py"
    )
    process_file = os.path.join(os.getcwd(), process_file)
    copyfile(process_template, process_file)
    # modify and copy mapchete file template to target directory
    mapchete_template = pkg_resources.resource_filename(
        "mapchete.static", "mapchete_template.mapchete"
    )
    output_options = dict(
        format=out_format, path=out_path, **FORMAT_MANDATORY[out_format]
    )
    pyramid_options = {'grid': pyramid_type}
    substitute_elements = {
        'process_file': process_file,
        'output': dump({'output': output_options}, default_flow_style=False),
        'pyramid': dump({'pyramid': pyramid_options}, default_flow_style=False)
    }
    with open(mapchete_template, 'r') as config_template:
        config = Template(config_template.read())
        customized_config = config.substitute(substitute_elements)
    with open(mapchete_file, 'w') as target_config:
        target_config.write(customized_config)
def tiles_exist(self, process_tile=None, output_tile=None):
    """
    Check whether output tiles of a tile (either process or output) exist.

    Parameters
    ----------
    process_tile : ``BufferedTile``
        must be member of process ``TilePyramid``
    output_tile : ``BufferedTile``
        must be member of output ``TilePyramid``

    Returns
    -------
    exists : bool
    """
    if process_tile and output_tile:
        raise ValueError("just one of 'process_tile' and 'output_tile' allowed")
    if process_tile:
        return any(
            path_exists(self.get_path(tile))
            for tile in self.pyramid.intersecting(process_tile)
        )
    if output_tile:
        return path_exists(self.get_path(output_tile))
def get_path(self, tile): """ Determine target file path. Parameters ---------- tile : ``BufferedTile`` must be member of output ``TilePyramid`` Returns ------- path : string """ return os.path.join(*[ self.path, str(tile.zoom), str(tile.row), str(tile.col) + self.file_extension ])
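A standalone demonstration of the directory scheme produced above (values hypothetical): zoom/row/col below the base path, with the driver's file extension appended to the column.

import os

# POSIX-style paths assumed for the assertion
path = os.path.join("/data/output", str(5), str(10), str(20) + ".png")
assert path == "/data/output/5/10/20.png"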
def prepare_path(self, tile): """ Create directory and subdirectory if necessary. Parameters ---------- tile : ``BufferedTile`` must be member of output ``TilePyramid`` """ makedirs(os.path.dirname(self.get_path(tile)))
def output_is_valid(self, process_data): """ Check whether process output is allowed with output driver. Parameters ---------- process_data : raw process output Returns ------- True or False """ if self.METADATA["data_type"] == "raster": return ( is_numpy_or_masked_array(process_data) or is_numpy_or_masked_array_with_tags(process_data) ) elif self.METADATA["data_type"] == "vector": return is_feature_list(process_data)
def output_cleaned(self, process_data): """ Return verified and cleaned output. Parameters ---------- process_data : raw process output Returns ------- NumPy array or list of features. """ if self.METADATA["data_type"] == "raster": if is_numpy_or_masked_array(process_data): return process_data elif is_numpy_or_masked_array_with_tags(process_data): data, tags = process_data return self.output_cleaned(data), tags elif self.METADATA["data_type"] == "vector": return list(process_data)
def extract_subset(self, input_data_tiles=None, out_tile=None):
    """
    Extract subset from multiple tiles.

    Parameters
    ----------
    input_data_tiles : list of (``Tile``, process data) tuples
    out_tile : ``Tile``

    Returns
    -------
    NumPy array or list of features.
    """
    if self.METADATA["data_type"] == "raster":
        mosaic = create_mosaic(input_data_tiles)
        return extract_from_array(
            in_raster=prepare_array(
                mosaic.data,
                nodata=self.nodata,
                dtype=self.output_params["dtype"]
            ),
            in_affine=mosaic.affine,
            out_tile=out_tile
        )
    elif self.METADATA["data_type"] == "vector":
        return [
            feature for feature in list(
                chain.from_iterable([features for _, features in input_data_tiles])
            )
            if shape(feature["geometry"]).intersects(out_tile.bbox)
        ]
def _read_as_tiledir(
    self,
    out_tile=None,
    td_crs=None,
    tiles_paths=None,
    profile=None,
    validity_check=False,
    indexes=None,
    resampling=None,
    dst_nodata=None,
    gdal_opts=None,
    **kwargs
):
    """
    Read reprojected & resampled input data.

    Parameters
    ----------
    validity_check : bool
        vector file: also run checks if reprojected geometry is valid,
        otherwise throw RuntimeError (default: False)
    indexes : list or int
        raster file: a list of band numbers; None will read all.
    dst_nodata : int or float, optional
        raster file: if not set, the nodata value from the source dataset
        will be used
    gdal_opts : dict
        raster file: GDAL options passed on to rasterio.Env()

    Returns
    -------
    data : list for vector files or numpy array for raster files
    """
    return _read_as_tiledir(
        data_type=self.METADATA["data_type"],
        out_tile=out_tile,
        td_crs=td_crs,
        tiles_paths=tiles_paths,
        profile=profile,
        validity_check=validity_check,
        indexes=indexes,
        resampling=resampling,
        dst_nodata=dst_nodata,
        gdal_opts=gdal_opts,
        **{k: v for k, v in kwargs.items() if k != "data_type"}
    )
def calculate_slope_aspect(elevation, xres, yres, z=1.0, scale=1.0):
    """
    Calculate slope and aspect map.

    Return a pair of arrays 2 pixels smaller than the input elevation array.

    Slope is returned in radians, from 0 for sheer face to pi/2 for flat
    ground. Aspect is returned in radians, counterclockwise from -pi at north
    around to pi.

    Logic here is borrowed from hillshade.cpp:
    http://www.perrygeo.net/wordpress/?p=7

    Parameters
    ----------
    elevation : array
        input elevation data
    xres : float
        column width
    yres : float
        row height
    z : float
        vertical exaggeration factor
    scale : float
        scale factor of pixel size units versus height units (insert 112000
        when having elevation values in meters in a geodetic projection)

    Returns
    -------
    slope, aspect : tuple of arrays
        slope and aspect maps as described above
    """
    z = float(z)
    scale = float(scale)
    height, width = elevation.shape[0] - 2, elevation.shape[1] - 2
    window = [
        z * elevation[row:(row + height), col:(col + width)]
        for (row, col) in product(range(3), range(3))
    ]
    x = (
        (window[0] + window[3] + window[3] + window[6])
        - (window[2] + window[5] + window[5] + window[8])
    ) / (8.0 * xres * scale)
    y = (
        (window[6] + window[7] + window[7] + window[8])
        - (window[0] + window[1] + window[1] + window[2])
    ) / (8.0 * yres * scale)
    # in radians, from 0 to pi/2
    slope = math.pi / 2 - np.arctan(np.sqrt(x * x + y * y))
    # in radians counterclockwise, from -pi at north back to pi
    aspect = np.arctan2(x, y)
    return slope, aspect
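A quick sanity check, assuming calculate_slope_aspect() and its numpy/itertools imports are in scope: flat terrain has zero gradient, so slope is pi/2 everywhere ("flat ground" in the convention above) and aspect collapses to arctan2(0, 0) == 0.

import math
import numpy as np

slope, aspect = calculate_slope_aspect(np.zeros((5, 5)), xres=1.0, yres=1.0)
assert slope.shape == (3, 3)            # two pixels smaller on each axis
assert np.allclose(slope, math.pi / 2)  # flat ground everywhere
assert np.allclose(aspect, 0.0)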
def hillshade(elevation, tile, azimuth=315.0, altitude=45.0, z=1.0, scale=1.0):
    """
    Return hillshaded numpy array.

    Parameters
    ----------
    elevation : array
        input elevation data
    tile : Tile
        tile covering the array
    azimuth : float
        horizontal angle of the light source in degrees, measured clockwise
        from north (default: 315.0)
    altitude : float
        vertical angle of the light source above the horizon in degrees
        (default: 45.0)
    z : float
        vertical exaggeration factor
    scale : float
        scale factor of pixel size units versus height units (insert 112000
        when having elevation values in meters in a geodetic projection)
    """
    azimuth = float(azimuth)
    altitude = float(altitude)
    z = float(z)
    scale = float(scale)
    xres = tile.tile.pixel_x_size
    yres = -tile.tile.pixel_y_size
    slope, aspect = calculate_slope_aspect(
        elevation, xres, yres, z=z, scale=scale)
    deg2rad = math.pi / 180.0
    shaded = np.sin(altitude * deg2rad) * np.sin(slope) \
        + np.cos(altitude * deg2rad) * np.cos(slope) \
        * np.cos((azimuth - 90.0) * deg2rad - aspect)
    # shaded now has values between -1.0 and +1.0
    # stretch to 0 - 255 and invert
    shaded = (((shaded + 1.0) / 2) * -255.0).astype("uint8")
    # add one pixel padding using the edge values
    return ma.masked_array(
        data=np.pad(shaded, 1, mode='edge'), mask=elevation.mask
    )
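A hedged usage sketch; SimpleNamespace mimics the nested tile object, since only tile.tile.pixel_x_size and tile.tile.pixel_y_size are read here.

import numpy as np
import numpy.ma as ma
from types import SimpleNamespace

tile = SimpleNamespace(tile=SimpleNamespace(pixel_x_size=1.0, pixel_y_size=1.0))
elevation = ma.masked_array(
    np.random.rand(64, 64) * 100.0, mask=np.zeros((64, 64), dtype=bool)
)
shaded = hillshade(elevation, tile)
assert shaded.shape == elevation.shape  # edge padding restores the input size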
def tile(self, zoom, row, col): """ Return ``BufferedTile`` object of this ``BufferedTilePyramid``. Parameters ---------- zoom : integer zoom level row : integer tile matrix row col : integer tile matrix column Returns ------- buffered tile : ``BufferedTile`` """ tile = self.tile_pyramid.tile(zoom, row, col) return BufferedTile(tile, pixelbuffer=self.pixelbuffer)
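A hedged usage sketch, assuming mapchete's BufferedTilePyramid with the geodetic grid:

from mapchete.tile import BufferedTilePyramid

tp = BufferedTilePyramid("geodetic", pixelbuffer=10)
tile = tp.tile(5, 3, 7)
# the returned BufferedTile carries the pyramid's pixelbuffer, so its
# bounds extend 10 pixels beyond the bare TilePyramid tile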
def tiles_from_bounds(self, bounds, zoom): """ Return all tiles intersecting with bounds. Bounds values will be cleaned if they cross the antimeridian or are outside of the Northern or Southern tile pyramid bounds. Parameters ---------- bounds : tuple (left, bottom, right, top) bounding values in tile pyramid CRS zoom : integer zoom level Yields ------ intersecting tiles : generator generates ``BufferedTiles`` """ for tile in self.tiles_from_bbox(box(*bounds), zoom): yield self.tile(*tile.id)
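A hedged usage sketch, again assuming a geodetic BufferedTilePyramid: iterate over all zoom-3 tiles touching a small lon/lat window.

from mapchete.tile import BufferedTilePyramid

tp = BufferedTilePyramid("geodetic")
for tile in tp.tiles_from_bounds((0.0, 0.0, 10.0, 10.0), 3):
    print(tile.id)  # (zoom, row, col) of each intersecting tile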
def tiles_from_bbox(self, geometry, zoom):
    """
    Return all metatiles intersecting with the given bounding box.

    Parameters
    ----------
    geometry : ``shapely.geometry``
    zoom : integer
        zoom level

    Yields
    ------
    intersecting tiles : generator
        generates ``BufferedTiles``
    """
    for tile in self.tile_pyramid.tiles_from_bbox(geometry, zoom):
        yield self.tile(*tile.id)
def tiles_from_geom(self, geometry, zoom): """ Return all tiles intersecting with input geometry. Parameters ---------- geometry : ``shapely.geometry`` zoom : integer zoom level Yields ------ intersecting tiles : ``BufferedTile`` """ for tile in self.tile_pyramid.tiles_from_geom(geometry, zoom): yield self.tile(*tile.id)
def intersecting(self, tile): """ Return all BufferedTiles intersecting with tile. Parameters ---------- tile : ``BufferedTile`` another tile """ return [ self.tile(*intersecting_tile.id) for intersecting_tile in self.tile_pyramid.intersecting(tile) ]
def to_dict(self): """ Return dictionary representation of pyramid parameters. """ return dict( grid=self.grid.to_dict(), metatiling=self.metatiling, tile_size=self.tile_size, pixelbuffer=self.pixelbuffer )
def get_children(self): """ Get tile children (intersecting tiles in next zoom level). Returns ------- children : list a list of ``BufferedTiles`` """ return [BufferedTile(t, self.pixelbuffer) for t in self._tile.get_children()]
def get_neighbors(self, connectedness=8):
    """
    Return tile neighbors.

    Tile neighbors are unique, i.e. in some edge cases the left and the
    right neighbor wrapped around the antimeridian are the same tile. Also,
    neighbors outside the northern and southern TilePyramid boundaries are
    excluded, because they are invalid.

    -------------
    | 8 | 1 | 5 |
    -------------
    | 4 | x | 2 |
    -------------
    | 7 | 3 | 6 |
    -------------

    Parameters
    ----------
    connectedness : int [4 or 8]
        return four direct neighbors or all eight.

    Returns
    -------
    list of BufferedTiles
    """
    return [
        BufferedTile(t, self.pixelbuffer)
        for t in self._tile.get_neighbors(connectedness=connectedness)
    ]
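A hedged example, assuming a geodetic BufferedTilePyramid: a tile well inside the pyramid has the full set of neighbors, so no deduplication or boundary exclusion kicks in.

from mapchete.tile import BufferedTilePyramid

tp = BufferedTilePyramid("geodetic")
tile = tp.tile(5, 3, 7)  # interior tile, away from any pyramid edge
assert len(tile.get_neighbors(connectedness=4)) == 4
assert len(tile.get_neighbors(connectedness=8)) == 8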
def is_on_edge(self): """Determine whether tile touches or goes over pyramid edge.""" return ( self.left <= self.tile_pyramid.left or # touches_left self.bottom <= self.tile_pyramid.bottom or # touches_bottom self.right >= self.tile_pyramid.right or # touches_right self.top >= self.tile_pyramid.top # touches_top )
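A hedged check of the edge test, continuing the same geodetic-pyramid sketch: the top-left tile touches the pyramid boundary, an interior tile does not.

from mapchete.tile import BufferedTilePyramid

tp = BufferedTilePyramid("geodetic")
assert tp.tile(5, 0, 0).is_on_edge()      # touches left and top boundary
assert not tp.tile(5, 3, 7).is_on_edge()  # fully inside the pyramid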