| code (string, 66-870k chars) | docstring (string, 19-26.7k chars) | func_name (string, 1-138 chars) | language (1 class) | repo (string, 7-68 chars) | path (string, 5-324 chars) | url (string, 46-389 chars) | license (7 classes) |
---|---|---|---|---|---|---|---|
def Resolution(self, zoom):
'''Resolution (arc/pixel) for given zoom level (measured at Equator)'''
return self.resFact / 2 ** zoom
# return 180 / float( 1 << (8+zoom) )
| Resolution (arc/pixel) for given zoom level (measured at Equator) | Resolution | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def ZoomForPixelSize(self, pixelSize):
'''Maximal scaledown zoom of the pyramid closest to the pixelSize.'''
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i != 0:
return i - 1
else:
return 0 # We don't want to scale up
| Maximal scaledown zoom of the pyramid closest to the pixelSize. | ZoomForPixelSize | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def TileBounds(
self,
tx,
ty,
zoom,
):
'''Returns bounds of the given tile'''
res = self.resFact / 2 ** zoom
return (tx * self.tileSize * res - 180, ty * self.tileSize
* res - 90, (tx + 1) * self.tileSize * res - 180, (ty
+ 1) * self.tileSize * res - 90)
| Returns bounds of the given tile | TileBounds | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def TileLatLonBounds(
self,
tx,
ty,
zoom,
):
'''Returns bounds of the given tile in the SWNE form'''
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
| Returns bounds of the given tile in the SWNE form | TileLatLonBounds | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
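Taken together, the four rows above define a simple geodetic tile grid. The following is a minimal, self-contained sketch of that math, assuming the methods live on an object whose resFact is 180.0 / tileSize (the 0.703125 deg/pixel base resolution referenced by the -d/--tmscompatible option further down); the class name and constructor here are hypothetical, not part of the dataset.

```python
class GeodeticTileGridSketch:
    """Hypothetical container for the Resolution/TileBounds/TileLatLonBounds rows above."""

    def __init__(self, tileSize=256):
        self.tileSize = tileSize
        self.resFact = 180.0 / tileSize  # assumed: deg/pixel at zoom 0 (tmscompatible case)

    def Resolution(self, zoom):
        return self.resFact / 2 ** zoom

    def TileBounds(self, tx, ty, zoom):
        res = self.resFact / 2 ** zoom
        return (tx * self.tileSize * res - 180, ty * self.tileSize * res - 90,
                (tx + 1) * self.tileSize * res - 180, (ty + 1) * self.tileSize * res - 90)

    def TileLatLonBounds(self, tx, ty, zoom):
        b = self.TileBounds(tx, ty, zoom)
        return (b[1], b[0], b[3], b[2])  # reorder (W, S, E, N) to (S, W, N, E)

g = GeodeticTileGridSketch()
print(g.Resolution(5))              # 0.02197265625 deg/pixel
print(g.TileBounds(0, 0, 1))        # (-180.0, -90.0, -90.0, 0.0)
print(g.TileLatLonBounds(0, 0, 1))  # (-90.0, -180.0, 0.0, -90.0)
```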
def __init__(
self,
width,
height,
tilesize=256,
tileformat='jpg',
):
"""Initialization of the Zoomify tile tree"""
self.tilesize = tilesize
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tilesize), math.ceil(height
/ tilesize))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
self.tierSizeInTiles.append(tiles)
# Image size in pixels for each pyramid tier
self.tierImageSize = []
self.tierImageSize.append(imagesize)
while imagesize[0] > tilesize or imagesize[1] > tilesize:
imagesize = (math.floor(imagesize[0] / 2),
math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tilesize),
math.ceil(imagesize[1] / tilesize))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
self.tileCountUpToTier = []
self.tileCountUpToTier.append(0)
for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(self.tierSizeInTiles[i
- 1][0] * self.tierSizeInTiles[i - 1][1]
+ self.tileCountUpToTier[i - 1])
| Initialization of the Zoomify tile tree | __init__ | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def tilefilename(
self,
x,
y,
z,
):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] \
+ self.tileCountUpToTier[z]
return os.path.join('TileGroup%.0f' % math.floor(tileIndex
/ 256), '%s-%s-%s.%s' % (z, x, y,
self.tileformat))
| Returns filename for tile with given coordinates | tilefilename | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
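For a concrete feel of the Zoomify indexing in __init__ and tilefilename above, here is a worked sketch for a hypothetical 1000 x 800 pixel image with the default 256 px tiles; the helper function is illustrative only and not part of the original file.

```python
import math
import os

def zoomify_tile_path(width, height, x, y, z, tilesize=256, tileformat='jpg'):
    """Recompute the tier sizes, cumulative tile counts and TileGroup path (sketch)."""
    imagesize = (width, height)
    tiers = [(math.ceil(width / tilesize), math.ceil(height / tilesize))]
    while imagesize[0] > tilesize or imagesize[1] > tilesize:
        imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
        tiers.append((math.ceil(imagesize[0] / tilesize),
                      math.ceil(imagesize[1] / tilesize)))
    tiers.reverse()  # tier 0 is the most zoomed-out level
    count_up_to = [0]
    for i in range(1, len(tiers)):
        count_up_to.append(tiers[i - 1][0] * tiers[i - 1][1] + count_up_to[i - 1])
    tile_index = x + y * tiers[z][0] + count_up_to[z]
    return os.path.join('TileGroup%d' % (tile_index // 256),
                        '%s-%s-%s.%s' % (z, x, y, tileformat))

# 1000x800 px image -> tiers [(1, 1), (2, 2), (4, 4)], cumulative counts [0, 1, 5]
print(zoomify_tile_path(1000, 800, x=3, y=2, z=2))  # TileGroup0/2-3-2.jpg
```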
def process(self):
"""The main processing function, runs all the main steps of processing"""
# Opening and preprocessing of the input file
self.open_input()
# Generation of main metadata files and HTML viewers
self.generate_metadata()
# Generation of the lowest tiles
self.generate_base_tiles()
# Generation of the overview tiles (higher in the pyramid)
self.generate_overview_tiles()
| The main processing function, runs all the main steps of processing | process | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def error(self, msg, details=''):
"""Print an error message and stop the processing"""
if details:
self.parser.error(msg + '''
''' + details)
else:
self.parser.error(msg)
| Print an error message and stop the processing | error | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def optparse_init(self):
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = 'Usage: %prog [options] input_file(s) [output]'
p = OptionParser(usage, version='%prog ' + __version__)
p.add_option(
'-p',
'--profile',
dest='profile',
type='choice',
choices=profile_list,
help="Tile cutting profile (%s) - default 'mercator' (Google Maps compatible)"
% ','.join(profile_list),
)
p.add_option(
'-r',
'--resampling',
dest='resampling',
type='choice',
choices=resampling_list,
help="Resampling method (%s) - default 'average'"
% ','.join(resampling_list),
)
p.add_option('-s', '--s_srs', dest='s_srs', metavar='SRS',
help='The spatial reference system used for the source input data'
)
p.add_option('-z', '--zoom', dest='zoom',
help="Zoom levels to render (format:'2-5' or '10')."
)
p.add_option('-e', '--resume', dest='resume',
action='store_true',
help='Resume mode. Generate only missing files.')
p.add_option('-a', '--srcnodata', dest='srcnodata',
metavar='NODATA',
help='NODATA transparency value to assign to the input data'
)
p.add_option('-d', '--tmscompatible', dest='tmscompatible',
action='store_true',
help='When using the geodetic profile, specifies the base resolution as 0.703125 or 2 tiles at zoom level 0.'
)
p.add_option('-l', '--leaflet', action='store_true',
dest='leaflet',
help="Set 0,0 point to north. For use with 'leaflet'. Requires -p raster. "
)
p.add_option('--processes', dest='processes', type='int',
default=multiprocessing.cpu_count(),
help='Number of concurrent processes (defaults to the number of cores in the system)'
)
p.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Print status messages to stdout')
# KML options
g = OptionGroup(p, 'KML (Google Earth) options',
'Options for generated Google Earth SuperOverlay metadata'
)
g.add_option('-k', '--force-kml', dest='kml',
action='store_true',
help="Generate KML for Google Earth - default for 'geodetic' profile and 'raster' in EPSG:4326. For a dataset with different projection use with caution!"
)
g.add_option('-n', '--no-kml', dest='kml', action='store_false',
help='Avoid automatic generation of KML files for EPSG:4326'
)
g.add_option('-u', '--url', dest='url',
help='URL address where the generated tiles are going to be published'
)
p.add_option_group(g)
# HTML options
g = OptionGroup(p, 'Web viewer options',
'Options for generated HTML viewers a la Google Maps'
)
g.add_option(
'-w',
'--webviewer',
dest='webviewer',
type='choice',
choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'"
% ','.join(webviewer_list),
)
g.add_option('-t', '--title', dest='title',
help='Title of the map')
g.add_option('-c', '--copyright', dest='copyright',
help='Copyright for the map')
g.add_option('-g', '--googlekey', dest='googlekey',
help='Google Maps API key from http://code.google.com/apis/maps/signup.html'
)
g.add_option('-b', '--bingkey', dest='bingkey',
help='Bing Maps API key from https://www.bingmapsportal.com/'
)
p.add_option_group(g)
# TODO: MapFile + TileIndexes per zoom level for efficient MapServer WMS
# g = OptionGroup(p, "WMS MapServer metadata", "Options for generated mapfile and tileindexes for MapServer")
# g.add_option("-i", "--tileindex", dest='wms', action="store_true"
# help="Generate tileindex and mapfile for MapServer (WMS)")
# p.add_option_group(g)
p.set_defaults(
verbose=False,
profile='mercator',
kml=False,
url='',
webviewer='all',
copyright='',
resampling='average',
resume=False,
googlekey='INSERT_YOUR_KEY_HERE',
bingkey='INSERT_YOUR_KEY_HERE',
)
self.parser = p
| Prepare the option parser for input (argv) | optparse_init | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
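As a quick usage sketch of the parser prepared above (a reduced parser with only two of the options, parsing an illustrative argv; nothing here is part of the original script):

```python
from optparse import OptionParser

p = OptionParser('Usage: %prog [options] input_file(s) [output]')
# Profile choices assumed from the help texts above ('mercator', 'geodetic', 'raster').
p.add_option('-p', '--profile', dest='profile', type='choice',
             choices=['mercator', 'geodetic', 'raster'])
p.add_option('-z', '--zoom', dest='zoom')
p.set_defaults(profile='mercator')

(options, args) = p.parse_args(['-p', 'raster', '-z', '2-5', 'input.tif', 'tiles'])
print(options.profile, options.zoom, args)  # raster 2-5 ['input.tif', 'tiles']
```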
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.UseExceptions()
gdal.AllRegister()
if not self.options.verbose:
gdal.PushErrorHandler('CPLQuietErrorHandler')
# Initialize necessary GDAL drivers
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
raise Exception("The '%s' driver was not found, is it available in this GDAL build?"
, self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?"
)
# Open the input file
if self.input:
self.in_ds = gdal.Open(self.input, gdal.GA_ReadOnly)
else:
raise Exception('No input file was specified')
if self.options.verbose:
print ('Input file:', '( %sP x %sL - %s bands)'
% (self.in_ds.RasterXSize, self.in_ds.RasterYSize,
self.in_ds.RasterCount))
if not self.in_ds:
# Note: GDAL prints the ERROR message too
self.error("It is not possible to open the input file '%s'."
% self.input)
# Read metadata from the input file
if self.in_ds.RasterCount == 0:
self.error("Input file '%s' has no raster band"
% self.input)
if self.in_ds.GetRasterBand(1).GetRasterColorTable():
# TODO: Process directly paletted dataset by generating VRT in memory
self.error('Please convert this file to RGB/RGBA and run gdal2tiles on the result.',
"""From paletted file you can create RGBA file (temp.vrt) by:
gdal_translate -of vrt -expand rgba %s temp.vrt
then run:
gdal2tiles temp.vrt"""
% self.input)
# Get NODATA value
self.in_nodata = []
for i in range(1, self.in_ds.RasterCount + 1):
if self.in_ds.GetRasterBand(i).GetNoDataValue() != None:
self.in_nodata.append(self.in_ds.GetRasterBand(i).GetNoDataValue())
if self.options.srcnodata:
nds = list(map(float, self.options.srcnodata.split(',')))
if len(nds) < self.in_ds.RasterCount:
self.in_nodata = (nds
* self.in_ds.RasterCount)[:self.in_ds.RasterCount]
else:
self.in_nodata = nds
if self.options.verbose:
print('NODATA: %s' % self.in_nodata)
#
# Here we should have RGBA input dataset opened in self.in_ds
#
if self.options.verbose:
print ('Preprocessed file:', '( %sP x %sL - %s bands)'
% (self.in_ds.RasterXSize, self.in_ds.RasterYSize,
self.in_ds.RasterCount))
# Spatial Reference System of the input raster
self.in_srs = None
if self.options.s_srs:
self.in_srs = osr.SpatialReference()
self.in_srs.SetFromUserInput(self.options.s_srs)
self.in_srs_wkt = self.in_srs.ExportToWkt()
else:
self.in_srs_wkt = self.in_ds.GetProjection()
if not self.in_srs_wkt and self.in_ds.GetGCPCount() != 0:
self.in_srs_wkt = self.in_ds.GetGCPProjection()
if self.in_srs_wkt:
self.in_srs = osr.SpatialReference()
self.in_srs.ImportFromWkt(self.in_srs_wkt)
# elif self.options.profile != 'raster':
# self.error("There is no spatial reference system info included in the input file.","You should run gdal2tiles with --s_srs EPSG:XXXX or similar.")
# Spatial Reference System of tiles
self.out_srs = osr.SpatialReference()
if self.options.profile == 'mercator':
self.out_srs.ImportFromEPSG(900913)
elif self.options.profile == 'geodetic':
self.out_srs.ImportFromEPSG(4326)
else:
self.out_srs = self.in_srs
# Are the reference systems the same? Reproject if necessary.
self.out_ds = None
if self.options.profile in ('mercator', 'geodetic'):
if self.in_ds.GetGeoTransform() == (
0.0,
1.0,
0.0,
0.0,
0.0,
1.0,
) and self.in_ds.GetGCPCount() == 0:
self.error("There is no georeference - neither affine transformation (worldfile) nor GCPs. You can generate only 'raster' profile tiles."
,
"Either gdal2tiles with parameter -p 'raster' or use another GIS software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
)
if self.in_srs:
if self.in_srs.ExportToProj4() \
!= self.out_srs.ExportToProj4() \
or self.in_ds.GetGCPCount() != 0:
# Generation of VRT dataset in tile projection, default 'nearest neighbour' warping
self.out_ds = gdal.AutoCreateWarpedVRT(self.in_ds,
self.in_srs_wkt, self.out_srs.ExportToWkt())
# TODO: HIGH PRIORITY: Correction of AutoCreateWarpedVRT according the max zoomlevel for correct direct warping!!!
if self.options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
self.out_ds.GetDriver().CreateCopy('tiles.vrt',
self.out_ds)
# Note: self.in_srs and self.in_srs_wkt contain still the non-warped reference system!!!
# Correction of AutoCreateWarpedVRT for NODATA values
if self.in_nodata != []:
(fd, tempfilename) = \
tempfile.mkstemp('-gdal2tiles.vrt')
self.out_ds.GetDriver().CreateCopy(tempfilename,
self.out_ds)
# open as a text file
s = open(tempfilename).read()
# Add the warping options
s = s.replace("""<GDALWarpOptions>""",
"""<GDALWarpOptions>
<Option name="INIT_DEST">NO_DATA</Option>
<Option name="UNIFIED_SRC_NODATA">YES</Option>""")
# replace BandMapping tag for NODATA bands....
for i in range(len(self.in_nodata)):
s = \
s.replace("""<BandMapping src="%i" dst="%i"/>"""
% (i + 1, i + 1),
"""<BandMapping src="%i" dst="%i">
<SrcNoDataReal>%i</SrcNoDataReal>
<SrcNoDataImag>0</SrcNoDataImag>
<DstNoDataReal>%i</DstNoDataReal>
<DstNoDataImag>0</DstNoDataImag>
</BandMapping>"""
% (i + 1, i + 1, self.in_nodata[i],
self.in_nodata[i])) # Or rewrite to white by: , 255 ))
# save the corrected VRT
open(tempfilename, 'w').write(s)
# open by GDAL as self.out_ds
self.out_ds = gdal.Open(tempfilename) # , gdal.GA_ReadOnly)
# delete the temporary file
os.unlink(tempfilename)
# set NODATA_VALUE metadata
self.out_ds.SetMetadataItem('NODATA_VALUES',
'%i %i %i' % (self.in_nodata[0],
self.in_nodata[1], self.in_nodata[2]))
if self.options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
open('tiles1.vrt', 'w').write(s)
# -----------------------------------
# Correction of AutoCreateWarpedVRT for Mono (1 band) and RGB (3 bands) files without NODATA:
# equivalent of gdalwarp -dstalpha
if self.in_nodata == [] and self.out_ds.RasterCount \
in [1, 3]:
(fd, tempfilename) = \
tempfile.mkstemp('-gdal2tiles.vrt')
self.out_ds.GetDriver().CreateCopy(tempfilename,
self.out_ds)
# open as a text file
s = open(tempfilename).read()
# Add the warping options
s = s.replace("""<BlockXSize>""",
"""<VRTRasterBand dataType="Byte" band="%i" subClass="VRTWarpedRasterBand">
<ColorInterp>Alpha</ColorInterp>
</VRTRasterBand>
<BlockXSize>"""
% (self.out_ds.RasterCount + 1))
s = s.replace("""</GDALWarpOptions>""",
"""<DstAlphaBand>%i</DstAlphaBand>
</GDALWarpOptions>"""
% (self.out_ds.RasterCount + 1))
s = s.replace("""</WorkingDataType>""",
"""</WorkingDataType>
<Option name="INIT_DEST">0</Option>"""
)
# save the corrected VRT
open(tempfilename, 'w').write(s)
# open by GDAL as self.out_ds
self.out_ds = gdal.Open(tempfilename) # , gdal.GA_ReadOnly)
# delete the temporary file
os.unlink(tempfilename)
if self.options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
open('tiles1.vrt', 'w').write(s)
s = '''
'''
else:
self.error('Input file has unknown SRS.',
'Use --s_srs EPSG:xyz (or similar) to provide source reference system.'
)
if self.out_ds and self.options.verbose:
print ('Projected file:', 'tiles.vrt',
'( %sP x %sL - %s bands)'
% (self.out_ds.RasterXSize,
self.out_ds.RasterYSize,
self.out_ds.RasterCount))
if not self.out_ds:
self.out_ds = self.in_ds
#
# Here we should have a raster (out_ds) in the correct Spatial Reference system
#
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.out_ds.GetRasterBand(1).GetMaskBand()
if self.alphaband.GetMaskFlags() & gdal.GMF_ALPHA \
or self.out_ds.RasterCount == 4 or self.out_ds.RasterCount \
== 2:
# TODO: Better test for alpha band in the dataset
self.dataBandsCount = self.out_ds.RasterCount - 1
else:
self.dataBandsCount = self.out_ds.RasterCount
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if self.out_srs and srs4326.ExportToProj4() \
== self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print('KML autotest OK!')
# Read the georeference
self.out_gt = self.out_ds.GetGeoTransform()
# originX, originY = self.out_gt[0], self.out_gt[3]
# pixelSize = self.out_gt[1] # = self.out_gt[5]
# Test the size of the pixel
# MAPTILER - COMMENTED
# if self.out_gt[1] != (-1 * self.out_gt[5]) and self.options.profile != 'raster':
# TODO: Process correctly coordinates which have a switched Y axis (display in OpenLayers too)
# self.error("Size of the pixel in the output differ for X and Y axes.")
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
self.error('Georeference of the raster contains rotation or skew. Such raster is not supported. Please use gdalwarp first.'
)
# TODO: Do the warping in this case automatically
#
# Here we expect: pixel is square, no rotation on the raster
#
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.out_ds.RasterXSize \
* self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.out_ds.RasterYSize \
* self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
print ('Bounds (output srs):', round(self.ominx, 13),
self.ominy, self.omaxx, self.omaxy)
#
# Calculating ranges for tiles in different zoom levels
#
if self.options.profile == 'mercator':
self.mercator = GlobalMercator() # from globalmaptiles.py
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
(tminx, tminy) = self.mercator.MetersToTile(self.ominx,
self.ominy, tz)
(tmaxx, tmaxy) = self.mercator.MetersToTile(self.omaxx,
self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
(tminx, tminy) = (max(0, tminx), max(0, tminy))
(tmaxx, tmaxy) = (min(2 ** tz - 1, tmaxx), min(2 ** tz
- 1, tmaxy))
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz == None:
self.tminz = \
self.mercator.ZoomForPixelSize(self.out_gt[1]
* max(self.out_ds.RasterXSize,
self.out_ds.RasterYSize) / float(self.tilesize))
# Get the maximal zoom level (closest possible zoom level up on the resolution of raster)
if self.tmaxz == None:
self.tmaxz = \
self.mercator.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print ('Bounds (latlong):',
self.mercator.MetersToLatLon(self.ominx,
self.ominy),
self.mercator.MetersToLatLon(self.omaxx,
self.omaxy))
print ('MinZoomLevel:', self.tminz)
print ('MaxZoomLevel:', self.tmaxz, '(',
self.mercator.Resolution(self.tmaxz), ')')
if self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible) # from globalmaptiles.py
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
(tminx, tminy) = self.geodetic.LonLatToTile(self.ominx,
self.ominy, tz)
(tmaxx, tmaxy) = self.geodetic.LonLatToTile(self.omaxx,
self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
(tminx, tminy) = (max(0, tminx), max(0, tminy))
(tmaxx, tmaxy) = (min(2 ** (tz + 1) - 1, tmaxx), min(2
** tz - 1, tmaxy))
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the maximal zoom level (closest possible zoom level up on the resolution of raster)
if self.tminz == None:
self.tminz = \
self.geodetic.ZoomForPixelSize(self.out_gt[1]
* max(self.out_ds.RasterXSize,
self.out_ds.RasterYSize) / float(self.tilesize))
# Get the maximal zoom level (closest possible zoom level up on the resolution of raster)
if self.tmaxz == None:
self.tmaxz = \
self.geodetic.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print ('Bounds (latlong):', self.ominx, self.ominy,
self.omaxx, self.omaxy)
if self.options.profile == 'raster':
log2 = lambda x: math.log10(x) / math.log10(2) # log2 (base 2 logarithm)
self.nativezoom = \
int(max(math.ceil(log2(self.out_ds.RasterXSize
/ float(self.tilesize))),
math.ceil(log2(self.out_ds.RasterYSize
/ float(self.tilesize)))))
if self.tmaxz < self.nativezoom:
self.tmaxz = self.nativezoom
if self.options.verbose:
print ('Native zoom of the raster:', self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz == None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz == None:
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz + 1))
self.tsize = list(range(0, self.tmaxz + 1))
for tz in range(0, self.tmaxz + 1):
tsize = 2.0 ** (self.nativezoom - tz) * self.tilesize
(tminx, tminy) = (0, 0)
tmaxx = int(math.ceil(self.out_ds.RasterXSize / tsize)) \
- 1
tmaxy = int(math.ceil(self.out_ds.RasterYSize / tsize)) \
- 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
self.ct = osr.CoordinateTransformation(self.in_srs,
srs4326)
def rastertileswne(x, y, z):
pixelsizex = 2 ** (self.tmaxz - z) * self.out_gt[1] # X-pixel size in level
pixelsizey = 2 ** (self.tmaxz - z) * self.out_gt[1] # Y-pixel size in level (usually -1*pixelsizex)
west = self.out_gt[0] + x * self.tilesize \
* pixelsizex
east = west + self.tilesize * pixelsizex
south = self.ominy + y * self.tilesize * pixelsizex
north = south + self.tilesize * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
(west, south) = self.ct.TransformPoint(west,
south)[:2]
(east, north) = self.ct.TransformPoint(east,
north)[:2]
return (south, west, north, east)
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0)
| Initialization of the input raster, reprojection if necessary | open_input | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
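The zoom-range logic in open_input boils down to ZoomForPixelSize applied twice. A small sketch for the 'mercator' profile, assuming GlobalMercator.Resolution(z) is 156543.0339 / 2**z metres per pixel for 256 px tiles (the same constant used in generate_tilemapresource below); the raster numbers are made up, and the final fallback return is an addition for the sketch:

```python
MAXZOOMLEVEL = 32

def mercator_resolution(z, tilesize=256):
    return 2 * 3.141592653589793 * 6378137 / tilesize / 2 ** z  # ~156543.034 / 2**z

def zoom_for_pixel_size(pixel_size):
    # Same search as ZoomForPixelSize above: the last zoom not finer than pixel_size.
    for i in range(MAXZOOMLEVEL):
        if pixel_size > mercator_resolution(i):
            return max(0, i - 1)
    return MAXZOOMLEVEL - 1  # fallback, assumption added for completeness

# Hypothetical raster: 8192 x 8192 px at 10 m/pixel.
pixel_size, raster_size, tilesize = 10.0, 8192, 256
tminz = zoom_for_pixel_size(pixel_size * raster_size / float(tilesize))  # whole map in ~1 tile
tmaxz = zoom_for_pixel_size(pixel_size)                                  # native resolution
print(tminz, tmaxz)  # 8 13
```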
def generate_metadata(self):
"""Generation of main metadata files and HTML viewers (metadata related to particular tiles are generated during the tile processing)."""
if not os.path.exists(self.output):
os.makedirs(self.output)
if self.options.profile == 'mercator':
(south, west) = self.mercator.MetersToLatLon(self.ominx,
self.ominy)
(north, east) = self.mercator.MetersToLatLon(self.omaxx,
self.omaxy)
(south, west) = (max(-85.05112878, south), max(-180.0,
west))
(north, east) = (min(85.05112878, north), min(180.0, east))
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') \
and self.options.profile == 'mercator':
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'googlemaps.html')):
f = open(os.path.join(self.output, 'googlemaps.html'
), 'w')
f.write(self.generate_googlemaps())
f.close()
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'openlayers.html')):
f = open(os.path.join(self.output, 'openlayers.html'
), 'w')
f.write(self.generate_openlayers())
f.close()
elif self.options.profile == 'geodetic':
(west, south) = (self.ominx, self.ominy)
(east, north) = (self.omaxx, self.omaxy)
(south, west) = (max(-90.0, south), max(-180.0, west))
(north, east) = (min(90.0, north), min(180.0, east))
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'openlayers.html')):
f = open(os.path.join(self.output, 'openlayers.html'
), 'w')
f.write(self.generate_openlayers())
f.close()
elif self.options.profile == 'raster':
(west, south) = (self.ominx, self.ominy)
(east, north) = (self.omaxx, self.omaxy)
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'openlayers.html')):
f = open(os.path.join(self.output, 'openlayers.html'
), 'w')
f.write(self.generate_openlayers())
f.close()
# Generate tilemapresource.xml.
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'tilemapresource.xml')):
f = open(os.path.join(self.output, 'tilemapresource.xml'),
'w')
f.write(self.generate_tilemapresource())
f.close()
if self.kml:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
(xmin, ymin, xmax, ymax) = self.tminmax[self.tminz]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'doc.kml')):
f = open(os.path.join(self.output, 'doc.kml'), 'w')
f.write(self.generate_kml(None, None, None,
children))
f.close()
| Generation of main metadata files and HTML viewers (metadata related to particular tiles are generated during the tile processing). | generate_metadata | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def generate_base_tiles(self, cpu):
"""Generation of the base tiles (the lowest in the pyramid) directly from the input raster"""
if self.options.verbose:
# mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY
# px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)
# print("Pixel coordinates:", px, py, (mx, my))
print('')
print('Tiles generated from the max zoom level:')
print('----------------------------------------')
print('')
# Set the bounds
(tminx, tminy, tmaxx, tmaxy) = self.tminmax[self.tmaxz]
# Just the center tile
# tminx = tminx+ (tmaxx - tminx)/2
# tminy = tminy+ (tmaxy - tminy)/2
# tmaxx = tminx
# tmaxy = tminy
ds = self.out_ds
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print ('dataBandsCount: ', self.dataBandsCount)
print ('tilebands: ', tilebands)
# print(tminx, tminy, tmaxx, tmaxy)
tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
# print(tcount)
ti = 0
yrange = range(tmaxy, tminy - 1, -1)
if self.options.leaflet:
yrange = range(tminy, tmaxy + 1)
tz = self.tmaxz
for ty in yrange:
for tx in range(tminx, tmaxx + 1):
if self.stopped:
break
ti += 1
if (ti - 1) % self.options.processes != cpu:
continue
tilefilename = os.path.join(self.output, str(tz),
str(tx), '%s.%s' % (ty, self.tileext))
if self.options.verbose:
print (ti, '/', tcount, tilefilename) # , "( TileMapService: z / x / y )"
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print('Tile generation skipped because of --resume')
else:
queue.put(tcount)
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:900913
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
# print "\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif" % ( b[0], b[1], b[2], b[3], "tiles.vrt", tz, tx, ty)
# Don't scale up by nearest neighbour, better change the querysize
# to the native resolution (and return smaller query tile) for scaling
if self.options.profile in ('mercator', 'geodetic'):
(rb, wb) = self.geo_query(ds, b[0], b[3], b[2],
b[1])
nativesize = wb[0] + wb[2] # Pixel size in the raster covering query geo extent
if self.options.verbose:
print ('\tNative Extent (querysize',
nativesize, '): ', rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
(rb, wb) = self.geo_query(
ds,
b[0],
b[3],
b[2],
b[1],
querysize=querysize,
)
(rx, ry, rxsize, rysize) = rb
(wx, wy, wxsize, wysize) = wb
else:
# 'raster' profile:
tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom
xsize = self.out_ds.RasterXSize # size of the raster in pixels
ysize = self.out_ds.RasterYSize
if tz >= self.nativezoom:
querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)
rx = tx * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
if self.options.leaflet:
ry = ty * tsize
else:
ry = ysize - ty * tsize - rysize
(wx, wy) = (0, 0)
(wxsize, wysize) = (int(rxsize / float(tsize)
* self.tilesize), int(rysize / float(tsize)
* self.tilesize))
if not self.options.leaflet:
if wysize != self.tilesize:
wy = self.tilesize - wysize
if self.options.verbose:
print ('\tReadRaster Extent: ', (rx, ry, rxsize,
rysize), (wx, wy, wxsize, wysize))
# Query is in 'nearest neighbour' but can be bigger than the tilesize
# We scale down the query to the tilesize by supplied algorithm.
# Tile dataset in memory
dstile = self.mem_drv.Create('', self.tilesize,
self.tilesize, tilebands)
data = ds.ReadRaster(
rx,
ry,
rxsize,
rysize,
wxsize,
wysize,
band_list=list(range(1, self.dataBandsCount + 1)),
)
alpha = self.alphaband.ReadRaster(
rx,
ry,
rxsize,
rysize,
wxsize,
wysize,
)
if self.tilesize == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(
wx,
wy,
wxsize,
wysize,
data,
band_list=list(range(1, self.dataBandsCount
+ 1)),
)
dstile.WriteRaster(
wx,
wy,
wxsize,
wysize,
alpha,
band_list=[tilebands],
)
else:
# Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID)
# the ReadRaster function returns high-quality raster (not ugly nearest neighbour)
# TODO: Use directly 'near' for WaveLet files
# Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo
dsquery = self.mem_drv.Create('', querysize,
querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now only png tiles are supported)
# for i in range(1, tilebands+1):
# dsquery.GetRasterBand(1).Fill(tilenodata)
dsquery.WriteRaster(
wx,
wy,
wxsize,
wysize,
data,
band_list=list(range(1, self.dataBandsCount
+ 1)),
)
dsquery.WriteRaster(
wx,
wy,
wxsize,
wysize,
alpha,
band_list=[tilebands],
)
self.scale_query_to_tile(dsquery, dstile,
tilefilename)
del dsquery
del data
if self.options.resampling != 'antialias':
# Write a copy of tile to png/jpg
self.out_drv.CreateCopy(tilefilename, dstile,
strict=0)
del dstile
# Create a KML file for this tile.
if self.kml:
kmlfilename = os.path.join(self.output, str(tz),
str(tx), '%d.kml' % ty)
if not self.options.resume \
or not os.path.exists(kmlfilename):
f = open(kmlfilename, 'w')
f.write(self.generate_kml(tx, ty, tz))
f.close()
if not self.options.verbose:
queue.put(tcount)
| Generation of the base tiles (the lowest in the pyramid) directly from the input raster | generate_base_tiles | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
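The `(ti - 1) % self.options.processes != cpu` test in generate_base_tiles above is what distributes work between workers: tiles are enumerated in a fixed order and each process keeps every N-th one. A small standalone sketch (worker count and tile range are illustrative):

```python
def tiles_for_worker(cpu, tile_list, processes):
    """Return the subset of tile_list that worker `cpu` would render."""
    picked = []
    ti = 0
    for tile in tile_list:
        ti += 1
        if (ti - 1) % processes != cpu:
            continue  # another worker renders this tile
        picked.append(tile)
    return picked

processes = 4
all_tiles = [(tx, ty) for ty in range(2) for tx in range(4)]
for cpu in range(processes):
    print(cpu, tiles_for_worker(cpu, all_tiles, processes))
# worker 0 keeps the 1st and 5th tiles in enumeration order, worker 1 the 2nd and 6th, ...
```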
def generate_overview_tiles(self, cpu, tz):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
tilebands = self.dataBandsCount + 1
# Usage of existing tiles: from 4 underlying tiles generate one as overview.
tcount = 0
for z in range(self.tmaxz - 1, self.tminz - 1, -1):
(tminx, tminy, tmaxx, tmaxy) = self.tminmax[z]
tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy
- tminy))
ti = 0
# querysize = tilesize * 2
(tminx, tminy, tmaxx, tmaxy) = self.tminmax[tz]
yrange = range(tmaxy, tminy - 1, -1)
if self.options.leaflet:
yrange = range(tminy, tmaxy + 1)
for ty in yrange:
for tx in range(tminx, tmaxx + 1):
if self.stopped:
break
ti += 1
if (ti - 1) % self.options.processes != cpu:
continue
tilefilename = os.path.join(self.output, str(tz),
str(tx), '%s.%s' % (ty, self.tileext))
if self.options.verbose:
print (ti, '/', tcount, tilefilename) # , "( TileMapService: z / x / y )"
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print('Tile generation skipped because of --resume')
else:
queue.put(tcount)
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
dsquery = self.mem_drv.Create('', 2 * self.tilesize, 2
* self.tilesize, tilebands)
# TODO: fill the null value
# for i in range(1, tilebands+1):
# dsquery.GetRasterBand(1).Fill(tilenodata)
dstile = self.mem_drv.Create('', self.tilesize,
self.tilesize, tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
(minx, miny, maxx, maxy) = self.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y \
<= maxy:
dsquerytile = \
gdal.Open(os.path.join(self.output,
str(tz + 1), str(x), '%s.%s' % (y,
self.tileext)), gdal.GA_ReadOnly)
if self.options.leaflet:
if ty:
tileposy = y % (2 * ty) \
* self.tilesize
elif ty == 0 and y == 1:
tileposy = self.tilesize
else:
tileposy = 0
else:
if ty == 0 and y == 1 or ty != 0 and y \
% (2 * ty) != 0:
tileposy = 0
else:
tileposy = self.tilesize
if tx:
tileposx = x % (2 * tx) * self.tilesize
elif tx == 0 and x == 1:
tileposx = self.tilesize
else:
tileposx = 0
dsquery.WriteRaster(
tileposx,
tileposy,
self.tilesize,
self.tilesize,
dsquerytile.ReadRaster(0, 0,
self.tilesize, self.tilesize),
band_list=list(range(1, tilebands
+ 1)),
)
children.append([x, y, tz + 1])
self.scale_query_to_tile(dsquery, dstile, tilefilename)
# Write a copy of tile to png/jpg
if self.options.resampling != 'antialias':
# Write a copy of tile to png/jpg
self.out_drv.CreateCopy(tilefilename, dstile,
strict=0)
if self.options.verbose:
print (
'\tbuild from zoom',
tz + 1,
' tiles:',
(2 * tx, 2 * ty),
(2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1),
(2 * tx + 1, 2 * ty + 1),
)
# Create a KML file for this tile.
if self.kml:
f = open(os.path.join(self.output, '%d/%d/%d.kml'
% (tz, tx, ty)), 'w')
f.write(self.generate_kml(tx, ty, tz, children))
f.close()
if not self.options.verbose:
queue.put(tcount)
| Generation of the overview tiles (higher in the pyramid) based on existing tiles | generate_overview_tiles | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
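The child-placement arithmetic in generate_overview_tiles can be restated more plainly: parent (tx, ty) at zoom tz is built from children 2*tx..2*tx+1, 2*ty..2*ty+1 at zoom tz+1, pasted into a 2*tilesize query image and then scaled down. A sketch of the non-leaflet (TMS) placement, equivalent to the modulo expressions above; the parent coordinates are arbitrary:

```python
tilesize = 256
tx, ty, tz = 3, 5, 7  # arbitrary parent tile

for y in range(2 * ty, 2 * ty + 2):
    for x in range(2 * tx, 2 * tx + 2):
        tileposx = (x - 2 * tx) * tilesize
        # In TMS orientation the child with the larger y (further north) goes on top.
        tileposy = 0 if y == 2 * ty + 1 else tilesize
        print('child %d/%d/%d -> offset (%d, %d)' % (tz + 1, x, y, tileposx, tileposy))
```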
def geo_query(
self,
ds,
ulx,
uly,
lrx,
lry,
querysize=0,
):
"""For given dataset and query in cartographic coordinates
returns parameters for ReadRaster() in raster coordinates and
x/y shifts (for border tiles). If the querysize is not given, the
extent is returned in the native resolution of dataset ds."""
geotran = ds.GetGeoTransform()
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = int((lrx - ulx) / geotran[1] + 0.5)
rysize = int((lry - uly) / geotran[5] + 0.5)
if not querysize:
(wxsize, wysize) = (rxsize, rysize)
else:
(wxsize, wysize) = (querysize, querysize)
# Coordinates should not go out of the bounds of the raster
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return ((rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
| For given dataset and query in cartographic coordinates returns parameters for ReadRaster() in raster coordinates and x/y shifts (for border tiles). If the querysize is not given, the extent is returned in the native resolution of dataset ds. | geo_query | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
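A worked example (with made-up numbers) of the core arithmetic in geo_query: converting a tile's bounds in the output SRS into a ReadRaster window in raster pixel coordinates:

```python
geotran = (500000.0, 10.0, 0.0, 4600000.0, 0.0, -10.0)  # hypothetical geotransform, 10 m pixels
ulx, uly, lrx, lry = 510000.0, 4595000.0, 512560.0, 4592440.0  # bounds of one 256 px tile

rx = int((ulx - geotran[0]) / geotran[1] + 0.001)  # 1000 pixels right of the raster origin
ry = int((uly - geotran[3]) / geotran[5] + 0.001)  # 500 lines below the raster origin
rxsize = int((lrx - ulx) / geotran[1] + 0.5)       # 256
rysize = int((lry - uly) / geotran[5] + 0.5)       # 256
print(rx, ry, rxsize, rysize)  # 1000 500 256 256
```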
def scale_query_to_tile(
self,
dsquery,
dstile,
tilefilename='',
):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tilesize = dstile.RasterXSize
tilebands = dstile.RasterCount
if self.options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands + 1):
# Black border around NODATA
# if i != 4:
# dsquery.GetRasterBand(i).SetNoDataValue(0)
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i),
dstile.GetRasterBand(i), 'average')
if res != 0:
self.error('RegenerateOverview() failed on %s, error %d'
% (tilefilename, res))
elif self.options.resampling == 'antialias':
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands),
numpy.uint8)
for i in range(tilebands):
array[:, :, i] = \
gdalarray.BandReadAsArray(dsquery.GetRasterBand(i
+ 1), 0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, self.tiledriver)
else:
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform((
0.0,
tilesize / float(querysize),
0.0,
0.0,
0.0,
tilesize / float(querysize),
))
dstile.SetGeoTransform((
0.0,
1.0,
0.0,
0.0,
0.0,
1.0,
))
res = gdal.ReprojectImage(dsquery, dstile, None, None,
self.resampling)
if res != 0:
self.error('ReprojectImage() failed on %s, error %d'
% (tilefilename, res))
| Scales down query dataset to the tile dataset | scale_query_to_tile | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tilesize, tileformat, profile
"""
args = {}
args['title'] = self.options.title
(args['south'], args['west'], args['north'], args['east']) = \
self.swne
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = 'EPSG:900913'
elif self.options.profile == 'geodetic':
args['srs'] = 'EPSG:4326'
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ''
s = \
"""<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tilesize)d" height="%(tilesize)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" \
% args
for z in range(self.tminz, self.tmaxz + 1):
if self.options.profile == 'raster':
s += \
""" <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" \
% (args['publishurl'], z, 2 ** (self.nativezoom
- z) * self.out_gt[1], z)
elif self.options.profile == 'mercator':
s += \
""" <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" \
% (args['publishurl'], z, 156543.0339 / 2 ** z, z)
elif self.options.profile == 'geodetic':
s += \
""" <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" \
% (args['publishurl'], z, 0.703125 / 2 ** z, z)
s += """ </TileSets>
</TileMap>
"""
return s
| Template for tilemapresource.xml. Returns filled string. Expected variables: title, north, south, east, west, isepsg4326, projection, publishurl, zoompixels, tilesize, tileformat, profile | generate_tilemapresource | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
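The units-per-pixel values written into tilemapresource.xml above follow two fixed progressions, restated here as a tiny sketch (the zoom range is illustrative):

```python
for z in range(0, 4):
    mercator_upp = 156543.0339 / 2 ** z  # metres per pixel (EPSG:900913)
    geodetic_upp = 0.703125 / 2 ** z     # degrees per pixel (EPSG:4326, tmscompatible base)
    print(z, mercator_upp, geodetic_upp)
```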
def generate_kml(
self,
tx,
ty,
tz,
children=[],
**args
):
"""
Template for the KML. Returns filled string.
"""
(args['tx'], args['ty'], args['tz']) = (tx, ty, tz)
args['tileformat'] = self.tileext
if 'tilesize' not in args:
args['tilesize'] = self.tilesize
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tilesize'] / 2) # / 2.56) # default 128
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tilesize'] * 8) # 1.7) # default 2048 (used to be -1)
if children == []:
args['maxlodpixels'] = -1
if tx == None:
tilekml = False
args['title'] = self.options.title
else:
tilekml = True
args['title'] = '%d/%d/%d.kml' % (tz, tx, ty)
(args['south'], args['west'], args['north'], args['east'
]) = self.tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx != None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = self.options.url
if not url:
if tilekml:
url = '../../'
else:
url = ''
s = \
"""<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" \
% args
if tilekml:
s += \
"""
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(ty)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" \
% args
for (cx, cy, cz) in children:
(csouth, cwest, cnorth, ceast) = self.tileswne(cx, cy, cz)
s += \
"""
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" \
% (
cz,
cx,
cy,
args['tileformat'],
cnorth,
csouth,
ceast,
cwest,
args['minlodpixels'],
url,
cz,
cx,
cy,
)
s += """ </Document>
</kml>
"""
return s
| Template for the KML. Returns filled string. | generate_kml | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
(args['south'], args['west'], args['north'], args['east']) = \
self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = \
"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity for whole overlay is then not changeable, either or...
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" \
% args
if self.kml:
s += \
"""
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += \
"""
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += \
"""
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" \
% args
s += \
"""
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.maptiler.org/">MapTiler</a>/<a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" \
% args
return s
| Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile. It returns filled string. Expected variables: title, googlemapskey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl | generate_googlemaps | python | commenthol/gdal2tiles-leaflet | gdal2tiles-multiprocess.py | https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py | MIT |
def generate_openlayers(self):
"""
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
It returns filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
(args['south'], args['west'], args['north'], args['east']) = \
self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.tmscompatible:
args['tmsoffset'] = '-1'
else:
args['tmsoffset'] = ''
if self.options.profile == 'raster':
args['rasterzoomlevels'] = self.tmaxz + 1
args['rastermaxresolution'] = 2 ** self.nativezoom \
* self.out_gt[1]
s = \
"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
</style>""" \
% args
if self.options.profile == 'mercator':
s += \
"""
<script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>""" \
% args
s += \
"""
<script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
<script>
var map;
var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var emptyTileURL = "http://www.maptiler.org/img/none.png";
OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
function init(){""" \
% args
if self.options.profile == 'mercator':
s += \
"""
var options = {
div: "map",
controls: [],
projection: "EPSG:900913",
displayProjection: new OpenLayers.Projection("EPSG:4326"),
numZoomLevels: 20
};
map = new OpenLayers.Map(options);
// Create Google Mercator layers
var gmap = new OpenLayers.Layer.Google("Google Streets",
{
type: google.maps.MapTypeId.ROADMAP,
sphericalMercator: true
});
var gsat = new OpenLayers.Layer.Google("Google Satellite",
{
type: google.maps.MapTypeId.SATELLITE,
sphericalMercator: true
});
var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
{
type: google.maps.MapTypeId.HYBRID,
sphericalMercator: true
});
var gter = new OpenLayers.Layer.Google("Google Terrain",
{
type: google.maps.MapTypeId.TERRAIN,
sphericalMercator: true
});
// Create Bing layers
var broad = new OpenLayers.Layer.Bing({
name: "Bing Roads",
key: "%(bingkey)s",
type: "Road",
sphericalMercator: true
});
var baer = new OpenLayers.Layer.Bing({
name: "Bing Aerial",
key: "%(bingkey)s",
type: "Aerial",
sphericalMercator: true
});
var bhyb = new OpenLayers.Layer.Bing({
name: "Bing Hybrid",
key: "%(bingkey)s",
type: "AerialWithLabels",
sphericalMercator: true
});
// Create OSM layer
var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.TMS( "TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([gmap, gsat, ghyb, gter,
broad, baer, bhyb,
osm, tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent( mapBounds.transform(map.displayProjection, map.projection ) );
""" \
% args
elif self.options.profile == 'geodetic':
s += \
"""
var options = {
div: "map",
controls: [],
projection: "EPSG:4326"
};
map = new OpenLayers.Map(options);
var wms = new OpenLayers.Layer.WMS("VMap0",
"http://tilecache.osgeo.org/wms-c/Basic.py?",
{
layers: 'basic',
format: 'image/png'
}
);
var tmsoverlay = new OpenLayers.Layer.TMS( "TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([wms,tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent( mapBounds );
""" \
% args
elif self.options.profile == 'raster':
s += \
"""
var options = {
div: "map",
controls: [],
maxExtent: new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s ),
maxResolution: %(rastermaxresolution)f,
numZoomLevels: %(rasterzoomlevels)d
};
map = new OpenLayers.Map(options);
var layer = new OpenLayers.Layer.TMS( "TMS Layer","",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
getURL: getURL
});
map.addLayer(layer);
map.zoomToExtent( mapBounds );
""" \
% args
s += \
"""
map.addControls([new OpenLayers.Control.PanZoomBar(),
new OpenLayers.Control.Navigation(),
new OpenLayers.Control.MousePosition(),
new OpenLayers.Control.ArgParser(),
new OpenLayers.Control.Attribution()]);
}
""" \
% args
if self.options.profile == 'mercator':
s += \
"""
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
z+=1;
}
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" \
% args
elif self.options.profile == 'geodetic':
s += \
"""
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom()%(tmsoffset)s;
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" \
% args
elif self.options.profile == 'raster':
s += \
"""
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" \
% args
s += \
"""
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
if (map.updateSize) { map.updateSize(); };
}
onresize=function(){ resize(); };
</script>
</head>
<body onload="init()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.maptiler.org/">MapTiler</a>/<a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
            <!-- PLEASE, LEAVE THIS NOTE ABOUT THE AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN AN HTML COMMENT. THANK YOU -->
</div>
<div id="map"></div>
<script type="text/javascript" >resize()</script>
</body>
</html>""" \
% args
return s
|
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
It returns filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
|
generate_openlayers
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles-multiprocess.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles-multiprocess.py
|
MIT
|
def __init__(self, tileSize=256):
'''Initialize the TMS Global Mercator pyramid'''
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
|
Initialize the TMS Global Mercator pyramid
|
__init__
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def LatLonToMeters(self, lat, lon):
'''Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913'''
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) \
/ (math.pi / 180.0)
my = my * self.originShift / 180.0
return (mx, my)
|
Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913
|
LatLonToMeters
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def MetersToLatLon(self, mx, my):
'''Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum'''
lon = mx / self.originShift * 180.0
lat = my / self.originShift * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi
/ 180.0)) - math.pi / 2.0)
return (lat, lon)
|
Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum
|
MetersToLatLon
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
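A minimal standalone sketch (not part of the module) of the forward and inverse Spherical Mercator math used by LatLonToMeters and MetersToLatLon above; the helper names are illustrative, and the constant is the originShift computed in __init__:

import math

ORIGIN_SHIFT = 2 * math.pi * 6378137 / 2.0      # 20037508.342789244, as in __init__

def latlon_to_meters(lat, lon):
    # same math as LatLonToMeters above
    mx = lon * ORIGIN_SHIFT / 180.0
    my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
    return mx, my * ORIGIN_SHIFT / 180.0

def meters_to_latlon(mx, my):
    # same math as MetersToLatLon above
    lon = mx / ORIGIN_SHIFT * 180.0
    lat = my / ORIGIN_SHIFT * 180.0
    return 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0), lon

mx, my = latlon_to_meters(50.0, 14.4)    # roughly Prague
print(mx, my)                            # ~ (1603000.7, 6446275.8)
print(meters_to_latlon(mx, my))          # back to ~ (50.0, 14.4)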
|
def PixelsToMeters(
self,
px,
py,
zoom,
):
'''Converts pixel coordinates in given zoom level of pyramid to EPSG:900913'''
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return (mx, my)
|
Converts pixel coordinates in given zoom level of pyramid to EPSG:900913
|
PixelsToMeters
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def MetersToPixels(
self,
mx,
my,
zoom,
):
'''Converts EPSG:900913 to pyramid pixel coordinates in given zoom level'''
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return (px, py)
|
Converts EPSG:900913 to pyramid pixel coordinates in given zoom level
|
MetersToPixels
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def PixelsToTile(self, px, py):
'''Returns a tile covering region in given pixel coordinates'''
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return (tx, ty)
|
Returns a tile covering region in given pixel coordinates
|
PixelsToTile
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def PixelsToRaster(
self,
px,
py,
zoom,
):
'''Move the origin of pixel coordinates to top-left corner'''
mapSize = self.tileSize << zoom
return (px, mapSize - py)
|
Move the origin of pixel coordinates to top-left corner
|
PixelsToRaster
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def MetersToTile(
self,
mx,
my,
zoom,
):
'''Returns tile for given mercator coordinates'''
(px, py) = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
|
Returns tile for given mercator coordinates
|
MetersToTile
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
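Chaining the pieces above (Resolution, MetersToPixels, PixelsToTile) yields the TMS tile for a mercator coordinate; a standalone sketch with illustrative names, continuing the Prague example:

import math

TILE_SIZE = 256
ORIGIN_SHIFT = 2 * math.pi * 6378137 / 2.0
INITIAL_RESOLUTION = 2 * math.pi * 6378137 / TILE_SIZE

def meters_to_tile(mx, my, zoom):
    # Resolution(), MetersToPixels() and PixelsToTile() combined
    res = INITIAL_RESOLUTION / 2 ** zoom
    px = (mx + ORIGIN_SHIFT) / res
    py = (my + ORIGIN_SHIFT) / res
    return (int(math.ceil(px / float(TILE_SIZE)) - 1),
            int(math.ceil(py / float(TILE_SIZE)) - 1))

print(meters_to_tile(1603000.7, 6446275.8, 10))   # (552, 676) in TMS tile coordinates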
|
def TileBounds(
self,
tx,
ty,
zoom,
):
'''Returns bounds of the given tile in EPSG:900913 coordinates'''
(minx, miny) = self.PixelsToMeters(tx * self.tileSize, ty
* self.tileSize, zoom)
(maxx, maxy) = self.PixelsToMeters((tx + 1) * self.tileSize,
(ty + 1) * self.tileSize, zoom)
return (minx, miny, maxx, maxy)
|
Returns bounds of the given tile in EPSG:900913 coordinates
|
TileBounds
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def TileLatLonBounds(
self,
tx,
ty,
zoom,
):
        '''Returns bounds of the given tile in latitude/longitude using WGS84 datum'''
bounds = self.TileBounds(tx, ty, zoom)
(minLat, minLon) = self.MetersToLatLon(bounds[0], bounds[1])
(maxLat, maxLon) = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
|
    Returns bounds of the given tile in latitude/longitude using WGS84 datum
|
TileLatLonBounds
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def Resolution(self, zoom):
'''Resolution (meters/pixel) for given zoom level (measured at Equator)'''
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / 2 ** zoom
|
Resolution (meters/pixel) for given zoom level (measured at Equator)
|
Resolution
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def ZoomForPixelSize(self, pixelSize):
'''Maximal scaledown zoom of the pyramid closest to the pixelSize.'''
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i != 0:
return i - 1
else:
return 0 # We don't want to scale up
|
Maximal scaledown zoom of the pyramid closest to the pixelSize.
|
ZoomForPixelSize
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
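Because each zoom level halves the resolution, ZoomForPixelSize just walks the resolution table and returns the last level that is not finer than the source pixel size; a standalone mirror of that logic (illustrative names only):

import math

MAXZOOMLEVEL = 32
INITIAL_RESOLUTION = 2 * math.pi * 6378137 / 256    # 156543.03392804062 m/px at zoom 0

def resolution(zoom):
    return INITIAL_RESOLUTION / 2 ** zoom

def zoom_for_pixel_size(pixel_size):
    # mirrors ZoomForPixelSize above
    for i in range(MAXZOOMLEVEL):
        if pixel_size > resolution(i):
            return i - 1 if i != 0 else 0

print(resolution(14))             # ~9.5546 m/px
print(zoom_for_pixel_size(10.0))  # 13: zoom 14 would already be finer than 10 m source pixels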
|
def GoogleTile(
self,
tx,
ty,
zoom,
):
'''Converts TMS tile coordinates to Google Tile coordinates'''
# coordinate origin is moved from bottom-left to top-left corner of the extent
return (tx, 2 ** zoom - 1 - ty)
|
Converts TMS tile coordinates to Google Tile coordinates
|
GoogleTile
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def QuadTree(
self,
tx,
ty,
zoom,
):
'''Converts TMS tile coordinates to Microsoft QuadTree'''
quadKey = ''
ty = 2 ** zoom - 1 - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << i - 1
if tx & mask != 0:
digit += 1
if ty & mask != 0:
digit += 2
quadKey += str(digit)
return quadKey
|
Converts TMS tile coordinates to Microsoft QuadTree
|
QuadTree
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
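GoogleTile only flips the row (TMS counts rows from the bottom, Google/Bing from the top), and QuadTree emits one digit per zoom level from the flipped coordinates; a standalone mirror, reusing the tile from the mercator example above:

def google_tile(tx, ty, zoom):
    # TMS row -> Google/Bing row
    return tx, 2 ** zoom - 1 - ty

def quad_tree(tx, ty, zoom):
    # mirrors QuadTree above: digit = x_bit + 2 * y_bit, most significant bit first
    quadkey = ''
    ty = 2 ** zoom - 1 - ty
    for i in range(zoom, 0, -1):
        digit = 0
        mask = 1 << (i - 1)
        if tx & mask:
            digit += 1
        if ty & mask:
            digit += 2
        quadkey += str(digit)
    return quadkey

print(google_tile(552, 676, 10))   # (552, 347)
print(quad_tree(552, 676, 10))     # '1202123022'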
|
def LonLatToPixels(
self,
lon,
lat,
zoom,
):
'''Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid'''
res = self.resFact / 2 ** zoom
px = (180 + lon) / res
py = (90 + lat) / res
return (px, py)
|
Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid
|
LonLatToPixels
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def PixelsToTile(self, px, py):
'''Returns coordinates of the tile covering region in pixel coordinates'''
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return (tx, ty)
|
Returns coordinates of the tile covering region in pixel coordinates
|
PixelsToTile
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def LonLatToTile(
self,
lon,
lat,
zoom,
):
'''Returns the tile for zoom which covers given lon/lat coordinates'''
(px, py) = self.LonLatToPixels(lon, lat, zoom)
return self.PixelsToTile(px, py)
|
Returns the tile for zoom which covers given lon/lat coordinates
|
LonLatToTile
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def Resolution(self, zoom):
'''Resolution (arc/pixel) for given zoom level (measured at Equator)'''
return self.resFact / 2 ** zoom
# return 180 / float( 1 << (8+zoom) )
|
Resolution (arc/pixel) for given zoom level (measured at Equator)
|
Resolution
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def ZoomForPixelSize(self, pixelSize):
'''Maximal scaledown zoom of the pyramid closest to the pixelSize.'''
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i != 0:
return i - 1
else:
return 0 # We don't want to scale up
|
Maximal scaledown zoom of the pyramid closest to the pixelSize.
|
ZoomForPixelSize
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def TileBounds(
self,
tx,
ty,
zoom,
):
'''Returns bounds of the given tile'''
res = self.resFact / 2 ** zoom
return (tx * self.tileSize * res - 180, ty * self.tileSize
* res - 90, (tx + 1) * self.tileSize * res - 180, (ty
+ 1) * self.tileSize * res - 90)
|
Returns bounds of the given tile
|
TileBounds
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def TileLatLonBounds(
self,
tx,
ty,
zoom,
):
'''Returns bounds of the given tile in the SWNE form'''
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
|
Returns bounds of the given tile in the SWNE form
|
TileLatLonBounds
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
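For the geodetic profile the tile grid is a plain degree grid. resFact is set in the GlobalGeodetic constructor (not included in this excerpt); with the -d/--tmscompatible option it corresponds to 0.703125 deg/px at zoom 0 (two 256 px tiles covering the world), otherwise presumably to 1.40625 deg/px (a single tile). A standalone sketch of the bounds math for the tmscompatible case:

TILE_SIZE = 256
RES_FACT = 180.0 / TILE_SIZE           # 0.703125, the tmscompatible base resolution

def tile_lat_lon_bounds(tx, ty, zoom):
    # mirrors TileBounds + TileLatLonBounds above (returns south, west, north, east)
    res = RES_FACT / 2 ** zoom
    minx = tx * TILE_SIZE * res - 180
    miny = ty * TILE_SIZE * res - 90
    maxx = (tx + 1) * TILE_SIZE * res - 180
    maxy = (ty + 1) * TILE_SIZE * res - 90
    return miny, minx, maxy, maxx

print(tile_lat_lon_bounds(0, 0, 0))    # (-90.0, -180.0, 90.0, 0.0): the western of the two zoom-0 tiles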
|
def __init__(
self,
width,
height,
tilesize=256,
tileformat='jpg',
):
"""Initialization of the Zoomify tile tree"""
self.tilesize = tilesize
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tilesize), math.ceil(height
/ tilesize))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
        self.tierSizeInTiles.append(tiles)
# Image size in pixels for each pyramid tierself
self.tierImageSize = []
self.tierImageSize.append(imagesize)
        while imagesize[0] > tilesize or imagesize[1] > tilesize:
imagesize = (math.floor(imagesize[0] / 2),
math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tilesize),
math.ceil(imagesize[1] / tilesize))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
        self.tileCountUpToTier = [0]
for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(self.tierSizeInTiles[i
- 1][0] * self.tierSizeInTiles[i - 1][1]
+ self.tileCountUpToTier[i - 1])
|
Initialization of the Zoomify tile tree
|
__init__
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def tilefilename(
self,
x,
y,
z,
):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] \
+ self.tileCountUpToTier[z]
return os.path.join('TileGroup%.0f' % math.floor(tileIndex
/ 256), '%s-%s-%s.%s' % (z, x, y,
self.tileformat))
|
Returns filename for tile with given coordinates
|
tilefilename
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
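A standalone sketch of the Zoomify layout for a hypothetical 4000 x 3000 px raster, mirroring the tier bookkeeping of the __init__ above and the TileGroup bucketing of tilefilename; all names and sizes here are illustrative:

import math
import os

tilesize = 256
w, h = 4000, 3000                       # hypothetical raster size
tier_size = []                          # tiles per tier, coarsest first after reverse()
while True:
    tier_size.append((math.ceil(w / tilesize), math.ceil(h / tilesize)))
    if w <= tilesize and h <= tilesize:
        break
    w, h = math.floor(w / 2), math.floor(h / 2)
tier_size.reverse()
count_up_to = [0]                       # number of tiles in all tiers below a given tier
for i in range(1, len(tier_size)):
    count_up_to.append(tier_size[i - 1][0] * tier_size[i - 1][1] + count_up_to[i - 1])

def tilefilename(x, y, z, tileformat='jpg'):
    # mirrors Zoomify.tilefilename above: 256 tiles per TileGroup folder
    tile_index = x + y * tier_size[z][0] + count_up_to[z]
    return os.path.join('TileGroup%.0f' % math.floor(tile_index / 256),
                        '%s-%s-%s.%s' % (z, x, y, tileformat))

print(len(tier_size))          # 5 tiers (zoom 0..4) for 4000 x 3000 px
print(tilefilename(3, 2, 4))   # e.g. TileGroup0/4-3-2.jpg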
|
def process(self):
"""The main processing function, runs all the main steps of processing"""
# Opening and preprocessing of the input file
self.open_input()
# Generation of main metadata files and HTML viewers
self.generate_metadata()
# Generation of the lowest tiles
self.generate_base_tiles()
# Generation of the overview tiles (higher in the pyramid)
self.generate_overview_tiles()
|
The main processing function, runs all the main steps of processing
|
process
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def error(self, msg, details=''):
"""Print an error message and stop the processing"""
if details:
            self.parser.error(msg + '\n' + details)
else:
self.parser.error(msg)
|
Print an error message and stop the processing
|
error
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def optparse_init(self):
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = 'Usage: %prog [options] input_file(s) [output]'
p = OptionParser(usage, version='%prog ' + __version__)
p.add_option(
'-p',
'--profile',
dest='profile',
type='choice',
choices=profile_list,
help="Tile cutting profile (%s) - default 'mercator' (Google Maps compatible)"
% ','.join(profile_list),
)
p.add_option(
'-r',
'--resampling',
dest='resampling',
type='choice',
choices=resampling_list,
help="Resampling method (%s) - default 'average'"
% ','.join(resampling_list),
)
p.add_option('-s', '--s_srs', dest='s_srs', metavar='SRS',
help='The spatial reference system used for the source input data'
)
p.add_option('-z', '--zoom', dest='zoom',
help="Zoom levels to render (format:'2-5' or '10')."
)
p.add_option('-e', '--resume', dest='resume',
action='store_true',
help='Resume mode. Generate only missing files.')
p.add_option('-a', '--srcnodata', dest='srcnodata',
metavar='NODATA',
help='NODATA transparency value to assign to the input data'
)
p.add_option('-d', '--tmscompatible', dest='tmscompatible',
action='store_true',
                     help='When using the geodetic profile, specifies the base resolution as 0.703125 degrees/pixel, i.e. 2 tiles at zoom level 0.'
)
p.add_option('-l', '--leaflet', action='store_true',
dest='leaflet',
help="Set 0,0 point to north. For use with 'leaflet'. Requires -p raster. "
)
p.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Print status messages to stdout')
# KML options
g = OptionGroup(p, 'KML (Google Earth) options',
'Options for generated Google Earth SuperOverlay metadata'
)
g.add_option('-k', '--force-kml', dest='kml',
action='store_true',
help="Generate KML for Google Earth - default for 'geodetic' profile and 'raster' in EPSG:4326. For a dataset with different projection use with caution!"
)
g.add_option('-n', '--no-kml', dest='kml', action='store_false'
,
help='Avoid automatic generation of KML files for EPSG:4326'
)
g.add_option('-u', '--url', dest='url',
help='URL address where the generated tiles are going to be published'
)
p.add_option_group(g)
# HTML options
g = OptionGroup(p, 'Web viewer options',
'Options for generated HTML viewers a la Google Maps'
)
g.add_option(
'-w',
'--webviewer',
dest='webviewer',
type='choice',
choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'"
% ','.join(webviewer_list),
)
g.add_option('-t', '--title', dest='title',
help='Title of the map')
g.add_option('-c', '--copyright', dest='copyright',
help='Copyright for the map')
g.add_option('-g', '--googlekey', dest='googlekey',
help='Google Maps API key from http://code.google.com/apis/maps/signup.html'
)
        g.add_option('-b', '--bingkey', dest='bingkey',
                     help='Bing Maps API key from https://www.bingmapsportal.com/'
                     )
p.add_option_group(g)
# TODO: MapFile + TileIndexes per zoom level for efficient MapServer WMS
# g = OptionGroup(p, "WMS MapServer metadata", "Options for generated mapfile and tileindexes for MapServer")
# g.add_option("-i", "--tileindex", dest='wms', action="store_true"
# help="Generate tileindex and mapfile for MapServer (WMS)")
# p.add_option_group(g)
p.set_defaults(
verbose=False,
profile='mercator',
kml=False,
url='',
webviewer='all',
copyright='',
resampling='average',
resume=False,
googlekey='INSERT_YOUR_KEY_HERE',
bingkey='INSERT_YOUR_KEY_HERE',
)
self.parser = p
|
Prepare the option parser for input (argv)
|
optparse_init
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
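Typical invocations of the script wired up by this option parser (illustrative file names and zoom ranges; only options defined above are used):

  python gdal2tiles.py -p mercator -z 5-12 -w openlayers input.tif tiles/
  python gdal2tiles.py -p raster -l -t "My scanned map" scan.png tiles/     (0,0 tile at the top-left, for Leaflet)
  python gdal2tiles.py -p geodetic -d -k input.tif tiles/                   (two tiles at zoom 0, with KML)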
|
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.AllRegister()
# Initialize necessary GDAL drivers
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
            raise Exception("The '%s' driver was not found, is it available in this GDAL build?"
                            % self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?"
)
# Open the input file
if self.input:
self.in_ds = gdal.Open(self.input, gdal.GA_ReadOnly)
else:
raise Exception('No input file was specified')
if self.options.verbose:
print('Input file:', '( %sP x %sL - %s bands)'
% (self.in_ds.RasterXSize, self.in_ds.RasterYSize,
self.in_ds.RasterCount))
if not self.in_ds:
# Note: GDAL prints the ERROR message too
self.error("It is not possible to open the input file '%s'."
% self.input)
# Read metadata from the input file
if self.in_ds.RasterCount == 0:
self.error("Input file '%s' has no raster band"
% self.input)
if self.in_ds.GetRasterBand(1).GetRasterColorTable():
# TODO: Process directly paletted dataset by generating VRT in memory
            self.error('Please convert this file to RGB/RGBA and run gdal2tiles on the result.',
                       """From a paletted file you can create an RGBA file (temp.vrt) by:
gdal_translate -of vrt -expand rgba %s temp.vrt
then run:
gdal2tiles temp.vrt"""
                       % self.input)
# Get NODATA value
self.in_nodata = []
for i in range(1, self.in_ds.RasterCount + 1):
if self.in_ds.GetRasterBand(i).GetNoDataValue() != None:
self.in_nodata.append(self.in_ds.GetRasterBand(i).GetNoDataValue())
if self.options.srcnodata:
nds = list(map(float, self.options.srcnodata.split(',')))
if len(nds) < self.in_ds.RasterCount:
self.in_nodata = (nds
* self.in_ds.RasterCount)[:self.in_ds.RasterCount]
else:
self.in_nodata = nds
if self.options.verbose:
print('NODATA: %s' % self.in_nodata)
#
# Here we should have RGBA input dataset opened in self.in_ds
#
if self.options.verbose:
print ('Preprocessed file:', '( %sP x %sL - %s bands)'
% (self.in_ds.RasterXSize, self.in_ds.RasterYSize,
self.in_ds.RasterCount))
# Spatial Reference System of the input raster
self.in_srs = None
if self.options.s_srs:
self.in_srs = osr.SpatialReference()
self.in_srs.SetFromUserInput(self.options.s_srs)
self.in_srs_wkt = self.in_srs.ExportToWkt()
else:
self.in_srs_wkt = self.in_ds.GetProjection()
if not self.in_srs_wkt and self.in_ds.GetGCPCount() != 0:
self.in_srs_wkt = self.in_ds.GetGCPProjection()
if self.in_srs_wkt:
self.in_srs = osr.SpatialReference()
self.in_srs.ImportFromWkt(self.in_srs_wkt)
# elif self.options.profile != 'raster':
# self.error("There is no spatial reference system info included in the input file.","You should run gdal2tiles with --s_srs EPSG:XXXX or similar.")
# Spatial Reference System of tiles
self.out_srs = osr.SpatialReference()
if self.options.profile == 'mercator':
self.out_srs.ImportFromEPSG(900913)
elif self.options.profile == 'geodetic':
self.out_srs.ImportFromEPSG(4326)
else:
self.out_srs = self.in_srs
# Are the reference systems the same? Reproject if necessary.
self.out_ds = None
if self.options.profile in ('mercator', 'geodetic'):
if self.in_ds.GetGeoTransform() == (
0.0,
1.0,
0.0,
0.0,
0.0,
1.0,
) and self.in_ds.GetGCPCount() == 0:
self.error("There is no georeference - neither affine transformation (worldfile) nor GCPs. You can generate only 'raster' profile tiles."
,
"Either gdal2tiles with parameter -p 'raster' or use another GIS software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
)
if self.in_srs:
if self.in_srs.ExportToProj4() \
!= self.out_srs.ExportToProj4() \
or self.in_ds.GetGCPCount() != 0:
# Generation of VRT dataset in tile projection, default 'nearest neighbour' warping
self.out_ds = gdal.AutoCreateWarpedVRT(self.in_ds,
self.in_srs_wkt, self.out_srs.ExportToWkt())
# TODO: HIGH PRIORITY: Correction of AutoCreateWarpedVRT according the max zoomlevel for correct direct warping!!!
if self.options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
self.out_ds.GetDriver().CreateCopy('tiles.vrt',
self.out_ds)
# Note: self.in_srs and self.in_srs_wkt contain still the non-warped reference system!!!
# Correction of AutoCreateWarpedVRT for NODATA values
if self.in_nodata != []:
import tempfile
tempfilename = tempfile.mktemp('-gdal2tiles.vrt'
)
self.out_ds.GetDriver().CreateCopy(tempfilename,
self.out_ds)
# open as a text file
s = open(tempfilename).read()
# Add the warping options
s = s.replace("""<GDALWarpOptions>""",
"""<GDALWarpOptions>
<Option name="INIT_DEST">NO_DATA</Option>
<Option name="UNIFIED_SRC_NODATA">YES</Option>""")
# replace BandMapping tag for NODATA bands....
for i in range(len(self.in_nodata)):
s = \
s.replace("""<BandMapping src="%i" dst="%i"/>"""
% (i + 1, i + 1),
"""<BandMapping src="%i" dst="%i">
<SrcNoDataReal>%i</SrcNoDataReal>
<SrcNoDataImag>0</SrcNoDataImag>
<DstNoDataReal>%i</DstNoDataReal>
<DstNoDataImag>0</DstNoDataImag>
</BandMapping>"""
% (i + 1, i + 1, self.in_nodata[i],
self.in_nodata[i])) # Or rewrite to white by: , 255 ))
# save the corrected VRT
open(tempfilename, 'w').write(s)
# open by GDAL as self.out_ds
self.out_ds = gdal.Open(tempfilename) # , gdal.GA_ReadOnly)
# delete the temporary file
os.unlink(tempfilename)
# set NODATA_VALUE metadata
self.out_ds.SetMetadataItem('NODATA_VALUES',
'%i %i %i' % (self.in_nodata[0],
self.in_nodata[1], self.in_nodata[2]))
if self.options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
open('tiles1.vrt', 'w').write(s)
# -----------------------------------
# Correction of AutoCreateWarpedVRT for Mono (1 band) and RGB (3 bands) files without NODATA:
# equivalent of gdalwarp -dstalpha
if self.in_nodata == [] and self.out_ds.RasterCount \
in [1, 3]:
import tempfile
tempfilename = tempfile.mktemp('-gdal2tiles.vrt'
)
self.out_ds.GetDriver().CreateCopy(tempfilename,
self.out_ds)
# open as a text file
s = open(tempfilename).read()
# Add the warping options
s = s.replace("""<BlockXSize>""",
"""<VRTRasterBand dataType="Byte" band="%i" subClass="VRTWarpedRasterBand">
<ColorInterp>Alpha</ColorInterp>
</VRTRasterBand>
<BlockXSize>"""
% (self.out_ds.RasterCount + 1))
s = s.replace("""</GDALWarpOptions>""",
"""<DstAlphaBand>%i</DstAlphaBand>
</GDALWarpOptions>"""
% (self.out_ds.RasterCount + 1))
s = s.replace("""</WorkingDataType>""",
"""</WorkingDataType>
<Option name="INIT_DEST">0</Option>"""
)
# save the corrected VRT
open(tempfilename, 'w').write(s)
# open by GDAL as self.out_ds
self.out_ds = gdal.Open(tempfilename) # , gdal.GA_ReadOnly)
# delete the temporary file
os.unlink(tempfilename)
if self.options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
open('tiles1.vrt', 'w').write(s)
s = '''
'''
else:
self.error('Input file has unknown SRS.',
                           'Use --s_srs EPSG:xyz (or similar) to provide the source reference system.'
)
if self.out_ds and self.options.verbose:
print ('Projected file:', 'tiles.vrt',
'( %sP x %sL - %s bands)'
% (self.out_ds.RasterXSize,
self.out_ds.RasterYSize,
self.out_ds.RasterCount))
if not self.out_ds:
self.out_ds = self.in_ds
#
# Here we should have a raster (out_ds) in the correct Spatial Reference system
#
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.out_ds.GetRasterBand(1).GetMaskBand()
if self.alphaband.GetMaskFlags() & gdal.GMF_ALPHA \
or self.out_ds.RasterCount == 4 or self.out_ds.RasterCount \
== 2:
# TODO: Better test for alpha band in the dataset
self.dataBandsCount = self.out_ds.RasterCount - 1
else:
self.dataBandsCount = self.out_ds.RasterCount
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if self.out_srs and srs4326.ExportToProj4() \
== self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print('KML autotest OK!')
# Read the georeference
self.out_gt = self.out_ds.GetGeoTransform()
# originX, originY = self.out_gt[0], self.out_gt[3]
# pixelSize = self.out_gt[1] # = self.out_gt[5]
# Test the size of the pixel
# MAPTILER - COMMENTED
# if self.out_gt[1] != (-1 * self.out_gt[5]) and self.options.profile != 'raster':
        # TODO: Process correctly coordinates which have a switched Y axis (display in OpenLayers too)
# self.error("Size of the pixel in the output differ for X and Y axes.")
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
self.error('Georeference of the raster contains rotation or skew. Such raster is not supported. Please use gdalwarp first.'
)
            # TODO: Do the warping in this case automatically
#
# Here we expect: pixel is square, no rotation on the raster
#
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.out_ds.RasterXSize \
* self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.out_ds.RasterYSize \
* self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
print ('Bounds (output srs):', round(self.ominx, 13),
self.ominy, self.omaxx, self.omaxy)
#
# Calculating ranges for tiles in different zoom levels
#
if self.options.profile == 'mercator':
self.mercator = GlobalMercator() # from globalmaptiles.py
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
(tminx, tminy) = self.mercator.MetersToTile(self.ominx,
self.ominy, tz)
(tmaxx, tmaxy) = self.mercator.MetersToTile(self.omaxx,
self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
(tminx, tminy) = (max(0, tminx), max(0, tminy))
(tmaxx, tmaxy) = (min(2 ** tz - 1, tmaxx), min(2 ** tz
- 1, tmaxy))
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz == None:
self.tminz = \
self.mercator.ZoomForPixelSize(self.out_gt[1]
* max(self.out_ds.RasterXSize,
self.out_ds.RasterYSize) / float(self.tilesize))
# Get the maximal zoom level (closest possible zoom level up on the resolution of raster)
if self.tmaxz == None:
self.tmaxz = \
self.mercator.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print ('Bounds (latlong):',
self.mercator.MetersToLatLon(self.ominx,
self.ominy),
self.mercator.MetersToLatLon(self.omaxx,
self.omaxy))
print ('MinZoomLevel:', self.tminz)
print ('MaxZoomLevel:', self.tmaxz, '(',
self.mercator.Resolution(self.tmaxz), ')')
if self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible) # from globalmaptiles.py
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
(tminx, tminy) = self.geodetic.LonLatToTile(self.ominx,
self.ominy, tz)
(tmaxx, tmaxy) = self.geodetic.LonLatToTile(self.omaxx,
self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
(tminx, tminy) = (max(0, tminx), max(0, tminy))
(tmaxx, tmaxy) = (min(2 ** (tz + 1) - 1, tmaxx), min(2
** tz - 1, tmaxy))
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
            # Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz == None:
self.tminz = \
self.geodetic.ZoomForPixelSize(self.out_gt[1]
* max(self.out_ds.RasterXSize,
self.out_ds.RasterYSize) / float(self.tilesize))
# Get the maximal zoom level (closest possible zoom level up on the resolution of raster)
if self.tmaxz == None:
self.tmaxz = \
self.geodetic.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print ('Bounds (latlong):', self.ominx, self.ominy,
self.omaxx, self.omaxy)
if self.options.profile == 'raster':
log2 = lambda x: math.log10(x) / math.log10(2) # log2 (base 2 logarithm)
self.nativezoom = \
int(max(math.ceil(log2(self.out_ds.RasterXSize
/ float(self.tilesize))),
math.ceil(log2(self.out_ds.RasterYSize
/ float(self.tilesize)))))
if int(self.tmaxz or 0) < self.nativezoom:
self.tmaxz = self.nativezoom
if self.options.verbose:
print ('Native zoom of the raster:', self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz == None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz == None:
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz + 1))
self.tsize = list(range(0, self.tmaxz + 1))
for tz in range(0, self.tmaxz + 1):
tsize = 2.0 ** (self.nativezoom - tz) * self.tilesize
(tminx, tminy) = (0, 0)
tmaxx = int(math.ceil(self.out_ds.RasterXSize / tsize)) \
- 1
tmaxy = int(math.ceil(self.out_ds.RasterYSize / tsize)) \
- 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
self.ct = osr.CoordinateTransformation(self.in_srs,
srs4326)
def rastertileswne(x, y, z):
pixelsizex = 2 ** (self.tmaxz - z) * self.out_gt[1] # X-pixel size in level
pixelsizey = 2 ** (self.tmaxz - z) * self.out_gt[1] # Y-pixel size in level (usually -1*pixelsizex)
west = self.out_gt[0] + x * self.tilesize \
* pixelsizex
east = west + self.tilesize * pixelsizex
south = self.ominy + y * self.tilesize * pixelsizex
north = south + self.tilesize * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
(west, south) = self.ct.TransformPoint(west,
south)[:2]
(east, north) = self.ct.TransformPoint(east,
north)[:2]
return (south, west, north, east)
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0)
|
Initialization of the input raster, reprojection if necessary
|
open_input
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def generate_metadata(self):
"""Generation of main metadata files and HTML viewers (metadata related to particular tiles are generated during the tile processing)."""
if not os.path.exists(self.output):
os.makedirs(self.output)
if self.options.profile == 'mercator':
(south, west) = self.mercator.MetersToLatLon(self.ominx,
self.ominy)
(north, east) = self.mercator.MetersToLatLon(self.omaxx,
self.omaxy)
(south, west) = (max(-85.05112878, south), max(-180.0,
west))
(north, east) = (min(85.05112878, north), min(180.0, east))
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') \
and self.options.profile == 'mercator':
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'googlemaps.html')):
f = open(os.path.join(self.output, 'googlemaps.html'
), 'w')
f.write(self.generate_googlemaps())
f.close()
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'openlayers.html')):
f = open(os.path.join(self.output, 'openlayers.html'
), 'w')
f.write(self.generate_openlayers())
f.close()
elif self.options.profile == 'geodetic':
(west, south) = (self.ominx, self.ominy)
(east, north) = (self.omaxx, self.omaxy)
(south, west) = (max(-90.0, south), max(-180.0, west))
(north, east) = (min(90.0, north), min(180.0, east))
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'openlayers.html')):
f = open(os.path.join(self.output, 'openlayers.html'
), 'w')
f.write(self.generate_openlayers())
f.close()
elif self.options.profile == 'raster':
(west, south) = (self.ominx, self.ominy)
(east, north) = (self.omaxx, self.omaxy)
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'openlayers.html')):
f = open(os.path.join(self.output, 'openlayers.html'
), 'w')
f.write(self.generate_openlayers())
f.close()
# Generate tilemapresource.xml.
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'tilemapresource.xml')):
f = open(os.path.join(self.output, 'tilemapresource.xml'),
'w')
f.write(self.generate_tilemapresource())
f.close()
if self.kml:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
(xmin, ymin, xmax, ymax) = self.tminmax[self.tminz]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if not self.options.resume \
or not os.path.exists(os.path.join(self.output,
'doc.kml')):
f = open(os.path.join(self.output, 'doc.kml'), 'w')
f.write(self.generate_kml(None, None, None,
children))
f.close()
|
Generation of main metadata files and HTML viewers (metadata related to particular tiles are generated during the tile processing).
|
generate_metadata
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
def generate_base_tiles(self):
"""Generation of the base tiles (the lowest in the pyramid) directly from the input raster"""
print('Generating Base Tiles:')
if self.options.verbose:
# mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY
# px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)
# print("Pixel coordinates:", px, py, (mx, my))
print('')
print('Tiles generated from the max zoom level:')
print('----------------------------------------')
print('')
# Set the bounds
(tminx, tminy, tmaxx, tmaxy) = self.tminmax[self.tmaxz]
# Just the center tile
# tminx = tminx+ (tmaxx - tminx)/2
# tminy = tminy+ (tmaxy - tminy)/2
# tmaxx = tminx
# tmaxy = tminy
ds = self.out_ds
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print ('dataBandsCount: ', self.dataBandsCount)
print ('tilebands: ', tilebands)
# print(tminx, tminy, tmaxx, tmaxy)
tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
# print(tcount)
ti = 0
tz = self.tmaxz
yrange = range(tmaxy, tminy - 1, -1)
if self.options.leaflet:
yrange = range(tminy, tmaxy + 1)
for ty in yrange:
for tx in range(tminx, tmaxx + 1):
if self.stopped:
break
ti += 1
tilefilename = os.path.join(self.output, str(tz),
str(tx), '%s.%s' % (ty, self.tileext))
if self.options.verbose:
print (ti, '/', tcount, tilefilename) # , "( TileMapService: z / x / y )"
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
                        print('Tile generation skipped because of --resume')
else:
self.progressbar(ti / float(tcount))
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:900913
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
# print("\tgdalwarp -ts 256 256 -te %s %s %s %s %s %s_%s_%s.tif" % ( b[0], b[1], b[2], b[3], "tiles.vrt", tz, tx, ty))
# Don't scale up by nearest neighbour, better change the querysize
# to the native resolution (and return smaller query tile) for scaling
if self.options.profile in ('mercator', 'geodetic'):
(rb, wb) = self.geo_query(ds, b[0], b[3], b[2],
b[1])
nativesize = wb[0] + wb[2] # Pixel size in the raster covering query geo extent
if self.options.verbose:
print ('\tNative Extent (querysize',
nativesize, '): ', rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
(rb, wb) = self.geo_query(
ds,
b[0],
b[3],
b[2],
b[1],
querysize=querysize,
)
(rx, ry, rxsize, rysize) = rb
(wx, wy, wxsize, wysize) = wb
else:
# 'raster' profile:
tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom
xsize = self.out_ds.RasterXSize # size of the raster in pixels
ysize = self.out_ds.RasterYSize
if tz >= self.nativezoom:
querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)
rx = tx * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
if self.options.leaflet:
ry = ty * tsize
else:
ry = ysize - ty * tsize - rysize
(wx, wy) = (0, 0)
(wxsize, wysize) = (int(rxsize / float(tsize)
* self.tilesize), int(rysize / float(tsize)
* self.tilesize))
if not self.options.leaflet:
if wysize != self.tilesize:
wy = self.tilesize - wysize
if self.options.verbose:
print ('\tReadRaster Extent: ', (rx, ry, rxsize,
rysize), (wx, wy, wxsize, wysize))
                # Query is in 'nearest neighbour' but can be bigger than the tilesize
# We scale down the query to the tilesize by supplied algorithm.
# Tile dataset in memory
dstile = self.mem_drv.Create('', self.tilesize,
self.tilesize, tilebands)
data = ds.ReadRaster(
rx,
ry,
rxsize,
rysize,
wxsize,
wysize,
band_list=list(range(1, self.dataBandsCount + 1)),
)
alpha = self.alphaband.ReadRaster(
rx,
ry,
rxsize,
rysize,
wxsize,
wysize,
)
if self.tilesize == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(
wx,
wy,
wxsize,
wysize,
data,
band_list=list(range(1, self.dataBandsCount
+ 1)),
)
dstile.WriteRaster(
wx,
wy,
wxsize,
wysize,
alpha,
band_list=[tilebands],
)
else:
# Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID)
# the ReadRaster function returns high-quality raster (not ugly nearest neighbour)
# TODO: Use directly 'near' for WaveLet files
# Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo
dsquery = self.mem_drv.Create('', querysize,
querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now only png tiles are supported)
# for i in range(1, tilebands+1):
# dsquery.GetRasterBand(1).Fill(tilenodata)
dsquery.WriteRaster(
wx,
wy,
wxsize,
wysize,
data,
band_list=list(range(1, self.dataBandsCount
+ 1)),
)
dsquery.WriteRaster(
wx,
wy,
wxsize,
wysize,
alpha,
band_list=[tilebands],
)
self.scale_query_to_tile(dsquery, dstile,
tilefilename)
del dsquery
del data
if self.options.resampling != 'antialias':
# Write a copy of tile to png/jpg
self.out_drv.CreateCopy(tilefilename, dstile,
strict=0)
del dstile
# Create a KML file for this tile.
if self.kml:
kmlfilename = os.path.join(self.output, str(tz),
str(tx), '%d.kml' % ty)
if not self.options.resume \
or not os.path.exists(kmlfilename):
f = open(kmlfilename, 'w')
f.write(self.generate_kml(tx, ty, tz))
f.close()
if not self.options.verbose:
self.progressbar(ti / float(tcount))
|
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
|
generate_base_tiles
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
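A standalone walk-through of the 'raster'-profile read-window arithmetic above, for a hypothetical 4000 x 3000 px source at its native zoom; values are illustrative, and --leaflet is assumed off, so the partial top row is pasted at the bottom of the tile:

import math

tilesize, xsize, ysize = 256, 4000, 3000
nativezoom = 4                                        # == ceil(log2(4000/256.)) == ceil(log2(3000/256.))
tz = nativezoom
tsize = int(2.0 ** (nativezoom - tz) * tilesize)      # 256 source px per tile at the native zoom
tmaxx = int(math.ceil(xsize / float(tsize))) - 1      # 15
tmaxy = int(math.ceil(ysize / float(tsize))) - 1      # 11
tx, ty = tmaxx, tmaxy                                 # the top-right edge tile (TMS orientation)
rxsize = xsize % tsize or tsize                       # 160: partial column at the right edge (tx == tmaxx)
rysize = ysize % tsize or tsize                       # 184: partial row at the top edge (ty == tmaxy)
rx = tx * tsize                                       # 3840
ry = ysize - ty * tsize - rysize                      # 0: read from the top of the image
wxsize = int(rxsize / float(tsize) * tilesize)        # 160
wysize = int(rysize / float(tsize) * tilesize)        # 184
wx = 0
wy = tilesize - wysize                                # 72: paste below the empty strip in the tile
print((rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
# ((3840, 0, 160, 184), (0, 72, 160, 184))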
|
def generate_overview_tiles(self):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
print('Generating Overview Tiles:')
tilebands = self.dataBandsCount + 1
# Usage of existing tiles: from 4 underlying tiles generate one as overview.
tcount = 0
for tz in range(self.tmaxz - 1, self.tminz - 1, -1):
(tminx, tminy, tmaxx, tmaxy) = self.tminmax[tz]
tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy
- tminy))
ti = 0
# querysize = tilesize * 2
for tz in range(self.tmaxz - 1, self.tminz - 1, -1):
(tminx, tminy, tmaxx, tmaxy) = self.tminmax[tz]
yrange = range(tmaxy, tminy - 1, -1)
if self.options.leaflet:
yrange = range(tminy, tmaxy + 1)
for ty in yrange:
for tx in range(tminx, tmaxx + 1):
if self.stopped:
break
ti += 1
tilefilename = os.path.join(self.output, str(tz),
str(tx), '%s.%s' % (ty, self.tileext))
if self.options.verbose:
print (ti, '/', tcount, tilefilename) # , "( TileMapService: z / x / y )"
if self.options.resume \
and os.path.exists(tilefilename):
if self.options.verbose:
                            print('Tile generation skipped because of --resume')
else:
self.progressbar(ti / float(tcount))
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
dsquery = self.mem_drv.Create('', 2
* self.tilesize, 2 * self.tilesize,
tilebands)
# TODO: fill the null value
# for i in range(1, tilebands+1):
# dsquery.GetRasterBand(1).Fill(tilenodata)
dstile = self.mem_drv.Create('', self.tilesize,
self.tilesize, tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
(minx, miny, maxx, maxy) = self.tminmax[tz
+ 1]
if x >= minx and x <= maxx and y >= miny \
and y <= maxy:
dsquerytile = \
gdal.Open(os.path.join(self.output,
str(tz + 1), str(x), '%s.%s'
% (y, self.tileext)),
gdal.GA_ReadOnly)
if self.options.leaflet:
if ty:
tileposy = y % (2 * ty) \
* self.tilesize
elif ty == 0 and y == 1:
tileposy = self.tilesize
else:
tileposy = 0
else:
if ty == 0 and y == 1 or ty != 0 \
and y % (2 * ty) != 0:
tileposy = 0
else:
tileposy = self.tilesize
if tx:
tileposx = x % (2 * tx) \
* self.tilesize
elif tx == 0 and x == 1:
tileposx = self.tilesize
else:
tileposx = 0
dsquery.WriteRaster(
tileposx,
tileposy,
self.tilesize,
self.tilesize,
dsquerytile.ReadRaster(0, 0,
self.tilesize, self.tilesize),
band_list=list(range(1, tilebands
+ 1)),
)
children.append([x, y, tz + 1])
self.scale_query_to_tile(dsquery, dstile,
tilefilename)
# Write a copy of tile to png/jpg
if self.options.resampling != 'antialias':
# Write a copy of tile to png/jpg
self.out_drv.CreateCopy(tilefilename, dstile,
strict=0)
if self.options.verbose:
print (
'\tbuild from zoom',
tz + 1,
' tiles:',
(2 * tx, 2 * ty),
(2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1),
(2 * tx + 1, 2 * ty + 1),
)
# Create a KML file for this tile.
if self.kml:
f = open(os.path.join(self.output,
'%d/%d/%d.kml' % (tz, tx, ty)), 'w')
f.write(self.generate_kml(tx, ty, tz, children))
f.close()
if not self.options.verbose:
self.progressbar(ti / float(tcount))
|
Generation of the overview tiles (higher in the pyramid) based on existing tiles
|
generate_overview_tiles
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
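The overview step pastes the four children of tile (tx, ty) into one double-sized query dataset; a standalone mirror of the tileposx/tileposy arithmetic above for the TMS orientation (without --leaflet), with an arbitrary parent tile:

def child_position(tx, ty, x, y, tilesize=256):
    # mirrors the placement logic above (non-leaflet branch)
    if ty == 0 and y == 1 or ty != 0 and y % (2 * ty) != 0:
        tileposy = 0
    else:
        tileposy = tilesize
    if tx:
        tileposx = x % (2 * tx) * tilesize
    elif tx == 0 and x == 1:
        tileposx = tilesize
    else:
        tileposx = 0
    return tileposx, tileposy

for y in range(2 * 3, 2 * 3 + 2):          # children of parent tile (tx, ty) = (5, 3)
    for x in range(2 * 5, 2 * 5 + 2):
        print((x, y), child_position(5, 3, x, y))
# (10, 6) (0, 256)   (11, 6) (256, 256)   (10, 7) (0, 0)   (11, 7) (256, 0)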
|
def geo_query(
self,
ds,
ulx,
uly,
lrx,
lry,
querysize=0,
):
"""For given dataset and query in cartographic coordinates
returns parameters for ReadRaster() in raster coordinates and
x/y shifts (for border tiles). If the querysize is not given, the
extent is returned in the native resolution of dataset ds."""
geotran = ds.GetGeoTransform()
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = int((lrx - ulx) / geotran[1] + 0.5)
rysize = int((lry - uly) / geotran[5] + 0.5)
if not querysize:
(wxsize, wysize) = (rxsize, rysize)
else:
(wxsize, wysize) = (querysize, querysize)
# Coordinates should not go out of the bounds of the raster
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return ((rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
|
For given dataset and query in cartographic coordinates
returns parameters for ReadRaster() in raster coordinates and
x/y shifts (for border tiles). If the querysize is not given, the
extent is returned in the native resolution of dataset ds.
|
geo_query
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
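A self-contained walk-through of the geo_query() arithmetic, using a stand-in object instead of a GDAL dataset (only GetGeoTransform and the raster size are needed); the geotransform, raster size and query windows below are made up:

class FakeDS:
    RasterXSize, RasterYSize = 2000, 1500
    def GetGeoTransform(self):
        # origin (440000, 3750000), 10 m pixels, north-up
        return (440000.0, 10.0, 0.0, 3750000.0, 0.0, -10.0)

def geo_query(ds, ulx, uly, lrx, lry, querysize=0):
    gt = ds.GetGeoTransform()
    rx = int((ulx - gt[0]) / gt[1] + 0.001)
    ry = int((uly - gt[3]) / gt[5] + 0.001)
    rxsize = int((lrx - ulx) / gt[1] + 0.5)
    rysize = int((lry - uly) / gt[5] + 0.5)
    wxsize, wysize = (querysize, querysize) if querysize else (rxsize, rysize)
    wx = wy = 0
    if rx < 0:                              # query sticks out past the left edge
        wx = int(wxsize * (float(-rx) / rxsize))
        wxsize -= wx
        rxsize -= int(rxsize * (float(-rx) / rxsize))
        rx = 0
    if rx + rxsize > ds.RasterXSize:        # ... or past the right edge
        wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
        rxsize = ds.RasterXSize - rx
    # (the y direction is clipped the same way in geo_query above; these queries stay inside vertically)
    return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)

ds = FakeDS()
print(geo_query(ds, 441000.0, 3748000.0, 443560.0, 3745440.0, querysize=1024))
# ((100, 200, 256, 256), (0, 0, 1024, 1024))  - query fully inside the raster
print(geo_query(ds, 439360.0, 3750000.0, 441920.0, 3747440.0, querysize=1024))
# ((0, 0, 193, 256), (252, 0, 772, 1024))     - 640 m overhang on the left is clipped and shifted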
|
def scale_query_to_tile(
self,
dsquery,
dstile,
tilefilename='',
):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tilesize = dstile.RasterXSize
tilebands = dstile.RasterCount
if self.options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands + 1):
# Black border around NODATA
# if i != 4:
# dsquery.GetRasterBand(i).SetNoDataValue(0)
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i),
dstile.GetRasterBand(i), 'average')
if res != 0:
self.error('RegenerateOverview() failed on %s, error %d'
% (tilefilename, res))
elif self.options.resampling == 'antialias':
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands),
numpy.uint8)
for i in range(tilebands):
array[:, :, i] = \
gdalarray.BandReadAsArray(dsquery.GetRasterBand(i
+ 1), 0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, self.tiledriver)
else:
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform((
0.0,
tilesize / float(querysize),
0.0,
0.0,
0.0,
tilesize / float(querysize),
))
dstile.SetGeoTransform((
0.0,
1.0,
0.0,
0.0,
0.0,
1.0,
))
res = gdal.ReprojectImage(dsquery, dstile, None, None,
self.resampling)
if res != 0:
self.error('ReprojectImage() failed on %s, error %d'
% (tilefilename, res))
|
Scales down query dataset to the tile dataset
|
scale_query_to_tile
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
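A minimal sketch of the 'average' resampling path above, run entirely against in-memory GDAL datasets (requires the osgeo Python bindings; the sizes and fill value are arbitrary):

from osgeo import gdal

mem = gdal.GetDriverByName('MEM')
dsquery = mem.Create('', 512, 512, 1, gdal.GDT_Byte)      # double-size query, as in the overview step
dstile = mem.Create('', 256, 256, 1, gdal.GDT_Byte)
dsquery.GetRasterBand(1).Fill(200)                        # pretend source pixels
res = gdal.RegenerateOverview(dsquery.GetRasterBand(1),
                              dstile.GetRasterBand(1), 'average')
print(res)                                    # 0 on success, exactly what the code above checks
print(dstile.GetRasterBand(1).Checksum())     # non-zero once the tile band holds data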
|
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tilesize, tileformat, profile
"""
args = {}
args['title'] = self.options.title
(args['south'], args['west'], args['north'], args['east']) = \
self.swne
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = 'EPSG:900913'
elif self.options.profile == 'geodetic':
args['srs'] = 'EPSG:4326'
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ''
s = \
"""<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tilesize)d" height="%(tilesize)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" \
% args
for z in range(self.tminz, self.tmaxz + 1):
if self.options.profile == 'raster':
s += \
""" <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" \
% (args['publishurl'], z, 2 ** (self.nativezoom
- z) * self.out_gt[1], z)
elif self.options.profile == 'mercator':
s += \
""" <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" \
% (args['publishurl'], z, 156543.0339 / 2 ** z, z)
elif self.options.profile == 'geodetic':
s += \
""" <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" \
% (args['publishurl'], z, 0.703125 / 2 ** z, z)
s += """ </TileSets>
</TileMap>
"""
return s
|
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tilesize, tileformat, profile
|
generate_tilemapresource
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
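The units-per-pixel written for each TileSet follows the per-profile formulas above; a small illustration (the raster-profile values assume a made-up nativezoom of 4 and out_gt[1] = 0.5 map units per source pixel):

for z in range(0, 4):
    mercator = 156543.0339 / 2 ** z      # metres per pixel
    geodetic = 0.703125 / 2 ** z         # degrees per pixel
    raster = 2 ** (4 - z) * 0.5          # map units per pixel for the hypothetical raster
    print(z, mercator, geodetic, raster)
# 0 156543.0339 0.703125 8.0
# 1 78271.51695 0.3515625 4.0
# ...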
|
def generate_kml(
self,
tx,
ty,
tz,
children=[],
**args
):
"""
Template for the KML. Returns filled string.
"""
(args['tx'], args['ty'], args['tz']) = (tx, ty, tz)
args['tileformat'] = self.tileext
if 'tilesize' not in args:
args['tilesize'] = self.tilesize
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tilesize'] / 2) # / 2.56) # default 128
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tilesize'] * 8) # 1.7) # default 2048 (used to be -1)
if children == []:
args['maxlodpixels'] = -1
if tx == None:
tilekml = False
args['title'] = self.options.title
else:
tilekml = True
args['title'] = '%d/%d/%d.kml' % (tz, tx, ty)
(args['south'], args['west'], args['north'], args['east'
]) = self.tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx != None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = self.options.url
if not url:
if tilekml:
url = '../../'
else:
url = ''
s = \
"""<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" \
% args
if tilekml:
s += \
"""
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(ty)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" \
% args
for (cx, cy, cz) in children:
(csouth, cwest, cnorth, ceast) = self.tileswne(cx, cy, cz)
s += \
"""
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" \
% (
cz,
cx,
cy,
args['tileformat'],
cnorth,
csouth,
ceast,
cwest,
args['minlodpixels'],
url,
cz,
cx,
cy,
)
s += """ </Document>
</kml>
"""
return s
|
Template for the KML. Returns filled string.
|
generate_kml
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
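generate_kml receives children as a list of (tx, ty, tz) tuples one zoom level deeper. A hedged sketch of how such a list is usually derived for a TMS quadtree follows; the real caller lives elsewhere in this script, and tmaxx/tmaxy are illustrative stand-ins for the tile extent at the child zoom.

# Sketch under the assumption of standard TMS quadtree numbering: each tile
# splits into four children at zoom tz + 1, clipped to the valid tile range.
def tile_children(tx, ty, tz, tmaxx, tmaxy):
    children = []
    for cy in (2 * ty, 2 * ty + 1):
        for cx in (2 * tx, 2 * tx + 1):
            if 0 <= cx <= tmaxx and 0 <= cy <= tmaxy:
                children.append((cx, cy, tz + 1))
    return children

print(tile_children(0, 0, 0, tmaxx=1, tmaxy=1))
# [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1)]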
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
(args['south'], args['west'], args['north'], args['east']) = \
self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = \
"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
        // Unfortunately, the opacity for the whole overlay is then not changeable: it is either PNG alpha or adjustable opacity, not both
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" \
% args
if self.kml:
s += \
"""
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += \
"""
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += \
"""
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" \
% args
s += \
"""
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.maptiler.org/">MapTiler</a>/<a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" \
% args
return s
|
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
|
generate_googlemaps
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
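The getTileUrl callback above converts Google's top-origin tile row into a bottom-origin TMS row via ymax - tile.y - 1. The same flip expressed in Python, as a quick self-check:

# Google/XYZ tile rows count from the top-left corner, TMS rows from the
# bottom-left; the conversion is symmetric, so applying it twice is a no-op.
def xyz_to_tms_y(y, zoom):
    return (1 << zoom) - y - 1

assert xyz_to_tms_y(0, 1) == 1
assert xyz_to_tms_y(xyz_to_tms_y(5, 4), 4) == 5
print(xyz_to_tms_y(5, 4))  # 10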
def generate_openlayers(self):
"""
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
It returns filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
(args['south'], args['west'], args['north'], args['east']) = \
self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tilesize'] = self.tilesize
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.tmscompatible:
args['tmsoffset'] = '-1'
else:
args['tmsoffset'] = ''
if self.options.profile == 'raster':
args['rasterzoomlevels'] = self.tmaxz + 1
args['rastermaxresolution'] = 2 ** self.nativezoom \
* self.out_gt[1]
s = \
"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
</style>""" \
% args
if self.options.profile == 'mercator':
s += \
"""
<script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>""" \
% args
s += \
"""
<script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
<script>
var map;
var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var emptyTileURL = "http://www.maptiler.org/img/none.png";
OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
function init(){""" \
% args
if self.options.profile == 'mercator':
s += \
"""
var options = {
div: "map",
controls: [],
projection: "EPSG:900913",
displayProjection: new OpenLayers.Projection("EPSG:4326"),
numZoomLevels: 20
};
map = new OpenLayers.Map(options);
// Create Google Mercator layers
var gmap = new OpenLayers.Layer.Google("Google Streets",
{
type: google.maps.MapTypeId.ROADMAP,
sphericalMercator: true
});
var gsat = new OpenLayers.Layer.Google("Google Satellite",
{
type: google.maps.MapTypeId.SATELLITE,
sphericalMercator: true
});
var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
{
type: google.maps.MapTypeId.HYBRID,
sphericalMercator: true
});
var gter = new OpenLayers.Layer.Google("Google Terrain",
{
type: google.maps.MapTypeId.TERRAIN,
sphericalMercator: true
});
// Create Bing layers
var broad = new OpenLayers.Layer.Bing({
name: "Bing Roads",
key: "%(bingkey)s",
type: "Road",
sphericalMercator: true
});
var baer = new OpenLayers.Layer.Bing({
name: "Bing Aerial",
key: "%(bingkey)s",
type: "Aerial",
sphericalMercator: true
});
var bhyb = new OpenLayers.Layer.Bing({
name: "Bing Hybrid",
key: "%(bingkey)s",
type: "AerialWithLabels",
sphericalMercator: true
});
// Create OSM layer
var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([gmap, gsat, ghyb, gter,
broad, baer, bhyb,
osm, tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));
""" \
% args
elif self.options.profile == 'geodetic':
s += \
"""
var options = {
div: "map",
controls: [],
projection: "EPSG:4326"
};
map = new OpenLayers.Map(options);
var wms = new OpenLayers.Layer.WMS("VMap0",
"http://tilecache.osgeo.org/wms-c/Basic.py?",
{
layers: 'basic',
format: 'image/png'
}
);
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([wms,tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds);
""" \
% args
elif self.options.profile == 'raster':
s += \
"""
var options = {
div: "map",
controls: [],
maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),
maxResolution: %(rastermaxresolution)f,
numZoomLevels: %(rasterzoomlevels)d
};
map = new OpenLayers.Map(options);
var layer = new OpenLayers.Layer.TMS("TMS Layer", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
getURL: getURL
});
map.addLayer(layer);
map.zoomToExtent(mapBounds);
""" \
% args
s += \
"""
map.addControls([new OpenLayers.Control.PanZoomBar(),
new OpenLayers.Control.Navigation(),
new OpenLayers.Control.MousePosition(),
new OpenLayers.Control.ArgParser(),
new OpenLayers.Control.Attribution()]);
}
""" \
% args
if self.options.profile == 'mercator':
s += \
"""
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
z+=1;
}
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" \
% args
elif self.options.profile == 'geodetic':
s += \
"""
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom()%(tmsoffset)s;
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" \
% args
elif self.options.profile == 'raster':
s += \
"""
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" \
% args
s += \
"""
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
if (map.updateSize) { map.updateSize(); };
}
onresize=function(){ resize(); };
</script>
</head>
<body onload="init()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.maptiler.org/">MapTiler</a>/<a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
<script type="text/javascript" >resize()</script>
</body>
</html>""" \
% args
return s
|
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
It returns filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tilesize, tileformat, publishurl
|
generate_openlayers
|
python
|
commenthol/gdal2tiles-leaflet
|
gdal2tiles.py
|
https://github.com/commenthol/gdal2tiles-leaflet/blob/master/gdal2tiles.py
|
MIT
|
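The three getURL variants above share the same column/row arithmetic and differ only in how the zoom is adjusted. A hedged Python sketch of that shared computation, where tile_origin, res and tile_size stand in for this.tileOrigin, this.getServerResolution() and this.tileSize:

# Sketch of the x/y/path computation inside getURL; the real layer prefixes
# the path with serviceVersion and layername (both '.') and falls back to an
# empty tile outside the map bounds or zoom range.
def tile_path(bounds_left, bounds_bottom, tile_origin, res, tile_size, z, ext='png'):
    x = round((bounds_left - tile_origin[0]) / (res * tile_size))
    y = round((bounds_bottom - tile_origin[1]) / (res * tile_size))
    return "%d/%d/%d.%s" % (z, x, y, ext)

# geodetic example: zoom 1, 256 px tiles, tile origin at (-180, -90)
print(tile_path(-90.0, 0.0, (-180.0, -90.0), 0.703125 / 2, 256, 1))  # 1/1/1.png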
def build_dataset(tokenizer, config):
'''
We assume that we have preprocessed the dataset appropriately such that the sample is organized as follows:
{"positive": prompt + answer_positive, "negative": prompt + answer_negative}, where the positive response is preferred.
'''
def tokenize(sample):
tokenized_pos = tokenizer(sample['positive'], truncation=True)
tokenized_neg = tokenizer(sample['negative'], truncation=True)
sample["chosen_input_ids"] = tokenized_pos["input_ids"]
sample["chosen_attention_mask"] = tokenized_pos["attention_mask"]
sample["rejected_input_ids"] = tokenized_neg["input_ids"]
sample["rejected_attention_mask"] = tokenized_neg["attention_mask"]
return sample
ds = load_dataset("json", data_files=config.dataset_path, split="train", field="instances")
ds = ds.map(tokenize, batched=False)
ds = ds.filter(lambda x: len(x["chosen_input_ids"]) <= 512 and len(x["rejected_input_ids"]) <= 512)
eval_dataset = None
if config.validation_split_percentage > 0:
idx_gap = int((1-config.validation_split_percentage/100) * len(ds))
train_dataset = ds.select(range(idx_gap))
eval_dataset = ds.select(range(idx_gap, len(ds)))
else:
train_dataset = ds
return train_dataset, eval_dataset
|
We assume that we have preprocessed the dataset appropriately such that the sample is organized as follows:
{"positive": prompt + answer_positive, "negative": prompt + answer_negative}, where the positive response is preferred.
|
build_dataset
|
python
|
OptimalScale/LMFlow
|
contrib/rlhflow/reward_modeling.py
|
https://github.com/OptimalScale/LMFlow/blob/master/contrib/rlhflow/reward_modeling.py
|
Apache-2.0
|
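A hedged sketch of the JSON layout build_dataset expects at config.dataset_path; the field name "instances" matches the load_dataset call above, while the file name and the prompt text are illustrative.

import json

# Illustrative reward-modeling pair: "positive" is the preferred completion.
example = {
    "instances": [
        {
            "positive": "Q: What is 2 + 2?\nA: 4",
            "negative": "Q: What is 2 + 2?\nA: 5",
        }
    ]
}
with open("reward_data.json", "w") as fout:   # hypothetical path
    json.dump(example, fout, indent=2)
# After build_dataset, each instance carries chosen_/rejected_ input_ids and
# attention_mask columns, and pairs longer than 512 tokens are filtered out.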
def tokenize(
self,
dataset,
add_special_tokens=True,
*args,
**kwargs
) -> Dataset:
"""
Tokenize the full dataset.
Parameters
------------
dataset : lmflow.datasets.Dataset.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
tokenized_datasets :
The tokenized dataset, without any leading or trailing special
tokens (normally they are Begin-Of-Sentence or End-Of-Sentence
tokens).
"""
# Preprocessing the datasets.
# First we tokenize all the texts.
if dataset.get_backend() != "huggingface":
raise NotImplementedError(
"tokenization of datasets with non-huggingface backend are"
"not supported yet"
)
dataset_type = dataset.get_type()
model_args = self.model_args
raw_datasets = dataset
hf_raw_datasets = dataset.get_backend_dataset()
column_names = list(hf_raw_datasets.features)
data_args = raw_datasets.get_data_args()
# Requires three types of information for tokenizing different datasets
# 1) Which fields require tokenization, e.g.
# "text2float": "text", but not "float"
# "text2text": both "input" and "output"
        # 2) How the tokenized sequences will be concatenated together, e.g.
# "text_only": "text" -> "text"
# "text2text": "input", "output" -> "input" + "output"
# 3) Which fields require loss in final computation, e.g.
# "text_only": "text"
# "text2text": "output" only
tokenized_column_order = None # Handles 1) and 2)
label_columns = None # Handles 3)
if dataset_type == "text_only":
tokenized_column_order = ["text"]
label_columns = ["text"]
elif dataset_type == "text2text":
tokenized_column_order = ["input", "output"]
label_columns = ["output"]
add_special_tokens = False
elif dataset_type == "conversation":
if data_args.conversation_template:
if data_args.conversation_template in PRESET_TEMPLATES.keys():
conversation_template = PRESET_TEMPLATES[data_args.conversation_template]
else:
raise NotImplementedError(
f"Conversation template {data_args.conversation_template} is not supported yet."
)
else:
logger.warning("No conversation template provided. Using default template.")
conversation_template = PRESET_TEMPLATES['empty']
logger.warning(f"Conversation template: {conversation_template}")
else:
raise NotImplementedError(
f"dataset type \"{dataset_type}\" is not supported, currently"
" only support following data types:\n"
f" 1) {TEXT_ONLY_DATASET_DESCRIPTION}\n"
f" 2) {TEXT2TEXT_DATASET_DESCRIPTION}\n"
f" 3) {CONVERSATION_DATASET_DESCRIPTION}\n"
)
# Whether to truncate long sequences to fit into max_length
use_truncation = False
if model_args.use_lora or data_args.disable_group_texts:
use_truncation = True
tokenize_fn = conversation_tokenize_function
tokenize_fn_kwargs = {
"data_args": data_args,
"tokenizer": self.tokenizer,
"column_names": column_names,
}
if "conversation" in dataset_type:
tokenize_fn_kwargs["conversation_template"] = conversation_template
else:
tokenize_fn_kwargs["label_columns"] = label_columns
tokenize_fn_kwargs["tokenized_column_order"] = tokenized_column_order
tokenize_fn_kwargs["add_special_tokens"] = add_special_tokens
tokenize_fn_kwargs["use_truncation"] = use_truncation
tokenize_kwargs = {}
if not data_args.streaming:
fingerprint = hashlib.md5(
(
raw_datasets.get_fingerprint()
+ str(self.tokenizer)
+ f'###padding_side={self.tokenizer.padding_side}'
+ ('###conversation_template=' + str(conversation_template) if "conversation" in dataset_type else "")
+ f'###disable_group_texts={data_args.disable_group_texts}'
+ f'###block_size={data_args.block_size}'
).encode("utf-8")
).hexdigest()
tokenize_kwargs = {
"num_proc": data_args.preprocessing_num_workers,
"load_from_cache_file": not data_args.overwrite_cache,
"desc": "Running tokenizer on dataset",
"new_fingerprint": fingerprint,
}
tokenized_datasets = raw_datasets.map(
tokenize_fn,
batched=True,
remove_columns=column_names,
fn_kwargs=tokenize_fn_kwargs,
**tokenize_kwargs
)
return tokenized_datasets
|
Tokenize the full dataset.
Parameters
------------
dataset : lmflow.datasets.Dataset.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
tokenized_datasets :
The tokenized dataset, without any leading or trailing special
tokens (normally they are Begin-Of-Sentence or End-Of-Sentence
tokens).
|
tokenize
|
python
|
OptimalScale/LMFlow
|
contrib/tool-finetune/function_call_finetune.py
|
https://github.com/OptimalScale/LMFlow/blob/master/contrib/tool-finetune/function_call_finetune.py
|
Apache-2.0
|
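The new_fingerprint passed to map() above is simply an MD5 over the dataset fingerprint plus the tokenizer and preprocessing settings, so any change to them invalidates the cache. A minimal standalone illustration of the same idea (the input values are made up):

import hashlib

def cache_fingerprint(dataset_fingerprint, tokenizer_repr, padding_side,
                      disable_group_texts, block_size):
    payload = (
        dataset_fingerprint
        + tokenizer_repr
        + f'###padding_side={padding_side}'
        + f'###disable_group_texts={disable_group_texts}'
        + f'###block_size={block_size}'
    )
    return hashlib.md5(payload.encode("utf-8")).hexdigest()

print(cache_fingerprint("a1b2c3", "LlamaTokenizerFast(...)", "right", True, 512))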
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
# if src.requires_grad == True:
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
|
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
|
update_ema
|
python
|
OptimalScale/LMFlow
|
experimental/LISA-diffusion/diffusion_dpo/train_diffusion_dpo_lisa.py
|
https://github.com/OptimalScale/LMFlow/blob/master/experimental/LISA-diffusion/diffusion_dpo/train_diffusion_dpo_lisa.py
|
Apache-2.0
|
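A quick numerical check of update_ema as defined above (requires PyTorch; the values are illustrative):

import torch

# With rate=0.9 the target moves 10% of the way toward the source per call.
target = [torch.tensor([0.0])]
source = [torch.tensor([1.0])]
update_ema(target, source, rate=0.9)
print(target[0])   # tensor([0.1000])
update_ema(target, source, rate=0.9)
print(target[0])   # tensor([0.1900])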
def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None):
"""Return function over iterator that groups key, value pairs into samples.
:param keys: function that splits the key into key and extension (base_plus_ext) :param lcase: convert suffixes to
lower case (Default value = True)
"""
current_sample = None
for filesample in data:
assert isinstance(filesample, dict)
fname, value = filesample["fname"], filesample["data"]
prefix, suffix = keys(fname)
if prefix is None:
continue
if lcase:
suffix = suffix.lower()
        # FIXME webdataset version throws if suffix in current_sample, but we have a potential for
        # this happening in the current LAION400m dataset if a tar ends with the same prefix as the next
        # one begins; rare, but it can happen since prefixes aren't unique across tar files in that dataset
if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample:
if valid_sample(current_sample):
yield current_sample
current_sample = {"__key__": prefix, "__url__": filesample["__url__"]}
if suffixes is None or suffix in suffixes:
current_sample[suffix] = value
if valid_sample(current_sample):
yield current_sample
|
Return function over iterator that groups key, value pairs into samples.
:param keys: function that splits the key into key and extension (base_plus_ext) :param lcase: convert suffixes to
lower case (Default value = True)
|
group_by_keys_nothrow
|
python
|
OptimalScale/LMFlow
|
experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lisa.py
|
https://github.com/OptimalScale/LMFlow/blob/master/experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lisa.py
|
Apache-2.0
|
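A hedged sketch of the grouping semantics only: webdataset tar members that share a basename become one sample keyed by that basename, with one entry per lower-cased extension. base_plus_ext and valid_sample are defined elsewhere in the training script; the splitting below is a simplified stand-in.

import os

# Simplified stand-in for base_plus_ext: split "000001.jpg" into
# ("000001", "jpg"). Real webdataset keys may contain dots in the extension.
def split_key(fname):
    base, ext = os.path.splitext(fname)
    return base, ext.lstrip(".").lower()

members = ["000001.jpg", "000001.txt", "000002.jpg", "000002.txt"]
samples = {}
for fname in members:
    prefix, suffix = split_key(fname)
    samples.setdefault(prefix, {"__key__": prefix})[suffix] = b"..."
print(sorted(samples))   # ['000001', '000002']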
def guidance_scale_embedding(w, embedding_dim=512, dtype=torch.float32):
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
timesteps (`torch.Tensor`):
generate embedding vectors at these timesteps
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
"""
assert len(w.shape) == 1
w = w * 1000.0
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
emb = w.to(dtype)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1))
assert emb.shape == (w.shape[0], embedding_dim)
return emb
|
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
timesteps (`torch.Tensor`):
generate embedding vectors at these timesteps
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
|
guidance_scale_embedding
|
python
|
OptimalScale/LMFlow
|
experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lisa.py
|
https://github.com/OptimalScale/LMFlow/blob/master/experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lisa.py
|
Apache-2.0
|
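A small shape check for guidance_scale_embedding as defined above (requires PyTorch; the guidance scales are illustrative):

import torch

w = torch.tensor([1.5, 4.0, 7.5, 9.0])   # one guidance scale per sample
emb = guidance_scale_embedding(w, embedding_dim=512)
print(emb.shape, emb.dtype)   # torch.Size([4, 512]) torch.float32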
def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
return x[(...,) + (None,) * dims_to_append]
|
Appends dimensions to the end of a tensor until it has target_dims dimensions.
|
append_dims
|
python
|
OptimalScale/LMFlow
|
experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lisa.py
|
https://github.com/OptimalScale/LMFlow/blob/master/experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lisa.py
|
Apache-2.0
|
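Usage check for append_dims as defined above (requires PyTorch): broadcasting a per-sample scalar against a (B, C, H, W) batch.

import torch

sigma = torch.rand(8)                        # shape (8,)
x = torch.rand(8, 4, 64, 64)
sigma_b = append_dims(sigma, x.ndim)         # shape (8, 1, 1, 1)
print((x * sigma_b).shape)                   # torch.Size([8, 4, 64, 64])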
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
# if src.requires_grad == True:
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
|
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
|
update_ema
|
python
|
OptimalScale/LMFlow
|
experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lisa.py
|
https://github.com/OptimalScale/LMFlow/blob/master/experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lisa.py
|
Apache-2.0
|
def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None):
"""Return function over iterator that groups key, value pairs into samples.
:param keys: function that splits the key into key and extension (base_plus_ext) :param lcase: convert suffixes to
lower case (Default value = True)
"""
current_sample = None
for filesample in data:
assert isinstance(filesample, dict)
fname, value = filesample["fname"], filesample["data"]
prefix, suffix = keys(fname)
if prefix is None:
continue
if lcase:
suffix = suffix.lower()
        # FIXME webdataset version throws if suffix in current_sample, but we have a potential for
        # this happening in the current LAION400m dataset if a tar ends with the same prefix as the next
        # one begins; rare, but it can happen since prefixes aren't unique across tar files in that dataset
if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample:
if valid_sample(current_sample):
yield current_sample
current_sample = {"__key__": prefix, "__url__": filesample["__url__"]}
if suffixes is None or suffix in suffixes:
current_sample[suffix] = value
if valid_sample(current_sample):
yield current_sample
|
Return function over iterator that groups key, value pairs into samples.
:param keys: function that splits the key into key and extension (base_plus_ext) :param lcase: convert suffixes to
lower case (Default value = True)
|
group_by_keys_nothrow
|
python
|
OptimalScale/LMFlow
|
experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lora.py
|
https://github.com/OptimalScale/LMFlow/blob/master/experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lora.py
|
Apache-2.0
|
def guidance_scale_embedding(w, embedding_dim=512, dtype=torch.float32):
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
timesteps (`torch.Tensor`):
generate embedding vectors at these timesteps
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
"""
assert len(w.shape) == 1
w = w * 1000.0
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
emb = w.to(dtype)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1))
assert emb.shape == (w.shape[0], embedding_dim)
return emb
|
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
timesteps (`torch.Tensor`):
generate embedding vectors at these timesteps
embedding_dim (`int`, *optional*, defaults to 512):
dimension of the embeddings to generate
dtype:
data type of the generated embeddings
Returns:
`torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
|
guidance_scale_embedding
|
python
|
OptimalScale/LMFlow
|
experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lora.py
|
https://github.com/OptimalScale/LMFlow/blob/master/experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lora.py
|
Apache-2.0
|
def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
return x[(...,) + (None,) * dims_to_append]
|
Appends dimensions to the end of a tensor until it has target_dims dimensions.
|
append_dims
|
python
|
OptimalScale/LMFlow
|
experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lora.py
|
https://github.com/OptimalScale/LMFlow/blob/master/experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lora.py
|
Apache-2.0
|
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
|
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
|
update_ema
|
python
|
OptimalScale/LMFlow
|
experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lora.py
|
https://github.com/OptimalScale/LMFlow/blob/master/experimental/LISA-diffusion/latent_consistency_model/train_lcm_distill_sd_wds_lora.py
|
Apache-2.0
|
def parse_argument(sys_argv):
"""Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
# Training parameters
parser.add_argument(
"--dataset_path", type=str,
default=None,
help=textwrap.dedent("input dataset path, reads from stdin by default")
)
parser.add_argument(
"--output_path", type=str,
default=None,
help=textwrap.dedent("output dataset path, writes to stdout by default")
)
parser.add_argument(
"--end_mark", type=str,
default="###",
help=textwrap.dedent("end mark that append to the end of output")
)
# Parses from commandline
args = parser.parse_args(sys_argv[1:])
return args
|
Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
|
parse_argument
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/add_end_mark.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/add_end_mark.py
|
Apache-2.0
|
def parse_argument(sys_argv):
"""Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
# Training parameters
parser.add_argument(
"--dataset_path", type=str,
default=None,
help=textwrap.dedent("input dataset path, reads from stdin by default")
)
parser.add_argument(
"--output_path", type=str,
default=None,
help=textwrap.dedent("output dataset path, writes to stdout by default")
)
parser.add_argument(
"--prompt_structure", type=str,
default="{input}",
help=textwrap.dedent("prompt structure to augment input")
)
# Parses from commandline
args = parser.parse_args(sys_argv[1:])
return args
|
Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
|
parse_argument
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/add_prompt.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/add_prompt.py
|
Apache-2.0
|
def parse_argument(sys_argv):
"""Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
# Training parameters
parser.add_argument(
"--output_path", type=str,
default=None,
help=textwrap.dedent("output dataset path, writes to stdout by default")
)
parser.add_argument(
"--merge_from_path", type=str,
nargs="+",
help=textwrap.dedent(
"dataset path of the extra dataset that will be merged"
" into input dataset"
)
)
# Parses from commandline
args = parser.parse_args(sys_argv[1:])
return args
|
Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
|
parse_argument
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/concat.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/concat.py
|
Apache-2.0
|
def parse_argument(sys_argv):
"""Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
# Training parameters
parser.add_argument(
"--output_path", type=str,
default=None,
help=textwrap.dedent("output dataset path, writes to stdout by default")
)
parser.add_argument(
"--merge_from_path", type=str,
nargs="+",
help=textwrap.dedent(
"dataset path of the extra dataset that will be merged"
" into input dataset"
)
)
parser.add_argument(
"--seed", type=int, default=42,
help=textwrap.dedent("pseudorandom seed")
)
parser.add_argument(
"--eval_size", type=int, default=200,
help=textwrap.dedent("size of eval dataset")
)
parser.add_argument(
"--test_size", type=int, default=1000,
help=textwrap.dedent("size of test dataset")
)
parser.add_argument(
"--k", type=int, default=10,
help=textwrap.dedent("the train dataset will be divide into k folds")
)
# Parses from commandline
args = parser.parse_args(sys_argv[1:])
return args
|
Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
|
parse_argument
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/concat_shuffle_split.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/concat_shuffle_split.py
|
Apache-2.0
|
def parse_argument(sys_argv):
"""Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
# Training parameters
parser.add_argument(
"--dataset_path", type=str,
default=None,
help="input dataset path, reads from stdin by default"
)
# Parses from commandline
args = parser.parse_args(sys_argv[1:])
return args
|
Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
|
parse_argument
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/count.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/count.py
|
Apache-2.0
|
def parse_argument(sys_argv):
"""Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--dataset_path", type=str,
default=None,
help=textwrap.dedent("input dataset path, reads from stdin by default")
)
# Training parameters
parser.add_argument(
"--output_path", type=str,
default=None,
help=textwrap.dedent("output dataset path, writes to stdout by default")
)
parser.add_argument(
"--merge_from_path", type=str,
nargs="+",
help=textwrap.dedent(
"dataset path of the extra dataset that will be merged"
" into input dataset"
)
)
# Parses from commandline
args = parser.parse_args(sys_argv[1:])
return args
|
Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
|
parse_argument
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/merge.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/merge.py
|
Apache-2.0
|
def parse_argument(sys_argv):
"""Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
# Training parameters
parser.add_argument(
"--dataset_path", type=str,
default=None,
help=textwrap.dedent("input dataset path, reads from stdin by default")
)
parser.add_argument(
"--output_path", type=str,
default=None,
help=textwrap.dedent("output dataset path, writes to stdout by default")
)
# Parses from commandline
args = parser.parse_args(sys_argv[1:])
return args
|
Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
|
parse_argument
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/raw2textonly.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/raw2textonly.py
|
Apache-2.0
|
def raw2textonly(fin):
"""
Converts raw text to text-only format.
Args:
fin: the input file description of the raw text file.
Returns:
a dict with "text-only" format.
"""
data_dict = {
"type": "text_only",
"instances": [ { "text": line.strip() } for line in fin ],
}
return data_dict
|
Converts raw text to text-only format.
Args:
fin: the input file description of the raw text file.
Returns:
a dict with "text-only" format.
|
raw2textonly
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/raw2textonly.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/raw2textonly.py
|
Apache-2.0
|
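A round-trip check for raw2textonly as defined above, using an in-memory file object instead of a real path:

import io

fin = io.StringIO("first line\nsecond line\n")
print(raw2textonly(fin))
# {'type': 'text_only', 'instances': [{'text': 'first line'}, {'text': 'second line'}]}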
def parse_argument(sys_argv):
"""Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
# Training parameters
parser.add_argument(
"--dataset_path", type=str,
default=None,
help="input dataset path, reads from stdin by default"
)
parser.add_argument(
"--output_path", type=str,
default=None,
help="output dataset path, writes to stdout by default"
)
parser.add_argument(
"--ratio", type=float, required=True,
help="sample ratio, will be floored if number of samples is not a int"
)
parser.add_argument(
"--seed", type=int, default=42,
help="pseudorandom seed"
)
# Parses from commandline
args = parser.parse_args(sys_argv[1:])
return args
|
Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
|
parse_argument
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/sample.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/sample.py
|
Apache-2.0
|
def parse_argument(sys_argv):
"""Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
# Training parameters
parser.add_argument(
"--dataset_path", type=str,
default=None,
help="input dataset path, reads from stdin by default"
)
parser.add_argument(
"--output_path", type=str,
default=None,
help="output dataset path, writes to stdout by default"
)
parser.add_argument(
"--seed", type=int, default=42,
help="pseudorandom seed"
)
# Parses from commandline
args = parser.parse_args(sys_argv[1:])
return args
|
Parses arguments from command line.
Args:
sys_argv: the list of arguments (strings) from command line.
Returns:
A struct whose member corresponds to the required (optional) variable.
For example,
```
args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
args.input # 'a.txt'
args.num # 10
```
|
parse_argument
|
python
|
OptimalScale/LMFlow
|
scripts/data_preprocess/shuffle.py
|
https://github.com/OptimalScale/LMFlow/blob/master/scripts/data_preprocess/shuffle.py
|
Apache-2.0
|
def _check_instance_format(self):
"""
Checks if data (instances) have required fields.
Raises messages with hints if not matched.
"""
fields = self.backend_dataset.features
correct_fields = INSTANCE_FIELDS_MAP[self.type]
if not set(correct_fields).issubset(set(fields)):
raise ValueError(
                f'data instance fields incorrect:'
                f' {list(correct_fields)} are required.'
)
|
Checks if data (instances) have required fields.
Raises messages with hints if not matched.
|
_check_instance_format
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
def from_dict(self, dict_obj: dict, *args, **kwargs):
r"""
Create a Dataset object from a dictionary.
Return a Dataset given a dict with format:
{
"type": TYPE,
"instances": [
{
"key_1": VALUE_1.1,
"key_2": VALUE_1.2,
...
},
{
"key_1": VALUE_2.1,
"key_2": VALUE_2.2,
...
},
...
]
}
Parameters
-----------
dict_obj : dict.
A dictionary containing the dataset information.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
---------
self : Dataset object.
"""
if self.backend == "huggingface":
if KEY_TYPE not in dict_obj:
raise ValueError(
f'"{KEY_TYPE}" must be provided to initialize a dataset,'
f' e.g.\n'
f' {TEXT_ONLY_DATASET_DESCRIPTION}'
)
if KEY_INSTANCES not in dict_obj:
raise ValueError(
f'"{KEY_INSTANCES}" must be provided to initialize a'
f' dataset, e.g.\n'
f' {TEXT_ONLY_DATASET_DESCRIPTION}'
)
self.type = dict_obj[KEY_TYPE]
if not self.type in INSTANCE_FIELDS_MAP:
raise ValueError(f'type "{self.type}" is not supported')
correct_fields = INSTANCE_FIELDS_MAP[self.type]
for i, instance in enumerate(dict_obj[KEY_INSTANCES]):
fields = instance.keys()
if not set(correct_fields).issubset(set(fields)):
raise ValueError(
                        f'data instance fields incorrect:'
                        f' {list(correct_fields)} are required.'
)
try:
hf_dict = {}
if len(dict_obj[KEY_INSTANCES]) > 0:
for key in dict_obj[KEY_INSTANCES][0].keys():
hf_dict[key] = [
instance[key] for instance in dict_obj[KEY_INSTANCES]
]
self.backend_dataset = HFDataset.from_dict(hf_dict, *args, **kwargs)
except AttributeError as ex:
raise ValueError(
f"Error occurs: {ex}. Failed to convert dict to"
f" \"{self.type}\" dataset," f" the standard format is as"
f" follows:\n"
f" {DATASET_DESCRIPTION_MAP[self.type]}"
)
self._check_instance_format()
return self
elif self.backend == "dict":
self.backend_dataset = dict_obj
self.type = dict_obj[KEY_TYPE]
return self
else:
raise NotImplementedError(
f'Currently .from_dict is not supported for backend "{self.backend}"'
)
|
Create a Dataset object from a dictionary.
Return a Dataset given a dict with format:
{
"type": TYPE,
"instances": [
{
"key_1": VALUE_1.1,
"key_2": VALUE_1.2,
...
},
{
"key_1": VALUE_2.1,
"key_2": VALUE_2.2,
...
},
...
]
}
Parameters
-----------
dict_obj : dict.
A dictionary containing the dataset information.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
---------
self : Dataset object.
|
from_dict
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
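A hedged usage sketch of from_dict, following the construction pattern shown in create_from_dict below. The import paths are assumptions inferred from this record's file path (src/lmflow/datasets/dataset.py) and from LMFlow's argument module, and may differ in practice.

from lmflow.args import DatasetArguments            # assumed import path
from lmflow.datasets.dataset import Dataset         # assumed import path

data_dict = {
    "type": "text_only",
    "instances": [
        {"text": "hello world"},
        {"text": "a second instance"},
    ],
}
empty_data_args = DatasetArguments(dataset_path=None)
ds = Dataset(empty_data_args)        # default backend: huggingface (assumed)
ds = ds.from_dict(data_dict)
print(ds.get_type())                 # text_only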
def create_from_dict(cls, dict_obj, *args, **kwargs):
r"""
Returns
--------
Returns a Dataset object given a dict.
"""
empty_data_args = DatasetArguments(dataset_path=None)
dataset = Dataset(empty_data_args)
return dataset.from_dict(dict_obj)
|
Returns
--------
Returns a Dataset object given a dict.
|
create_from_dict
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
def to_dict(self):
r"""
Returns
---------
Return a dict represents the dataset:
{
"type": TYPE,
"instances": [
{
"key_1": VALUE_1.1,
"key_2": VALUE_1.2,
...
},
{
"key_1": VALUE_2.1,
"key_2": VALUE_2.2,
...
},
...
]
}
A python dict object represents the content of this dataset.
"""
if self.backend == "huggingface":
dict_obj = {}
dict_obj[KEY_TYPE] = self.get_type()
hf_dict = self.backend_dataset.to_dict()
dict_obj[KEY_INSTANCES] = []
first_key = None
for key in hf_dict.keys():
first_key = key
break
if first_key is not None:
num_instances = len(hf_dict[first_key])
dict_obj[KEY_INSTANCES] = [
{
key: hf_dict[key][i] for key in hf_dict.keys()
}
for i in range(num_instances)
]
return dict_obj
elif self.backend == "dict":
dict_obj = self.backend_dataset
return dict_obj
else:
raise NotImplementedError(
                f'Currently .to_dict is not supported for backend "{self.backend}"'
)
|
Returns
---------
Return a dict represents the dataset:
{
"type": TYPE,
"instances": [
{
"key_1": VALUE_1.1,
"key_2": VALUE_1.2,
...
},
{
"key_1": VALUE_2.1,
"key_2": VALUE_2.2,
...
},
...
]
}
A python dict object represents the content of this dataset.
|
to_dict
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
def map(self, *args, **kwargs):
r"""
Parameters
------------
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
---------
self : Dataset object.
"""
# If the dataset uses Hugging Face as the backend,
# call the `map()` function of the Hugging Face backend dataset
if self.backend == "huggingface":
# Set the mapped dataset as the backend dataset of the current dataset
mapped_backend_dataset = self.backend_dataset.map(*args, **kwargs)
self.backend_dataset = mapped_backend_dataset
return self
else:
# If the backend is not Hugging Face, raise a NotImplementedError
raise NotImplementedError(
f'Currently .map is not supported for backend "{self.backend}"'
)
|
Parameters
------------
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
---------
self : Dataset object.
|
map
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
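Because .map forwards directly to the Hugging Face backend, any datasets.Dataset.map argument applies. A hedged sketch that lower-cases a text_only dataset, reusing the dataset object from the from_dict sketch earlier (the "text" column name comes from the text_only format).
def lowercase_text(batch):
    # Batched map: for a text_only dataset, batch["text"] is a list of strings.
    return {"text": [t.lower() for t in batch["text"]]}

# Keyword arguments such as batched or num_proc are forwarded as-is.
dataset = dataset.map(lowercase_text, batched=True)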
def save(
self,
file_path: str,
format: str="json"
):
r"""
Save the dataset to a json file.
Parameters
------------
file_path : str.
The path to the file where the dataset will be saved.
"""
if format == "json":
assert Path(file_path).suffix == ".json", "The file path must have a .json extension."
with open(file_path, "w", encoding='utf-8') as fout:
json.dump(self.to_dict(), fout, indent=4, ensure_ascii=False)
else:
logger.error(f"Unsupported format when saving the dataset: {format}.")
|
Save the dataset to a json file.
Parameters
------------
file_path : str.
The path to the file where the dataset will be saved.
|
save
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
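Usage is a one-liner; the .json suffix is enforced by the assertion in the method above (reusing the dataset object from the earlier sketches).
dataset.save("my_dataset.json")    # default format="json"
# dataset.save("my_dataset.txt")   # would trip the ".json" suffix assertion above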
def sample(self, n: int, seed: int=42):
r"""
Sample n instances from the dataset.
Parameters
------------
n : int.
The number of instances to sample from the dataset.
Returns
---------
sample_dataset : Dataset object.
A new dataset object containing the sampled instances.
"""
if self.backend == "huggingface":
sampled_dataset = self.backend_dataset.shuffle(seed=seed).select(range(n))
output_dataset = self.create_from_dict(
{
"type": self.get_type(),
"instances": [
{
col_name: sampled_dataset[col_name][i] for col_name in sampled_dataset.column_names
} for i in range(n)
]
}
)
return output_dataset
else:
raise NotImplementedError(
f'Currently .sample is not supported for backend "{self.backend}"'
)
|
Sample n instances from the dataset.
Parameters
------------
n : int.
The number of instances to sample from the dataset.
Returns
---------
sample_dataset : Dataset object.
A new dataset object containing the sampled instances.
|
sample
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
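A small usage sketch, reusing the dataset object from the earlier sketches; n must not exceed the dataset size and only the huggingface backend is supported.
# Reproducibly draw 2 instances (shuffle with a fixed seed, then select).
small = dataset.sample(n=2, seed=0)
print(small.to_dict()["instances"])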
def train_test_split(self, test_size: float=0.2, shuffle: bool=True, seed: int=42):
r"""
Split the dataset into training and testing sets.
Parameters
------------
test_size : float, default=0.2.
The proportion of the dataset that will be used for testing.
Returns
---------
train_dataset : Dataset object.
A new dataset object containing the training instances.
test_dataset : Dataset object.
A new dataset object containing the testing instances.
"""
if self.backend == "huggingface":
splited = self.backend_dataset.train_test_split(
test_size=test_size, shuffle=shuffle, seed=seed
)
train_dataset = self.create_from_dict(
{
"type": self.get_type(),
"instances": [
{
col_name: splited["train"][col_name][i] for col_name in splited["train"].column_names
} for i in range(len(splited["train"]))
]
}
)
test_dataset = self.create_from_dict(
{
"type": self.get_type(),
"instances": [
{
col_name: splited["test"][col_name][i] for col_name in splited["test"].column_names
} for i in range(len(splited["test"]))
]
}
)
return train_dataset, test_dataset
else:
raise NotImplementedError(
f'Currently .train_test_split is not supported for backend "{self.backend}"'
)
|
Split the dataset into training and testing sets.
Parameters
------------
test_size : float, default=0.2.
The proportion of the dataset that will be used for testing.
Returns
---------
train_dataset : Dataset object.
A new dataset object containing the training instances.
test_dataset : Dataset object.
A new dataset object containing the testing instances.
|
train_test_split
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
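A usage sketch of the split, reusing a dataset built as in the earlier sketches (any huggingface-backed Dataset works); get_backend_dataset, used later in the tokenize method, is assumed to expose the underlying Hugging Face dataset.
train_ds, test_ds = dataset.train_test_split(test_size=0.2, shuffle=True, seed=42)
print(len(train_ds.get_backend_dataset()), len(test_ds.get_backend_dataset()))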
def drop_instances(self, indices: list):
r"""
Drop instances from the dataset.
Parameters
------------
indices : list.
A list of indices of the instances to drop from the dataset.
"""
if self.backend == "huggingface":
# Hugging Face datasets has no `remove_indices`; keep the complement via `select`.
drop = set(indices)
self.backend_dataset = self.backend_dataset.select(
[i for i in range(len(self.backend_dataset)) if i not in drop]
)
else:
raise NotImplementedError(
f'Currently .drop_instances is not supported for backend "{self.backend}"'
)
|
Drop instances from the dataset.
Parameters
------------
indices : list.
A list of indices of the instances to drop from the dataset.
|
drop_instances
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
def sanity_check(
self,
drop_invalid: bool=True,
):
r"""
Perform a sanity check on the dataset.
"""
if self.backend == "huggingface":
self.hf_dataset_sanity_check(drop_invalid)
else:
raise NotImplementedError(
f'Currently .sanity_check is not supported for backend "{self.backend}"'
)
|
Perform a sanity check on the dataset.
|
sanity_check
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
def hf_dataset_sanity_check(
self,
drop_invalid: bool=True,
):
r"""
Perform a sanity check on the HuggingFace dataset.
"""
if self.backend_dataset is None or len(self.backend_dataset) == 0:
raise ValueError("Dataset is empty.")
if self.type == 'text_to_textlist':
num_output_per_instance = len(self.backend_dataset['output'][0])
dataset_cache = self.backend_dataset.filter(lambda x: len(x['input']) != 0)
dataset_cache = dataset_cache.filter(lambda x: len(x['output']) == num_output_per_instance)
dataset_cache = dataset_cache.filter(lambda x: not all([len(output) == 0 for output in x['output']]))
if len(dataset_cache) != len(self.backend_dataset):
warning_info = (
f"Found {len(self.backend_dataset) - len(dataset_cache)} invalid instances "
"during hf_dataset_sanity_check, please check:\n"
" 1. length of input strings should not be empty\n"
" 2. length of output strings should not be all empty\n"
" 3. number of output strings should be consistent\n" # since we will use tensor reshape later
)
if drop_invalid:
self.backend_dataset = dataset_cache
logger.warning(warning_info+"Invalid instances are dropped.")
else:
raise ValueError(warning_info)
else:
logger.warning(f"No sanity check for {self.type} dataset.")
|
Perform a sanity check on the HuggingFace dataset.
|
hf_dataset_sanity_check
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/dataset.py
|
Apache-2.0
|
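For reference, a hedged sketch of a text_to_textlist dict that would pass the three checks above (non-empty inputs, a consistent number of outputs per instance, and not-all-empty outputs); "text_to_textlist" is assumed to be a registered dataset type.
from lmflow.datasets.dataset import Dataset  # assumed import path

valid_dict = {
    "type": "text_to_textlist",
    "instances": [
        {"input": "Question A", "output": ["answer 1", "answer 2"]},
        {"input": "Question B", "output": ["answer 3", "answer 4"]},
    ],
}
dataset = Dataset.create_from_dict(valid_dict)
dataset.sanity_check(drop_invalid=True)   # dispatches to hf_dataset_sanity_check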
def preprocess_llama_from_llava_plain(
sources,
tokenizer: transformers.PreTrainedTokenizer,
has_image: bool = False):
"""
This function just adds the image in front of the text
and doesn't add any prompt.
Args:
sources: The input data with text and image.
tokenizer: The tokenizer to process text.
has_image: Whether the input data has image.
Returns:
The input_ids and labels for the model.
"""
conversations = []
for source in sources:
assert len(source) == 2
assert DEFAULT_IMAGE_TOKEN in source[0]['value']
source[0]['value'] = DEFAULT_IMAGE_TOKEN
conversation = source[0]['value'] + source[1]['value'] + conversation_lib.default_conversation.sep
conversations.append(conversation)
# tokenize conversations
input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations]
targets = copy.deepcopy(input_ids)
for target, source in zip(targets, sources):
tokenized_len = len(tokenizer_image_token(source[0]['value'], tokenizer))
target[:tokenized_len] = IGNORE_INDEX
return dict(input_ids=input_ids, labels=targets)
|
This function just adds the image in front of the text
and doesn't add any prompt.
Args:
sources: The input data with text and image.
tokenizer: The tokenizer to process text.
has_image: Whether the input data has image.
Returns:
The input_ids and labels for the model.
|
preprocess_llama_from_llava_plain
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/multi_modal_dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/multi_modal_dataset.py
|
Apache-2.0
|
def preprocess_llama_from_llava_v1(
sources,
tokenizer: transformers.PreTrainedTokenizer,
has_image: bool = False):
"""
This function adds the prompt and then puts the image after the prompt,
so it needs additional code to generate the target labels.
Args:
sources: The input data with text and image.
tokenizer: The tokenizer to process text.
has_image: Whether the input data has image.
Returns:
The input_ids and labels for the model.
"""
conv = conversation_lib.default_conversation.copy()
roles = {"human": conv.roles[0], "gpt": conv.roles[1]}
# Apply prompt templates
conversations = []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != conv.roles[0]:
# Skip the first one if it is not from human
source = source[1:]
conv.messages = []
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
assert role == conv.roles[j % 2], f"{i}"
conv.append_message(role, sentence["value"])
conversations.append(conv.get_prompt())
# Tokenize conversations
if has_image:
input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
else:
input_ids = tokenizer(
conversations,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
targets = input_ids.clone()
assert conv.sep_style == conversation_lib.SeparatorStyle.TWO
# Mask targets
sep = conv.sep + conv.roles[1] + ": "
for conversation, target in zip(conversations, targets):
total_len = int(target.ne(tokenizer.pad_token_id).sum())
rounds = conversation.split(conv.sep2)
cur_len = 1
target[:cur_len] = IGNORE_INDEX
for i, rou in enumerate(rounds):
if rou == "":
break
parts = rou.split(sep)
if len(parts) != 2:
break
parts[0] += sep
if has_image:
round_len = len(tokenizer_image_token(rou, tokenizer))
instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
else:
round_len = len(tokenizer(rou).input_ids)
instruction_len = len(tokenizer(parts[0]).input_ids) - 2
target[cur_len : cur_len + instruction_len] = IGNORE_INDEX
cur_len += round_len
target[cur_len:] = IGNORE_INDEX
if cur_len < tokenizer.model_max_length:
if cur_len != total_len:
target[:] = IGNORE_INDEX
print(
f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
f" (ignored)"
)
return dict(
input_ids=input_ids,
labels=targets,
)
|
This function adds the prompt and then puts the image after the prompt,
so it needs additional code to generate the target labels.
Args:
sources: The input data with text and image.
tokenizer: The tokenizer to process text.
has_image: Whether the input data has image.
Returns:
The input_ids and labels for the model.
|
preprocess_llama_from_llava_v1
|
python
|
OptimalScale/LMFlow
|
src/lmflow/datasets/multi_modal_dataset.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/datasets/multi_modal_dataset.py
|
Apache-2.0
|
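The masking logic in both preprocessing functions reduces to overwriting the prompt positions of the label tensor with IGNORE_INDEX so the loss only covers response tokens. A standalone sketch of that idea, with IGNORE_INDEX = -100 assumed (the usual Hugging Face convention) and made-up token ids.
import torch

IGNORE_INDEX = -100  # assumed value; the usual Hugging Face ignore index

input_ids = torch.tensor([101, 2054, 2003, 2023, 1029, 2009, 2003, 1037, 4937, 102])
prompt_len = 5                       # number of tokens belonging to the prompt

labels = input_ids.clone()
labels[:prompt_len] = IGNORE_INDEX   # loss is computed only on the response tokens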
def __init__(
self,
model_args,
tune_strategy='normal',
ds_config=None,
device="gpu",
use_accelerator=False,
*args,
**kwargs
):
"""
Initializes a HFDecoderModel instance.
:param model_args: dictionary with model arguments such as model name, path, revision, etc.
:param tune_strategy: tuning strategy: normal, none, lora or adapter
:param ds_config: deepspeed configuration for distributed training
"""
HFModelMixin.__init__(
self,
model_args=model_args,
do_train=True if tune_strategy == "normal" else False,
ds_config=ds_config,
device=device,
use_accelerator=use_accelerator,
*args,
**kwargs
)
|
Initializes a HFDecoderModel instance.
:param model_args: dictionary with model arguments such as model name, path, revision, etc.
:param tune_strategy: tuning strategy: normal, none, lora or adapter
:param ds_config: deepspeed configuration for distributed training
|
__init__
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_decoder_model.py
|
Apache-2.0
|
def tokenize(
self,
dataset: Dataset,
add_special_tokens=True,
*args,
**kwargs
) -> Dataset:
"""
Tokenize the full dataset.
Parameters
------------
dataset : lmflow.datasets.Dataset.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
tokenized_datasets :
The tokenized dataset, without any leading or trailing special
tokens (normally they are Begin-Of-Sentence or End-Of-Sentence
tokens).
"""
# Preprocessing the datasets.
# First we tokenize all the texts.
if dataset.get_backend() != "huggingface":
raise NotImplementedError(
"tokenization of datasets with a non-huggingface backend is"
" not supported yet"
)
dataset_type = dataset.get_type()
model_args = self.model_args
raw_datasets = dataset
hf_raw_datasets = dataset.get_backend_dataset()
column_names = list(hf_raw_datasets.features)
data_args = raw_datasets.get_data_args()
# Requires three types of information for tokenizing different datasets
# 1) Which fields require tokenization, e.g.
# "text2float": "text", but not "float"
# "text2text": both "input" and "output"
# 2) How the tokenized sequences will be concatenated together, e.g.
# "text_only": "text" -> "text"
# "text2text": "input", "output" -> "input" + "output"
# 3) Which fields require loss in final computation, e.g.
# "text_only": "text"
# "text2text": "output" only
tokenized_column_order = None # Handles 1) and 2)
label_columns = None # Handles 3)
if dataset_type == "text_only":
tokenized_column_order = ["text"]
label_columns = ["text"]
elif dataset_type == "text2text":
tokenized_column_order = ["input", "output"]
label_columns = ["output"]
add_special_tokens = False
elif dataset_type == "conversation":
if data_args.conversation_template:
if data_args.conversation_template in PRESET_TEMPLATES.keys():
conversation_template = PRESET_TEMPLATES[data_args.conversation_template]
else:
raise NotImplementedError(
f"Conversation template {data_args.conversation_template} is not supported yet."
)
else:
logger.warning("No conversation template provided. Using default template.")
conversation_template = PRESET_TEMPLATES['empty']
logger.warning(f"Conversation template: {conversation_template}")
else:
raise NotImplementedError(
f"dataset type \"{dataset_type}\" is not supported, currently"
" only support following data types:\n"
f" 1) {TEXT_ONLY_DATASET_DESCRIPTION}\n"
f" 2) {TEXT2TEXT_DATASET_DESCRIPTION}\n"
f" 3) {CONVERSATION_DATASET_DESCRIPTION}\n"
)
# Whether to truncate long sequences to fit into max_length
use_truncation = False
if model_args.use_lora or data_args.disable_group_texts:
use_truncation = True
tokenize_fn = conversation_tokenize_function if "conversation" in dataset_type else tokenize_function
tokenize_fn_kwargs = {
"data_args": data_args,
"tokenizer": self.tokenizer,
"column_names": column_names,
}
if "conversation" in dataset_type:
tokenize_fn_kwargs["conversation_template"] = conversation_template
else:
tokenize_fn_kwargs["label_columns"] = label_columns
tokenize_fn_kwargs["tokenized_column_order"] = tokenized_column_order
tokenize_fn_kwargs["add_special_tokens"] = add_special_tokens
tokenize_fn_kwargs["use_truncation"] = use_truncation
tokenize_kwargs = {}
if not data_args.streaming:
fingerprint = hashlib.md5(
(
raw_datasets.get_fingerprint()
+ str(self.tokenizer)
+ f'###padding_side={self.tokenizer.padding_side}'
+ ('###conversation_template=' + str(conversation_template) if "conversation" in dataset_type else "")
+ f'###disable_group_texts={data_args.disable_group_texts}'
+ f'###block_size={data_args.block_size}'
).encode("utf-8")
).hexdigest()
tokenize_kwargs = {
"num_proc": data_args.preprocessing_num_workers,
"load_from_cache_file": not data_args.overwrite_cache,
"desc": "Running tokenizer on dataset",
"new_fingerprint": fingerprint,
}
if data_args.block_size < self.tokenizer.model_max_length:
logger.warning(
f"block_size {data_args.block_size} < model_max_length {self.tokenizer.model_max_length}, "
"use block_size for maximum tokenized sequence length."
)
tokenized_datasets = raw_datasets.map(
tokenize_fn,
batched=True,
remove_columns=column_names,
fn_kwargs=tokenize_fn_kwargs,
**tokenize_kwargs
)
return tokenized_datasets
|
Tokenize the full dataset.
Parameters
------------
dataset : lmflow.datasets.Dataset.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
tokenized_datasets :
The tokenized dataset, without any leading or trailing special
tokens (normally they are Begin-Of-Sentence or End-Of-Sentence
tokens).
|
tokenize
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_decoder_model.py
|
Apache-2.0
|
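The per-type bookkeeping described in the comments of tokenize can be summarised in one illustrative mapping; this dict does not exist in the library, it only restates the branches above.
TOKENIZE_CONFIG = {
    # dataset type: (tokenized_column_order,  label_columns)
    "text_only":    (["text"],                ["text"]),
    "text2text":    (["input", "output"],     ["output"]),
    # "conversation" datasets are handled via conversation templates instead.
}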
def encode(self, input: Union[str, List[str]], *args, **kwargs ) -> Union[List[int], List[List[int]]]:
"""
Perform encoding process of the tokenizer.
Parameters
------------
inputs : str or list.
The text sequence.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
If the input is a string, return the tokenized input:
"Hello,world!" -> [101, 7592, 1010, 2088, 102]
If the input is a list (batch), return a dict with input_ids, attention_mask and token_type_ids:
["Hello,world!", "Hello!"] -> {'input_ids': tensor([[ 101, 7592, 1010, 2088, 102],...), 'attention_mask': tensor([[1, 1, 1, 1, 1], [0, 0, 1, 1, 1]])}
"""
if isinstance(input, list):
return self.tokenizer(text=input, *args, **kwargs)  # batch encode; will automatically do left padding
elif isinstance(input, str):
return self.tokenizer.encode(text=input, *args, **kwargs)
else:
raise NotImplementedError(f'type "{type(input)}" cannot be encoded')
|
Perform encoding process of the tokenizer.
Parameters
------------
inputs : str or list.
The text sequence.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
If the input is a string, return the tokenized input:
"Hello,world!" -> [101, 7592, 1010, 2088, 102]
If the input is a list (batch), return a dict with input_ids, attention_mask and token_type_ids:
["Hello,world!", "Hello!"] -> {'input_ids': tensor([[ 101, 7592, 1010, 2088, 102],...), 'attention_mask': tensor([[1, 1, 1, 1, 1], [0, 0, 1, 1, 1]])}
|
encode
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_decoder_model.py
|
Apache-2.0
|
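A usage sketch of encode, assuming model is an already initialized HFDecoderModel (constructed as in the __init__ record above) whose tokenizer has a pad token configured; return_tensors and padding are standard Hugging Face tokenizer kwargs forwarded through **kwargs.
single = model.encode("Hello, world!")   # str in -> list of token ids out
batch = model.encode(["Hello, world!", "Hi"], return_tensors="pt", padding=True)
print(batch["input_ids"].shape, batch["attention_mask"].shape)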
def decode(self, input, *args, **kwargs ) -> Union[str, List[str]]:
"""
Perform decoding process of the tokenizer.
Parameters
------------
inputs : list or tensor.
The token sequence.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The text decoded from the token inputs.
If the input is a batch, return a list of texts:
[[101, 7592, 1010, 2088, 102], [101, 7592, 1010, 2088, 102]] -> ["Hello,world!", "Hello,world!"]
If the input is a single sequence, return the text:
[101, 7592, 1010, 2088, 102] -> "Hello,world!"
"""
if isinstance(input, List):
input=torch.tensor(input)
if input.dim()==2:
return self.tokenizer.batch_decode(input, *args, **kwargs)  # batch decode
else:
# Can be list of ints or a Tensor
return self.tokenizer.decode(input, *args, **kwargs)
|
Perform decoding process of the tokenizer.
Parameters
------------
inputs : list or tensor.
The token sequence.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The text decoded from the token inputs.
If the input is a batch, return a list of texts:
[[101, 7592, 1010, 2088, 102], [101, 7592, 1010, 2088, 102]] -> ["Hello,world!", "Hello,world!"]
If the input is a single sequence, return the text:
[101, 7592, 1010, 2088, 102] -> "Hello,world!"
|
decode
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_decoder_model.py
|
Apache-2.0
|
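A matching decode sketch under the same assumptions as the encode example: a 1-D input (plain list of ids) goes through tokenizer.decode, while a 2-D tensor goes through batch_decode; skip_special_tokens is a standard tokenizer kwarg.
token_ids = model.encode("Hello, world!")
text = model.decode(token_ids, skip_special_tokens=True)      # 1-D input -> str

batch_ids = model.encode(["Hello, world!", "Hi"],
                         return_tensors="pt", padding=True)["input_ids"]
texts = model.decode(batch_ids, skip_special_tokens=True)     # 2-D input -> list of str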
def inference(
self,
inputs,
release_gpu: bool = False,
use_vllm: bool = False,
**kwargs
):
"""
Perform generation process of the model.
Parameters
------------
inputs :
The sequence used as a prompt for the generation or as model inputs to the model.
When using vllm inference, this should be a string or a list of strings.
When using normal inference, this should be a tensor.
release_gpu : bool, optional
Whether to release the GPU resource after inference, by default False.
use_vllm : bool, optional
Whether to use VLLM for inference, by default False.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The generated sequence output
"""
if not self._activated:
self.activate_model_for_inference(
use_vllm=use_vllm,
**kwargs,
)
if use_vllm:
if not is_vllm_available():
raise ImportError("vllm is not installed. Please install vllm to use VLLM inference.")
res = self.__vllm_inference(inputs, **kwargs)
else:
res = self.__inference(inputs, **kwargs)
if release_gpu:
self.deactivate_model_for_inference(use_vllm=use_vllm)
return res
|
Perform generation process of the model.
Parameters
------------
inputs :
The sequence used as a prompt for the generation or as model inputs to the model.
When using vllm inference, this should be a string or a list of strings.
When using normal inference, this should be a tensor.
release_gpu : bool, optional
Whether to release the GPU resource after inference, by default False.
use_vllm : bool, optional
Whether to use VLLM for inference, by default False.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The generated sequence output
|
inference
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_decoder_model.py
|
Apache-2.0
|
def __inference(self, inputs, *args, **kwargs):
"""
Perform generation process of the model.
Parameters
------------
inputs :
The **tokenized** sequence used as a prompt for the generation or as model inputs to the model.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The generated sequence output
"""
with torch.no_grad():
if self.use_accelerator:
outputs = self.backend_model.generate(
input_ids=inputs,
pad_token_id=self.tokenizer.pad_token_id,
*args,
**kwargs
)
else:
if self.device == "gpu":
outputs = self.ds_engine.module.generate(
input_ids=inputs,
synced_gpus=True,
pad_token_id=self.tokenizer.pad_token_id,
*args,
**kwargs
)
elif self.device == "cpu":
outputs = self.backend_model.generate(
input_ids=inputs,
synced_gpus=True,
pad_token_id=self.tokenizer.pad_token_id,
*args,
**kwargs
)
else:
raise NotImplementedError(
f"device \"{self.device}\" is not supported"
)
return outputs
|
Perform generation process of the model.
Parameters
------------
inputs :
The **tokenized** sequence used as a prompt for the generation or as model inputs to the model.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The generated sequence output
|
__inference
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_decoder_model.py
|
Apache-2.0
|
def __vllm_inference(
self,
inputs: Union[str, List[str]],
sampling_params: Optional['SamplingParams'] = None,
**kwargs,
) -> List[VLLMInferenceResultWithInput]:
"""Perform VLLM inference process of the model.
Parameters
----------
inputs : Union[str, List[str]]
Prompt(s), string or a list of strings.
sampling_params : Optional[SamplingParams], optional
vllm SamplingParams object, by default None.
Returns
-------
List[VLLMInferenceResultWithInput]
Return a list of VLLMInferenceResultWithInput, where each
element contains the input prompt and the corresponding output.
When `sampling_params.detokenize = True`, the output is a list of strings
containing sampling_params.n samples for the corresponding prompt.
When `sampling_params.detokenize = False`, the output is a list of lists of ints
(token ids, with no decoding after generation).
"""
vllm_outputs = self.backend_model_for_inference.generate(
inputs,
sampling_params=sampling_params,
use_tqdm=True,
)
final_output = []
for output in vllm_outputs:
if sampling_params.detokenize:
output_list = [sentence.text for sentence in output.outputs]
else:
output_list = [sentence.token_ids for sentence in output.outputs]
final_output.append({"input": output.prompt, "output": output_list})
return final_output
|
Perform VLLM inference process of the model.
Parameters
----------
inputs : Union[str, List[str]]
Prompt(s), string or a list of strings.
sampling_params : Optional[SamplingParams], optional
vllm SamplingParams object, by default None.
Returns
-------
List[VLLMInferenceResultWithInput]
Return a list of VLLMInferenceResultWithInput, where each
element contains the input prompt and the corresponding output.
When `sampling_params.detokenize = True`, the output is a list of strings
containing sampling_params.n samples for the corresponding prompt.
When `sampling_params.detokenize = False`, the output is a list of lists of ints
(token ids, with no decoding after generation).
|
__vllm_inference
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_decoder_model.py
|
Apache-2.0
|
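A heavily hedged sketch of the vLLM path: SamplingParams and its n, temperature, max_tokens and detokenize fields are real vLLM parameters, but whether inference forwards sampling_params untouched through **kwargs (and what activate_model_for_inference expects) is an assumption based on the code above, not verified behaviour.
from vllm import SamplingParams

params = SamplingParams(n=2, temperature=0.8, max_tokens=64, detokenize=True)
results = model.inference(
    ["Tell me a joke.", "Summarize LMFlow in one sentence."],
    use_vllm=True,
    sampling_params=params,
)
# Each element looks like {"input": <prompt>, "output": [<n generated strings>]}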
def prepare_inputs_for_inference(
self,
dataset: Dataset,
apply_chat_template: bool = True,
enable_distributed_inference: bool = False,
use_vllm: bool = False,
**kwargs,
) -> Union[List[str], "ray.data.Dataset", Dict[str, torch.Tensor]]:
"""
Prepare inputs for inference.
Parameters
------------
dataset : lmflow.datasets.Dataset.
The dataset used for inference.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The prepared inputs for inference.
"""
if use_vllm:
if not is_ray_available() and enable_distributed_inference:
raise ImportError("ray is not installed. Please install ray to use distributed vllm inference.")
inference_inputs = self.__prepare_inputs_for_vllm_inference(
dataset=dataset,
apply_chat_template=apply_chat_template,
enable_distributed_inference=enable_distributed_inference,
)
else:
inference_inputs = self.__prepare_inputs_for_inference(
dataset,
apply_chat_template=apply_chat_template,
enable_distributed_inference=enable_distributed_inference,
)
return inference_inputs
|
Prepare inputs for inference.
Parameters
------------
dataset : lmflow.datasets.Dataset.
The dataset used for inference.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The prepared inputs for inference.
|
prepare_inputs_for_inference
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_decoder_model.py
|
Apache-2.0
|
def save(self, dir, save_full_model=False, *args, **kwargs):
"""
Save the model and tokenizer to the given directory.
Parameters
------------
dir :
The directory to save the model and tokenizer.
save_full_model : Optional.
Whether to save the full model.
kwargs : Optional.
Keyword arguments.
Returns
------------
None
"""
self.get_tokenizer().save_pretrained(dir)
if save_full_model and self.model_args.use_lora:
save_dtype = (
torch.float16
if self.model_args.torch_dtype in ["auto", None]
else getattr(torch, self.model_args.torch_dtype)
)
self.backend_model_full.to(dtype=save_dtype).save_pretrained(dir)
logger.warning(f"Save full model with dtype: {save_dtype}")
else:
self.get_backend_model().save_pretrained(dir)
|
Save the model and tokenizer to the given directory.
Parameters
------------
dir :
The directory to save the model and tokenizer.
save_full_model : Optional.
Whether to save the full model.
kwargs : Optional.
Keyword arguments.
Returns
------------
None
|
save
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_decoder_model.py
|
Apache-2.0
|
def __init__(
self,
model_args,
tune_strategy='normal',
ds_config=None,
device="gpu",
use_accelerator=False,
custom_model=False,
with_deepspeed=True,
pipeline_args=None,
*args,
**kwargs
):
"""
Initializes a HFDecoderModel instance.
:param model_args: dictionary with model arguments such as model name, path, revision, etc.
:param tune_strategy: tuning strategy: normal, none, lora or adapter
:param ds_config: deepspeed configuration for distributed training
"""
# See more about loading any type of standard or custom dataset (from
# files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training: The .from_pretrained methods guarantee that
# only one local process can concurrently download model & vocab.
self.device = device
if tune_strategy == 'normal':
raise NotImplementedError(
f"tune_strategy \"{tune_strategy}\" is not supported"
)
elif tune_strategy == 'none':
if use_accelerator:
raise NotImplementedError(
f"Currently encoder2decoder model is not supported with accelerator"
)
# dschf = HfDeepSpeedConfig(ds_config)
dschf = HfTrainerDeepSpeedConfig(ds_config)
if pipeline_args is not None:
dschf.trainer_config_process(pipeline_args)
peft_model_id = model_args.lora_model_path
# NOTE: Currently offload is not supported by llama
if "llama" in model_args.model_name_or_path and model_args.use_ram_optimized_load:
logger.warning(
"llama does not support RAM optimized load. Automatically"
" use original load instead."
)
model_args.use_ram_optimized_load = False
# get model register
self.arch_type = model_args.arch_type
if self.arch_type == "encoder_decoder":
if model_args.model_name_or_path == 'THUDM/chatglm-6b':
model_register = AutoModel
else:
model_register = AutoModelForSeq2SeqLM
elif self.arch_type == "vision_encoder_decoder":
if not custom_model:
model_register = AutoModelForVision2Seq
else:
model_register = CustomAutoVision2SeqModel
else:
raise NotImplementedError
if not custom_model:
if model_args.model_name_or_path == 'THUDM/chatglm-6b':
self.backend_model = model_register.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
elif model_args.use_ram_optimized_load and peft_model_id is None:
try:
# RAM-optimized load
self.backend_model = model_register.from_pretrained(
model_args.model_name_or_path,
device_map="auto",
offload_folder="offload",
offload_state_dict=True,
)
except Exception:
logger.warning(
"Failed to use RAM optimized load. Automatically"
" use original load instead."
)
# Normal load
self.backend_model = model_register.from_pretrained(
model_args.model_name_or_path,
)
else:
if peft_model_id is not None:
logger.warning(
"LoRA does not support RAM optimized load currently."
" Automatically use original load instead."
)
self.backend_model = model_register.from_pretrained(
model_args.model_name_or_path,
)
# else:
# self.backend_model = model_register.from_pretrained(
# model_args.model_name_or_path)
else:
if model_args.llava_loading is False:
# FIXME remove the following from_pretrained code by
# creating a unified pretrained model.
model = CustomAutoVision2SeqModel.from_pretrained(model_args.model_name_or_path)
if model_args.llm_model_name_or_path is not None:
text_config = LlamaConfig.from_pretrained(model_args.llm_model_name_or_path)
model.config.text_config = text_config
model.language_model_from_pretrained(model_args.llm_model_name_or_path,
low_resource=model_args.low_resource)
state_dict = torch.load(
model_args.pretrained_language_projection_path,
map_location="cpu")
model.load_state_dict(state_dict, strict=False)
else:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path)
if model_args.low_resource:
kwargs = dict(
torch_dtype=torch.float16,
load_in_8bit=True,
device_map="auto",
)
else:
# kwargs = dict(torch_dtype=torch.float16)
kwargs = dict(device_map="auto")
if (model_args.image_encoder_name_or_path is None and
model_args.qformer_name_or_path is None and
model_args.llm_model_name_or_path is None):
config = AutoConfig.from_pretrained(
model_args.model_name_or_path)
model = CustomAutoVision2SeqModel.from_pretrained(
model_args.model_name_or_path, **kwargs)
else:
config = update_custom_config(config, model_args)
model = CustomAutoVision2SeqModel(
config,
image_encoder_name_or_path=model_args.image_encoder_name_or_path,
qformer_name_or_path=model_args.qformer_name_or_path,
language_model_name_or_path=model_args.llm_model_name_or_path,
low_resource=model_args.low_resource)
if model_args.pretrained_language_projection_path is not None:
state_dict = torch.load(
model_args.pretrained_language_projection_path, map_location="cpu")
new_state_dict = {}
new_state_dict['model.language_projection.weight'] = \
state_dict['model.mm_projector.weight']
new_state_dict['model.language_projection.bias'] = \
state_dict['model.mm_projector.bias']
if model_args.llava_pretrain_model_path is not None:
# used for inference that directly loads the pretrained model
model = load_llava_pretrain_model(
model, model_args.llava_pretrain_model_path)
if model_args.save_pretrain_model_path is not None:
model.save_pretrained(
model_args.save_pretrain_model_path)
self.backend_model = model
# init tokenizer
if self.arch_type == "encoder_decoder":
self.tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, trust_remote_code=True)
elif self.arch_type == "vision_encoder_decoder":
if model_args.llava_loading is False:
# blip2 image and token processor
self.tokenizer = AutoProcessor.from_pretrained(
model_args.model_name_or_path, trust_remote_code=True)
if model_args.llm_model_name_or_path is not None:
# update the tokenizer from the custom llm.
self.tokenizer.tokenizer = (
AutoTokenizer.from_pretrained(
model_args.llm_model_name_or_path)
)
self.image_processor = self.tokenizer.image_processor
else:
# image processor is stored in the vision encoder
if model_args.llm_model_name_or_path is not None:
self.tokenizer = AutoTokenizer.from_pretrained(
model_args.llm_model_name_or_path)
else:
self.tokenizer = AutoTokenizer.from_pretrained(
config.text_config._name_or_path)
self.image_processor = self.backend_model.image_processor
else:
raise NotImplementedError
self.backend_model_full = self.backend_model
if peft_model_id is not None:
self.backend_model = PeftModel.from_pretrained(
self.backend_model, peft_model_id
)
if tune_strategy == "none" and with_deepspeed is True:
# when load the model with 4bit / 8bit.
# fail to use deepspeed.
if device == "gpu":
deepspeed.init_distributed()
self.ds_engine = deepspeed.initialize(model=self.backend_model, config_params=ds_config)[0]
self.ds_engine.module.eval()
self.tokenizer.padding_side = "left" # necessary for auto-regressive inference
elif tune_strategy == 'adapter':
raise NotImplementedError('adapter tune strategy not implemented')
if self.arch_type == "encoder_decoder":
if self.tokenizer.eos_token_id is None:
self.tokenizer.eos_token_id = self.backend_model.config.eos_token_id
if self.tokenizer.pad_token is None:
self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
|
Initializes a HFDecoderModel instance.
:param model_args: dictionary with model arguments such as model name, path, revision, etc.
:param tune_strategy: tuning strategy: normal, none, lora or adapter
:param ds_config: deepspeed configuration for distributed training
|
__init__
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_encoder_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_encoder_decoder_model.py
|
Apache-2.0
|
def encode(self, input: Union[str, List[str]], *args, **kwargs ) -> Union[List[int], List[List[int]]]:
"""
Perform encoding process of the tokenizer.
Parameters
------------
inputs : str or list.
The text sequence.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The tokenized inputs.
"""
# check how to handle the image processor
if isinstance(input, dict):
# TODO refactor the input type to make it elegant.
kwargs.update(input)
if "images" not in input:
tokens = self.tokenizer(*args, **kwargs)
else:
if getattr(self.tokenizer, "image_processor", None) is not None:
tokens = self.tokenizer(*args, **kwargs)
elif getattr(self, "image_processor", None) is not None:
images = kwargs.pop("images")
tokens = self.tokenizer(*args, **kwargs)
images = self.image_processor.preprocess(
images, return_tensors='pt')['pixel_values'][0]
tokens['pixel_values'] = images
else:
print("Can not find the image processor")
raise NotImplementedError
return tokens
elif isinstance(input, list):
return self.tokenizer(text=input, *args, **kwargs)  # batch encode; will automatically do left padding
elif isinstance(input, str):
return self.tokenizer.encode(text=input, *args, **kwargs)
else:
raise NotImplementedError(f'type "{type(input)}" cannot be encoded')
|
Perform encoding process of the tokenizer.
Parameters
------------
inputs : str or list.
The text sequence.
args : Optional.
Positional arguments.
kwargs : Optional.
Keyword arguments.
Returns
------------
outputs :
The tokenized inputs.
|
encode
|
python
|
OptimalScale/LMFlow
|
src/lmflow/models/hf_encoder_decoder_model.py
|
https://github.com/OptimalScale/LMFlow/blob/master/src/lmflow/models/hf_encoder_decoder_model.py
|
Apache-2.0
|