code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def writeLayerContents(self, layerOrder=None, validate=None):
"""
Write the layercontents.plist file. This method *must* be called
after all glyph sets have been written.
"""
if validate is None:
validate = self._validate
if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
return
if layerOrder is not None:
newOrder = []
for layerName in layerOrder:
if layerName is None:
layerName = DEFAULT_LAYER_NAME
newOrder.append(layerName)
layerOrder = newOrder
else:
layerOrder = list(self.layerContents.keys())
if validate and set(layerOrder) != set(self.layerContents.keys()):
raise UFOLibError(
"The layer order content does not match the glyph sets that have been created."
)
layerContents = [
(layerName, self.layerContents[layerName]) for layerName in layerOrder
]
self._writePlist(LAYERCONTENTS_FILENAME, layerContents)
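# Hedged usage sketch (not part of the library source; the .ufo path is
# hypothetical). writeLayerContents() must be called after every glyph set has
# been written; None in the order stands for the default layer, as handled above.
from fontTools.ufoLib import UFOWriter

writer = UFOWriter("MyFont.ufo")
writer.getGlyphSet()                                  # default glyph set
writer.getGlyphSet("background", defaultLayer=False)  # an additional layer
writer.writeLayerContents([None, "background"])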
|
Write the layercontents.plist file. This method *must* be called
after all glyph sets have been written.
|
writeLayerContents
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def getGlyphSet(
self,
layerName=None,
defaultLayer=True,
glyphNameToFileNameFunc=None,
validateRead=None,
validateWrite=None,
expectContentsFile=False,
):
"""
Return the GlyphSet object associated with the
appropriate glyph directory in the .ufo.
If layerName is None, the default glyph set
will be used. The defaultLayer flag indicates
that the layer should be saved into the default
glyphs directory.
``validateRead`` will validate the read data; by default it is set to the
class's validate value and can be overridden.
``validateWrite`` will validate the written data; by default it is set to the
class's validate value and can be overridden.
``expectContentsFile`` will raise a GlifLibError if a contents.plist file is
not found on the glyph set file system. This should be set to ``True`` if you
are reading an existing UFO and ``False`` if you use ``getGlyphSet`` to create
a fresh glyph set.
"""
if validateRead is None:
validateRead = self._validate
if validateWrite is None:
validateWrite = self._validate
# only default can be written in < 3
if self._formatVersion < UFOFormatVersion.FORMAT_3_0 and (
not defaultLayer or layerName is not None
):
raise UFOLibError(
f"Only the default layer can be writen in UFO {self._formatVersion.major}."
)
# locate a layer name when None has been given
if layerName is None and defaultLayer:
for existingLayerName, directory in self.layerContents.items():
if directory == DEFAULT_GLYPHS_DIRNAME:
layerName = existingLayerName
if layerName is None:
layerName = DEFAULT_LAYER_NAME
elif layerName is None and not defaultLayer:
raise UFOLibError("A layer name must be provided for non-default layers.")
# move along to format specific writing
if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
return self._getDefaultGlyphSet(
validateRead,
validateWrite,
glyphNameToFileNameFunc=glyphNameToFileNameFunc,
expectContentsFile=expectContentsFile,
)
elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major:
return self._getGlyphSetFormatVersion3(
validateRead,
validateWrite,
layerName=layerName,
defaultLayer=defaultLayer,
glyphNameToFileNameFunc=glyphNameToFileNameFunc,
expectContentsFile=expectContentsFile,
)
else:
raise NotImplementedError(self._formatVersion)
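# Hedged usage sketch (not part of the library source; the path and layer name
# are hypothetical). Pass expectContentsFile=True when opening an existing UFO
# so a missing contents.plist raises instead of being treated as a new glyph set.
from fontTools.ufoLib import UFOWriter

writer = UFOWriter("Existing.ufo")
defaultGlyphs = writer.getGlyphSet(expectContentsFile=True)
background = writer.getGlyphSet("public.background", defaultLayer=False)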
|
Return the GlyphSet object associated with the
appropriate glyph directory in the .ufo.
If layerName is None, the default glyph set
will be used. The defaultLayer flag indicates
that the layer should be saved into the default
glyphs directory.
``validateRead`` will validate the read data; by default it is set to the
class's validate value and can be overridden.
``validateWrite`` will validate the written data; by default it is set to the
class's validate value and can be overridden.
``expectContentsFile`` will raise a GlifLibError if a contents.plist file is
not found on the glyph set file system. This should be set to ``True`` if you
are reading an existing UFO and ``False`` if you use ``getGlyphSet`` to create
a fresh glyph set.
|
getGlyphSet
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def renameGlyphSet(self, layerName, newLayerName, defaultLayer=False):
"""
Rename a glyph set.
Note: if a GlyphSet object has already been retrieved for
layerName, it is up to the caller to inform that object that
the directory it represents has changed.
"""
if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
# ignore renaming glyph sets for UFO1 UFO2
# just write the data from the default layer
return
# the new and old names can be the same
# as long as the default is being switched
if layerName == newLayerName:
# if the default is off and the layer is already not the default, skip
if (
self.layerContents[layerName] != DEFAULT_GLYPHS_DIRNAME
and not defaultLayer
):
return
# if the default is on and the layer is already the default, skip
if self.layerContents[layerName] == DEFAULT_GLYPHS_DIRNAME and defaultLayer:
return
else:
# make sure the new layer name doesn't already exist
if newLayerName is None:
newLayerName = DEFAULT_LAYER_NAME
if newLayerName in self.layerContents:
raise UFOLibError("A layer named %s already exists." % newLayerName)
# make sure the default layer doesn't already exist
if defaultLayer and DEFAULT_GLYPHS_DIRNAME in self.layerContents.values():
raise UFOLibError("A default layer already exists.")
# get the paths
oldDirectory = self._findDirectoryForLayerName(layerName)
if defaultLayer:
newDirectory = DEFAULT_GLYPHS_DIRNAME
else:
existing = {name.lower() for name in self.layerContents.values()}
newDirectory = userNameToFileName(
newLayerName, existing=existing, prefix="glyphs."
)
# update the internal mapping
del self.layerContents[layerName]
self.layerContents[newLayerName] = newDirectory
# do the file system copy
self.fs.movedir(oldDirectory, newDirectory, create=True)
|
Rename a glyph set.
Note: if a GlyphSet object has already been retrieved for
layerName, it is up to the caller to inform that object that
the directory it represents has changed.
|
renameGlyphSet
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def deleteGlyphSet(self, layerName):
"""
Remove the glyph set matching layerName.
"""
if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
# ignore deleting glyph sets for UFO1 UFO2 as there are no layers
# just write the data from the default layer
return
foundDirectory = self._findDirectoryForLayerName(layerName)
self.removePath(foundDirectory, removeEmptyParents=False)
del self.layerContents[layerName]
|
Remove the glyph set matching layerName.
|
deleteGlyphSet
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def writeImage(self, fileName, data, validate=None):
"""
Write data to fileName in the images directory.
The data must be a valid PNG.
"""
if validate is None:
validate = self._validate
if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
raise UFOLibError(
f"Images are not allowed in UFO {self._formatVersion.major}."
)
fileName = fsdecode(fileName)
if validate:
valid, error = pngValidator(data=data)
if not valid:
raise UFOLibError(error)
self.writeBytesToPath(f"{IMAGES_DIRNAME}/{fileName}", data)
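# Hedged usage sketch (not part of the library source; file names are
# hypothetical). The data must already be PNG-encoded bytes.
from fontTools.ufoLib import UFOWriter

writer = UFOWriter("MyFont.ufo")
with open("background-sketch.png", "rb") as f:
    pngData = f.read()
writer.writeImage("background-sketch.png", pngData)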
|
Write data to fileName in the images directory.
The data must be a valid PNG.
|
writeImage
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def removeImage(self, fileName, validate=None): # XXX remove unused 'validate'?
"""
Remove the file named fileName from the
images directory.
"""
if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
raise UFOLibError(
f"Images are not allowed in UFO {self._formatVersion.major}."
)
self.removePath(f"{IMAGES_DIRNAME}/{fsdecode(fileName)}")
|
Remove the file named fileName from the
images directory.
|
removeImage
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def copyImageFromReader(self, reader, sourceFileName, destFileName, validate=None):
"""
Copy the sourceFileName in the provided UFOReader to destFileName
in this writer. This uses the most memory-efficient method possible
for copying the data.
"""
if validate is None:
validate = self._validate
if self._formatVersion < UFOFormatVersion.FORMAT_3_0:
raise UFOLibError(
f"Images are not allowed in UFO {self._formatVersion.major}."
)
sourcePath = f"{IMAGES_DIRNAME}/{fsdecode(sourceFileName)}"
destPath = f"{IMAGES_DIRNAME}/{fsdecode(destFileName)}"
self.copyFromReader(reader, sourcePath, destPath)
|
Copy the sourceFileName in the provided UFOReader to destFileName
in this writer. This uses the most memory-efficient method possible
for copying the data.
|
copyImageFromReader
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def _sniffFileStructure(ufo_path):
"""Return UFOFileStructure.ZIP if the UFO at path 'ufo_path' (str)
is a zip file, else return UFOFileStructure.PACKAGE if 'ufo_path' is a
directory.
Raise UFOLibError if it is a file with unknown structure, or if the path
does not exist.
"""
if zipfile.is_zipfile(ufo_path):
return UFOFileStructure.ZIP
elif os.path.isdir(ufo_path):
return UFOFileStructure.PACKAGE
elif os.path.isfile(ufo_path):
raise UFOLibError(
"The specified UFO does not have a known structure: '%s'" % ufo_path
)
else:
raise UFOLibError("No such file or directory: '%s'" % ufo_path)
|
Return UFOFileStructure.ZIP if the UFO at path 'ufo_path' (str)
is a zip file, else return UFOFileStructure.PACKAGE if 'ufo_path' is a
directory.
Raise UFOLibError if it is a file with unknown structure, or if the path
does not exist.
|
_sniffFileStructure
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def makeUFOPath(path):
"""
Return a .ufo pathname.
>>> makeUFOPath("directory/something.ext") == (
... os.path.join('directory', 'something.ufo'))
True
>>> makeUFOPath("directory/something.another.thing.ext") == (
... os.path.join('directory', 'something.another.thing.ufo'))
True
"""
dir, name = os.path.split(path)
name = ".".join([".".join(name.split(".")[:-1]), "ufo"])
return os.path.join(dir, name)
|
Return a .ufo pathname.
>>> makeUFOPath("directory/something.ext") == (
... os.path.join('directory', 'something.ufo'))
True
>>> makeUFOPath("directory/something.another.thing.ext") == (
... os.path.join('directory', 'something.another.thing.ufo'))
True
|
makeUFOPath
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def validateFontInfoVersion2ValueForAttribute(attr, value):
"""
This performs very basic validation of the value for attribute
following the UFO 2 fontinfo.plist specification. The results
of this should not be interpreted as *correct* for the font
that they are part of. This merely indicates that the value
is of the proper type and, where the specification defines
a set range of possible values for an attribute, that the
value is in the accepted range.
"""
dataValidationDict = fontInfoAttributesVersion2ValueData[attr]
valueType = dataValidationDict.get("type")
validator = dataValidationDict.get("valueValidator")
valueOptions = dataValidationDict.get("valueOptions")
# have specific options for the validator
if valueOptions is not None:
isValidValue = validator(value, valueOptions)
# no specific options
else:
if validator == genericTypeValidator:
isValidValue = validator(value, valueType)
else:
isValidValue = validator(value)
return isValidValue
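# Hedged usage sketch (not part of the library source). It assumes that
# "unitsPerEm" is a numeric attribute in fontInfoAttributesVersion2ValueData,
# per the UFO 2 fontinfo.plist specification.
from fontTools.ufoLib import validateFontInfoVersion2ValueForAttribute

print(validateFontInfoVersion2ValueForAttribute("unitsPerEm", 1000))    # expect True
print(validateFontInfoVersion2ValueForAttribute("unitsPerEm", "1000"))  # expect False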
|
This performs very basic validation of the value for attribute
following the UFO 2 fontinfo.plist specification. The results
of this should not be interpreted as *correct* for the font
that they are part of. This merely indicates that the value
is of the proper type and, where the specification defines
a set range of possible values for an attribute, that the
value is in the accepted range.
|
validateFontInfoVersion2ValueForAttribute
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def validateInfoVersion2Data(infoData):
"""
This performs very basic validation of the value for infoData
following the UFO 2 fontinfo.plist specification. The results
of this should not be interpreted as *correct* for the font
that they are part of. This merely indicates that the values
are of the proper type and, where the specification defines
a set range of possible values for an attribute, that the
value is in the accepted range.
"""
validInfoData = {}
for attr, value in list(infoData.items()):
isValidValue = validateFontInfoVersion2ValueForAttribute(attr, value)
if not isValidValue:
raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")
else:
validInfoData[attr] = value
return validInfoData
|
This performs very basic validation of the value for infoData
following the UFO 2 fontinfo.plist specification. The results
of this should not be interpreted as *correct* for the font
that they are part of. This merely indicates that the values
are of the proper type and, where the specification defines
a set range of possible values for an attribute, that the
value is in the accepted range.
|
validateInfoVersion2Data
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def validateFontInfoVersion3ValueForAttribute(attr, value):
"""
This performs very basic validation of the value for attribute
following the UFO 3 fontinfo.plist specification. The results
of this should not be interpreted as *correct* for the font
that they are part of. This merely indicates that the value
is of the proper type and, where the specification defines
a set range of possible values for an attribute, that the
value is in the accepted range.
"""
dataValidationDict = fontInfoAttributesVersion3ValueData[attr]
valueType = dataValidationDict.get("type")
validator = dataValidationDict.get("valueValidator")
valueOptions = dataValidationDict.get("valueOptions")
# have specific options for the validator
if valueOptions is not None:
isValidValue = validator(value, valueOptions)
# no specific options
else:
if validator == genericTypeValidator:
isValidValue = validator(value, valueType)
else:
isValidValue = validator(value)
return isValidValue
|
This performs very basic validation of the value for attribute
following the UFO 3 fontinfo.plist specification. The results
of this should not be interpreted as *correct* for the font
that they are part of. This merely indicates that the value
is of the proper type and, where the specification defines
a set range of possible values for an attribute, that the
value is in the accepted range.
|
validateFontInfoVersion3ValueForAttribute
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def validateInfoVersion3Data(infoData):
"""
This performs very basic validation of the value for infoData
following the UFO 3 fontinfo.plist specification. The results
of this should not be interpreted as *correct* for the font
that they are part of. This merely indicates that the values
are of the proper type and, where the specification defines
a set range of possible values for an attribute, that the
value is in the accepted range.
"""
validInfoData = {}
for attr, value in list(infoData.items()):
isValidValue = validateFontInfoVersion3ValueForAttribute(attr, value)
if not isValidValue:
raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")
else:
validInfoData[attr] = value
return validInfoData
|
This performs very basic validation of the value for infoData
following the UFO 3 fontinfo.plist specification. The results
of this should not be interpreted as *correct* for the font
that they are part of. This merely indicates that the values
are of the proper type and, where the specification defines
a set range of possible values for an attribute, that the
value is in the accepted range.
|
validateInfoVersion3Data
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def convertFontInfoValueForAttributeFromVersion1ToVersion2(attr, value):
"""
Convert value from version 1 to version 2 format.
Returns the new attribute name and the converted value.
If the value is None, None will be returned for the new value.
"""
# convert floats to ints if possible
if isinstance(value, float):
if int(value) == value:
value = int(value)
if value is not None:
if attr == "fontStyle":
v = _fontStyle1To2.get(value)
if v is None:
raise UFOLibError(
f"Cannot convert value ({value!r}) for attribute {attr}."
)
value = v
elif attr == "widthName":
v = _widthName1To2.get(value)
if v is None:
raise UFOLibError(
f"Cannot convert value ({value!r}) for attribute {attr}."
)
value = v
elif attr == "msCharSet":
v = _msCharSet1To2.get(value)
if v is None:
raise UFOLibError(
f"Cannot convert value ({value!r}) for attribute {attr}."
)
value = v
attr = fontInfoAttributesVersion1To2.get(attr, attr)
return attr, value
|
Convert value from version 1 to version 2 format.
Returns the new attribute name and the converted value.
If the value is None, None will be returned for the new value.
|
convertFontInfoValueForAttributeFromVersion1ToVersion2
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def convertFontInfoValueForAttributeFromVersion2ToVersion1(attr, value):
"""
Convert value from version 2 to version 1 format.
Returns the new attribute name and the converted value.
If the value is None, None will be returned for the new value.
"""
if value is not None:
if attr == "styleMapStyleName":
value = _fontStyle2To1.get(value)
elif attr == "openTypeOS2WidthClass":
value = _widthName2To1.get(value)
elif attr == "postscriptWindowsCharacterSet":
value = _msCharSet2To1.get(value)
attr = fontInfoAttributesVersion2To1.get(attr, attr)
return attr, value
|
Convert value from version 2 to version 1 format.
Returns the new attribute name and the converted value.
If the value is None, None will be returned for the new value.
|
convertFontInfoValueForAttributeFromVersion2ToVersion1
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def convertFontInfoValueForAttributeFromVersion2ToVersion3(attr, value):
"""
Convert value from version 2 to version 3 format.
Returns the new attribute name and the converted value.
If the value is None, None will be returned for the new value.
"""
if attr in _ufo2To3FloatToInt:
try:
value = round(value)
except (ValueError, TypeError):
raise UFOLibError("Could not convert value for %s." % attr)
if attr in _ufo2To3NonNegativeInt:
try:
value = int(abs(value))
except (ValueError, TypeError):
raise UFOLibError("Could not convert value for %s." % attr)
elif attr in _ufo2To3NonNegativeIntOrFloat:
try:
v = float(abs(value))
except (ValueError, TypeError):
raise UFOLibError("Could not convert value for %s." % attr)
if v == int(v):
v = int(v)
if v != value:
value = v
return attr, value
|
Convert value from version 2 to version 3 format.
Returns the new attribute name and the converted value.
If the value is None, None will be returned for the new value.
|
convertFontInfoValueForAttributeFromVersion2ToVersion3
|
python
|
fonttools/fonttools
|
Lib/fontTools/ufoLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ufoLib/__init__.py
|
MIT
|
def script(char):
"""Return the four-letter script code assigned to the Unicode character
'char' as a string.
>>> script("a")
'Latn'
>>> script(",")
'Zyyy'
>>> script(chr(0x10FFFF))
'Zzzz'
"""
code = byteord(char)
# 'bisect_right(a, x, lo=0, hi=len(a))' returns an insertion point which
# comes after (to the right of) any existing entries of x in a, and it
# partitions array a into two halves so that, for the left side
# all(val <= x for val in a[lo:i]), and for the right side
# all(val > x for val in a[i:hi]).
# Our 'SCRIPT_RANGES' is a sorted list of ranges (only their starting
# breakpoints); we want to use `bisect_right` to look up the range that
# contains the given codepoint: i.e. whose start is less than or equal
# to the codepoint. Thus, we subtract 1 from the index returned.
i = bisect_right(Scripts.RANGES, code)
return Scripts.VALUES[i - 1]
|
Return the four-letter script code assigned to the Unicode character
'char' as a string.
>>> script("a")
'Latn'
>>> script(",")
'Zyyy'
>>> script(chr(0x10FFFF))
'Zzzz'
|
script
|
python
|
fonttools/fonttools
|
Lib/fontTools/unicodedata/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/unicodedata/__init__.py
|
MIT
|
def script_extension(char):
"""Return the script extension property assigned to the Unicode character
'char' as a set of strings.
>>> script_extension("a") == {'Latn'}
True
>>> script_extension(chr(0x060C)) == {'Nkoo', 'Arab', 'Rohg', 'Thaa', 'Syrc', 'Gara', 'Yezi'}
True
>>> script_extension(chr(0x10FFFF)) == {'Zzzz'}
True
"""
code = byteord(char)
i = bisect_right(ScriptExtensions.RANGES, code)
value = ScriptExtensions.VALUES[i - 1]
if value is None:
# code points not explicitly listed for Script Extensions
# have as their value the corresponding Script property value
return {script(char)}
return value
|
Return the script extension property assigned to the Unicode character
'char' as a set of strings.
>>> script_extension("a") == {'Latn'}
True
>>> script_extension(chr(0x060C)) == {'Nkoo', 'Arab', 'Rohg', 'Thaa', 'Syrc', 'Gara', 'Yezi'}
True
>>> script_extension(chr(0x10FFFF)) == {'Zzzz'}
True
|
script_extension
|
python
|
fonttools/fonttools
|
Lib/fontTools/unicodedata/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/unicodedata/__init__.py
|
MIT
|
def script_name(code, default=KeyError):
"""Return the long, human-readable script name given a four-letter
Unicode script code.
If no matching name is found, a KeyError is raised by default.
You can use the 'default' argument to return a fallback value (e.g.
'Unknown' or None) instead of throwing an error.
"""
try:
return str(Scripts.NAMES[code].replace("_", " "))
except KeyError:
if isinstance(default, type) and issubclass(default, KeyError):
raise
return default
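# Hedged usage sketch (not part of the library source); "Wxyz" is a made-up code.
from fontTools.unicodedata import script_name

print(script_name("Latn"))                     # 'Latin'
print(script_name("Wxyz", default="Unknown"))  # fallback instead of KeyError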
|
Return the long, human-readable script name given a four-letter
Unicode script code.
If no matching name is found, a KeyError is raised by default.
You can use the 'default' argument to return a fallback value (e.g.
'Unknown' or None) instead of throwing an error.
|
script_name
|
python
|
fonttools/fonttools
|
Lib/fontTools/unicodedata/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/unicodedata/__init__.py
|
MIT
|
def script_code(script_name, default=KeyError):
Returns the four-letter Unicode script code from its long name.
If no matching script code is found, a KeyError is raised by default.
You can use the 'default' argument to return a fallback string (e.g.
'Zzzz' or None) instead of throwing an error.
"""
normalized_name = _normalize_property_name(script_name)
try:
return _SCRIPT_CODES[normalized_name]
except KeyError:
if isinstance(default, type) and issubclass(default, KeyError):
raise
return default
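# Hedged usage sketch (not part of the library source): round-trips with
# script_name above; "No Such Script" is a made-up name.
from fontTools.unicodedata import script_code, script_name

print(script_code("Latin"))                         # 'Latn'
print(script_code(script_name("Arab")))             # 'Arab'
print(script_code("No Such Script", default=None))  # None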
|
Returns the four-letter Unicode script code from its long name.
If no matching script code is found, a KeyError is raised by default.
You can use the 'default' argument to return a fallback string (e.g.
'Zzzz' or None) instead of throwing an error.
|
script_code
|
python
|
fonttools/fonttools
|
Lib/fontTools/unicodedata/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/unicodedata/__init__.py
|
MIT
|
def script_horizontal_direction(
script_code: str, default: T | type[KeyError] = KeyError
) -> HorizDirection | T:
"""Return "RTL" for scripts that contain right-to-left characters
according to the Bidi_Class property. Otherwise return "LTR".
"""
if script_code not in Scripts.NAMES:
if isinstance(default, type) and issubclass(default, KeyError):
raise default(script_code)
return default
return "RTL" if script_code in RTL_SCRIPTS else "LTR"
|
Return "RTL" for scripts that contain right-to-left characters
according to the Bidi_Class property. Otherwise return "LTR".
|
script_horizontal_direction
|
python
|
fonttools/fonttools
|
Lib/fontTools/unicodedata/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/unicodedata/__init__.py
|
MIT
|
def block(char):
"""Return the block property assigned to the Unicode character 'char'
as a string.
>>> block("a")
'Basic Latin'
>>> block(chr(0x060C))
'Arabic'
>>> block(chr(0xEFFFF))
'No_Block'
"""
code = byteord(char)
i = bisect_right(Blocks.RANGES, code)
return Blocks.VALUES[i - 1]
|
Return the block property assigned to the Unicode character 'char'
as a string.
>>> block("a")
'Basic Latin'
>>> block(chr(0x060C))
'Arabic'
>>> block(chr(0xEFFFF))
'No_Block'
|
block
|
python
|
fonttools/fonttools
|
Lib/fontTools/unicodedata/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/unicodedata/__init__.py
|
MIT
|
def ot_tags_from_script(script_code):
"""Return a list of OpenType script tags associated with a given
Unicode script code.
Return ['DFLT'] script tag for invalid/unknown script codes.
"""
if script_code in OTTags.SCRIPT_EXCEPTIONS:
return [OTTags.SCRIPT_EXCEPTIONS[script_code]]
if script_code not in Scripts.NAMES:
return [OTTags.DEFAULT_SCRIPT]
script_tags = [script_code[0].lower() + script_code[1:]]
if script_code in OTTags.NEW_SCRIPT_TAGS:
script_tags.extend(OTTags.NEW_SCRIPT_TAGS[script_code])
script_tags.reverse() # last in, first out
return script_tags
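# Hedged usage sketch (not part of the library source). The exact tags depend on
# the registered NEW_SCRIPT_TAGS data; newer tags are expected to come first.
from fontTools.unicodedata import ot_tags_from_script

print(ot_tags_from_script("Latn"))  # ['latn']
print(ot_tags_from_script("Deva"))  # e.g. ['dev2', 'deva']
print(ot_tags_from_script("Wxyz"))  # ['DFLT'] for a made-up code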
|
Return a list of OpenType script tags associated with a given
Unicode script code.
Return ['DFLT'] script tag for invalid/unknown script codes.
|
ot_tags_from_script
|
python
|
fonttools/fonttools
|
Lib/fontTools/unicodedata/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/unicodedata/__init__.py
|
MIT
|
def ot_tag_to_script(tag):
"""Return the Unicode script code for the given OpenType script tag, or
None for "DFLT" tag or if there is no Unicode script associated with it.
Raises ValueError if the tag is invalid.
"""
tag = tostr(tag).strip()
if not tag or " " in tag or len(tag) > 4:
raise ValueError("invalid OpenType tag: %r" % tag)
if tag in OTTags.SCRIPT_ALIASES:
tag = OTTags.SCRIPT_ALIASES[tag]
while len(tag) != 4:
tag += str(" ") # pad with spaces
if tag == OTTags.DEFAULT_SCRIPT:
# it's unclear which Unicode script the "DFLT" OpenType tag maps to,
# so here we return None
return None
if tag in OTTags.NEW_SCRIPT_TAGS_REVERSED:
return OTTags.NEW_SCRIPT_TAGS_REVERSED[tag]
if tag in OTTags.SCRIPT_EXCEPTIONS_REVERSED:
return OTTags.SCRIPT_EXCEPTIONS_REVERSED[tag]
# This side of the conversion is fully algorithmic
# Any spaces at the end of the tag are replaced by repeating the last
# letter. E.g. 'nko ' -> 'Nkoo'.
# Change first char to uppercase
script_code = tag[0].upper() + tag[1]
for i in range(2, 4):
script_code += script_code[i - 1] if tag[i] == " " else tag[i]
if script_code not in Scripts.NAMES:
return None
return script_code
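# Hedged usage sketch (not part of the library source); the 'nko ' case mirrors
# the comment above about padding with the last letter.
from fontTools.unicodedata import ot_tag_to_script

print(ot_tag_to_script("latn"))  # 'Latn'
print(ot_tag_to_script("nko "))  # 'Nkoo'
print(ot_tag_to_script("DFLT"))  # None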
|
Return the Unicode script code for the given OpenType script tag, or
None for "DFLT" tag or if there is no Unicode script associated with it.
Raises ValueError if the tag is invalid.
|
ot_tag_to_script
|
python
|
fonttools/fonttools
|
Lib/fontTools/unicodedata/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/unicodedata/__init__.py
|
MIT
|
def main(args=None):
"""Add `avar` table from designspace file to variable font."""
if args is None:
import sys
args = sys.argv[1:]
from fontTools import configLogger
from fontTools.ttLib import TTFont
from fontTools.designspaceLib import DesignSpaceDocument
import argparse
parser = argparse.ArgumentParser(
"fonttools varLib.avar",
description="Add `avar` table from designspace file to variable font.",
)
parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")
parser.add_argument(
"designspace",
metavar="family.designspace",
help="Designspace file.",
nargs="?",
default=None,
)
parser.add_argument(
"-o",
"--output-file",
type=str,
help="Output font file name.",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
options = parser.parse_args(args)
configLogger(level=("INFO" if options.verbose else "WARNING"))
font = TTFont(options.font)
if not "fvar" in font:
log.error("Not a variable font.")
return 1
if options.designspace is None:
from pprint import pprint
segments, mappings = mappings_from_avar(font)
pprint(segments)
pprint(mappings)
print(len(mappings), "mappings")
return
axisTags = [a.axisTag for a in font["fvar"].axes]
ds = load_designspace(options.designspace, require_sources=False)
if "avar" in font:
log.warning("avar table already present, overwriting.")
del font["avar"]
_add_avar(font, ds.axes, ds.axisMappings, axisTags)
if options.output_file is None:
outfile = makeOutputFileName(options.font, overWrite=True, suffix=".avar")
else:
outfile = options.output_file
if outfile:
log.info("Saving %s", outfile)
font.save(outfile)
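# Hedged usage sketch (not part of the library source; file names are
# hypothetical, and it is assumed the module can be run as a script):
#   python -m fontTools.varLib.avar MyFont-VF.ttf MyFamily.designspace -o out.ttf
# Without a designspace argument the font's existing avar segments and mappings
# are printed instead of writing a new file, e.g.:
#   main(["MyFont-VF.ttf"])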
|
Add `avar` table from designspace file to variable font.
|
main
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avar.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avar.py
|
MIT
|
def normalizeLog(value, rangeMin, rangeMax):
"""Logarithmically normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation."""
logMin = math.log(rangeMin)
logMax = math.log(rangeMax)
return (math.log(value) - logMin) / (logMax - logMin)
|
Logarithmically normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation.
|
normalizeLog
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def interpolateLog(t, a, b):
"""Logarithmic interpolation between a and b, with t typically in [0, 1]."""
logA = math.log(a)
logB = math.log(b)
return math.exp(logA + t * (logB - logA))
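# Hedged sketch (not part of the library source): interpolateLog inverts
# normalizeLog (previous snippet) over the same range, up to floating-point error.
import math

lo, hi, value = 8.0, 144.0, 14.0
t = (math.log(value) - math.log(lo)) / (math.log(hi) - math.log(lo))  # normalizeLog(value, lo, hi)
assert math.isclose(interpolateLog(t, lo, hi), value)
assert math.isclose(interpolateLog(0.0, lo, hi), lo)
assert math.isclose(interpolateLog(1.0, lo, hi), hi)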
|
Logarithmic interpolation between a and b, with t typically in [0, 1].
|
interpolateLog
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def normalizeDegrees(value, rangeMin, rangeMax):
"""Angularly normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation."""
tanMin = math.tan(math.radians(rangeMin))
tanMax = math.tan(math.radians(rangeMax))
return (math.tan(math.radians(value)) - tanMin) / (tanMax - tanMin)
|
Angularly normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation.
|
normalizeDegrees
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def measureWeight(glyphset, glyphs=None):
"""Measure the perceptual average weight of the given glyphs."""
if isinstance(glyphs, dict):
frequencies = glyphs
else:
frequencies = {g: 1 for g in glyphs}
wght_sum = wdth_sum = 0
for glyph_name in glyphs:
if frequencies is not None:
frequency = frequencies.get(glyph_name, 0)
if frequency == 0:
continue
else:
frequency = 1
glyph = glyphset[glyph_name]
pen = AreaPen(glyphset=glyphset)
glyph.draw(pen)
mult = glyph.width * frequency
wght_sum += mult * abs(pen.value)
wdth_sum += mult
return wght_sum / wdth_sum
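# Hedged usage sketch (not part of the library source; the font path and glyph
# names are hypothetical). TTFont.getGlyphSet() supplies drawable glyphs with a
# .width attribute, which is what this measurement needs.
from fontTools.ttLib import TTFont

font = TTFont("MyFont-Regular.ttf")
glyphset = font.getGlyphSet()
print(measureWeight(glyphset, ["a", "e", "n", "o"]))            # equal frequencies
print(measureWeight(glyphset, {"a": 3.0, "e": 5.0, "n": 2.0}))  # frequency-weighted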
|
Measure the perceptual average weight of the given glyphs.
|
measureWeight
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def measureWidth(glyphset, glyphs=None):
"""Measure the average width of the given glyphs."""
if isinstance(glyphs, dict):
frequencies = glyphs
else:
frequencies = {g: 1 for g in glyphs}
wdth_sum = 0
freq_sum = 0
for glyph_name in glyphs:
if frequencies is not None:
frequency = frequencies.get(glyph_name, 0)
if frequency == 0:
continue
else:
frequency = 1
glyph = glyphset[glyph_name]
pen = NullPen()
glyph.draw(pen)
wdth_sum += glyph.width * frequency
freq_sum += frequency
return wdth_sum / freq_sum
|
Measure the average width of the given glyphs.
|
measureWidth
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def measureSlant(glyphset, glyphs=None):
"""Measure the perceptual average slant angle of the given glyphs."""
if isinstance(glyphs, dict):
frequencies = glyphs
else:
frequencies = {g: 1 for g in glyphs}
slnt_sum = 0
freq_sum = 0
for glyph_name in glyphs:
if frequencies is not None:
frequency = frequencies.get(glyph_name, 0)
if frequency == 0:
continue
else:
frequency = 1
glyph = glyphset[glyph_name]
pen = StatisticsPen(glyphset=glyphset)
glyph.draw(pen)
mult = glyph.width * frequency
slnt_sum += mult * pen.slant
freq_sum += mult
return -math.degrees(math.atan(slnt_sum / freq_sum))
|
Measure the perceptual average slant angle of the given glyphs.
|
measureSlant
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def planWeightAxis(
glyphSetFunc,
axisLimits,
weights=None,
samples=None,
glyphs=None,
designLimits=None,
pins=None,
sanitize=False,
):
"""Plan a weight (`wght`) axis.
weights: A list of weight values to plan for. If None, the default
values are used.
This function simply calls planAxis with values=weights, and the appropriate
arguments. See documentation for planAxis for more information.
"""
if weights is None:
weights = WEIGHTS
return planAxis(
measureWeight,
normalizeLinear,
interpolateLog,
glyphSetFunc,
"wght",
axisLimits,
values=weights,
samples=samples,
glyphs=glyphs,
designLimits=designLimits,
pins=pins,
sanitizeFunc=sanitizeWeight if sanitize else None,
)
|
Plan a weight (`wght`) axis.
weights: A list of weight values to plan for. If None, the default
values are used.
This function simply calls planAxis with values=weights, and the appropriate
arguments. See documentation for planAxis for more information.
|
planWeightAxis
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def planWidthAxis(
glyphSetFunc,
axisLimits,
widths=None,
samples=None,
glyphs=None,
designLimits=None,
pins=None,
sanitize=False,
):
"""Plan a width (`wdth`) axis.
widths: A list of width values (percentages) to plan for. If None, the default
values are used.
This function simply calls planAxis with values=widths, and the appropriate
arguments. See documentation for planAxis for more information.
"""
if widths is None:
widths = WIDTHS
return planAxis(
measureWidth,
normalizeLinear,
interpolateLinear,
glyphSetFunc,
"wdth",
axisLimits,
values=widths,
samples=samples,
glyphs=glyphs,
designLimits=designLimits,
pins=pins,
sanitizeFunc=sanitizeWidth if sanitize else None,
)
|
Plan a width (`wdth`) axis.
widths: A list of width values (percentages) to plan for. If None, the default
values are used.
This function simply calls planAxis with values=widths, and the appropriate
arguments. See documentation for planAxis for more information.
|
planWidthAxis
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def planSlantAxis(
glyphSetFunc,
axisLimits,
slants=None,
samples=None,
glyphs=None,
designLimits=None,
pins=None,
sanitize=False,
):
"""Plan a slant (`slnt`) axis.
slants: A list of slant angles to plan for. If None, the default
values are used.
This function simply calls planAxis with values=slants, and the appropriate
arguments. See documentation for planAxis for more information.
"""
if slants is None:
slants = SLANTS
return planAxis(
measureSlant,
normalizeDegrees,
interpolateLinear,
glyphSetFunc,
"slnt",
axisLimits,
values=slants,
samples=samples,
glyphs=glyphs,
designLimits=designLimits,
pins=pins,
sanitizeFunc=sanitizeSlant if sanitize else None,
)
|
Plan a slant (`slnt`) axis.
slants: A list of slant angles to plan for. If None, the default
values are used.
This function simply calls planAxis with values=slants, and the appropriate
arguments. See documentation for planAxis for more information.
|
planSlantAxis
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def planOpticalSizeAxis(
glyphSetFunc,
axisLimits,
sizes=None,
samples=None,
glyphs=None,
designLimits=None,
pins=None,
sanitize=False,
):
"""Plan a optical-size (`opsz`) axis.
sizes: A list of optical size values to plan for. If None, the default
values are used.
This function simply calls planAxis with values=sizes, and the appropriate
arguments. See documentation for planAxis for more information.
"""
if sizes is None:
sizes = SIZES
return planAxis(
measureWeight,
normalizeLog,
interpolateLog,
glyphSetFunc,
"opsz",
axisLimits,
values=sizes,
samples=samples,
glyphs=glyphs,
designLimits=designLimits,
pins=pins,
)
|
Plan an optical-size (`opsz`) axis.
sizes: A list of optical size values to plan for. If None, the default
values are used.
This function simply calls planAxis with values=sizes, and the appropriate
arguments. See documentation for planAxis for more information.
|
planOpticalSizeAxis
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def makeDesignspaceSnippet(axisTag, axisName, axisLimit, mapping):
"""Make a designspace snippet for a single axis."""
designspaceSnippet = (
' <axis tag="%s" name="%s" minimum="%g" default="%g" maximum="%g"'
% ((axisTag, axisName) + axisLimit)
)
if mapping:
designspaceSnippet += ">\n"
else:
designspaceSnippet += "/>"
for key, value in mapping.items():
designspaceSnippet += ' <map input="%g" output="%g"/>\n' % (key, value)
if mapping:
designspaceSnippet += " </axis>"
return designspaceSnippet
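# Hedged usage sketch (not part of the library source): axisLimit is a
# (minimum, default, maximum) tuple in user units; mapping is input -> output.
snippet = makeDesignspaceSnippet(
    "wght", "Weight", (100, 400, 900), {100: 100, 400: 400, 900: 860}
)
print(snippet)  # an <axis ...> element with one <map .../> per mapping entry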
|
Make a designspace snippet for a single axis.
|
makeDesignspaceSnippet
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def addEmptyAvar(font):
"""Add an empty `avar` table to the font."""
font["avar"] = avar = newTable("avar")
for axis in font["fvar"].axes:
avar.segments[axis.axisTag] = {}
|
Add an empty `avar` table to the font.
|
addEmptyAvar
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def main(args=None):
"""Plan the standard axis mappings for a variable font"""
if args is None:
import sys
args = sys.argv[1:]
from fontTools import configLogger
from fontTools.ttLib import TTFont
import argparse
parser = argparse.ArgumentParser(
"fonttools varLib.avarPlanner",
description="Plan `avar` table for variable font",
)
parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")
parser.add_argument(
"-o",
"--output-file",
type=str,
help="Output font file name.",
)
parser.add_argument(
"--weights", type=str, help="Space-separate list of weights to generate."
)
parser.add_argument(
"--widths", type=str, help="Space-separate list of widths to generate."
)
parser.add_argument(
"--slants", type=str, help="Space-separate list of slants to generate."
)
parser.add_argument(
"--sizes", type=str, help="Space-separate list of optical-sizes to generate."
)
parser.add_argument("--samples", type=int, help="Number of samples.")
parser.add_argument(
"-s", "--sanitize", action="store_true", help="Sanitize axis limits"
)
parser.add_argument(
"-g",
"--glyphs",
type=str,
help="Space-separate list of glyphs to use for sampling.",
)
parser.add_argument(
"--weight-design-limits",
type=str,
help="min:default:max in design units for the `wght` axis.",
)
parser.add_argument(
"--width-design-limits",
type=str,
help="min:default:max in design units for the `wdth` axis.",
)
parser.add_argument(
"--slant-design-limits",
type=str,
help="min:default:max in design units for the `slnt` axis.",
)
parser.add_argument(
"--optical-size-design-limits",
type=str,
help="min:default:max in design units for the `opsz` axis.",
)
parser.add_argument(
"--weight-pins",
type=str,
help="Space-separate list of before:after pins for the `wght` axis.",
)
parser.add_argument(
"--width-pins",
type=str,
help="Space-separate list of before:after pins for the `wdth` axis.",
)
parser.add_argument(
"--slant-pins",
type=str,
help="Space-separate list of before:after pins for the `slnt` axis.",
)
parser.add_argument(
"--optical-size-pins",
type=str,
help="Space-separate list of before:after pins for the `opsz` axis.",
)
parser.add_argument(
"-p", "--plot", action="store_true", help="Plot the resulting mapping."
)
logging_group = parser.add_mutually_exclusive_group(required=False)
logging_group.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
logging_group.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off."
)
options = parser.parse_args(args)
configLogger(
level=("DEBUG" if options.verbose else "WARNING" if options.quiet else "INFO")
)
font = TTFont(options.font)
if not "fvar" in font:
log.error("Not a variable font.")
return 1
if options.glyphs is not None:
glyphs = options.glyphs.split()
if ":" in options.glyphs:
glyphs = {}
for g in options.glyphs.split():
if ":" in g:
glyph, frequency = g.split(":")
glyphs[glyph] = float(frequency)
else:
glyphs[g] = 1.0
else:
glyphs = None
designspaceSnippets = []
designspaceSnippets.append(
processAxis(
font,
planWeightAxis,
"wght",
"Weight",
values=options.weights,
samples=options.samples,
glyphs=glyphs,
designLimits=options.weight_design_limits,
pins=options.weight_pins,
sanitize=options.sanitize,
plot=options.plot,
)
)
designspaceSnippets.append(
processAxis(
font,
planWidthAxis,
"wdth",
"Width",
values=options.widths,
samples=options.samples,
glyphs=glyphs,
designLimits=options.width_design_limits,
pins=options.width_pins,
sanitize=options.sanitize,
plot=options.plot,
)
)
designspaceSnippets.append(
processAxis(
font,
planSlantAxis,
"slnt",
"Slant",
values=options.slants,
samples=options.samples,
glyphs=glyphs,
designLimits=options.slant_design_limits,
pins=options.slant_pins,
sanitize=options.sanitize,
plot=options.plot,
)
)
designspaceSnippets.append(
processAxis(
font,
planOpticalSizeAxis,
"opsz",
"OpticalSize",
values=options.sizes,
samples=options.samples,
glyphs=glyphs,
designLimits=options.optical_size_design_limits,
pins=options.optical_size_pins,
sanitize=options.sanitize,
plot=options.plot,
)
)
log.info("Designspace snippet:")
for snippet in designspaceSnippets:
if snippet:
print(snippet)
if options.output_file is None:
outfile = makeOutputFileName(options.font, overWrite=True, suffix=".avar")
else:
outfile = options.output_file
if outfile:
log.info("Saving %s", outfile)
font.save(outfile)
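# Hedged usage sketch (not part of the library source; the file name is
# hypothetical, and it is assumed the module can be run as a script):
#   python -m fontTools.varLib.avarPlanner MyFont-VF.ttf -s -p
# This plans wght/wdth/slnt/opsz mappings, prints designspace snippets, and by
# default saves the updated font next to the input with an ".avar" suffix.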
|
Plan the standard axis mappings for a variable font
|
main
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/avarPlanner.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/avarPlanner.py
|
MIT
|
def merge_PrivateDicts(top_dicts, vsindex_dict, var_model, fd_map):
"""
I step through the FontDicts in the FDArray of the varfont TopDict.
For each varfont FontDict:
* step through each key in FontDict.Private.
* For each key, step through each relevant source font Private dict, and
build a list of values to blend.
The 'relevant' source fonts are selected by first getting the right
submodel using ``vsindex_dict[vsindex]``. The indices of the
``subModel.locations`` are mapped to source font list indices by
assuming the latter order is the same as the order of the
``var_model.locations``. I can then get the index of each subModel
location in the list of ``var_model.locations``.
"""
topDict = top_dicts[0]
region_top_dicts = top_dicts[1:]
if hasattr(region_top_dicts[0], "FDArray"):
regionFDArrays = [fdTopDict.FDArray for fdTopDict in region_top_dicts]
else:
regionFDArrays = [[fdTopDict] for fdTopDict in region_top_dicts]
for fd_index, font_dict in enumerate(topDict.FDArray):
private_dict = font_dict.Private
vsindex = getattr(private_dict, "vsindex", 0)
# At the moment, no PrivateDict has a vsindex key, but let's support
# how it should work. See comment at end of
# merge_charstrings() - still need to optimize use of vsindex.
sub_model, _ = vsindex_dict[vsindex]
master_indices = []
for loc in sub_model.locations[1:]:
i = var_model.locations.index(loc) - 1
master_indices.append(i)
pds = [private_dict]
last_pd = private_dict
for ri in master_indices:
pd = get_private(regionFDArrays, fd_index, ri, fd_map)
# If the region font doesn't have this FontDict, just reference
# the last one used.
if pd is None:
pd = last_pd
else:
last_pd = pd
pds.append(pd)
num_masters = len(pds)
for key, value in private_dict.rawDict.items():
dataList = []
if key not in pd_blend_fields:
continue
if isinstance(value, list):
try:
values = [pd.rawDict[key] for pd in pds]
except KeyError:
print(
"Warning: {key} in default font Private dict is "
"missing from another font, and was "
"discarded.".format(key=key)
)
continue
try:
values = zip(*values)
except IndexError:
raise VarLibCFFDictMergeError(key, value, values)
"""
Row 0 contains the first value from each master.
Convert each row from absolute values to relative
values from the previous row.
e.g. for three masters, a list of values was:
master 0 OtherBlues = [-217,-205]
master 1 OtherBlues = [-234,-222]
master 2 OtherBlues = [-188,-176]
The call to zip() converts this to:
[(-217, -234, -188), (-205, -222, -176)]
and is converted finally to:
OtherBlues = [[-217, 17.0, 46.0], [-205, 0.0, 0.0]]
"""
prev_val_list = [0] * num_masters
any_points_differ = False
for val_list in values:
rel_list = [
(val - prev_val_list[i]) for (i, val) in enumerate(val_list)
]
if (not any_points_differ) and not allEqual(rel_list):
any_points_differ = True
prev_val_list = val_list
deltas = sub_model.getDeltas(rel_list)
# For PrivateDict BlueValues, the default font
# values are absolute, not relative to the prior value.
deltas[0] = val_list[0]
dataList.append(deltas)
# If there are no blend values, then
# we can collapse the blend lists.
if not any_points_differ:
dataList = [data[0] for data in dataList]
else:
values = [pd.rawDict[key] for pd in pds]
if not allEqual(values):
dataList = sub_model.getDeltas(values)
else:
dataList = values[0]
# Convert numbers with no decimal part to an int
if isinstance(dataList, list):
for i, item in enumerate(dataList):
if isinstance(item, list):
for j, jtem in enumerate(item):
dataList[i][j] = conv_to_int(jtem)
else:
dataList[i] = conv_to_int(item)
else:
dataList = conv_to_int(dataList)
private_dict.rawDict[key] = dataList
|
I step through the FontDicts in the FDArray of the varfont TopDict.
For each varfont FontDict:
* step through each key in FontDict.Private.
* For each key, step through each relevant source font Private dict, and
build a list of values to blend.
The 'relevant' source fonts are selected by first getting the right
submodel using ``vsindex_dict[vsindex]``. The indices of the
``subModel.locations`` are mapped to source font list indices by
assuming the latter order is the same as the order of the
``var_model.locations``. I can then get the index of each subModel
location in the list of ``var_model.locations``.
|
merge_PrivateDicts
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/cff.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/cff.py
|
MIT
|
def getfd_map(varFont, fonts_list):
"""Since a subset source font may have fewer FontDicts in their
FDArray than the default font, we have to match up the FontDicts in
the different fonts . We do this with the FDSelect array, and by
assuming that the same glyph will reference matching FontDicts in
each source font. We return a mapping from fdIndex in the default
font to a dictionary which maps each master list index of each
region font to the equivalent fdIndex in the region font."""
fd_map = {}
default_font = fonts_list[0]
region_fonts = fonts_list[1:]
num_regions = len(region_fonts)
topDict = _cff_or_cff2(default_font).cff.topDictIndex[0]
if not hasattr(topDict, "FDSelect"):
# All glyphs reference only one FontDict.
# Map the FD index for regions to index 0.
fd_map[0] = {ri: 0 for ri in range(num_regions)}
return fd_map
gname_mapping = {}
default_fdSelect = topDict.FDSelect
glyphOrder = default_font.getGlyphOrder()
for gid, fdIndex in enumerate(default_fdSelect):
gname_mapping[glyphOrder[gid]] = fdIndex
if fdIndex not in fd_map:
fd_map[fdIndex] = {}
for ri, region_font in enumerate(region_fonts):
region_glyphOrder = region_font.getGlyphOrder()
region_topDict = _cff_or_cff2(region_font).cff.topDictIndex[0]
if not hasattr(region_topDict, "FDSelect"):
# All the glyphs share the same FontDict. Pick any glyph.
default_fdIndex = gname_mapping[region_glyphOrder[0]]
fd_map[default_fdIndex][ri] = 0
else:
region_fdSelect = region_topDict.FDSelect
for gid, fdIndex in enumerate(region_fdSelect):
default_fdIndex = gname_mapping[region_glyphOrder[gid]]
region_map = fd_map[default_fdIndex]
if ri not in region_map:
region_map[ri] = fdIndex
return fd_map
|
Since a subset source font may have fewer FontDicts in its
FDArray than the default font, we have to match up the FontDicts in
the different fonts. We do this with the FDSelect array, and by
assuming that the same glyph will reference matching FontDicts in
each source font. We return a mapping from fdIndex in the default
font to a dictionary which maps each master list index of each
region font to the equivalent fdIndex in the region font.
|
getfd_map
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/cff.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/cff.py
|
MIT
|
def reorder_blend_args(self, commands, get_delta_func):
"""
We first re-order the master coordinate values.
For a moveto to lineto, the args are now arranged as::
[ [master_0 x,y], [master_1 x,y], [master_2 x,y] ]
We re-arrange this to::
[ [master_0 x, master_1 x, master_2 x],
[master_0 y, master_1 y, master_2 y]
]
If the master values are all the same, we collapse the list to
a single value instead of a list.
We then convert this to::
[ [master_0 x] + [x delta tuple] + [numBlends=1]
[master_0 y] + [y delta tuple] + [numBlends=1]
]
"""
for cmd in commands:
# args[i] is the list of arguments for this operator from master i.
args = cmd[1]
m_args = zip(*args)
# m_args[n] is now the tuple of the n'th argument from every master
# for this operation.
cmd[1] = list(m_args)
lastOp = None
for cmd in commands:
op = cmd[0]
# masks are represented by two cmd's: first has only op names,
# second has only args.
if lastOp in ["hintmask", "cntrmask"]:
coord = list(cmd[1])
if not allEqual(coord):
raise VarLibMergeError(
"Hintmask values cannot differ between source fonts."
)
cmd[1] = [coord[0][0]]
else:
coords = cmd[1]
new_coords = []
for coord in coords:
if allEqual(coord):
new_coords.append(coord[0])
else:
# convert to deltas
deltas = get_delta_func(coord)[1:]
coord = [coord[0]] + deltas
coord.append(1)
new_coords.append(coord)
cmd[1] = new_coords
lastOp = op
return commands
|
We first re-order the master coordinate values.
For a moveto to lineto, the args are now arranged as::
[ [master_0 x,y], [master_1 x,y], [master_2 x,y] ]
We re-arrange this to::
[ [master_0 x, master_1 x, master_2 x],
[master_0 y, master_1 y, master_2 y]
]
If the master values are all the same, we collapse the list to
a single value instead of a list.
We then convert this to::
[ [master_0 x] + [x delta tuple] + [numBlends=1]
[master_0 y] + [y delta tuple] + [numBlends=1]
]
|
reorder_blend_args
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/cff.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/cff.py
|
MIT
|
def overlayBox(top, bot):
"""Overlays ``top`` box on top of ``bot`` box.
Returns two items:
* Box for intersection of ``top`` and ``bot``, or None if they don't intersect.
* Box for remainder of ``bot``. Remainder box might not be exact (since the
remainder might not be a simple box), but is inclusive of the exact
remainder.
"""
# Intersection
intersection = {}
intersection.update(top)
intersection.update(bot)
for axisTag in set(top) & set(bot):
min1, max1 = top[axisTag]
min2, max2 = bot[axisTag]
minimum = max(min1, min2)
maximum = min(max1, max2)
if not minimum < maximum:
return None, bot # Do not intersect
intersection[axisTag] = minimum, maximum
# Remainder
#
# Remainder is empty if each of bot's axis ranges lies within that of the
# intersection.
#
# Remainder is shrunk if every one of bot's axis ranges, except for exactly one,
# lies within that of the intersection, and that one axis extrudes out of the
# intersection on only one side.
#
# Bot is returned in full as remainder otherwise, as true remainder is not
# representable as a single box.
remainder = dict(bot)
extruding = False
fullyInside = True
for axisTag in top:
if axisTag in bot:
continue
extruding = True
fullyInside = False
break
for axisTag in bot:
if axisTag not in top:
continue # Axis range lies fully within
min1, max1 = intersection[axisTag]
min2, max2 = bot[axisTag]
if min1 <= min2 and max2 <= max1:
continue # Axis range lies fully within
# Bot's range doesn't fully lie within that of top's for this axis.
# We know they intersect, so it cannot lie fully without either; so they
# overlap.
# If we have had an overlapping axis before, remainder is not
# representable as a box, so return full bottom and go home.
if extruding:
return intersection, bot
extruding = True
fullyInside = False
# Otherwise, cut remainder on this axis and continue.
if min1 <= min2:
# Right side survives.
minimum = max(max1, min2)
maximum = max2
elif max2 <= max1:
# Left side survives.
minimum = min2
maximum = min(min1, max2)
else:
# Remainder leaks out from both sides. Can't cut either.
return intersection, bot
remainder[axisTag] = minimum, maximum
if fullyInside:
# bot is fully within intersection. Remainder is empty.
return intersection, None
return intersection, remainder
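# Hedged sketch (not part of the library source): boxes map axis tags to
# (min, max) ranges in normalized coordinates.
top = {"wght": (0.5, 1.0)}
bot = {"wght": (0.0, 1.0)}
print(overlayBox(top, bot))
# expected per the logic above: ({'wght': (0.5, 1.0)}, {'wght': (0.0, 0.5)})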
|
Overlays ``top`` box on top of ``bot`` box.
Returns two items:
* Box for intersection of ``top`` and ``bot``, or None if they don't intersect.
* Box for remainder of ``bot``. Remainder box might not be exact (since the
remainder might not be a simple box), but is inclusive of the exact
remainder.
|
overlayBox
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/featureVars.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/featureVars.py
|
MIT
|
def addFeatureVariationsRaw(font, table, conditionalSubstitutions, featureTag="rvrn"):
"""Low level implementation of addFeatureVariations that directly
models the possibilities of the FeatureVariations table."""
featureTags = [featureTag] if isinstance(featureTag, str) else sorted(featureTag)
processLast = "rvrn" not in featureTags or len(featureTags) > 1
#
# if a <featureTag> feature is not present:
# make empty <featureTag> feature
# sort features, get <featureTag> feature index
# add <featureTag> feature to all scripts
# if a <featureTag> feature is present:
# reuse <featureTag> feature index
# make lookups
# add feature variations
#
if table.Version < 0x00010001:
table.Version = 0x00010001 # allow table.FeatureVariations
varFeatureIndices = set()
existingTags = {
feature.FeatureTag
for feature in table.FeatureList.FeatureRecord
if feature.FeatureTag in featureTags
}
newTags = set(featureTags) - existingTags
if newTags:
varFeatures = []
for featureTag in sorted(newTags):
varFeature = buildFeatureRecord(featureTag, [])
table.FeatureList.FeatureRecord.append(varFeature)
varFeatures.append(varFeature)
table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord)
sortFeatureList(table)
for varFeature in varFeatures:
varFeatureIndex = table.FeatureList.FeatureRecord.index(varFeature)
for scriptRecord in table.ScriptList.ScriptRecord:
if scriptRecord.Script.DefaultLangSys is None:
# We need to have a default LangSys to attach variations to.
langSys = ot.LangSys()
langSys.LookupOrder = None
langSys.ReqFeatureIndex = 0xFFFF
langSys.FeatureIndex = []
langSys.FeatureCount = 0
scriptRecord.Script.DefaultLangSys = langSys
langSystems = [lsr.LangSys for lsr in scriptRecord.Script.LangSysRecord]
for langSys in [scriptRecord.Script.DefaultLangSys] + langSystems:
langSys.FeatureIndex.append(varFeatureIndex)
langSys.FeatureCount = len(langSys.FeatureIndex)
varFeatureIndices.add(varFeatureIndex)
if existingTags:
# indices may have changed if we inserted new features and sorted feature list
# so we must do this after the above
varFeatureIndices.update(
index
for index, feature in enumerate(table.FeatureList.FeatureRecord)
if feature.FeatureTag in existingTags
)
axisIndices = {
axis.axisTag: axisIndex for axisIndex, axis in enumerate(font["fvar"].axes)
}
hasFeatureVariations = (
hasattr(table, "FeatureVariations") and table.FeatureVariations is not None
)
featureVariationRecords = []
for conditionSet, lookupIndices in conditionalSubstitutions:
conditionTable = []
for axisTag, (minValue, maxValue) in sorted(conditionSet.items()):
if minValue > maxValue:
raise VarLibValidationError(
"A condition set has a minimum value above the maximum value."
)
ct = buildConditionTable(axisIndices[axisTag], minValue, maxValue)
conditionTable.append(ct)
records = []
for varFeatureIndex in sorted(varFeatureIndices):
existingLookupIndices = table.FeatureList.FeatureRecord[
varFeatureIndex
].Feature.LookupListIndex
combinedLookupIndices = (
existingLookupIndices + lookupIndices
if processLast
else lookupIndices + existingLookupIndices
)
records.append(
buildFeatureTableSubstitutionRecord(
varFeatureIndex, combinedLookupIndices
)
)
if hasFeatureVariations and (
fvr := findFeatureVariationRecord(table.FeatureVariations, conditionTable)
):
fvr.FeatureTableSubstitution.SubstitutionRecord.extend(records)
fvr.FeatureTableSubstitution.SubstitutionCount = len(
fvr.FeatureTableSubstitution.SubstitutionRecord
)
else:
featureVariationRecords.append(
buildFeatureVariationRecord(conditionTable, records)
)
if hasFeatureVariations:
if table.FeatureVariations.Version != 0x00010000:
raise VarLibError(
"Unsupported FeatureVariations table version: "
f"0x{table.FeatureVariations.Version:08x} (expected 0x00010000)."
)
table.FeatureVariations.FeatureVariationRecord.extend(featureVariationRecords)
table.FeatureVariations.FeatureVariationCount = len(
table.FeatureVariations.FeatureVariationRecord
)
else:
table.FeatureVariations = buildFeatureVariations(featureVariationRecords)
|
Low level implementation of addFeatureVariations that directly
models the possibilities of the FeatureVariations table.
|
addFeatureVariationsRaw
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/featureVars.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/featureVars.py
|
MIT
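A hedged sketch of the input this low-level function consumes; the axis tags, ranges, and lookup indices are illustrative.
# conditionalSubstitutions is a list of (conditionSet, lookupIndices) pairs:
# conditionSet maps an fvar axis tag to a (min, max) range in normalized
# [-1, 1] coordinates, and lookupIndices point into GSUB's LookupList,
# built beforehand.
conditionalSubstitutions = [
    ({"wght": (0.5, 1.0)}, [42]),
    ({"wght": (0.5, 1.0), "wdth": (-1.0, 0.0)}, [42, 43]),
]
# addFeatureVariationsRaw(font, font["GSUB"].table, conditionalSubstitutions,
#                         featureTag="rvrn")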
|
def makeSubstitutionsHashable(conditionalSubstitutions):
"""Turn all the substitution dictionaries in sorted tuples of tuples so
they are hashable, to detect duplicates so we don't write out redundant
data."""
allSubstitutions = set()
condSubst = []
for conditionSet, substitutionMaps in conditionalSubstitutions:
substitutions = []
for substitutionMap in substitutionMaps:
subst = tuple(sorted(substitutionMap.items()))
substitutions.append(subst)
allSubstitutions.add(subst)
condSubst.append((conditionSet, substitutions))
return condSubst, sorted(allSubstitutions)
|
Turn all the substitution dictionaries into sorted tuples of tuples so
they are hashable, to detect duplicates so we don't write out redundant
data.
|
makeSubstitutionsHashable
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/featureVars.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/featureVars.py
|
MIT
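A small worked example of the transformation described above, assuming the function defined in this record is in scope; the glyph names are illustrative.
condSubst, allSubstitutions = makeSubstitutionsHashable(
    [({"wght": (0.5, 1.0)}, [{"dollar": "dollar.alt"}])]
)
# Each substitution dict becomes a sorted tuple of (glyph, replacement) pairs,
# and the deduplicated substitutions come back as a sorted list.
assert condSubst == [({"wght": (0.5, 1.0)}, [(("dollar", "dollar.alt"),)])]
assert allSubstitutions == [(("dollar", "dollar.alt"),)]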
|
def buildSubstitutionLookups(gsub, allSubstitutions, processLast=False):
"""Build the lookups for the glyph substitutions, return a dict mapping
the substitution to lookup indices."""
# Insert lookups at the beginning of the lookup vector
# https://github.com/googlefonts/fontmake/issues/950
firstIndex = len(gsub.LookupList.Lookup) if processLast else 0
lookupMap = {}
for i, substitutionMap in enumerate(allSubstitutions):
lookupMap[substitutionMap] = firstIndex + i
if not processLast:
# Shift all lookup indices in gsub by len(allSubstitutions)
shift = len(allSubstitutions)
visitor = ShifterVisitor(shift)
visitor.visit(gsub.FeatureList.FeatureRecord)
visitor.visit(gsub.LookupList.Lookup)
for i, subst in enumerate(allSubstitutions):
substMap = dict(subst)
lookup = buildLookup([buildSingleSubstSubtable(substMap)])
if processLast:
gsub.LookupList.Lookup.append(lookup)
else:
gsub.LookupList.Lookup.insert(i, lookup)
assert gsub.LookupList.Lookup[lookupMap[subst]] is lookup
gsub.LookupList.LookupCount = len(gsub.LookupList.Lookup)
return lookupMap
|
Build the lookups for the glyph substitutions, return a dict mapping
the substitution to lookup indices.
|
buildSubstitutionLookups
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/featureVars.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/featureVars.py
|
MIT
|
def findFeatureVariationRecord(featureVariations, conditionTable):
"""Find a FeatureVariationRecord that has the same conditionTable."""
if featureVariations.Version != 0x00010000:
raise VarLibError(
"Unsupported FeatureVariations table version: "
f"0x{featureVariations.Version:08x} (expected 0x00010000)."
)
for fvr in featureVariations.FeatureVariationRecord:
if conditionTable == fvr.ConditionSet.ConditionTable:
return fvr
return None
|
Find a FeatureVariationRecord that has the same conditionTable.
|
findFeatureVariationRecord
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/featureVars.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/featureVars.py
|
MIT
|
def sortFeatureList(table):
"""Sort the feature list by feature tag, and remap the feature indices
elsewhere. This is needed after the feature list has been modified.
"""
# decorate, sort, undecorate, because we need to make an index remapping table
tagIndexFea = [
(fea.FeatureTag, index, fea)
for index, fea in enumerate(table.FeatureList.FeatureRecord)
]
tagIndexFea.sort()
table.FeatureList.FeatureRecord = [fea for tag, index, fea in tagIndexFea]
featureRemap = dict(
zip([index for tag, index, fea in tagIndexFea], range(len(tagIndexFea)))
)
# Remap the feature indices
remapFeatures(table, featureRemap)
|
Sort the feature list by feature tag, and remap the feature indices
elsewhere. This is needed after the feature list has been modified.
|
sortFeatureList
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/featureVars.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/featureVars.py
|
MIT
|
def remapFeatures(table, featureRemap):
"""Go through the scripts list, and remap feature indices."""
for scriptIndex, script in enumerate(table.ScriptList.ScriptRecord):
defaultLangSys = script.Script.DefaultLangSys
if defaultLangSys is not None:
_remapLangSys(defaultLangSys, featureRemap)
for langSysRecordIndex, langSysRec in enumerate(script.Script.LangSysRecord):
langSys = langSysRec.LangSys
_remapLangSys(langSys, featureRemap)
if hasattr(table, "FeatureVariations") and table.FeatureVariations is not None:
for fvr in table.FeatureVariations.FeatureVariationRecord:
for ftsr in fvr.FeatureTableSubstitution.SubstitutionRecord:
ftsr.FeatureIndex = featureRemap[ftsr.FeatureIndex]
|
Go through the scripts list, and remap feature indices.
|
remapFeatures
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/featureVars.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/featureVars.py
|
MIT
|
def main(args=None):
"""Add `HVAR` table to variable font."""
if args is None:
import sys
args = sys.argv[1:]
from fontTools import configLogger
from fontTools.designspaceLib import DesignSpaceDocument
import argparse
parser = argparse.ArgumentParser(
"fonttools varLib.hvar",
description="Add `HVAR` table from to variable font.",
)
parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")
parser.add_argument(
"-o",
"--output-file",
type=str,
help="Output font file name.",
)
options = parser.parse_args(args)
configLogger(level="WARNING")
font = TTFont(options.font)
if not "fvar" in font:
log.error("Not a variable font.")
return 1
add_HVAR(font)
if "vmtx" in font:
add_VVAR(font)
if options.output_file is None:
outfile = makeOutputFileName(options.font, overWrite=True, suffix=".hvar")
else:
outfile = options.output_file
if outfile:
log.info("Saving %s", outfile)
font.save(outfile)
|
Add `HVAR` table to variable font.
|
main
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/hvar.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/hvar.py
|
MIT
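A hedged usage sketch of the entry point above; the font file names are illustrative.
# Programmatic equivalent of `python -m fontTools.varLib.hvar MyFont-VF.ttf`.
from fontTools.varLib.hvar import main
main(["MyFont-VF.ttf", "-o", "MyFont-VF.hvar.ttf"])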
|
def main(args=None):
"""Test for interpolatability issues between fonts"""
import argparse
import sys
parser = argparse.ArgumentParser(
"fonttools varLib.interpolatable",
description=main.__doc__,
)
parser.add_argument(
"--glyphs",
action="store",
help="Space-separate name of glyphs to check",
)
parser.add_argument(
"--show-all",
action="store_true",
help="Show all glyph pairs, even if no problems are found",
)
parser.add_argument(
"--tolerance",
action="store",
type=float,
help="Error tolerance. Between 0 and 1. Default %s" % DEFAULT_TOLERANCE,
)
parser.add_argument(
"--kinkiness",
action="store",
type=float,
help="How aggressively report kinks. Default %s" % DEFAULT_KINKINESS,
)
parser.add_argument(
"--json",
action="store_true",
help="Output report in JSON format",
)
parser.add_argument(
"--pdf",
action="store",
help="Output report in PDF format",
)
parser.add_argument(
"--ps",
action="store",
help="Output report in PostScript format",
)
parser.add_argument(
"--html",
action="store",
help="Output report in HTML format",
)
parser.add_argument(
"--quiet",
action="store_true",
help="Only exit with code 1 or 0, no output",
)
parser.add_argument(
"--output",
action="store",
help="Output file for the problem report; Default: stdout",
)
parser.add_argument(
"--ignore-missing",
action="store_true",
help="Will not report glyphs missing from sparse masters as errors",
)
parser.add_argument(
"inputs",
metavar="FILE",
type=str,
nargs="+",
help="Input a single variable font / DesignSpace / Glyphs file, or multiple TTF/UFO files",
)
parser.add_argument(
"--name",
metavar="NAME",
type=str,
action="append",
help="Name of the master to use in the report. If not provided, all are used.",
)
parser.add_argument("-v", "--verbose", action="store_true", help="Run verbosely.")
parser.add_argument("--debug", action="store_true", help="Run with debug output.")
args = parser.parse_args(args)
from fontTools import configLogger
configLogger(level=("INFO" if args.verbose else "WARNING"))
if args.debug:
configLogger(level="DEBUG")
glyphs = args.glyphs.split() if args.glyphs else None
from os.path import basename
fonts = []
names = []
locations = []
discrete_axes = set()
upem = DEFAULT_UPEM
original_args_inputs = tuple(args.inputs)
if len(args.inputs) == 1:
designspace = None
if args.inputs[0].endswith(".designspace"):
from fontTools.designspaceLib import DesignSpaceDocument
designspace = DesignSpaceDocument.fromfile(args.inputs[0])
args.inputs = [master.path for master in designspace.sources]
locations = [master.location for master in designspace.sources]
discrete_axes = {
a.name for a in designspace.axes if not hasattr(a, "minimum")
}
axis_triples = {
a.name: (a.minimum, a.default, a.maximum)
for a in designspace.axes
if a.name not in discrete_axes
}
axis_mappings = {a.name: a.map for a in designspace.axes}
axis_triples = {
k: tuple(piecewiseLinearMap(v, dict(axis_mappings[k])) for v in vv)
for k, vv in axis_triples.items()
}
elif args.inputs[0].endswith((".glyphs", ".glyphspackage")):
from glyphsLib import GSFont, to_designspace
gsfont = GSFont(args.inputs[0])
upem = gsfont.upm
designspace = to_designspace(gsfont)
fonts = [source.font for source in designspace.sources]
names = ["%s-%s" % (f.info.familyName, f.info.styleName) for f in fonts]
args.inputs = []
locations = [master.location for master in designspace.sources]
axis_triples = {
a.name: (a.minimum, a.default, a.maximum) for a in designspace.axes
}
axis_mappings = {a.name: a.map for a in designspace.axes}
axis_triples = {
k: tuple(piecewiseLinearMap(v, dict(axis_mappings[k])) for v in vv)
for k, vv in axis_triples.items()
}
elif args.inputs[0].endswith(".ttf") or args.inputs[0].endswith(".otf"):
from fontTools.ttLib import TTFont
# Is variable font?
font = TTFont(args.inputs[0])
upem = font["head"].unitsPerEm
fvar = font["fvar"]
axisMapping = {}
for axis in fvar.axes:
axisMapping[axis.axisTag] = {
-1: axis.minValue,
0: axis.defaultValue,
1: axis.maxValue,
}
normalized = False
if "avar" in font:
avar = font["avar"]
if getattr(avar.table, "VarStore", None):
axisMapping = {tag: {-1: -1, 0: 0, 1: 1} for tag in axisMapping}
normalized = True
else:
for axisTag, segments in avar.segments.items():
fvarMapping = axisMapping[axisTag].copy()
for location, value in segments.items():
axisMapping[axisTag][value] = piecewiseLinearMap(
location, fvarMapping
)
# Gather all glyphs at their "master" locations
ttGlyphSets = {}
glyphsets = defaultdict(dict)
if "gvar" in font:
gvar = font["gvar"]
glyf = font["glyf"]
if glyphs is None:
glyphs = sorted(gvar.variations.keys())
for glyphname in glyphs:
for var in gvar.variations[glyphname]:
locDict = {}
loc = []
for tag, val in sorted(var.axes.items()):
locDict[tag] = val[1]
loc.append((tag, val[1]))
locTuple = tuple(loc)
if locTuple not in ttGlyphSets:
ttGlyphSets[locTuple] = font.getGlyphSet(
location=locDict, normalized=True, recalcBounds=False
)
recursivelyAddGlyph(
glyphname, glyphsets[locTuple], ttGlyphSets[locTuple], glyf
)
elif "CFF2" in font:
fvarAxes = font["fvar"].axes
cff2 = font["CFF2"].cff.topDictIndex[0]
charstrings = cff2.CharStrings
if glyphs is None:
glyphs = sorted(charstrings.keys())
for glyphname in glyphs:
cs = charstrings[glyphname]
private = cs.private
# Extract vsindex for the glyph
vsindices = {getattr(private, "vsindex", 0)}
vsindex = getattr(private, "vsindex", 0)
last_op = 0
# The spec says vsindex can only appear once and must be the first
# operator in the charstring, but we support multiple.
# https://github.com/harfbuzz/boring-expansion-spec/issues/158
                for op in cs.program:
if op == "blend":
vsindices.add(vsindex)
elif op == "vsindex":
assert isinstance(last_op, int)
vsindex = last_op
last_op = op
if not hasattr(private, "vstore"):
continue
varStore = private.vstore.otVarStore
for vsindex in vsindices:
varData = varStore.VarData[vsindex]
for regionIndex in varData.VarRegionIndex:
region = varStore.VarRegionList.Region[regionIndex]
locDict = {}
loc = []
for axisIndex, axis in enumerate(region.VarRegionAxis):
tag = fvarAxes[axisIndex].axisTag
val = axis.PeakCoord
locDict[tag] = val
loc.append((tag, val))
locTuple = tuple(loc)
if locTuple not in ttGlyphSets:
ttGlyphSets[locTuple] = font.getGlyphSet(
location=locDict,
normalized=True,
recalcBounds=False,
)
glyphset = glyphsets[locTuple]
glyphset[glyphname] = ttGlyphSets[locTuple][glyphname]
names = ["''"]
fonts = [font.getGlyphSet()]
locations = [{}]
axis_triples = {a: (-1, 0, +1) for a in sorted(axisMapping.keys())}
for locTuple in sorted(glyphsets.keys(), key=lambda v: (len(v), v)):
name = (
"'"
+ " ".join(
"%s=%s"
% (
k,
floatToFixedToStr(
piecewiseLinearMap(v, axisMapping[k]), 14
),
)
for k, v in locTuple
)
+ "'"
)
if normalized:
name += " (normalized)"
names.append(name)
fonts.append(glyphsets[locTuple])
locations.append(dict(locTuple))
args.ignore_missing = True
args.inputs = []
if not locations:
locations = [{} for _ in fonts]
for filename in args.inputs:
if filename.endswith(".ufo"):
from fontTools.ufoLib import UFOReader
font = UFOReader(filename)
info = SimpleNamespace()
font.readInfo(info)
upem = info.unitsPerEm
fonts.append(font)
else:
from fontTools.ttLib import TTFont
font = TTFont(filename)
upem = font["head"].unitsPerEm
fonts.append(font)
names.append(basename(filename).rsplit(".", 1)[0])
if len(fonts) < 2:
log.warning("Font file does not seem to be variable. Nothing to check.")
return
glyphsets = []
for font in fonts:
if hasattr(font, "getGlyphSet"):
glyphset = font.getGlyphSet()
else:
glyphset = font
glyphsets.append({k: glyphset[k] for k in glyphset.keys()})
if args.name:
accepted_names = set(args.name)
glyphsets = [
glyphset
for name, glyphset in zip(names, glyphsets)
if name in accepted_names
]
locations = [
location
for name, location in zip(names, locations)
if name in accepted_names
]
names = [name for name in names if name in accepted_names]
if not glyphs:
glyphs = sorted(set([gn for glyphset in glyphsets for gn in glyphset.keys()]))
glyphsSet = set(glyphs)
for glyphset in glyphsets:
glyphSetGlyphNames = set(glyphset.keys())
diff = glyphsSet - glyphSetGlyphNames
if diff:
for gn in diff:
glyphset[gn] = None
# Normalize locations
locations = [
{
**normalizeLocation(loc, axis_triples),
**{k: v for k, v in loc.items() if k in discrete_axes},
}
for loc in locations
]
tolerance = args.tolerance or DEFAULT_TOLERANCE
kinkiness = args.kinkiness if args.kinkiness is not None else DEFAULT_KINKINESS
try:
log.info("Running on %d glyphsets", len(glyphsets))
log.info("Locations: %s", pformat(locations))
problems_gen = test_gen(
glyphsets,
glyphs=glyphs,
names=names,
locations=locations,
upem=upem,
ignore_missing=args.ignore_missing,
tolerance=tolerance,
kinkiness=kinkiness,
show_all=args.show_all,
discrete_axes=discrete_axes,
)
problems = defaultdict(list)
f = (
sys.stdout
if args.output is None
else open(ensure_parent_dir(args.output), "w")
)
if not args.quiet:
if args.json:
import json
for glyphname, problem in problems_gen:
problems[glyphname].append(problem)
print(json.dumps(problems), file=f)
else:
last_glyphname = None
for glyphname, p in problems_gen:
problems[glyphname].append(p)
if glyphname != last_glyphname:
print(f"Glyph {glyphname} was not compatible:", file=f)
last_glyphname = glyphname
last_master_idxs = None
master_idxs = (
(p["master_idx"],)
if "master_idx" in p
else (p["master_1_idx"], p["master_2_idx"])
)
if master_idxs != last_master_idxs:
master_names = (
(p["master"],)
if "master" in p
else (p["master_1"], p["master_2"])
)
print(f" Masters: %s:" % ", ".join(master_names), file=f)
last_master_idxs = master_idxs
if p["type"] == InterpolatableProblem.MISSING:
print(
" Glyph was missing in master %s" % p["master"], file=f
)
elif p["type"] == InterpolatableProblem.OPEN_PATH:
print(
" Glyph has an open path in master %s" % p["master"],
file=f,
)
elif p["type"] == InterpolatableProblem.PATH_COUNT:
print(
" Path count differs: %i in %s, %i in %s"
% (
p["value_1"],
p["master_1"],
p["value_2"],
p["master_2"],
),
file=f,
)
elif p["type"] == InterpolatableProblem.NODE_COUNT:
print(
" Node count differs in path %i: %i in %s, %i in %s"
% (
p["path"],
p["value_1"],
p["master_1"],
p["value_2"],
p["master_2"],
),
file=f,
)
elif p["type"] == InterpolatableProblem.NODE_INCOMPATIBILITY:
print(
" Node %o incompatible in path %i: %s in %s, %s in %s"
% (
p["node"],
p["path"],
p["value_1"],
p["master_1"],
p["value_2"],
p["master_2"],
),
file=f,
)
elif p["type"] == InterpolatableProblem.CONTOUR_ORDER:
print(
" Contour order differs: %s in %s, %s in %s"
% (
p["value_1"],
p["master_1"],
p["value_2"],
p["master_2"],
),
file=f,
)
elif p["type"] == InterpolatableProblem.WRONG_START_POINT:
print(
" Contour %d start point differs: %s in %s, %s in %s; reversed: %s"
% (
p["contour"],
p["value_1"],
p["master_1"],
p["value_2"],
p["master_2"],
p["reversed"],
),
file=f,
)
elif p["type"] == InterpolatableProblem.UNDERWEIGHT:
print(
" Contour %d interpolation is underweight: %s, %s"
% (
p["contour"],
p["master_1"],
p["master_2"],
),
file=f,
)
elif p["type"] == InterpolatableProblem.OVERWEIGHT:
print(
" Contour %d interpolation is overweight: %s, %s"
% (
p["contour"],
p["master_1"],
p["master_2"],
),
file=f,
)
elif p["type"] == InterpolatableProblem.KINK:
print(
" Contour %d has a kink at %s: %s, %s"
% (
p["contour"],
p["value"],
p["master_1"],
p["master_2"],
),
file=f,
)
elif p["type"] == InterpolatableProblem.NOTHING:
print(
" Showing %s and %s"
% (
p["master_1"],
p["master_2"],
),
file=f,
)
else:
for glyphname, problem in problems_gen:
problems[glyphname].append(problem)
problems = sort_problems(problems)
for p in "ps", "pdf":
arg = getattr(args, p)
if arg is None:
continue
log.info("Writing %s to %s", p.upper(), arg)
from .interpolatablePlot import InterpolatablePS, InterpolatablePDF
PlotterClass = InterpolatablePS if p == "ps" else InterpolatablePDF
with PlotterClass(
ensure_parent_dir(arg), glyphsets=glyphsets, names=names
) as doc:
doc.add_title_page(
original_args_inputs, tolerance=tolerance, kinkiness=kinkiness
)
if problems:
doc.add_summary(problems)
doc.add_problems(problems)
if not problems and not args.quiet:
doc.draw_cupcake()
if problems:
doc.add_index()
doc.add_table_of_contents()
if args.html:
log.info("Writing HTML to %s", args.html)
from .interpolatablePlot import InterpolatableSVG
svgs = []
glyph_starts = {}
with InterpolatableSVG(svgs, glyphsets=glyphsets, names=names) as svg:
svg.add_title_page(
original_args_inputs,
show_tolerance=False,
tolerance=tolerance,
kinkiness=kinkiness,
)
for glyph, glyph_problems in problems.items():
glyph_starts[len(svgs)] = glyph
svg.add_problems(
{glyph: glyph_problems},
show_tolerance=False,
show_page_number=False,
)
if not problems and not args.quiet:
svg.draw_cupcake()
import base64
with open(ensure_parent_dir(args.html), "wb") as f:
f.write(b"<!DOCTYPE html>\n")
f.write(
b'<html><body align="center" style="font-family: sans-serif; text-color: #222">\n'
)
f.write(b"<title>fonttools varLib.interpolatable report</title>\n")
for i, svg in enumerate(svgs):
if i in glyph_starts:
f.write(f"<h1>Glyph {glyph_starts[i]}</h1>\n".encode("utf-8"))
f.write("<img src='data:image/svg+xml;base64,".encode("utf-8"))
f.write(base64.b64encode(svg))
f.write(b"' />\n")
f.write(b"<hr>\n")
f.write(b"</body></html>\n")
except Exception as e:
e.args += original_args_inputs
log.error(e)
raise
if problems:
return problems
|
Test for interpolatability issues between fonts
|
main
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/interpolatable.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/interpolatable.py
|
MIT
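A hedged usage sketch of the checker above; the input font and report paths are illustrative.
# Programmatic equivalent of
# `fonttools varLib.interpolatable MyFont-VF.ttf --html report.html`.
from fontTools.varLib.interpolatable import main
problems = main(["MyFont-VF.ttf", "--html", "report.html", "--quiet"])
# `problems` maps glyph names to problem records when issues are found;
# otherwise the function returns None.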
|
def sort_problems(problems):
"""Sort problems by severity, then by glyph name, then by problem message."""
return dict(
sorted(
problems.items(),
key=lambda _: -min(
(
(InterpolatableProblem.severity[p["type"]] + p.get("tolerance", 0))
for p in _[1]
),
),
reverse=True,
)
)
|
Sort problems by severity, then by glyph name, then by problem message.
|
sort_problems
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/interpolatableHelpers.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/interpolatableHelpers.py
|
MIT
|
def interpolate_layout(designspace, loc, master_finder=lambda s: s, mapped=False):
"""
Interpolate GPOS from a designspace file and location.
    If master_finder is set, it should be a callable that takes a master
    filename as found in the designspace file and maps it to the master font
    binary to be opened (e.g. .ttf or .otf).
If mapped is False (default), then location is mapped using the
map element of the axes in designspace file. If mapped is True,
it is assumed that location is in designspace's internal space and
no mapping is performed.
"""
if hasattr(designspace, "sources"): # Assume a DesignspaceDocument
pass
else: # Assume a file path
from fontTools.designspaceLib import DesignSpaceDocument
designspace = DesignSpaceDocument.fromfile(designspace)
ds = load_designspace(designspace)
log.info("Building interpolated font")
log.info("Loading master fonts")
master_fonts = load_masters(designspace, master_finder)
font = deepcopy(master_fonts[ds.base_idx])
log.info("Location: %s", pformat(loc))
if not mapped:
loc = {name: ds.axes[name].map_forward(v) for name, v in loc.items()}
log.info("Internal location: %s", pformat(loc))
loc = models.normalizeLocation(loc, ds.internal_axis_supports)
log.info("Normalized location: %s", pformat(loc))
# Assume single-model for now.
model = models.VariationModel(ds.normalized_master_locs)
assert 0 == model.mapping[ds.base_idx]
merger = InstancerMerger(font, model, loc)
log.info("Building interpolated tables")
# TODO GSUB/GDEF
merger.mergeTables(font, master_fonts, ["GPOS"])
return font
|
Interpolate GPOS from a designspace file and location.
If master_finder is set, it should be a callable that takes a master
filename as found in the designspace file and maps it to the master font
binary to be opened (e.g. .ttf or .otf).
If mapped is False (default), then location is mapped using the
map element of the axes in designspace file. If mapped is True,
it is assumed that location is in designspace's internal space and
no mapping is performed.
|
interpolate_layout
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/interpolate_layout.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/interpolate_layout.py
|
MIT
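A hedged usage sketch; the paths and the master_finder mapping are illustrative.
from fontTools.varLib.interpolate_layout import interpolate_layout
# Map each designspace master path to the compiled binary to open.
finder = lambda s: s.replace(".ufo", ".ttf")
font = interpolate_layout("MyFamily.designspace", {"wght": 700}, master_finder=finder)
font.save("MyFamily-wght700-layout.ttf")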
|
def main(args=None):
"""Interpolate GDEF/GPOS/GSUB tables for a point on a designspace"""
from fontTools import configLogger
import argparse
import sys
parser = argparse.ArgumentParser(
"fonttools varLib.interpolate_layout",
description=main.__doc__,
)
parser.add_argument(
"designspace_filename", metavar="DESIGNSPACE", help="Input TTF files"
)
parser.add_argument(
"locations",
metavar="LOCATION",
type=str,
nargs="+",
help="Axis locations (e.g. wdth=120",
)
parser.add_argument(
"-o",
"--output",
metavar="OUTPUT",
help="Output font file (defaults to <designspacename>-instance.ttf)",
)
parser.add_argument(
"-l",
"--loglevel",
metavar="LEVEL",
default="INFO",
help="Logging level (defaults to INFO)",
)
args = parser.parse_args(args)
if not args.output:
args.output = os.path.splitext(args.designspace_filename)[0] + "-instance.ttf"
configLogger(level=args.loglevel)
finder = lambda s: s.replace("master_ufo", "master_ttf_interpolatable").replace(
".ufo", ".ttf"
)
loc = {}
for arg in args.locations:
tag, val = arg.split("=")
loc[tag] = float(val)
font = interpolate_layout(args.designspace_filename, loc, finder)
log.info("Saving font %s", args.output)
font.save(args.output)
|
Interpolate GDEF/GPOS/GSUB tables for a point on a designspace
|
main
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/interpolate_layout.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/interpolate_layout.py
|
MIT
|
def iup_segment(
coords: _PointSegment, rc1: _Point, rd1: _Delta, rc2: _Point, rd2: _Delta
): # -> _DeltaSegment:
"""Given two reference coordinates `rc1` & `rc2` and their respective
delta vectors `rd1` & `rd2`, returns interpolated deltas for the set of
coordinates `coords`."""
# rc1 = reference coord 1
# rd1 = reference delta 1
out_arrays = [None, None]
for j in 0, 1:
out_arrays[j] = out = []
x1, x2, d1, d2 = rc1[j], rc2[j], rd1[j], rd2[j]
if x1 == x2:
n = len(coords)
if d1 == d2:
out.extend([d1] * n)
else:
out.extend([0] * n)
continue
if x1 > x2:
x1, x2 = x2, x1
d1, d2 = d2, d1
# x1 < x2
scale = (d2 - d1) / (x2 - x1)
for pair in coords:
x = pair[j]
if x <= x1:
d = d1
elif x >= x2:
d = d2
else:
# Interpolate
#
# NOTE: we assign an explicit intermediate variable here in
# order to disable a fused mul-add optimization. See:
#
# - https://godbolt.org/z/YsP4T3TqK,
# - https://github.com/fonttools/fonttools/issues/3703
nudge = (x - x1) * scale
d = d1 + nudge
out.append(d)
return zip(*out_arrays)
|
Given two reference coordinates `rc1` & `rc2` and their respective
delta vectors `rd1` & `rd2`, returns interpolated deltas for the set of
coordinates `coords`.
|
iup_segment
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/iup.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/iup.py
|
MIT
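A worked example of the segment interpolation above; coordinates and deltas are illustrative.
from fontTools.varLib.iup import iup_segment
# Reference points at x=0 and x=100 carry x-deltas 10 and 20; a point at x=50
# receives the linearly interpolated delta 15. The y component stays 0.
deltas = list(iup_segment([(50, 0)], (0, 0), (10, 0), (100, 0), (20, 0)))
assert deltas == [(15.0, 0)]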
|
def iup_contour(deltas: _DeltaOrNoneSegment, coords: _PointSegment) -> _DeltaSegment:
"""For the contour given in `coords`, interpolate any missing
delta values in delta vector `deltas`.
Returns fully filled-out delta vector."""
assert len(deltas) == len(coords)
if None not in deltas:
return deltas
n = len(deltas)
# indices of points with explicit deltas
indices = [i for i, v in enumerate(deltas) if v is not None]
if not indices:
# All deltas are None. Return 0,0 for all.
return [(0, 0)] * n
out = []
it = iter(indices)
start = next(it)
if start != 0:
# Initial segment that wraps around
i1, i2, ri1, ri2 = 0, start, start, indices[-1]
out.extend(
iup_segment(
coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]
)
)
out.append(deltas[start])
for end in it:
if end - start > 1:
i1, i2, ri1, ri2 = start + 1, end, start, end
out.extend(
iup_segment(
coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]
)
)
out.append(deltas[end])
start = end
if start != n - 1:
# Final segment that wraps around
i1, i2, ri1, ri2 = start + 1, n, start, indices[0]
out.extend(
iup_segment(
coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]
)
)
assert len(deltas) == len(out), (len(deltas), len(out))
return out
|
For the contour given in `coords`, interpolate any missing
delta values in delta vector `deltas`.
Returns fully filled-out delta vector.
|
iup_contour
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/iup.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/iup.py
|
MIT
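A worked example of filling in a missing delta on a three-point contour; the values are illustrative.
from fontTools.varLib.iup import iup_contour
deltas = [(1, 1), None, (3, 3)]
coords = [(0, 0), (5, 5), (10, 10)]
# The middle point sits halfway between its explicitly encoded neighbours,
# so its delta is interpolated to (2.0, 2.0).
assert iup_contour(deltas, coords) == [(1, 1), (2.0, 2.0), (3, 3)]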
|
def iup_delta(
deltas: _DeltaOrNoneSegment, coords: _PointSegment, ends: _Endpoints
) -> _DeltaSegment:
"""For the outline given in `coords`, with contour endpoints given
in sorted increasing order in `ends`, interpolate any missing
delta values in delta vector `deltas`.
Returns fully filled-out delta vector."""
assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4
n = len(coords)
ends = ends + [n - 4, n - 3, n - 2, n - 1]
out = []
start = 0
for end in ends:
end += 1
contour = iup_contour(deltas[start:end], coords[start:end])
out.extend(contour)
start = end
return out
|
For the outline given in `coords`, with contour endpoints given
in sorted increasing order in `ends`, interpolate any missing
delta values in delta vector `deltas`.
Returns fully filled-out delta vector.
|
iup_delta
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/iup.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/iup.py
|
MIT
|
def can_iup_in_between(
deltas: _DeltaSegment,
coords: _PointSegment,
i: Integral,
j: Integral,
tolerance: Real,
): # -> bool:
"""Return true if the deltas for points at `i` and `j` (`i < j`) can be
successfully used to interpolate deltas for points in between them within
provided error tolerance."""
assert j - i >= 2
interp = iup_segment(coords[i + 1 : j], coords[i], deltas[i], coords[j], deltas[j])
deltas = deltas[i + 1 : j]
return all(
abs(complex(x - p, y - q)) <= tolerance
for (x, y), (p, q) in zip(deltas, interp)
)
|
Return true if the deltas for points at `i` and `j` (`i < j`) can be
successfully used to interpolate deltas for points in between them within
provided error tolerance.
|
can_iup_in_between
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/iup.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/iup.py
|
MIT
|
def _iup_contour_bound_forced_set(
deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0
) -> set:
"""The forced set is a conservative set of points on the contour that must be encoded
explicitly (ie. cannot be interpolated). Calculating this set allows for significantly
    speeding up the dynamic-programming, as well as resolving circularity in DP.
The set is precise; that is, if an index is in the returned set, then there is no way
that IUP can generate delta for that point, given `coords` and `deltas`.
"""
assert len(deltas) == len(coords)
n = len(deltas)
forced = set()
# Track "last" and "next" points on the contour as we sweep.
for i in range(len(deltas) - 1, -1, -1):
ld, lc = deltas[i - 1], coords[i - 1]
d, c = deltas[i], coords[i]
nd, nc = deltas[i - n + 1], coords[i - n + 1]
for j in (0, 1): # For X and for Y
cj = c[j]
dj = d[j]
lcj = lc[j]
ldj = ld[j]
ncj = nc[j]
ndj = nd[j]
if lcj <= ncj:
c1, c2 = lcj, ncj
d1, d2 = ldj, ndj
else:
c1, c2 = ncj, lcj
d1, d2 = ndj, ldj
force = False
# If the two coordinates are the same, then the interpolation
# algorithm produces the same delta if both deltas are equal,
# and zero if they differ.
#
# This test has to be before the next one.
if c1 == c2:
if abs(d1 - d2) > tolerance and abs(dj) > tolerance:
force = True
# If coordinate for current point is between coordinate of adjacent
# points on the two sides, but the delta for current point is NOT
# between delta for those adjacent points (considering tolerance
# allowance), then there is no way that current point can be IUP-ed.
# Mark it forced.
elif c1 <= cj <= c2: # and c1 != c2
if not (min(d1, d2) - tolerance <= dj <= max(d1, d2) + tolerance):
force = True
# Otherwise, the delta should either match the closest, or have the
# same sign as the interpolation of the two deltas.
else: # cj < c1 or c2 < cj
if d1 != d2:
if cj < c1:
if (
abs(dj) > tolerance
and abs(dj - d1) > tolerance
and ((dj - tolerance < d1) != (d1 < d2))
):
force = True
else: # c2 < cj
if (
abs(dj) > tolerance
and abs(dj - d2) > tolerance
and ((d2 < dj + tolerance) != (d1 < d2))
):
force = True
if force:
forced.add(i)
break
return forced
|
The forced set is a conservative set of points on the contour that must be encoded
explicitly (ie. cannot be interpolated). Calculating this set allows for significantly
speeding up the dynamic-programming, as well as resolving circularity in DP.
The set is precise; that is, if an index is in the returned set, then there is no way
that IUP can generate delta for that point, given `coords` and `deltas`.
|
_iup_contour_bound_forced_set
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/iup.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/iup.py
|
MIT
|
def _iup_contour_optimize_dp(
deltas: _DeltaSegment,
coords: _PointSegment,
forced=set(),
tolerance: Real = 0,
lookback: Integral = None,
):
"""Straightforward Dynamic-Programming. For each index i, find least-costly encoding of
points 0 to i where i is explicitly encoded. We find this by considering all previous
explicit points j and check whether interpolation can fill points between j and i.
Note that solution always encodes last point explicitly. Higher-level is responsible
for removing that restriction.
As major speedup, we stop looking further whenever we see a "forced" point."""
n = len(deltas)
if lookback is None:
lookback = n
lookback = min(lookback, MAX_LOOKBACK)
costs = {-1: 0}
chain = {-1: None}
for i in range(0, n):
best_cost = costs[i - 1] + 1
costs[i] = best_cost
chain[i] = i - 1
if i - 1 in forced:
continue
for j in range(i - 2, max(i - lookback, -2), -1):
cost = costs[j] + 1
if cost < best_cost and can_iup_in_between(deltas, coords, j, i, tolerance):
costs[i] = best_cost = cost
chain[i] = j
if j in forced:
break
return chain, costs
|
Straightforward Dynamic-Programming. For each index i, find least-costly encoding of
points 0 to i where i is explicitly encoded. We find this by considering all previous
explicit points j and check whether interpolation can fill points between j and i.
Note that solution always encodes last point explicitly. Higher-level is responsible
for removing that restriction.
As major speedup, we stop looking further whenever we see a "forced" point.
|
_iup_contour_optimize_dp
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/iup.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/iup.py
|
MIT
|
def _rot_list(l: list, k: int):
"""Rotate list by k items forward. Ie. item at position 0 will be
at position k in returned list. Negative k is allowed."""
n = len(l)
k %= n
if not k:
return l
return l[n - k :] + l[: n - k]
|
Rotate list by k items forward. Ie. item at position 0 will be
at position k in returned list. Negative k is allowed.
|
_rot_list
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/iup.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/iup.py
|
MIT
|
def iup_contour_optimize(
deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0.0
) -> _DeltaOrNoneSegment:
"""For contour with coordinates `coords`, optimize a set of delta
values `deltas` within error `tolerance`.
Returns delta vector that has most number of None items instead of
the input delta.
"""
n = len(deltas)
# Get the easy cases out of the way:
# If all are within tolerance distance of 0, encode nothing:
if all(abs(complex(*p)) <= tolerance for p in deltas):
return [None] * n
# If there's exactly one point, return it:
if n == 1:
return deltas
# If all deltas are exactly the same, return just one (the first one):
d0 = deltas[0]
if all(d0 == d for d in deltas):
return [d0] + [None] * (n - 1)
# Else, solve the general problem using Dynamic Programming.
forced = _iup_contour_bound_forced_set(deltas, coords, tolerance)
# The _iup_contour_optimize_dp() routine returns the optimal encoding
# solution given the constraint that the last point is always encoded.
# To remove this constraint, we use two different methods, depending on
# whether forced set is non-empty or not:
# Debugging: Make the next if always take the second branch and observe
# if the font size changes (reduced); that would mean the forced-set
# has members it should not have.
if forced:
# Forced set is non-empty: rotate the contour start point
# such that the last point in the list is a forced point.
k = (n - 1) - max(forced)
assert k >= 0
deltas = _rot_list(deltas, k)
coords = _rot_list(coords, k)
forced = _rot_set(forced, k, n)
# Debugging: Pass a set() instead of forced variable to the next call
# to exercise forced-set computation for under-counting.
chain, costs = _iup_contour_optimize_dp(deltas, coords, forced, tolerance)
# Assemble solution.
solution = set()
i = n - 1
while i is not None:
solution.add(i)
i = chain[i]
solution.remove(-1)
# if not forced <= solution:
# print("coord", coords)
# print("deltas", deltas)
# print("len", len(deltas))
assert forced <= solution, (forced, solution)
deltas = [deltas[i] if i in solution else None for i in range(n)]
deltas = _rot_list(deltas, -k)
else:
# Repeat the contour an extra time, solve the new case, then look for solutions of the
# circular n-length problem in the solution for new linear case. I cannot prove that
# this always produces the optimal solution...
chain, costs = _iup_contour_optimize_dp(
deltas + deltas, coords + coords, forced, tolerance, n
)
best_sol, best_cost = None, n + 1
for start in range(n - 1, len(costs) - 1):
# Assemble solution.
solution = set()
i = start
while i > start - n:
solution.add(i % n)
i = chain[i]
if i == start - n:
cost = costs[start] - costs[start - n]
if cost <= best_cost:
best_sol, best_cost = solution, cost
# if not forced <= best_sol:
# print("coord", coords)
# print("deltas", deltas)
# print("len", len(deltas))
assert forced <= best_sol, (forced, best_sol)
deltas = [deltas[i] if i in best_sol else None for i in range(n)]
return deltas
|
For contour with coordinates `coords`, optimize a set of delta
values `deltas` within error `tolerance`.
Returns delta vector that has most number of None items instead of
the input delta.
|
iup_contour_optimize
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/iup.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/iup.py
|
MIT
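Two trivial cases of the optimizer above, as a hedged sketch; the square contour is illustrative.
from fontTools.varLib.iup import iup_contour_optimize
coords = [(0, 0), (10, 0), (10, 10), (0, 10)]
# All deltas identical: only the first point needs an explicit delta.
assert iup_contour_optimize([(1, 1)] * 4, coords) == [(1, 1), None, None, None]
# All deltas within tolerance of zero: nothing needs to be encoded.
assert iup_contour_optimize([(0, 0)] * 4, coords, tolerance=0.5) == [None] * 4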
|
def iup_delta_optimize(
deltas: _DeltaSegment,
coords: _PointSegment,
ends: _Endpoints,
tolerance: Real = 0.0,
) -> _DeltaOrNoneSegment:
"""For the outline given in `coords`, with contour endpoints given
in sorted increasing order in `ends`, optimize a set of delta
values `deltas` within error `tolerance`.
Returns delta vector that has most number of None items instead of
the input delta.
"""
assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4
n = len(coords)
ends = ends + [n - 4, n - 3, n - 2, n - 1]
out = []
start = 0
for end in ends:
contour = iup_contour_optimize(
deltas[start : end + 1], coords[start : end + 1], tolerance
)
assert len(contour) == end - start + 1
out.extend(contour)
start = end + 1
return out
|
For the outline given in `coords`, with contour endpoints given
in sorted increasing order in `ends`, optimize a set of delta
values `deltas` within error `tolerance`.
Returns delta vector that has most number of None items instead of
the input delta.
|
iup_delta_optimize
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/iup.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/iup.py
|
MIT
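A hedged end-to-end sketch with one square contour plus the four phantom points that gvar outlines carry; all values are illustrative.
from fontTools.varLib.iup import iup_delta_optimize
coords = [(0, 0), (10, 0), (10, 10), (0, 10),  # contour points
          (0, 0), (10, 0), (0, 0), (0, 0)]     # phantom points
deltas = [(1, 1)] * 4 + [(0, 0)] * 4
ends = [3]  # index of the last point of the (only) contour
optimized = iup_delta_optimize(deltas, coords, ends, tolerance=0.0)
# The contour keeps one explicit delta; the zero phantom deltas drop out.
assert optimized == [(1, 1), None, None, None, None, None, None, None]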
|
def _merge_GlyphOrders(font, lst, values_lst=None, default=None):
"""Takes font and list of glyph lists (must be sorted by glyph id), and returns
two things:
- Combined glyph list,
- If values_lst is None, return input glyph lists, but padded with None when a glyph
was missing in a list. Otherwise, return values_lst list-of-list, padded with None
to match combined glyph lists.
"""
if values_lst is None:
dict_sets = [set(l) for l in lst]
else:
dict_sets = [{g: v for g, v in zip(l, vs)} for l, vs in zip(lst, values_lst)]
combined = set()
combined.update(*dict_sets)
sortKey = font.getReverseGlyphMap().__getitem__
order = sorted(combined, key=sortKey)
# Make sure all input glyphsets were in proper order
if not all(sorted(vs, key=sortKey) == vs for vs in lst):
raise InconsistentGlyphOrder()
del combined
paddedValues = None
if values_lst is None:
padded = [
[glyph if glyph in dict_set else default for glyph in order]
for dict_set in dict_sets
]
else:
assert len(lst) == len(values_lst)
padded = [
[dict_set[glyph] if glyph in dict_set else default for glyph in order]
for dict_set in dict_sets
]
return order, padded
|
Takes font and list of glyph lists (must be sorted by glyph id), and returns
two things:
- Combined glyph list,
- If values_lst is None, return input glyph lists, but padded with None when a glyph
was missing in a list. Otherwise, return values_lst list-of-list, padded with None
to match combined glyph lists.
|
_merge_GlyphOrders
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/merger.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/merger.py
|
MIT
|
def _Lookup_PairPos_subtables_canonicalize(lst, font):
"""Merge multiple Format1 subtables at the beginning of lst,
and merge multiple consecutive Format2 subtables that have the same
Class2 (ie. were split because of offset overflows). Returns new list."""
lst = list(lst)
l = len(lst)
i = 0
while i < l and lst[i].Format == 1:
i += 1
lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)]
l = len(lst)
i = l
while i > 0 and lst[i - 1].Format == 2:
i -= 1
lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)]
return lst
|
Merge multiple Format1 subtables at the beginning of lst,
and merge multiple consecutive Format2 subtables that have the same
Class2 (ie. were split because of offset overflows). Returns new list.
|
_Lookup_PairPos_subtables_canonicalize
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/merger.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/merger.py
|
MIT
|
def expandPaintColrLayers(colr):
"""Rebuild LayerList without PaintColrLayers reuse.
Each base paint graph is fully DFS-traversed (with exception of PaintColrGlyph
which are irrelevant for this); any layers referenced via PaintColrLayers are
collected into a new LayerList and duplicated when reuse is detected, to ensure
that all paints are distinct objects at the end of the process.
PaintColrLayers's FirstLayerIndex/NumLayers are updated so that no overlap
is left. Also, any consecutively nested PaintColrLayers are flattened.
The COLR table's LayerList is replaced with the new unique layers.
A side effect is also that any layer from the old LayerList which is not
referenced by any PaintColrLayers is dropped.
"""
if not colr.LayerList:
# if no LayerList, there's nothing to expand
return
uniqueLayerIDs = set()
newLayerList = []
for rec in colr.BaseGlyphList.BaseGlyphPaintRecord:
frontier = [rec.Paint]
while frontier:
paint = frontier.pop()
if paint.Format == ot.PaintFormat.PaintColrGlyph:
# don't traverse these, we treat them as constant for merging
continue
elif paint.Format == ot.PaintFormat.PaintColrLayers:
# de-treeify any nested PaintColrLayers, append unique copies to
# the new layer list and update PaintColrLayers index/count
children = list(_flatten_layers(paint, colr))
first_layer_index = len(newLayerList)
for layer in children:
if id(layer) in uniqueLayerIDs:
layer = copy.deepcopy(layer)
assert id(layer) not in uniqueLayerIDs
newLayerList.append(layer)
uniqueLayerIDs.add(id(layer))
paint.FirstLayerIndex = first_layer_index
paint.NumLayers = len(children)
else:
children = paint.getChildren(colr)
frontier.extend(reversed(children))
# sanity check all the new layers are distinct objects
assert len(newLayerList) == len(uniqueLayerIDs)
colr.LayerList.Paint = newLayerList
colr.LayerList.LayerCount = len(newLayerList)
|
Rebuild LayerList without PaintColrLayers reuse.
Each base paint graph is fully DFS-traversed (with exception of PaintColrGlyph
which are irrelevant for this); any layers referenced via PaintColrLayers are
collected into a new LayerList and duplicated when reuse is detected, to ensure
that all paints are distinct objects at the end of the process.
PaintColrLayers's FirstLayerIndex/NumLayers are updated so that no overlap
is left. Also, any consecutively nested PaintColrLayers are flattened.
The COLR table's LayerList is replaced with the new unique layers.
A side effect is also that any layer from the old LayerList which is not
referenced by any PaintColrLayers is dropped.
|
expandPaintColrLayers
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/merger.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/merger.py
|
MIT
|
def normalizeValue(v, triple, extrapolate=False):
"""Normalizes value based on a min/default/max triple.
>>> normalizeValue(400, (100, 400, 900))
0.0
>>> normalizeValue(100, (100, 400, 900))
-1.0
>>> normalizeValue(650, (100, 400, 900))
0.5
"""
lower, default, upper = triple
if not (lower <= default <= upper):
raise ValueError(
f"Invalid axis values, must be minimum, default, maximum: "
f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}"
)
if not extrapolate:
v = max(min(v, upper), lower)
if v == default or lower == upper:
return 0.0
if (v < default and lower != default) or (v > default and upper == default):
return (v - default) / (default - lower)
else:
assert (v > default and upper != default) or (
v < default and lower == default
), f"Ooops... v={v}, triple=({lower}, {default}, {upper})"
return (v - default) / (upper - default)
|
Normalizes value based on a min/default/max triple.
>>> normalizeValue(400, (100, 400, 900))
0.0
>>> normalizeValue(100, (100, 400, 900))
-1.0
>>> normalizeValue(650, (100, 400, 900))
0.5
|
normalizeValue
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def normalizeLocation(location, axes, extrapolate=False, *, validate=False):
"""Normalizes location based on axis min/default/max values from axes.
>>> axes = {"wght": (100, 400, 900)}
>>> normalizeLocation({"wght": 400}, axes)
{'wght': 0.0}
>>> normalizeLocation({"wght": 100}, axes)
{'wght': -1.0}
>>> normalizeLocation({"wght": 900}, axes)
{'wght': 1.0}
>>> normalizeLocation({"wght": 650}, axes)
{'wght': 0.5}
>>> normalizeLocation({"wght": 1000}, axes)
{'wght': 1.0}
>>> normalizeLocation({"wght": 0}, axes)
{'wght': -1.0}
>>> axes = {"wght": (0, 0, 1000)}
>>> normalizeLocation({"wght": 0}, axes)
{'wght': 0.0}
>>> normalizeLocation({"wght": -1}, axes)
{'wght': 0.0}
>>> normalizeLocation({"wght": 1000}, axes)
{'wght': 1.0}
>>> normalizeLocation({"wght": 500}, axes)
{'wght': 0.5}
>>> normalizeLocation({"wght": 1001}, axes)
{'wght': 1.0}
>>> axes = {"wght": (0, 1000, 1000)}
>>> normalizeLocation({"wght": 0}, axes)
{'wght': -1.0}
>>> normalizeLocation({"wght": -1}, axes)
{'wght': -1.0}
>>> normalizeLocation({"wght": 500}, axes)
{'wght': -0.5}
>>> normalizeLocation({"wght": 1000}, axes)
{'wght': 0.0}
>>> normalizeLocation({"wght": 1001}, axes)
{'wght': 0.0}
"""
if validate:
assert set(location.keys()) <= set(axes.keys()), set(location.keys()) - set(
axes.keys()
)
out = {}
for tag, triple in axes.items():
v = location.get(tag, triple[1])
out[tag] = normalizeValue(v, triple, extrapolate=extrapolate)
return out
|
Normalizes location based on axis min/default/max values from axes.
>>> axes = {"wght": (100, 400, 900)}
>>> normalizeLocation({"wght": 400}, axes)
{'wght': 0.0}
>>> normalizeLocation({"wght": 100}, axes)
{'wght': -1.0}
>>> normalizeLocation({"wght": 900}, axes)
{'wght': 1.0}
>>> normalizeLocation({"wght": 650}, axes)
{'wght': 0.5}
>>> normalizeLocation({"wght": 1000}, axes)
{'wght': 1.0}
>>> normalizeLocation({"wght": 0}, axes)
{'wght': -1.0}
>>> axes = {"wght": (0, 0, 1000)}
>>> normalizeLocation({"wght": 0}, axes)
{'wght': 0.0}
>>> normalizeLocation({"wght": -1}, axes)
{'wght': 0.0}
>>> normalizeLocation({"wght": 1000}, axes)
{'wght': 1.0}
>>> normalizeLocation({"wght": 500}, axes)
{'wght': 0.5}
>>> normalizeLocation({"wght": 1001}, axes)
{'wght': 1.0}
>>> axes = {"wght": (0, 1000, 1000)}
>>> normalizeLocation({"wght": 0}, axes)
{'wght': -1.0}
>>> normalizeLocation({"wght": -1}, axes)
{'wght': -1.0}
>>> normalizeLocation({"wght": 500}, axes)
{'wght': -0.5}
>>> normalizeLocation({"wght": 1000}, axes)
{'wght': 0.0}
>>> normalizeLocation({"wght": 1001}, axes)
{'wght': 0.0}
|
normalizeLocation
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def supportScalar(location, support, ot=True, extrapolate=False, axisRanges=None):
"""Returns the scalar multiplier at location, for a master
with support. If ot is True, then a peak value of zero
for support of an axis means "axis does not participate". That
is how OpenType Variation Font technology works.
If extrapolate is True, axisRanges must be a dict that maps axis
names to (axisMin, axisMax) tuples.
>>> supportScalar({}, {})
1.0
>>> supportScalar({'wght':.2}, {})
1.0
>>> supportScalar({'wght':.2}, {'wght':(0,2,3)})
0.1
>>> supportScalar({'wght':2.5}, {'wght':(0,2,4)})
0.75
>>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
0.75
>>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False)
0.375
>>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
0.75
>>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
0.75
>>> supportScalar({'wght':3}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
-1.0
>>> supportScalar({'wght':-1}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
-1.0
>>> supportScalar({'wght':3}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
1.5
>>> supportScalar({'wght':-1}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
-0.5
"""
if extrapolate and axisRanges is None:
raise TypeError("axisRanges must be passed when extrapolate is True")
scalar = 1.0
for axis, (lower, peak, upper) in support.items():
if ot:
# OpenType-specific case handling
if peak == 0.0:
continue
if lower > peak or peak > upper:
continue
if lower < 0.0 and upper > 0.0:
continue
v = location.get(axis, 0.0)
else:
assert axis in location
v = location[axis]
if v == peak:
continue
if extrapolate:
axisMin, axisMax = axisRanges[axis]
if v < axisMin and lower <= axisMin:
if peak <= axisMin and peak < upper:
scalar *= (v - upper) / (peak - upper)
continue
elif axisMin < peak:
scalar *= (v - lower) / (peak - lower)
continue
elif axisMax < v and axisMax <= upper:
if axisMax <= peak and lower < peak:
scalar *= (v - lower) / (peak - lower)
continue
elif peak < axisMax:
scalar *= (v - upper) / (peak - upper)
continue
if v <= lower or upper <= v:
scalar = 0.0
break
if v < peak:
scalar *= (v - lower) / (peak - lower)
else: # v > peak
scalar *= (v - upper) / (peak - upper)
return scalar
|
Returns the scalar multiplier at location, for a master
with support. If ot is True, then a peak value of zero
for support of an axis means "axis does not participate". That
is how OpenType Variation Font technology works.
If extrapolate is True, axisRanges must be a dict that maps axis
names to (axisMin, axisMax) tuples.
>>> supportScalar({}, {})
1.0
>>> supportScalar({'wght':.2}, {})
1.0
>>> supportScalar({'wght':.2}, {'wght':(0,2,3)})
0.1
>>> supportScalar({'wght':2.5}, {'wght':(0,2,4)})
0.75
>>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
0.75
>>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False)
0.375
>>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
0.75
>>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
0.75
>>> supportScalar({'wght':3}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
-1.0
>>> supportScalar({'wght':-1}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
-1.0
>>> supportScalar({'wght':3}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
1.5
>>> supportScalar({'wght':-1}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})
-0.5
|
supportScalar
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def getSubModel(self, items):
"""Return a sub-model and the items that are not None.
The sub-model is necessary for working with the subset
of items when some are None.
The sub-model is cached."""
if None not in items:
return self, items
key = tuple(v is not None for v in items)
subModel = self._subModels.get(key)
if subModel is None:
subModel = VariationModel(subList(key, self.origLocations), self.axisOrder)
self._subModels[key] = subModel
return subModel, subList(key, items)
|
Return a sub-model and the items that are not None.
The sub-model is necessary for working with the subset
of items when some are None.
The sub-model is cached.
|
getSubModel
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def getScalars(self, loc):
"""Return scalars for each delta, for the given location.
If interpolating many master-values at the same location,
this function allows speed up by fetching the scalars once
and using them with interpolateFromMastersAndScalars()."""
return [
supportScalar(
loc, support, extrapolate=self.extrapolate, axisRanges=self.axisRanges
)
for support in self.supports
]
|
Return scalars for each delta, for the given location.
If interpolating many master-values at the same location,
this function allows speed up by fetching the scalars once
and using them with interpolateFromMastersAndScalars().
|
getScalars
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def getMasterScalars(self, targetLocation):
"""Return multipliers for each master, for the given location.
If interpolating many master-values at the same location,
this function allows speed up by fetching the scalars once
and using them with interpolateFromValuesAndScalars().
Note that the scalars used in interpolateFromMastersAndScalars(),
are *not* the same as the ones returned here. They are the result
of getScalars()."""
out = self.getScalars(targetLocation)
for i, weights in reversed(list(enumerate(self.deltaWeights))):
for j, weight in weights.items():
out[j] -= out[i] * weight
out = [out[self.mapping[i]] for i in range(len(out))]
return out
|
Return multipliers for each master, for the given location.
If interpolating many master-values at the same location,
this function allows speed up by fetching the scalars once
and using them with interpolateFromValuesAndScalars().
Note that the scalars used in interpolateFromMastersAndScalars(),
are *not* the same as the ones returned here. They are the result
of getScalars().
|
getMasterScalars
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def interpolateFromValuesAndScalars(values, scalars):
"""Interpolate from values and scalars coefficients.
If the values are master-values, then the scalars should be
fetched from getMasterScalars().
If the values are deltas, then the scalars should be fetched
from getScalars(); in which case this is the same as
interpolateFromDeltasAndScalars().
"""
v = None
assert len(values) == len(scalars)
for value, scalar in zip(values, scalars):
if not scalar:
continue
contribution = value * scalar
if v is None:
v = contribution
else:
v += contribution
return v
|
Interpolate from values and scalars coefficients.
If the values are master-values, then the scalars should be
fetched from getMasterScalars().
If the values are deltas, then the scalars should be fetched
from getScalars(); in which case this is the same as
interpolateFromDeltasAndScalars().
|
interpolateFromValuesAndScalars
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def interpolateFromDeltas(self, loc, deltas):
"""Interpolate from deltas, at location loc."""
scalars = self.getScalars(loc)
return self.interpolateFromDeltasAndScalars(deltas, scalars)
|
Interpolate from deltas, at location loc.
|
interpolateFromDeltas
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def interpolateFromMasters(self, loc, masterValues, *, round=noRound):
"""Interpolate from master-values, at location loc."""
scalars = self.getMasterScalars(loc)
return self.interpolateFromValuesAndScalars(masterValues, scalars)
|
Interpolate from master-values, at location loc.
|
interpolateFromMasters
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def interpolateFromMastersAndScalars(self, masterValues, scalars, *, round=noRound):
"""Interpolate from master-values, and scalars fetched from
getScalars(), which is useful when you want to interpolate
multiple master-values with the same location."""
deltas = self.getDeltas(masterValues, round=round)
return self.interpolateFromDeltasAndScalars(deltas, scalars)
|
Interpolate from master-values, and scalars fetched from
getScalars(), which is useful when you want to interpolate
multiple master-values with the same location.
|
interpolateFromMastersAndScalars
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/models.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/models.py
|
MIT
|
def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
"""Unlike TrueType glyphs, neither advance width nor bounding box
info is stored in a CFF2 charstring. The width data exists only in
the hmtx and HVAR tables. Since LSB data cannot be interpolated
reliably from the master LSB values in the hmtx table, we traverse
the charstring to determine the actual bounding box."""
charstrings = topDict.CharStrings
boundsPen = BoundsPen(glyphOrder)
hmtx = varfont["hmtx"]
hvar_table = None
if "HVAR" in varfont:
hvar_table = varfont["HVAR"].table
fvar = varfont["fvar"]
varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)
for gid, gname in enumerate(glyphOrder):
entry = list(hmtx[gname])
# get width delta.
if hvar_table:
if hvar_table.AdvWidthMap:
width_idx = hvar_table.AdvWidthMap.mapping[gname]
else:
width_idx = gid
width_delta = otRound(varStoreInstancer[width_idx])
else:
width_delta = 0
# get LSB.
boundsPen.init()
charstring = charstrings[gname]
charstring.draw(boundsPen)
if boundsPen.bounds is None:
# Happens with non-marking glyphs
lsb_delta = 0
else:
lsb = otRound(boundsPen.bounds[0])
lsb_delta = entry[1] - lsb
if lsb_delta or width_delta:
if width_delta:
entry[0] = max(0, entry[0] + width_delta)
if lsb_delta:
entry[1] = lsb
hmtx[gname] = tuple(entry)
|
Unlike TrueType glyphs, neither advance width nor bounding box
info is stored in a CFF2 charstring. The width data exists only in
the hmtx and HVAR tables. Since LSB data cannot be interpolated
reliably from the master LSB values in the hmtx table, we traverse
the charstring to determine the actual bounding box.
|
interpolate_cff2_metrics
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/mutator.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/mutator.py
|
MIT
|
def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
"""Generate a static instance from a variable TTFont and a dictionary
defining the desired location along the variable font's axes.
The location values must be specified as user-space coordinates, e.g.:
.. code-block::
{'wght': 400, 'wdth': 100}
By default, a new TTFont object is returned. If ``inplace`` is True, the
input varfont is modified and reduced to a static font.
When the overlap parameter is defined as True,
OVERLAP_SIMPLE and OVERLAP_COMPOUND bits are set to 1. See
https://docs.microsoft.com/en-us/typography/opentype/spec/glyf
"""
if not inplace:
# make a copy to leave input varfont unmodified
stream = BytesIO()
varfont.save(stream)
stream.seek(0)
varfont = TTFont(stream)
fvar = varfont["fvar"]
axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes}
loc = normalizeLocation(location, axes)
if "avar" in varfont:
maps = varfont["avar"].segments
loc = {k: piecewiseLinearMap(v, maps[k]) for k, v in loc.items()}
# Quantize to F2Dot14, to avoid surprise interpolations.
loc = {k: floatToFixedToFloat(v, 14) for k, v in loc.items()}
# Location is normalized now
log.info("Normalized location: %s", loc)
if "gvar" in varfont:
log.info("Mutating glyf/gvar tables")
gvar = varfont["gvar"]
glyf = varfont["glyf"]
hMetrics = varfont["hmtx"].metrics
vMetrics = getattr(varfont.get("vmtx"), "metrics", None)
# get list of glyph names in gvar sorted by component depth
glyphnames = sorted(
gvar.variations.keys(),
key=lambda name: (
(
glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
if glyf[name].isComposite()
else 0
),
name,
),
)
for glyphname in glyphnames:
variations = gvar.variations[glyphname]
coordinates, _ = glyf._getCoordinatesAndControls(
glyphname, hMetrics, vMetrics
)
origCoords, endPts = None, None
for var in variations:
scalar = supportScalar(loc, var.axes)
if not scalar:
continue
delta = var.coordinates
if None in delta:
if origCoords is None:
origCoords, g = glyf._getCoordinatesAndControls(
glyphname, hMetrics, vMetrics
)
delta = iup_delta(delta, origCoords, g.endPts)
coordinates += GlyphCoordinates(delta) * scalar
glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)
else:
glyf = None
if "DSIG" in varfont:
del varfont["DSIG"]
if "cvar" in varfont:
log.info("Mutating cvt/cvar tables")
cvar = varfont["cvar"]
cvt = varfont["cvt "]
deltas = {}
for var in cvar.variations:
scalar = supportScalar(loc, var.axes)
if not scalar:
continue
for i, c in enumerate(var.coordinates):
if c is not None:
deltas[i] = deltas.get(i, 0) + scalar * c
for i, delta in deltas.items():
cvt[i] += otRound(delta)
if "CFF2" in varfont:
log.info("Mutating CFF2 table")
glyphOrder = varfont.getGlyphOrder()
CFF2 = varfont["CFF2"]
topDict = CFF2.cff.topDictIndex[0]
vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc)
interpolateFromDeltas = vsInstancer.interpolateFromDeltas
interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas)
CFF2.desubroutinize()
interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder)
interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc)
del topDict.rawDict["VarStore"]
del topDict.VarStore
if "MVAR" in varfont:
log.info("Mutating MVAR table")
mvar = varfont["MVAR"].table
varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc)
records = mvar.ValueRecord
for rec in records:
mvarTag = rec.ValueTag
if mvarTag not in MVAR_ENTRIES:
continue
tableTag, itemName = MVAR_ENTRIES[mvarTag]
delta = otRound(varStoreInstancer[rec.VarIdx])
if not delta:
continue
setattr(
varfont[tableTag],
itemName,
getattr(varfont[tableTag], itemName) + delta,
)
log.info("Mutating FeatureVariations")
for tableTag in "GSUB", "GPOS":
if not tableTag in varfont:
continue
table = varfont[tableTag].table
if not getattr(table, "FeatureVariations", None):
continue
variations = table.FeatureVariations
for record in variations.FeatureVariationRecord:
applies = True
for condition in record.ConditionSet.ConditionTable:
if condition.Format == 1:
axisIdx = condition.AxisIndex
axisTag = fvar.axes[axisIdx].axisTag
Min = condition.FilterRangeMinValue
Max = condition.FilterRangeMaxValue
v = loc[axisTag]
if not (Min <= v <= Max):
applies = False
else:
applies = False
if not applies:
break
if applies:
assert record.FeatureTableSubstitution.Version == 0x00010000
for rec in record.FeatureTableSubstitution.SubstitutionRecord:
table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = (
rec.Feature
)
break
del table.FeatureVariations
if "GDEF" in varfont and varfont["GDEF"].table.Version >= 0x00010003:
log.info("Mutating GDEF/GPOS/GSUB tables")
gdef = varfont["GDEF"].table
instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc)
merger = MutatorMerger(varfont, instancer)
merger.mergeTables(varfont, [varfont], ["GDEF", "GPOS"])
# Downgrade GDEF.
del gdef.VarStore
gdef.Version = 0x00010002
if gdef.MarkGlyphSetsDef is None:
del gdef.MarkGlyphSetsDef
gdef.Version = 0x00010000
if not (
gdef.LigCaretList
or gdef.MarkAttachClassDef
or gdef.GlyphClassDef
or gdef.AttachList
or (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)
):
del varfont["GDEF"]
addidef = False
if glyf:
for glyph in glyf.glyphs.values():
if hasattr(glyph, "program"):
instructions = glyph.program.getAssembly()
# If GETVARIATION opcode is used in bytecode of any glyph add IDEF
addidef = any(op.startswith("GETVARIATION") for op in instructions)
if addidef:
break
if overlap:
for glyph_name in glyf.keys():
glyph = glyf[glyph_name]
# Set OVERLAP_COMPOUND bit for compound glyphs
if glyph.isComposite():
glyph.components[0].flags |= OVERLAP_COMPOUND
# Set OVERLAP_SIMPLE bit for simple glyphs
elif glyph.numberOfContours > 0:
glyph.flags[0] |= flagOverlapSimple
if addidef:
log.info("Adding IDEF to fpgm table for GETVARIATION opcode")
asm = []
if "fpgm" in varfont:
fpgm = varfont["fpgm"]
asm = fpgm.program.getAssembly()
else:
fpgm = newTable("fpgm")
fpgm.program = ttProgram.Program()
varfont["fpgm"] = fpgm
asm.append("PUSHB[000] 145")
asm.append("IDEF[ ]")
args = [str(len(loc))]
for a in fvar.axes:
args.append(str(floatToFixed(loc[a.axisTag], 14)))
asm.append("NPUSHW[ ] " + " ".join(args))
asm.append("ENDF[ ]")
fpgm.program.fromAssembly(asm)
# Change maxp attributes as IDEF is added
if "maxp" in varfont:
maxp = varfont["maxp"]
setattr(
maxp, "maxInstructionDefs", 1 + getattr(maxp, "maxInstructionDefs", 0)
)
setattr(
maxp,
"maxStackElements",
max(len(loc), getattr(maxp, "maxStackElements", 0)),
)
if "name" in varfont:
log.info("Pruning name table")
exclude = {a.axisNameID for a in fvar.axes}
for i in fvar.instances:
exclude.add(i.subfamilyNameID)
exclude.add(i.postscriptNameID)
if "ltag" in varfont:
# Drop the whole 'ltag' table if all its language tags are referenced by
# name records to be pruned.
# TODO: prune unused ltag tags and re-enumerate langIDs accordingly
excludedUnicodeLangIDs = [
n.langID
for n in varfont["name"].names
if n.nameID in exclude and n.platformID == 0 and n.langID != 0xFFFF
]
if set(excludedUnicodeLangIDs) == set(range(len((varfont["ltag"].tags)))):
del varfont["ltag"]
varfont["name"].names[:] = [
n
for n in varfont["name"].names
if n.nameID < 256 or n.nameID not in exclude
]
if "wght" in location and "OS/2" in varfont:
varfont["OS/2"].usWeightClass = otRound(max(1, min(location["wght"], 1000)))
if "wdth" in location:
wdth = location["wdth"]
for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()):
if wdth < percent:
varfont["OS/2"].usWidthClass = widthClass
break
else:
varfont["OS/2"].usWidthClass = 9
if "slnt" in location and "post" in varfont:
varfont["post"].italicAngle = max(-90, min(location["slnt"], 90))
log.info("Removing variable tables")
for tag in ("avar", "cvar", "fvar", "gvar", "HVAR", "MVAR", "VVAR", "STAT"):
if tag in varfont:
del varfont[tag]
return varfont
|
Generate a static instance from a variable TTFont and a dictionary
defining the desired location along the variable font's axes.
The location values must be specified as user-space coordinates, e.g.:
.. code-block::
{'wght': 400, 'wdth': 100}
By default, a new TTFont object is returned. If ``inplace`` is True, the
input varfont is modified and reduced to a static font.
When the overlap parameter is defined as True,
OVERLAP_SIMPLE and OVERLAP_COMPOUND bits are set to 1. See
https://docs.microsoft.com/en-us/typography/opentype/spec/glyf
|
instantiateVariableFont
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/mutator.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/mutator.py
|
MIT
|
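Minimal usage sketch for varLib.mutator's instantiateVariableFont above; the font path and the 'wght'/'wdth' axis tags are placeholders for whatever the variable font actually contains:
from fontTools.ttLib import TTFont
from fontTools.varLib.mutator import instantiateVariableFont

varfont = TTFont("MyFont-VF.ttf")  # placeholder path to a variable font
static = instantiateVariableFont(varfont, {"wght": 700, "wdth": 100})
static.save("MyFont-Bold.ttf")
Because inplace defaults to False, the input TTFont object is copied and left unmodified.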
def plotModelFromMasters(model, masterValues, fig, **kwargs):
"""Plot a variation model and set of master values corresponding
to the locations to the model into a pyplot figure. Variation
model must have axisOrder of size 1 or 2."""
if len(model.axisOrder) == 1:
_plotModelFromMasters2D(model, masterValues, fig, **kwargs)
elif len(model.axisOrder) == 2:
_plotModelFromMasters3D(model, masterValues, fig, **kwargs)
else:
raise ValueError("Only 1 or 2 axes are supported")
|
Plot a variation model and set of master values corresponding
to the locations to the model into a pyplot figure. Variation
model must have axisOrder of size 1 or 2.
|
plotModelFromMasters
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/plot.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/plot.py
|
MIT
|
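A sketch of plotting a one-axis model with plotModelFromMasters (requires matplotlib; the master values are invented):
from matplotlib import pyplot
from fontTools.varLib.models import VariationModel
from fontTools.varLib.plot import plotModelFromMasters

model = VariationModel([{}, {"wght": 0.5}, {"wght": 1.0}], axisOrder=["wght"])
fig = pyplot.figure()
plotModelFromMasters(model, [0, 60, 100], fig)  # hypothetical master values
pyplot.show()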
def buildVFStatTable(ttFont: TTFont, doc: DesignSpaceDocument, vfName: str) -> None:
"""Build the STAT table for the variable font identified by its name in
the given document.
Knowing which variable we're building STAT data for is needed to subset
the STAT locations to only include what the variable font actually ships.
.. versionadded:: 5.0
.. seealso::
- :func:`getStatAxes()`
- :func:`getStatLocations()`
- :func:`fontTools.otlLib.builder.buildStatTable()`
"""
for vf in doc.getVariableFonts():
if vf.name == vfName:
break
else:
raise DesignSpaceDocumentError(
f"Cannot find the variable font by name {vfName}"
)
region = getVFUserRegion(doc, vf)
# if there are not currently any mac names don't add them here, that's inconsistent
# https://github.com/fonttools/fonttools/issues/683
macNames = any(
nr.platformID == 1 for nr in getattr(ttFont.get("name"), "names", ())
)
return fontTools.otlLib.builder.buildStatTable(
ttFont,
getStatAxes(doc, region),
getStatLocations(doc, region),
doc.elidedFallbackName if doc.elidedFallbackName is not None else 2,
macNames=macNames,
)
|
Build the STAT table for the variable font identified by its name in
the given document.
Knowing which variable we're building STAT data for is needed to subset
the STAT locations to only include what the variable font actually ships.
.. versionadded:: 5.0
.. seealso::
- :func:`getStatAxes()`
- :func:`getStatLocations()`
- :func:`fontTools.otlLib.builder.buildStatTable()`
|
buildVFStatTable
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/stat.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/stat.py
|
MIT
|
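Hedged usage sketch for buildVFStatTable; the file names and the variable-font name are placeholders, and the designspace is assumed to be a version-5 document that declares a variable font with that name:
from fontTools.designspaceLib import DesignSpaceDocument
from fontTools.ttLib import TTFont
from fontTools.varLib.stat import buildVFStatTable

doc = DesignSpaceDocument.fromfile("MyFamily.designspace")  # placeholder path
vf = TTFont("MyFontVF_WeightOnly.ttf")                      # placeholder path
buildVFStatTable(vf, doc, "MyFontVF_WeightOnly")
vf.save("MyFontVF_WeightOnly.stat.ttf")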
def getStatAxes(doc: DesignSpaceDocument, userRegion: Region) -> List[Dict]:
"""Return a list of axis dicts suitable for use as the ``axes``
argument to :func:`fontTools.otlLib.builder.buildStatTable()`.
.. versionadded:: 5.0
"""
# First, get the axis labels with explicit ordering
# then append the others in the order they appear.
maxOrdering = max(
(axis.axisOrdering for axis in doc.axes if axis.axisOrdering is not None),
default=-1,
)
axisOrderings = []
for axis in doc.axes:
if axis.axisOrdering is not None:
axisOrderings.append(axis.axisOrdering)
else:
maxOrdering += 1
axisOrderings.append(maxOrdering)
return [
dict(
tag=axis.tag,
name={"en": axis.name, **axis.labelNames},
ordering=ordering,
values=[
_axisLabelToStatLocation(label)
for label in axis.axisLabels
if locationInRegion({axis.name: label.userValue}, userRegion)
],
)
for axis, ordering in zip(doc.axes, axisOrderings)
]
|
Return a list of axis dicts suitable for use as the ``axes``
argument to :func:`fontTools.otlLib.builder.buildStatTable()`.
.. versionadded:: 5.0
|
getStatAxes
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/stat.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/stat.py
|
MIT
|
def getStatLocations(doc: DesignSpaceDocument, userRegion: Region) -> List[Dict]:
"""Return a list of location dicts suitable for use as the ``locations``
argument to :func:`fontTools.otlLib.builder.buildStatTable()`.
.. versionadded:: 5.0
"""
axesByName = {axis.name: axis for axis in doc.axes}
return [
dict(
name={"en": label.name, **label.labelNames},
# Location in the designspace is keyed by axis name
# Location in buildStatTable by axis tag
location={
axesByName[name].tag: value
for name, value in label.getFullUserLocation(doc).items()
},
flags=_labelToFlags(label),
)
for label in doc.locationLabels
if locationInRegion(label.getFullUserLocation(doc), userRegion)
]
|
Return a list of location dicts suitable for use as the ``locations``
argument to :func:`fontTools.otlLib.builder.buildStatTable()`.
.. versionadded:: 5.0
|
getStatLocations
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/stat.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/stat.py
|
MIT
|
def _visit(self, func):
"""Recurse down from self, if type of an object is ot.Device,
call func() on it. Works on otData-style classes."""
if type(self) == ot.Device:
func(self)
elif isinstance(self, list):
for that in self:
_visit(that, func)
elif hasattr(self, "getConverters") and not hasattr(self, "postRead"):
for conv in self.getConverters():
that = getattr(self, conv.name, None)
if that is not None:
_visit(that, func)
elif isinstance(self, ot.ValueRecord):
for that in self.__dict__.values():
_visit(that, func)
|
Recurse down from self, if type of an object is ot.Device,
call func() on it. Works on otData-style classes.
|
_visit
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/varStore.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/varStore.py
|
MIT
|
def _Device_recordVarIdx(self, s):
"""Add VarIdx in this Device table (if any) to the set s."""
if self.DeltaFormat == 0x8000:
s.add((self.StartSize << 16) + self.EndSize)
|
Add VarIdx in this Device table (if any) to the set s.
|
_Device_recordVarIdx
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/varStore.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/varStore.py
|
MIT
|
def _Device_mapVarIdx(self, mapping, done):
"""Map VarIdx in this Device table (if any) through mapping."""
if id(self) in done:
return
done.add(id(self))
if self.DeltaFormat == 0x8000:
varIdx = mapping[(self.StartSize << 16) + self.EndSize]
self.StartSize = varIdx >> 16
self.EndSize = varIdx & 0xFFFF
|
Map VarIdx in this Device table (if any) through mapping.
|
_Device_mapVarIdx
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/varStore.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/varStore.py
|
MIT
|
def _characteristic_overhead(columns):
"""Returns overhead in bytes of encoding this characteristic
as a VarData."""
c = 4 + 6 # 4 bytes for LOffset, 6 bytes for VarData header
c += bit_count(columns) * 2
return c
|
Returns overhead in bytes of encoding this characteristic
as a VarData.
|
_characteristic_overhead
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/varStore.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/varStore.py
|
MIT
|
def VarStore_optimize(self, use_NO_VARIATION_INDEX=True, quantization=1):
"""Optimize storage. Returns mapping from old VarIdxes to new ones."""
# Overview:
#
# For each VarData row, we first extend it with zeroes to have
# one column per region in VarRegionList. We then group the
# rows into _Encoding objects, by their "characteristic" bitmap.
# The characteristic bitmap is a binary number representing how
# many bytes each column of the data takes up to encode. Each
# column is encoded in four bits. For example, if a column has
# only values in the range -128..127, it would only have a single
# bit set in the characteristic bitmap for that column. If it has
# values in the range -32768..32767, it would have two bits set.
# The number of ones in the characteristic bitmap is the "width"
# of the encoding.
#
# Each encoding as such has a number of "active" (ie. non-zero)
# columns. The overhead of encoding the characteristic bitmap
# is 10 bytes, plus 2 bytes per active column.
#
# When an encoding is merged into another one, if the characteristic
# of the old encoding is a subset of the new one, then the overhead
# of the old encoding is completely eliminated. However, each row
# now would require more bytes to encode, to the tune of one byte
# per characteristic bit that is active in the new encoding but not
# in the old one.
#
# The "gain" of merging two encodings is how many bytes we save by doing so.
#
# High-level algorithm:
#
# - Each encoding has a minimal way to encode it. However, because
# of the overhead of encoding the characteristic bitmap, it may
# be beneficial to merge two encodings together, if there is
# gain in doing so. As such, we need to search for the best
# such successive merges.
#
# Algorithm:
#
# - Put all encodings into a "todo" list.
#
# - Sort todo list (for stability) by width_sort_key(), which is a tuple
# of the following items:
# * The "width" of the encoding.
# * The characteristic bitmap of the encoding, with higher-numbered
# columns compared first.
#
# - Make a priority-queue of the gain from combining each two
# encodings in the todo list. The priority queue is sorted by
# decreasing gain. Only positive gains are included.
#
# - While priority queue is not empty:
# - Pop the first item from the priority queue,
# - Merge the two encodings it represents,
# - Remove the two encodings from the todo list,
# - Insert positive gains from combining the new encoding with
# all existing todo list items into the priority queue,
# - If a todo list item with the same characteristic bitmap as
# the new encoding exists, remove it from the todo list and
# merge it into the new encoding.
# - Insert the new encoding into the todo list,
#
# - Encode all remaining items in the todo list.
#
# The output is then sorted for stability, in the following way:
# - The VarRegionList of the input is kept intact.
# - The VarData is sorted by the same width_sort_key() used at the beginning.
# - Within each VarData, the items are sorted as vectors of numbers.
#
# Finally, each VarData is optimized to remove the empty columns and
# reorder columns as needed.
# TODO
# Check that no two VarRegions are the same; if they are, fold them.
n = len(self.VarRegionList.Region) # Number of columns
zeroes = [0] * n
front_mapping = {} # Map from old VarIdxes to full row tuples
encodings = _EncodingDict()
# Collect all items into a set of full rows (with lots of zeroes.)
for major, data in enumerate(self.VarData):
regionIndices = data.VarRegionIndex
for minor, item in enumerate(data.Item):
row = list(zeroes)
if quantization == 1:
for regionIdx, v in zip(regionIndices, item):
row[regionIdx] += v
else:
for regionIdx, v in zip(regionIndices, item):
row[regionIdx] += (
round(v / quantization) * quantization
) # TODO https://github.com/fonttools/fonttools/pull/3126#discussion_r1205439785
row = tuple(row)
if use_NO_VARIATION_INDEX and not any(row):
front_mapping[(major << 16) + minor] = None
continue
encodings.add_row(row)
front_mapping[(major << 16) + minor] = row
# Prepare for the main algorithm.
todo = sorted(encodings.values(), key=_Encoding.width_sort_key)
del encodings
# Repeatedly pick two best encodings to combine, and combine them.
heap = []
for i, encoding in enumerate(todo):
for j in range(i + 1, len(todo)):
other_encoding = todo[j]
combining_gain = encoding.gain_from_merging(other_encoding)
if combining_gain > 0:
heappush(heap, (-combining_gain, i, j))
while heap:
_, i, j = heappop(heap)
if todo[i] is None or todo[j] is None:
continue
encoding, other_encoding = todo[i], todo[j]
todo[i], todo[j] = None, None
# Combine the two encodings
combined_chars = other_encoding.chars | encoding.chars
combined_encoding = _Encoding(combined_chars)
combined_encoding.extend(encoding.items)
combined_encoding.extend(other_encoding.items)
for k, enc in enumerate(todo):
if enc is None:
continue
# In the unlikely event that the same encoding exists already,
# combine it.
if enc.chars == combined_chars:
combined_encoding.extend(enc.items)
todo[k] = None
continue
combining_gain = combined_encoding.gain_from_merging(enc)
if combining_gain > 0:
heappush(heap, (-combining_gain, k, len(todo)))
todo.append(combined_encoding)
encodings = [encoding for encoding in todo if encoding is not None]
# Assemble final store.
back_mapping = {} # Mapping from full rows to new VarIdxes
encodings.sort(key=_Encoding.width_sort_key)
self.VarData = []
for encoding in encodings:
items = sorted(encoding.items)
while items:
major = len(self.VarData)
data = ot.VarData()
self.VarData.append(data)
data.VarRegionIndex = range(n)
data.VarRegionCount = len(data.VarRegionIndex)
# Each major can only encode up to 0xFFFF entries.
data.Item, items = items[:0xFFFF], items[0xFFFF:]
for minor, item in enumerate(data.Item):
back_mapping[item] = (major << 16) + minor
# Compile final mapping.
varidx_map = {NO_VARIATION_INDEX: NO_VARIATION_INDEX}
for k, v in front_mapping.items():
varidx_map[k] = back_mapping[v] if v is not None else NO_VARIATION_INDEX
# Recalculate things and go home.
self.VarRegionList.RegionCount = len(self.VarRegionList.Region)
self.VarDataCount = len(self.VarData)
for data in self.VarData:
data.ItemCount = len(data.Item)
data.optimize()
# Remove unused regions.
self.prune_regions()
return varidx_map
|
Optimize storage. Returns mapping from old VarIdxes to new ones.
|
VarStore_optimize
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/varStore.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/varStore.py
|
MIT
|
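In fontTools this function is used as ot.VarStore.optimize; assuming that binding plus the remap_device_varidxes helpers that the same varStore module attaches to GDEF/GPOS, a sketched flow (placeholder font path) is:
from fontTools.ttLib import TTFont

font = TTFont("MyFont-VF.ttf")  # placeholder variable font with a GDEF VarStore
gdef = font["GDEF"].table
varidx_map = gdef.VarStore.optimize()   # mapping from old VarIdxes to new ones
gdef.remap_device_varidxes(varidx_map)  # assumed helper from varLib.varStore
if "GPOS" in font:
    font["GPOS"].table.remap_device_varidxes(varidx_map)
font.save("MyFont-VF.optimized.ttf")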
def _add_fvar(font, axes, instances: List[InstanceDescriptor]):
"""
Add 'fvar' table to font.
axes is an ordered dictionary of DesignspaceAxis objects.
instances is a list of dictionary objects with 'location', 'stylename',
and possibly 'postscriptfontname' entries.
"""
assert axes
assert isinstance(axes, OrderedDict)
log.info("Generating fvar")
fvar = newTable("fvar")
nameTable = font["name"]
# if there are not currently any mac names don't add them here, that's inconsistent
# https://github.com/fonttools/fonttools/issues/683
macNames = any(nr.platformID == 1 for nr in getattr(nameTable, "names", ()))
# we have all the best ways to express mac names
platforms = ((3, 1, 0x409),)
if macNames:
platforms = ((1, 0, 0),) + platforms
for a in axes.values():
axis = Axis()
axis.axisTag = Tag(a.tag)
# TODO Skip axes that have no variation.
axis.minValue, axis.defaultValue, axis.maxValue = (
a.minimum,
a.default,
a.maximum,
)
axis.axisNameID = nameTable.addMultilingualName(
a.labelNames, font, minNameID=256, mac=macNames
)
axis.flags = int(a.hidden)
fvar.axes.append(axis)
default_coordinates = {axis.axisTag: axis.defaultValue for axis in fvar.axes}
for instance in instances:
# Filter out discrete axis locations
coordinates = {
name: value for name, value in instance.location.items() if name in axes
}
if "en" not in instance.localisedStyleName:
if not instance.styleName:
raise VarLibValidationError(
f"Instance at location '{coordinates}' must have a default English "
"style name ('stylename' attribute on the instance element or a "
"stylename element with an 'xml:lang=\"en\"' attribute)."
)
localisedStyleName = dict(instance.localisedStyleName)
localisedStyleName["en"] = tostr(instance.styleName)
else:
localisedStyleName = instance.localisedStyleName
psname = instance.postScriptFontName
inst = NamedInstance()
inst.coordinates = {
axes[k].tag: axes[k].map_backward(v) for k, v in coordinates.items()
}
subfamilyNameID = nameTable.findMultilingualName(
localisedStyleName, windows=True, mac=macNames
)
if subfamilyNameID in {2, 17} and inst.coordinates == default_coordinates:
# Instances can only reuse an existing name ID 2 or 17 if they are at the
# default location across all axes, see:
# https://github.com/fonttools/fonttools/issues/3825.
inst.subfamilyNameID = subfamilyNameID
else:
inst.subfamilyNameID = nameTable.addMultilingualName(
localisedStyleName, windows=True, mac=macNames, minNameID=256
)
if psname is not None:
psname = tostr(psname)
inst.postscriptNameID = nameTable.addName(psname, platforms=platforms)
fvar.instances.append(inst)
assert "fvar" not in font
font["fvar"] = fvar
return fvar
|
Add 'fvar' table to font.
axes is an ordered dictionary of DesignspaceAxis objects.
instances is a list of dictionary objects with 'location', 'stylename',
and possibly 'postscriptfontname' entries.
|
_add_fvar
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/__init__.py
|
MIT
|
def _add_avar(font, axes, mappings, axisTags):
"""
Add 'avar' table to font.
axes is an ordered dictionary of AxisDescriptor objects.
"""
assert axes
assert isinstance(axes, OrderedDict)
log.info("Generating avar")
avar = newTable("avar")
interesting = False
vals_triples = {}
for axis in axes.values():
# Currently, some rasterizers require that the default value maps
# (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment
# maps, even when the default normalization mapping for the axis
# was not modified.
# https://github.com/googlei18n/fontmake/issues/295
# https://github.com/fonttools/fonttools/issues/1011
# TODO(anthrotype) revert this (and 19c4b37) when issue is fixed
curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}
keys_triple = (axis.minimum, axis.default, axis.maximum)
vals_triple = tuple(axis.map_forward(v) for v in keys_triple)
vals_triples[axis.tag] = vals_triple
if not axis.map:
continue
items = sorted(axis.map)
keys = [item[0] for item in items]
vals = [item[1] for item in items]
# Current avar requirements. We don't have to enforce
# these on the designer and can deduce some ourselves,
# but for now just enforce them.
if axis.minimum != min(keys):
raise VarLibValidationError(
f"Axis '{axis.name}': there must be a mapping for the axis minimum "
f"value {axis.minimum} and it must be the lowest input mapping value."
)
if axis.maximum != max(keys):
raise VarLibValidationError(
f"Axis '{axis.name}': there must be a mapping for the axis maximum "
f"value {axis.maximum} and it must be the highest input mapping value."
)
if axis.default not in keys:
raise VarLibValidationError(
f"Axis '{axis.name}': there must be a mapping for the axis default "
f"value {axis.default}."
)
# No duplicate input values (output values can be >= their preceding value).
if len(set(keys)) != len(keys):
raise VarLibValidationError(
f"Axis '{axis.name}': All axis mapping input='...' values must be "
"unique, but we found duplicates."
)
# Ascending values
if sorted(vals) != vals:
raise VarLibValidationError(
f"Axis '{axis.name}': mapping output values must be in ascending order."
)
keys = [models.normalizeValue(v, keys_triple) for v in keys]
vals = [models.normalizeValue(v, vals_triple) for v in vals]
if all(k == v for k, v in zip(keys, vals)):
continue
interesting = True
curve.update(zip(keys, vals))
assert 0.0 in curve and curve[0.0] == 0.0
assert -1.0 not in curve or curve[-1.0] == -1.0
assert +1.0 not in curve or curve[+1.0] == +1.0
# curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0})
if mappings:
interesting = True
inputLocations = [
{
axes[name].tag: models.normalizeValue(v, vals_triples[axes[name].tag])
for name, v in mapping.inputLocation.items()
}
for mapping in mappings
]
outputLocations = [
{
axes[name].tag: models.normalizeValue(v, vals_triples[axes[name].tag])
for name, v in mapping.outputLocation.items()
}
for mapping in mappings
]
assert len(inputLocations) == len(outputLocations)
# If base-master is missing, insert it at zero location.
if not any(all(v == 0 for k, v in loc.items()) for loc in inputLocations):
inputLocations.insert(0, {})
outputLocations.insert(0, {})
model = models.VariationModel(inputLocations, axisTags)
storeBuilder = varStore.OnlineVarStoreBuilder(axisTags)
storeBuilder.setModel(model)
varIdxes = {}
for tag in axisTags:
masterValues = []
for vo, vi in zip(outputLocations, inputLocations):
if tag not in vo:
masterValues.append(0)
continue
v = vo[tag] - vi.get(tag, 0)
masterValues.append(fl2fi(v, 14))
varIdxes[tag] = storeBuilder.storeMasters(masterValues)[1]
store = storeBuilder.finish()
optimized = store.optimize()
varIdxes = {axis: optimized[value] for axis, value in varIdxes.items()}
varIdxMap = builder.buildDeltaSetIndexMap(varIdxes[t] for t in axisTags)
avar.majorVersion = 2
avar.table = ot.avar()
avar.table.VarIdxMap = varIdxMap
avar.table.VarStore = store
assert "avar" not in font
if not interesting:
log.info("No need for avar")
avar = None
else:
font["avar"] = avar
return avar
|
Add 'avar' table to font.
axes is an ordered dictionary of AxisDescriptor objects.
|
_add_avar
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/__init__.py
|
MIT
|
def drop_implied_oncurve_points(*masters: TTFont) -> int:
"""Drop impliable on-curve points from all the simple glyphs in masters.
In TrueType glyf outlines, on-curve points can be implied when they are located
exactly at the midpoint of the line connecting two consecutive off-curve points.
The input masters' glyf tables are assumed to contain same-named glyphs that are
interpolatable. Oncurve points are only dropped if they can be implied for all
the masters. The fonts are modified in-place.
Args:
masters: The TTFont(s) to modify
Returns:
The total number of points that were dropped if any.
Reference:
https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html
"""
count = 0
glyph_masters = defaultdict(list)
# multiple DS source may point to the same TTFont object and we want to
# avoid processing the same glyph twice as they are modified in-place
for font in {id(m): m for m in masters}.values():
glyf = font["glyf"]
for glyphName in glyf.keys():
glyph_masters[glyphName].append(glyf[glyphName])
count = 0
for glyphName, glyphs in glyph_masters.items():
try:
dropped = dropImpliedOnCurvePoints(*glyphs)
except ValueError as e:
# we don't fail for incompatible glyphs in _add_gvar so we shouldn't here
log.warning("Failed to drop implied oncurves for %r: %s", glyphName, e)
else:
count += len(dropped)
return count
|
Drop impliable on-curve points from all the simple glyphs in masters.
In TrueType glyf outlines, on-curve points can be implied when they are located
exactly at the midpoint of the line connecting two consecutive off-curve points.
The input masters' glyf tables are assumed to contain same-named glyphs that are
interpolatable. Oncurve points are only dropped if they can be implied for all
the masters. The fonts are modified in-place.
Args:
masters: The TTFont(s) to modify
Returns:
The total number of points that were dropped if any.
Reference:
https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html
|
drop_implied_oncurve_points
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/__init__.py
|
MIT
|
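A usage sketch for drop_implied_oncurve_points, assuming it is imported from fontTools.varLib (where the record above locates it); the master paths are placeholders and must reference interpolatable TrueType masters:
from fontTools.ttLib import TTFont
from fontTools.varLib import drop_implied_oncurve_points

masters = [TTFont("Master-Light.ttf"), TTFont("Master-Bold.ttf")]  # placeholders
dropped = drop_implied_oncurve_points(*masters)
print(f"dropped {dropped} implied on-curve points")
for m, path in zip(masters, ("Master-Light.slim.ttf", "Master-Bold.slim.ttf")):
    m.save(path)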
def build_many(
designspace: DesignSpaceDocument,
master_finder=lambda s: s,
exclude=[],
optimize=True,
skip_vf=lambda vf_name: False,
colr_layer_reuse=True,
drop_implied_oncurves=False,
):
"""
Build variable fonts from a designspace file, version 5 which can define
several VFs, or version 4 which has implicitly one VF covering the whole doc.
If master_finder is set, it should be a callable that takes master
filename as found in designspace file and map it to master font
binary as to be opened (eg. .ttf or .otf).
skip_vf can be used to skip building some of the variable fonts defined in
the input designspace. It's a predicate that takes as argument the name
of the variable font and returns `bool`.
Always returns a Dict[str, TTFont] keyed by VariableFontDescriptor.name
"""
res = {}
# varLib.build (used further below) by default only builds an incomplete 'STAT'
# with an empty AxisValueArray--unless the VF inherited 'STAT' from its base master.
# Designspace version 5 can also be used to define 'STAT' labels or customize
# axes ordering, etc. To avoid overwriting a pre-existing 'STAT' or redoing the
# same work twice, here we check if designspace contains any 'STAT' info before
# proceeding to call buildVFStatTable for each VF.
# https://github.com/fonttools/fonttools/pull/3024
# https://github.com/fonttools/fonttools/issues/3045
doBuildStatFromDSv5 = (
"STAT" not in exclude
and designspace.formatTuple >= (5, 0)
and (
any(a.axisLabels or a.axisOrdering is not None for a in designspace.axes)
or designspace.locationLabels
)
)
for _location, subDoc in splitInterpolable(designspace):
for name, vfDoc in splitVariableFonts(subDoc):
if skip_vf(name):
log.debug(f"Skipping variable TTF font: {name}")
continue
vf = build(
vfDoc,
master_finder,
exclude=exclude,
optimize=optimize,
colr_layer_reuse=colr_layer_reuse,
drop_implied_oncurves=drop_implied_oncurves,
)[0]
if doBuildStatFromDSv5:
buildVFStatTable(vf, designspace, name)
res[name] = vf
return res
|
Build variable fonts from a designspace file, version 5 which can define
several VFs, or version 4 which has implicitly one VF covering the whole doc.
If master_finder is set, it should be a callable that takes master
filename as found in designspace file and map it to master font
binary as to be opened (eg. .ttf or .otf).
skip_vf can be used to skip building some of the variable fonts defined in
the input designspace. It's a predicate that takes as argument the name
of the variable font and returns `bool`.
Always returns a Dict[str, TTFont] keyed by VariableFontDescriptor.name
|
build_many
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/__init__.py
|
MIT
|
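Usage sketch for build_many with a version-5 designspace (placeholder path); the skip_vf predicate shown is only an example filter:
from fontTools.designspaceLib import DesignSpaceDocument
from fontTools.varLib import build_many

doc = DesignSpaceDocument.fromfile("MyFamily.designspace")  # placeholder path
vfs = build_many(doc, skip_vf=lambda vf_name: vf_name.endswith("Italic"))
for name, vf in vfs.items():
    vf.save(f"{name}.ttf")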
def build(
designspace,
master_finder=lambda s: s,
exclude=[],
optimize=True,
colr_layer_reuse=True,
drop_implied_oncurves=False,
):
"""
Build variation font from a designspace file.
If master_finder is set, it should be a callable that takes master
filename as found in designspace file and map it to master font
binary as to be opened (eg. .ttf or .otf).
"""
if hasattr(designspace, "sources"): # Assume a DesignspaceDocument
pass
else: # Assume a file path
designspace = DesignSpaceDocument.fromfile(designspace)
ds = load_designspace(designspace)
log.info("Building variable font")
log.info("Loading master fonts")
master_fonts = load_masters(designspace, master_finder)
# TODO: 'master_ttfs' is unused except for return value, remove later
master_ttfs = []
for master in master_fonts:
try:
master_ttfs.append(master.reader.file.name)
except AttributeError:
master_ttfs.append(None) # in-memory fonts have no path
if drop_implied_oncurves and "glyf" in master_fonts[ds.base_idx]:
drop_count = drop_implied_oncurve_points(*master_fonts)
log.info(
"Dropped %s on-curve points from simple glyphs in the 'glyf' table",
drop_count,
)
# Copy the base master to work from it
vf = deepcopy(master_fonts[ds.base_idx])
if "DSIG" in vf:
del vf["DSIG"]
# TODO append masters as named-instances as well; needs .designspace change.
fvar = _add_fvar(vf, ds.axes, ds.instances)
if "STAT" not in exclude:
_add_stat(vf)
# Map from axis names to axis tags...
normalized_master_locs = [
{ds.axes[k].tag: v for k, v in loc.items()} for loc in ds.normalized_master_locs
]
# From here on, we use fvar axes only
axisTags = [axis.axisTag for axis in fvar.axes]
# Assume single-model for now.
model = models.VariationModel(normalized_master_locs, axisOrder=axisTags)
assert 0 == model.mapping[ds.base_idx]
log.info("Building variations tables")
if "avar" not in exclude:
_add_avar(vf, ds.axes, ds.axisMappings, axisTags)
if "BASE" not in exclude and "BASE" in vf:
_add_BASE(vf, model, master_fonts, axisTags)
if "MVAR" not in exclude:
_add_MVAR(vf, model, master_fonts, axisTags)
if "HVAR" not in exclude:
_add_HVAR(vf, model, master_fonts, axisTags)
if "VVAR" not in exclude and "vmtx" in vf:
_add_VVAR(vf, model, master_fonts, axisTags)
if "GDEF" not in exclude or "GPOS" not in exclude:
_merge_OTL(vf, model, master_fonts, axisTags)
if "gvar" not in exclude and "glyf" in vf:
_add_gvar(vf, model, master_fonts, optimize=optimize)
if "cvar" not in exclude and "glyf" in vf:
_merge_TTHinting(vf, model, master_fonts)
if "GSUB" not in exclude and ds.rules:
featureTags = _feature_variations_tags(ds)
_add_GSUB_feature_variations(
vf, ds.axes, ds.internal_axis_supports, ds.rules, featureTags
)
if "CFF2" not in exclude and ("CFF " in vf or "CFF2" in vf):
_add_CFF2(vf, model, master_fonts)
if "post" in vf:
# set 'post' to format 2 to keep the glyph names dropped from CFF2
post = vf["post"]
if post.formatType != 2.0:
post.formatType = 2.0
post.extraNames = []
post.mapping = {}
if "COLR" not in exclude and "COLR" in vf and vf["COLR"].version > 0:
_add_COLR(vf, model, master_fonts, axisTags, colr_layer_reuse)
set_default_weight_width_slant(
vf, location={axis.axisTag: axis.defaultValue for axis in vf["fvar"].axes}
)
for tag in exclude:
if tag in vf:
del vf[tag]
# TODO: Only return vf for 4.0+, the rest is unused.
return vf, model, master_ttfs
|
Build variation font from a designspace file.
If master_finder is set, it should be a callable that takes master
filename as found in designspace file and map it to master font
binary as to be opened (eg. .ttf or .otf).
|
build
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/__init__.py
|
MIT
|
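Usage sketch for build; the designspace path and the master_finder mapping (UFO source name to a compiled .ttf next to it) are assumptions for illustration:
from fontTools.varLib import build

vf, model, master_ttfs = build(
    "MyFamily.designspace",                              # placeholder path
    master_finder=lambda s: s.replace(".ufo", ".ttf"),   # hypothetical mapping
)
vf.save("MyFamily-VF.ttf")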
def load_masters(designspace, master_finder=lambda s: s):
"""Ensure that all SourceDescriptor.font attributes have an appropriate TTFont
object loaded, or else open TTFont objects from the SourceDescriptor.path
attributes.
The paths can point to either an OpenType font, a TTX file, or a UFO. In the
latter case, use the provided master_finder callable to map from UFO paths to
the respective master font binaries (e.g. .ttf, .otf or .ttx).
Return list of master TTFont objects in the same order they are listed in the
DesignSpaceDocument.
"""
for master in designspace.sources:
# If a SourceDescriptor has a layer name, demand that the compiled TTFont
# be supplied by the caller. This spares us from modifying MasterFinder.
if master.layerName and master.font is None:
raise VarLibValidationError(
f"Designspace source '{master.name or '<Unknown>'}' specified a "
"layer name but lacks the required TTFont object in the 'font' "
"attribute."
)
return designspace.loadSourceFonts(_open_font, master_finder=master_finder)
|
Ensure that all SourceDescriptor.font attributes have an appropriate TTFont
object loaded, or else open TTFont objects from the SourceDescriptor.path
attributes.
The paths can point to either an OpenType font, a TTX file, or a UFO. In the
latter case, use the provided master_finder callable to map from UFO paths to
the respective master font binaries (e.g. .ttf, .otf or .ttx).
Return list of master TTFont objects in the same order they are listed in the
DesignSpaceDocument.
|
load_masters
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/__init__.py
|
MIT
|
def addGSUBFeatureVariations(vf, designspace, featureTags=(), *, log_enabled=False):
"""Add GSUB FeatureVariations table to variable font, based on DesignSpace rules.
Args:
vf: A TTFont object representing the variable font.
designspace: A DesignSpaceDocument object.
featureTags: Optional feature tag(s) to use for the FeatureVariations records.
If unset, the key 'com.github.fonttools.varLib.featureVarsFeatureTag' is
looked up in the DS <lib> and used; otherwise the default is 'rclt' if
the <rules processing="last"> attribute is set, else 'rvrn'.
See <https://fonttools.readthedocs.io/en/latest/designspaceLib/xml.html#rules-element>
log_enabled: If True, log info about DS axes and sources. Default is False, as
the same info may have already been logged as part of varLib.build.
"""
ds = load_designspace(designspace, log_enabled=log_enabled)
if not ds.rules:
return
if not featureTags:
featureTags = _feature_variations_tags(ds)
_add_GSUB_feature_variations(
vf, ds.axes, ds.internal_axis_supports, ds.rules, featureTags
)
|
Add GSUB FeatureVariations table to variable font, based on DesignSpace rules.
Args:
vf: A TTFont object representing the variable font.
designspace: A DesignSpaceDocument object.
featureTags: Optional feature tag(s) to use for the FeatureVariations records.
If unset, the key 'com.github.fonttools.varLib.featureVarsFeatureTag' is
looked up in the DS <lib> and used; otherwise the default is 'rclt' if
the <rules processing="last"> attribute is set, else 'rvrn'.
See <https://fonttools.readthedocs.io/en/latest/designspaceLib/xml.html#rules-element>
log_enabled: If True, log info about DS axes and sources. Default is False, as
the same info may have already been logged as part of varLib.build.
|
addGSUBFeatureVariations
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/__init__.py
|
MIT
|
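Usage sketch for addGSUBFeatureVariations on an already-built variable font (placeholder paths; passing an explicit 'rvrn' tag simply overrides the default lookup described in the docstring):
from fontTools.designspaceLib import DesignSpaceDocument
from fontTools.ttLib import TTFont
from fontTools.varLib import addGSUBFeatureVariations

vf = TTFont("MyFamily-VF.ttf")                              # placeholder path
doc = DesignSpaceDocument.fromfile("MyFamily.designspace")  # placeholder path
addGSUBFeatureVariations(vf, doc, featureTags=["rvrn"])
vf.save("MyFamily-VF.rules.ttf")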
def main(args=None):
"""Build variable fonts from a designspace file and masters"""
from argparse import ArgumentParser
from fontTools import configLogger
parser = ArgumentParser(prog="varLib", description=main.__doc__)
parser.add_argument("designspace")
output_group = parser.add_mutually_exclusive_group()
output_group.add_argument(
"-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file"
)
output_group.add_argument(
"-d",
"--output-dir",
metavar="OUTPUTDIR",
default=None,
help="output dir (default: same as input designspace file)",
)
parser.add_argument(
"-x",
metavar="TAG",
dest="exclude",
action="append",
default=[],
help="exclude table",
)
parser.add_argument(
"--disable-iup",
dest="optimize",
action="store_false",
help="do not perform IUP optimization",
)
parser.add_argument(
"--no-colr-layer-reuse",
dest="colr_layer_reuse",
action="store_false",
help="do not rebuild variable COLR table to optimize COLR layer reuse",
)
parser.add_argument(
"--drop-implied-oncurves",
action="store_true",
help=(
"drop on-curve points that can be implied when exactly in the middle of "
"two off-curve points (only applies to TrueType fonts)"
),
)
parser.add_argument(
"--master-finder",
default="master_ttf_interpolatable/{stem}.ttf",
help=(
"templated string used for finding binary font "
"files given the source file names defined in the "
"designspace document. The following special strings "
"are defined: {fullname} is the absolute source file "
"name; {basename} is the file name without its "
"directory; {stem} is the basename without the file "
"extension; {ext} is the source file extension; "
"{dirname} is the directory of the absolute file "
'name. The default value is "%(default)s".'
),
)
parser.add_argument(
"--variable-fonts",
default=".*",
metavar="VF_NAME",
help=(
"Filter the list of variable fonts produced from the input "
"Designspace v5 file. By default all listed variable fonts are "
"generated. To generate a specific variable font (or variable fonts) "
'that match a given "name" attribute, you can pass as argument '
"the full name or a regular expression. E.g.: --variable-fonts "
'"MyFontVF_WeightOnly"; or --variable-fonts "MyFontVFItalic_.*".'
),
)
logging_group = parser.add_mutually_exclusive_group(required=False)
logging_group.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
logging_group.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off."
)
options = parser.parse_args(args)
configLogger(
level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
)
designspace_filename = options.designspace
designspace = DesignSpaceDocument.fromfile(designspace_filename)
vf_descriptors = designspace.getVariableFonts()
if not vf_descriptors:
parser.error(f"No variable fonts in given designspace {designspace.path!r}")
vfs_to_build = []
for vf in vf_descriptors:
# Skip variable fonts that do not match the user's inclusion regex if given.
if not fullmatch(options.variable_fonts, vf.name):
continue
vfs_to_build.append(vf)
if not vfs_to_build:
parser.error(f"No variable fonts matching {options.variable_fonts!r}")
if options.outfile is not None and len(vfs_to_build) > 1:
parser.error(
"can't specify -o because there are multiple VFs to build; "
"use --output-dir, or select a single VF with --variable-fonts"
)
output_dir = options.output_dir
if output_dir is None:
output_dir = os.path.dirname(designspace_filename)
vf_name_to_output_path = {}
if len(vfs_to_build) == 1 and options.outfile is not None:
vf_name_to_output_path[vfs_to_build[0].name] = options.outfile
else:
for vf in vfs_to_build:
filename = vf.filename if vf.filename is not None else vf.name + ".{ext}"
vf_name_to_output_path[vf.name] = os.path.join(output_dir, filename)
finder = MasterFinder(options.master_finder)
vfs = build_many(
designspace,
finder,
exclude=options.exclude,
optimize=options.optimize,
colr_layer_reuse=options.colr_layer_reuse,
drop_implied_oncurves=options.drop_implied_oncurves,
)
for vf_name, vf in vfs.items():
ext = "otf" if vf.sfntVersion == "OTTO" else "ttf"
output_path = vf_name_to_output_path[vf_name].format(ext=ext)
output_dir = os.path.dirname(output_path)
if output_dir:
os.makedirs(output_dir, exist_ok=True)
log.info("Saving variation font %s", output_path)
vf.save(output_path)
|
Build variable fonts from a designspace file and masters
|
main
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/__init__.py
|
MIT
|
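The same entry point can be driven programmatically by passing an argument list; this sketch uses only flags defined by the parser above, with placeholder file names:
from fontTools.varLib import main

main([
    "MyFamily.designspace",                                    # placeholder path
    "--output-dir", "variable_ttf",
    "--master-finder", "master_ttf_interpolatable/{stem}.ttf",
    "-x", "MVAR",                                              # example table exclusion
])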
def updateNameTable(varfont, axisLimits):
"""Update instantiated variable font's name table using STAT AxisValues.
Raises ValueError if the STAT table is missing or an Axis Value table is
missing for requested axis locations.
First, collect all STAT AxisValues that match the new default axis locations
(excluding "elided" ones); concatenate the strings in design axis order,
while giving priority to "synthetic" values (Format 4), to form the
typographic subfamily name associated with the new default instance.
Finally, update all related records in the name table, making sure that
legacy family/sub-family names conform to the R/I/B/BI (Regular, Italic,
Bold, Bold Italic) naming model.
Example: Updating a partial variable font:
| >>> ttFont = TTFont("OpenSans[wdth,wght].ttf")
| >>> updateNameTable(ttFont, {"wght": (400, 900), "wdth": 75})
The name table records will be updated in the following manner:
NameID 1 familyName: "Open Sans" --> "Open Sans Condensed"
NameID 2 subFamilyName: "Regular" --> "Regular"
NameID 3 Unique font identifier: "3.000;GOOG;OpenSans-Regular" --> \
"3.000;GOOG;OpenSans-Condensed"
NameID 4 Full font name: "Open Sans Regular" --> "Open Sans Condensed"
NameID 6 PostScript name: "OpenSans-Regular" --> "OpenSans-Condensed"
NameID 16 Typographic Family name: None --> "Open Sans"
NameID 17 Typographic Subfamily name: None --> "Condensed"
References:
https://docs.microsoft.com/en-us/typography/opentype/spec/stat
https://docs.microsoft.com/en-us/typography/opentype/spec/name#name-ids
"""
from . import AxisLimits, axisValuesFromAxisLimits
if "STAT" not in varfont:
raise ValueError("Cannot update name table since there is no STAT table.")
stat = varfont["STAT"].table
if not stat.AxisValueArray:
raise ValueError("Cannot update name table since there are no STAT Axis Values")
fvar = varfont["fvar"]
# The updated name table will reflect the new 'zero origin' of the font.
# If we're instantiating a partial font, we will populate the unpinned
# axes with their default axis values from fvar.
axisLimits = AxisLimits(axisLimits).limitAxesAndPopulateDefaults(varfont)
partialDefaults = axisLimits.defaultLocation()
fvarDefaults = {a.axisTag: a.defaultValue for a in fvar.axes}
defaultAxisCoords = AxisLimits({**fvarDefaults, **partialDefaults})
assert all(v.minimum == v.maximum for v in defaultAxisCoords.values())
axisValueTables = axisValuesFromAxisLimits(stat, defaultAxisCoords)
checkAxisValuesExist(stat, axisValueTables, defaultAxisCoords.pinnedLocation())
# ignore "elidable" axis values, should be omitted in application font menus.
axisValueTables = [
v for v in axisValueTables if not v.Flags & ELIDABLE_AXIS_VALUE_NAME
]
axisValueTables = _sortAxisValues(axisValueTables)
_updateNameRecords(varfont, axisValueTables)
|
Update instantiated variable font's name table using STAT AxisValues.
Raises ValueError if the STAT table is missing or an Axis Value table is
missing for requested axis locations.
First, collect all STAT AxisValues that match the new default axis locations
(excluding "elided" ones); concatenate the strings in design axis order,
while giving priority to "synthetic" values (Format 4), to form the
typographic subfamily name associated with the new default instance.
Finally, update all related records in the name table, making sure that
legacy family/sub-family names conform to the R/I/B/BI (Regular, Italic,
Bold, Bold Italic) naming model.
Example: Updating a partial variable font:
| >>> ttFont = TTFont("OpenSans[wdth,wght].ttf")
| >>> updateNameTable(ttFont, {"wght": (400, 900), "wdth": 75})
The name table records will be updated in the following manner:
NameID 1 familyName: "Open Sans" --> "Open Sans Condensed"
NameID 2 subFamilyName: "Regular" --> "Regular"
NameID 3 Unique font identifier: "3.000;GOOG;OpenSans-Regular" --> "3.000;GOOG;OpenSans-Condensed"
NameID 4 Full font name: "Open Sans Regular" --> "Open Sans Condensed"
NameID 6 PostScript name: "OpenSans-Regular" --> "OpenSans-Condensed"
NameID 16 Typographic Family name: None --> "Open Sans"
NameID 17 Typographic Subfamily name: None --> "Condensed"
References:
https://docs.microsoft.com/en-us/typography/opentype/spec/stat
https://docs.microsoft.com/en-us/typography/opentype/spec/name#name-ids
|
updateNameTable
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/instancer/names.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/instancer/names.py
|
MIT
|
def expand(
cls,
v: Union[
"AxisTriple",
float, # pin axis at single value, same as min==default==max
Tuple[float, float], # (min, max), restrict axis and keep default
Tuple[float, float, float], # (min, default, max)
],
) -> "AxisTriple":
"""Convert a single value or a tuple into an AxisTriple.
If the input is a single value, it is interpreted as a pin at that value.
If the input is a tuple, it is interpreted as (min, max) or (min, default, max).
"""
if isinstance(v, cls):
return v
if isinstance(v, (int, float)):
return cls(v, v, v)
try:
n = len(v)
except TypeError as e:
raise ValueError(
f"expected float, 2- or 3-tuple of floats; got {type(v)}: {v!r}"
) from e
default = None
if n == 2:
minimum, maximum = v
elif n >= 3:
return cls(*v)
else:
raise ValueError(f"expected sequence of 2 or 3; got {n}: {v!r}")
return cls(minimum, default, maximum)
|
Convert a single value or a tuple into an AxisTriple.
If the input is a single value, it is interpreted as a pin at that value.
If the input is a tuple, it is interpreted as (min, max) or (min, default, max).
|
expand
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/instancer/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/instancer/__init__.py
|
MIT
|
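Sketch of the three accepted input shapes for AxisTriple.expand (values are arbitrary; AxisTriple is assumed importable from fontTools.varLib.instancer, as the path above suggests):
from fontTools.varLib.instancer import AxisTriple

pin = AxisTriple.expand(400)              # minimum == default == maximum == 400
span = AxisTriple.expand((75, 125))       # (min, max); default is left unset here
full = AxisTriple.expand((75, 100, 125))  # explicit (min, default, max)
print(pin, span, full)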
def limitRangeAndPopulateDefaults(self, fvarTriple) -> "AxisTriple":
"""Return a new AxisTriple with the default value filled in.
Set default to fvar axis default if the latter is within the min/max range,
otherwise set default to the min or max value, whichever is closer to the
fvar axis default.
If the default value is already set, return self.
"""
minimum = self.minimum
if minimum is None:
minimum = fvarTriple[0]
default = self.default
if default is None:
default = fvarTriple[1]
maximum = self.maximum
if maximum is None:
maximum = fvarTriple[2]
minimum = max(minimum, fvarTriple[0])
maximum = max(maximum, fvarTriple[0])
minimum = min(minimum, fvarTriple[2])
maximum = min(maximum, fvarTriple[2])
default = max(minimum, min(maximum, default))
return AxisTriple(minimum, default, maximum)
|
Return a new AxisTriple with the default value filled in.
Set default to fvar axis default if the latter is within the min/max range,
otherwise set default to the min or max value, whichever is closer to the
fvar axis default.
If the default value is already set, return self.
|
limitRangeAndPopulateDefaults
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/instancer/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/instancer/__init__.py
|
MIT
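Worked sketch with illustrative numbers (not taken from the source): a wght axis whose fvar triple is (100, 400, 900).
from fontTools.varLib.instancer import AxisTriple

fvar_wght = (100, 400, 900)
# fvar default 400 lies inside the requested range, so it is kept:
AxisTriple(300, None, 700).limitRangeAndPopulateDefaults(fvar_wght)  # -> (300, 400, 700)
# fvar default 400 falls below the requested range, so the nearer bound becomes the default:
AxisTriple(500, None, 700).limitRangeAndPopulateDefaults(fvar_wght)  # -> (500, 500, 700)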
|
def instantiateGvarGlyph(varfont, glyphname, axisLimits, optimize=True):
"""Remove?
https://github.com/fonttools/fonttools/pull/2266"""
gvar = varfont["gvar"]
glyf = varfont["glyf"]
hMetrics = varfont["hmtx"].metrics
vMetrics = getattr(varfont.get("vmtx"), "metrics", None)
_instantiateGvarGlyph(
glyphname, glyf, gvar, hMetrics, vMetrics, axisLimits, optimize=optimize
)
|
Remove?
https://github.com/fonttools/fonttools/pull/2266
|
instantiateGvarGlyph
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/instancer/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/instancer/__init__.py
|
MIT
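Hedged sketch of calling this helper directly; the font path and axis value are illustrative, and AxisLimits(...).normalize(varfont) is assumed to produce the normalized limits the function expects.
from fontTools.ttLib import TTFont
from fontTools.varLib import instancer

varfont = TTFont("MyVF.ttf")  # hypothetical variable font
# gvar deltas live in normalized (-1..+1) axis space, hence the normalize step.
normLimits = instancer.AxisLimits({"wght": 700}).normalize(varfont)
instancer.instantiateGvarGlyph(varfont, "A", normLimits, optimize=True)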
|
def verticalMetricsKeptInSync(varfont):
"""Ensure hhea vertical metrics stay in sync with OS/2 ones after instancing.
When applying MVAR deltas to the OS/2 table, if the ascender, descender and
line gap change but they were the same as the respective hhea metrics in the
original font, this context manager ensures that hhea metrics also get updated
accordingly.
The MVAR spec only has tags for the OS/2 metrics, but it is common in fonts
to have the hhea metrics be equal to those for compat reasons.
https://learn.microsoft.com/en-us/typography/opentype/spec/mvar
https://googlefonts.github.io/gf-guide/metrics.html#7-hhea-and-typo-metrics-should-be-equal
https://github.com/fonttools/fonttools/issues/3297
"""
current_os2_vmetrics = [
getattr(varfont["OS/2"], attr)
for attr in ("sTypoAscender", "sTypoDescender", "sTypoLineGap")
]
metrics_are_synced = current_os2_vmetrics == [
getattr(varfont["hhea"], attr) for attr in ("ascender", "descender", "lineGap")
]
yield metrics_are_synced
if metrics_are_synced:
new_os2_vmetrics = [
getattr(varfont["OS/2"], attr)
for attr in ("sTypoAscender", "sTypoDescender", "sTypoLineGap")
]
if current_os2_vmetrics != new_os2_vmetrics:
for attr, value in zip(
("ascender", "descender", "lineGap"), new_os2_vmetrics
):
setattr(varfont["hhea"], attr, value)
|
Ensure hhea vertical metrics stay in sync with OS/2 ones after instancing.
When applying MVAR deltas to the OS/2 table, if the ascender, descender and
line gap change but they were the same as the respective hhea metrics in the
original font, this context manager ensures that hhea metrics also get updated
accordingly.
The MVAR spec only has tags for the OS/2 metrics, but it is common in fonts
to have the hhea metrics be equal to those for compat reasons.
https://learn.microsoft.com/en-us/typography/opentype/spec/mvar
https://googlefonts.github.io/gf-guide/metrics.html#7-hhea-and-typo-metrics-should-be-equal
https://github.com/fonttools/fonttools/issues/3297
|
verticalMetricsKeptInSync
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/instancer/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/instancer/__init__.py
|
MIT
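Sketch of the intended call pattern: in the instancer, the OS/2 MVAR deltas are applied inside this guard so hhea follows along when both sets of metrics started out equal. This assumes the contextlib.contextmanager decorator that the extracted code above omits, and reuses the hypothetical font and limits from the previous sketch.
from fontTools.ttLib import TTFont
from fontTools.varLib import instancer

varfont = TTFont("MyVF.ttf")  # hypothetical variable font
normLimits = instancer.AxisLimits({"wght": 700}).normalize(varfont)
if "MVAR" in varfont:
    # The guard records whether OS/2 and hhea metrics were equal beforehand;
    # if so, hhea is updated to match OS/2 after the deltas are applied.
    with instancer.verticalMetricsKeptInSync(varfont):
        instancer.instantiateMVAR(varfont, normLimits)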
|
def setRibbiBits(font):
"""Set the `head.macStyle` and `OS/2.fsSelection` style bits
appropriately."""
english_ribbi_style = font["name"].getName(names.NameID.SUBFAMILY_NAME, 3, 1, 0x409)
if english_ribbi_style is None:
return
styleMapStyleName = english_ribbi_style.toStr().lower()
if styleMapStyleName not in {"regular", "bold", "italic", "bold italic"}:
return
if styleMapStyleName == "bold":
font["head"].macStyle = 0b01
elif styleMapStyleName == "bold italic":
font["head"].macStyle = 0b11
elif styleMapStyleName == "italic":
font["head"].macStyle = 0b10
selection = font["OS/2"].fsSelection
# First clear...
selection &= ~(1 << 0)
selection &= ~(1 << 5)
selection &= ~(1 << 6)
# ...then re-set the bits.
if styleMapStyleName == "regular":
selection |= 1 << 6
elif styleMapStyleName == "bold":
selection |= 1 << 5
elif styleMapStyleName == "italic":
selection |= 1 << 0
elif styleMapStyleName == "bold italic":
selection |= 1 << 0
selection |= 1 << 5
font["OS/2"].fsSelection = selection
|
Set the `head.macStyle` and `OS/2.fsSelection` style bits
appropriately.
|
setRibbiBits
|
python
|
fonttools/fonttools
|
Lib/fontTools/varLib/instancer/__init__.py
|
https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/varLib/instancer/__init__.py
|
MIT
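Usage sketch (font path illustrative). For reference, the bits touched are: head.macStyle bit 0 = Bold, bit 1 = Italic; OS/2.fsSelection bit 0 = ITALIC, bit 5 = BOLD, bit 6 = REGULAR.
from fontTools.ttLib import TTFont
from fontTools.varLib import instancer

font = TTFont("MyFamily-BoldItalic.ttf")  # hypothetical static instance with a RIBBI subfamily name
instancer.setRibbiBits(font)
print(bin(font["head"].macStyle), bin(font["OS/2"].fsSelection))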
|