repository_name
stringlengths 5
67
| func_path_in_repository
stringlengths 4
234
| func_name
stringlengths 0
314
| whole_func_string
stringlengths 52
3.87M
| language
stringclasses 6
values | func_code_string
stringlengths 52
3.87M
| func_documentation_string
stringlengths 1
47.2k
| func_code_url
stringlengths 85
339
|
---|---|---|---|---|---|---|---|
DinoTools/python-overpy | overpy/__init__.py | Result.append | def append(self, element):
"""
Append a new element to the result.
:param element: The element to append
:type element: overpy.Element
"""
if is_valid_type(element, Element):
self._class_collection_map[element.__class__].setdefault(element.id, element) | python | def append(self, element):
"""
Append a new element to the result.
:param element: The element to append
:type element: overpy.Element
"""
if is_valid_type(element, Element):
self._class_collection_map[element.__class__].setdefault(element.id, element) | Append a new element to the result.
:param element: The element to append
:type element: overpy.Element | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L289-L297 |
DinoTools/python-overpy | overpy/__init__.py | Result.get_elements | def get_elements(self, filter_cls, elem_id=None):
"""
Get a list of elements from the result and filter the element type by a class.
:param filter_cls:
:param elem_id: ID of the object
:type elem_id: Integer
:return: List of available elements
:rtype: List
"""
result = []
if elem_id is not None:
try:
result = [self._class_collection_map[filter_cls][elem_id]]
except KeyError:
result = []
else:
for e in self._class_collection_map[filter_cls].values():
result.append(e)
return result | python | def get_elements(self, filter_cls, elem_id=None):
"""
Get a list of elements from the result and filter the element type by a class.
:param filter_cls:
:param elem_id: ID of the object
:type elem_id: Integer
:return: List of available elements
:rtype: List
"""
result = []
if elem_id is not None:
try:
result = [self._class_collection_map[filter_cls][elem_id]]
except KeyError:
result = []
else:
for e in self._class_collection_map[filter_cls].values():
result.append(e)
return result | Get a list of elements from the result and filter the element type by a class.
:param filter_cls:
:param elem_id: ID of the object
:type elem_id: Integer
:return: List of available elements
:rtype: List | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L299-L318 |
DinoTools/python-overpy | overpy/__init__.py | Result.from_json | def from_json(cls, data, api=None):
"""
Create a new instance and load data from json object.
:param data: JSON data returned by the Overpass API
:type data: Dict
:param api:
:type api: overpy.Overpass
:return: New instance of Result object
:rtype: overpy.Result
"""
result = cls(api=api)
for elem_cls in [Node, Way, Relation, Area]:
for element in data.get("elements", []):
e_type = element.get("type")
if hasattr(e_type, "lower") and e_type.lower() == elem_cls._type_value:
result.append(elem_cls.from_json(element, result=result))
return result | python | def from_json(cls, data, api=None):
"""
Create a new instance and load data from json object.
:param data: JSON data returned by the Overpass API
:type data: Dict
:param api:
:type api: overpy.Overpass
:return: New instance of Result object
:rtype: overpy.Result
"""
result = cls(api=api)
for elem_cls in [Node, Way, Relation, Area]:
for element in data.get("elements", []):
e_type = element.get("type")
if hasattr(e_type, "lower") and e_type.lower() == elem_cls._type_value:
result.append(elem_cls.from_json(element, result=result))
return result | Create a new instance and load data from json object.
:param data: JSON data returned by the Overpass API
:type data: Dict
:param api:
:type api: overpy.Overpass
:return: New instance of Result object
:rtype: overpy.Result | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L341-L359 |
DinoTools/python-overpy | overpy/__init__.py | Result.from_xml | def from_xml(cls, data, api=None, parser=None):
"""
Create a new instance and load data from xml data or object.
.. note::
If parser is set to None, the functions tries to find the best parse.
By default the SAX parser is chosen if a string is provided as data.
The parser is set to DOM if an xml.etree.ElementTree.Element is provided as data value.
:param data: Root element
:type data: str | xml.etree.ElementTree.Element
:param api: The instance to query additional information if required.
:type api: Overpass
:param parser: Specify the parser to use(DOM or SAX)(Default: None = autodetect, defaults to SAX)
:type parser: Integer | None
:return: New instance of Result object
:rtype: Result
"""
if parser is None:
if isinstance(data, str):
parser = XML_PARSER_SAX
else:
parser = XML_PARSER_DOM
result = cls(api=api)
if parser == XML_PARSER_DOM:
import xml.etree.ElementTree as ET
if isinstance(data, str):
root = ET.fromstring(data)
elif isinstance(data, ET.Element):
root = data
else:
raise exception.OverPyException("Unable to detect data type.")
for elem_cls in [Node, Way, Relation, Area]:
for child in root:
if child.tag.lower() == elem_cls._type_value:
result.append(elem_cls.from_xml(child, result=result))
elif parser == XML_PARSER_SAX:
if PY2:
from StringIO import StringIO
else:
from io import StringIO
source = StringIO(data)
sax_handler = OSMSAXHandler(result)
parser = make_parser()
parser.setContentHandler(sax_handler)
parser.parse(source)
else:
# ToDo: better exception
raise Exception("Unknown XML parser")
return result | python | def from_xml(cls, data, api=None, parser=None):
"""
Create a new instance and load data from xml data or object.
.. note::
If parser is set to None, the functions tries to find the best parse.
By default the SAX parser is chosen if a string is provided as data.
The parser is set to DOM if an xml.etree.ElementTree.Element is provided as data value.
:param data: Root element
:type data: str | xml.etree.ElementTree.Element
:param api: The instance to query additional information if required.
:type api: Overpass
:param parser: Specify the parser to use(DOM or SAX)(Default: None = autodetect, defaults to SAX)
:type parser: Integer | None
:return: New instance of Result object
:rtype: Result
"""
if parser is None:
if isinstance(data, str):
parser = XML_PARSER_SAX
else:
parser = XML_PARSER_DOM
result = cls(api=api)
if parser == XML_PARSER_DOM:
import xml.etree.ElementTree as ET
if isinstance(data, str):
root = ET.fromstring(data)
elif isinstance(data, ET.Element):
root = data
else:
raise exception.OverPyException("Unable to detect data type.")
for elem_cls in [Node, Way, Relation, Area]:
for child in root:
if child.tag.lower() == elem_cls._type_value:
result.append(elem_cls.from_xml(child, result=result))
elif parser == XML_PARSER_SAX:
if PY2:
from StringIO import StringIO
else:
from io import StringIO
source = StringIO(data)
sax_handler = OSMSAXHandler(result)
parser = make_parser()
parser.setContentHandler(sax_handler)
parser.parse(source)
else:
# ToDo: better exception
raise Exception("Unknown XML parser")
return result | Create a new instance and load data from xml data or object.
.. note::
If parser is set to None, the functions tries to find the best parse.
By default the SAX parser is chosen if a string is provided as data.
The parser is set to DOM if an xml.etree.ElementTree.Element is provided as data value.
:param data: Root element
:type data: str | xml.etree.ElementTree.Element
:param api: The instance to query additional information if required.
:type api: Overpass
:param parser: Specify the parser to use(DOM or SAX)(Default: None = autodetect, defaults to SAX)
:type parser: Integer | None
:return: New instance of Result object
:rtype: Result | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L362-L414 |
DinoTools/python-overpy | overpy/__init__.py | Result.get_area | def get_area(self, area_id, resolve_missing=False):
"""
Get an area by its ID.
:param area_id: The area ID
:type area_id: Integer
:param resolve_missing: Query the Overpass API if the area is missing in the result set.
:return: The area
:rtype: overpy.Area
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the area can't be resolved.
"""
areas = self.get_areas(area_id=area_id)
if len(areas) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing area is disabled")
query = ("\n"
"[out:json];\n"
"area({area_id});\n"
"out body;\n"
)
query = query.format(
area_id=area_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
areas = self.get_areas(area_id=area_id)
if len(areas) == 0:
raise exception.DataIncomplete("Unable to resolve requested areas")
return areas[0] | python | def get_area(self, area_id, resolve_missing=False):
"""
Get an area by its ID.
:param area_id: The area ID
:type area_id: Integer
:param resolve_missing: Query the Overpass API if the area is missing in the result set.
:return: The area
:rtype: overpy.Area
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the area can't be resolved.
"""
areas = self.get_areas(area_id=area_id)
if len(areas) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing area is disabled")
query = ("\n"
"[out:json];\n"
"area({area_id});\n"
"out body;\n"
)
query = query.format(
area_id=area_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
areas = self.get_areas(area_id=area_id)
if len(areas) == 0:
raise exception.DataIncomplete("Unable to resolve requested areas")
return areas[0] | Get an area by its ID.
:param area_id: The area ID
:type area_id: Integer
:param resolve_missing: Query the Overpass API if the area is missing in the result set.
:return: The area
:rtype: overpy.Area
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the area can't be resolved. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L416-L449 |
DinoTools/python-overpy | overpy/__init__.py | Result.get_areas | def get_areas(self, area_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Area
:param area_id: The Id of the area
:type area_id: Integer
:return: List of elements
"""
return self.get_elements(Area, elem_id=area_id, **kwargs) | python | def get_areas(self, area_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Area
:param area_id: The Id of the area
:type area_id: Integer
:return: List of elements
"""
return self.get_elements(Area, elem_id=area_id, **kwargs) | Alias for get_elements() but filter the result by Area
:param area_id: The Id of the area
:type area_id: Integer
:return: List of elements | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L451-L459 |
DinoTools/python-overpy | overpy/__init__.py | Result.get_node | def get_node(self, node_id, resolve_missing=False):
"""
Get a node by its ID.
:param node_id: The node ID
:type node_id: Integer
:param resolve_missing: Query the Overpass API if the node is missing in the result set.
:return: The node
:rtype: overpy.Node
:raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved.
"""
nodes = self.get_nodes(node_id=node_id)
if len(nodes) == 0:
if not resolve_missing:
raise exception.DataIncomplete("Resolve missing nodes is disabled")
query = ("\n"
"[out:json];\n"
"node({node_id});\n"
"out body;\n"
)
query = query.format(
node_id=node_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
nodes = self.get_nodes(node_id=node_id)
if len(nodes) == 0:
raise exception.DataIncomplete("Unable to resolve all nodes")
return nodes[0] | python | def get_node(self, node_id, resolve_missing=False):
"""
Get a node by its ID.
:param node_id: The node ID
:type node_id: Integer
:param resolve_missing: Query the Overpass API if the node is missing in the result set.
:return: The node
:rtype: overpy.Node
:raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved.
"""
nodes = self.get_nodes(node_id=node_id)
if len(nodes) == 0:
if not resolve_missing:
raise exception.DataIncomplete("Resolve missing nodes is disabled")
query = ("\n"
"[out:json];\n"
"node({node_id});\n"
"out body;\n"
)
query = query.format(
node_id=node_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
nodes = self.get_nodes(node_id=node_id)
if len(nodes) == 0:
raise exception.DataIncomplete("Unable to resolve all nodes")
return nodes[0] | Get a node by its ID.
:param node_id: The node ID
:type node_id: Integer
:param resolve_missing: Query the Overpass API if the node is missing in the result set.
:return: The node
:rtype: overpy.Node
:raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L461-L494 |
DinoTools/python-overpy | overpy/__init__.py | Result.get_nodes | def get_nodes(self, node_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Node()
:param node_id: The Id of the node
:type node_id: Integer
:return: List of elements
"""
return self.get_elements(Node, elem_id=node_id, **kwargs) | python | def get_nodes(self, node_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Node()
:param node_id: The Id of the node
:type node_id: Integer
:return: List of elements
"""
return self.get_elements(Node, elem_id=node_id, **kwargs) | Alias for get_elements() but filter the result by Node()
:param node_id: The Id of the node
:type node_id: Integer
:return: List of elements | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L496-L504 |
DinoTools/python-overpy | overpy/__init__.py | Result.get_relation | def get_relation(self, rel_id, resolve_missing=False):
"""
Get a relation by its ID.
:param rel_id: The relation ID
:type rel_id: Integer
:param resolve_missing: Query the Overpass API if the relation is missing in the result set.
:return: The relation
:rtype: overpy.Relation
:raises overpy.exception.DataIncomplete: The requested relation is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the relation can't be resolved.
"""
relations = self.get_relations(rel_id=rel_id)
if len(relations) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing relations is disabled")
query = ("\n"
"[out:json];\n"
"relation({relation_id});\n"
"out body;\n"
)
query = query.format(
relation_id=rel_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
relations = self.get_relations(rel_id=rel_id)
if len(relations) == 0:
raise exception.DataIncomplete("Unable to resolve requested reference")
return relations[0] | python | def get_relation(self, rel_id, resolve_missing=False):
"""
Get a relation by its ID.
:param rel_id: The relation ID
:type rel_id: Integer
:param resolve_missing: Query the Overpass API if the relation is missing in the result set.
:return: The relation
:rtype: overpy.Relation
:raises overpy.exception.DataIncomplete: The requested relation is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the relation can't be resolved.
"""
relations = self.get_relations(rel_id=rel_id)
if len(relations) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing relations is disabled")
query = ("\n"
"[out:json];\n"
"relation({relation_id});\n"
"out body;\n"
)
query = query.format(
relation_id=rel_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
relations = self.get_relations(rel_id=rel_id)
if len(relations) == 0:
raise exception.DataIncomplete("Unable to resolve requested reference")
return relations[0] | Get a relation by its ID.
:param rel_id: The relation ID
:type rel_id: Integer
:param resolve_missing: Query the Overpass API if the relation is missing in the result set.
:return: The relation
:rtype: overpy.Relation
:raises overpy.exception.DataIncomplete: The requested relation is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the relation can't be resolved. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L506-L539 |
DinoTools/python-overpy | overpy/__init__.py | Result.get_relations | def get_relations(self, rel_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Relation
:param rel_id: Id of the relation
:type rel_id: Integer
:return: List of elements
"""
return self.get_elements(Relation, elem_id=rel_id, **kwargs) | python | def get_relations(self, rel_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Relation
:param rel_id: Id of the relation
:type rel_id: Integer
:return: List of elements
"""
return self.get_elements(Relation, elem_id=rel_id, **kwargs) | Alias for get_elements() but filter the result by Relation
:param rel_id: Id of the relation
:type rel_id: Integer
:return: List of elements | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L541-L549 |
DinoTools/python-overpy | overpy/__init__.py | Result.get_way | def get_way(self, way_id, resolve_missing=False):
"""
Get a way by its ID.
:param way_id: The way ID
:type way_id: Integer
:param resolve_missing: Query the Overpass API if the way is missing in the result set.
:return: The way
:rtype: overpy.Way
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved.
"""
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing way is disabled")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"out body;\n"
)
query = query.format(
way_id=way_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
raise exception.DataIncomplete("Unable to resolve requested way")
return ways[0] | python | def get_way(self, way_id, resolve_missing=False):
"""
Get a way by its ID.
:param way_id: The way ID
:type way_id: Integer
:param resolve_missing: Query the Overpass API if the way is missing in the result set.
:return: The way
:rtype: overpy.Way
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved.
"""
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing way is disabled")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"out body;\n"
)
query = query.format(
way_id=way_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
raise exception.DataIncomplete("Unable to resolve requested way")
return ways[0] | Get a way by its ID.
:param way_id: The way ID
:type way_id: Integer
:param resolve_missing: Query the Overpass API if the way is missing in the result set.
:return: The way
:rtype: overpy.Way
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L551-L584 |
DinoTools/python-overpy | overpy/__init__.py | Result.get_ways | def get_ways(self, way_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Way
:param way_id: The Id of the way
:type way_id: Integer
:return: List of elements
"""
return self.get_elements(Way, elem_id=way_id, **kwargs) | python | def get_ways(self, way_id=None, **kwargs):
"""
Alias for get_elements() but filter the result by Way
:param way_id: The Id of the way
:type way_id: Integer
:return: List of elements
"""
return self.get_elements(Way, elem_id=way_id, **kwargs) | Alias for get_elements() but filter the result by Way
:param way_id: The Id of the way
:type way_id: Integer
:return: List of elements | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L586-L594 |
DinoTools/python-overpy | overpy/__init__.py | Element.get_center_from_json | def get_center_from_json(cls, data):
"""
Get center information from json data
:param data: json data
:return: tuple with two elements: lat and lon
:rtype: tuple
"""
center_lat = None
center_lon = None
center = data.get("center")
if isinstance(center, dict):
center_lat = center.get("lat")
center_lon = center.get("lon")
if center_lat is None or center_lon is None:
raise ValueError("Unable to get lat or lon of way center.")
center_lat = Decimal(center_lat)
center_lon = Decimal(center_lon)
return (center_lat, center_lon) | python | def get_center_from_json(cls, data):
"""
Get center information from json data
:param data: json data
:return: tuple with two elements: lat and lon
:rtype: tuple
"""
center_lat = None
center_lon = None
center = data.get("center")
if isinstance(center, dict):
center_lat = center.get("lat")
center_lon = center.get("lon")
if center_lat is None or center_lon is None:
raise ValueError("Unable to get lat or lon of way center.")
center_lat = Decimal(center_lat)
center_lon = Decimal(center_lon)
return (center_lat, center_lon) | Get center information from json data
:param data: json data
:return: tuple with two elements: lat and lon
:rtype: tuple | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L631-L649 |
DinoTools/python-overpy | overpy/__init__.py | Area.from_xml | def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
area_id = child.attrib.get("id")
if area_id is not None:
area_id = int(area_id)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(area_id=area_id, attributes=attributes, tags=tags, result=result) | python | def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
area_id = child.attrib.get("id")
if area_id is not None:
area_id = int(area_id)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(area_id=area_id, attributes=attributes, tags=tags, result=result) | Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L717-L758 |
DinoTools/python-overpy | overpy/__init__.py | Node.from_json | def from_json(cls, data, result=None):
"""
Create new Node element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Node
:rtype: overpy.Node
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
node_id = data.get("id")
lat = data.get("lat")
lon = data.get("lon")
attributes = {}
ignore = ["type", "id", "lat", "lon", "tags"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result) | python | def from_json(cls, data, result=None):
"""
Create new Node element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Node
:rtype: overpy.Node
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
node_id = data.get("id")
lat = data.get("lat")
lon = data.get("lon")
attributes = {}
ignore = ["type", "id", "lat", "lon", "tags"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result) | Create new Node element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Node
:rtype: overpy.Node
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L788-L819 |
DinoTools/python-overpy | overpy/__init__.py | Node.from_xml | def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Node
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
node_id = child.attrib.get("id")
if node_id is not None:
node_id = int(node_id)
lat = child.attrib.get("lat")
if lat is not None:
lat = Decimal(lat)
lon = child.attrib.get("lon")
if lon is not None:
lon = Decimal(lon)
attributes = {}
ignore = ["id", "lat", "lon"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result) | python | def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Node
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
node_id = child.attrib.get("id")
if node_id is not None:
node_id = int(node_id)
lat = child.attrib.get("lat")
if lat is not None:
lat = Decimal(lat)
lon = child.attrib.get("lon")
if lon is not None:
lon = Decimal(lon)
attributes = {}
ignore = ["id", "lat", "lon"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result) | Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Node
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L822-L868 |
DinoTools/python-overpy | overpy/__init__.py | Way.get_nodes | def get_nodes(self, resolve_missing=False):
"""
Get the nodes defining the geometry of the way
:param resolve_missing: Try to resolve missing nodes.
:type resolve_missing: Boolean
:return: List of nodes
:rtype: List of overpy.Node
:raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved.
"""
result = []
resolved = False
for node_id in self._node_ids:
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is not None:
result.append(node)
continue
if not resolve_missing:
raise exception.DataIncomplete("Resolve missing nodes is disabled")
# We tried to resolve the data but some nodes are still missing
if resolved:
raise exception.DataIncomplete("Unable to resolve all nodes")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"node(w);\n"
"out body;\n"
)
query = query.format(
way_id=self.id
)
tmp_result = self._result.api.query(query)
self._result.expand(tmp_result)
resolved = True
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is None:
raise exception.DataIncomplete("Unable to resolve all nodes")
result.append(node)
return result | python | def get_nodes(self, resolve_missing=False):
"""
Get the nodes defining the geometry of the way
:param resolve_missing: Try to resolve missing nodes.
:type resolve_missing: Boolean
:return: List of nodes
:rtype: List of overpy.Node
:raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved.
"""
result = []
resolved = False
for node_id in self._node_ids:
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is not None:
result.append(node)
continue
if not resolve_missing:
raise exception.DataIncomplete("Resolve missing nodes is disabled")
# We tried to resolve the data but some nodes are still missing
if resolved:
raise exception.DataIncomplete("Unable to resolve all nodes")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"node(w);\n"
"out body;\n"
)
query = query.format(
way_id=self.id
)
tmp_result = self._result.api.query(query)
self._result.expand(tmp_result)
resolved = True
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is None:
raise exception.DataIncomplete("Unable to resolve all nodes")
result.append(node)
return result | Get the nodes defining the geometry of the way
:param resolve_missing: Try to resolve missing nodes.
:type resolve_missing: Boolean
:return: List of nodes
:rtype: List of overpy.Node
:raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L909-L963 |
DinoTools/python-overpy | overpy/__init__.py | Way.from_json | def from_json(cls, data, result=None):
"""
Create new Way element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Way
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
way_id = data.get("id")
node_ids = data.get("nodes")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
attributes = {}
ignore = ["center", "id", "nodes", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
node_ids=node_ids,
tags=tags,
result=result,
way_id=way_id
) | python | def from_json(cls, data, result=None):
"""
Create new Way element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Way
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
way_id = data.get("id")
node_ids = data.get("nodes")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
attributes = {}
ignore = ["center", "id", "nodes", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
node_ids=node_ids,
tags=tags,
result=result,
way_id=way_id
) | Create new Way element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Way
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L966-L1005 |
DinoTools/python-overpy | overpy/__init__.py | Way.from_xml | def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
node_ids = []
center_lat = None
center_lon = None
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "nd":
ref_id = sub_child.attrib.get("ref")
if ref_id is None:
raise ValueError("Unable to find required ref value.")
ref_id = int(ref_id)
node_ids.append(ref_id)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
way_id = child.attrib.get("id")
if way_id is not None:
way_id = int(way_id)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(way_id=way_id, center_lat=center_lat, center_lon=center_lon,
attributes=attributes, node_ids=node_ids, tags=tags, result=result) | python | def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
node_ids = []
center_lat = None
center_lon = None
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "nd":
ref_id = sub_child.attrib.get("ref")
if ref_id is None:
raise ValueError("Unable to find required ref value.")
ref_id = int(ref_id)
node_ids.append(ref_id)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
way_id = child.attrib.get("id")
if way_id is not None:
way_id = int(way_id)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(way_id=way_id, center_lat=center_lat, center_lon=center_lon,
attributes=attributes, node_ids=node_ids, tags=tags, result=result) | Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Way
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1008-L1061 |
DinoTools/python-overpy | overpy/__init__.py | Relation.from_json | def from_json(cls, data, result=None):
"""
Create new Relation element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Relation
:rtype: overpy.Relation
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
rel_id = data.get("id")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
members = []
supported_members = [RelationNode, RelationWay, RelationRelation]
for member in data.get("members", []):
type_value = member.get("type")
for member_cls in supported_members:
if member_cls._type_value == type_value:
members.append(
member_cls.from_json(
member,
result=result
)
)
attributes = {}
ignore = ["id", "members", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
rel_id=rel_id,
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
members=members,
tags=tags,
result=result
) | python | def from_json(cls, data, result=None):
"""
Create new Relation element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Relation
:rtype: overpy.Relation
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
rel_id = data.get("id")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
members = []
supported_members = [RelationNode, RelationWay, RelationRelation]
for member in data.get("members", []):
type_value = member.get("type")
for member_cls in supported_members:
if member_cls._type_value == type_value:
members.append(
member_cls.from_json(
member,
result=result
)
)
attributes = {}
ignore = ["id", "members", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
rel_id=rel_id,
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
members=members,
tags=tags,
result=result
) | Create new Relation element from JSON data
:param data: Element data from JSON
:type data: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of Relation
:rtype: overpy.Relation
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1092-L1144 |
DinoTools/python-overpy | overpy/__init__.py | Relation.from_xml | def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Relation
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
members = []
center_lat = None
center_lon = None
supported_members = [RelationNode, RelationWay, RelationRelation, RelationArea]
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "member":
type_value = sub_child.attrib.get("type")
for member_cls in supported_members:
if member_cls._type_value == type_value:
members.append(
member_cls.from_xml(
sub_child,
result=result
)
)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
rel_id = child.attrib.get("id")
if rel_id is not None:
rel_id = int(rel_id)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(
rel_id=rel_id,
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
members=members,
tags=tags,
result=result
) | python | def from_xml(cls, child, result=None):
"""
Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Relation
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
members = []
center_lat = None
center_lon = None
supported_members = [RelationNode, RelationWay, RelationRelation, RelationArea]
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "member":
type_value = sub_child.attrib.get("type")
for member_cls in supported_members:
if member_cls._type_value == type_value:
members.append(
member_cls.from_xml(
sub_child,
result=result
)
)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
rel_id = child.attrib.get("id")
if rel_id is not None:
rel_id = int(rel_id)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(
rel_id=rel_id,
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
members=members,
tags=tags,
result=result
) | Create new way element from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this node belongs to
:type result: overpy.Result
:return: New Way oject
:rtype: overpy.Relation
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1147-L1211 |
DinoTools/python-overpy | overpy/__init__.py | RelationMember.from_json | def from_json(cls, data, result=None):
"""
Create new RelationMember element from JSON data
:param child: Element data from JSON
:type child: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of RelationMember
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
ref = data.get("ref")
role = data.get("role")
attributes = {}
ignore = ["geometry", "type", "ref", "role"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
geometry = data.get("geometry")
if isinstance(geometry, list):
geometry_orig = geometry
geometry = []
for v in geometry_orig:
geometry.append(
RelationWayGeometryValue(
lat=v.get("lat"),
lon=v.get("lon")
)
)
else:
geometry = None
return cls(
attributes=attributes,
geometry=geometry,
ref=ref,
role=role,
result=result
) | python | def from_json(cls, data, result=None):
"""
Create new RelationMember element from JSON data
:param child: Element data from JSON
:type child: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of RelationMember
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
ref = data.get("ref")
role = data.get("role")
attributes = {}
ignore = ["geometry", "type", "ref", "role"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
geometry = data.get("geometry")
if isinstance(geometry, list):
geometry_orig = geometry
geometry = []
for v in geometry_orig:
geometry.append(
RelationWayGeometryValue(
lat=v.get("lat"),
lon=v.get("lon")
)
)
else:
geometry = None
return cls(
attributes=attributes,
geometry=geometry,
ref=ref,
role=role,
result=result
) | Create new RelationMember element from JSON data
:param child: Element data from JSON
:type child: Dict
:param result: The result this element belongs to
:type result: overpy.Result
:return: New instance of RelationMember
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1234-L1282 |
DinoTools/python-overpy | overpy/__init__.py | RelationMember.from_xml | def from_xml(cls, child, result=None):
"""
Create new RelationMember from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this element belongs to
:type result: overpy.Result
:return: New relation member oject
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
"""
if child.attrib.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
ref = child.attrib.get("ref")
if ref is not None:
ref = int(ref)
role = child.attrib.get("role")
attributes = {}
ignore = ["geometry", "ref", "role", "type"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
geometry = None
for sub_child in child:
if sub_child.tag.lower() == "nd":
if geometry is None:
geometry = []
geometry.append(
RelationWayGeometryValue(
lat=Decimal(sub_child.attrib["lat"]),
lon=Decimal(sub_child.attrib["lon"])
)
)
return cls(
attributes=attributes,
geometry=geometry,
ref=ref,
role=role,
result=result
) | python | def from_xml(cls, child, result=None):
"""
Create new RelationMember from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this element belongs to
:type result: overpy.Result
:return: New relation member oject
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
"""
if child.attrib.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
ref = child.attrib.get("ref")
if ref is not None:
ref = int(ref)
role = child.attrib.get("role")
attributes = {}
ignore = ["geometry", "ref", "role", "type"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
geometry = None
for sub_child in child:
if sub_child.tag.lower() == "nd":
if geometry is None:
geometry = []
geometry.append(
RelationWayGeometryValue(
lat=Decimal(sub_child.attrib["lat"]),
lon=Decimal(sub_child.attrib["lon"])
)
)
return cls(
attributes=attributes,
geometry=geometry,
ref=ref,
role=role,
result=result
) | Create new RelationMember from XML data
:param child: XML node to be parsed
:type child: xml.etree.ElementTree.Element
:param result: The result this element belongs to
:type result: overpy.Result
:return: New relation member oject
:rtype: overpy.RelationMember
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1285-L1333 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler.startElement | def startElement(self, name, attrs):
"""
Handle opening elements.
:param name: Name of the element
:type name: String
:param attrs: Attributes of the element
:type attrs: Dict
"""
if name in self.ignore_start:
return
try:
handler = getattr(self, '_handle_start_%s' % name)
except AttributeError:
raise KeyError("Unknown element start '%s'" % name)
handler(attrs) | python | def startElement(self, name, attrs):
"""
Handle opening elements.
:param name: Name of the element
:type name: String
:param attrs: Attributes of the element
:type attrs: Dict
"""
if name in self.ignore_start:
return
try:
handler = getattr(self, '_handle_start_%s' % name)
except AttributeError:
raise KeyError("Unknown element start '%s'" % name)
handler(attrs) | Handle opening elements.
:param name: Name of the element
:type name: String
:param attrs: Attributes of the element
:type attrs: Dict | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1405-L1420 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler.endElement | def endElement(self, name):
"""
Handle closing elements
:param name: Name of the element
:type name: String
"""
if name in self.ignore_end:
return
try:
handler = getattr(self, '_handle_end_%s' % name)
except AttributeError:
raise KeyError("Unknown element end '%s'" % name)
handler() | python | def endElement(self, name):
"""
Handle closing elements
:param name: Name of the element
:type name: String
"""
if name in self.ignore_end:
return
try:
handler = getattr(self, '_handle_end_%s' % name)
except AttributeError:
raise KeyError("Unknown element end '%s'" % name)
handler() | Handle closing elements
:param name: Name of the element
:type name: String | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1422-L1435 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_start_center | def _handle_start_center(self, attrs):
"""
Handle opening center element
:param attrs: Attributes of the element
:type attrs: Dict
"""
center_lat = attrs.get("lat")
center_lon = attrs.get("lon")
if center_lat is None or center_lon is None:
raise ValueError("Unable to get lat or lon of way center.")
self._curr["center_lat"] = Decimal(center_lat)
self._curr["center_lon"] = Decimal(center_lon) | python | def _handle_start_center(self, attrs):
"""
Handle opening center element
:param attrs: Attributes of the element
:type attrs: Dict
"""
center_lat = attrs.get("lat")
center_lon = attrs.get("lon")
if center_lat is None or center_lon is None:
raise ValueError("Unable to get lat or lon of way center.")
self._curr["center_lat"] = Decimal(center_lat)
self._curr["center_lon"] = Decimal(center_lon) | Handle opening center element
:param attrs: Attributes of the element
:type attrs: Dict | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1437-L1449 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_start_tag | def _handle_start_tag(self, attrs):
"""
Handle opening tag element
:param attrs: Attributes of the element
:type attrs: Dict
"""
try:
tag_key = attrs['k']
except KeyError:
raise ValueError("Tag without name/key.")
self._curr['tags'][tag_key] = attrs.get('v') | python | def _handle_start_tag(self, attrs):
"""
Handle opening tag element
:param attrs: Attributes of the element
:type attrs: Dict
"""
try:
tag_key = attrs['k']
except KeyError:
raise ValueError("Tag without name/key.")
self._curr['tags'][tag_key] = attrs.get('v') | Handle opening tag element
:param attrs: Attributes of the element
:type attrs: Dict | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1451-L1462 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_start_node | def _handle_start_node(self, attrs):
"""
Handle opening node element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'attributes': dict(attrs),
'lat': None,
'lon': None,
'node_id': None,
'tags': {}
}
if attrs.get('id', None) is not None:
self._curr['node_id'] = int(attrs['id'])
del self._curr['attributes']['id']
if attrs.get('lat', None) is not None:
self._curr['lat'] = Decimal(attrs['lat'])
del self._curr['attributes']['lat']
if attrs.get('lon', None) is not None:
self._curr['lon'] = Decimal(attrs['lon'])
del self._curr['attributes']['lon'] | python | def _handle_start_node(self, attrs):
"""
Handle opening node element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'attributes': dict(attrs),
'lat': None,
'lon': None,
'node_id': None,
'tags': {}
}
if attrs.get('id', None) is not None:
self._curr['node_id'] = int(attrs['id'])
del self._curr['attributes']['id']
if attrs.get('lat', None) is not None:
self._curr['lat'] = Decimal(attrs['lat'])
del self._curr['attributes']['lat']
if attrs.get('lon', None) is not None:
self._curr['lon'] = Decimal(attrs['lon'])
del self._curr['attributes']['lon'] | Handle opening node element
:param attrs: Attributes of the element
:type attrs: Dict | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1464-L1486 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_end_node | def _handle_end_node(self):
"""
Handle closing node element
"""
self._result.append(Node(result=self._result, **self._curr))
self._curr = {} | python | def _handle_end_node(self):
"""
Handle closing node element
"""
self._result.append(Node(result=self._result, **self._curr))
self._curr = {} | Handle closing node element | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1488-L1493 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_start_way | def _handle_start_way(self, attrs):
"""
Handle opening way element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'center_lat': None,
'center_lon': None,
'attributes': dict(attrs),
'node_ids': [],
'tags': {},
'way_id': None
}
if attrs.get('id', None) is not None:
self._curr['way_id'] = int(attrs['id'])
del self._curr['attributes']['id'] | python | def _handle_start_way(self, attrs):
"""
Handle opening way element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'center_lat': None,
'center_lon': None,
'attributes': dict(attrs),
'node_ids': [],
'tags': {},
'way_id': None
}
if attrs.get('id', None) is not None:
self._curr['way_id'] = int(attrs['id'])
del self._curr['attributes']['id'] | Handle opening way element
:param attrs: Attributes of the element
:type attrs: Dict | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1495-L1512 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_end_way | def _handle_end_way(self):
"""
Handle closing way element
"""
self._result.append(Way(result=self._result, **self._curr))
self._curr = {} | python | def _handle_end_way(self):
"""
Handle closing way element
"""
self._result.append(Way(result=self._result, **self._curr))
self._curr = {} | Handle closing way element | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1514-L1519 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_start_area | def _handle_start_area(self, attrs):
"""
Handle opening area element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'attributes': dict(attrs),
'tags': {},
'area_id': None
}
if attrs.get('id', None) is not None:
self._curr['area_id'] = int(attrs['id'])
del self._curr['attributes']['id'] | python | def _handle_start_area(self, attrs):
"""
Handle opening area element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'attributes': dict(attrs),
'tags': {},
'area_id': None
}
if attrs.get('id', None) is not None:
self._curr['area_id'] = int(attrs['id'])
del self._curr['attributes']['id'] | Handle opening area element
:param attrs: Attributes of the element
:type attrs: Dict | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1521-L1535 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_end_area | def _handle_end_area(self):
"""
Handle closing area element
"""
self._result.append(Area(result=self._result, **self._curr))
self._curr = {} | python | def _handle_end_area(self):
"""
Handle closing area element
"""
self._result.append(Area(result=self._result, **self._curr))
self._curr = {} | Handle closing area element | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1537-L1542 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_start_nd | def _handle_start_nd(self, attrs):
"""
Handle opening nd element
:param attrs: Attributes of the element
:type attrs: Dict
"""
if isinstance(self.cur_relation_member, RelationWay):
if self.cur_relation_member.geometry is None:
self.cur_relation_member.geometry = []
self.cur_relation_member.geometry.append(
RelationWayGeometryValue(
lat=Decimal(attrs["lat"]),
lon=Decimal(attrs["lon"])
)
)
else:
try:
node_ref = attrs['ref']
except KeyError:
raise ValueError("Unable to find required ref value.")
self._curr['node_ids'].append(int(node_ref)) | python | def _handle_start_nd(self, attrs):
"""
Handle opening nd element
:param attrs: Attributes of the element
:type attrs: Dict
"""
if isinstance(self.cur_relation_member, RelationWay):
if self.cur_relation_member.geometry is None:
self.cur_relation_member.geometry = []
self.cur_relation_member.geometry.append(
RelationWayGeometryValue(
lat=Decimal(attrs["lat"]),
lon=Decimal(attrs["lon"])
)
)
else:
try:
node_ref = attrs['ref']
except KeyError:
raise ValueError("Unable to find required ref value.")
self._curr['node_ids'].append(int(node_ref)) | Handle opening nd element
:param attrs: Attributes of the element
:type attrs: Dict | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1544-L1565 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_start_relation | def _handle_start_relation(self, attrs):
"""
Handle opening relation element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'attributes': dict(attrs),
'members': [],
'rel_id': None,
'tags': {}
}
if attrs.get('id', None) is not None:
self._curr['rel_id'] = int(attrs['id'])
del self._curr['attributes']['id'] | python | def _handle_start_relation(self, attrs):
"""
Handle opening relation element
:param attrs: Attributes of the element
:type attrs: Dict
"""
self._curr = {
'attributes': dict(attrs),
'members': [],
'rel_id': None,
'tags': {}
}
if attrs.get('id', None) is not None:
self._curr['rel_id'] = int(attrs['id'])
del self._curr['attributes']['id'] | Handle opening relation element
:param attrs: Attributes of the element
:type attrs: Dict | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1567-L1582 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_end_relation | def _handle_end_relation(self):
"""
Handle closing relation element
"""
self._result.append(Relation(result=self._result, **self._curr))
self._curr = {} | python | def _handle_end_relation(self):
"""
Handle closing relation element
"""
self._result.append(Relation(result=self._result, **self._curr))
self._curr = {} | Handle closing relation element | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1584-L1589 |
DinoTools/python-overpy | overpy/__init__.py | OSMSAXHandler._handle_start_member | def _handle_start_member(self, attrs):
"""
Handle opening member element
:param attrs: Attributes of the element
:type attrs: Dict
"""
params = {
# ToDo: Parse attributes
'attributes': {},
'ref': None,
'result': self._result,
'role': None
}
if attrs.get('ref', None):
params['ref'] = int(attrs['ref'])
if attrs.get('role', None):
params['role'] = attrs['role']
cls_map = {
"area": RelationArea,
"node": RelationNode,
"relation": RelationRelation,
"way": RelationWay
}
cls = cls_map.get(attrs["type"])
if cls is None:
raise ValueError("Undefined type for member: '%s'" % attrs['type'])
self.cur_relation_member = cls(**params)
self._curr['members'].append(self.cur_relation_member) | python | def _handle_start_member(self, attrs):
"""
Handle opening member element
:param attrs: Attributes of the element
:type attrs: Dict
"""
params = {
# ToDo: Parse attributes
'attributes': {},
'ref': None,
'result': self._result,
'role': None
}
if attrs.get('ref', None):
params['ref'] = int(attrs['ref'])
if attrs.get('role', None):
params['role'] = attrs['role']
cls_map = {
"area": RelationArea,
"node": RelationNode,
"relation": RelationRelation,
"way": RelationWay
}
cls = cls_map.get(attrs["type"])
if cls is None:
raise ValueError("Undefined type for member: '%s'" % attrs['type'])
self.cur_relation_member = cls(**params)
self._curr['members'].append(self.cur_relation_member) | Handle opening member element
:param attrs: Attributes of the element
:type attrs: Dict | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/__init__.py#L1591-L1622 |
DinoTools/python-overpy | overpy/helper.py | get_street | def get_street(street, areacode, api=None):
"""
Retrieve streets in a given bounding area
:param overpy.Overpass api: First street of intersection
:param String street: Name of street
:param String areacode: The OSM id of the bounding area
:return: Parsed result
:raises overpy.exception.OverPyException: If something bad happens.
"""
if api is None:
api = overpy.Overpass()
query = """
area(%s)->.location;
(
way[highway][name="%s"](area.location);
- (
way[highway=service](area.location);
way[highway=track](area.location);
);
);
out body;
>;
out skel qt;
"""
data = api.query(query % (areacode, street))
return data | python | def get_street(street, areacode, api=None):
"""
Retrieve streets in a given bounding area
:param overpy.Overpass api: First street of intersection
:param String street: Name of street
:param String areacode: The OSM id of the bounding area
:return: Parsed result
:raises overpy.exception.OverPyException: If something bad happens.
"""
if api is None:
api = overpy.Overpass()
query = """
area(%s)->.location;
(
way[highway][name="%s"](area.location);
- (
way[highway=service](area.location);
way[highway=track](area.location);
);
);
out body;
>;
out skel qt;
"""
data = api.query(query % (areacode, street))
return data | Retrieve streets in a given bounding area
:param overpy.Overpass api: First street of intersection
:param String street: Name of street
:param String areacode: The OSM id of the bounding area
:return: Parsed result
:raises overpy.exception.OverPyException: If something bad happens. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/helper.py#L6-L35 |
DinoTools/python-overpy | overpy/helper.py | get_intersection | def get_intersection(street1, street2, areacode, api=None):
"""
Retrieve intersection of two streets in a given bounding area
:param overpy.Overpass api: First street of intersection
:param String street1: Name of first street of intersection
:param String street2: Name of second street of intersection
:param String areacode: The OSM id of the bounding area
:return: List of intersections
:raises overpy.exception.OverPyException: If something bad happens.
"""
if api is None:
api = overpy.Overpass()
query = """
area(%s)->.location;
(
way[highway][name="%s"](area.location); node(w)->.n1;
way[highway][name="%s"](area.location); node(w)->.n2;
);
node.n1.n2;
out meta;
"""
data = api.query(query % (areacode, street1, street2))
return data.get_nodes() | python | def get_intersection(street1, street2, areacode, api=None):
"""
Retrieve intersection of two streets in a given bounding area
:param overpy.Overpass api: First street of intersection
:param String street1: Name of first street of intersection
:param String street2: Name of second street of intersection
:param String areacode: The OSM id of the bounding area
:return: List of intersections
:raises overpy.exception.OverPyException: If something bad happens.
"""
if api is None:
api = overpy.Overpass()
query = """
area(%s)->.location;
(
way[highway][name="%s"](area.location); node(w)->.n1;
way[highway][name="%s"](area.location); node(w)->.n2;
);
node.n1.n2;
out meta;
"""
data = api.query(query % (areacode, street1, street2))
return data.get_nodes() | Retrieve intersection of two streets in a given bounding area
:param overpy.Overpass api: First street of intersection
:param String street1: Name of first street of intersection
:param String street2: Name of second street of intersection
:param String areacode: The OSM id of the bounding area
:return: List of intersections
:raises overpy.exception.OverPyException: If something bad happens. | https://github.com/DinoTools/python-overpy/blob/db8f80eeb1b4d1405816bd62c16ddb3364e0c46d/overpy/helper.py#L38-L64 |
clemtoy/pptree | pptree/pptree.py | print_tree | def print_tree(current_node, childattr='children', nameattr='name', indent='', last='updown'):
if hasattr(current_node, nameattr):
name = lambda node: getattr(node, nameattr)
else:
name = lambda node: str(node)
children = lambda node: getattr(node, childattr)
nb_children = lambda node: sum(nb_children(child) for child in children(node)) + 1
size_branch = {child: nb_children(child) for child in children(current_node)}
""" Creation of balanced lists for "up" branch and "down" branch. """
up = sorted(children(current_node), key=lambda node: nb_children(node))
down = []
while up and sum(size_branch[node] for node in down) < sum(size_branch[node] for node in up):
down.append(up.pop())
""" Printing of "up" branch. """
for child in up:
next_last = 'up' if up.index(child) is 0 else ''
next_indent = '{0}{1}{2}'.format(indent, ' ' if 'up' in last else '│', ' ' * len(name(current_node)))
print_tree(child, childattr, nameattr, next_indent, next_last)
""" Printing of current node. """
if last == 'up': start_shape = '┌'
elif last == 'down': start_shape = '└'
elif last == 'updown': start_shape = ' '
else: start_shape = '├'
if up: end_shape = '┤'
elif down: end_shape = '┐'
else: end_shape = ''
print('{0}{1}{2}{3}'.format(indent, start_shape, name(current_node), end_shape))
""" Printing of "down" branch. """
for child in down:
next_last = 'down' if down.index(child) is len(down) - 1 else ''
next_indent = '{0}{1}{2}'.format(indent, ' ' if 'down' in last else '│', ' ' * len(name(current_node)))
print_tree(child, childattr, nameattr, next_indent, next_last) | python | def print_tree(current_node, childattr='children', nameattr='name', indent='', last='updown'):
if hasattr(current_node, nameattr):
name = lambda node: getattr(node, nameattr)
else:
name = lambda node: str(node)
children = lambda node: getattr(node, childattr)
nb_children = lambda node: sum(nb_children(child) for child in children(node)) + 1
size_branch = {child: nb_children(child) for child in children(current_node)}
""" Creation of balanced lists for "up" branch and "down" branch. """
up = sorted(children(current_node), key=lambda node: nb_children(node))
down = []
while up and sum(size_branch[node] for node in down) < sum(size_branch[node] for node in up):
down.append(up.pop())
""" Printing of "up" branch. """
for child in up:
next_last = 'up' if up.index(child) is 0 else ''
next_indent = '{0}{1}{2}'.format(indent, ' ' if 'up' in last else '│', ' ' * len(name(current_node)))
print_tree(child, childattr, nameattr, next_indent, next_last)
""" Printing of current node. """
if last == 'up': start_shape = '┌'
elif last == 'down': start_shape = '└'
elif last == 'updown': start_shape = ' '
else: start_shape = '├'
if up: end_shape = '┤'
elif down: end_shape = '┐'
else: end_shape = ''
print('{0}{1}{2}{3}'.format(indent, start_shape, name(current_node), end_shape))
""" Printing of "down" branch. """
for child in down:
next_last = 'down' if down.index(child) is len(down) - 1 else ''
next_indent = '{0}{1}{2}'.format(indent, ' ' if 'down' in last else '│', ' ' * len(name(current_node)))
print_tree(child, childattr, nameattr, next_indent, next_last) | Creation of balanced lists for "up" branch and "down" branch. | https://github.com/clemtoy/pptree/blob/16099da42b1da6d03b3a0ed0e27d0b6e90947a54/pptree/pptree.py#L16-L55 |
architv/chcli | challenges/cli.py | check_platforms | def check_platforms(platforms):
"""Checks if the platforms have a valid platform code"""
if len(platforms) > 0:
return all(platform in PLATFORM_IDS for platform in platforms)
return True | python | def check_platforms(platforms):
"""Checks if the platforms have a valid platform code"""
if len(platforms) > 0:
return all(platform in PLATFORM_IDS for platform in platforms)
return True | Checks if the platforms have a valid platform code | https://github.com/architv/chcli/blob/e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7/challenges/cli.py#L17-L21 |
architv/chcli | challenges/cli.py | main | def main(active, upcoming, hiring, short, goto, platforms, time):
"""A CLI for active and upcoming programming challenges from various platforms"""
if not check_platforms(platforms):
raise IncorrectParametersException('Invlaid code for platform. Please check the platform ids')
try:
if active:
active_challenges = active_contests(platforms)
if goto:
webbrowser.open(active_challenges[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(active_challenges, "active")
return
if upcoming:
upcoming_challenges = upcoming_contests(platforms, time)
if goto:
goto = int(goto)
webbrowser.open(upcoming_challenges[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(upcoming_challenges, "upcoming")
return
if hiring:
hiring_challenges = hiring_contests()
if goto:
webbrowser.open(hiring_challenges[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(hiring_challenges, "hiring")
return
if short:
short_challenges = short_contests(platforms)
if goto:
goto = int(goto)
webbrowser.open(short_challenges[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(short_challenges, "short")
return
all_contests = get_all_contests(platforms, time)
if goto:
webbrowser.open(all_contests[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(all_contests, "all")
except IncorrectParametersException as e:
click.secho(e.message, fg="red", bold=True) | python | def main(active, upcoming, hiring, short, goto, platforms, time):
"""A CLI for active and upcoming programming challenges from various platforms"""
if not check_platforms(platforms):
raise IncorrectParametersException('Invlaid code for platform. Please check the platform ids')
try:
if active:
active_challenges = active_contests(platforms)
if goto:
webbrowser.open(active_challenges[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(active_challenges, "active")
return
if upcoming:
upcoming_challenges = upcoming_contests(platforms, time)
if goto:
goto = int(goto)
webbrowser.open(upcoming_challenges[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(upcoming_challenges, "upcoming")
return
if hiring:
hiring_challenges = hiring_contests()
if goto:
webbrowser.open(hiring_challenges[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(hiring_challenges, "hiring")
return
if short:
short_challenges = short_contests(platforms)
if goto:
goto = int(goto)
webbrowser.open(short_challenges[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(short_challenges, "short")
return
all_contests = get_all_contests(platforms, time)
if goto:
webbrowser.open(all_contests[goto - 1]["contest_url"], new=2)
else:
writers.write_contests(all_contests, "all")
except IncorrectParametersException as e:
click.secho(e.message, fg="red", bold=True) | A CLI for active and upcoming programming challenges from various platforms | https://github.com/architv/chcli/blob/e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7/challenges/cli.py#L117-L164 |
architv/chcli | challenges/writers.py | colors | def colors():
"""Creates an enum for colors"""
enums = dict(
TIME_LEFT="red",
CONTEST_NAME="yellow",
HOST="green",
MISC="blue",
TIME_TO_START="green",
)
return type('Enum', (), enums) | python | def colors():
"""Creates an enum for colors"""
enums = dict(
TIME_LEFT="red",
CONTEST_NAME="yellow",
HOST="green",
MISC="blue",
TIME_TO_START="green",
)
return type('Enum', (), enums) | Creates an enum for colors | https://github.com/architv/chcli/blob/e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7/challenges/writers.py#L7-L17 |
architv/chcli | challenges/writers.py | challenge | def challenge():
"""Creates an enum for contest type"""
enums = dict(
ACTIVE="active",
UPCOMING="upcoming",
HIRING="hiring",
ALL="all",
SHORT="short",
)
return type('Enum', (), enums) | python | def challenge():
"""Creates an enum for contest type"""
enums = dict(
ACTIVE="active",
UPCOMING="upcoming",
HIRING="hiring",
ALL="all",
SHORT="short",
)
return type('Enum', (), enums) | Creates an enum for contest type | https://github.com/architv/chcli/blob/e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7/challenges/writers.py#L20-L30 |
architv/chcli | challenges/writers.py | get_time_string | def get_time_string(contest, contest_type):
"""Return a string with time for the contest to begin/end"""
if contest_type == challenge().ACTIVE:
time_diff = time_difference(contest["end"])
elif contest_type == challenge().UPCOMING:
time_diff = time_difference(contest["start"])
elif contest_type in [challenge().HIRING, challenge().SHORT, challenge().ALL]:
try:
time_diff = time_difference(contest["start"])
except:
time_diff = time_difference(contest["end"])
time_diff_string = ""
if time_diff.days > 0:
time_diff_string = "{0} days {1} hours".format(time_diff.days, time_diff.hours)
elif time_diff.hours > 0:
time_diff_string = "{0} hours {1} minutes".format(time_diff.hours, time_diff.minutes)
else:
time_diff_string = "{0} minutes".format(time_diff.minutes)
return time_diff_string | python | def get_time_string(contest, contest_type):
"""Return a string with time for the contest to begin/end"""
if contest_type == challenge().ACTIVE:
time_diff = time_difference(contest["end"])
elif contest_type == challenge().UPCOMING:
time_diff = time_difference(contest["start"])
elif contest_type in [challenge().HIRING, challenge().SHORT, challenge().ALL]:
try:
time_diff = time_difference(contest["start"])
except:
time_diff = time_difference(contest["end"])
time_diff_string = ""
if time_diff.days > 0:
time_diff_string = "{0} days {1} hours".format(time_diff.days, time_diff.hours)
elif time_diff.hours > 0:
time_diff_string = "{0} hours {1} minutes".format(time_diff.hours, time_diff.minutes)
else:
time_diff_string = "{0} minutes".format(time_diff.minutes)
return time_diff_string | Return a string with time for the contest to begin/end | https://github.com/architv/chcli/blob/e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7/challenges/writers.py#L52-L72 |
architv/chcli | challenges/utilities.py | time_difference | def time_difference(target_time):
"""Calculate the difference between the current time and the given time"""
TimeDiff = namedtuple("TimeDiff", ["days", "hours", "minutes", "seconds"])
time_diff = format_date(target_time) - datetime.utcnow()
hours, remainder = divmod(time_diff.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return TimeDiff(days=time_diff.days, hours=hours, minutes=minutes, seconds=seconds) | python | def time_difference(target_time):
"""Calculate the difference between the current time and the given time"""
TimeDiff = namedtuple("TimeDiff", ["days", "hours", "minutes", "seconds"])
time_diff = format_date(target_time) - datetime.utcnow()
hours, remainder = divmod(time_diff.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return TimeDiff(days=time_diff.days, hours=hours, minutes=minutes, seconds=seconds) | Calculate the difference between the current time and the given time | https://github.com/architv/chcli/blob/e9e387b9a85c6b64bc74b1a7c5b85baa4d4ea7d7/challenges/utilities.py#L12-L18 |
manjitkumar/drf-url-filters | filters/validations.py | IntegerLike | def IntegerLike(msg=None):
'''
Checks whether a value is:
- int, or
- long, or
- float without a fractional part, or
- str or unicode composed only of digits
'''
def fn(value):
if not any([
isinstance(value, numbers.Integral),
(isinstance(value, float) and value.is_integer()),
(isinstance(value, basestring) and value.isdigit())
]):
raise Invalid(msg or (
'Invalid input <{0}>; expected an integer'.format(value))
)
else:
return value
return fn | python | def IntegerLike(msg=None):
'''
Checks whether a value is:
- int, or
- long, or
- float without a fractional part, or
- str or unicode composed only of digits
'''
def fn(value):
if not any([
isinstance(value, numbers.Integral),
(isinstance(value, float) and value.is_integer()),
(isinstance(value, basestring) and value.isdigit())
]):
raise Invalid(msg or (
'Invalid input <{0}>; expected an integer'.format(value))
)
else:
return value
return fn | Checks whether a value is:
- int, or
- long, or
- float without a fractional part, or
- str or unicode composed only of digits | https://github.com/manjitkumar/drf-url-filters/blob/ebac358729bcd9aa70537247b2ccd6005f5678c1/filters/validations.py#L13-L32 |
manjitkumar/drf-url-filters | filters/validations.py | Alphanumeric | def Alphanumeric(msg=None):
'''
Checks whether a value is:
- int, or
- long, or
- float without a fractional part, or
- str or unicode composed only of alphanumeric characters
'''
def fn(value):
if not any([
isinstance(value, numbers.Integral),
(isinstance(value, float) and value.is_integer()),
(isinstance(value, basestring) and value.isalnum())
]):
raise Invalid(msg or (
'Invalid input <{0}>; expected an integer'.format(value))
)
else:
return value
return fn | python | def Alphanumeric(msg=None):
'''
Checks whether a value is:
- int, or
- long, or
- float without a fractional part, or
- str or unicode composed only of alphanumeric characters
'''
def fn(value):
if not any([
isinstance(value, numbers.Integral),
(isinstance(value, float) and value.is_integer()),
(isinstance(value, basestring) and value.isalnum())
]):
raise Invalid(msg or (
'Invalid input <{0}>; expected an integer'.format(value))
)
else:
return value
return fn | Checks whether a value is:
- int, or
- long, or
- float without a fractional part, or
- str or unicode composed only of alphanumeric characters | https://github.com/manjitkumar/drf-url-filters/blob/ebac358729bcd9aa70537247b2ccd6005f5678c1/filters/validations.py#L35-L54 |
manjitkumar/drf-url-filters | filters/validations.py | StrictlyAlphanumeric | def StrictlyAlphanumeric(msg=None):
'''
Checks whether a value is:
- str or unicode, and
- composed of both alphabets and digits
'''
def fn(value):
if not (
isinstance(value, basestring) and
value.isalnum() and not
value.isdigit() and not
value.isalpha()
):
raise Invalid(msg or (
'Invalid input <{0}>; expected an integer'.format(value))
)
else:
return value
return fn | python | def StrictlyAlphanumeric(msg=None):
'''
Checks whether a value is:
- str or unicode, and
- composed of both alphabets and digits
'''
def fn(value):
if not (
isinstance(value, basestring) and
value.isalnum() and not
value.isdigit() and not
value.isalpha()
):
raise Invalid(msg or (
'Invalid input <{0}>; expected an integer'.format(value))
)
else:
return value
return fn | Checks whether a value is:
- str or unicode, and
- composed of both alphabets and digits | https://github.com/manjitkumar/drf-url-filters/blob/ebac358729bcd9aa70537247b2ccd6005f5678c1/filters/validations.py#L57-L75 |
manjitkumar/drf-url-filters | filters/validations.py | DatetimeWithTZ | def DatetimeWithTZ(msg=None):
'''
Checks whether a value is :
- a valid castable datetime object with timezone.
'''
def fn(value):
try:
date = parse_datetime(value) or parse_date(value)
if date is not None:
return date
else:
raise ValueError
except ValueError:
raise Invalid('<{0}> is not a valid datetime.'.format(value))
return fn | python | def DatetimeWithTZ(msg=None):
'''
Checks whether a value is :
- a valid castable datetime object with timezone.
'''
def fn(value):
try:
date = parse_datetime(value) or parse_date(value)
if date is not None:
return date
else:
raise ValueError
except ValueError:
raise Invalid('<{0}> is not a valid datetime.'.format(value))
return fn | Checks whether a value is :
- a valid castable datetime object with timezone. | https://github.com/manjitkumar/drf-url-filters/blob/ebac358729bcd9aa70537247b2ccd6005f5678c1/filters/validations.py#L78-L92 |
manjitkumar/drf-url-filters | filters/validations.py | CSVofIntegers | def CSVofIntegers(msg=None):
'''
Checks whether a value is list of integers.
Returns list of integers or just one integer in
list if there is only one element in given CSV string.
'''
def fn(value):
try:
if isinstance(value, basestring):
if ',' in value:
value = list(map(
int, filter(
bool, list(map(
lambda x: x.strip(), value.split(',')
))
)
))
return value
else:
return [int(value)]
else:
raise ValueError
except ValueError:
raise Invalid(
'<{0}> is not a valid csv of integers'.format(value)
)
return fn | python | def CSVofIntegers(msg=None):
'''
Checks whether a value is list of integers.
Returns list of integers or just one integer in
list if there is only one element in given CSV string.
'''
def fn(value):
try:
if isinstance(value, basestring):
if ',' in value:
value = list(map(
int, filter(
bool, list(map(
lambda x: x.strip(), value.split(',')
))
)
))
return value
else:
return [int(value)]
else:
raise ValueError
except ValueError:
raise Invalid(
'<{0}> is not a valid csv of integers'.format(value)
)
return fn | Checks whether a value is list of integers.
Returns list of integers or just one integer in
list if there is only one element in given CSV string. | https://github.com/manjitkumar/drf-url-filters/blob/ebac358729bcd9aa70537247b2ccd6005f5678c1/filters/validations.py#L95-L121 |
manjitkumar/drf-url-filters | example_app/views.py | TeamsViewSet.get_queryset | def get_queryset(self):
"""
Optionally restricts the queryset by filtering against
query parameters in the URL.
"""
query_params = self.request.query_params
url_params = self.kwargs
# get queryset_filters from FilterMixin
queryset_filters = self.get_db_filters(url_params, query_params)
# This dict will hold filter kwargs to pass in to Django ORM calls.
db_filters = queryset_filters['db_filters']
# This dict will hold exclude kwargs to pass in to Django ORM calls.
db_excludes = queryset_filters['db_excludes']
queryset = Team.objects.prefetch_related(
'players'
).all()
return queryset.filter(**db_filters).exclude(**db_excludes) | python | def get_queryset(self):
"""
Optionally restricts the queryset by filtering against
query parameters in the URL.
"""
query_params = self.request.query_params
url_params = self.kwargs
# get queryset_filters from FilterMixin
queryset_filters = self.get_db_filters(url_params, query_params)
# This dict will hold filter kwargs to pass in to Django ORM calls.
db_filters = queryset_filters['db_filters']
# This dict will hold exclude kwargs to pass in to Django ORM calls.
db_excludes = queryset_filters['db_excludes']
queryset = Team.objects.prefetch_related(
'players'
).all()
return queryset.filter(**db_filters).exclude(**db_excludes) | Optionally restricts the queryset by filtering against
query parameters in the URL. | https://github.com/manjitkumar/drf-url-filters/blob/ebac358729bcd9aa70537247b2ccd6005f5678c1/example_app/views.py#L90-L112 |
nimbis/cmsplugin-newsplus | cmsplugin_newsplus/settings.py | get_setting | def get_setting(name, default):
"""
A little helper for fetching global settings with a common prefix.
"""
parent_name = "CMSPLUGIN_NEWS_{0}".format(name)
return getattr(django_settings, parent_name, default) | python | def get_setting(name, default):
"""
A little helper for fetching global settings with a common prefix.
"""
parent_name = "CMSPLUGIN_NEWS_{0}".format(name)
return getattr(django_settings, parent_name, default) | A little helper for fetching global settings with a common prefix. | https://github.com/nimbis/cmsplugin-newsplus/blob/1787fb674faa7800845f18ce782154e290f6be27/cmsplugin_newsplus/settings.py#L5-L10 |
nimbis/cmsplugin-newsplus | cmsplugin_newsplus/admin.py | NewsAdmin.make_published | def make_published(self, request, queryset):
"""
Marks selected news items as published
"""
rows_updated = queryset.update(is_published=True)
self.message_user(request,
ungettext('%(count)d newsitem was published',
'%(count)d newsitems were published',
rows_updated) % {'count': rows_updated}) | python | def make_published(self, request, queryset):
"""
Marks selected news items as published
"""
rows_updated = queryset.update(is_published=True)
self.message_user(request,
ungettext('%(count)d newsitem was published',
'%(count)d newsitems were published',
rows_updated) % {'count': rows_updated}) | Marks selected news items as published | https://github.com/nimbis/cmsplugin-newsplus/blob/1787fb674faa7800845f18ce782154e290f6be27/cmsplugin_newsplus/admin.py#L38-L46 |
nimbis/cmsplugin-newsplus | cmsplugin_newsplus/admin.py | NewsAdmin.make_unpublished | def make_unpublished(self, request, queryset):
"""
Marks selected news items as unpublished
"""
rows_updated = queryset.update(is_published=False)
self.message_user(request,
ungettext('%(count)d newsitem was unpublished',
'%(count)d newsitems were unpublished',
rows_updated) % {'count': rows_updated}) | python | def make_unpublished(self, request, queryset):
"""
Marks selected news items as unpublished
"""
rows_updated = queryset.update(is_published=False)
self.message_user(request,
ungettext('%(count)d newsitem was unpublished',
'%(count)d newsitems were unpublished',
rows_updated) % {'count': rows_updated}) | Marks selected news items as unpublished | https://github.com/nimbis/cmsplugin-newsplus/blob/1787fb674faa7800845f18ce782154e290f6be27/cmsplugin_newsplus/admin.py#L49-L57 |
nimbis/cmsplugin-newsplus | cmsplugin_newsplus/widgets/tinymce_widget.py | TinyMCEEditor.render | def render(self, name, value, attrs=None):
if value is None:
value = ''
value = smart_unicode(value)
final_attrs = self.build_attrs(attrs)
final_attrs['name'] = name
assert 'id' in final_attrs, \
"TinyMCE widget attributes must contain 'id'"
mce_config = cms.plugins.text.settings.TINYMCE_CONFIG.copy()
mce_config.update(get_language_config(self.content_language))
if tinymce.settings.USE_FILEBROWSER:
mce_config['file_browser_callback'] = "djangoFileBrowser"
mce_config.update(self.mce_attrs)
mce_config['mode'] = 'exact'
mce_config['elements'] = final_attrs['id']
mce_config['strict_loading_mode'] = 1
"""
plugins = mce_config.get("plugins", "")
if len(plugins):
plugins += ","
plugins += "-cmsplugins"
mce_config['plugins'] = plugins
adv2 = mce_config.get('theme_advanced_buttons1', "")
if len(adv2):
adv2 = "," + adv2
adv2 = "cmsplugins,cmspluginsedit" + adv2
mce_config['theme_advanced_buttons1'] = adv2
"""
json = simplejson.dumps(mce_config)
html = [u'<textarea%s>%s</textarea>' % (flatatt(final_attrs),
escape(value))]
if tinymce.settings.USE_COMPRESSOR:
compressor_config = {
'plugins': mce_config.get('plugins', ''),
'themes': mce_config.get('theme', 'advanced'),
'languages': mce_config.get('language', ''),
'diskcache': True,
'debug': False,
}
c_json = simplejson.dumps(compressor_config)
html.append(
(u'<script type="text/javascript">'
'tinyMCE_GZ.init(%s);</script>') % (c_json))
html.append(
(u'<script type="text/javascript">%s;\ntinyMCE.init(%s);'
'</script>') % (
self.render_additions(
name,
value,
attrs),
json))
return mark_safe(u'\n'.join(html)) | python | def render(self, name, value, attrs=None):
if value is None:
value = ''
value = smart_unicode(value)
final_attrs = self.build_attrs(attrs)
final_attrs['name'] = name
assert 'id' in final_attrs, \
"TinyMCE widget attributes must contain 'id'"
mce_config = cms.plugins.text.settings.TINYMCE_CONFIG.copy()
mce_config.update(get_language_config(self.content_language))
if tinymce.settings.USE_FILEBROWSER:
mce_config['file_browser_callback'] = "djangoFileBrowser"
mce_config.update(self.mce_attrs)
mce_config['mode'] = 'exact'
mce_config['elements'] = final_attrs['id']
mce_config['strict_loading_mode'] = 1
"""
plugins = mce_config.get("plugins", "")
if len(plugins):
plugins += ","
plugins += "-cmsplugins"
mce_config['plugins'] = plugins
adv2 = mce_config.get('theme_advanced_buttons1', "")
if len(adv2):
adv2 = "," + adv2
adv2 = "cmsplugins,cmspluginsedit" + adv2
mce_config['theme_advanced_buttons1'] = adv2
"""
json = simplejson.dumps(mce_config)
html = [u'<textarea%s>%s</textarea>' % (flatatt(final_attrs),
escape(value))]
if tinymce.settings.USE_COMPRESSOR:
compressor_config = {
'plugins': mce_config.get('plugins', ''),
'themes': mce_config.get('theme', 'advanced'),
'languages': mce_config.get('language', ''),
'diskcache': True,
'debug': False,
}
c_json = simplejson.dumps(compressor_config)
html.append(
(u'<script type="text/javascript">'
'tinyMCE_GZ.init(%s);</script>') % (c_json))
html.append(
(u'<script type="text/javascript">%s;\ntinyMCE.init(%s);'
'</script>') % (
self.render_additions(
name,
value,
attrs),
json))
return mark_safe(u'\n'.join(html)) | plugins = mce_config.get("plugins", "")
if len(plugins):
plugins += ","
plugins += "-cmsplugins"
mce_config['plugins'] = plugins
adv2 = mce_config.get('theme_advanced_buttons1', "")
if len(adv2):
adv2 = "," + adv2
adv2 = "cmsplugins,cmspluginsedit" + adv2
mce_config['theme_advanced_buttons1'] = adv2 | https://github.com/nimbis/cmsplugin-newsplus/blob/1787fb674faa7800845f18ce782154e290f6be27/cmsplugin_newsplus/widgets/tinymce_widget.py#L48-L100 |
tutorcruncher/pydf | pydf/wkhtmltopdf.py | _execute_wk | def _execute_wk(*args, input=None):
"""
Generate path for the wkhtmltopdf binary and execute command.
:param args: args to pass straight to subprocess.Popen
:return: stdout, stderr
"""
wk_args = (WK_PATH,) + args
return subprocess.run(wk_args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE) | python | def _execute_wk(*args, input=None):
"""
Generate path for the wkhtmltopdf binary and execute command.
:param args: args to pass straight to subprocess.Popen
:return: stdout, stderr
"""
wk_args = (WK_PATH,) + args
return subprocess.run(wk_args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE) | Generate path for the wkhtmltopdf binary and execute command.
:param args: args to pass straight to subprocess.Popen
:return: stdout, stderr | https://github.com/tutorcruncher/pydf/blob/53dd030f02f112593ed6e2655160a40b892a23c0/pydf/wkhtmltopdf.py#L22-L30 |
def generate_pdf(html, *,
                 cache_dir: Path=DFT_CACHE_DIR,
                 grayscale: bool=False,
                 lowquality: bool=False,
                 margin_bottom: str=None,
                 margin_left: str=None,
                 margin_right: str=None,
                 margin_top: str=None,
                 orientation: str=None,
                 page_height: str=None,
                 page_width: str=None,
                 page_size: str=None,
                 image_dpi: str=None,
                 image_quality: str=None,
                 **extra_kwargs):
    """
    Generate a pdf from an html string.

    All arguments after ``html`` are passed straight to wkhtmltopdf.
    For details on extra arguments see the output of get_help()
    and get_extended_help().

    All arguments, whether specified or caught with extra_kwargs, are
    converted to command line args with "'--' + original_name.replace('_', '-')".
    Arguments which are True are passed with no value, e.g. just --quiet;
    False and None arguments are omitted; everything else is passed with
    str(value).

    :param html: html string to generate pdf from
    :param cache_dir: directory handed to wkhtmltopdf as its web cache
    :param grayscale: bool
    :param lowquality: bool
    :param margin_bottom: string eg. 10mm
    :param margin_left: string eg. 10mm
    :param margin_right: string eg. 10mm
    :param margin_top: string eg. 10mm
    :param orientation: Portrait or Landscape
    :param page_height: string eg. 10mm
    :param page_width: string eg. 10mm
    :param page_size: string: A4, Letter, etc.
    :param image_dpi: int default 600
    :param image_quality: int default 94
    :param extra_kwargs: any exotic extra options for wkhtmltopdf
    :return: bytes of the generated pdf
    :raises RuntimeError: if wkhtmltopdf exits non-zero AND the output does
        not look like a pdf
    """
    # mkdir(parents=True, exist_ok=True) is race-free, unlike the
    # "if not exists(): mkdir()" check-then-create pattern, which can raise
    # FileExistsError when several processes start concurrently.
    cache_dir.mkdir(parents=True, exist_ok=True)
    py_args = dict(
        cache_dir=cache_dir,
        grayscale=grayscale,
        lowquality=lowquality,
        margin_bottom=margin_bottom,
        margin_left=margin_left,
        margin_right=margin_right,
        margin_top=margin_top,
        orientation=orientation,
        page_height=page_height,
        page_width=page_width,
        page_size=page_size,
        image_dpi=image_dpi,
        image_quality=image_quality,
    )
    py_args.update(extra_kwargs)
    cmd_args = _convert_args(**py_args)
    p = _execute_wk(*cmd_args, input=html.encode())
    pdf_content = p.stdout
    # it seems wkhtmltopdf's error codes can be false, we'll ignore them if we
    # seem to have generated a pdf (output starts with the %PDF magic bytes)
    if p.returncode != 0 and pdf_content[:4] != b'%PDF':
        raise RuntimeError('error running wkhtmltopdf, command: {!r}\n'
                           'response: "{}"'.format(cmd_args, p.stderr.decode().strip()))
    return pdf_content
cache_dir: Path=DFT_CACHE_DIR,
grayscale: bool=False,
lowquality: bool=False,
margin_bottom: str=None,
margin_left: str=None,
margin_right: str=None,
margin_top: str=None,
orientation: str=None,
page_height: str=None,
page_width: str=None,
page_size: str=None,
image_dpi: str=None,
image_quality: str=None,
**extra_kwargs):
"""
Generate a pdf from either a url or a html string.
After the html and url arguments all other arguments are
passed straight to wkhtmltopdf
For details on extra arguments see the output of get_help()
and get_extended_help()
All arguments whether specified or caught with extra_kwargs are converted
to command line args with "'--' + original_name.replace('_', '-')"
Arguments which are True are passed with no value eg. just --quiet, False
and None arguments are missed, everything else is passed with str(value).
:param html: html string to generate pdf from
:param grayscale: bool
:param lowquality: bool
:param margin_bottom: string eg. 10mm
:param margin_left: string eg. 10mm
:param margin_right: string eg. 10mm
:param margin_top: string eg. 10mm
:param orientation: Portrait or Landscape
:param page_height: string eg. 10mm
:param page_width: string eg. 10mm
:param page_size: string: A4, Letter, etc.
:param image_dpi: int default 600
:param image_quality: int default 94
:param extra_kwargs: any exotic extra options for wkhtmltopdf
:return: string representing pdf
"""
if not cache_dir.exists():
Path.mkdir(cache_dir)
py_args = dict(
cache_dir=cache_dir,
grayscale=grayscale,
lowquality=lowquality,
margin_bottom=margin_bottom,
margin_left=margin_left,
margin_right=margin_right,
margin_top=margin_top,
orientation=orientation,
page_height=page_height,
page_width=page_width,
page_size=page_size,
image_dpi=image_dpi,
image_quality=image_quality,
)
py_args.update(extra_kwargs)
cmd_args = _convert_args(**py_args)
p = _execute_wk(*cmd_args, input=html.encode())
pdf_content = p.stdout
# it seems wkhtmltopdf's error codes can be false, we'll ignore them if we
# seem to have generated a pdf
if p.returncode != 0 and pdf_content[:4] != b'%PDF':
raise RuntimeError('error running wkhtmltopdf, command: {!r}\n'
'response: "{}"'.format(cmd_args, p.stderr.decode().strip()))
return pdf_content | Generate a pdf from either a url or a html string.
After the html and url arguments all other arguments are
passed straight to wkhtmltopdf
For details on extra arguments see the output of get_help()
and get_extended_help()
All arguments whether specified or caught with extra_kwargs are converted
to command line args with "'--' + original_name.replace('_', '-')"
Arguments which are True are passed with no value eg. just --quiet, False
and None arguments are missed, everything else is passed with str(value).
:param html: html string to generate pdf from
:param grayscale: bool
:param lowquality: bool
:param margin_bottom: string eg. 10mm
:param margin_left: string eg. 10mm
:param margin_right: string eg. 10mm
:param margin_top: string eg. 10mm
:param orientation: Portrait or Landscape
:param page_height: string eg. 10mm
:param page_width: string eg. 10mm
:param page_size: string: A4, Letter, etc.
:param image_dpi: int default 600
:param image_quality: int default 94
:param extra_kwargs: any exotic extra options for wkhtmltopdf
:return: string representing pdf | https://github.com/tutorcruncher/pydf/blob/53dd030f02f112593ed6e2655160a40b892a23c0/pydf/wkhtmltopdf.py#L78-L153 |
def get_version():
    """
    Report the versions of pydf and of the wkhtmltopdf binary.

    :return: version string
    """
    try:
        binary_version = _string_execute('-V')
    except Exception as exc:
        # deliberately swallow everything: a version string must be produced
        # no matter what goes wrong when invoking the binary
        binary_version = '%s: %s' % (exc.__class__.__name__, exc)
    return 'pydf version: %s\nwkhtmltopdf version: %s' % (VERSION, binary_version)
"""
Get version of pydf and wkhtmltopdf binary
:return: version string
"""
try:
wk_version = _string_execute('-V')
except Exception as e:
# we catch all errors here to make sure we get a version no matter what
wk_version = '%s: %s' % (e.__class__.__name__, e)
return 'pydf version: %s\nwkhtmltopdf version: %s' % (VERSION, wk_version) | Get version of pydf and wkhtmltopdf binary
:return: version string | https://github.com/tutorcruncher/pydf/blob/53dd030f02f112593ed6e2655160a40b892a23c0/pydf/wkhtmltopdf.py#L160-L171 |
def _interpret_regexp(self, string, flags):
    '''Perform string escape - for regexp literals.

    Translates the body of a JavaScript regexp literal into a pattern
    usable by Python's re module: \\uXXXX and \\xXX escapes become the
    literal code points, and JS character classes \\s / \\S are expanded
    into explicit unicode ranges.
    NOTE(review): the ``flags`` argument appears unused here - confirm.
    '''
    self.index = 0
    self.length = len(string)
    self.source = string
    self.lineNumber = 0
    self.lineStart = 0
    octal = False  # NOTE(review): never read after initialisation
    st = ''  # translated pattern accumulated so far
    inside_square = 0  # truthy while scanning a [...] character class
    while (self.index < self.length):
        # outside a character class, multi-char expansions are wrapped in
        # [...] so they remain a single regexp atom
        template = '[%s]' if not inside_square else '%s'
        ch = self.source[self.index]
        self.index += 1
        if ch == '\\':
            ch = self.source[self.index]
            self.index += 1
            if (not isLineTerminator(ch)):
                if ch == 'u':
                    # \uXXXX -> literal code point (only if 4 hex digits follow)
                    digs = self.source[self.index:self.index + 4]
                    if len(digs) == 4 and all(isHexDigit(d) for d in digs):
                        st += template % unichr(int(digs, 16))
                        self.index += 4
                    else:
                        st += 'u'
                elif ch == 'x':
                    # \xXX -> literal code point (only if 2 hex digits follow)
                    digs = self.source[self.index:self.index + 2]
                    if len(digs) == 2 and all(isHexDigit(d) for d in digs):
                        st += template % unichr(int(digs, 16))
                        self.index += 2
                    else:
                        st += 'x'
                # special meaning - single char.
                elif ch == '0':
                    st += '\\0'
                elif ch == 'n':
                    st += '\\n'
                elif ch == 'r':
                    st += '\\r'
                elif ch == 't':
                    st += '\\t'
                elif ch == 'f':
                    st += '\\f'
                elif ch == 'v':
                    st += '\\v'
                # unescape special single characters like . so that they are interpreted literally
                elif ch in REGEXP_SPECIAL_SINGLE:
                    st += '\\' + ch
                # character groups
                elif ch == 'b':
                    st += '\\b'
                elif ch == 'B':
                    st += '\\B'
                elif ch == 'w':
                    st += '\\w'
                elif ch == 'W':
                    st += '\\W'
                elif ch == 'd':
                    st += '\\d'
                elif ch == 'D':
                    st += '\\D'
                elif ch == 's':
                    # JS \s: explicit unicode whitespace ranges for Python re
                    st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff'
                elif ch == 'S':
                    # JS \S: complement of the ranges above
                    st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff'
                else:
                    if isDecimalDigit(ch):
                        # backreference (or octal-ish escape): keep the full
                        # run of digits together, e.g. \12
                        num = ch
                        while self.index < self.length and isDecimalDigit(
                                self.source[self.index]):
                            num += self.source[self.index]
                            self.index += 1
                        st += '\\' + num
                    else:
                        st += ch  # DONT ESCAPE!!!
            else:
                # escaped line terminator: track line bookkeeping, emit nothing
                self.lineNumber += 1
                if (ch == '\r' and self.source[self.index] == '\n'):
                    self.index += 1
                self.lineStart = self.index
        else:
            # NOTE(review): inside_square is set as a bool here but
            # initialised as 0 above; nesting of '[' is not counted - confirm
            # that is intended.
            if ch == '[':
                inside_square = True
            elif ch == ']':
                inside_square = False
            st += ch
    # print string, 'was transformed to', st
    return st
'''Perform sctring escape - for regexp literals'''
self.index = 0
self.length = len(string)
self.source = string
self.lineNumber = 0
self.lineStart = 0
octal = False
st = ''
inside_square = 0
while (self.index < self.length):
template = '[%s]' if not inside_square else '%s'
ch = self.source[self.index]
self.index += 1
if ch == '\\':
ch = self.source[self.index]
self.index += 1
if (not isLineTerminator(ch)):
if ch == 'u':
digs = self.source[self.index:self.index + 4]
if len(digs) == 4 and all(isHexDigit(d) for d in digs):
st += template % unichr(int(digs, 16))
self.index += 4
else:
st += 'u'
elif ch == 'x':
digs = self.source[self.index:self.index + 2]
if len(digs) == 2 and all(isHexDigit(d) for d in digs):
st += template % unichr(int(digs, 16))
self.index += 2
else:
st += 'x'
# special meaning - single char.
elif ch == '0':
st += '\\0'
elif ch == 'n':
st += '\\n'
elif ch == 'r':
st += '\\r'
elif ch == 't':
st += '\\t'
elif ch == 'f':
st += '\\f'
elif ch == 'v':
st += '\\v'
# unescape special single characters like . so that they are interpreted literally
elif ch in REGEXP_SPECIAL_SINGLE:
st += '\\' + ch
# character groups
elif ch == 'b':
st += '\\b'
elif ch == 'B':
st += '\\B'
elif ch == 'w':
st += '\\w'
elif ch == 'W':
st += '\\W'
elif ch == 'd':
st += '\\d'
elif ch == 'D':
st += '\\D'
elif ch == 's':
st += template % u' \f\n\r\t\v\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff'
elif ch == 'S':
st += template % u'\u0000-\u0008\u000e-\u001f\u0021-\u009f\u00a1-\u167f\u1681-\u180d\u180f-\u1fff\u200b-\u2027\u202a-\u202e\u2030-\u205e\u2060-\u2fff\u3001-\ufefe\uff00-\uffff'
else:
if isDecimalDigit(ch):
num = ch
while self.index < self.length and isDecimalDigit(
self.source[self.index]):
num += self.source[self.index]
self.index += 1
st += '\\' + num
else:
st += ch # DONT ESCAPE!!!
else:
self.lineNumber += 1
if (ch == '\r' and self.source[self.index] == '\n'):
self.index += 1
self.lineStart = self.index
else:
if ch == '[':
inside_square = True
elif ch == ']':
inside_square = False
st += ch
# print string, 'was transformed to', st
return st | Perform sctring escape - for regexp literals | https://github.com/PiotrDabkowski/pyjsparser/blob/5465d037b30e334cb0997f2315ec1e451b8ad4c1/pyjsparser/parser.py#L518-L608 |
def works(self, ids = None, query = None, filter = None, offset = None,
          limit = None, sample = None, sort = None,
          order = None, facet = None, select = None, cursor = None,
          cursor_max = 5000, **kwargs):
    '''
    Search Crossref works

    :param ids: [Array] DOIs (digital object identifier) or other identifiers
    :param query: [String] A query string
    :param filter: [Hash] Filter options. See examples for usage.
        Accepts a dict, with filter names and their values. For repeating filter names
        pass in a list of the values to that filter name, e.g.,
        `{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
        See https://github.com/CrossRef/rest-api-doc#filter-names
        for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
        and :func:`~habanero.Crossref.filter_details`
    :param offset: [Fixnum] Number of record to start at, from 1 to 10000
    :param limit: [Fixnum] Number of results to return. Not relevant when searching
        with specific dois. Default: 20. Max: 1000
    :param sample: [Fixnum] Number of random results to return. when you use the sample
        parameter, the limit and offset parameters are ignored. Max: 100
    :param sort: [String] Field to sort on. Note: If the API call includes a query, then
        the sort order will be by the relevance score. If no query is included, then the
        sort order will be by DOI update date. See sorting_ for possible values.
    :param order: [String] Sort order, one of 'asc' or 'desc'
    :param facet: [Boolean/String] Set to `true` to include facet results (default: false).
        Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`.
        See Facets_ for options.
    :param select: [String/list(Strings)] Crossref metadata records can be quite large.
        Sometimes you just want a few elements from the schema. You can "select" a subset
        of elements to return. This can make your API calls much more efficient.
    :param cursor: [String] Cursor character string to do deep paging. Default is None.
        Pass in '*' to start deep paging. Any combination of query, filters and facets may
        be used with deep paging cursors. While rows may be specified along with cursor,
        offset and sample cannot be used.
        See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
    :param cursor_max: [Fixnum] Max records to retrieve. Only used when cursor param used.
        Because deep paging can result in continuous requests until all are retrieved, use
        this parameter to set a maximum number of records. Of course, if there are less
        records found than this value, you will get only those found.
    :param kwargs: additional named arguments passed on to `requests.get`, e.g., field
        queries (see examples and FieldQueries_)

    :return: A dict

    Usage::

        from habanero import Crossref
        cr = Crossref()
        cr.works()
        cr.works(ids = '10.1371/journal.pone.0033693')
        cr.works(ids = ['10.1371/journal.pone.0033693'])

        x = cr.works(query = "ecology")
        x['message']['total-results']
        for i in x['message']['items']:
            print(i['DOI'])

        # filters - pass in as a dict
        ## see https://github.com/CrossRef/rest-api-doc#filter-names
        cr.works(filter = {'has_full_text': True})
        ## to repeat a filter name, pass in a list
        x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100)

        # Deep paging, using the cursor parameter
        res = cr.works(query = "octopus", cursor = "*", limit = 500)
        res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000)

        # field queries
        res = cr.works(query = "ecology", query_author = 'carl boettiger')

        # select certain fields to return
        ## as a comma separated string, or as a list
        cr.works(query = "ecology", select = "DOI,title")
        cr.works(query = "ecology", select = ["DOI","title"])
    '''
    # `ids is not None` is the idiomatic None test; the previous
    # `ids.__class__.__name__ != 'NoneType'` comparison was needlessly
    # indirect (and fooled by any class that happens to be named NoneType).
    if ids is not None:
        # specific identifiers requested: plain request, no cursor support
        return request(self.mailto, self.base_url, "/works/", ids,
                       query, filter, offset, limit, sample, sort,
                       order, facet, select, None, None, None, None, **kwargs)
    else:
        # open-ended search: Request supports deep paging via `cursor`
        return Request(self.mailto, self.base_url, "/works/",
                       query, filter, offset, limit, sample, sort,
                       order, facet, select, cursor, cursor_max, None, **kwargs).do_request()
limit = None, sample = None, sort = None,
order = None, facet = None, select = None, cursor = None,
cursor_max = 5000, **kwargs):
'''
Search Crossref works
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param query: [String] A query string
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relavant when searching with specific dois.
Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. when you use the sample parameter,
the limit and offset parameters are ignored. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`.
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param cursor: [String] Cursor character string to do deep paging. Default is None.
Pass in '*' to start deep paging. Any combination of query, filters and facets may be
used with deep paging cursors. While rows may be specified along with cursor, offset
and sample cannot be used.
See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
:param cursor_max: [Fixnum] Max records to retrieve. Only used when cursor param used. Because
deep paging can result in continuous requests until all are retrieved, use this
parameter to set a maximum number of records. Of course, if there are less records
found than this value, you will get only those found.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.works()
cr.works(ids = '10.1371/journal.pone.0033693')
dois = ['10.1371/journal.pone.0033693', ]
cr.works(ids = dois)
x = cr.works(query = "ecology")
x['status']
x['message-type']
x['message-version']
x['message']
x['message']['total-results']
x['message']['items-per-page']
x['message']['query']
x['message']['items']
# Get full text links
x = cr.works(filter = {'has_full_text': True})
x
# Parse output to various data pieces
x = cr.works(filter = {'has_full_text': True})
## get doi for each item
[ z['DOI'] for z in x['message']['items'] ]
## get doi and url for each item
[ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ]
### print every doi
for i in x['message']['items']:
print i['DOI']
# filters - pass in as a dict
## see https://github.com/CrossRef/rest-api-doc#filter-names
cr.works(filter = {'has_full_text': True})
cr.works(filter = {'has_funder': True, 'has_full_text': True})
cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'})
## to repeat a filter name, pass in a list
x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100)
map(lambda z:z['funder'][0]['DOI'], x['message']['items'])
# Deep paging, using the cursor parameter
## this search should lead to only ~215 results
cr.works(query = "widget", cursor = "*", cursor_max = 100)
## this search should lead to only ~2500 results, in chunks of 500
res = cr.works(query = "octopus", cursor = "*", limit = 500)
sum([ len(z['message']['items']) for z in res ])
## about 167 results
res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500)
sum([ len(z['message']['items']) for z in res ])
## cursor_max to get back only a maximum set of results
res = cr.works(query = "widget", cursor = "*", cursor_max = 100)
sum([ len(z['message']['items']) for z in res ])
## cursor_max - especially useful when a request could be very large
### e.g., "ecology" results in ~275K records, lets max at 10,000
### with 1000 at a time
res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
# field queries
res = cr.works(query = "ecology", query_author = 'carl boettiger')
[ x['author'][0]['family'] for x in res['message']['items'] ]
# select certain fields to return
## as a comma separated string
cr.works(query = "ecology", select = "DOI,title")
## or as a list
cr.works(query = "ecology", select = ["DOI","title"])
'''
if ids.__class__.__name__ != 'NoneType':
return request(self.mailto, self.base_url, "/works/", ids,
query, filter, offset, limit, sample, sort,
order, facet, select, None, None, None, None, **kwargs)
else:
return Request(self.mailto, self.base_url, "/works/",
query, filter, offset, limit, sample, sort,
order, facet, select, cursor, cursor_max, None, **kwargs).do_request() | Search Crossref works
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param query: [String] A query string
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relavant when searching with specific dois.
Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. when you use the sample parameter,
the limit and offset parameters are ignored. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`.
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param cursor: [String] Cursor character string to do deep paging. Default is None.
Pass in '*' to start deep paging. Any combination of query, filters and facets may be
used with deep paging cursors. While rows may be specified along with cursor, offset
and sample cannot be used.
See https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#deep-paging-with-cursors
:param cursor_max: [Fixnum] Max records to retrieve. Only used when cursor param used. Because
deep paging can result in continuous requests until all are retrieved, use this
parameter to set a maximum number of records. Of course, if there are less records
found than this value, you will get only those found.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.works()
cr.works(ids = '10.1371/journal.pone.0033693')
dois = ['10.1371/journal.pone.0033693', ]
cr.works(ids = dois)
x = cr.works(query = "ecology")
x['status']
x['message-type']
x['message-version']
x['message']
x['message']['total-results']
x['message']['items-per-page']
x['message']['query']
x['message']['items']
# Get full text links
x = cr.works(filter = {'has_full_text': True})
x
# Parse output to various data pieces
x = cr.works(filter = {'has_full_text': True})
## get doi for each item
[ z['DOI'] for z in x['message']['items'] ]
## get doi and url for each item
[ {"doi": z['DOI'], "url": z['URL']} for z in x['message']['items'] ]
### print every doi
for i in x['message']['items']:
print i['DOI']
# filters - pass in as a dict
## see https://github.com/CrossRef/rest-api-doc#filter-names
cr.works(filter = {'has_full_text': True})
cr.works(filter = {'has_funder': True, 'has_full_text': True})
cr.works(filter = {'award_number': 'CBET-0756451', 'award_funder': '10.13039/100000001'})
## to repeat a filter name, pass in a list
x = cr.works(filter = {'award_funder': ['10.13039/100004440', '10.13039/100000861']}, limit = 100)
map(lambda z:z['funder'][0]['DOI'], x['message']['items'])
# Deep paging, using the cursor parameter
## this search should lead to only ~215 results
cr.works(query = "widget", cursor = "*", cursor_max = 100)
## this search should lead to only ~2500 results, in chunks of 500
res = cr.works(query = "octopus", cursor = "*", limit = 500)
sum([ len(z['message']['items']) for z in res ])
## about 167 results
res = cr.works(query = "extravagant", cursor = "*", limit = 50, cursor_max = 500)
sum([ len(z['message']['items']) for z in res ])
## cursor_max to get back only a maximum set of results
res = cr.works(query = "widget", cursor = "*", cursor_max = 100)
sum([ len(z['message']['items']) for z in res ])
## cursor_max - especially useful when a request could be very large
### e.g., "ecology" results in ~275K records, lets max at 10,000
### with 1000 at a time
res = cr.works(query = "ecology", cursor = "*", cursor_max = 10000, limit = 1000)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
# field queries
res = cr.works(query = "ecology", query_author = 'carl boettiger')
[ x['author'][0]['family'] for x in res['message']['items'] ]
# select certain fields to return
## as a comma separated string
cr.works(query = "ecology", select = "DOI,title")
## or as a list
cr.works(query = "ecology", select = ["DOI","title"]) | https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L171-L296 |
def prefixes(self, ids = None, filter = None, offset = None,
             limit = None, sample = None, sort = None,
             order = None, facet = None, works = False, select = None,
             cursor = None, cursor_max = 5000, **kwargs):
    '''
    Search Crossref prefixes

    :param ids: [Array] DOIs (digital object identifier) or other identifiers
    :param filter: [Hash] Filter options; a dict mapping filter names to values.
        To repeat a filter name, pass a list of values for it. See
        https://github.com/CrossRef/rest-api-doc#filter-names for names and
        descriptions, and :func:`~habanero.Crossref.filter_names` /
        :func:`~habanero.Crossref.filter_details`
    :param offset: [Fixnum] Number of record to start at, from 1 to 10000
    :param limit: [Fixnum] Number of results to return. Not relevant when
        searching with specific dois. Default: 20. Max: 1000
    :param sample: [Fixnum] Number of random results to return; limit and
        offset are then ignored. Only used when works requested. Max: 100
    :param sort: [String] Field to sort on. With a query the sort order is by
        relevance score, otherwise by DOI update date. See sorting_ for values.
    :param order: [String] Sort order, one of 'asc' or 'desc'
    :param facet: [Boolean/String] Set to `true` to include facet results
        (default: false), or pass a facet query string such as
        `facet=type-name:*`. See Facets_ for options.
    :param select: [String/list(Strings)] "Select" a subset of schema elements
        to return, making API calls more efficient.
    :param works: [Boolean] If true, works returned as well. Default: false
    :param kwargs: additional named arguments passed on to `requests.get`,
        e.g., field queries (see examples and FieldQueries_)

    :return: A dict

    Usage::

        from habanero import Crossref
        cr = Crossref()
        cr.prefixes(ids = "10.1016")
        cr.prefixes(ids = ['10.1016','10.1371','10.1023','10.4176','10.1093'])
        # get works, limit results, sort
        cr.prefixes(ids = "10.1016", works = True, limit = 3)
        cr.prefixes(ids = "10.1016", works = True, sort = "relevance", order = "asc")
        # cursor - deep paging
        res = cr.prefixes(ids = "10.1016", works = True, cursor = "*", limit = 200)
        # field queries
        res = cr.prefixes(ids = "10.1371", works = True, query_editor = 'cooper', filter = {'type': 'journal-article'})
    '''
    # this route takes no free-text query; reject one up front
    check_kwargs(["query"], kwargs)
    return request(
        self.mailto,
        self.base_url,
        "/prefixes/",
        ids,
        query=None,
        filter=filter,
        offset=offset,
        limit=limit,
        sample=sample,
        sort=sort,
        order=order,
        facet=facet,
        select=select,
        works=works,
        cursor=cursor,
        cursor_max=cursor_max,
        **kwargs)
limit = None, sample = None, sort = None,
order = None, facet = None, works = False, select = None,
cursor = None, cursor_max = 5000, **kwargs):
'''
Search Crossref prefixes
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. when you use the sample parameter,
the limit and offset parameters are ignored. This parameter only used when works requested. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: [Boolean] If true, works returned as well. Default: false
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.prefixes(ids = "10.1016")
cr.prefixes(ids = ['10.1016','10.1371','10.1023','10.4176','10.1093'])
# get works
cr.prefixes(ids = "10.1016", works = True)
# Limit number of results
cr.prefixes(ids = "10.1016", works = True, limit = 3)
# Sort and order
cr.prefixes(ids = "10.1016", works = True, sort = "relevance", order = "asc")
# cursor - deep paging
res = cr.prefixes(ids = "10.1016", works = True, cursor = "*", limit = 200)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
# field queries
res = cr.prefixes(ids = "10.1371", works = True, query_editor = 'cooper', filter = {'type': 'journal-article'})
eds = [ x.get('editor') for x in res['message']['items'] ]
[ z for z in eds if z is not None ]
'''
check_kwargs(["query"], kwargs)
return request(self.mailto, self.base_url, "/prefixes/", ids,
query = None, filter = filter, offset = offset, limit = limit,
sample = sample, sort = sort, order = order, facet = facet,
select = select, works = works, cursor = cursor, cursor_max = cursor_max,
**kwargs) | Search Crossref prefixes
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. when you use the sample parameter,
the limit and offset parameters are ignored. This parameter only used when works requested. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: [Boolean] If true, works returned as well. Default: false
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.prefixes(ids = "10.1016")
cr.prefixes(ids = ['10.1016','10.1371','10.1023','10.4176','10.1093'])
# get works
cr.prefixes(ids = "10.1016", works = True)
# Limit number of results
cr.prefixes(ids = "10.1016", works = True, limit = 3)
# Sort and order
cr.prefixes(ids = "10.1016", works = True, sort = "relevance", order = "asc")
# cursor - deep paging
res = cr.prefixes(ids = "10.1016", works = True, cursor = "*", limit = 200)
sum([ len(z['message']['items']) for z in res ])
items = [ z['message']['items'] for z in res ]
items = [ item for sublist in items for item in sublist ]
[ z['DOI'] for z in items ][0:50]
# field queries
res = cr.prefixes(ids = "10.1371", works = True, query_editor = 'cooper', filter = {'type': 'journal-article'})
eds = [ x.get('editor') for x in res['message']['items'] ]
[ z for z in eds if z is not None ] | https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L361-L430 |
sckott/habanero | habanero/crossref/crossref.py | Crossref.types | def types(self, ids = None, query = None, filter = None, offset = None,
limit = None, sample = None, sort = None,
order = None, facet = None, works = False, select = None,
cursor = None, cursor_max = 5000, **kwargs):
'''
Search Crossref types
:param ids: [Array] Type identifier, e.g., journal
:param query: [String] A query string
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. when you use the sample parameter,
the limit and offset parameters are ignored. This parameter only used when works requested. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: [Boolean] If true, works returned as well. Default: false
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.types()
cr.types(ids = "journal")
cr.types(ids = "journal-article")
cr.types(ids = "journal", works = True)
# field queries
res = cr.types(ids = "journal-article", works = True, query_title = 'gender', rows = 100)
[ x.get('title') for x in res['message']['items'] ]
'''
return request(self.mailto, self.base_url, "/types/", ids,
query, filter, offset, limit, sample, sort,
order, facet, select, works, cursor, cursor_max, **kwargs) | python | def types(self, ids = None, query = None, filter = None, offset = None,
limit = None, sample = None, sort = None,
order = None, facet = None, works = False, select = None,
cursor = None, cursor_max = 5000, **kwargs):
'''
Search Crossref types
:param ids: [Array] Type identifier, e.g., journal
:param query: [String] A query string
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. when you use the sample parameter,
the limit and offset parameters are ignored. This parameter only used when works requested. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: [Boolean] If true, works returned as well. Default: false
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.types()
cr.types(ids = "journal")
cr.types(ids = "journal-article")
cr.types(ids = "journal", works = True)
# field queries
res = cr.types(ids = "journal-article", works = True, query_title = 'gender', rows = 100)
[ x.get('title') for x in res['message']['items'] ]
'''
return request(self.mailto, self.base_url, "/types/", ids,
query, filter, offset, limit, sample, sort,
order, facet, select, works, cursor, cursor_max, **kwargs) | Search Crossref types
:param ids: [Array] Type identifier, e.g., journal
:param query: [String] A query string
:param filter: [Hash] Filter options. See examples for usage.
Accepts a dict, with filter names and their values. For repeating filter names
pass in a list of the values to that filter name, e.g.,
`{'award_funder': ['10.13039/100004440', '10.13039/100000861']}`.
See https://github.com/CrossRef/rest-api-doc#filter-names
for filter names and their descriptions and :func:`~habanero.Crossref.filter_names`
and :func:`~habanero.Crossref.filter_details`
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sample: [Fixnum] Number of random results to return. when you use the sample parameter,
the limit and offset parameters are ignored. This parameter only used when works requested. Max: 100
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param select: [String/list(Strings)] Crossref metadata records can be
quite large. Sometimes you just want a few elements from the schema. You can "select"
a subset of elements to return. This can make your API calls much more efficient. Not
clear yet which fields are allowed here.
:param works: [Boolean] If true, works returned as well. Default: false
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.types()
cr.types(ids = "journal")
cr.types(ids = "journal-article")
cr.types(ids = "journal", works = True)
# field queries
res = cr.types(ids = "journal-article", works = True, query_title = 'gender', rows = 100)
[ x.get('title') for x in res['message']['items'] ] | https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L573-L625 |
sckott/habanero | habanero/crossref/crossref.py | Crossref.licenses | def licenses(self, query = None, offset = None,
limit = None, sample = None, sort = None,
order = None, facet = None, **kwargs):
'''
Search Crossref licenses
:param query: [String] A query string
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.licenses()
cr.licenses(query = "creative")
'''
check_kwargs(["ids", "filter", "works"], kwargs)
res = request(self.mailto, self.base_url, "/licenses/", None,
query, None, offset, limit, None, sort,
order, facet, None, None, None, None, **kwargs)
return res | python | def licenses(self, query = None, offset = None,
limit = None, sample = None, sort = None,
order = None, facet = None, **kwargs):
'''
Search Crossref licenses
:param query: [String] A query string
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.licenses()
cr.licenses(query = "creative")
'''
check_kwargs(["ids", "filter", "works"], kwargs)
res = request(self.mailto, self.base_url, "/licenses/", None,
query, None, offset, limit, None, sort,
order, facet, None, None, None, None, **kwargs)
return res | Search Crossref licenses
:param query: [String] A query string
:param offset: [Fixnum] Number of record to start at, from 1 to 10000
:param limit: [Fixnum] Number of results to return. Not relevant when searching with specific dois. Default: 20. Max: 1000
:param sort: [String] Field to sort on. Note: If the API call includes a query, then the sort
order will be by the relevance score. If no query is included, then the sort order
will be by DOI update date. See sorting_ for possible values.
:param order: [String] Sort order, one of 'asc' or 'desc'
:param facet: [Boolean/String] Set to `true` to include facet results (default: false).
Optionally, pass a query string, e.g., `facet=type-name:*` or `facet=license=*`
See Facets_ for options.
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples and FieldQueries_)
:return: A dict
Usage::
from habanero import Crossref
cr = Crossref()
cr.licenses()
cr.licenses(query = "creative") | https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L627-L659 |
sckott/habanero | habanero/crossref/crossref.py | Crossref.registration_agency | def registration_agency(self, ids, **kwargs):
'''
Determine registration agency for DOIs
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: list of DOI minting agencies
Usage::
from habanero import Crossref
cr = Crossref()
cr.registration_agency('10.1371/journal.pone.0033693')
cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993'])
'''
check_kwargs(["query", "filter", "offset", "limit", "sample", "sort",
"order", "facet", "works"], kwargs)
res = request(self.mailto, self.base_url, "/works/", ids,
None, None, None, None, None, None,
None, None, None, None, None, None, True, **kwargs)
if res.__class__ != list:
k = []
k.append(res)
else:
k = res
return [ z['message']['agency']['label'] for z in k ] | python | def registration_agency(self, ids, **kwargs):
'''
Determine registration agency for DOIs
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: list of DOI minting agencies
Usage::
from habanero import Crossref
cr = Crossref()
cr.registration_agency('10.1371/journal.pone.0033693')
cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993'])
'''
check_kwargs(["query", "filter", "offset", "limit", "sample", "sort",
"order", "facet", "works"], kwargs)
res = request(self.mailto, self.base_url, "/works/", ids,
None, None, None, None, None, None,
None, None, None, None, None, None, True, **kwargs)
if res.__class__ != list:
k = []
k.append(res)
else:
k = res
return [ z['message']['agency']['label'] for z in k ] | Determine registration agency for DOIs
:param ids: [Array] DOIs (digital object identifier) or other identifiers
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: list of DOI minting agencies
Usage::
from habanero import Crossref
cr = Crossref()
cr.registration_agency('10.1371/journal.pone.0033693')
cr.registration_agency(ids = ['10.1007/12080.1874-1746','10.1007/10452.1573-5125', '10.1111/(issn)1442-9993']) | https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L661-L688 |
sckott/habanero | habanero/crossref/crossref.py | Crossref.random_dois | def random_dois(self, sample = 10, **kwargs):
'''
Get a random set of DOIs
:param sample: [Fixnum] Number of random DOIs to return. Default: 10. Max: 100
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: [Array] of DOIs
Usage::
from habanero import Crossref
cr = Crossref()
cr.random_dois(1)
cr.random_dois(10)
cr.random_dois(50)
cr.random_dois(100)
'''
res = request(self.mailto, self.base_url, "/works/", None,
None, None, None, None, sample, None,
None, None, None, True, None, None, None, **kwargs)
return [ z['DOI'] for z in res['message']['items'] ] | python | def random_dois(self, sample = 10, **kwargs):
'''
Get a random set of DOIs
:param sample: [Fixnum] Number of random DOIs to return. Default: 10. Max: 100
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: [Array] of DOIs
Usage::
from habanero import Crossref
cr = Crossref()
cr.random_dois(1)
cr.random_dois(10)
cr.random_dois(50)
cr.random_dois(100)
'''
res = request(self.mailto, self.base_url, "/works/", None,
None, None, None, None, sample, None,
None, None, None, True, None, None, None, **kwargs)
return [ z['DOI'] for z in res['message']['items'] ] | Get a random set of DOIs
:param sample: [Fixnum] Number of random DOIs to return. Default: 10. Max: 100
:param kwargs: additional named arguments passed on to `requests.get`, e.g., field
queries (see examples)
:return: [Array] of DOIs
Usage::
from habanero import Crossref
cr = Crossref()
cr.random_dois(1)
cr.random_dois(10)
cr.random_dois(50)
cr.random_dois(100) | https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/crossref/crossref.py#L690-L712 |
sckott/habanero | habanero/cn/styles.py | csl_styles | def csl_styles(**kwargs):
'''
Get list of styles from https://github.com/citation-style-language/styles
:param kwargs: any additional arguments will be passed on to `requests.get`
:return: list, of CSL styles
Usage::
from habanero import cn
cn.csl_styles()
'''
base = "https://api.github.com/repos/citation-style-language/styles"
tt = requests.get(base + '/commits?per_page=1', **kwargs)
tt.raise_for_status()
check_json(tt)
commres = tt.json()
sha = commres[0]['sha']
sty = requests.get(base + "/git/trees/" + sha, **kwargs)
sty.raise_for_status()
check_json(sty)
res = sty.json()
files = [ z['path'] for z in res['tree'] ]
matches = [ re.search(".csl", g) for g in files ]
csls = [ x.string for x in filter(None, matches) ]
return [ re.sub(".csl", "", x) for x in csls ] | python | def csl_styles(**kwargs):
'''
Get list of styles from https://github.com/citation-style-language/styles
:param kwargs: any additional arguments will be passed on to `requests.get`
:return: list, of CSL styles
Usage::
from habanero import cn
cn.csl_styles()
'''
base = "https://api.github.com/repos/citation-style-language/styles"
tt = requests.get(base + '/commits?per_page=1', **kwargs)
tt.raise_for_status()
check_json(tt)
commres = tt.json()
sha = commres[0]['sha']
sty = requests.get(base + "/git/trees/" + sha, **kwargs)
sty.raise_for_status()
check_json(sty)
res = sty.json()
files = [ z['path'] for z in res['tree'] ]
matches = [ re.search(".csl", g) for g in files ]
csls = [ x.string for x in filter(None, matches) ]
return [ re.sub(".csl", "", x) for x in csls ] | Get list of styles from https://github.com/citation-style-language/styles
:param kwargs: any additional arguments will be passed on to `requests.get`
:return: list, of CSL styles
Usage::
from habanero import cn
cn.csl_styles() | https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/cn/styles.py#L7-L33 |
sckott/habanero | habanero/cn/cn.py | content_negotiation | def content_negotiation(ids = None, format = "bibtex", style = 'apa',
locale = "en-US", url = None, **kwargs):
'''
Get citations in various formats from CrossRef
:param ids: [str] Search by a single DOI or many DOIs, each a string. If many
passed in, do so in a list
:param format: [str] Name of the format. One of "rdf-xml", "turtle", "citeproc-json",
"citeproc-json-ish", "text", "ris", "bibtex" (Default), "crossref-xml",
"datacite-xml","bibentry", or "crossref-tdm"
:param style: [str] A CSL style (for text format only). See :func:`~habanero.cn.csl_styles`
for options. Default: "apa". If there's a style that CrossRef doesn't support
you'll get a `(500) Internal Server Error`
:param locale: [str] Language locale. See `locale.locale_alias`
:param url: [str] Base URL for the content negotiation request. Default: `https://doi.org`
:param kwargs: any additional arguments will be passed on to `requests.get`
:return: string, which can be parsed to various formats depending on what
format you request (e.g., JSON vs. XML vs. bibtex)
Usage::
from habanero import cn
cn.content_negotiation(ids = '10.1126/science.169.3946.635')
# get citeproc-json
cn.content_negotiation(ids = '10.1126/science.169.3946.635', format = "citeproc-json")
# some other formats
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "rdf-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "crossref-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text")
# return an R bibentry type
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "bibentry")
cn.content_negotiation(ids = "10.6084/m9.figshare.97218", format = "bibentry")
# return an apa style citation
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "apa")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "harvard3")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "elsevier-harvard")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "ecoscience")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "heredity")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "oikos")
# Using DataCite DOIs
## some formats don't work
# cn.content_negotiation(ids = "10.5284/1011335", format = "text")
# cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-xml")
# cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-tdm")
## But most do work
cn.content_negotiation(ids = "10.5284/1011335", format = "datacite-xml")
cn.content_negotiation(ids = "10.5284/1011335", format = "rdf-xml")
cn.content_negotiation(ids = "10.5284/1011335", format = "turtle")
cn.content_negotiation(ids = "10.5284/1011335", format = "citeproc-json")
cn.content_negotiation(ids = "10.5284/1011335", format = "ris")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibentry")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex")
# many DOIs
dois = ['10.5167/UZH-30455','10.5167/UZH-49216','10.5167/UZH-503', '10.5167/UZH-38402','10.5167/UZH-41217']
x = cn.content_negotiation(ids = dois)
# Use a different base url
url = "http://dx.doi.org"
cn.content_negotiation(ids = "10.1126/science.169.3946.635", url = url)
cn.content_negotiation(ids = "10.5284/1011335", url = url)
'''
if url is None:
url = cn_base_url
return CNRequest(url, ids, format, style, locale, **kwargs) | python | def content_negotiation(ids = None, format = "bibtex", style = 'apa',
locale = "en-US", url = None, **kwargs):
'''
Get citations in various formats from CrossRef
:param ids: [str] Search by a single DOI or many DOIs, each a string. If many
passed in, do so in a list
:param format: [str] Name of the format. One of "rdf-xml", "turtle", "citeproc-json",
"citeproc-json-ish", "text", "ris", "bibtex" (Default), "crossref-xml",
"datacite-xml","bibentry", or "crossref-tdm"
:param style: [str] A CSL style (for text format only). See :func:`~habanero.cn.csl_styles`
for options. Default: "apa". If there's a style that CrossRef doesn't support
you'll get a `(500) Internal Server Error`
:param locale: [str] Language locale. See `locale.locale_alias`
:param url: [str] Base URL for the content negotiation request. Default: `https://doi.org`
:param kwargs: any additional arguments will be passed on to `requests.get`
:return: string, which can be parsed to various formats depending on what
format you request (e.g., JSON vs. XML vs. bibtex)
Usage::
from habanero import cn
cn.content_negotiation(ids = '10.1126/science.169.3946.635')
# get citeproc-json
cn.content_negotiation(ids = '10.1126/science.169.3946.635', format = "citeproc-json")
# some other formats
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "rdf-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "crossref-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text")
# return an R bibentry type
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "bibentry")
cn.content_negotiation(ids = "10.6084/m9.figshare.97218", format = "bibentry")
# return an apa style citation
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "apa")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "harvard3")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "elsevier-harvard")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "ecoscience")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "heredity")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "oikos")
# Using DataCite DOIs
## some formats don't work
# cn.content_negotiation(ids = "10.5284/1011335", format = "text")
# cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-xml")
# cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-tdm")
## But most do work
cn.content_negotiation(ids = "10.5284/1011335", format = "datacite-xml")
cn.content_negotiation(ids = "10.5284/1011335", format = "rdf-xml")
cn.content_negotiation(ids = "10.5284/1011335", format = "turtle")
cn.content_negotiation(ids = "10.5284/1011335", format = "citeproc-json")
cn.content_negotiation(ids = "10.5284/1011335", format = "ris")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibentry")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex")
# many DOIs
dois = ['10.5167/UZH-30455','10.5167/UZH-49216','10.5167/UZH-503', '10.5167/UZH-38402','10.5167/UZH-41217']
x = cn.content_negotiation(ids = dois)
# Use a different base url
url = "http://dx.doi.org"
cn.content_negotiation(ids = "10.1126/science.169.3946.635", url = url)
cn.content_negotiation(ids = "10.5284/1011335", url = url)
'''
if url is None:
url = cn_base_url
return CNRequest(url, ids, format, style, locale, **kwargs) | Get citations in various formats from CrossRef
:param ids: [str] Search by a single DOI or many DOIs, each a string. If many
passed in, do so in a list
:param format: [str] Name of the format. One of "rdf-xml", "turtle", "citeproc-json",
"citeproc-json-ish", "text", "ris", "bibtex" (Default), "crossref-xml",
"datacite-xml","bibentry", or "crossref-tdm"
:param style: [str] A CSL style (for text format only). See :func:`~habanero.cn.csl_styles`
for options. Default: "apa". If there's a style that CrossRef doesn't support
you'll get a `(500) Internal Server Error`
:param locale: [str] Language locale. See `locale.locale_alias`
:param url: [str] Base URL for the content negotiation request. Default: `https://doi.org`
:param kwargs: any additional arguments will be passed on to `requests.get`
:return: string, which can be parsed to various formats depending on what
format you request (e.g., JSON vs. XML vs. bibtex)
Usage::
from habanero import cn
cn.content_negotiation(ids = '10.1126/science.169.3946.635')
# get citeproc-json
cn.content_negotiation(ids = '10.1126/science.169.3946.635', format = "citeproc-json")
# some other formats
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "rdf-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "crossref-xml")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text")
# return an R bibentry type
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "bibentry")
cn.content_negotiation(ids = "10.6084/m9.figshare.97218", format = "bibentry")
# return an apa style citation
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "apa")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "harvard3")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "elsevier-harvard")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "ecoscience")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "heredity")
cn.content_negotiation(ids = "10.1126/science.169.3946.635", format = "text", style = "oikos")
# Using DataCite DOIs
## some formats don't work
# cn.content_negotiation(ids = "10.5284/1011335", format = "text")
# cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-xml")
# cn.content_negotiation(ids = "10.5284/1011335", format = "crossref-tdm")
## But most do work
cn.content_negotiation(ids = "10.5284/1011335", format = "datacite-xml")
cn.content_negotiation(ids = "10.5284/1011335", format = "rdf-xml")
cn.content_negotiation(ids = "10.5284/1011335", format = "turtle")
cn.content_negotiation(ids = "10.5284/1011335", format = "citeproc-json")
cn.content_negotiation(ids = "10.5284/1011335", format = "ris")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibentry")
cn.content_negotiation(ids = "10.5284/1011335", format = "bibtex")
# many DOIs
dois = ['10.5167/UZH-30455','10.5167/UZH-49216','10.5167/UZH-503', '10.5167/UZH-38402','10.5167/UZH-41217']
x = cn.content_negotiation(ids = dois)
# Use a different base url
url = "http://dx.doi.org"
cn.content_negotiation(ids = "10.1126/science.169.3946.635", url = url)
cn.content_negotiation(ids = "10.5284/1011335", url = url) | https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/cn/cn.py#L4-L76 |
sckott/habanero | habanero/counts/counts.py | citation_count | def citation_count(doi, url = "http://www.crossref.org/openurl/",
key = "[email protected]", **kwargs):
'''
Get a citation count with a DOI
:param doi: [String] DOI, digital object identifier
:param url: [String] the API url for the function (should be left to default)
:param keyc: [String] your API key
See http://labs.crossref.org/openurl/ for more info on this Crossref API service.
Usage::
from habanero import counts
counts.citation_count(doi = "10.1371/journal.pone.0042793")
counts.citation_count(doi = "10.1016/j.fbr.2012.01.001")
# DOI not found
## FIXME
counts.citation_count(doi = "10.1016/j.fbr.2012")
'''
args = {"id": "doi:" + doi, "pid": key, "noredirect": True}
args = dict((k, v) for k, v in args.items() if v)
res = requests.get(url, params = args, headers = make_ua(), **kwargs)
xmldoc = minidom.parseString(res.content)
val = xmldoc.getElementsByTagName('query')[0].attributes['fl_count'].value
return int(str(val)) | python | def citation_count(doi, url = "http://www.crossref.org/openurl/",
key = "[email protected]", **kwargs):
'''
Get a citation count with a DOI
:param doi: [String] DOI, digital object identifier
:param url: [String] the API url for the function (should be left to default)
:param keyc: [String] your API key
See http://labs.crossref.org/openurl/ for more info on this Crossref API service.
Usage::
from habanero import counts
counts.citation_count(doi = "10.1371/journal.pone.0042793")
counts.citation_count(doi = "10.1016/j.fbr.2012.01.001")
# DOI not found
## FIXME
counts.citation_count(doi = "10.1016/j.fbr.2012")
'''
args = {"id": "doi:" + doi, "pid": key, "noredirect": True}
args = dict((k, v) for k, v in args.items() if v)
res = requests.get(url, params = args, headers = make_ua(), **kwargs)
xmldoc = minidom.parseString(res.content)
val = xmldoc.getElementsByTagName('query')[0].attributes['fl_count'].value
return int(str(val)) | Get a citation count with a DOI
:param doi: [String] DOI, digital object identifier
:param url: [String] the API url for the function (should be left to default)
:param keyc: [String] your API key
See http://labs.crossref.org/openurl/ for more info on this Crossref API service.
Usage::
from habanero import counts
counts.citation_count(doi = "10.1371/journal.pone.0042793")
counts.citation_count(doi = "10.1016/j.fbr.2012.01.001")
# DOI not found
## FIXME
counts.citation_count(doi = "10.1016/j.fbr.2012") | https://github.com/sckott/habanero/blob/a17d87070378786bbb138e1c9712ecad9aacf38e/habanero/counts/counts.py#L5-L30 |
alvinwan/tex2py | tex2py/tex2py.py | TreeOfContents.findHierarchy | def findHierarchy(self, max_subs=10):
"""Find hierarchy for the LaTeX source.
>>> TOC.fromLatex(r'\subsection{yo}\section{hello}').findHierarchy()
('section', 'subsection')
>>> TOC.fromLatex(
... r'\subsubsubsection{huh}\subsubsection{hah}').findHierarchy()
('subsubsection', 'subsubsubsection')
>>> TOC.fromLatex('\section{h1}\subsection{subh1}\section{h2}\
... \subsection{subh2}').findHierarchy()
('section', 'subsection')
"""
hierarchy = []
defaults = TOC.default_hierarchy + tuple(
'%ssection' % ('sub'*i) for i in range(2, max_subs))
for level in defaults:
if getattr(self.source, level, False):
hierarchy.append(level)
return tuple(hierarchy) | python | def findHierarchy(self, max_subs=10):
"""Find hierarchy for the LaTeX source.
>>> TOC.fromLatex(r'\subsection{yo}\section{hello}').findHierarchy()
('section', 'subsection')
>>> TOC.fromLatex(
... r'\subsubsubsection{huh}\subsubsection{hah}').findHierarchy()
('subsubsection', 'subsubsubsection')
>>> TOC.fromLatex('\section{h1}\subsection{subh1}\section{h2}\
... \subsection{subh2}').findHierarchy()
('section', 'subsection')
"""
hierarchy = []
defaults = TOC.default_hierarchy + tuple(
'%ssection' % ('sub'*i) for i in range(2, max_subs))
for level in defaults:
if getattr(self.source, level, False):
hierarchy.append(level)
return tuple(hierarchy) | Find hierarchy for the LaTeX source.
>>> TOC.fromLatex(r'\subsection{yo}\section{hello}').findHierarchy()
('section', 'subsection')
>>> TOC.fromLatex(
... r'\subsubsubsection{huh}\subsubsection{hah}').findHierarchy()
('subsubsection', 'subsubsubsection')
>>> TOC.fromLatex('\section{h1}\subsection{subh1}\section{h2}\
... \subsection{subh2}').findHierarchy()
('section', 'subsection') | https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L69-L87 |
alvinwan/tex2py | tex2py/tex2py.py | TreeOfContents.getHeadingLevel | def getHeadingLevel(ts, hierarchy=default_hierarchy):
"""Extract heading level for a particular Tex element, given a specified
hierarchy.
>>> ts = TexSoup(r'\section{Hello}').section
>>> TOC.getHeadingLevel(ts)
2
>>> ts2 = TexSoup(r'\chapter{hello again}').chapter
>>> TOC.getHeadingLevel(ts2)
1
>>> ts3 = TexSoup(r'\subsubsubsubsection{Hello}').subsubsubsubsection
>>> TOC.getHeadingLevel(ts3)
6
"""
try:
return hierarchy.index(ts.name)+1
except ValueError:
if ts.name.endswith('section'):
i, name = 0, ts.name
while name.startswith('sub'):
name, i = name[3:], i+1
if name == 'section':
return i+2
return float('inf')
except (AttributeError, TypeError):
return float('inf') | python | def getHeadingLevel(ts, hierarchy=default_hierarchy):
"""Extract heading level for a particular Tex element, given a specified
hierarchy.
>>> ts = TexSoup(r'\section{Hello}').section
>>> TOC.getHeadingLevel(ts)
2
>>> ts2 = TexSoup(r'\chapter{hello again}').chapter
>>> TOC.getHeadingLevel(ts2)
1
>>> ts3 = TexSoup(r'\subsubsubsubsection{Hello}').subsubsubsubsection
>>> TOC.getHeadingLevel(ts3)
6
"""
try:
return hierarchy.index(ts.name)+1
except ValueError:
if ts.name.endswith('section'):
i, name = 0, ts.name
while name.startswith('sub'):
name, i = name[3:], i+1
if name == 'section':
return i+2
return float('inf')
except (AttributeError, TypeError):
return float('inf') | Extract heading level for a particular Tex element, given a specified
hierarchy.
>>> ts = TexSoup(r'\section{Hello}').section
>>> TOC.getHeadingLevel(ts)
2
>>> ts2 = TexSoup(r'\chapter{hello again}').chapter
>>> TOC.getHeadingLevel(ts2)
1
>>> ts3 = TexSoup(r'\subsubsubsubsection{Hello}').subsubsubsubsection
>>> TOC.getHeadingLevel(ts3)
6 | https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L90-L115 |
alvinwan/tex2py | tex2py/tex2py.py | TreeOfContents.parseTopDepth | def parseTopDepth(self, descendants=()):
"""Parse tex for highest tag in hierarchy
>>> TOC.fromLatex('\\section{Hah}\\subsection{No}').parseTopDepth()
1
>>> s = '\\subsubsubsection{Yo}\\subsubsection{Hah}'
>>> TOC.fromLatex(s).parseTopDepth()
1
>>> h = ('section', 'subsubsection', 'subsubsubsection')
>>> TOC.fromLatex(s, hierarchy=h).parseTopDepth()
2
"""
descendants = list(descendants) or \
list(getattr(self.source, 'descendants', descendants))
if not descendants:
return -1
return min(TOC.getHeadingLevel(e, self.hierarchy) for e in descendants) | python | def parseTopDepth(self, descendants=()):
"""Parse tex for highest tag in hierarchy
>>> TOC.fromLatex('\\section{Hah}\\subsection{No}').parseTopDepth()
1
>>> s = '\\subsubsubsection{Yo}\\subsubsection{Hah}'
>>> TOC.fromLatex(s).parseTopDepth()
1
>>> h = ('section', 'subsubsection', 'subsubsubsection')
>>> TOC.fromLatex(s, hierarchy=h).parseTopDepth()
2
"""
descendants = list(descendants) or \
list(getattr(self.source, 'descendants', descendants))
if not descendants:
return -1
return min(TOC.getHeadingLevel(e, self.hierarchy) for e in descendants) | Parse tex for highest tag in hierarchy
>>> TOC.fromLatex('\\section{Hah}\\subsection{No}').parseTopDepth()
1
>>> s = '\\subsubsubsection{Yo}\\subsubsection{Hah}'
>>> TOC.fromLatex(s).parseTopDepth()
1
>>> h = ('section', 'subsubsection', 'subsubsubsection')
>>> TOC.fromLatex(s, hierarchy=h).parseTopDepth()
2 | https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L117-L133 |
alvinwan/tex2py | tex2py/tex2py.py | TreeOfContents.parseBranches | def parseBranches(self, descendants):
"""
Parse top level of latex
:param list elements: list of source objects
:return: list of filtered TreeOfContents objects
>>> toc = TOC.fromLatex(r'\section{h1}\subsection{subh1}\section{h2}\
... \subsection{subh2}')
>>> toc.parseTopDepth(toc.descendants)
1
>>> toc.parseBranches(toc.descendants)
[h1, h2]
>>> len(toc.branches)
2
>>> len(toc.section.branches)
1
"""
i, branches = self.parseTopDepth(descendants), []
for descendant in descendants:
if self.getHeadingLevel(descendant, self.hierarchy) == i:
branches.append({'source': descendant})
if self.getHeadingLevel(descendant, self.hierarchy) > i \
and branches:
branches[-1].setdefault('descendants', []).append(descendant)
return [TOC(str(descendant), depth=i, hierarchy=self.hierarchy,
**branch) for branch in branches] | python | def parseBranches(self, descendants):
"""
Parse top level of latex
:param list elements: list of source objects
:return: list of filtered TreeOfContents objects
>>> toc = TOC.fromLatex(r'\section{h1}\subsection{subh1}\section{h2}\
... \subsection{subh2}')
>>> toc.parseTopDepth(toc.descendants)
1
>>> toc.parseBranches(toc.descendants)
[h1, h2]
>>> len(toc.branches)
2
>>> len(toc.section.branches)
1
"""
i, branches = self.parseTopDepth(descendants), []
for descendant in descendants:
if self.getHeadingLevel(descendant, self.hierarchy) == i:
branches.append({'source': descendant})
if self.getHeadingLevel(descendant, self.hierarchy) > i \
and branches:
branches[-1].setdefault('descendants', []).append(descendant)
return [TOC(str(descendant), depth=i, hierarchy=self.hierarchy,
**branch) for branch in branches] | Parse top level of latex
:param list elements: list of source objects
:return: list of filtered TreeOfContents objects
>>> toc = TOC.fromLatex(r'\section{h1}\subsection{subh1}\section{h2}\
... \subsection{subh2}')
>>> toc.parseTopDepth(toc.descendants)
1
>>> toc.parseBranches(toc.descendants)
[h1, h2]
>>> len(toc.branches)
2
>>> len(toc.section.branches)
1 | https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L152-L177 |
alvinwan/tex2py | tex2py/tex2py.py | TreeOfContents.fromFile | def fromFile(path_or_buffer):
"""Creates abstraction using path to file
:param str path_or_buffer: path to tex file or buffer
:return: TreeOfContents object
"""
return TOC.fromLatex(open(path_or_buffer).read()
if isinstance(path_or_buffer, str)
else path_or_buffer) | python | def fromFile(path_or_buffer):
"""Creates abstraction using path to file
:param str path_or_buffer: path to tex file or buffer
:return: TreeOfContents object
"""
return TOC.fromLatex(open(path_or_buffer).read()
if isinstance(path_or_buffer, str)
else path_or_buffer) | Creates abstraction using path to file
:param str path_or_buffer: path to tex file or buffer
:return: TreeOfContents object | https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L213-L221 |
alvinwan/tex2py | tex2py/tex2py.py | TreeOfContents.fromLatex | def fromLatex(tex, *args, **kwargs):
"""Creates abstraction using Latex
:param str tex: Latex
:return: TreeOfContents object
"""
source = TexSoup(tex)
return TOC('[document]', source=source,
descendants=list(source.descendants), *args, **kwargs) | python | def fromLatex(tex, *args, **kwargs):
"""Creates abstraction using Latex
:param str tex: Latex
:return: TreeOfContents object
"""
source = TexSoup(tex)
return TOC('[document]', source=source,
descendants=list(source.descendants), *args, **kwargs) | Creates abstraction using Latex
:param str tex: Latex
:return: TreeOfContents object | https://github.com/alvinwan/tex2py/blob/85ce4a23ad8dbeb49a360171877dd14d099b3e9a/tex2py/tex2py.py#L224-L232 |
mar10/pyftpsync | ftpsync/synchronizers.py | process_options | def process_options(opts):
"""Check and prepare options dict."""
# Convert match and exclude args into pattern lists
match = opts.get("match")
if match and type(match) is str:
opts["match"] = [pat.strip() for pat in match.split(",")]
elif match:
assert type(match) is list
else:
opts["match"] = []
exclude = opts.get("exclude")
if exclude and type(exclude) is str:
opts["exclude"] = [pat.strip() for pat in exclude.split(",")]
elif exclude:
assert type(exclude) is list
else:
# opts["exclude"] = DEFAULT_OMIT
opts["exclude"] = [] | python | def process_options(opts):
"""Check and prepare options dict."""
# Convert match and exclude args into pattern lists
match = opts.get("match")
if match and type(match) is str:
opts["match"] = [pat.strip() for pat in match.split(",")]
elif match:
assert type(match) is list
else:
opts["match"] = []
exclude = opts.get("exclude")
if exclude and type(exclude) is str:
opts["exclude"] = [pat.strip() for pat in exclude.split(",")]
elif exclude:
assert type(exclude) is list
else:
# opts["exclude"] = DEFAULT_OMIT
opts["exclude"] = [] | Check and prepare options dict. | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L41-L59 |
mar10/pyftpsync | ftpsync/synchronizers.py | match_path | def match_path(entry, opts):
"""Return True if `path` matches `match` and `exclude` options."""
if entry.name in ALWAYS_OMIT:
return False
# TODO: currently we use fnmatch syntax and match against names.
# We also might allow glob syntax and match against the whole relative path instead
# path = entry.get_rel_path()
path = entry.name
ok = True
match = opts.get("match")
exclude = opts.get("exclude")
if entry.is_file() and match:
assert type(match) is list
ok = False
for pat in match:
if fnmatch.fnmatch(path, pat):
ok = True
break
if ok and exclude:
assert type(exclude) is list
for pat in exclude:
if fnmatch.fnmatch(path, pat):
ok = False
break
# write("match", ok, entry)
return ok | python | def match_path(entry, opts):
"""Return True if `path` matches `match` and `exclude` options."""
if entry.name in ALWAYS_OMIT:
return False
# TODO: currently we use fnmatch syntax and match against names.
# We also might allow glob syntax and match against the whole relative path instead
# path = entry.get_rel_path()
path = entry.name
ok = True
match = opts.get("match")
exclude = opts.get("exclude")
if entry.is_file() and match:
assert type(match) is list
ok = False
for pat in match:
if fnmatch.fnmatch(path, pat):
ok = True
break
if ok and exclude:
assert type(exclude) is list
for pat in exclude:
if fnmatch.fnmatch(path, pat):
ok = False
break
# write("match", ok, entry)
return ok | Return True if `path` matches `match` and `exclude` options. | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L63-L88 |
mar10/pyftpsync | ftpsync/synchronizers.py | BaseSynchronizer._compare_file | def _compare_file(self, local, remote):
"""Byte compare two files (early out on first difference)."""
assert isinstance(local, FileEntry) and isinstance(remote, FileEntry)
if not local or not remote:
write(" Files cannot be compared ({} != {}).".format(local, remote))
return False
elif local.size != remote.size:
write(
" Files are different (size {:,d} != {:,d}).".format(
local.size, remote.size
)
)
return False
with local.target.open_readable(
local.name
) as fp_src, remote.target.open_readable(remote.name) as fp_dest:
res, ofs = byte_compare(fp_src, fp_dest)
if not res:
write(" Files are different at offset {:,d}.".format(ofs))
else:
write(" Files are equal.")
return res | python | def _compare_file(self, local, remote):
"""Byte compare two files (early out on first difference)."""
assert isinstance(local, FileEntry) and isinstance(remote, FileEntry)
if not local or not remote:
write(" Files cannot be compared ({} != {}).".format(local, remote))
return False
elif local.size != remote.size:
write(
" Files are different (size {:,d} != {:,d}).".format(
local.size, remote.size
)
)
return False
with local.target.open_readable(
local.name
) as fp_src, remote.target.open_readable(remote.name) as fp_dest:
res, ofs = byte_compare(fp_src, fp_dest)
if not res:
write(" Files are different at offset {:,d}.".format(ofs))
else:
write(" Files are equal.")
return res | Byte compare two files (early out on first difference). | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L231-L255 |
mar10/pyftpsync | ftpsync/synchronizers.py | BaseSynchronizer._tick | def _tick(self):
"""Write progress info and move cursor to beginning of line."""
if (self.verbose >= 3 and not IS_REDIRECTED) or self.options.get("progress"):
stats = self.get_stats()
prefix = DRY_RUN_PREFIX if self.dry_run else ""
sys.stdout.write(
"{}Touched {}/{} entries in {} directories...\r".format(
prefix,
stats["entries_touched"],
stats["entries_seen"],
stats["local_dirs"],
)
)
sys.stdout.flush()
return | python | def _tick(self):
"""Write progress info and move cursor to beginning of line."""
if (self.verbose >= 3 and not IS_REDIRECTED) or self.options.get("progress"):
stats = self.get_stats()
prefix = DRY_RUN_PREFIX if self.dry_run else ""
sys.stdout.write(
"{}Touched {}/{} entries in {} directories...\r".format(
prefix,
stats["entries_touched"],
stats["entries_seen"],
stats["local_dirs"],
)
)
sys.stdout.flush()
return | Write progress info and move cursor to beginning of line. | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L437-L451 |
mar10/pyftpsync | ftpsync/synchronizers.py | BaseSynchronizer._sync_dir | def _sync_dir(self):
"""Traverse the local folder structure and remote peers.
This is the core algorithm that generates calls to self.sync_XXX()
handler methods.
_sync_dir() is called by self.run().
"""
local_entries = self.local.get_dir()
# Convert into a dict {name: FileEntry, ...}
local_entry_map = dict(map(lambda e: (e.name, e), local_entries))
remote_entries = self.remote.get_dir()
# Convert into a dict {name: FileEntry, ...}
remote_entry_map = dict(map(lambda e: (e.name, e), remote_entries))
entry_pair_list = []
# 1. Loop over all local files and classify the relationship to the
# peer entries.
for local_entry in local_entries:
if isinstance(local_entry, DirectoryEntry):
self._inc_stat("local_dirs")
else:
self._inc_stat("local_files")
if not self._before_sync(local_entry):
# TODO: currently, if a file is skipped, it will not be
# considered for deletion on the peer target
continue
# TODO: case insensitive?
# We should use os.path.normcase() to convert to lowercase on windows
# (i.e. if the FTP server is based on Windows)
remote_entry = remote_entry_map.get(local_entry.name)
entry_pair = EntryPair(local_entry, remote_entry)
entry_pair_list.append(entry_pair)
# TODO: renaming could be triggered, if we find an existing
# entry.unique with a different entry.name
# 2. Collect all remote entries that do NOT exist on the local target.
for remote_entry in remote_entries:
if isinstance(remote_entry, DirectoryEntry):
self._inc_stat("remote_dirs")
else:
self._inc_stat("remote_files")
if not self._before_sync(remote_entry):
continue
if remote_entry.name not in local_entry_map:
entry_pair = EntryPair(None, remote_entry)
entry_pair_list.append(entry_pair)
# print("NOT IN LOCAL")
# print(remote_entry.name)
# print(self.remote.get_id())
# print(local_entry_map.keys())
# print(self.local.cur_dir_meta.peer_sync.get(self.remote.get_id()))
# 3. Classify all entries and pairs.
# We pass the additional meta data here
peer_dir_meta = self.local.cur_dir_meta.peer_sync.get(self.remote.get_id())
for pair in entry_pair_list:
pair.classify(peer_dir_meta)
# 4. Perform (or schedule) resulting file operations
for pair in entry_pair_list:
# print(pair)
# Let synchronizer modify the default operation (e.g. apply `--force` option)
hook_result = self.re_classify_pair(pair)
# Let synchronizer implement special handling of unmatched entries
# (e.g. `--delete_unmatched`)
if not self._match(pair.any_entry):
self.on_mismatch(pair)
# ... do not call operation handler...
elif hook_result is not False:
handler = getattr(self, "on_" + pair.operation, None)
# print(handler)
if handler:
try:
res = handler(pair)
except Exception as e:
if self.on_error(e, pair) is not True:
raise
else:
# write("NO HANDLER")
raise NotImplementedError("No handler for {}".format(pair))
if pair.is_conflict():
self._inc_stat("conflict_files")
# 5. Let the target provider write its meta data for the files in the
# current directory.
self.local.flush_meta()
self.remote.flush_meta()
# 6. Finally visit all local sub-directories recursively that also
# exist on the remote target.
for local_dir in local_entries:
# write("local_dir(%s, %s)" % (local_dir, local_dir))
if not local_dir.is_dir():
continue
elif not self._before_sync(local_dir):
continue
remote_dir = remote_entry_map.get(local_dir.name)
if remote_dir:
# write("sync_equal_dir(%s, %s)" % (local_dir, remote_dir))
# self._log_call("sync_equal_dir(%s, %s)" % (local_dir, remote_dir))
# res = self.sync_equal_dir(local_dir, remote_dir)
# res = self.on_equal(local_dir, remote_dir)
if res is not False:
self.local.cwd(local_dir.name)
self.remote.cwd(local_dir.name)
self._sync_dir()
self.local.cwd("..")
self.remote.cwd("..")
return True | python | def _sync_dir(self):
"""Traverse the local folder structure and remote peers.
This is the core algorithm that generates calls to self.sync_XXX()
handler methods.
_sync_dir() is called by self.run().
"""
local_entries = self.local.get_dir()
# Convert into a dict {name: FileEntry, ...}
local_entry_map = dict(map(lambda e: (e.name, e), local_entries))
remote_entries = self.remote.get_dir()
# Convert into a dict {name: FileEntry, ...}
remote_entry_map = dict(map(lambda e: (e.name, e), remote_entries))
entry_pair_list = []
# 1. Loop over all local files and classify the relationship to the
# peer entries.
for local_entry in local_entries:
if isinstance(local_entry, DirectoryEntry):
self._inc_stat("local_dirs")
else:
self._inc_stat("local_files")
if not self._before_sync(local_entry):
# TODO: currently, if a file is skipped, it will not be
# considered for deletion on the peer target
continue
# TODO: case insensitive?
# We should use os.path.normcase() to convert to lowercase on windows
# (i.e. if the FTP server is based on Windows)
remote_entry = remote_entry_map.get(local_entry.name)
entry_pair = EntryPair(local_entry, remote_entry)
entry_pair_list.append(entry_pair)
# TODO: renaming could be triggered, if we find an existing
# entry.unique with a different entry.name
# 2. Collect all remote entries that do NOT exist on the local target.
for remote_entry in remote_entries:
if isinstance(remote_entry, DirectoryEntry):
self._inc_stat("remote_dirs")
else:
self._inc_stat("remote_files")
if not self._before_sync(remote_entry):
continue
if remote_entry.name not in local_entry_map:
entry_pair = EntryPair(None, remote_entry)
entry_pair_list.append(entry_pair)
# print("NOT IN LOCAL")
# print(remote_entry.name)
# print(self.remote.get_id())
# print(local_entry_map.keys())
# print(self.local.cur_dir_meta.peer_sync.get(self.remote.get_id()))
# 3. Classify all entries and pairs.
# We pass the additional meta data here
peer_dir_meta = self.local.cur_dir_meta.peer_sync.get(self.remote.get_id())
for pair in entry_pair_list:
pair.classify(peer_dir_meta)
# 4. Perform (or schedule) resulting file operations
for pair in entry_pair_list:
# print(pair)
# Let synchronizer modify the default operation (e.g. apply `--force` option)
hook_result = self.re_classify_pair(pair)
# Let synchronizer implement special handling of unmatched entries
# (e.g. `--delete_unmatched`)
if not self._match(pair.any_entry):
self.on_mismatch(pair)
# ... do not call operation handler...
elif hook_result is not False:
handler = getattr(self, "on_" + pair.operation, None)
# print(handler)
if handler:
try:
res = handler(pair)
except Exception as e:
if self.on_error(e, pair) is not True:
raise
else:
# write("NO HANDLER")
raise NotImplementedError("No handler for {}".format(pair))
if pair.is_conflict():
self._inc_stat("conflict_files")
# 5. Let the target provider write its meta data for the files in the
# current directory.
self.local.flush_meta()
self.remote.flush_meta()
# 6. Finally visit all local sub-directories recursively that also
# exist on the remote target.
for local_dir in local_entries:
# write("local_dir(%s, %s)" % (local_dir, local_dir))
if not local_dir.is_dir():
continue
elif not self._before_sync(local_dir):
continue
remote_dir = remote_entry_map.get(local_dir.name)
if remote_dir:
# write("sync_equal_dir(%s, %s)" % (local_dir, remote_dir))
# self._log_call("sync_equal_dir(%s, %s)" % (local_dir, remote_dir))
# res = self.sync_equal_dir(local_dir, remote_dir)
# res = self.on_equal(local_dir, remote_dir)
if res is not False:
self.local.cwd(local_dir.name)
self.remote.cwd(local_dir.name)
self._sync_dir()
self.local.cwd("..")
self.remote.cwd("..")
return True | Traverse the local folder structure and remote peers.
This is the core algorithm that generates calls to self.sync_XXX()
handler methods.
_sync_dir() is called by self.run(). | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L474-L596 |
mar10/pyftpsync | ftpsync/synchronizers.py | BaseSynchronizer.on_error | def on_error(self, e, pair):
"""Called for pairs that don't match `match` and `exclude` filters."""
RED = ansi_code("Fore.LIGHTRED_EX")
R = ansi_code("Style.RESET_ALL")
# any_entry = pair.any_entry
write((RED + "ERROR: {}\n {}" + R).format(e, pair))
# Return True to ignore this error (instead of raising and terminating the app)
if "[Errno 92] Illegal byte sequence" in "{}".format(e) and compat.PY2:
write(RED + "This _may_ be solved by using Python 3." + R)
# return True
return False | python | def on_error(self, e, pair):
"""Called for pairs that don't match `match` and `exclude` filters."""
RED = ansi_code("Fore.LIGHTRED_EX")
R = ansi_code("Style.RESET_ALL")
# any_entry = pair.any_entry
write((RED + "ERROR: {}\n {}" + R).format(e, pair))
# Return True to ignore this error (instead of raising and terminating the app)
if "[Errno 92] Illegal byte sequence" in "{}".format(e) and compat.PY2:
write(RED + "This _may_ be solved by using Python 3." + R)
# return True
return False | Called for pairs that don't match `match` and `exclude` filters. | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L606-L616 |
mar10/pyftpsync | ftpsync/synchronizers.py | BaseSynchronizer.on_copy_local | def on_copy_local(self, pair):
"""Called when the local resource should be copied to remote."""
status = pair.remote_classification
self._log_action("copy", status, ">", pair.local) | python | def on_copy_local(self, pair):
"""Called when the local resource should be copied to remote."""
status = pair.remote_classification
self._log_action("copy", status, ">", pair.local) | Called when the local resource should be copied to remote. | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L626-L629 |
mar10/pyftpsync | ftpsync/synchronizers.py | BaseSynchronizer.on_copy_remote | def on_copy_remote(self, pair):
"""Called when the remote resource should be copied to local."""
status = pair.local_classification
self._log_action("copy", status, "<", pair.remote) | python | def on_copy_remote(self, pair):
"""Called when the remote resource should be copied to local."""
status = pair.local_classification
self._log_action("copy", status, "<", pair.remote) | Called when the remote resource should be copied to local. | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L631-L634 |
mar10/pyftpsync | ftpsync/synchronizers.py | BiDirSynchronizer.on_need_compare | def on_need_compare(self, pair):
"""Re-classify pair based on file attributes and options."""
# print("on_need_compare", pair)
# If no metadata is available, we could only classify file entries as
# 'existing'.
# Now we use peer information to improve this classification.
c_pair = (pair.local_classification, pair.remote_classification)
org_pair = c_pair
org_operation = pair.operation
# print("need_compare", pair)
if pair.is_dir:
# For directores, we cannot compare existing peer entries.
# Instead, we simply log (and traverse the children later).
pair.local_classification = pair.remote_classification = "existing"
pair.operation = "equal"
self._log_action("", "visit", "?", pair.local, min_level=4)
# self._log_action("", "equal", "=", pair.local, min_level=4)
return
elif c_pair == ("existing", "existing"):
# Naive classification derived from file time and size
time_cmp = eps_compare(
pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
)
if time_cmp < 0:
c_pair = ("unmodified", "modified") # remote is newer
elif time_cmp > 0:
c_pair = ("modified", "unmodified") # local is newer
elif pair.local.size == pair.remote.size:
c_pair = ("unmodified", "unmodified") # equal
else:
c_pair = ("modified", "modified") # conflict!
elif c_pair == ("new", "new"):
# Naive classification derived from file time and size
time_cmp = eps_compare(
pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
)
if time_cmp == 0 and pair.local.size == pair.remote.size:
c_pair = ("unmodified", "unmodified") # equal
else:
c_pair = ("modified", "modified") # conflict!
# elif c_pair == ("unmodified", "unmodified"):
pair.local_classification = c_pair[0]
pair.remote_classification = c_pair[1]
pair.operation = operation_map.get(c_pair)
# print("on_need_compare {} => {}".format(org_pair, pair))
if not pair.operation:
raise RuntimeError(
"Undefined operation for pair classification {}".format(c_pair)
)
elif pair.operation == org_operation:
raise RuntimeError("Could not re-classify {}".format(org_pair))
handler = getattr(self, "on_" + pair.operation, None)
res = handler(pair)
# self._log_action("", "different", "?", pair.local, min_level=2)
return res | python | def on_need_compare(self, pair):
"""Re-classify pair based on file attributes and options."""
# print("on_need_compare", pair)
# If no metadata is available, we could only classify file entries as
# 'existing'.
# Now we use peer information to improve this classification.
c_pair = (pair.local_classification, pair.remote_classification)
org_pair = c_pair
org_operation = pair.operation
# print("need_compare", pair)
if pair.is_dir:
# For directores, we cannot compare existing peer entries.
# Instead, we simply log (and traverse the children later).
pair.local_classification = pair.remote_classification = "existing"
pair.operation = "equal"
self._log_action("", "visit", "?", pair.local, min_level=4)
# self._log_action("", "equal", "=", pair.local, min_level=4)
return
elif c_pair == ("existing", "existing"):
# Naive classification derived from file time and size
time_cmp = eps_compare(
pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
)
if time_cmp < 0:
c_pair = ("unmodified", "modified") # remote is newer
elif time_cmp > 0:
c_pair = ("modified", "unmodified") # local is newer
elif pair.local.size == pair.remote.size:
c_pair = ("unmodified", "unmodified") # equal
else:
c_pair = ("modified", "modified") # conflict!
elif c_pair == ("new", "new"):
# Naive classification derived from file time and size
time_cmp = eps_compare(
pair.local.mtime, pair.remote.mtime, FileEntry.EPS_TIME
)
if time_cmp == 0 and pair.local.size == pair.remote.size:
c_pair = ("unmodified", "unmodified") # equal
else:
c_pair = ("modified", "modified") # conflict!
# elif c_pair == ("unmodified", "unmodified"):
pair.local_classification = c_pair[0]
pair.remote_classification = c_pair[1]
pair.operation = operation_map.get(c_pair)
# print("on_need_compare {} => {}".format(org_pair, pair))
if not pair.operation:
raise RuntimeError(
"Undefined operation for pair classification {}".format(c_pair)
)
elif pair.operation == org_operation:
raise RuntimeError("Could not re-classify {}".format(org_pair))
handler = getattr(self, "on_" + pair.operation, None)
res = handler(pair)
# self._log_action("", "different", "?", pair.local, min_level=2)
return res | Re-classify pair based on file attributes and options. | https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/synchronizers.py#L871-L934 |
def on_conflict(self, pair):
    """Resolve a conflicting pair (both sides modified, or modified vs. missing).

    The resolution is obtained from `_interactive_resolve()` (which honors the
    `--resolve` option and may prompt the user), then the winning side is
    copied/removed accordingly.

    Return False to prevent visiting of children.
    """
    # self._log_action("skip", "conflict", "!", pair.local, min_level=2)
    # print("on_conflict", pair)
    any_entry = pair.any_entry
    # Honor --match / --exclude filters first.
    if not self._test_match_or_print(any_entry):
        return
    resolve = self._interactive_resolve(pair)
    if resolve == "skip":
        self._log_action("skip", "conflict", "*?*", any_entry)
        self._inc_stat("conflict_files_skipped")
        return
    if pair.local and pair.remote:
        # Both sides exist: copy the winner over the loser.
        assert pair.local.is_file()
        # Entry comparison decides which side is 'newer'
        # (presumably by mtime — TODO confirm FileEntry ordering).
        is_newer = pair.local > pair.remote
        if (
            resolve == "local"
            or (is_newer and resolve == "new")
            or (not is_newer and resolve == "old")
        ):
            self._log_action("copy", "conflict", "*>*", pair.local)
            self._copy_file(self.local, self.remote, pair.local)
        elif (
            resolve == "remote"
            or (is_newer and resolve == "old")
            or (not is_newer and resolve == "new")
        ):
            self._log_action("copy", "conflict", "*<*", pair.local)
            self._copy_file(self.remote, self.local, pair.remote)
        else:
            raise NotImplementedError
    elif pair.local:
        # Only the local side exists (remote was removed).
        assert pair.local.is_file()
        if resolve == "local":
            # Keep local: restore it on the remote side.
            self._log_action("restore", "conflict", "*>x", pair.local)
            self._copy_file(self.local, self.remote, pair.local)
        elif resolve == "remote":
            # Accept the remote deletion: remove the local file as well.
            self._log_action("delete", "conflict", "*<x", pair.local)
            self._remove_file(pair.local)
        else:
            raise NotImplementedError
    else:
        # Only the remote side exists (local was removed).
        assert pair.remote.is_file()
        if resolve == "local":
            # Accept the local deletion: remove the remote file as well.
            self._log_action("delete", "conflict", "x>*", pair.remote)
            self._remove_file(pair.remote)
        elif resolve == "remote":
            # Keep remote: restore it on the local side.
            self._log_action("restore", "conflict", "x<*", pair.remote)
            self._copy_file(self.remote, self.local, pair.remote)
        else:
            raise NotImplementedError
    return
def on_mismatch(self, pair):
    """Handle a pair that was rejected by the `match` and `exclude` filters.

    If --delete-unmatched is on, remove the remote resource; otherwise the
    pair is only logged (at high verbosity) and left untouched.
    """
    entry = pair.remote
    if not (self.options.get("delete_unmatched") and entry):
        # Keep the resource; just note it when running very verbosely.
        self._log_action("skip", "unmatched", "-", pair.any_entry, min_level=4)
        return
    self._log_action("delete", "unmatched", ">", entry)
    remove = self._remove_dir if entry.is_dir() else self._remove_file
    remove(entry)
def _interactive_resolve(self, pair):
    """Return 'local', 'remote', or 'skip' to use local, remote resource or skip.

    Honors the `--resolve` option ('remote', 'ask', or 'skip'); only 'ask'
    prompts the user. Answering with an upper-case shortcut stores the choice
    in `self.resolve_all`, so it is applied to all remaining conflicts.
    """
    # A previous upper-case answer resolves everything the same way.
    if self.resolve_all:
        if self.verbose >= 5:
            self._print_pair_diff(pair)
        return self.resolve_all
    resolve = self.options.get("resolve", "skip")
    assert resolve in ("remote", "ask", "skip")
    if resolve == "ask" or self.verbose >= 5:
        self._print_pair_diff(pair)
    if resolve in ("remote", "skip"):
        # Non-interactive policy: no prompt needed.
        # self.resolve_all = resolve
        return resolve
    # ANSI escape sequences used to highlight the hotkey letters.
    # RED = ansi_code("Fore.LIGHTRED_EX")
    M = ansi_code("Style.BRIGHT") + ansi_code("Style.UNDERLINE")
    R = ansi_code("Style.RESET_ALL")
    # self._print_pair_diff(pair)
    self._inc_stat("interactive_ask")
    while True:
        prompt = (
            "Use "
            + M
            + "R"
            + R
            + "emote, "
            + M
            + "S"
            + R
            + "kip, "
            + M
            + "B"
            + R
            + "inary compare, "
            + M
            + "H"
            + R
            + "elp? "
        )
        r = compat.console_input(prompt).strip()
        if r in ("h", "H", "?"):
            print("The following keys are supported:")
            print("  'b': Binary compare")
            print("  'r': Download remote file")
            print("  's': Skip this file (leave both targets unchanged)")
            print(
                "Hold Shift (upper case letters) to apply choice for all "
                "remaining conflicts."
            )
            print("Hit Ctrl+C to abort.")
            continue
        elif r in ("B", "b"):
            # Show a binary diff, then ask again.
            self._compare_file(pair.local, pair.remote)
            continue
        elif r in ("R", "S"):
            # Upper case: remember the answer for all remaining conflicts.
            r = self._resolve_shortcuts[r.lower()]
            self.resolve_all = r
            break
        elif r in ("r", "s"):
            # Lower case: apply to this conflict only.
            r = self._resolve_shortcuts[r]
            break
    return r
def on_mismatch(self, pair):
    """Handle a pair that was rejected by the `match` and `exclude` filters.

    If --delete-unmatched is on, remove the local resource; otherwise the
    pair is only logged (at high verbosity) and left untouched.
    """
    entry = pair.local
    if not (self.options.get("delete_unmatched") and entry):
        # Keep the resource; just note it when running very verbosely.
        self._log_action("skip", "unmatched", "-", pair.any_entry, min_level=4)
        return
    self._log_action("delete", "unmatched", "<", entry)
    remove = self._remove_dir if entry.is_dir() else self._remove_file
    remove(entry)
def handle_run_command(parser, args):
    """Implement `run` sub-command.

    Locates a `.pyftpsync.yaml` task file in the current directory or one of
    its parents, merges the selected task's options into `args` (command line
    wins where appropriate), and derives the local/remote target paths.
    `parser.error()` is used to abort on any configuration problem.
    """
    MAX_LEVELS = 15
    # --- Look for `pyftpsync.yaml` in current folder and parents ---
    cur_level = 0
    cur_folder = os.getcwd()
    config_path = None
    while cur_level < MAX_LEVELS:
        path = os.path.join(cur_folder, CONFIG_FILE_NAME)
        # print("Searching for {}...".format(path))
        if os.path.isfile(path):
            config_path = path
            break
        parent = os.path.dirname(cur_folder)
        if parent == cur_folder:
            # Reached the filesystem root.
            break
        cur_folder = parent
        cur_level += 1
    if not config_path:
        parser.error(
            "Could not locate `.pyftpsync.yaml` in {} or {} parent folders.".format(
                os.getcwd(), cur_level
            )
        )
    # --- Parse `pyftpsync.yaml` and set `args` attributes ---
    try:
        with open(config_path, "rb") as f:
            config = yaml.safe_load(f)
    except Exception as e:
        parser.error("Error parsing {}: {}".format(config_path, e))
        # write_error("Error parsing {}: {}".format(config_path, e))
        # raise
    # print(config)
    if "tasks" not in config:
        parser.error("Missing option `tasks` in {}".format(config_path))
    common_config = config.get("common_config", {})
    default_task = config.get("default_task", "default")
    task_name = args.task or default_task
    if task_name not in config["tasks"]:
        parser.error("Missing option `tasks.{}` in {}".format(task_name, config_path))
    task = config["tasks"][task_name]
    write("Running task '{}' from {}".format(task_name, config_path))
    # Task options override the shared `common_config` defaults.
    common_config.update(task)
    task = common_config
    # write("task", task)
    # --- Check task syntax ---
    task_args = set(task.keys())
    missing_args = MANDATORY_TASK_ARGS.difference(task_args)
    if missing_args:
        parser.error(
            "Missing mandatory options: tasks.{}.{}".format(
                task_name, ", ".join(missing_args)
            )
        )
    allowed_args = KNOWN_TASK_ARGS.union(MANDATORY_TASK_ARGS)
    invalid_args = task_args.difference(allowed_args)
    if invalid_args:
        parser.error(
            "Invalid options: tasks.{}.{}".format(task_name, ", ".join(invalid_args))
        )
    # write("args", args)
    # Copy yaml task options onto `args`, unless explicitly overridden on the
    # command line.
    for name in allowed_args:
        val = task.get(name, None)  # default)
        if val is None:
            continue  # option not specified in yaml
        # Override yaml entry by command line
        cmd_val = getattr(args, name, None)
        # write("check --{}: {} => {}".format(name, val, cmd_val))
        if cmd_val != val:
            override = False
            if name in OVERRIDABLE_BOOL_ARGS and cmd_val:
                override = True
            elif name in {"here", "root"} and (args.here or args.root):
                override = True
            elif name == "verbose" and cmd_val != 3:
                # 3 is the verbosity default, so any other value was explicit.
                override = True
            if override:
                write(
                    "Yaml entry overriden by --{}: {} => {}".format(name, val, cmd_val)
                )
                continue
        setattr(args, name, val)
    # --- Figure out local target path ---
    cur_folder = os.getcwd()
    root_folder = os.path.dirname(config_path)
    path_ofs = os.path.relpath(os.getcwd(), root_folder)
    if cur_level == 0 or args.root:
        # Config file lives here (or --root given): sync the whole project.
        path_ofs = ""
        args.local = root_folder
    elif args.here:
        # --here: sync only the sub-branch below the current folder.
        write("Using sub-branch {sub} of {root}".format(root=root_folder, sub=path_ofs))
        args.local = cur_folder
        args.remote = os.path.join(args.remote, path_ofs)
    else:
        parser.error(
            "`.pyftpsync.yaml` configuration was found in a parent directory. "
            "Please pass an additional argument to clarify:\n"
            "    --root: synchronize whole project ({root})\n"
            "    --here: synchronize sub branch ({root}/{sub})".format(
                root=root_folder, sub=path_ofs
            )
        )
def set_pyftpsync_logger(logger=True):
    """Define target for common output.

    Args:
        logger (bool | None | logging.Logger):
            Pass None to use `print()` to stdout instead of logging.
            Pass True to create a simple standard logger.

    Returns:
        The previously installed logger (may be None).
    """
    global _logger

    previous = _logger
    if logger is not True:
        # Install the caller-supplied logger (or None for plain print()).
        _logger = logger
        return previous
    # `True` requests a default logger with a standard configuration.
    logging.basicConfig(level=logging.INFO)
    default_logger = logging.getLogger("pyftpsync")
    default_logger.setLevel(logging.DEBUG)
    _logger = default_logger
    return previous
def write(*args, **kwargs):
    """Redirectable wrapper for print statements."""
    is_debug = kwargs.pop("debug", None)
    is_warning = kwargs.pop("warning", None)
    if not _logger:
        # No logger installed: behave like a plain print().
        print(*args, **kwargs)
        return
    # The logging API does not accept print()'s keyword arguments.
    kwargs.pop("end", None)
    kwargs.pop("file", None)
    if is_debug:
        emit = _logger.debug
    elif is_warning:
        emit = _logger.warning
    else:
        emit = _logger.info
    emit(*args, **kwargs)
def write_error(*args, **kwargs):
    """Redirectable wrapper for print sys.stderr statements."""
    if not _logger:
        # No logger installed: print to stderr directly.
        print(*args, file=sys.stderr, **kwargs)
        return
    # The logging API does not accept print()'s keyword arguments.
    kwargs.pop("end", None)
    kwargs.pop("file", None)
    _logger.error(*args, **kwargs)
def namespace_to_dict(o):
    """Convert an argparse namespace object to a dictionary."""
    # Keep every plain attribute; bound methods and other callables are dropped.
    return {key: value for key, value in o.__dict__.items() if not callable(value)}
def eps_compare(f1, f2, eps):
    """Three-way compare of two floats with tolerance `eps`.

    Note: despite its history, this is NOT a boolean predicate.

    Returns:
        int: 0 if ``abs(f1 - f2) <= eps`` (considered equal),
            -1 if `f1` is smaller, +1 if `f1` is larger.
    """
    res = f1 - f2
    if abs(res) <= eps:  # '<=', so eps == 0 works as expected
        return 0
    elif res < 0:
        return -1
    return 1
def get_option(env_name, section, opt_name, default=None):
    """Return a configuration setting from environment var or .pyftpsyncrc"""
    # 1. Environment variable has highest priority.
    val = os.environ.get(env_name)
    if val is None:
        # 2. Fall back to the [section] entry of the parsed config file.
        try:
            val = _pyftpsyncrc_parser.get(section, opt_name)
        except (compat.configparser.NoSectionError, compat.configparser.NoOptionError):
            pass
    # 3. Finally use the caller-supplied default.
    return default if val is None else val
def check_cli_verbose(default=3):
    """Check for presence of `--verbose`/`--quiet` or `-v`/`-q` without using argparse."""
    cli_args = sys.argv[1:]
    level = default + cli_args.count("--verbose") - cli_args.count("--quiet")
    # Short options may be bundled (e.g. `-vvq`); count each letter.
    for arg in cli_args:
        if arg.startswith("-") and not arg.startswith("--"):
            flags = arg[1:]
            level += flags.count("v") - flags.count("q")
    return level
def prompt_for_password(url, user=None, default_user=None):
    """Prompt for username and password.

    If a user name is passed, only prompt for a password.

    Args:
        url (str): hostname
        user (str, optional):
            Pass a valid name to skip prompting for a user name
        default_user (str, optional):
            Pass a valid name that is used as default when prompting
            for a user name
    Raises:
        KeyboardInterrupt if user hits Ctrl-C
    Returns:
        (username, password) or None
    """
    if user is None:
        # Fall back to the OS login name as suggestion.
        default_user = default_user or getpass.getuser()
        while user is None:
            user = compat.console_input(
                "Enter username for {} [{}]: ".format(url, default_user)
            )
            if user.strip() == "" and default_user:
                # Empty input accepts the suggested default.
                user = default_user
    if user:
        pw = getpass.getpass(
            "Enter password for {}@{} (Ctrl+C to abort): ".format(user, url)
        )
        # NOTE(review): `pw or pw == ""` is always true for a str result of
        # getpass(), so an empty password is accepted intentionally here.
        if pw or pw == "":
            return (user, pw)
    return None
def get_credentials_for_url(url, opts, force_user=None):
    """Lookup credentials for a given target in keyring and .netrc.

    Optionally prompts for credentials if not found.

    Args:
        url (str): hostname used as lookup key.
        opts (dict): honored keys are `verbose`, `prompt`, `no_prompt`,
            `no_keyring`, and `no_netrc`.
        force_user (str, optional): if set, stored credentials are ignored
            and the user is prompted for this name's password instead.
    Raises:
        RuntimeError: if `force_user` is set while prompting is disabled, or
            if a deprecated pyftpsync 1.x credential store is found.
    Returns:
        2-tuple (username, password) or None
    """
    creds = None
    # assumes opts always carries a `verbose` entry — int(None) would raise.
    verbose = int(opts.get("verbose"))
    force_prompt = opts.get("prompt", False)
    allow_prompt = not opts.get("no_prompt", True)
    # A forced user name invalidates stored credentials, so disable lookups.
    allow_keyring = not opts.get("no_keyring", False) and not force_user
    allow_netrc = not opts.get("no_netrc", False) and not force_user
    # print("get_credentials_for_url", force_user, allow_prompt)
    if force_user and not allow_prompt:
        raise RuntimeError(
            "Cannot get credentials for a distinct user ({}) from keyring or .netrc and "
            "prompting is disabled.".format(force_user)
        )
    # Lookup our own pyftpsync 1.x credential store. This is deprecated with 2.x
    home_path = os.path.expanduser("~")
    file_path = os.path.join(home_path, DEFAULT_CREDENTIAL_STORE)
    if os.path.isfile(file_path):
        raise RuntimeError(
            "Custom password files are no longer supported. Delete {} and use .netrc instead.".format(
                file_path
            )
        )
    # Query keyring database
    if creds is None and keyring and allow_keyring:
        try:
            # Note: we pass the url as `username` and username:password as `password`
            c = keyring.get_password("pyftpsync", url)
            if c is not None:
                # Split only on the first ':' so passwords may contain colons.
                creds = c.split(":", 1)
                write(
                    "Using credentials from keyring('pyftpsync', '{}'): {}:***.".format(
                        url, creds[0]
                    )
                )
            else:
                if verbose >= 4:
                    write(
                        "No credentials found in keyring('pyftpsync', '{}').".format(
                            url
                        )
                    )
        # except keyring.errors.TransientKeyringError:
        except Exception as e:
            # e.g. user clicked 'no'
            write_error("Could not get password from keyring {}".format(e))
    # Query .netrc file
    # print(opts)
    if creds is None and allow_netrc:
        try:
            authenticators = None
            authenticators = netrc.netrc().authenticators(url)
        except CompatFileNotFoundError:
            if verbose >= 4:
                write("Could not get password (no .netrc file).")
        except Exception as e:
            write_error("Could not read .netrc: {}.".format(e))
        if authenticators:
            # netrc triple is (login, account, password).
            creds = (authenticators[0], authenticators[2])
            write("Using credentials from .netrc file: {}:***.".format(creds[0]))
        else:
            if verbose >= 4:
                write("Could not find entry for '{}' in .netrc file.".format(url))
    # Prompt for password if we don't have credentials yet, or --prompt was set.
    if allow_prompt:
        if creds is None:
            creds = prompt_for_password(url)
        elif force_prompt:
            # --prompt was set but we can provide a default for the user name
            creds = prompt_for_password(url, default_user=creds[0])
    return creds
def save_password(url, username, password):
    """Store credentials in keyring."""
    if not keyring:
        write("Could not store credentials (missing keyring support).")
        return
    # A ':' would break the `username:password` encoding used below.
    if ":" in username:
        raise RuntimeError(
            "Unable to store credentials if username contains a ':' ({}).".format(
                username
            )
        )
    try:
        # Note: we pass the url as `username` and username:password as `password`
        if password is None:
            keyring.delete_password("pyftpsync", url)
            write("Delete credentials from keyring ({})".format(url))
        else:
            keyring.set_password("pyftpsync", url, "{}:{}".format(username, password))
            write("Store credentials in keyring ({}, {}:***).".format(url, username))
    # except keyring.errors.TransientKeyringError:
    except Exception as e:
        # Best-effort: e.g. user clicked 'no' in the keyring dialog.
        write("Could not delete/set password {}.".format(e))
    return
def str_to_bool(val):
    """Return a boolean for '0', 'false', 'on', ..."""
    normalized = str(val).lower().strip()
    if normalized in {"1", "true", "on", "yes"}:
        return True
    if normalized in {"0", "false", "off", "no"}:
        return False
    # Keep the historical message: it reports the *normalized* value.
    raise ValueError(
        "Invalid value '{}'"
        "(expected '1', '0', 'true', 'false', 'on', 'off', 'yes', 'no').".format(
            normalized
        )
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.