repository_name (stringlengths 5–67) | func_path_in_repository (stringlengths 4–234) | func_name (stringlengths 0–314) | whole_func_string (stringlengths 52–3.87M) | language (stringclasses, 6 values) | func_code_string (stringlengths 52–3.87M) | func_documentation_string (stringlengths 1–47.2k) | func_code_url (stringlengths 85–339) |
---|---|---|---|---|---|---|---|
cqparts/cqparts | src/cqparts_fasteners/solidtypes/threads/base.py | Thread.make_pilothole_cutter | def make_pilothole_cutter(self):
"""
Make a solid to subtract from an interfacing solid to bore a pilot-hole.
"""
# get pilothole radius
# note: not done in .initialize_parameters() because this would cause
# the thread's profile to be created at initialisation (by default).
pilothole_radius = self.pilothole_radius
if pilothole_radius is None:
(inner_radius, outer_radius) = self.get_radii()
pilothole_radius = inner_radius + self.pilothole_ratio * (outer_radius - inner_radius)
return cadquery.Workplane('XY') \
.circle(pilothole_radius) \
.extrude(self.length) | python | def make_pilothole_cutter(self):
"""
Make a solid to subtract from an interfacing solid to bore a pilot-hole.
"""
# get pilothole radius
# note: not done in .initialize_parameters() because this would cause
# the thread's profile to be created at initialisation (by default).
pilothole_radius = self.pilothole_radius
if pilothole_radius is None:
(inner_radius, outer_radius) = self.get_radii()
pilothole_radius = inner_radius + self.pilothole_ratio * (outer_radius - inner_radius)
return cadquery.Workplane('XY') \
.circle(pilothole_radius) \
.extrude(self.length) | Make a solid to subtract from an interfacing solid to bore a pilot-hole. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_fasteners/solidtypes/threads/base.py#L373-L387 |
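When ``pilothole_radius`` is not set explicitly, the cutter's radius is a linear interpolation between the thread's root (inner) and crest (outer) radii. A minimal runnable sketch of that interpolation; the numbers are hypothetical stand-ins for ``get_radii()`` and ``pilothole_ratio``:

```python
# Hypothetical values standing in for Thread.get_radii() / pilothole_ratio.
inner_radius, outer_radius = 4.0, 5.0
pilothole_ratio = 0.6

pilothole_radius = inner_radius + pilothole_ratio * (outer_radius - inner_radius)
print(pilothole_radius)  # 4.6, i.e. 60% of the way from thread root to crest
```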
cqparts/cqparts | src/cqparts/assembly.py | Assembly.components | def components(self):
"""
Returns full :class:`dict` of :class:`Component` instances, after
a successful :meth:`build`
:return: dict of named :class:`Component` instances
:rtype: :class:`dict`
For more information read about the :ref:`parts_assembly-build-cycle` .
"""
if self._components is None:
self.build(recursive=False)
return self._components | python | def components(self):
"""
Returns full :class:`dict` of :class:`Component` instances, after
a successful :meth:`build`
:return: dict of named :class:`Component` instances
:rtype: :class:`dict`
For more information read about the :ref:`parts_assembly-build-cycle` .
"""
if self._components is None:
self.build(recursive=False)
return self._components | Returns full :class:`dict` of :class:`Component` instances, after
a successful :meth:`build`
:return: dict of named :class:`Component` instances
:rtype: :class:`dict`
For more information read about the :ref:`parts_assembly-build-cycle` . | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/assembly.py#L94-L106 |
cqparts/cqparts | src/cqparts/assembly.py | Assembly.constraints | def constraints(self):
"""
Returns full :class:`list` of :class:`Constraint <cqparts.constraint.Constraint>` instances, after
a successful :meth:`build`
:return: list of named :class:`Constraint <cqparts.constraint.Constraint>` instances
:rtype: :class:`list`
For more information read about the :ref:`parts_assembly-build-cycle` .
"""
if self._constraints is None:
self.build(recursive=False)
return self._constraints | python | def constraints(self):
"""
Returns full :class:`list` of :class:`Constraint <cqparts.constraint.Constraint>` instances, after
a successful :meth:`build`
:return: list of named :class:`Constraint <cqparts.constraint.Constraint>` instances
:rtype: :class:`list`
For more information read about the :ref:`parts_assembly-build-cycle` .
"""
if self._constraints is None:
self.build(recursive=False)
return self._constraints | Returns full :class:`list` of :class:`Constraint <cqparts.constraint.Constraint>` instances, after
a successful :meth:`build`
:return: list of named :class:`Constraint <cqparts.constraint.Constraint>` instances
:rtype: :class:`list`
For more information read about the :ref:`parts_assembly-build-cycle` . | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/assembly.py#L109-L121 |
cqparts/cqparts | src/cqparts/assembly.py | Assembly.solve | def solve(self):
"""
Run the solver and assign the solution's :class:`CoordSystem` instances
as the corresponding part's world coordinates.
"""
if self.world_coords is None:
log.warning("solving for Assembly without world coordinates set: %r", self)
for (component, world_coords) in solver(self.constraints, self.world_coords):
component.world_coords = world_coords | python | def solve(self):
"""
Run the solver and assign the solution's :class:`CoordSystem` instances
as the corresponding part's world coordinates.
"""
if self.world_coords is None:
log.warning("solving for Assembly without world coordinates set: %r", self)
for (component, world_coords) in solver(self.constraints, self.world_coords):
component.world_coords = world_coords | Run the solver and assign the solution's :class:`CoordSystem` instances
as the corresponding part's world coordinates. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/assembly.py#L132-L141 |
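A hedged sketch of driving ``solve()`` by hand; ``MyAssembly`` is a hypothetical :class:`Assembly` subclass, and assigning ``world_coords`` first avoids the warning logged above:

```python
# Hypothetical sketch; MyAssembly is an assumed cqparts.Assembly subclass.
from cqparts.utils.geometry import CoordSystem

asm = MyAssembly()
asm.world_coords = CoordSystem()  # place the assembly at the origin
asm.solve()                       # constrained components receive world_coords
```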
cqparts/cqparts | src/cqparts/assembly.py | Assembly.verify_components | def verify_components(components):
"""
Verify values returned from :meth:`make_components`.
Used internally during the :meth:`build` process.
:param components: value returned from :meth:`make_components`
:type components: :class:`dict`
:raises ValueError: if verification fails
"""
# verify returned type from user-defined function
if not isinstance(components, dict):
raise ValueError(
"invalid type returned by make_components(): %r (must be a dict)" % components
)
# check types for (name, component) pairs in dict
for (name, component) in components.items():
# name is a string
if not isinstance(name, str):
raise ValueError((
"invalid name from make_components(): (%r, %r) "
"(must be a (str, Component))"
) % (name, component))
# component is a Component instance
if not isinstance(component, Component):
raise ValueError((
"invalid component type from make_components(): (%r, %r) "
"(must be a (str, Component))"
) % (name, component))
# check component name validity
invalid_chars = set(name) - VALID_NAME_CHARS
if invalid_chars:
raise ValueError(
"component name {!r} invalid; cannot include {!r}".format(
name, invalid_chars
)
) | python | def verify_components(components):
"""
Verify values returned from :meth:`make_components`.
Used internally during the :meth:`build` process.
:param components: value returned from :meth:`make_components`
:type components: :class:`dict`
:raises ValueError: if verification fails
"""
# verify returned type from user-defined function
if not isinstance(components, dict):
raise ValueError(
"invalid type returned by make_components(): %r (must be a dict)" % components
)
# check types for (name, component) pairs in dict
for (name, component) in components.items():
# name is a string
if not isinstance(name, str):
raise ValueError((
"invalid name from make_components(): (%r, %r) "
"(must be a (str, Component))"
) % (name, component))
# component is a Component instance
if not isinstance(component, Component):
raise ValueError((
"invalid component type from make_components(): (%r, %r) "
"(must be a (str, Component))"
) % (name, component))
# check component name validity
invalid_chars = set(name) - VALID_NAME_CHARS
if invalid_chars:
raise ValueError(
"component name {!r} invalid; cannot include {!r}".format(
name, invalid_chars
)
) | Verify values returned from :meth:`make_components`.
Used internally during the :meth:`build` process.
:param components: value returned from :meth:`make_components`
:type components: :class:`dict`
:raises ValueError: if verification fails | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/assembly.py#L144-L183 |
cqparts/cqparts | src/cqparts/assembly.py | Assembly.verify_constraints | def verify_constraints(constraints):
"""
Verify values returned from :meth:`make_constraints`.
Used internally during the :meth:`build` process.
:param constraints: value returned from :meth:`make_constraints`
:type constraints: :class:`list`
:raises ValueError: if verification fails
"""
# verify return is a list
if not isinstance(constraints, list):
raise ValueError(
"invalid type returned by make_constraints: %r (must be a list)" % constraints
)
# verify each list element is a Constraint instance
for constraint in constraints:
if not isinstance(constraint, Constraint):
raise ValueError(
"invalid constraint type: %r (must be a Constriant)" % constraint
) | python | def verify_constraints(constraints):
"""
Verify values returned from :meth:`make_constraints`.
Used internally during the :meth:`build` process.
:param constraints: value returned from :meth:`make_constraints`
:type constraints: :class:`list`
:raises ValueError: if verification fails
"""
# verify return is a list
if not isinstance(constraints, list):
raise ValueError(
"invalid type returned by make_constraints: %r (must be a list)" % constraints
)
# verify each list element is a Constraint instance
for constraint in constraints:
if not isinstance(constraint, Constraint):
raise ValueError(
"invalid constraint type: %r (must be a Constriant)" % constraint
) | Verify values returned from :meth:`make_constraints`.
Used internally during the :meth:`build` process.
:param constraints: value returned from :meth:`make_constraints`
:type constraints: :class:`list`
:raises ValueError: if verification fails | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/assembly.py#L186-L207 |
cqparts/cqparts | src/cqparts/assembly.py | Assembly.build | def build(self, recursive=True):
"""
Building an assembly buffers the :meth:`components` and :meth:`constraints`.
Running ``build()`` is optional; it's automatically run when requesting
:meth:`components` or :meth:`constraints`.
Mostly it's used to test that there aren't any critical runtime
issues with its construction, but doing anything like *displaying* or
*exporting* will ultimately run a build anyway.
:param recursive: if set, iterates through child components and builds
those as well.
:type recursive: :class:`bool`
"""
# initialize values
self._components = {}
self._constraints = []
def genwrap(obj, name, iter_type=None):
# Force obj to act like a generator.
# this wrapper will always yield at least once.
if isinstance(obj, GeneratorType):
for i in obj:
if (iter_type is not None) and (not isinstance(i, iter_type)):
raise TypeError("%s must yield a %r" % (name, iter_type))
yield i
else:
if (iter_type is not None) and (not isinstance(obj, iter_type)):
raise TypeError("%s must return a %r" % (name, iter_type))
yield obj
# Make Components
components_iter = genwrap(self.make_components(), "make_components", dict)
new_components = next(components_iter)
self.verify_components(new_components)
self._components.update(new_components)
# Make Constraints
constraints_iter = genwrap(self.make_constraints(), "make_constraints", list)
new_constraints = next(constraints_iter)
self.verify_constraints(new_constraints)
self._constraints += new_constraints
# Run solver : sets components' world coordinates
self.solve()
# Make Alterations
alterations_iter = genwrap(self.make_alterations(), "make_alterations")
next(alterations_iter) # return value is ignored
while True:
(s1, s2, s3) = (True, True, True) # stages
# Make Components
new_components = None
try:
new_components = next(components_iter)
self.verify_components(new_components)
self._components.update(new_components)
except StopIteration:
s1 = False
# Make Constraints
new_constraints = None
try:
new_constraints = next(constraints_iter)
self.verify_constraints(new_constraints)
self._constraints += new_constraints
except StopIteration:
s2 = False
# Run solver : sets components' world coordinates
if new_components or new_constraints:
self.solve()
# Make Alterations
try:
next(alterations_iter) # return value is ignored
except StopIteration:
s3 = False
# end loop when all iters are finished
if not any((s1, s2, s3)):
break
if recursive:
for (name, component) in self._components.items():
component.build(recursive=recursive) | python | def build(self, recursive=True):
"""
Building an assembly buffers the :meth:`components` and :meth:`constraints`.
Running ``build()`` is optional; it's automatically run when requesting
:meth:`components` or :meth:`constraints`.
Mostly it's used to test that there aren't any critical runtime
issues with its construction, but doing anything like *displaying* or
*exporting* will ultimately run a build anyway.
:param recursive: if set, iterates through child components and builds
those as well.
:type recursive: :class:`bool`
"""
# initialize values
self._components = {}
self._constraints = []
def genwrap(obj, name, iter_type=None):
# Force obj to act like a generator.
# this wrapper will always yield at least once.
if isinstance(obj, GeneratorType):
for i in obj:
if (iter_type is not None) and (not isinstance(i, iter_type)):
raise TypeError("%s must yield a %r" % (name, iter_type))
yield i
else:
if (iter_type is not None) and (not isinstance(obj, iter_type)):
raise TypeError("%s must return a %r" % (name, iter_type))
yield obj
# Make Components
components_iter = genwrap(self.make_components(), "make_components", dict)
new_components = next(components_iter)
self.verify_components(new_components)
self._components.update(new_components)
# Make Constraints
constraints_iter = genwrap(self.make_constraints(), "make_constraints", list)
new_constraints = next(constraints_iter)
self.verify_constraints(new_constraints)
self._constraints += new_constraints
# Run solver : sets components' world coordinates
self.solve()
# Make Alterations
alterations_iter = genwrap(self.make_alterations(), "make_alterations")
next(alterations_iter) # return value is ignored
while True:
(s1, s2, s3) = (True, True, True) # stages
# Make Components
new_components = None
try:
new_components = next(components_iter)
self.verify_components(new_components)
self._components.update(new_components)
except StopIteration:
s1 = False
# Make Constraints
new_constraints = None
try:
new_constraints = next(constraints_iter)
self.verify_constraints(new_constraints)
self._constraints += new_constraints
except StopIteration:
s2 = False
# Run solver : sets components' world coordinates
if new_components or new_constraints:
self.solve()
# Make Alterations
try:
next(alterations_iter) # return value is ignored
except StopIteration:
s3 = False
# end loop when all iters are finished
if not any((s1, s2, s3)):
break
if recursive:
for (name, component) in self._components.items():
component.build(recursive=recursive) | Building an assembly buffers the :meth:`components` and :meth:`constraints`.
Running ``build()`` is optional; it's automatically run when requesting
:meth:`components` or :meth:`constraints`.
Mostly it's used to test that there aren't any critical runtime
issues with its construction, but doing anything like *displaying* or
*exporting* will ultimately run a build anyway.
:param recursive: if set, iterates through child components and builds
those as well.
:type recursive: :class:`bool` | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/assembly.py#L209-L298 |
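The generator handling above lets ``make_components`` and ``make_constraints`` yield in stages, with the solver run between stages so later stages can rely on earlier components' world coordinates. A hedged sketch of that pattern; ``Box``, ``Peg`` and ``mate_top`` are hypothetical, only the yield cycle and the ``Fixed``/``Coincident`` constraint classes come from the code and library above:

```python
# Sketch of a staged build (Box, Peg and mate_top are hypothetical).
import cqparts
from cqparts.constraint import Fixed, Coincident

class Shelf(cqparts.Assembly):
    def make_components(self):
        yield {'board': Box()}   # stage 1: solved before stage 2 runs
        yield {'peg': Peg()}     # stage 2: may measure the solved board

    def make_constraints(self):
        yield [Fixed(self.components['board'].mate_origin)]
        yield [Coincident(
            self.components['peg'].mate_origin,
            self.components['board'].mate_top,  # hypothetical mate
        )]
```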
cqparts/cqparts | src/cqparts/assembly.py | Assembly.find | def find(self, keys, _index=0):
"""
:param keys: key path. ``'a.b'`` is equivalent to ``['a', 'b']``
:type keys: :class:`str` or :class:`list`
Find a nested :class:`Component` by a "`.`" separated list of names.
For example::
>>> motor.find('bearing.outer_ring')
would return the Part instance of the motor bearing's outer ring.
::
>>> bearing = motor.find('bearing')
>>> ring = bearing.find('inner_ring') # equivalent of 'bearing.inner_ring'
the above code does much the same thing, ``bearing`` is an :class:`Assembly`,
and ``ring`` is a :class:`Part`.
.. note::
For a key path of ``a.b.c`` the ``c`` key can reference any
:class:`Component` type.
Everything prior (in this case ``a`` and ``b``) must reference an
:class:`Assembly`.
"""
if isinstance(keys, six.string_types):
keys = re.split(r'[\.-]+', keys)
if _index >= len(keys):
return self
key = keys[_index]
if key in self.components:
component = self.components[key]
if isinstance(component, Assembly):
return component.find(keys, _index=(_index + 1))
elif _index == len(keys) - 1:
# this is the last search key; component is a leaf, return it
return component
else:
raise AssemblyFindError(
"could not find '%s' (invalid type at [%i]: %r)" % (
'.'.join(keys), _index, component
)
)
else:
raise AssemblyFindError(
"could not find '%s', '%s' is not a component of %r" % (
'.'.join(keys), key, self
)
) | python | def find(self, keys, _index=0):
"""
:param keys: key path. ``'a.b'`` is equivalent to ``['a', 'b']``
:type keys: :class:`str` or :class:`list`
Find a nested :class:`Component` by a "`.`" separated list of names.
For example::
>>> motor.find('bearing.outer_ring')
would return the Part instance of the motor bearing's outer ring.
::
>>> bearing = motor.find('bearing')
>>> ring = bearing.find('inner_ring') # equivalent of 'bearing.inner_ring'
the above code does much the same thing, ``bearing`` is an :class:`Assembly`,
and ``ring`` is a :class:`Part`.
.. note::
For a key path of ``a.b.c`` the ``c`` key can reference any
:class:`Component` type.
Everything prior (in this case ``a`` and ``b``) must reference an
:class:`Assembly`.
"""
if isinstance(keys, six.string_types):
keys = re.split(r'[\.-]+', keys)
if _index >= len(keys):
return self
key = keys[_index]
if key in self.components:
component = self.components[key]
if isinstance(component, Assembly):
return component.find(keys, _index=(_index + 1))
elif _index == len(keys) - 1:
# this is the last search key; component is a leaf, return it
return component
else:
raise AssemblyFindError(
"could not find '%s' (invalid type at [%i]: %r)" % (
'.'.join(keys), _index, component
)
)
else:
raise AssemblyFindError(
"could not find '%s', '%s' is not a component of %r" % (
'.'.join(keys), key, self
)
) | :param keys: key path. ``'a.b'`` is equivalent to ``['a', 'b']``
:type keys: :class:`str` or :class:`list`
Find a nested :class:`Component` by a "`.`" separated list of names.
For example::
>>> motor.find('bearing.outer_ring')
would return the Part instance of the motor bearing's outer ring.
::
>>> bearing = motor.find('bearing')
>>> ring = bearing.find('inner_ring') # equivalent of 'bearing.inner_ring'
the above code does much the same thing, ``bearing`` is an :class:`Assembly`,
and ``ring`` is a :class:`Part`.
.. note::
For a key path of ``a.b.c`` the ``c`` key can reference any
:class:`Component` type.
Everything prior (in this case ``a`` and ``b``) must reference an
:class:`Assembly`. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/assembly.py#L309-L362 |
cqparts/cqparts | src/cqparts/assembly.py | Assembly.tree_str | def tree_str(self, name=None, prefix='', add_repr=False, _depth=0):
u"""
Return string listing recursively the assembly hierarchy
:param name: if set, names the tree's trunk, otherwise the object's :meth:`repr` names the tree
:type name: :class:`str`
:param prefix: string prefixed to each line, can be used to indent
:type prefix: :class:`str`
:param add_repr: if set, *component* :meth:`repr` is put after their names
:type add_repr: :class:`bool`
:return: Printable string of an assembly's component hierarchy.
:rtype: :class:`str`
Example output from `block_tree.py <https://github.com/fragmuffin/cqparts/blob/master/tests/manual/block_tree.py>`_
::
>>> log = logging.getLogger(__name__)
>>> isinstance(block_tree, Assembly)
True
>>> log.info(block_tree.tree_str(name="block_tree"))
block_tree
\u251c\u25cb branch_lb
\u251c\u25cb branch_ls
\u251c\u2500 branch_r
\u2502 \u251c\u25cb L
\u2502 \u251c\u25cb R
\u2502 \u251c\u25cb branch
\u2502 \u251c\u2500 house
\u2502 \u2502 \u251c\u25cb bar
\u2502 \u2502 \u2514\u25cb foo
\u2502 \u2514\u25cb split
\u251c\u25cb trunk
\u2514\u25cb trunk_split
Where:
* ``\u2500`` denotes an :class:`Assembly`, and
* ``\u25cb`` denotes a :class:`Part`
"""
# unicode characters
c_t = u'\u251c'
c_l = u'\u2514'
c_dash = u'\u2500'
c_o = u'\u25cb'
c_span = u'\u2502'
output = u''
if not _depth: # first line
output = prefix
if name:
output += (name + u': ') if add_repr else name
if add_repr or not name:
output += repr(self)
output += '\n'
# build tree
for (is_last, (name, component)) in indicate_last(sorted(self.components.items(), key=lambda x: x[0])):
branch_chr = c_l if is_last else c_t
if isinstance(component, Assembly):
# Assembly: also list nested components
output += prefix + ' ' + branch_chr + c_dash + u' ' + name
if add_repr:
output += ': ' + repr(component)
output += '\n'
output += component.tree_str(
prefix=(prefix + (u' ' if is_last else (u' ' + c_span + ' '))),
add_repr=add_repr,
_depth=_depth + 1,
)
else:
# Part (assumed): leaf node
output += prefix + ' ' + branch_chr + c_o + u' ' + name
if add_repr:
output += ': ' + repr(component)
output += '\n'
return output | python | def tree_str(self, name=None, prefix='', add_repr=False, _depth=0):
u"""
Return string listing recursively the assembly hierarchy
:param name: if set, names the tree's trunk, otherwise the object's :meth:`repr` names the tree
:type name: :class:`str`
:param prefix: string prefixed to each line, can be used to indent
:type prefix: :class:`str`
:param add_repr: if set, *component* :meth:`repr` is put after their names
:type add_repr: :class:`bool`
:return: Printable string of an assembly's component hierarchy.
:rtype: :class:`str`
Example output from `block_tree.py <https://github.com/fragmuffin/cqparts/blob/master/tests/manual/block_tree.py>`_
::
>>> log = logging.getLogger(__name__)
>>> isinstance(block_tree, Assembly)
True
>>> log.info(block_tree.tree_str(name="block_tree"))
block_tree
\u251c\u25cb branch_lb
\u251c\u25cb branch_ls
\u251c\u2500 branch_r
\u2502 \u251c\u25cb L
\u2502 \u251c\u25cb R
\u2502 \u251c\u25cb branch
\u2502 \u251c\u2500 house
\u2502 \u2502 \u251c\u25cb bar
\u2502 \u2502 \u2514\u25cb foo
\u2502 \u2514\u25cb split
\u251c\u25cb trunk
\u2514\u25cb trunk_split
Where:
* ``\u2500`` denotes an :class:`Assembly`, and
* ``\u25cb`` denotes a :class:`Part`
"""
# unicode characters
c_t = u'\u251c'
c_l = u'\u2514'
c_dash = u'\u2500'
c_o = u'\u25cb'
c_span = u'\u2502'
output = u''
if not _depth: # first line
output = prefix
if name:
output += (name + u': ') if add_repr else name
if add_repr or not name:
output += repr(self)
output += '\n'
# build tree
for (is_last, (name, component)) in indicate_last(sorted(self.components.items(), key=lambda x: x[0])):
branch_chr = c_l if is_last else c_t
if isinstance(component, Assembly):
# Assembly: also list nested components
output += prefix + ' ' + branch_chr + c_dash + u' ' + name
if add_repr:
output += ': ' + repr(component)
output += '\n'
output += component.tree_str(
prefix=(prefix + (u' ' if is_last else (u' ' + c_span + ' '))),
add_repr=add_repr,
_depth=_depth + 1,
)
else:
# Part (assumed): leaf node
output += prefix + ' ' + branch_chr + c_o + u' ' + name
if add_repr:
output += ': ' + repr(component)
output += '\n'
return output | u"""
Return string listing recursively the assembly hierarchy
:param name: if set, names the tree's trunk, otherwise the object's :meth:`repr` names the tree
:type name: :class:`str`
:param prefix: string prefixed to each line, can be used to indent
:type prefix: :class:`str`
:param add_repr: if set, *component* :meth:`repr` is put after their names
:type add_repr: :class:`bool`
:return: Printable string of an assembly's component hierarchy.
:rtype: :class:`str`
Example output from `block_tree.py <https://github.com/fragmuffin/cqparts/blob/master/tests/manual/block_tree.py>`_
::
>>> log = logging.getLogger(__name__)
>>> isinstance(block_tree, Assembly)
True
>>> log.info(block_tree.tree_str(name="block_tree"))
block_tree
\u251c\u25cb branch_lb
\u251c\u25cb branch_ls
\u251c\u2500 branch_r
\u2502 \u251c\u25cb L
\u2502 \u251c\u25cb R
\u2502 \u251c\u25cb branch
\u2502 \u251c\u2500 house
\u2502 \u2502 \u251c\u25cb bar
\u2502 \u2502 \u2514\u25cb foo
\u2502 \u2514\u25cb split
\u251c\u25cb trunk
\u2514\u25cb trunk_split
Where:
* ``\u2500`` denotes an :class:`Assembly`, and
* ``\u25cb`` denotes a :class:`Part` | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/assembly.py#L365-L442 |
cqparts/cqparts | src/cqparts_template/catalogue/scripts/build.py | _relative_path_to | def _relative_path_to(path_list, filename):
"""Get a neat relative path to files relative to the CWD"""
return os.path.join(
os.path.relpath(os.path.join(*path_list), os.getcwd()),
filename
) | python | def _relative_path_to(path_list, filename):
"""Get a neat relative path to files relative to the CWD"""
return os.path.join(
os.path.relpath(os.path.join(*path_list), os.getcwd()),
filename
) | Get a neat relative path to files relative to the CWD | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_template/catalogue/scripts/build.py#L21-L26 |
cqparts/cqparts | src/cqparts_gears/trapezoidal.py | TrapezoidalGear._make_tooth_template | def _make_tooth_template(self):
"""
Builds a single tooth including the cylinder with tooth faces
tangential to its circumference.
"""
# parameters
period_arc = (2 * pi) / self.tooth_count
tooth_arc = period_arc * self.spacing_ratio # the arc between faces at effective_radius
outer_radius = self.effective_radius + (self.tooth_height / 2)
face_angle_rad = radians(self.face_angle)
# cartesian isosceles trapezoid dimensions
side_angle = face_angle_rad - (tooth_arc / 2)
side_tangent_radius = sin(face_angle_rad) * self.effective_radius
extra_side_angle = side_angle + acos(side_tangent_radius / outer_radius)
tooth = cadquery.Workplane('XY', origin=(0, 0, -self.width / 2)) \
.moveTo(
side_tangent_radius * cos(side_angle),
side_tangent_radius * sin(side_angle)
)
opposite_point = (
-side_tangent_radius * cos(side_angle),
side_tangent_radius * sin(side_angle)
)
if self.face_angle:
tooth = tooth.lineTo(*opposite_point)
#tooth = tooth.threePointArc(
# (0, -side_tangent_radius),
# opposite_point
#)
tooth = tooth.lineTo(
-cos(extra_side_angle) * outer_radius,
sin(extra_side_angle) * outer_radius
)
opposite_point = (
cos(extra_side_angle) * outer_radius,
sin(extra_side_angle) * outer_radius
)
if self.flat_top:
tooth = tooth.lineTo(*opposite_point)
else:
tooth = tooth.threePointArc((0, outer_radius), opposite_point)
tooth = tooth.close().extrude(self.width)
return tooth | python | def _make_tooth_template(self):
"""
Builds a single tooth including the cylinder with tooth faces
tangential to its circumference.
"""
# parameters
period_arc = (2 * pi) / self.tooth_count
tooth_arc = period_arc * self.spacing_ratio # the arc between faces at effective_radius
outer_radius = self.effective_radius + (self.tooth_height / 2)
face_angle_rad = radians(self.face_angle)
# cartesian isosceles trapezoid dimensions
side_angle = face_angle_rad - (tooth_arc / 2)
side_tangent_radius = sin(face_angle_rad) * self.effective_radius
extra_side_angle = side_angle + acos(side_tangent_radius / outer_radius)
tooth = cadquery.Workplane('XY', origin=(0, 0, -self.width / 2)) \
.moveTo(
side_tangent_radius * cos(side_angle),
side_tangent_radius * sin(side_angle)
)
opposite_point = (
-side_tangent_radius * cos(side_angle),
side_tangent_radius * sin(side_angle)
)
if self.face_angle:
tooth = tooth.lineTo(*opposite_point)
#tooth = tooth.threePointArc(
# (0, -side_tangent_radius),
# opposite_point
#)
tooth = tooth.lineTo(
-cos(extra_side_angle) * outer_radius,
sin(extra_side_angle) * outer_radius
)
opposite_point = (
cos(extra_side_angle) * outer_radius,
sin(extra_side_angle) * outer_radius
)
if self.flat_top:
tooth = tooth.lineTo(*opposite_point)
else:
tooth = tooth.threePointArc((0, outer_radius), opposite_point)
tooth = tooth.close().extrude(self.width)
return tooth | Builds a single tooth including the cylinder with tooth faces
tangential to its circumference. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_gears/trapezoidal.py#L31-L76 |
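A runnable numeric sanity-check of the trapezoid geometry above, using hypothetical gear parameters:

```python
from math import pi, sin, acos, radians

# hypothetical gear parameters
tooth_count, spacing_ratio = 12, 0.5
effective_radius, tooth_height, face_angle = 20.0, 4.0, 20.0

period_arc = (2 * pi) / tooth_count                 # 0.5236 rad per tooth period
tooth_arc = period_arc * spacing_ratio              # 0.2618 rad of tooth face
outer_radius = effective_radius + tooth_height / 2  # 22.0
face_angle_rad = radians(face_angle)

side_angle = face_angle_rad - (tooth_arc / 2)                 # ~0.2182 rad
side_tangent_radius = sin(face_angle_rad) * effective_radius  # ~6.84
extra_side_angle = side_angle + acos(side_tangent_radius / outer_radius)
print(round(extra_side_angle, 3))  # ~1.473 rad (~84.4 deg)
```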
cqparts/cqparts | src/cqparts_bearings/ball.py | _Ring.get_mate_center | def get_mate_center(self, angle=0):
"""
Mate at ring's center rotated ``angle`` degrees.
:param angle: rotation around z-axis (unit: deg)
:type angle: :class:`float`
:return: mate in ring's center rotated about z-axis
:rtype: :class:`Mate <cqparts.constraint.Mate>`
"""
return Mate(self, CoordSystem.from_plane(
cadquery.Plane(
origin=(0, 0, self.width / 2),
xDir=(1, 0, 0),
normal=(0, 0, 1),
).rotated((0, 0, angle)) # rotate about z-axis
)) | python | def get_mate_center(self, angle=0):
"""
Mate at ring's center rotated ``angle`` degrees.
:param angle: rotation around z-axis (unit: deg)
:type angle: :class:`float`
:return: mate in ring's center rotated about z-axis
:rtype: :class:`Mate <cqparts.constraint.Mate>`
"""
return Mate(self, CoordSystem.from_plane(
cadquery.Plane(
origin=(0, 0, self.width / 2),
xDir=(1, 0, 0),
normal=(0, 0, 1),
).rotated((0, 0, angle)) # rotate about z-axis
)) | Mate at ring's center rotated ``angle`` degrees.
:param angle: rotation around z-axis (unit: deg)
:type angle: :class:`float`
:return: mate in ring's center rotated about z-axis
:rtype: :class:`Mate <cqparts.constraint.Mate>` | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_bearings/ball.py#L50-L66 |
cqparts/cqparts | src/cqparts_bearings/ball.py | _BallRing.get_max_ballcount | def get_max_ballcount(cls, ball_diam, rolling_radius, min_gap=0.):
"""
The maximum number of balls given ``rolling_radius`` and ``ball_diam``
:param min_gap: minimum gap between balls (measured along vector between
spherical centers)
:type min_gap: :class:`float`
:return: maximum ball count
:rtype: :class:`int`
"""
min_arc = asin(((ball_diam + min_gap) / 2) / rolling_radius) * 2
return int((2 * pi) / min_arc) | python | def get_max_ballcount(cls, ball_diam, rolling_radius, min_gap=0.):
"""
The maximum number of balls given ``rolling_radius`` and ``ball_diam``
:param min_gap: minimum gap between balls (measured along vector between
spherical centers)
:type min_gap: :class:`float`
:return: maximum ball count
:rtype: :class:`int`
"""
min_arc = asin(((ball_diam + min_gap) / 2) / rolling_radius) * 2
return int((2 * pi) / min_arc) | The maximum number of balls given ``rolling_radius`` and ``ball_diam``
:param min_gap: minimum gap between balls (measured along vector between
spherical centers)
:type min_gap: :class:`float`
:return: maximum ball count
:rtype: :class:`int` | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_bearings/ball.py#L87-L99 |
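A worked example of the formula: the smallest arc between adjacent ball centers is ``2 * asin(((ball_diam + min_gap) / 2) / rolling_radius)``, and the count is how many such arcs fit in a full circle:

```python
from math import asin, pi

ball_diam, rolling_radius, min_gap = 6.0, 20.0, 0.0  # example values

min_arc = asin(((ball_diam + min_gap) / 2) / rolling_radius) * 2
print(int((2 * pi) / min_arc))  # 20 balls fit around this rolling circle
```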
cqparts/cqparts | src/cqparts/display/environment.py | map_environment | def map_environment(**kwargs):
"""
Decorator to map a DisplayEnvironment for displaying components.
The decorated environment will be chosen if its condition is ``True``, and
its order is the smallest.
:param add_to: if set to ``globals()``, display environment's constructor
may reference its own type.
:type add_to: :class:`dict`
Any additional named parameters will be passed to the constructor of
the decorated DisplayEnvironment.
See :class:`DisplayEnvironment` for example usage.
**NameError on importing**
The following code::
@map_environment(
name='abc', order=10, condition=lambda: True,
)
class SomeDisplayEnv(DisplayEnvironment):
def __init__(self, *args, **kwargs):
super(SomeDisplayEnv, self).__init__(*args, **kwargs)
Will raise the Exception::
NameError: global name 'SomeDisplayEnv' is not defined
Because this ``map_environment`` decorator attempts to instantiate
this class before it's returned to populate the ``globals()`` dict.
To circumvent this problem, set ``add_to`` to ``globals()``::
@map_environment(
name='abc', order=10, condition=lambda: True,
add_to=globals(),
)
class SomeDisplayEnv(DisplayEnvironment):
... as above
"""
def inner(cls):
global display_environments
assert issubclass(cls, DisplayEnvironment), "can only map DisplayEnvironment classes"
# Add class to its local globals() so constructor can reference
# its own type
add_to = kwargs.pop('add_to', {})
add_to[cls.__name__] = cls
# Create display environment
disp_env = cls(**kwargs)
# is already mapped?
try:
i = display_environments.index(disp_env) # raises ValueError
# report duplicate
raise RuntimeError(
("environment %r already mapped, " % display_environments[i]) +
("can't map duplicate %r" % disp_env)
)
except ValueError:
pass # as expected
# map class
display_environments = sorted(display_environments + [disp_env])
return cls
return inner | python | def map_environment(**kwargs):
"""
Decorator to map a DisplayEnvironment for displaying components.
The decorated environment will be chosen if its condition is ``True``, and
its order is the smallest.
:param add_to: if set to ``globals()``, display environment's constructor
may reference its own type.
:type add_to: :class:`dict`
Any additional named parameters will be passed to the constructor of
the decorated DisplayEnvironment.
See :class:`DisplayEnvironment` for example usage.
**NameError on importing**
The following code::
@map_environment(
name='abc', order=10, condition=lambda: True,
)
class SomeDisplayEnv(DisplayEnvironment):
def __init__(self, *args, **kwargs):
super(SomeDisplayEnv, self).__init__(*args, **kwargs)
Will raise the Exception::
NameError: global name 'SomeDisplayEnv' is not defined
Because this ``map_environment`` decorator attempts to instantiate
this class before it's returned to populate the ``globals()`` dict.
To circumvent this problem, set ``add_to`` to ``globals()``::
@map_environment(
name='abc', order=10, condition=lambda: True,
add_to=globals(),
)
class SomeDisplayEnv(DisplayEnvironment):
... as above
"""
def inner(cls):
global display_environments
assert issubclass(cls, DisplayEnvironment), "can only map DisplayEnvironment classes"
# Add class to its local globals() so constructor can reference
# its own type
add_to = kwargs.pop('add_to', {})
add_to[cls.__name__] = cls
# Create display environment
disp_env = cls(**kwargs)
# is already mapped?
try:
i = display_environments.index(disp_env) # raises ValueError
# report duplicate
raise RuntimeError(
("environment %r already mapped, " % display_environments[i]) +
("can't map duplicate %r" % disp_env)
)
except ValueError:
pass # as expected
# map class
display_environments = sorted(display_environments + [disp_env])
return cls
return inner | Decorator to map a DisplayEnvironment for displaying components.
The decorated environment will be chosen if its condition is ``True``, and
its order is the smallest.
:param add_to: if set to ``globals()``, display environment's constructor
may reference its own type.
:type add_to: :class:`dict`
Any additional named parameters will be passed to the constructor of
the decorated DisplayEnvironment.
See :class:`DisplayEnvironment` for example usage.
**NameError on importing**
The following code::
@map_environment(
name='abc', order=10, condition=lambda: True,
)
class SomeDisplayEnv(DisplayEnvironment):
def __init__(self, *args, **kwargs):
super(SomeDisplayEnv, self).__init__(*args, **kwargs)
Will raise the Exception::
NameError: global name 'SomeDisplayEnv' is not defined
Because this ``map_environment`` decorator attempts to instantiate
this class before it's returned to populate the ``globals()`` dict.
To circumvent this problem, set ``add_to`` to ``globals()``::
@map_environment(
name='abc', order=10, condition=lambda: True,
add_to=globals(),
)
class SomeDisplayEnv(DisplayEnvironment):
... as above | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/display/environment.py#L14-L80 |
cqparts/cqparts | src/cqparts/display/environment.py | DisplayEnvironment.display_callback | def display_callback(self, component, **kwargs):
"""
Display given component in this environment.
.. note::
To be overridden by inheriting classes
An example of introducing a custom display environment.
.. doctest::
import cqparts
from cqparts.display.environment import DisplayEnvironment, map_environment
def is_text_env():
# function that returns True if it's run in the
# desired environment.
import sys
# Python 2.x
if sys.version_info[0] == 2:
return isinstance(sys.stdout, file)
# Python 3.x
import io
return isinstance(sys.stdout, io.TextIOWrapper)
@map_environment(
name="text",
order=0, # force display to be first priority
condition=is_text_env,
)
class TextDisplay(DisplayEnvironment):
def display_callback(self, component, **kwargs):
# Print component details to STDOUT
if isinstance(component, cqparts.Assembly):
sys.stdout.write(component.tree_str(add_repr=True))
else: # assumed to be a cqparts.Part
sys.stdout.write("%r\\n" % (component))
``is_text_env()`` checks if there's a valid ``sys.stdout`` to write to,
``TextDisplay`` defines how to display any given component,
and the ``@map_environment`` decorator adds the display paired with
its environment test function.
When using :meth:`display() <cqparts.display.display>`, this display
will be used if ``is_text_env()`` returns ``True``, and no previously
mapped environment with a smaller ``order`` tested ``True``:
.. doctest::
# create component to display
from cqparts_misc.basic.primatives import Cube
cube = Cube()
# display component
from cqparts.display import display
display(cube)
The ``display_callback`` will be called via
:meth:`display() <DisplayEnvironment.display>`. So to call this
display method directly:
.. doctest::
TextDisplay().display(cube)
:raises: NotImplementedError if not overridden
"""
if type(self) is DisplayEnvironment:
raise RuntimeError(
("%r is not a functional display environment, " % (type(self))) +
"it's meant to be inherited by an implemented environment"
)
raise NotImplementedError(
"display_callback function not overridden by %r" % (type(self))
) | python | def display_callback(self, component, **kwargs):
"""
Display given component in this environment.
.. note::
To be overridden by inheriting classes
An example of introducing a custom display environment.
.. doctest::
import cqparts
from cqparts.display.environment import DisplayEnvironment, map_environment
def is_text_env():
# function that returns True if it's run in the
# desired environment.
import sys
# Python 2.x
if sys.version_info[0] == 2:
return isinstance(sys.stdout, file)
# Python 3.x
import io
return isinstance(sys.stdout, io.TextIOWrapper)
@map_environment(
name="text",
order=0, # force display to be first priority
condition=is_text_env,
)
class TextDisplay(DisplayEnvironment):
def display_callback(self, component, **kwargs):
# Print component details to STDOUT
if isinstance(component, cqparts.Assembly):
sys.stdout.write(component.tree_str(add_repr=True))
else: # assumed to be a cqparts.Part
sys.stdout.write("%r\\n" % (component))
``is_text_env()`` checks if there's a valid ``sys.stdout`` to write to,
``TextDisplay`` defines how to display any given component,
and the ``@map_environment`` decorator adds the display paired with
its environment test function.
When using :meth:`display() <cqparts.display.display>`, this display
will be used if ``is_text_env()`` returns ``True``, and no previously
mapped environment with a smaller ``order`` tested ``True``:
.. doctest::
# create component to display
from cqparts_misc.basic.primatives import Cube
cube = Cube()
# display component
from cqparts.display import display
display(cube)
The ``display_callback`` will be called via
:meth:`display() <DisplayEnvironment.display>`. So to call this
display method directly:
.. doctest::
TextDisplay().display(cube)
:raises: NotImplementedError if not overridden
"""
if type(self) is DisplayEnvironment:
raise RuntimeError(
("%r is not a functional display environment, " % (type(self))) +
"it's meant to be inherited by an implemented environment"
)
raise NotImplementedError(
"display_callback function not overridden by %r" % (type(self))
) | Display given component in this environment.
.. note::
To be overridden by inheriting classes
An example of introducing a custom display environment.
.. doctest::
import cqparts
from cqparts.display.environment import DisplayEnvironment, map_environment
def is_text_env():
# function that returns True if it's run in the
# desired environment.
import sys
# Python 2.x
if sys.version_info[0] == 2:
return isinstance(sys.stdout, file)
# Python 3.x
import io
return isinstance(sys.stdout, io.TextIOWrapper)
@map_environment(
name="text",
order=0, # force display to be first priority
condition=is_text_env,
)
class TextDisplay(DisplayEnvironment):
def display_callback(self, component, **kwargs):
# Print component details to STDOUT
if isinstance(component, cqparts.Assembly):
sys.stdout.write(component.tree_str(add_repr=True))
else: # assumed to be a cqparts.Part
sys.stdout.write("%r\\n" % (component))
``is_text_env()`` checks if there's a valid ``sys.stdout`` to write to,
``TextDisplay`` defines how to display any given component,
and the ``@map_environment`` decorator adds the display paired with
its environment test function.
When using :meth:`display() <cqparts.display.display>`, this display
will be used if ``is_text_env()`` returns ``True``, and no previously
mapped environment with a smaller ``order`` tested ``True``:
.. doctest::
# create component to display
from cqparts_misc.basic.primatives import Cube
cube = Cube()
# display component
from cqparts.display import display
display(cube)
The ``display_callback`` will be called via
:meth:`display() <DisplayEnvironment.display>`. So to call this
display method directly:
.. doctest::
TextDisplay().display(cube)
:raises: NotImplementedError if not overridden | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/display/environment.py#L105-L180 |
cqparts/cqparts | src/cqparts/codec/gltf.py | ShapeBuffer.add_vertex | def add_vertex(self, x, y, z):
"""
Add a ``VEC3`` of ``floats`` to the ``vert_data`` buffer
"""
self.vert_data.write(
struct.pack('<f', x) +
struct.pack('<f', y) +
struct.pack('<f', z)
)
# retain min/max values
self.vert_min = _list3_min(self.vert_min, (x, y, z))
self.vert_max = _list3_max(self.vert_max, (x, y, z)) | python | def add_vertex(self, x, y, z):
"""
Add a ``VEC3`` of ``floats`` to the ``vert_data`` buffer
"""
self.vert_data.write(
struct.pack('<f', x) +
struct.pack('<f', y) +
struct.pack('<f', z)
)
# retain min/max values
self.vert_min = _list3_min(self.vert_min, (x, y, z))
self.vert_max = _list3_max(self.vert_max, (x, y, z)) | Add a ``VEC3`` of ``floats`` to the ``vert_data`` buffer | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/codec/gltf.py#L173-L185 |
cqparts/cqparts | src/cqparts/codec/gltf.py | ShapeBuffer.add_poly_index | def add_poly_index(self, i, j, k):
"""
Add 3 ``SCALAR`` of ``uint`` to the ``idx_data`` buffer.
"""
self.idx_data.write(
struct.pack(self.idx_fmt, i) +
struct.pack(self.idx_fmt, j) +
struct.pack(self.idx_fmt, k)
) | python | def add_poly_index(self, i, j, k):
"""
Add 3 ``SCALAR`` of ``uint`` to the ``idx_data`` buffer.
"""
self.idx_data.write(
struct.pack(self.idx_fmt, i) +
struct.pack(self.idx_fmt, j) +
struct.pack(self.idx_fmt, k)
) | Add 3 ``SCALAR`` of ``uint`` to the ``idx_data`` buffer. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/codec/gltf.py#L187-L195 |
cqparts/cqparts | src/cqparts/codec/gltf.py | ShapeBuffer.buffer_iter | def buffer_iter(self, block_size=1024):
"""
Iterate through chunks of the vertices, and indices buffers seamlessly.
.. note::
To see a usage example, look at the :class:`ShapeBuffer` description.
"""
streams = (
self.vert_data,
self.idx_data,
)
# Chain streams seamlessly
for stream in streams:
stream.seek(0)
while True:
chunk = stream.read(block_size)
if chunk:
yield chunk
else:
break | python | def buffer_iter(self, block_size=1024):
"""
Iterate through chunks of the vertices, and indices buffers seamlessly.
.. note::
To see a usage example, look at the :class:`ShapeBuffer` description.
"""
streams = (
self.vert_data,
self.idx_data,
)
# Chain streams seamlessly
for stream in streams:
stream.seek(0)
while True:
chunk = stream.read(block_size)
if chunk:
yield chunk
else:
break | Iterate through chunks of the vertices, and indices buffers seamlessly.
.. note::
To see a usage example, look at the :class:`ShapeBuffer` description. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/codec/gltf.py#L197-L218 |
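The same chaining pattern, reduced to the standard library: two byte streams are read out in fixed-size chunks as if they were one buffer, without copying either into a combined stream:

```python
from io import BytesIO

def buffer_iter(streams, block_size=4):
    for stream in streams:
        stream.seek(0)
        while True:
            chunk = stream.read(block_size)
            if not chunk:
                break
            yield chunk

verts, indices = BytesIO(b'vvvvvv'), BytesIO(b'iii')
print(list(buffer_iter([verts, indices])))  # [b'vvvv', b'vv', b'iii']
```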
cqparts/cqparts | src/cqparts/codec/gltf.py | ShapeBuffer.read | def read(self):
"""
Read buffer out as a single stream.
.. warning::
Avoid using this function!
**Why?** This is a *convenience* function; it doesn't encourage good
memory management.
All memory required for a mesh is duplicated, and returned as a
single :class:`str`. So at best, using this function will double
the memory required for a single model.
**Instead:** Wherever possible, please use :meth:`buffer_iter`.
"""
buffer = BytesIO()
for chunk in self.buffer_iter():
log.debug('buffer.write(%r)', chunk)
buffer.write(chunk)
buffer.seek(0)
return buffer.read() | python | def read(self):
"""
Read buffer out as a single stream.
.. warning::
Avoid using this function!
**Why?** This is a *convenience* function; it doesn't encourage good
memory management.
All memory required for a mesh is duplicated, and returned as a
single :class:`str`. So at best, using this function will double
the memory required for a single model.
**Instead:** Wherever possible, please use :meth:`buffer_iter`.
"""
buffer = BytesIO()
for chunk in self.buffer_iter():
log.debug('buffer.write(%r)', chunk)
buffer.write(chunk)
buffer.seek(0)
return buffer.read() | Read buffer out as a single stream.
.. warning::
Avoid using this function!
**Why?** This is a *convenience* function; it doesn't encourage good
memory management.
All memory required for a mesh is duplicated, and returned as a
single :class:`str`. So at best, using this function will double
the memory required for a single model.
**Instead:** Wherever possible, please use :meth:`buffer_iter`. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/codec/gltf.py#L223-L245 |
cqparts/cqparts | src/cqparts/constraint/mate.py | Mate.world_coords | def world_coords(self):
"""
:return: world coordinates of mate.
:rtype: :class:`CoordSystem <cqparts.utils.geometry.CoordSystem>`
:raises ValueError: if ``.component`` does not have valid world coordinates.
If ``.component`` is ``None``, then the ``.local_coords`` are returned.
"""
if self.component is None:
# no component, world == local
return copy(self.local_coords)
else:
cmp_origin = self.component.world_coords
if cmp_origin is None:
raise ValueError(
"mate's component does not have world coordinates; "
"cannot get mate's world coordinates"
)
return cmp_origin + self.local_coords | python | def world_coords(self):
"""
:return: world coordinates of mate.
:rtype: :class:`CoordSystem <cqparts.utils.geometry.CoordSystem>`
:raises ValueError: if ``.component`` does not have valid world coordinates.
If ``.component`` is ``None``, then the ``.local_coords`` are returned.
"""
if self.component is None:
# no component, world == local
return copy(self.local_coords)
else:
cmp_origin = self.component.world_coords
if cmp_origin is None:
raise ValueError(
"mate's component does not have world coordinates; "
"cannot get mate's world coordinates"
)
return cmp_origin + self.local_coords | :return: world coordinates of mate.
:rtype: :class:`CoordSystem <cqparts.utils.geometry.CoordSystem>`
:raises ValueError: if ``.component`` does not have valid world coordinates.
If ``.component`` is ``None``, then the ``.local_coords`` are returned. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/constraint/mate.py#L43-L62 |
cqparts/cqparts | src/cqparts/part.py | Part.make_simple | def make_simple(self):
"""
Create and return *simplified* solid part.
The simplified representation of a ``Part`` is used to lower the export
quality of an ``Assembly`` or ``Part`` for rendering.
Overriding this is optional, but highly recommended.
The default behaviour returns the full complexity object's bounding box.
But to do this, the full complexity object must be generated first.
There are 2 main problems with this:
#. building the full complexity part is not efficient.
#. a bounding box may not be a good representation of the part.
**Bolts**
A good example of this is a bolt.
* building a bolt's thread is not a trivial task;
it can take some time to generate.
* a box is not a good visual representation of a bolt
So for the ``Fastener`` parts, all ``make_simple`` methods are overridden
to provide 2 cylinders, one for the bolt's head, and another for the thread.
"""
complex_obj = self.make()
bb = complex_obj.findSolid().BoundingBox()
simple_obj = cadquery.Workplane('XY', origin=(bb.xmin, bb.ymin, bb.zmin)) \
.box(bb.xlen, bb.ylen, bb.zlen, centered=(False, False, False))
return simple_obj | python | def make_simple(self):
"""
Create and return *simplified* solid part.
The simplified representation of a ``Part`` is used to lower the export
quality of an ``Assembly`` or ``Part`` for rendering.
Overriding this is optional, but highly recommended.
The default behaviour returns the full complexity object's bounding box.
But to do this, the full complexity object must be generated first.
There are 2 main problems with this:
#. building the full complexity part is not efficient.
#. a bounding box may not be a good representation of the part.
**Bolts**
A good example of this is a bolt.
* building a bolt's thread is not a trivial task;
it can take some time to generate.
* a box is not a good visual representation of a bolt
So for the ``Fastener`` parts, all ``make_simple`` methods are overridden
to provide 2 cylinders, one for the bolt's head, and another for the thread.
"""
complex_obj = self.make()
bb = complex_obj.findSolid().BoundingBox()
simple_obj = cadquery.Workplane('XY', origin=(bb.xmin, bb.ymin, bb.zmin)) \
.box(bb.xlen, bb.ylen, bb.zlen, centered=(False, False, False))
return simple_obj | Create and return *simplified* solid part.
The simplified representation of a ``Part`` is used to lower the export
quality of an ``Assembly`` or ``Part`` for rendering.
Overriding this is optional, but highly recommended.
The default behaviour returns the full complexity object's bounding box.
But to do this, the full complexity object must be generated first.
There are 2 main problems with this:
#. building the full complexity part is not efficient.
#. a bounding box may not be a good representation of the part.
**Bolts**
A good example of this is a bolt.
* building a bolt's thread is not a trivial task;
it can take some time to generate.
* a box is not a good visual representation of a bolt
So for the ``Fastener`` parts, all ``make_simple`` methods are overridden
to provide 2 cylinders, one for the bolt's head, and another for the thread. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/part.py#L48-L80 |
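A hedged sketch of overriding ``make_simple()`` for a bolt-like part in the spirit described above; the class and its geometry are hypothetical, only ``cqparts.Part``, ``PositiveFloat`` and the cadquery calls are real library API:

```python
import cadquery
import cqparts
from cqparts.params import PositiveFloat

class SimpleBolt(cqparts.Part):  # hypothetical illustration
    length = PositiveFloat(20, doc="shaft length")
    diam = PositiveFloat(5, doc="shaft diameter")

    def make(self):
        # full geometry: hex head + shaft (a real thread is far more work)
        head = cadquery.Workplane('XY').polygon(6, self.diam * 2).extrude(self.diam * 0.6)
        return head.union(
            cadquery.Workplane('XY').circle(self.diam / 2).extrude(-self.length))

    def make_simple(self):
        # two cylinders: cheap to build, closer visual match than a box
        head = cadquery.Workplane('XY').circle(self.diam).extrude(self.diam * 0.6)
        return head.union(
            cadquery.Workplane('XY').circle(self.diam / 2).extrude(-self.length))
```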
cqparts/cqparts | src/cqparts/part.py | Part.local_obj | def local_obj(self):
"""
Buffered result of :meth:`make` which is (probably) a
:class:`cadquery.Workplane` instance. If ``_simple`` is ``True``, then
:meth:`make_simple` is returned instead.
.. note::
This is usually the correct way to get your part's object
for rendering, exporting, or measuring.
Only call :meth:`cqparts.Part.make` directly if you explicitly intend
to re-generate the model from scratch, then dispose of it.
"""
if self._local_obj is None:
# Simplified or Complex
if self._simple:
value = self.make_simple()
else:
value = self.make()
# Verify type
if not isinstance(value, cadquery.CQ):
raise MakeError("invalid object type returned by make(): %r" % value)
# Buffer object
self._local_obj = value
return self._local_obj | python | def local_obj(self):
"""
Buffered result of :meth:`make` which is (probably) a
:class:`cadquery.Workplane` instance. If ``_simple`` is ``True``, then
:meth:`make_simple` is returned instead.
.. note::
This is usually the correct way to get your part's object
for rendering, exporting, or measuring.
Only call :meth:`cqparts.Part.make` directly if you explicitly intend
to re-generate the model from scratch, then dispose of it.
"""
if self._local_obj is None:
# Simplified or Complex
if self._simple:
value = self.make_simple()
else:
value = self.make()
# Verify type
if not isinstance(value, cadquery.CQ):
raise MakeError("invalid object type returned by make(): %r" % value)
# Buffer object
self._local_obj = value
return self._local_obj | Buffered result of :meth:`make` which is (probably) a
:class:`cadquery.Workplane` instance. If ``_simple`` is ``True``, then
:meth:`make_simple` is returned instead.
.. note::
This is usually the correct way to get your part's object
for rendering, exporting, or measuring.
Only call :meth:`cqparts.Part.make` directly if you explicitly intend
to re-generate the model from scratch, then dispose of it. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/part.py#L95-L119 |
cqparts/cqparts | src/cqparts/part.py | Part.world_obj | def world_obj(self):
"""
The :meth:`local_obj <local_obj>` object in the
:meth:`world_coords <Component.world_coords>` coordinate system.
.. note::
This is automatically generated when called, and
:meth:`world_coords <Component.world_coords>` is not ``Null``.
"""
if self._world_obj is None:
local_obj = self.local_obj
world_coords = self.world_coords
if (local_obj is not None) and (world_coords is not None):
# Copy local object, apply transform to move to its new home.
self._world_obj = world_coords + local_obj
return self._world_obj | python | def world_obj(self):
"""
The :meth:`local_obj <local_obj>` object in the
:meth:`world_coords <Component.world_coords>` coordinate system.
.. note::
This is automatically generated when called, and
:meth:`world_coords <Component.world_coords>` is not ``Null``.
"""
if self._world_obj is None:
local_obj = self.local_obj
world_coords = self.world_coords
if (local_obj is not None) and (world_coords is not None):
# Copy local object, apply transform to move to its new home.
self._world_obj = world_coords + local_obj
return self._world_obj | The :meth:`local_obj <local_obj>` object in the
:meth:`world_coords <Component.world_coords>` coordinate system.
.. note::
This is automatically generated when called, and
:meth:`world_coords <Component.world_coords>` is not ``Null``. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/part.py#L128-L144 |
cqparts/cqparts | src/cqparts/part.py | Part.bounding_box | def bounding_box(self):
"""
Generate a bounding box based on the full complexity part.
:return: bounding box of part
:rtype: cadquery.BoundBox
"""
if self.world_coords:
return self.world_obj.findSolid().BoundingBox()
return self.local_obj.findSolid().BoundingBox() | python | def bounding_box(self):
"""
Generate a bounding box based on the full complexity part.
:return: bounding box of part
:rtype: cadquery.BoundBox
"""
if self.world_coords:
return self.world_obj.findSolid().BoundingBox()
return self.local_obj.findSolid().BoundingBox() | Generate a bounding box based on the full complexity part.
:return: bounding box of part
:rtype: cadquery.BoundBox | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/part.py#L152-L161 |
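The same cadquery calls used above, runnable in isolation to measure a solid:

```python
import cadquery

bb = cadquery.Workplane('XY').box(10, 20, 5).findSolid().BoundingBox()
print(bb.xlen, bb.ylen, bb.zlen)  # 10.0 20.0 5.0
```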
cqparts/cqparts | src/cqparts_fasteners/male.py | MaleFastenerPart.make_cutter | def make_cutter(self):
"""
Makes a shape to be used as a negative; it can be cut away from other
shapes to make a perfectly shaped pocket for this part.
For example, for a countersunk screw with a neck, the following
cutter would be generated.
.. image:: /_static/img/fastenerpart/male.cutter.png
If the head were an externally driven shape (like a hex bolt), then the
cutter's head would be wide enough to accommodate a tool to fasten it.
"""
# head
obj = self.head.make_cutter()
# neck
if self.neck_length:
# neck cut diameter (if thread is larger than the neck, thread must fit through)
(inner_radius, outer_radius) = self.thread.get_radii()
neck_cut_radius = max(outer_radius, self.neck_diam / 2)
neck = cadquery.Workplane(
'XY', origin=(0, 0, -self.neck_length)
).circle(neck_cut_radius).extrude(self.neck_length)
obj = obj.union(neck)
# thread (pilot hole)
pilot_hole = self.thread.make_pilothole_cutter() \
.translate((0, 0, -self.length))
obj = obj.union(pilot_hole)
return obj | python | def make_cutter(self):
"""
Makes a shape to be used as a negative; it can be cut away from other
shapes to make a perfectly shaped pocket for this part.
For example, for a countersunk screw with a neck, the following
cutter would be generated.
.. image:: /_static/img/fastenerpart/male.cutter.png
If the head were an externally driven shape (like a hex bolt), then the
cutter's head would be wide enough to accommodate a tool to fasten it.
"""
# head
obj = self.head.make_cutter()
# neck
if self.neck_length:
# neck cut diameter (if thread is larger than the neck, thread must fit through)
(inner_radius, outer_radius) = self.thread.get_radii()
neck_cut_radius = max(outer_radius, self.neck_diam / 2)
neck = cadquery.Workplane(
'XY', origin=(0, 0, -self.neck_length)
).circle(neck_cut_radius).extrude(self.neck_length)
obj = obj.union(neck)
# thread (pilot hole)
pilot_hole = self.thread.make_pilothole_cutter() \
.translate((0, 0, -self.length))
obj = obj.union(pilot_hole)
return obj | Makes a shape to be used as a negative; it can be cut away from other
shapes to make a perfectly shaped pocket for this part.
For example, for a countersunk screw with a neck, the following
cutter would be generated.
.. image:: /_static/img/fastenerpart/male.cutter.png
If the head were an externally driven shape (like a hex bolt), then the
cutter's head would be wide enough to accommodate a tool to fasten it. | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_fasteners/male.py#L245-L277 |
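A sketch of how the cutter is meant to be used: subtract it from a host solid so the fastener seats in a matching pocket ('screw' is assumed to be an instance of a MaleFastenerPart subclass):
import cadquery
panel = cadquery.Workplane('XY').box(40, 40, 10)  # host solid
pocket = screw.make_cutter()   # head + neck + pilot-hole negative, as above
panel = panel.cut(pocket)      # leaves a perfectly shaped pocket behind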
cqparts/cqparts | src/cqparts_motors/dc.py | _Cup.get_cutout | def get_cutout(self, clearance=0):
" get the cutout for the shaft"
return cq.Workplane('XY', origin=(0, 0, 0)) \
.circle((self.diam / 2) + clearance) \
.extrude(10) | python | def get_cutout(self, clearance=0):
" get the cutout for the shaft"
return cq.Workplane('XY', origin=(0, 0, 0)) \
.circle((self.diam / 2) + clearance) \
.extrude(10) | get the cutout for the shaft | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_motors/dc.py#L68-L72 |
cqparts/cqparts | src/cqparts_motors/dc.py | _Cup.mate_bottom | def mate_bottom(self):
" connect to the bottom of the cup"
return Mate(self, CoordSystem(\
origin=(0, 0, -self.height),\
xDir=(1, 0, 0),\
normal=(0, 0, 1))) | python | def mate_bottom(self):
" connect to the bottom of the cup"
return Mate(self, CoordSystem(\
origin=(0, 0, -self.height),\
xDir=(1, 0, 0),\
normal=(0, 0, 1))) | connect to the bottom of the cup | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts_motors/dc.py#L75-L80 |
cqparts/cqparts | src/cqparts/params/utils.py | as_parameter | def as_parameter(nullable=True, strict=True):
"""
Decorate a container class as a functional :class:`Parameter` class
for a :class:`ParametricObject`.
:param nullable: if set, parameter's value may be Null
:type nullable: :class:`bool`
.. doctest::
>>> from cqparts.params import as_parameter, ParametricObject
>>> @as_parameter(nullable=True)
... class Stuff(object):
... def __init__(self, a=1, b=2, c=3):
... self.a = a
... self.b = b
... self.c = c
... @property
... def abc(self):
... return (self.a, self.b, self.c)
>>> class Thing(ParametricObject):
... foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff")
>>> thing = Thing(foo={'a': 20})
>>> thing.foo.a
20
>>> thing.foo.abc
(20, 2, 3)
"""
def decorator(cls):
base_class = Parameter if nullable else NonNullParameter
return type(cls.__name__, (base_class,), {
# Preserve text for documentation
'__name__': cls.__name__,
'__doc__': cls.__doc__,
'__module__': cls.__module__,
# Sphinx doc type string
'_doc_type': ":class:`{class_name} <{module}.{class_name}>`".format(
class_name=cls.__name__, module=__name__
),
#
'type': lambda self, value: cls(**value)
})
return decorator | python | def as_parameter(nullable=True, strict=True):
"""
Decorate a container class as a functional :class:`Parameter` class
for a :class:`ParametricObject`.
:param nullable: if set, parameter's value may be Null
:type nullable: :class:`bool`
.. doctest::
>>> from cqparts.params import as_parameter, ParametricObject
>>> @as_parameter(nullable=True)
... class Stuff(object):
... def __init__(self, a=1, b=2, c=3):
... self.a = a
... self.b = b
... self.c = c
... @property
... def abc(self):
... return (self.a, self.b, self.c)
>>> class Thing(ParametricObject):
... foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff")
>>> thing = Thing(foo={'a': 20})
>>> thing.foo.a
20
>>> thing.foo.abc
(20, 2, 3)
"""
def decorator(cls):
base_class = Parameter if nullable else NonNullParameter
return type(cls.__name__, (base_class,), {
# Preserve text for documentation
'__name__': cls.__name__,
'__doc__': cls.__doc__,
'__module__': cls.__module__,
# Sphinx doc type string
'_doc_type': ":class:`{class_name} <{module}.{class_name}>`".format(
class_name=cls.__name__, module=__name__
),
#
'type': lambda self, value: cls(**value)
})
return decorator | Decorate a container class as a functional :class:`Parameter` class
for a :class:`ParametricObject`.
:param nullable: if set, parameter's value may be Null
:type nullable: :class:`bool`
.. doctest::
>>> from cqparts.params import as_parameter, ParametricObject
>>> @as_parameter(nullable=True)
... class Stuff(object):
... def __init__(self, a=1, b=2, c=3):
... self.a = a
... self.b = b
... self.c = c
... @property
... def abc(self):
... return (self.a, self.b, self.c)
>>> class Thing(ParametricObject):
... foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff")
>>> thing = Thing(foo={'a': 20})
>>> thing.foo.a
20
>>> thing.foo.abc
(20, 2, 3) | https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/params/utils.py#L6-L54 |
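A complementary sketch for the non-nullable case; that ``Widget(offset=None)`` raises is an assumption based on the ``NonNullParameter`` base selected above:
from cqparts.params import as_parameter, ParametricObject
@as_parameter(nullable=False)
class Vec(object):
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
class Widget(ParametricObject):
    offset = Vec({'x': 1}, doc="offset vector")
w = Widget(offset={'x': 5, 'y': 6})  # Vec built via the generated type(): cls(**value)
# Widget(offset=None) is assumed to raise, since the base is NonNullParameter.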
calmh/unifi-api | unifi/controller.py | Controller._construct_api_path | def _construct_api_path(self, version):
"""Returns valid base API path based on version given
The base API path for the URL is different depending on UniFi server version.
Default returns correct path for latest known stable working versions.
"""
V2_PATH = 'api/'
V3_PATH = 'api/s/' + self.site_id + '/'
if(version == 'v2'):
return V2_PATH
if(version == 'v3'):
return V3_PATH
if(version == 'v4'):
return V3_PATH
if(version == 'v5'):
return V3_PATH
else:
return V2_PATH | python | def _construct_api_path(self, version):
"""Returns valid base API path based on version given
The base API path for the URL is different depending on UniFi server version.
Default returns correct path for latest known stable working versions.
"""
V2_PATH = 'api/'
V3_PATH = 'api/s/' + self.site_id + '/'
if(version == 'v2'):
return V2_PATH
if(version == 'v3'):
return V3_PATH
if(version == 'v4'):
return V3_PATH
if(version == 'v5'):
return V3_PATH
else:
return V2_PATH | Returns valid base API path based on version given
The base API path for the URL is different depending on UniFi server version.
Default returns correct path for latest known stable working versions. | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L123-L143 |
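For illustration, the mapping this method implements ('ctrl' is assumed to be a Controller whose site_id is 'default'):
paths = {v: ctrl._construct_api_path(v) for v in ('v2', 'v3', 'v4', 'v5', 'v9')}
# {'v2': 'api/', 'v3': 'api/s/default/', 'v4': 'api/s/default/',
#  'v5': 'api/s/default/', 'v9': 'api/'}   # unknown versions fall back to v2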
calmh/unifi-api | unifi/controller.py | Controller.get_alerts_unarchived | def get_alerts_unarchived(self):
"""Return a list of Alerts unarchived."""
js = json.dumps({'_sort': '-time', 'archived': False})
params = urllib.urlencode({'json': js})
return self._read(self.api_url + 'list/alarm', params) | python | def get_alerts_unarchived(self):
"""Return a list of Alerts unarchived."""
js = json.dumps({'_sort': '-time', 'archived': False})
params = urllib.urlencode({'json': js})
return self._read(self.api_url + 'list/alarm', params) | Return a list of unarchived alerts. | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L177-L182 |
calmh/unifi-api | unifi/controller.py | Controller.get_statistics_24h | def get_statistics_24h(self, endtime):
"""Return statistical data last 24h from time"""
js = json.dumps(
{'attrs': ["bytes", "num_sta", "time"], 'start': int(endtime - 86400) * 1000, 'end': int(endtime - 3600) * 1000})
params = urllib.urlencode({'json': js})
return self._read(self.api_url + 'stat/report/hourly.system', params) | python | def get_statistics_24h(self, endtime):
"""Return statistical data last 24h from time"""
js = json.dumps(
{'attrs': ["bytes", "num_sta", "time"], 'start': int(endtime - 86400) * 1000, 'end': int(endtime - 3600) * 1000})
params = urllib.urlencode({'json': js})
return self._read(self.api_url + 'stat/report/hourly.system', params) | Return statistical data for the 24 hours preceding the given end time | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L189-L195 |
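A usage sketch ('ctrl' is assumed to be an authenticated Controller instance):
import time
stats = ctrl.get_statistics_24h(time.time())  # hourly buckets for the last day
for bucket in stats:
    print(bucket.get('time'), bucket.get('bytes'), bucket.get('num_sta'))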
calmh/unifi-api | unifi/controller.py | Controller.get_aps | def get_aps(self):
"""Return a list of all AP:s, with significant information about each."""
#Set test to 0 instead of NULL
params = json.dumps({'_depth': 2, 'test': 0})
return self._read(self.api_url + 'stat/device', params) | python | def get_aps(self):
"""Return a list of all AP:s, with significant information about each."""
#Set test to 0 instead of NULL
params = json.dumps({'_depth': 2, 'test': 0})
return self._read(self.api_url + 'stat/device', params) | Return a list of all APs, with significant information about each. | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L202-L207 |
calmh/unifi-api | unifi/controller.py | Controller.restart_ap_name | def restart_ap_name(self, name):
"""Restart an access point (by name).
Arguments:
name -- the name of the AP to restart.
"""
if not name:
raise APIError('%s is not a valid name' % str(name))
for ap in self.get_aps():
if ap.get('state', 0) == 1 and ap.get('name', None) == name:
self.restart_ap(ap['mac']) | python | def restart_ap_name(self, name):
"""Restart an access point (by name).
Arguments:
name -- the name of the AP to restart.
"""
if not name:
raise APIError('%s is not a valid name' % str(name))
for ap in self.get_aps():
if ap.get('state', 0) == 1 and ap.get('name', None) == name:
self.restart_ap(ap['mac']) | Restart an access point (by name).
Arguments:
name -- the name of the AP to restart. | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L285-L297 |
calmh/unifi-api | unifi/controller.py | Controller.archive_all_alerts | def archive_all_alerts(self):
"""Archive all Alerts
"""
js = json.dumps({'cmd': 'archive-all-alarms'})
params = urllib.urlencode({'json': js})
answer = self._read(self.api_url + 'cmd/evtmgr', params) | python | def archive_all_alerts(self):
"""Archive all Alerts
"""
js = json.dumps({'cmd': 'archive-all-alarms'})
params = urllib.urlencode({'json': js})
answer = self._read(self.api_url + 'cmd/evtmgr', params) | Archive all Alerts | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L299-L304 |
calmh/unifi-api | unifi/controller.py | Controller.create_backup | def create_backup(self):
"""Ask controller to create a backup archive file, response contains the path to the backup file.
Warning: This process puts significant load on the controller and may
render it partially unresponsive for other requests.
"""
js = json.dumps({'cmd': 'backup'})
params = urllib.urlencode({'json': js})
answer = self._read(self.api_url + 'cmd/system', params)
return answer[0].get('url') | python | def create_backup(self):
"""Ask controller to create a backup archive file, response contains the path to the backup file.
Warning: This process puts significant load on the controller and may
render it partially unresponsive for other requests.
"""
js = json.dumps({'cmd': 'backup'})
params = urllib.urlencode({'json': js})
answer = self._read(self.api_url + 'cmd/system', params)
return answer[0].get('url') | Ask controller to create a backup archive file, response contains the path to the backup file.
Warning: This process puts significant load on the controller and may
render it partially unresponsive for other requests. | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L306-L317 |
calmh/unifi-api | unifi/controller.py | Controller.get_backup | def get_backup(self, target_file='unifi-backup.unf'):
"""Get a backup archive from a controller.
Arguments:
target_file -- Filename or full path to download the backup archive to, should have .unf extension for restore.
"""
download_path = self.create_backup()
opener = self.opener.open(self.url + download_path)
unifi_archive = opener.read()
backupfile = open(target_file, 'w')
backupfile.write(unifi_archive)
backupfile.close() | python | def get_backup(self, target_file='unifi-backup.unf'):
"""Get a backup archive from a controller.
Arguments:
target_file -- Filename or full path to download the backup archive to, should have .unf extension for restore.
"""
download_path = self.create_backup()
opener = self.opener.open(self.url + download_path)
unifi_archive = opener.read()
backupfile = open(target_file, 'w')
backupfile.write(unifi_archive)
backupfile.close() | Get a backup archive from a controller.
Arguments:
target_file -- Filename or full path to download the backup archive to, should have .unf extension for restore. | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L319-L333 |
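A usage sketch; the constructor arguments follow the project's typical host/user/password call and are otherwise an assumption:
from unifi.controller import Controller
ctrl = Controller('192.168.1.1', 'admin', 'p4ssw0rd')  # host, username, password
ctrl.get_backup('/tmp/unifi-backup.unf')  # create_backup() on the controller, then download to disk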
calmh/unifi-api | unifi/controller.py | Controller.authorize_guest | def authorize_guest(self, guest_mac, minutes, up_bandwidth=None, down_bandwidth=None, byte_quota=None, ap_mac=None):
"""
Authorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
minutes -- duration of the authorization in minutes
up_bandwidth -- up speed allowed in kbps (optional)
down_bandwidth -- down speed allowed in kbps (optional)
byte_quota -- quantity of bytes allowed in MB (optional)
ap_mac -- access point MAC address (UniFi >= 3.x) (optional)
"""
cmd = 'authorize-guest'
js = {'mac': guest_mac, 'minutes': minutes}
if up_bandwidth:
js['up'] = up_bandwidth
if down_bandwidth:
js['down'] = down_bandwidth
if byte_quota:
js['bytes'] = byte_quota
if ap_mac and self.version != 'v2':
js['ap_mac'] = ap_mac
return self._run_command(cmd, params=js) | python | def authorize_guest(self, guest_mac, minutes, up_bandwidth=None, down_bandwidth=None, byte_quota=None, ap_mac=None):
"""
Authorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
minutes -- duration of the authorization in minutes
up_bandwidth -- up speed allowed in kbps (optional)
down_bandwidth -- down speed allowed in kbps (optional)
byte_quota -- quantity of bytes allowed in MB (optional)
ap_mac -- access point MAC address (UniFi >= 3.x) (optional)
"""
cmd = 'authorize-guest'
js = {'mac': guest_mac, 'minutes': minutes}
if up_bandwidth:
js['up'] = up_bandwidth
if down_bandwidth:
js['down'] = down_bandwidth
if byte_quota:
js['bytes'] = byte_quota
if ap_mac and self.version != 'v2':
js['ap_mac'] = ap_mac
return self._run_command(cmd, params=js) | Authorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
minutes -- duration of the authorization in minutes
up_bandwidth -- up speed allowed in kbps (optional)
down_bandwidth -- down speed allowed in kbps (optional)
byte_quota -- quantity of bytes allowed in MB (optional)
ap_mac -- access point MAC address (UniFi >= 3.x) (optional) | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L335-L359 |
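A sketch granting a guest two hours of access with shaped bandwidth and a quota ('ctrl' as above):
ctrl.authorize_guest('aa:bb:cc:dd:ee:ff', minutes=120,
                     up_bandwidth=512,     # kbps
                     down_bandwidth=2048,  # kbps
                     byte_quota=500)       # MB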
calmh/unifi-api | unifi/controller.py | Controller.unauthorize_guest | def unauthorize_guest(self, guest_mac):
"""
Unauthorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
"""
cmd = 'unauthorize-guest'
js = {'mac': guest_mac}
return self._run_command(cmd, params=js) | python | def unauthorize_guest(self, guest_mac):
"""
Unauthorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
"""
cmd = 'unauthorize-guest'
js = {'mac': guest_mac}
return self._run_command(cmd, params=js) | Unauthorize a guest based on his MAC address.
Arguments:
guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff | https://github.com/calmh/unifi-api/blob/5562d9c7689ef3d08c2d2390fb83d66f65d1086e/unifi/controller.py#L361-L371 |
maybelinot/df2gspread | df2gspread/gspread2df.py | download | def download(gfile, wks_name=None, col_names=False, row_names=False,
credentials=None, start_cell = 'A1'):
"""
Download Google Spreadsheet and convert it to Pandas DataFrame
:param gfile: path to Google Spreadsheet or gspread ID
:param wks_name: worksheet name
:param col_names: assign top row to column names for Pandas DataFrame
:param row_names: assign left column to row names for Pandas DataFrame
:param credentials: provide own credentials
:param start_cell: specify where to start capturing the DataFrame; default is A1
:type gfile: str
:type wks_name: str
:type col_names: bool
:type row_names: bool
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type start_cell: str
:returns: Pandas DataFrame
:rtype: class 'pandas.core.frame.DataFrame'
:Example:
>>> from df2gspread import gspread2df as g2d
>>> df = g2d.download(gfile="1U-kSDyeD-...", col_names=True, row_names=True)
>>> df
col1 col2
field1 1 2
field2 3 4
"""
# access credentials
credentials = get_credentials(credentials)
# auth for gspread
gc = gspread.authorize(credentials)
try:
# if gfile is file_id
gc.open_by_key(gfile).__repr__()
gfile_id = gfile
except:
# else look for file_id in drive
gfile_id = get_file_id(credentials, gfile)
if gfile_id is None:
raise RuntimeError(
"Trying to open non-existent or inaccessible spreadsheet")
wks = get_worksheet(gc, gfile_id, wks_name)
if wks is None:
raise RuntimeError(
"Trying to open non-existent or inaccessible worksheet")
raw_data = wks.get_all_values()
if not raw_data:
raise ValueError(
'Worksheet is empty or invalid.'
)
start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell)
rows, cols = np.shape(raw_data)
if start_col_int > cols or (row_names and start_col_int + 1 > cols):
raise RuntimeError(
"Start col (%s) out of the table columns(%s)" % (start_col_int +
row_names, cols))
if start_row_int > rows or (col_names and start_row_int + 1 > rows):
raise RuntimeError(
"Start row (%s) out of the table rows(%s)" % (start_row_int +
col_names, rows))
raw_data = [row[start_col_int-1:] for row in raw_data[start_row_int-1:]]
if row_names and col_names:
row_names = [row[0] for row in raw_data[1:]]
col_names = raw_data[0][1:]
raw_data = [row[1:] for row in raw_data[1:]]
elif row_names:
row_names = [row[0] for row in raw_data]
col_names = np.arange(len(raw_data[0]) - 1)
raw_data = [row[1:] for row in raw_data]
elif col_names:
row_names = np.arange(len(raw_data) - 1)
col_names = raw_data[0]
raw_data = raw_data[1:]
else:
row_names = np.arange(len(raw_data))
col_names = np.arange(len(raw_data[0]))
df = pd.DataFrame([pd.Series(row) for row in raw_data], index=row_names)
df.columns = col_names
return df | python | def download(gfile, wks_name=None, col_names=False, row_names=False,
credentials=None, start_cell = 'A1'):
"""
Download Google Spreadsheet and convert it to Pandas DataFrame
:param gfile: path to Google Spreadsheet or gspread ID
:param wks_name: worksheet name
:param col_names: assign top row to column names for Pandas DataFrame
:param row_names: assign left column to row names for Pandas DataFrame
:param credentials: provide own credentials
:param start_cell: specify where to start capturing the DataFrame; default is A1
:type gfile: str
:type wks_name: str
:type col_names: bool
:type row_names: bool
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type start_cell: str
:returns: Pandas DataFrame
:rtype: class 'pandas.core.frame.DataFrame'
:Example:
>>> from df2gspread import gspread2df as g2d
>>> df = g2d.download(gfile="1U-kSDyeD-...", col_names=True, row_names=True)
>>> df
col1 col2
field1 1 2
field2 3 4
"""
# access credentials
credentials = get_credentials(credentials)
# auth for gspread
gc = gspread.authorize(credentials)
try:
# if gfile is file_id
gc.open_by_key(gfile).__repr__()
gfile_id = gfile
except:
# else look for file_id in drive
gfile_id = get_file_id(credentials, gfile)
if gfile_id is None:
raise RuntimeError(
"Trying to open non-existent or inaccessible spreadsheet")
wks = get_worksheet(gc, gfile_id, wks_name)
if wks is None:
raise RuntimeError(
"Trying to open non-existent or inaccessible worksheet")
raw_data = wks.get_all_values()
if not raw_data:
raise ValueError(
'Worksheet is empty or invalid.'
)
start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell)
rows, cols = np.shape(raw_data)
if start_col_int > cols or (row_names and start_col_int + 1 > cols):
raise RuntimeError(
"Start col (%s) out of the table columns(%s)" % (start_col_int +
row_names, cols))
if start_row_int > rows or (col_names and start_row_int + 1 > rows):
raise RuntimeError(
"Start row (%s) out of the table rows(%s)" % (start_row_int +
col_names, rows))
raw_data = [row[start_col_int-1:] for row in raw_data[start_row_int-1:]]
if row_names and col_names:
row_names = [row[0] for row in raw_data[1:]]
col_names = raw_data[0][1:]
raw_data = [row[1:] for row in raw_data[1:]]
elif row_names:
row_names = [row[0] for row in raw_data]
col_names = np.arange(len(raw_data[0]) - 1)
raw_data = [row[1:] for row in raw_data]
elif col_names:
row_names = np.arange(len(raw_data) - 1)
col_names = raw_data[0]
raw_data = raw_data[1:]
else:
row_names = np.arange(len(raw_data))
col_names = np.arange(len(raw_data[0]))
df = pd.DataFrame([pd.Series(row) for row in raw_data], index=row_names)
df.columns = col_names
return df | Download Google Spreadsheet and convert it to Pandas DataFrame
:param gfile: path to Google Spreadsheet or gspread ID
:param wks_name: worksheet name
:param col_names: assing top row to column names for Pandas DataFrame
:param row_names: assing left column to row names for Pandas DataFrame
:param credentials: provide own credentials
:param start_cell: specify where to start capturing of the DataFrame; default is A1
:type gfile: str
:type wks_name: str
:type col_names: bool
:type row_names: bool
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type start_cell: str
:returns: Pandas DataFrame
:rtype: class 'pandas.core.frame.DataFrame'
:Example:
>>> from df2gspread import gspread2df as g2d
>>> df = g2d.download(gfile="1U-kSDyeD-...", col_names=True, row_names=True)
>>> df
col1 col2
field1 1 2
field2 3 4 | https://github.com/maybelinot/df2gspread/blob/f4cef3800704aceff2ed08a623a594b558d44898/df2gspread/gspread2df.py#L29-L123 |
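When the table does not start at A1, 'start_cell' shifts the capture window; a sketch (the spreadsheet ID and layout are assumptions):
from df2gspread import gspread2df as g2d
# Table occupies C3 onward; the first captured row becomes the header,
# the first captured column the index.
df = g2d.download(gfile='1U-kSDyeD-...', wks_name='Sheet1',
                  col_names=True, row_names=True, start_cell='C3')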
maybelinot/df2gspread | df2gspread/utils.py | get_credentials | def get_credentials(credentials=None, client_secret_file=CLIENT_SECRET_FILE, refresh_token=None):
"""Consistently returns valid credentials object.
See Also:
https://developers.google.com/drive/web/quickstart/python
Args:
client_secret_file (str): path to client secrets file, defaults to .gdrive_private
refresh_token (str): path to a user provided refresh token that is already
pre-authenticated
credentials (`~oauth2client.client.OAuth2Credentials`, optional): handle direct
input of credentials, which will check credentials for valid type and
return them
Returns:
`~oauth2client.client.OAuth2Credentials`: google credentials object
"""
# if the utility was provided credentials just return those
if credentials:
if _is_valid_credentials(credentials):
# auth for gspread
return credentials
else:
print("Invalid credentials supplied. Will generate from default token.")
token = refresh_token or DEFAULT_TOKEN
dir_name = os.path.dirname(DEFAULT_TOKEN)
try:
os.makedirs(dir_name)
except OSError:
if not os.path.isdir(dir_name):
raise
store = file.Storage(token)
credentials = store.get()
try:
import argparse
flags = argparse.ArgumentParser(
parents=[tools.argparser]).parse_known_args()[0]
except ImportError:
flags = None
logr.error(
'Unable to parse oauth2client args; `pip install argparse`')
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(
client_secret_file, SCOPES)
flow.redirect_uri = client.OOB_CALLBACK_URN
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
logr.info('Storing credentials to ' + DEFAULT_TOKEN)
return credentials | python | def get_credentials(credentials=None, client_secret_file=CLIENT_SECRET_FILE, refresh_token=None):
"""Consistently returns valid credentials object.
See Also:
https://developers.google.com/drive/web/quickstart/python
Args:
client_secret_file (str): path to client secrets file, defaults to .gdrive_private
refresh_token (str): path to a user provided refresh token that is already
pre-authenticated
credentials (`~oauth2client.client.OAuth2Credentials`, optional): handle direct
input of credentials, which will check credentials for valid type and
return them
Returns:
`~oauth2client.client.OAuth2Credentials`: google credentials object
"""
# if the utility was provided credentials just return those
if credentials:
if _is_valid_credentials(credentials):
# auth for gspread
return credentials
else:
print("Invalid credentials supplied. Will generate from default token.")
token = refresh_token or DEFAULT_TOKEN
dir_name = os.path.dirname(DEFAULT_TOKEN)
try:
os.makedirs(dir_name)
except OSError:
if not os.path.isdir(dir_name):
raise
store = file.Storage(token)
credentials = store.get()
try:
import argparse
flags = argparse.ArgumentParser(
parents=[tools.argparser]).parse_known_args()[0]
except ImportError:
flags = None
logr.error(
'Unable to parse oauth2client args; `pip install argparse`')
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(
client_secret_file, SCOPES)
flow.redirect_uri = client.OOB_CALLBACK_URN
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
logr.info('Storing credentials to ' + DEFAULT_TOKEN)
return credentials | Consistently returns valid credentials object.
See Also:
https://developers.google.com/drive/web/quickstart/python
Args:
client_secret_file (str): path to client secrets file, defaults to .gdrive_private
refresh_token (str): path to a user provided refresh token that is already
pre-authenticated
credentials (`~oauth2client.client.OAuth2Credentials`, optional): handle direct
input of credentials, which will check credentials for valid type and
return them
Returns:
`~oauth2client.client.OAuth2Credentials`: google credentials object | https://github.com/maybelinot/df2gspread/blob/f4cef3800704aceff2ed08a623a594b558d44898/df2gspread/utils.py#L33-L90 |
maybelinot/df2gspread | df2gspread/utils.py | create_service_credentials | def create_service_credentials(private_key_file=None, client_email=None,
client_secret_file=CLIENT_SECRET_FILE):
"""Create credentials from service account information.
See Also:
https://developers.google.com/api-client-library/python/auth/service-accounts
Args:
client_secret_file (str): path to json file with just the client_email when
providing the `private_key_file` separately, or this file can have both the
`client_email` and `private_key` contained in it. Defaults to .gdrive_private
client_email (str): service email account
private_key_file (str): path to the p12 private key, defaults to same name of file
used for regular authentication
Returns:
`~oauth2client.client.OAuth2Credentials`: google credentials object
"""
if private_key_file is not None:
with open(os.path.expanduser(private_key_file)) as f:
private_key = f.read()
else:
private_key = None
if client_email is None:
with open(os.path.expanduser(client_secret_file)) as client_file:
client_data = json.load(client_file)
if 'installed' in client_data:
# handle regular json format where key is separate
client_email = client_data['installed']['client_id']
if private_key is None:
raise RuntimeError('You must have the private key file \
with the regular json file. Try creating a new \
public/private key pair and downloading as json.')
else:
# handle newer case where json file has everything in it
client_email = client_data['client_email']
private_key = client_data['private_key']
if client_email is None or private_key is None:
raise RuntimeError(
'Client email and/or private key not provided by inputs.')
credentials = client.SignedJwtAssertionCredentials(
client_email, private_key, SCOPES)
return credentials | python | def create_service_credentials(private_key_file=None, client_email=None,
client_secret_file=CLIENT_SECRET_FILE):
"""Create credentials from service account information.
See Also:
https://developers.google.com/api-client-library/python/auth/service-accounts
Args:
client_secret_file (str): path to json file with just the client_email when
providing the `private_key_file` separately, or this file can have both the
`client_email` and `private_key` contained in it. Defaults to .gdrive_private
client_email (str): service email account
private_key_file (str): path to the p12 private key, defaults to same name of file
used for regular authentication
Returns:
`~oauth2client.client.OAuth2Credentials`: google credentials object
"""
if private_key_file is not None:
with open(os.path.expanduser(private_key_file)) as f:
private_key = f.read()
else:
private_key = None
if client_email is None:
with open(os.path.expanduser(client_secret_file)) as client_file:
client_data = json.load(client_file)
if 'installed' in client_data:
# handle regular json format where key is separate
client_email = client_data['installed']['client_id']
if private_key is None:
raise RuntimeError('You must have the private key file \
with the regular json file. Try creating a new \
public/private key pair and downloading as json.')
else:
# handle newer case where json file has everything in it
client_email = client_data['client_email']
private_key = client_data['private_key']
if client_email is None or private_key is None:
raise RuntimeError(
'Client email and/or private key not provided by inputs.')
credentials = client.SignedJwtAssertionCredentials(
client_email, private_key, SCOPES)
return credentials | Create credentials from service account information.
See Also:
https://developers.google.com/api-client-library/python/auth/service-accounts
Args:
client_secret_file (str): path to json file with just the client_email when
providing the `private_key_file` separately, or this file can have both the
`client_email` and `private_key` contained in it. Defaults to .gdrive_private
client_email (str): service email account
private_key_file (str): path to the p12 private key, defaults to same name of file
used for regular authentication
Returns:
`~oauth2client.client.OAuth2Credentials`: google credentials object | https://github.com/maybelinot/df2gspread/blob/f4cef3800704aceff2ed08a623a594b558d44898/df2gspread/utils.py#L97-L146 |
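A sketch of the service-account path, assuming a JSON key file of the newer format that carries both client_email and private_key:
import pandas as pd
from df2gspread import df2gspread as d2g
from df2gspread.utils import create_service_credentials
credentials = create_service_credentials(client_secret_file='~/service_account.json')
d2g.upload(pd.DataFrame({'a': [1, 2]}), gfile='/reports/demo',
           credentials=credentials)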
maybelinot/df2gspread | df2gspread/gfiles.py | get_file_id | def get_file_id(credentials, gfile, write_access=False):
"""
Get file ID by provided path. If file does not exist and
`write_access` is true, it will create the whole path for you.
:param credentials: provide own credentials
:param gfile: path to Google Spreadsheet
:param write_access: allows to create full path if file does not exist
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type gfile: str
:type write_access: boolean
:returns: file ID
:rtype: str
:Example:
>>> from df2gspread.gfiles import get_file_id
>>> from df2gspread.utils import get_credentials
>>> gfile = '/some/folder/with/file'
>>> credentials = get_credentials()
>>> get_file_id(credentials=credentials, gfile=gfile, write_access=True)
u'78asbcsSND8sdSACNsa7ggcasca8shscaSACVD'
"""
# auth for apiclient
http = credentials.authorize(Http())
service = discovery.build('drive', 'v3', http=http, cache_discovery=False)
file_id = service.files().get(fileId='root', fields='id').execute().get('id')
# folder/folder/folder/spreadsheet
pathway = gfile.strip('/').split('/')
for idx, name in enumerate(pathway):
files = service.files().list(
q="name = '{}' and trashed = false and '{}' in parents".format(name, file_id)).execute()['files']
if len(files) > 0:
# Why do you ever need to use several folders with the same name?!
file_id = files[0].get('id')
elif write_access == True:
body = {
'mimeType': 'application/vnd.google-apps.' + ('spreadsheet' if idx == len(pathway)-1 else 'folder'),
'name': name,
'parents': [file_id]
}
file_id = service.files().create(body=body, fields='id').execute().get('id')
else:
return None
return file_id | python | def get_file_id(credentials, gfile, write_access=False):
"""
Get file ID by provided path. If file does not exist and
`write_access` is true, it will create the whole path for you.
:param credentials: provide own credentials
:param gfile: path to Google Spreadsheet
:param write_access: allows to create full path if file does not exist
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type gfile: str
:type write_access: boolean
:returns: file ID
:rtype: str
:Example:
>>> from df2gspread.gfiles import get_file_id
>>> from df2gspread.utils import get_credentials
>>> gfile = '/some/folder/with/file'
>>> credentials = get_credentials()
>>> get_file_id(credentials=credentials, gfile=gfile, write_access=True)
u'78asbcsSND8sdSACNsa7ggcasca8shscaSACVD'
"""
# auth for apiclient
http = credentials.authorize(Http())
service = discovery.build('drive', 'v3', http=http, cache_discovery=False)
file_id = service.files().get(fileId='root', fields='id').execute().get('id')
# folder/folder/folder/spreadsheet
pathway = gfile.strip('/').split('/')
for idx, name in enumerate(pathway):
files = service.files().list(
q="name = '{}' and trashed = false and '{}' in parents".format(name, file_id)).execute()['files']
if len(files) > 0:
# Why do you ever need to use several folders with the same name?!
file_id = files[0].get('id')
elif write_access == True:
body = {
'mimeType': 'application/vnd.google-apps.' + ('spreadsheet' if idx == len(pathway)-1 else 'folder'),
'name': name,
'parents': [file_id]
}
file_id = service.files().create(body=body, fields='id').execute().get('id')
else:
return None
return file_id | Get file ID by provided path. If file does not exist and
`write_access` is true, it will create the whole path for you.
:param credentials: provide own credentials
:param gfile: path to Google Spreadsheet
:param write_access: allows to create full path if file does not exist
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type gfile: str
:type write_access: boolean
:returns: file ID
:rtype: str
:Example:
>>> from df2gspread.gfiles import get_file_id
>>> from df2gspread.utils import get_credentials
>>> gfile = '/some/folder/with/file'
>>> credentials = get_credentials()
>>> get_file_id(credentials=credentials, gfile=gfile, write_access=True)
u'78asbcsSND8sdSACNsa7ggcasca8shscaSACVD' | https://github.com/maybelinot/df2gspread/blob/f4cef3800704aceff2ed08a623a594b558d44898/df2gspread/gfiles.py#L19-L66 |
maybelinot/df2gspread | df2gspread/gfiles.py | get_worksheet | def get_worksheet(gc, gfile_id, wks_name, write_access=False, new_sheet_dimensions=(1000, 100)):
"""DOCS..."""
spsh = gc.open_by_key(gfile_id)
# if worksheet name is not provided , take first worksheet
if wks_name is None:
wks = spsh.sheet1
# if worksheet name provided and exist in given spreadsheet
else:
try:
wks = spsh.worksheet(wks_name)
except:
#rows, cols = new_sheet_dimensions
wks = spsh.add_worksheet(
wks_name, *new_sheet_dimensions) if write_access == True else None
return wks | python | def get_worksheet(gc, gfile_id, wks_name, write_access=False, new_sheet_dimensions=(1000, 100)):
"""DOCS..."""
spsh = gc.open_by_key(gfile_id)
# if worksheet name is not provided , take first worksheet
if wks_name is None:
wks = spsh.sheet1
# if worksheet name provided and exist in given spreadsheet
else:
try:
wks = spsh.worksheet(wks_name)
except:
#rows, cols = new_sheet_dimensions
wks = spsh.add_worksheet(
wks_name, *new_sheet_dimensions) if write_access == True else None
return wks | Return worksheet 'wks_name' from spreadsheet 'gfile_id' (the first worksheet if 'wks_name' is None); if it is missing and 'write_access' is True, create it with 'new_sheet_dimensions', otherwise return None. | https://github.com/maybelinot/df2gspread/blob/f4cef3800704aceff2ed08a623a594b558d44898/df2gspread/gfiles.py#L69-L86 |
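A sketch of calling it directly (it is normally invoked via upload/download); the client setup mirrors those functions:
import gspread
from df2gspread.utils import get_credentials
from df2gspread.gfiles import get_file_id, get_worksheet
credentials = get_credentials()
gc = gspread.authorize(credentials)
gfile_id = get_file_id(credentials, '/some/folder/with/file', write_access=True)
# Create 'Stats' as a 200x20 sheet if it does not exist yet.
wks = get_worksheet(gc, gfile_id, 'Stats', write_access=True,
                    new_sheet_dimensions=(200, 20))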
maybelinot/df2gspread | df2gspread/gfiles.py | delete_file | def delete_file(credentials, file_id):
"""DOCS..."""
try:
http = credentials.authorize(Http())
service = discovery.build(
'drive', 'v3', http=http, cache_discovery=False)
service.files().delete(fileId=file_id).execute()
except errors.HttpError as e:
logr.error(e)
raise | python | def delete_file(credentials, file_id):
"""DOCS..."""
try:
http = credentials.authorize(Http())
service = discovery.build(
'drive', 'v3', http=http, cache_discovery=False)
service.files().delete(fileId=file_id).execute()
except errors.HttpError as e:
logr.error(e)
raise | Delete the Google Drive file with the given 'file_id'; HTTP errors are logged and re-raised. | https://github.com/maybelinot/df2gspread/blob/f4cef3800704aceff2ed08a623a594b558d44898/df2gspread/gfiles.py#L89-L98 |
maybelinot/df2gspread | df2gspread/df2gspread.py | upload | def upload(df, gfile="/New Spreadsheet", wks_name=None,
col_names=True, row_names=True, clean=True, credentials=None,
start_cell = 'A1', df_size = False, new_sheet_dimensions = (1000,100)):
'''
Upload the given Pandas DataFrame to Google Drive and return the
gspread Worksheet object
:param df: Pandas DataFrame
:param gfile: path to Google Spreadsheet or gspread ID
:param wks_name: worksheet name
:param col_names: if True, write the DataFrame column names into the top row
:param row_names: if True, write the DataFrame index into the left column
:param clean: clean all data in worksheet before uploading
:param credentials: provide own credentials
:param start_cell: specify where to insert the DataFrame; default is A1
:param df_size:
-If True and worksheet name does NOT exist, will create
a new worksheet that is the size of the df; otherwise, by default,
creates sheet of 1000x100 cells.
-If True and worksheet does exist, will resize larger or smaller to
fit new dataframe.
-If False and dataframe is larger than existing sheet, will resize
the sheet larger.
-If False and dataframe is smaller than existing sheet, does not resize.
:param new_sheet_dimensions: tuple of (row, cols) for size of a new sheet
:type df: class 'pandas.core.frame.DataFrame'
:type gfile: str
:type wks_name: str
:type col_names: bool
:type row_names: bool
:type clean: bool
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type start_cell: str
:type df_size: bool
:type new_sheet_dimensions: tuple
:returns: gspread Worksheet
:rtype: class 'gspread.models.Worksheet'
:Example:
>>> from df2gspread import df2gspread as d2g
>>> import pandas as pd
>>> df = pd.DataFrame([1, 2, 3])
>>> wks = d2g.upload(df, wks_name='Example worksheet')
>>> wks.title
'Example worksheet'
'''
# access credentials
credentials = get_credentials(credentials)
# auth for gspread
gc = gspread.authorize(credentials)
try:
gc.open_by_key(gfile).__repr__()
gfile_id = gfile
except:
gfile_id = get_file_id(credentials, gfile, write_access=True)
# Tuple of rows, cols in the dataframe.
# If user did not explicitly specify to resize sheet to dataframe size
# then for new sheets set it to new_sheet_dimensions, which is by default 1000x100
if df_size:
new_sheet_dimensions = (len(df), len(df.columns))
wks = get_worksheet(gc, gfile_id, wks_name, write_access=True,
new_sheet_dimensions=new_sheet_dimensions)
if clean:
wks = clean_worksheet(wks, gfile_id, wks_name, credentials)
start_col = re.split(r'(\d+)',start_cell)[0].upper()
start_row = re.split(r'(\d+)',start_cell)[1]
start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell)
# find last index and column name (A B ... Z AA AB ... AZ BA)
num_rows = len(df.index) + 1 if col_names else len(df.index)
last_idx_adjust = start_row_int - 1
last_idx = num_rows + last_idx_adjust
num_cols = len(df.columns) + 1 if row_names else len(df.columns)
last_col_adjust = start_col_int - 1
last_col_int = num_cols + last_col_adjust
last_col = re.split(r'(\d+)',(gspread.utils.rowcol_to_a1(1, last_col_int)))[0].upper()
# If user requested to resize sheet to fit dataframe, go ahead and
# resize larger or smaller to better match new size of pandas dataframe.
# Otherwise, leave it the same size unless the sheet needs to be expanded
# to accommodate a larger dataframe.
if df_size:
wks.resize(rows=len(df.index) + col_names, cols=len(df.columns) + row_names)
if len(df.index) + col_names + last_idx_adjust > wks.row_count:
wks.add_rows(len(df.index) - wks.row_count + col_names + last_idx_adjust)
if len(df.columns) + row_names + last_col_adjust > wks.col_count:
wks.add_cols(len(df.columns) - wks.col_count + row_names + last_col_adjust)
# Define first cell for rows and columns
first_col = re.split(r'(\d+)',(gspread.utils.rowcol_to_a1(1, start_col_int + 1)))[0].upper() if row_names else start_col
first_row = str(start_row_int + 1) if col_names else start_row
# Addition of col names
if col_names:
cell_list = wks.range('%s%s:%s%s' % (first_col, start_row, last_col, start_row))
for idx, cell in enumerate(cell_list):
cell.value = df.columns.astype(str)[idx]
wks.update_cells(cell_list)
# Addition of row names
if row_names:
cell_list = wks.range('%s%s:%s%d' % (
start_col, first_row, start_col, last_idx))
for idx, cell in enumerate(cell_list):
cell.value = df.index.astype(str)[idx]
wks.update_cells(cell_list)
# convert df values to string
df = df.applymap(str)
# Addition of cell values
cell_list = wks.range('%s%s:%s%d' % (
first_col, first_row, last_col, last_idx))
for j, idx in enumerate(df.index):
for i, col in enumerate(df.columns.values):
if not pd.isnull(df[col][idx]):
cell_list[i + j * len(df.columns.values)].value = df[col][idx]
wks.update_cells(cell_list)
return wks | python | def upload(df, gfile="/New Spreadsheet", wks_name=None,
col_names=True, row_names=True, clean=True, credentials=None,
start_cell = 'A1', df_size = False, new_sheet_dimensions = (1000,100)):
'''
Upload the given Pandas DataFrame to Google Drive and return the
gspread Worksheet object
:param df: Pandas DataFrame
:param gfile: path to Google Spreadsheet or gspread ID
:param wks_name: worksheet name
:param col_names: if True, write the DataFrame column names into the top row
:param row_names: if True, write the DataFrame index into the left column
:param clean: clean all data in worksheet before uploading
:param credentials: provide own credentials
:param start_cell: specify where to insert the DataFrame; default is A1
:param df_size:
-If True and worksheet name does NOT exist, will create
a new worksheet that is the size of the df; otherwise, by default,
creates sheet of 1000x100 cells.
-If True and worksheet does exist, will resize larger or smaller to
fit new dataframe.
-If False and dataframe is larger than existing sheet, will resize
the sheet larger.
-If False and dataframe is smaller than existing sheet, does not resize.
:param new_sheet_dimensions: tuple of (row, cols) for size of a new sheet
:type df: class 'pandas.core.frame.DataFrame'
:type gfile: str
:type wks_name: str
:type col_names: bool
:type row_names: bool
:type clean: bool
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type start_cell: str
:type df_size: bool
:type new_sheet_dimensions: tuple
:returns: gspread Worksheet
:rtype: class 'gspread.models.Worksheet'
:Example:
>>> from df2gspread import df2gspread as d2g
>>> import pandas as pd
>>> df = pd.DataFrame([1, 2, 3])
>>> wks = d2g.upload(df, wks_name='Example worksheet')
>>> wks.title
'Example worksheet'
'''
# access credentials
credentials = get_credentials(credentials)
# auth for gspread
gc = gspread.authorize(credentials)
try:
gc.open_by_key(gfile).__repr__()
gfile_id = gfile
except:
gfile_id = get_file_id(credentials, gfile, write_access=True)
# Tuple of rows, cols in the dataframe.
# If user did not explicitly specify to resize sheet to dataframe size
# then for new sheets set it to new_sheet_dimensions, which is by default 1000x100
if df_size:
new_sheet_dimensions = (len(df), len(df.columns))
wks = get_worksheet(gc, gfile_id, wks_name, write_access=True,
new_sheet_dimensions=new_sheet_dimensions)
if clean:
wks = clean_worksheet(wks, gfile_id, wks_name, credentials)
start_col = re.split(r'(\d+)',start_cell)[0].upper()
start_row = re.split(r'(\d+)',start_cell)[1]
start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell)
# find last index and column name (A B ... Z AA AB ... AZ BA)
num_rows = len(df.index) + 1 if col_names else len(df.index)
last_idx_adjust = start_row_int - 1
last_idx = num_rows + last_idx_adjust
num_cols = len(df.columns) + 1 if row_names else len(df.columns)
last_col_adjust = start_col_int - 1
last_col_int = num_cols + last_col_adjust
last_col = re.split(r'(\d+)',(gspread.utils.rowcol_to_a1(1, last_col_int)))[0].upper()
# If user requested to resize sheet to fit dataframe, go ahead and
# resize larger or smaller to better match new size of pandas dataframe.
# Otherwise, leave it the same size unless the sheet needs to be expanded
# to accommodate a larger dataframe.
if df_size:
wks.resize(rows=len(df.index) + col_names, cols=len(df.columns) + row_names)
if len(df.index) + col_names + last_idx_adjust > wks.row_count:
wks.add_rows(len(df.index) - wks.row_count + col_names + last_idx_adjust)
if len(df.columns) + row_names + last_col_adjust > wks.col_count:
wks.add_cols(len(df.columns) - wks.col_count + row_names + last_col_adjust)
# Define first cell for rows and columns
first_col = re.split(r'(\d+)',(gspread.utils.rowcol_to_a1(1, start_col_int + 1)))[0].upper() if row_names else start_col
first_row = str(start_row_int + 1) if col_names else start_row
# Addition of col names
if col_names:
cell_list = wks.range('%s%s:%s%s' % (first_col, start_row, last_col, start_row))
for idx, cell in enumerate(cell_list):
cell.value = df.columns.astype(str)[idx]
wks.update_cells(cell_list)
# Addition of row names
if row_names:
cell_list = wks.range('%s%s:%s%d' % (
start_col, first_row, start_col, last_idx))
for idx, cell in enumerate(cell_list):
cell.value = df.index.astype(str)[idx]
wks.update_cells(cell_list)
# convert df values to string
df = df.applymap(str)
# Addition of cell values
cell_list = wks.range('%s%s:%s%d' % (
first_col, first_row, last_col, last_idx))
for j, idx in enumerate(df.index):
for i, col in enumerate(df.columns.values):
if not pd.isnull(df[col][idx]):
cell_list[i + j * len(df.columns.values)].value = df[col][idx]
wks.update_cells(cell_list)
return wks | Upload the given Pandas DataFrame to Google Drive and return the
gspread Worksheet object
:param df: Pandas DataFrame
:param gfile: path to Google Spreadsheet or gspread ID
:param wks_name: worksheet name
:param col_names: if True, write the DataFrame column names into the top row
:param row_names: if True, write the DataFrame index into the left column
:param clean: clean all data in worksheet before uploading
:param credentials: provide own credentials
:param start_cell: specify where to insert the DataFrame; default is A1
:param df_size:
-If True and worksheet name does NOT exist, will create
a new worksheet that is the size of the df; otherwise, by default,
creates sheet of 1000x100 cells.
-If True and worksheet does exist, will resize larger or smaller to
fit new dataframe.
-If False and dataframe is larger than existing sheet, will resize
the sheet larger.
-If False and dataframe is smaller than existing sheet, does not resize.
:param new_sheet_dimensions: tuple of (row, cols) for size of a new sheet
:type df: class 'pandas.core.frame.DataFrame'
:type gfile: str
:type wks_name: str
:type col_names: bool
:type row_names: bool
:type clean: bool
:type credentials: class 'oauth2client.client.OAuth2Credentials'
:type start_cell: str
:type df_size: bool
:type new_sheet_dimensions: tuple
:returns: gspread Worksheet
:rtype: class 'gspread.models.Worksheet'
:Example:
>>> from df2gspread import df2gspread as d2g
>>> import pandas as pd
>>> df = pd.DataFrame([1, 2, 3])
>>> wks = d2g.upload(df, wks_name='Example worksheet')
>>> wks.title
'Example worksheet' | https://github.com/maybelinot/df2gspread/blob/f4cef3800704aceff2ed08a623a594b558d44898/df2gspread/df2gspread.py#L24-L148 |
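A sketch combining 'start_cell' and 'df_size' (the file path and values are arbitrary):
import pandas as pd
from df2gspread import df2gspread as d2g
df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
# Insert at B2 and resize the worksheet to fit the frame exactly.
wks = d2g.upload(df, gfile='/reports/demo', wks_name='fit',
                 start_cell='B2', df_size=True)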
maybelinot/df2gspread | df2gspread/df2gspread.py | clean_worksheet | def clean_worksheet(wks, gfile_id, wks_name, credentials):
"""DOCS..."""
values = wks.get_all_values()
if values:
df_ = pd.DataFrame(index=range(len(values)),
columns=range(len(values[0])))
df_ = df_.fillna('')
wks = upload(df_, gfile_id, wks_name=wks_name,
col_names=False, row_names=False, clean=False,
credentials=credentials)
return wks | python | def clean_worksheet(wks, gfile_id, wks_name, credentials):
"""DOCS..."""
values = wks.get_all_values()
if values:
df_ = pd.DataFrame(index=range(len(values)),
columns=range(len(values[0])))
df_ = df_.fillna('')
wks = upload(df_, gfile_id, wks_name=wks_name,
col_names=False, row_names=False, clean=False,
credentials=credentials)
return wks | Overwrite every existing value in 'wks' with an empty string and return the cleaned worksheet. | https://github.com/maybelinot/df2gspread/blob/f4cef3800704aceff2ed08a623a594b558d44898/df2gspread/df2gspread.py#L150-L161 |
konstantinstadler/country_converter | country_converter/country_converter.py | agg_conc | def agg_conc(original_countries,
aggregates,
missing_countries='test',
merge_multiple_string='_&_',
log_missing_countries=None,
log_merge_multiple_strings=None,
coco=None,
as_dataframe='sparse',
original_countries_class=None):
""" Builds an aggregation concordance dict, vec or matrix
Parameters
----------
original_countries: list or str
List of countries to be aggregated; also accepts any valid column name of
CountryConverter.data
aggregates: list of dict or str
List of aggregation information. This can either be a dict mapping the
names of 'original_countries' to aggregates, or a valid column name of
CountryConverter.data. Aggregation happens in the order given in this
parameter. Thus, countries assigned to an aggregate are not re-assigned
by the following aggregation information.
missing_countries: str, boolean, None
Entry to fill in for countries in 'original_countries' which do not
appear in 'aggregates'. str: use the given name for all missing
countries. True: use the name in original_countries for missing
countries. False: skip these countries. None: use None for these
countries.
merge_multiple_string: str or None, optional
If multiple correspondence entries are given in one of the aggregates,
join them with the given string (default: '_&_'). To skip these entries,
pass None.
log_missing_countries: function, optional
This function is called with the country name if a country is in
'original_countries' but missing in all 'aggregates'.
For example, pass
lambda x: logging.error('Country {} missing'.format(x))
to log errors for such countries. Default: do nothing
log_merge_multiple_strings: function, optional
Function to call for logging multiple strings, see
log_missing_countries Default: do nothing
coco: instance of CountryConverter, optional
CountryConverter instance used for the conversion. Pass a custom one
if additional data is needed in addition to the custom country
converter file. If None (default), the bare CountryConverter is used
as_dataframe: boolean or str, optional
If False, output as OrderedDict. If True or str, output as pandas
dataframe. If str and 'full', output as a full matrix, otherwise only
two columns with the original and aggregated names are returned.
original_countries_class: str, optional
Valid column name of CountryConverter.data. This parameter is needed
if a list of countries is passed to 'original_countries' and strings
corresponding to data in CountryConverter.data are used subsequently.
Can be omitted otherwise.
Returns
-------
OrderedDict or DataFrame (defined by 'as_dataframe')
"""
if coco is None:
coco = CountryConverter()
if type(original_countries) is str:
original_countries_class = original_countries
original_countries = coco.data[original_countries].values
else:
original_countries_class = (original_countries_class or
coco._get_input_format_from_name(
original_countries[0]))
if type(aggregates) is not list:
aggregates = [aggregates]
correspond = OrderedDict.fromkeys(original_countries)
for agg in aggregates:
if type(agg) is str:
agg = coco.get_correspondance_dict(original_countries_class,
agg)
for country in original_countries:
if correspond.get(country) is None:
try:
entry = agg[country]
except KeyError:
entry = None
if type(entry) is list:
if 1 < len(entry):
if merge_multiple_string:
entry = merge_multiple_string.join([
str(e) for e in entry])
else:
entry = None
if log_merge_multiple_strings:
log_merge_multiple_strings(country)
else:
entry = entry[0]
correspond[country] = entry
for country in original_countries:
if correspond.get(country) is None:
if missing_countries is True:
correspond[country] = country
elif missing_countries is False:
del correspond[country]
else:
correspond[country] = missing_countries
if log_missing_countries:
log_missing_countries(country)
if as_dataframe:
correspond = pd.DataFrame.from_dict(
correspond, orient='index').reset_index()
correspond.columns = ['original', 'aggregated']
if ((type(as_dataframe) is str) and
(as_dataframe[0].lower() == 'f')):
_co_list = correspond.original
correspond['val'] = 1
correspond = correspond.set_index(
['original', 'aggregated']).unstack().fillna(0)['val']
correspond = correspond.loc[_co_list]
return correspond | python | def agg_conc(original_countries,
aggregates,
missing_countries='test',
merge_multiple_string='_&_',
log_missing_countries=None,
log_merge_multiple_strings=None,
coco=None,
as_dataframe='sparse',
original_countries_class=None):
""" Builds an aggregation concordance dict, vec or matrix
Parameters
----------
original_countries: list or str
List of countries to be aggregated; also accepts any valid column name of
CountryConverter.data
aggregates: list of dict or str
List of aggregation information. This can either be a dict mapping the
names of 'original_countries' to aggregates, or a valid column name of
CountryConverter.data. Aggregation happens in the order given in this
parameter. Thus, countries assigned to an aggregate are not re-assigned
by the following aggregation information.
missing_countries: str, boolean, None
Entry to fill in for countries in 'original_countries' which do not
appear in 'aggregates'. str: use the given name for all missing
countries. True: use the name in original_countries for missing
countries. False: skip these countries. None: use None for these
countries.
merge_multiple_string: str or None, optional
If multiple correspondence entries are given in one of the aggregates,
join them with the given string (default: '_&_'). To skip these entries,
pass None.
log_missing_countries: function, optional
This function is called with the country name if a country is in
'original_countries' but missing in all 'aggregates'.
For example, pass
lambda x: logging.error('Country {} missing'.format(x))
to log errors for such countries. Default: do nothing
log_merge_multiple_strings: function, optional
Function to call for logging multiple strings, see
log_missing_countries Default: do nothing
coco: instance of CountryConverter, optional
CountryConverter instance used for the conversion. Pass a custom one
if additional data is needed in addition to the custom country
converter file. If None (default), the bare CountryConverter is used
as_dataframe: boolean or str, optional
If False, output as OrderedDict. If True or str, output as pandas
dataframe. If str and 'full', output as a full matrix, otherwise only
two columns with the original and aggregated names are returned.
original_countries_class: str, optional
Valid column name of CountryConverter.data. This parameter is needed
if a list of countries is passed to 'original_countries' and strings
corresponding to data in CountryConverter.data are used subsequently.
Can be omitted otherwise.
Returns
-------
OrderedDict or DataFrame (defined by 'as_dataframe')
"""
if coco is None:
coco = CountryConverter()
if type(original_countries) is str:
original_countries_class = original_countries
original_countries = coco.data[original_countries].values
else:
original_countries_class = (original_countries_class or
coco._get_input_format_from_name(
original_countries[0]))
if type(aggregates) is not list:
aggregates = [aggregates]
correspond = OrderedDict.fromkeys(original_countries)
for agg in aggregates:
if type(agg) is str:
agg = coco.get_correspondance_dict(original_countries_class,
agg)
for country in original_countries:
if correspond.get(country) is None:
try:
entry = agg[country]
except KeyError:
entry = None
if type(entry) is list:
if 1 < len(entry):
if merge_multiple_string:
entry = merge_multiple_string.join([
str(e) for e in entry])
else:
entry = None
if log_merge_multiple_strings:
log_merge_multiple_strings(country)
else:
entry = entry[0]
correspond[country] = entry
for country in original_countries:
if correspond.get(country) is None:
if missing_countries is True:
correspond[country] = country
elif missing_countries is False:
del correspond[country]
else:
correspond[country] = missing_countries
if log_missing_countries:
log_missing_countries(country)
if as_dataframe:
correspond = pd.DataFrame.from_dict(
correspond, orient='index').reset_index()
correspond.columns = ['original', 'aggregated']
if ((type(as_dataframe) is str) and
(as_dataframe[0].lower() == 'f')):
_co_list = correspond.original
correspond['val'] = 1
correspond = correspond.set_index(
['original', 'aggregated']).unstack().fillna(0)['val']
correspond = correspond.loc[_co_list]
    return correspond | Builds an aggregation concordance dict, vector or matrix
Parameters
----------
original_countries: list or str
        List of countries to be aggregated; also accepts any valid column name of
CountryConverter.data
aggregates: list of dict or str
        List of aggregation information. This can either be a dict mapping the
        names of 'original_countries' to aggregates, or a valid column name of
        CountryConverter.data. Aggregation happens in the order given in this
        parameter. Thus, countries assigned to an aggregate are not re-assigned
        by subsequent aggregation information.
missing_countries: str, boolean, None
        Entry to fill in for countries in 'original_countries' which do not
        appear in 'aggregates'. str: use the given name for all missing
        countries; True: use the name in original_countries for missing
        countries; False: skip these countries; None: use None for these
        countries.
merge_multiple_string: str or None, optional
        If multiple correspondence entries are given in one of the aggregates,
        join them with the given string (default: '_&_'). To skip these entries,
        pass None.
log_missing_countries: function, optional
        This function is called with the country name if the country is in
'original_countries' but missing in all 'aggregates'.
For example, pass
lambda x: logging.error('Country {} missing'.format(x))
to log errors for such countries. Default: do nothing
log_merge_multiple_strings: function, optional
Function to call for logging multiple strings, see
        log_missing_countries. Default: do nothing
coco: instance of CountryConverter, optional
CountryConverter instance used for the conversion. Pass a custom one
if additional data is needed in addition to the custom country
converter file. If None (default), the bare CountryConverter is used
    as_dataframe: boolean or str, optional
If False, output as OrderedDict. If True or str, output as pandas
dataframe. If str and 'full', output as a full matrix, otherwise only
        two columns with the original and aggregated names are returned.
original_countries_class: str, optional
Valid column name of CountryConverter.data. This parameter is needed
        if a list of countries is passed to 'original_countries' and strings
corresponding to data in CountryConverter.data are used subsequently.
Can be omitted otherwise.
Returns
-------
OrderedDict or DataFrame (defined by 'as_dataframe') | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L18-L153 |
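A minimal usage sketch for agg_conc (names and the printed result are illustrative; it assumes the bundled country data provides the 'EU' and 'continent' columns and that agg_conc is exported at package level):

import logging
import country_converter as coco

# Map EU members to 'EU' first, then everything remaining to its continent.
mapping = coco.agg_conc(
    original_countries=['DEU', 'USA', 'CHN', 'FRA'],
    aggregates=['EU', 'continent'],
    missing_countries=True,
    log_missing_countries=lambda c: logging.warning('%s not aggregated', c),
    as_dataframe=False)
# e.g. OrderedDict([('DEU', 'EU'), ('USA', 'America'), ('CHN', 'Asia'), ('FRA', 'EU')])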
konstantinstadler/country_converter | country_converter/country_converter.py | match | def match(list_a, list_b, not_found='not_found', enforce_sublist=False,
country_data=COUNTRY_DATA_FILE, additional_data=None):
""" Matches the country names given in two lists into a dictionary.
    This function matches names given in list_a to the ones provided in list_b
using regular expressions defined in country_data.
Parameters
----------
list_a : list
Names of countries to identify
list_b : list
Master list of names for countries
not_found : str, optional
Fill in value for not found entries. If None, keep the input value
        (default: 'not_found')
enforce_sublist : boolean, optional
        If True, all values in the output dict are lists.
        If False (default), only multiple matches are lists; the rest are strings
country_data : Pandas DataFrame or path to data file (optional)
This is by default set to COUNTRY_DATA_FILE - the standard (tested)
country list for coco.
additional_data: (list of) Pandas DataFrames or data files (optional)
Additional data to include for a specific analysis.
This must be given in the same format as specified in the
country_data file. (utf-8 encoded tab separated data, same
column headers in all files)
Returns
-------
dict:
A dictionary with a key for every entry in list_a. The value
        corresponds to the matching entry in list_b if found. If there is
a 1:1 correspondence, the value is a str (if enforce_sublist is False),
otherwise multiple entries as list.
"""
if isinstance(list_a, str):
list_a = [list_a]
if isinstance(list_b, str):
list_b = [list_b]
if isinstance(list_a, tuple):
list_a = list(list_a)
if isinstance(list_b, tuple):
list_b = list(list_b)
coco = CountryConverter(country_data, additional_data)
name_dict_a = dict()
match_dict_a = dict()
for name_a in list_a:
name_dict_a[name_a] = []
match_dict_a[name_a] = []
for regex in coco.regexes:
if regex.search(name_a):
match_dict_a[name_a].append(regex)
if len(match_dict_a[name_a]) == 0:
logging.warning('Could not identify {} in list_a'.format(name_a))
_not_found_entry = name_a if not not_found else not_found
name_dict_a[name_a].append(_not_found_entry)
if not enforce_sublist:
name_dict_a[name_a] = name_dict_a[name_a][0]
continue
if len(match_dict_a[name_a]) > 1:
logging.warning(
'Multiple matches for name {} in list_a'.format(name_a))
for match_case in match_dict_a[name_a]:
b_matches = 0
for name_b in list_b:
if match_case.search(name_b):
b_matches += 1
name_dict_a[name_a].append(name_b)
if b_matches == 0:
logging.warning(
'Could not find any '
'correspondence for {} in list_b'.format(name_a))
_not_found_entry = name_a if not not_found else not_found
name_dict_a[name_a].append(_not_found_entry)
if b_matches > 1:
logging.warning('Multiple matches for '
'name {} in list_b'.format(name_a))
if not enforce_sublist and (len(name_dict_a[name_a]) == 1):
name_dict_a[name_a] = name_dict_a[name_a][0]
return name_dict_a | python | def match(list_a, list_b, not_found='not_found', enforce_sublist=False,
country_data=COUNTRY_DATA_FILE, additional_data=None):
""" Matches the country names given in two lists into a dictionary.
    This function matches names given in list_a to the ones provided in list_b
using regular expressions defined in country_data.
Parameters
----------
list_a : list
Names of countries to identify
list_b : list
Master list of names for countries
not_found : str, optional
Fill in value for not found entries. If None, keep the input value
        (default: 'not_found')
enforce_sublist : boolean, optional
        If True, all values in the output dict are lists.
        If False (default), only multiple matches are lists; the rest are strings
country_data : Pandas DataFrame or path to data file (optional)
This is by default set to COUNTRY_DATA_FILE - the standard (tested)
country list for coco.
additional_data: (list of) Pandas DataFrames or data files (optional)
Additional data to include for a specific analysis.
This must be given in the same format as specified in the
country_data file. (utf-8 encoded tab separated data, same
column headers in all files)
Returns
-------
dict:
A dictionary with a key for every entry in list_a. The value
        corresponds to the matching entry in list_b if found. If there is
a 1:1 correspondence, the value is a str (if enforce_sublist is False),
otherwise multiple entries as list.
"""
if isinstance(list_a, str):
list_a = [list_a]
if isinstance(list_b, str):
list_b = [list_b]
if isinstance(list_a, tuple):
list_a = list(list_a)
if isinstance(list_b, tuple):
list_b = list(list_b)
coco = CountryConverter(country_data, additional_data)
name_dict_a = dict()
match_dict_a = dict()
for name_a in list_a:
name_dict_a[name_a] = []
match_dict_a[name_a] = []
for regex in coco.regexes:
if regex.search(name_a):
match_dict_a[name_a].append(regex)
if len(match_dict_a[name_a]) == 0:
logging.warning('Could not identify {} in list_a'.format(name_a))
_not_found_entry = name_a if not not_found else not_found
name_dict_a[name_a].append(_not_found_entry)
if not enforce_sublist:
name_dict_a[name_a] = name_dict_a[name_a][0]
continue
if len(match_dict_a[name_a]) > 1:
logging.warning(
'Multiple matches for name {} in list_a'.format(name_a))
for match_case in match_dict_a[name_a]:
b_matches = 0
for name_b in list_b:
if match_case.search(name_b):
b_matches += 1
name_dict_a[name_a].append(name_b)
if b_matches == 0:
logging.warning(
'Could not find any '
'correspondence for {} in list_b'.format(name_a))
_not_found_entry = name_a if not not_found else not_found
name_dict_a[name_a].append(_not_found_entry)
if b_matches > 1:
logging.warning('Multiple matches for '
'name {} in list_b'.format(name_a))
if not enforce_sublist and (len(name_dict_a[name_a]) == 1):
name_dict_a[name_a] = name_dict_a[name_a][0]
return name_dict_a | Matches the country names given in two lists into a dictionary.
    This function matches names given in list_a to the ones provided in list_b
using regular expressions defined in country_data.
Parameters
----------
list_a : list
Names of countries to identify
list_b : list
Master list of names for countries
not_found : str, optional
Fill in value for not found entries. If None, keep the input value
        (default: 'not_found')
enforce_sublist : boolean, optional
        If True, all values in the output dict are lists.
        If False (default), only multiple matches are lists; the rest are strings
country_data : Pandas DataFrame or path to data file (optional)
This is by default set to COUNTRY_DATA_FILE - the standard (tested)
country list for coco.
additional_data: (list of) Pandas DataFrames or data files (optional)
Additional data to include for a specific analysis.
This must be given in the same format as specified in the
country_data file. (utf-8 encoded tab separated data, same
column headers in all files)
Returns
-------
dict:
A dictionary with a key for every entry in list_a. The value
        corresponds to the matching entry in list_b if found. If there is
a 1:1 correspondence, the value is a str (if enforce_sublist is False),
otherwise multiple entries as list. | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L156-L253 |
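A usage sketch for match (the lists are illustrative; the linking goes through the package's regex column, so spelling variants on both sides are tolerated):

import country_converter as coco

list_a = ['Germany', 'UK', 'Republic of Korea']
list_b = ['DEU - Germany', 'United Kingdom', 'Korea, Rep.']
print(coco.match(list_a, list_b))
# e.g. {'Germany': 'DEU - Germany', 'UK': 'United Kingdom',
#       'Republic of Korea': 'Korea, Rep.'}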
konstantinstadler/country_converter | country_converter/country_converter.py | convert | def convert(*args, **kargs):
""" Wrapper around CountryConverter.convert()
Uses the same parameters. This function has the same performance as
CountryConverter.convert for one call; for multiple calls it is better to
    instantiate a common CountryConverter (this avoids loading the source data
file multiple times).
Note
----
A lot of the functionality can also be done directly in Pandas DataFrames.
For example:
cc = CountryConverter()
names = ['USA', 'SWZ', 'PRI']
cc.data[cc.data['ISO3'].isin(names)][['ISO2', 'continent']]
Parameters
----------
names : str or list like
Countries in 'src' classification to convert to 'to' classification
src : str, optional
Source classification
to : str, optional
Output classification (valid str for an index of the
country data file), default: name_short
enforce_list : boolean, optional
If True, enforces the output to be list (if only one name was passed)
or to be a list of lists (if multiple names were passed). If False
(default), the output will be a string (if only one name was passed) or
a list of str and/or lists (str if a one to one matching, list
otherwise).
not_found : str, optional
Fill in value for not found entries. If None, keep the input value
(default: 'not found')
country_data : Pandas DataFrame or path to data file (optional)
This is by default set to COUNTRY_DATA_FILE - the standard (tested)
country list for coco.
additional_data: (list of) Pandas DataFrames or data files (optional)
Additional data to include for a specific analysis.
This must be given in the same format as specified in the
country_data_file. (utf-8 encoded tab separated data, same
column headers as in the general country data file)
Returns
-------
list or str, depending on enforce_list
"""
init = {'country_data': COUNTRY_DATA_FILE,
'additional_data': None,
'only_UNmember': False,
'include_obsolete': False}
init.update({kk: kargs.get(kk) for kk in init.keys() if kk in kargs})
coco = CountryConverter(**init)
kargs = {kk: ii for kk, ii in kargs.items() if kk not in init.keys()}
return coco.convert(*args, **kargs) | python | def convert(*args, **kargs):
""" Wrapper around CountryConverter.convert()
Uses the same parameters. This function has the same performance as
CountryConverter.convert for one call; for multiple calls it is better to
    instantiate a common CountryConverter (this avoids loading the source data
file multiple times).
Note
----
A lot of the functionality can also be done directly in Pandas DataFrames.
For example:
cc = CountryConverter()
names = ['USA', 'SWZ', 'PRI']
cc.data[cc.data['ISO3'].isin(names)][['ISO2', 'continent']]
Parameters
----------
names : str or list like
Countries in 'src' classification to convert to 'to' classification
src : str, optional
Source classification
to : str, optional
Output classification (valid str for an index of the
country data file), default: name_short
enforce_list : boolean, optional
If True, enforces the output to be list (if only one name was passed)
or to be a list of lists (if multiple names were passed). If False
(default), the output will be a string (if only one name was passed) or
a list of str and/or lists (str if a one to one matching, list
otherwise).
not_found : str, optional
Fill in value for not found entries. If None, keep the input value
(default: 'not found')
country_data : Pandas DataFrame or path to data file (optional)
This is by default set to COUNTRY_DATA_FILE - the standard (tested)
country list for coco.
additional_data: (list of) Pandas DataFrames or data files (optional)
Additional data to include for a specific analysis.
This must be given in the same format as specified in the
country_data_file. (utf-8 encoded tab separated data, same
column headers as in the general country data file)
Returns
-------
list or str, depending on enforce_list
"""
init = {'country_data': COUNTRY_DATA_FILE,
'additional_data': None,
'only_UNmember': False,
'include_obsolete': False}
init.update({kk: kargs.get(kk) for kk in init.keys() if kk in kargs})
coco = CountryConverter(**init)
kargs = {kk: ii for kk, ii in kargs.items() if kk not in init.keys()}
return coco.convert(*args, **kargs) | Wrapper around CountryConverter.convert()
Uses the same parameters. This function has the same performance as
CountryConverter.convert for one call; for multiple calls it is better to
    instantiate a common CountryConverter (this avoids loading the source data
file multiple times).
Note
----
A lot of the functionality can also be done directly in Pandas DataFrames.
For example:
cc = CountryConverter()
names = ['USA', 'SWZ', 'PRI']
cc.data[cc.data['ISO3'].isin(names)][['ISO2', 'continent']]
Parameters
----------
names : str or list like
Countries in 'src' classification to convert to 'to' classification
src : str, optional
Source classification
to : str, optional
Output classification (valid str for an index of the
country data file), default: name_short
enforce_list : boolean, optional
If True, enforces the output to be list (if only one name was passed)
or to be a list of lists (if multiple names were passed). If False
(default), the output will be a string (if only one name was passed) or
a list of str and/or lists (str if a one to one matching, list
otherwise).
not_found : str, optional
Fill in value for not found entries. If None, keep the input value
(default: 'not found')
country_data : Pandas DataFrame or path to data file (optional)
This is by default set to COUNTRY_DATA_FILE - the standard (tested)
country list for coco.
additional_data: (list of) Pandas DataFrames or data files (optional)
Additional data to include for a specific analysis.
This must be given in the same format as specified in the
country_data_file. (utf-8 encoded tab separated data, same
column headers as in the general country data file)
Returns
-------
list or str, depending on enforce_list | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L256-L317 |
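The one-shot call pattern; each name's source classification is inferred individually, so mixed inputs are fine:

import country_converter as coco

some_names = ['United Rep. of Tanzania', 'DE', 'Cape Verde', '788', 'Burma']
print(coco.convert(names=some_names, to='ISO3'))
# ['TZA', 'DEU', 'CPV', 'TUN', 'MMR']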
konstantinstadler/country_converter | country_converter/country_converter.py | _parse_arg | def _parse_arg(valid_classifications):
""" Command line parser for coco
Parameters
----------
valid_classifications: list
Available classifications, used for checking input parameters.
Returns
-------
args : ArgumentParser namespace
"""
parser = argparse.ArgumentParser(
description=('The country converter (coco): a Python package for '
'converting country names between '
'different classifications schemes. '
'Version: {}'.format(__version__)
                     ), prog='coco', usage=('%(prog)s names [--src --to]'))
parser.add_argument('names',
help=('List of countries to convert '
'(space separated, country names consisting of '
                              'multiple words must be put in quotation marks). '
'Possible classifications: ' +
', '.join(valid_classifications) +
'; NB: long, official and short are provided '
'as shortcuts for the names classifications'
), nargs='*')
parser.add_argument('-s', '--src', '--source', '-f', '--from',
help=('Classification of the names given, '
'(default: inferred from names)'))
parser.add_argument('-t', '--to',
                        help=('Required classification of the passed names '
                              '(default: "ISO3")'))
parser.add_argument('-o', '--output_sep',
                        help=('Separator for output names '
'(default: space), e.g. "," '))
parser.add_argument('-n', '--not_found',
default='not found',
                        help=('Fill in value for not found entries. '
'If "None" (string), keep the input value '
'(default: not found)'))
parser.add_argument('-a', '--additional_data',
                        help=('Data file with additional country data '
'(Same format as the original data file - '
'utf-8 encoded tab separated data, same '
'column headers as in the general country '
                              'data file; default: None)'))
args = parser.parse_args()
args.src = args.src or None
args.to = args.to or 'ISO3'
args.not_found = args.not_found if args.not_found != 'None' else None
args.output_sep = args.output_sep or ' '
return args | python | def _parse_arg(valid_classifications):
""" Command line parser for coco
Parameters
----------
valid_classifications: list
Available classifications, used for checking input parameters.
Returns
-------
args : ArgumentParser namespace
"""
parser = argparse.ArgumentParser(
description=('The country converter (coco): a Python package for '
'converting country names between '
'different classifications schemes. '
'Version: {}'.format(__version__)
                     ), prog='coco', usage=('%(prog)s names [--src --to]'))
parser.add_argument('names',
help=('List of countries to convert '
'(space separated, country names consisting of '
                              'multiple words must be put in quotation marks). '
'Possible classifications: ' +
', '.join(valid_classifications) +
'; NB: long, official and short are provided '
'as shortcuts for the names classifications'
), nargs='*')
parser.add_argument('-s', '--src', '--source', '-f', '--from',
help=('Classification of the names given, '
'(default: inferred from names)'))
parser.add_argument('-t', '--to',
                        help=('Required classification of the passed names '
                              '(default: "ISO3")'))
parser.add_argument('-o', '--output_sep',
                        help=('Separator for output names '
'(default: space), e.g. "," '))
parser.add_argument('-n', '--not_found',
default='not found',
                        help=('Fill in value for not found entries. '
'If "None" (string), keep the input value '
'(default: not found)'))
parser.add_argument('-a', '--additional_data',
                        help=('Data file with additional country data '
'(Same format as the original data file - '
'utf-8 encoded tab separated data, same '
'column headers as in the general country '
                              'data file; default: None)'))
args = parser.parse_args()
args.src = args.src or None
args.to = args.to or 'ISO3'
args.not_found = args.not_found if args.not_found != 'None' else None
args.output_sep = args.output_sep or ' '
return args | Command line parser for coco
Parameters
----------
valid_classifications: list
Available classifications, used for checking input parameters.
Returns
-------
args : ArgumentParser namespace | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L805-L863 |
konstantinstadler/country_converter | country_converter/country_converter.py | main | def main():
""" Main entry point - used for command line call
"""
args = _parse_arg(CountryConverter().valid_class)
coco = CountryConverter(additional_data=args.additional_data)
converted_names = coco.convert(
names=args.names,
src=args.src,
to=args.to,
enforce_list=False,
not_found=args.not_found)
print(args.output_sep.join(
[str(etr) for etr in converted_names] if
isinstance(converted_names, list) else [str(converted_names)])) | python | def main():
""" Main entry point - used for command line call
"""
args = _parse_arg(CountryConverter().valid_class)
coco = CountryConverter(additional_data=args.additional_data)
converted_names = coco.convert(
names=args.names,
src=args.src,
to=args.to,
enforce_list=False,
not_found=args.not_found)
print(args.output_sep.join(
[str(etr) for etr in converted_names] if
isinstance(converted_names, list) else [str(converted_names)])) | Main entry point - used for command line call | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L866-L880 |
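The entry point can be exercised from Python by faking the command line (argv values are illustrative):

import sys
from country_converter.country_converter import main

sys.argv = ['coco', 'DEU', 'EGY', '-t', 'ISO2']   # simulates: coco DEU EGY -t ISO2
main()   # prints: DE EG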
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter._separate_exclude_cases | def _separate_exclude_cases(name, exclude_prefix):
    """ Splits the excluded countries/regions from the given name.
Parameters
----------
name : str
Name of the country/region to convert.
exclude_prefix : list of valid regex strings
List of indicators which negate the subsequent country/region.
These prefixes and everything following will not be converted.
E.g. 'Asia excluding China' becomes 'Asia' and
'China excluding Hong Kong' becomes 'China' prior to conversion
Returns
-------
dict with
'clean_name' : str
as name without anything following exclude_prefix
'excluded_countries' : list
list of excluded countries
"""
excluder = re.compile('|'.join(exclude_prefix))
split_entries = excluder.split(name)
return {'clean_name': split_entries[0],
            'excluded_countries': split_entries[1:]} | python | def _separate_exclude_cases(name, exclude_prefix):
    """ Splits the excluded countries/regions from the given name.
Parameters
----------
name : str
Name of the country/region to convert.
exclude_prefix : list of valid regex strings
List of indicators which negate the subsequent country/region.
These prefixes and everything following will not be converted.
E.g. 'Asia excluding China' becomes 'Asia' and
'China excluding Hong Kong' becomes 'China' prior to conversion
Returns
-------
dict with
'clean_name' : str
as name without anything following exclude_prefix
'excluded_countries' : list
list of excluded countries
"""
excluder = re.compile('|'.join(exclude_prefix))
split_entries = excluder.split(name)
return {'clean_name': split_entries[0],
                'excluded_countries': split_entries[1:]} | Splits the excluded countries/regions from the given name.
Parameters
----------
name : str
Name of the country/region to convert.
exclude_prefix : list of valid regex strings
List of indicators which negate the subsequent country/region.
These prefixes and everything following will not be converted.
E.g. 'Asia excluding China' becomes 'Asia' and
'China excluding Hong Kong' becomes 'China' prior to conversion
Returns
-------
dict with
'clean_name' : str
            the name without anything following an exclude_prefix match
'excluded_countries' : list
list of excluded countries | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L332-L361 |
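The split relies on re.split over the alternation of exclude prefixes; with the default 'excl\\w.*' the trailing '.*' also swallows the excluded names, so the remainder list holds empty strings:

import re

exclude_prefix = ['excl\\w.*', 'without', 'w/o']
excluder = re.compile('|'.join(exclude_prefix))
parts = excluder.split('Asia excluding China')
print(repr(parts[0]))   # 'Asia '  (clean name; the trailing space is tolerated downstream)
print(parts[1:])        # ['']    (the .* consumed 'excluding China')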
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter.convert | def convert(self, names, src=None, to='ISO3', enforce_list=False,
not_found='not found',
exclude_prefix=['excl\\w.*', 'without', 'w/o']):
""" Convert names from a list to another list.
Note
----
A lot of the functionality can also be done directly in Pandas
DataFrames.
For example:
coco = CountryConverter()
names = ['USA', 'SWZ', 'PRI']
coco.data[coco.data['ISO3'].isin(names)][['ISO2', 'continent']]
Parameters
----------
names : str or list like
Countries in 'src' classification to convert
to 'to' classification
src : str, optional
Source classification. If None (default), each passed name is
        checked whether it is a number (assumed ISOnumeric) or 2 (ISO2) or
3 (ISO3) characters long; for longer names 'regex' is assumed.
to : str, optional
Output classification (valid index of the country_data file),
default: ISO3
enforce_list : boolean, optional
If True, enforces the output to be list (if only one name was
passed) or to be a list of lists (if multiple names were passed).
If False (default), the output will be a string (if only one name
was passed) or a list of str and/or lists (str if a one to one
matching, list otherwise).
not_found : str, optional
        Fill in value for not found entries. If None, keep the input value
(default: 'not found')
exclude_prefix : list of valid regex strings
List of indicators which negate the subsequent country/region.
These prefixes and everything following will not be converted.
E.g. 'Asia excluding China' becomes 'Asia' and
'China excluding Hong Kong' becomes 'China' prior to conversion
        Default: ['excl\\w.*', 'without', 'w/o']
Returns
-------
list or str, depending on enforce_list
"""
        # The tuple to list conversion is necessary for the Matlab interface
names = list(names) if (
isinstance(names, tuple) or
isinstance(names, set)) else names
names = names if isinstance(names, list) else [names]
names = [str(n) for n in names]
outlist = names.copy()
to = [self._validate_input_para(to, self.data.columns)]
exclude_split = {name: self._separate_exclude_cases(name,
exclude_prefix)
for name in names}
for ind_names, current_name in enumerate(names):
spec_name = exclude_split[current_name]['clean_name']
if src is None:
src_format = self._get_input_format_from_name(spec_name)
else:
src_format = self._validate_input_para(src, self.data.columns)
if src_format.lower() == 'regex':
result_list = []
for ind_regex, ccregex in enumerate(self.regexes):
if ccregex.search(spec_name):
result_list.append(
                        self.data[to].iloc[ind_regex].values[0])  # .ix was removed in pandas >= 1.0
if len(result_list) > 1:
                logging.warning('More than one regular expression '
'match for {}'.format(spec_name))
else:
_match_col = self.data[src_format].astype(
                str).str.replace('\\..*', '', regex=True)
result_list = [etr[0] for etr in
self.data[_match_col.str.contains(
'^' + spec_name + '$', flags=re.IGNORECASE,
na=False)][to].values]
if len(result_list) == 0:
logging.warning(
'{} not found in {}'.format(spec_name, src_format))
_fillin = not_found or spec_name
outlist[ind_names] = [_fillin] if enforce_list else _fillin
else:
outlist[ind_names] = []
for etr in result_list:
try:
conv_etr = int(etr)
except ValueError:
conv_etr = etr
outlist[ind_names].append(conv_etr)
if len(outlist[ind_names]) == 1 and enforce_list is False:
outlist[ind_names] = outlist[ind_names][0]
if (len(outlist) == 1) and not enforce_list:
return outlist[0]
else:
return outlist | python | def convert(self, names, src=None, to='ISO3', enforce_list=False,
not_found='not found',
exclude_prefix=['excl\\w.*', 'without', 'w/o']):
""" Convert names from a list to another list.
Note
----
A lot of the functionality can also be done directly in Pandas
DataFrames.
For example:
coco = CountryConverter()
names = ['USA', 'SWZ', 'PRI']
coco.data[coco.data['ISO3'].isin(names)][['ISO2', 'continent']]
Parameters
----------
names : str or list like
Countries in 'src' classification to convert
to 'to' classification
src : str, optional
Source classification. If None (default), each passed name is
        checked whether it is a number (assumed ISOnumeric) or 2 (ISO2) or
3 (ISO3) characters long; for longer names 'regex' is assumed.
to : str, optional
Output classification (valid index of the country_data file),
default: ISO3
enforce_list : boolean, optional
If True, enforces the output to be list (if only one name was
passed) or to be a list of lists (if multiple names were passed).
If False (default), the output will be a string (if only one name
was passed) or a list of str and/or lists (str if a one to one
matching, list otherwise).
not_found : str, optional
        Fill in value for not found entries. If None, keep the input value
(default: 'not found')
exclude_prefix : list of valid regex strings
List of indicators which negate the subsequent country/region.
These prefixes and everything following will not be converted.
E.g. 'Asia excluding China' becomes 'Asia' and
'China excluding Hong Kong' becomes 'China' prior to conversion
        Default: ['excl\\w.*', 'without', 'w/o']
Returns
-------
list or str, depending on enforce_list
"""
        # The tuple to list conversion is necessary for the Matlab interface
names = list(names) if (
isinstance(names, tuple) or
isinstance(names, set)) else names
names = names if isinstance(names, list) else [names]
names = [str(n) for n in names]
outlist = names.copy()
to = [self._validate_input_para(to, self.data.columns)]
exclude_split = {name: self._separate_exclude_cases(name,
exclude_prefix)
for name in names}
for ind_names, current_name in enumerate(names):
spec_name = exclude_split[current_name]['clean_name']
if src is None:
src_format = self._get_input_format_from_name(spec_name)
else:
src_format = self._validate_input_para(src, self.data.columns)
if src_format.lower() == 'regex':
result_list = []
for ind_regex, ccregex in enumerate(self.regexes):
if ccregex.search(spec_name):
result_list.append(
                        self.data[to].iloc[ind_regex].values[0])  # .ix was removed in pandas >= 1.0
if len(result_list) > 1:
                logging.warning('More than one regular expression '
'match for {}'.format(spec_name))
else:
_match_col = self.data[src_format].astype(
                str).str.replace('\\..*', '', regex=True)
result_list = [etr[0] for etr in
self.data[_match_col.str.contains(
'^' + spec_name + '$', flags=re.IGNORECASE,
na=False)][to].values]
if len(result_list) == 0:
logging.warning(
'{} not found in {}'.format(spec_name, src_format))
_fillin = not_found or spec_name
outlist[ind_names] = [_fillin] if enforce_list else _fillin
else:
outlist[ind_names] = []
for etr in result_list:
try:
conv_etr = int(etr)
except ValueError:
conv_etr = etr
outlist[ind_names].append(conv_etr)
if len(outlist[ind_names]) == 1 and enforce_list is False:
outlist[ind_names] = outlist[ind_names][0]
if (len(outlist) == 1) and not enforce_list:
return outlist[0]
else:
return outlist | Convert names from a list to another list.
Note
----
A lot of the functionality can also be done directly in Pandas
DataFrames.
For example:
coco = CountryConverter()
names = ['USA', 'SWZ', 'PRI']
coco.data[coco.data['ISO3'].isin(names)][['ISO2', 'continent']]
Parameters
----------
names : str or list like
Countries in 'src' classification to convert
to 'to' classification
src : str, optional
Source classification. If None (default), each passed name is
        checked whether it is a number (assumed ISOnumeric) or 2 (ISO2) or
3 (ISO3) characters long; for longer names 'regex' is assumed.
to : str, optional
Output classification (valid index of the country_data file),
default: ISO3
enforce_list : boolean, optional
If True, enforces the output to be list (if only one name was
passed) or to be a list of lists (if multiple names were passed).
If False (default), the output will be a string (if only one name
was passed) or a list of str and/or lists (str if a one to one
matching, list otherwise).
not_found : str, optional
        Fill in value for not found entries. If None, keep the input value
(default: 'not found')
exclude_prefix : list of valid regex strings
List of indicators which negate the subsequent country/region.
These prefixes and everything following will not be converted.
E.g. 'Asia excluding China' becomes 'Asia' and
'China excluding Hong Kong' becomes 'China' prior to conversion
        Default: ['excl\\w.*', 'without', 'w/o']
Returns
-------
list or str, depending on enforce_list | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L445-L561 |
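Reusing one CountryConverter instance avoids re-reading the data file on every call; a sketch (printed values are what the bundled data should yield):

import country_converter as coco

cc = coco.CountryConverter()
print(cc.convert(['chn', 'Taiwan'], to='name_official'))  # ISO3 matching is case-insensitive
print(cc.convert('36', to='name_short'))                  # numeric -> ISOnumeric -> 'Australia'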
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter.EU28as | def EU28as(self, to='name_short'):
"""
Return EU28 countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
        if isinstance(to, str):
to = [to]
return self.data[self.data.EU < 2015][to] | python | def EU28as(self, to='name_short'):
"""
Return EU28 countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
        if isinstance(to, str):
to = [to]
return self.data[self.data.EU < 2015][to] | Return EU28 countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L563-L580 |
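The membership getters here and below (EU28as, EU27as, OECDas, UNas, obsoleteas) all follow the same pattern of filtering self.data on a year or flag column; a usage sketch for this one:

import country_converter as coco

cc = coco.CountryConverter()
eu = cc.EU28as('ISO3')   # one-column DataFrame of the 28 member states
print(len(eu))           # 28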
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter.EU27as | def EU27as(self, to='name_short'):
"""
Return EU27 countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.EU < 2013][to] | python | def EU27as(self, to='name_short'):
"""
Return EU27 countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.EU < 2013][to] | Return EU27 countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L582-L599 |
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter.OECDas | def OECDas(self, to='name_short'):
"""
Return OECD member states in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.OECD > 0][to] | python | def OECDas(self, to='name_short'):
"""
Return OECD member states in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.OECD > 0][to] | Return OECD member states in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L601-L618 |
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter.UNas | def UNas(self, to='name_short'):
"""
Return UN member states in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.UNmember > 0][to] | python | def UNas(self, to='name_short'):
"""
Return UN member states in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.UNmember > 0][to] | Return UN member states in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L620-L637 |
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter.obsoleteas | def obsoleteas(self, to='name_short'):
"""
Return obsolete countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.obsolete > 0][to] | python | def obsoleteas(self, to='name_short'):
"""
Return obsolete countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame
"""
if isinstance(to, str):
to = [to]
return self.data[self.data.obsolete > 0][to] | Return obsolete countries in the specified classification
Parameters
----------
to : str, optional
Output classification (valid str for an index of
country_data file), default: name_short
Returns
-------
Pandas DataFrame | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L639-L656 |
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter.get_correspondance_dict | def get_correspondance_dict(self, classA, classB,
restrict=None,
                                replace_numeric=True):
        """ Returns a correspondence between classification A and B as dict
Parameters
----------
classA: str
Valid classification (column name of data)
classB: str
Valid classification (column name of data).
        restrict: boolean vector of the same length as cc.data, optional
where cc is the name of the CountryConverter instance. Used to
restrict the data sheet if necessary. E.g. to convert to countries
which were OECD members before 1970 use
cc.get_correspondance_dict('ISO3', 'OECD', restrict=cc.data.OECD <
1970)
replace_numeric: boolean, optional
If True (default) replace numeric values with the column header.
            This can be used to get a correspondence to, for example, 'OECD'
instead of to the OECD membership years. Set to False if the actual
numbers are required (as for UNcode).
Returns
-------
dict with
keys: based on classA
            items: list of corresponding entries in classB or None
"""
result = {nn: None for nn in self.data[classA].values}
if restrict is None:
df = self.data.copy()
else:
df = self.data[restrict].copy()
if replace_numeric and df[classB].dtype.kind in 'bifc':
df.loc[~df[classB].isnull(), classB] = classB
df.loc[df[classB].isnull(), classB] = None
result.update(df.groupby(classA)
.aggregate(lambda x: list(x.unique()))
.to_dict()[classB])
return result | python | def get_correspondance_dict(self, classA, classB,
restrict=None,
                                replace_numeric=True):
        """ Returns a correspondence between classification A and B as dict
Parameters
----------
classA: str
Valid classification (column name of data)
classB: str
Valid classification (column name of data).
        restrict: boolean vector of the same length as cc.data, optional
where cc is the name of the CountryConverter instance. Used to
restrict the data sheet if necessary. E.g. to convert to countries
which were OECD members before 1970 use
cc.get_correspondance_dict('ISO3', 'OECD', restrict=cc.data.OECD <
1970)
replace_numeric: boolean, optional
If True (default) replace numeric values with the column header.
            This can be used to get a correspondence to, for example, 'OECD'
instead of to the OECD membership years. Set to False if the actual
numbers are required (as for UNcode).
Returns
-------
dict with
keys: based on classA
            items: list of corresponding entries in classB or None
"""
result = {nn: None for nn in self.data[classA].values}
if restrict is None:
df = self.data.copy()
else:
df = self.data[restrict].copy()
if replace_numeric and df[classB].dtype.kind in 'bifc':
df.loc[~df[classB].isnull(), classB] = classB
df.loc[df[classB].isnull(), classB] = None
result.update(df.groupby(classA)
.aggregate(lambda x: list(x.unique()))
.to_dict()[classB])
        return result | Returns a correspondence between classification A and B as dict
Parameters
----------
classA: str
Valid classification (column name of data)
classB: str
Valid classification (column name of data).
        restrict: boolean vector of the same length as cc.data, optional
where cc is the name of the CountryConverter instance. Used to
restrict the data sheet if necessary. E.g. to convert to countries
which were OECD members before 1970 use
cc.get_correspondance_dict('ISO3', 'OECD', restrict=cc.data.OECD <
1970)
replace_numeric: boolean, optional
If True (default) replace numeric values with the column header.
            This can be used to get a correspondence to, for example, 'OECD'
instead of to the OECD membership years. Set to False if the actual
numbers are required (as for UNcode).
Returns
-------
dict with
keys: based on classA
            items: list of corresponding entries in classB or None | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L691-L740 |
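A usage sketch mirroring the docstring's restrict example:

import country_converter as coco

cc = coco.CountryConverter()
corr = cc.get_correspondance_dict('ISO3', 'continent')
print(corr['DEU'])   # ['Europe']
# Early OECD members only, keeping the membership years as numbers:
early = cc.get_correspondance_dict('ISO3', 'OECD',
                                   restrict=cc.data.OECD < 1970,
                                   replace_numeric=False)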
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter._validate_input_para | def _validate_input_para(self, para, column_names):
    """ Convert the input classification para to the correct df column name
Parameters
----------
para : string
column_names : list of strings
Returns
-------
validated_para : string
Converted to the case used in the country file
"""
lower_case_valid_class = [et.lower() for et in self.valid_class]
alt_valid_names = {
'name_short': ['short', 'short_name', 'name', 'names'],
'name_official': ['official', 'long_name', 'long'],
'UNcode': ['un', 'unnumeric'],
'ISOnumeric': ['isocode'],
}
for item in alt_valid_names.items():
if para.lower() in item[1]:
para = item[0]
try:
validated_para = self.valid_class[
lower_case_valid_class.index(para.lower())]
except ValueError:
raise KeyError(
'{} is not a valid country classification'.format(para))
        return validated_para | python | def _validate_input_para(self, para, column_names):
    """ Convert the input classification para to the correct df column name
Parameters
----------
para : string
column_names : list of strings
Returns
-------
validated_para : string
Converted to the case used in the country file
"""
lower_case_valid_class = [et.lower() for et in self.valid_class]
alt_valid_names = {
'name_short': ['short', 'short_name', 'name', 'names'],
'name_official': ['official', 'long_name', 'long'],
'UNcode': ['un', 'unnumeric'],
'ISOnumeric': ['isocode'],
}
for item in alt_valid_names.items():
if para.lower() in item[1]:
para = item[0]
try:
validated_para = self.valid_class[
lower_case_valid_class.index(para.lower())]
except ValueError:
raise KeyError(
'{} is not a valid country classification'.format(para))
        return validated_para | Convert the input classification para to the correct df column name
Parameters
----------
para : string
column_names : list of strings
Returns
-------
validated_para : string
Converted to the case used in the country file | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L742-L777 |
konstantinstadler/country_converter | country_converter/country_converter.py | CountryConverter._get_input_format_from_name | def _get_input_format_from_name(self, name):
""" Determines the input format based on the given country name
Parameters
----------
name : string
Returns
-------
string : valid input format
"""
try:
int(name)
src_format = 'ISOnumeric'
except ValueError:
if len(name) == 2:
src_format = 'ISO2'
elif len(name) == 3:
src_format = 'ISO3'
else:
src_format = 'regex'
return src_format | python | def _get_input_format_from_name(self, name):
""" Determines the input format based on the given country name
Parameters
----------
name : string
Returns
-------
string : valid input format
"""
try:
int(name)
src_format = 'ISOnumeric'
except ValueError:
if len(name) == 2:
src_format = 'ISO2'
elif len(name) == 3:
src_format = 'ISO3'
else:
src_format = 'regex'
return src_format | Determines the input format based on the given country name
Parameters
----------
name : string
Returns
-------
string : valid input format | https://github.com/konstantinstadler/country_converter/blob/b78622dfa6700480df632fe061d6837b96f31b71/country_converter/country_converter.py#L779-L802 |
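The inference rule stated as checks (a private helper, called here only for illustration):

import country_converter as coco

cc = coco.CountryConverter()
assert cc._get_input_format_from_name('276') == 'ISOnumeric'
assert cc._get_input_format_from_name('DE') == 'ISO2'
assert cc._get_input_format_from_name('DEU') == 'ISO3'
assert cc._get_input_format_from_name('Germany') == 'regex'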
matplotlib/viscm | viscm/bezierbuilder.py | Bernstein | def Bernstein(n, k):
"""Bernstein polynomial.
"""
coeff = binom(n, k)
def _bpoly(x):
return coeff * x ** k * (1 - x) ** (n - k)
return _bpoly | python | def Bernstein(n, k):
"""Bernstein polynomial.
"""
coeff = binom(n, k)
def _bpoly(x):
return coeff * x ** k * (1 - x) ** (n - k)
return _bpoly | Bernstein polynomial. | https://github.com/matplotlib/viscm/blob/cb31d0a6b95bcb23fd8f48d23e28e415db5ddb7c/viscm/bezierbuilder.py#L299-L308 |
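The Bernstein basis B(n, k)(x) = binom(n, k) * x**k * (1 - x)**(n - k) is a partition of unity on [0, 1] by the binomial theorem; a quick numerical check (assuming binom comes from scipy.special, as the module's import suggests):

import numpy as np
from scipy.special import binom

n = 3
x = np.linspace(0, 1, 101)
total = sum(binom(n, k) * x**k * (1 - x)**(n - k) for k in range(n + 1))
assert np.allclose(total, 1.0)   # the basis functions sum to one everywhere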
matplotlib/viscm | viscm/bezierbuilder.py | Bezier | def Bezier(points, at):
"""Build Bézier curve from points.
Deprecated. CatmulClark builds nicer splines
"""
at = np.asarray(at)
at_flat = at.ravel()
N = len(points)
curve = np.zeros((at_flat.shape[0], 2))
for ii in range(N):
curve += np.outer(Bernstein(N - 1, ii)(at_flat), points[ii])
return curve.reshape(at.shape + (2,)) | python | def Bezier(points, at):
"""Build Bézier curve from points.
Deprecated. CatmulClark builds nicer splines
"""
at = np.asarray(at)
at_flat = at.ravel()
N = len(points)
curve = np.zeros((at_flat.shape[0], 2))
for ii in range(N):
curve += np.outer(Bernstein(N - 1, ii)(at_flat), points[ii])
return curve.reshape(at.shape + (2,)) | Build Bézier curve from points.
Deprecated. CatmulClark builds nicer splines | https://github.com/matplotlib/viscm/blob/cb31d0a6b95bcb23fd8f48d23e28e415db5ddb7c/viscm/bezierbuilder.py#L311-L321 |
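A quadratic sketch using the two functions above (control points are illustrative); a Bézier curve interpolates its first and last control points:

import numpy as np

points = np.array([[0.0, 0.0], [0.5, 1.0], [1.0, 0.0]])
curve = Bezier(points, np.linspace(0, 1, 50))   # shape (50, 2)
assert np.allclose(curve[0], points[0])
assert np.allclose(curve[-1], points[-1])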
spyder-ide/pywinpty | winpty/ptyprocess.py | _read_in_thread | def _read_in_thread(address, pty, blocking):
"""Read data from the pty in a thread.
"""
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(address)
while 1:
data = pty.read(4096, blocking=blocking)
if not data and not pty.isalive():
while not data and not pty.iseof():
data += pty.read(4096, blocking=blocking)
if not data:
try:
client.send(b'')
except socket.error:
pass
break
try:
client.send(data)
except socket.error:
break
client.close() | python | def _read_in_thread(address, pty, blocking):
"""Read data from the pty in a thread.
"""
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(address)
while 1:
data = pty.read(4096, blocking=blocking)
if not data and not pty.isalive():
while not data and not pty.iseof():
data += pty.read(4096, blocking=blocking)
if not data:
try:
client.send(b'')
except socket.error:
pass
break
try:
client.send(data)
except socket.error:
break
client.close() | Read data from the pty in a thread. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L325-L349 |
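A sketch of the socket-bridge pattern this helper implements; the server setup shown is illustrative, not the actual PtyProcess internals, and 'pty' is assumed to be an already-spawned winpty PTY instance:

import socket
import threading

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('127.0.0.1', 0))      # let the OS pick a free port
server.listen(1)
address = server.getsockname()
threading.Thread(target=_read_in_thread,
                 args=(address, pty, True), daemon=True).start()
fileobj, _ = server.accept()       # recv() on this socket yields the child's output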
spyder-ide/pywinpty | winpty/ptyprocess.py | PtyProcess.spawn | def spawn(cls, argv, cwd=None, env=None, dimensions=(24, 80)):
"""Start the given command in a child process in a pseudo terminal.
    This does all the setting up of the pty, and returns an instance of
PtyProcess.
    Dimensions of the pseudoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be
used.
"""
if isinstance(argv, str):
argv = shlex.split(argv, posix=False)
if not isinstance(argv, (list, tuple)):
raise TypeError("Expected a list or tuple for argv, got %r" % argv)
# Shallow copy of argv so we can modify it
argv = argv[:]
command = argv[0]
env = env or os.environ
path = env.get('PATH', os.defpath)
command_with_path = which(command, path=path)
if command_with_path is None:
raise FileNotFoundError(
'The command was not found or was not ' +
'executable: %s.' % command
)
command = command_with_path
argv[0] = command
cmdline = ' ' + subprocess.list2cmdline(argv[1:])
cwd = cwd or os.getcwd()
proc = PTY(dimensions[1], dimensions[0])
    # Create the environment string.
envStrs = []
for (key, value) in env.items():
envStrs.append('%s=%s' % (key, value))
env = '\0'.join(envStrs) + '\0'
if PY2:
command = _unicode(command)
cwd = _unicode(cwd)
cmdline = _unicode(cmdline)
env = _unicode(env)
if len(argv) == 1:
proc.spawn(command, cwd=cwd, env=env)
else:
proc.spawn(command, cwd=cwd, env=env, cmdline=cmdline)
inst = cls(proc)
inst._winsize = dimensions
# Set some informational attributes
inst.argv = argv
if env is not None:
inst.env = env
if cwd is not None:
inst.launch_dir = cwd
return inst | python | def spawn(cls, argv, cwd=None, env=None, dimensions=(24, 80)):
"""Start the given command in a child process in a pseudo terminal.
    This does all the setting up of the pty, and returns an instance of
PtyProcess.
    Dimensions of the pseudoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be
used.
"""
if isinstance(argv, str):
argv = shlex.split(argv, posix=False)
if not isinstance(argv, (list, tuple)):
raise TypeError("Expected a list or tuple for argv, got %r" % argv)
# Shallow copy of argv so we can modify it
argv = argv[:]
command = argv[0]
env = env or os.environ
path = env.get('PATH', os.defpath)
command_with_path = which(command, path=path)
if command_with_path is None:
raise FileNotFoundError(
'The command was not found or was not ' +
'executable: %s.' % command
)
command = command_with_path
argv[0] = command
cmdline = ' ' + subprocess.list2cmdline(argv[1:])
cwd = cwd or os.getcwd()
proc = PTY(dimensions[1], dimensions[0])
    # Create the environment string.
envStrs = []
for (key, value) in env.items():
envStrs.append('%s=%s' % (key, value))
env = '\0'.join(envStrs) + '\0'
if PY2:
command = _unicode(command)
cwd = _unicode(cwd)
cmdline = _unicode(cmdline)
env = _unicode(env)
if len(argv) == 1:
proc.spawn(command, cwd=cwd, env=env)
else:
proc.spawn(command, cwd=cwd, env=env, cmdline=cmdline)
inst = cls(proc)
inst._winsize = dimensions
# Set some informational attributes
inst.argv = argv
if env is not None:
inst.env = env
if cwd is not None:
inst.launch_dir = cwd
return inst | Start the given command in a child process in a pseudo terminal.
    This does all the setting up of the pty, and returns an instance of
PtyProcess.
    Dimensions of the pseudoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be
used. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L63-L125 |
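A usage sketch, assuming a Windows host with cmd.exe on PATH (the first lines read back are the terminal's own banner and echo):

from winpty.ptyprocess import PtyProcess

proc = PtyProcess.spawn('cmd.exe', dimensions=(24, 80))
proc.write('echo hello\r\n')
print(proc.readline())
proc.close()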
spyder-ide/pywinpty | winpty/ptyprocess.py | PtyProcess.close | def close(self, force=False):
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores
SIGINT)."""
if not self.closed:
self.pty.close()
self.fileobj.close()
self._server.close()
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise IOError('Could not terminate the child.')
self.fd = -1
self.closed = True
del self.pty
self.pty = None | python | def close(self, force=False):
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores
SIGINT)."""
if not self.closed:
self.pty.close()
self.fileobj.close()
self._server.close()
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise IOError('Could not terminate the child.')
self.fd = -1
self.closed = True
del self.pty
self.pty = None | This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores
SIGINT). | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L138-L156 |
spyder-ide/pywinpty | winpty/ptyprocess.py | PtyProcess.read | def read(self, size=1024):
"""Read and return at most ``size`` characters from the pty.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
"""
data = self.fileobj.recv(size)
if not data:
self.flag_eof = True
raise EOFError('Pty is closed')
return self.decoder.decode(data, final=False) | python | def read(self, size=1024):
"""Read and return at most ``size`` characters from the pty.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
"""
data = self.fileobj.recv(size)
if not data:
self.flag_eof = True
raise EOFError('Pty is closed')
return self.decoder.decode(data, final=False) | Read and return at most ``size`` characters from the pty.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L183-L194 |
spyder-ide/pywinpty | winpty/ptyprocess.py | PtyProcess.readline | def readline(self):
        """Read one line from the pseudoterminal as a string.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
"""
buf = []
while 1:
try:
ch = self.read(1)
except EOFError:
return ''.join(buf)
buf.append(ch)
if ch == '\n':
                return ''.join(buf) | python | def readline(self):
        """Read one line from the pseudoterminal as a string.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
"""
buf = []
while 1:
try:
ch = self.read(1)
except EOFError:
return ''.join(buf)
buf.append(ch)
if ch == '\n':
                return ''.join(buf) | Read one line from the pseudoterminal as a string.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L196-L210 |
spyder-ide/pywinpty | winpty/ptyprocess.py | PtyProcess.write | def write(self, s):
"""Write the string ``s`` to the pseudoterminal.
Returns the number of bytes written.
"""
if not self.isalive():
raise EOFError('Pty is closed')
if PY2:
s = _unicode(s)
success, nbytes = self.pty.write(s)
if not success:
raise IOError('Write failed')
return nbytes | python | def write(self, s):
"""Write the string ``s`` to the pseudoterminal.
Returns the number of bytes written.
"""
if not self.isalive():
raise EOFError('Pty is closed')
if PY2:
s = _unicode(s)
success, nbytes = self.pty.write(s)
if not success:
raise IOError('Write failed')
return nbytes | Write the string ``s`` to the pseudoterminal.
Returns the number of bytes written. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L212-L225 |
spyder-ide/pywinpty | winpty/ptyprocess.py | PtyProcess.terminate | def terminate(self, force=False):
"""This forces a child process to terminate."""
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False | python | def terminate(self, force=False):
"""This forces a child process to terminate."""
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False | This forces a child process to terminate. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L227-L241 |
spyder-ide/pywinpty | winpty/ptyprocess.py | PtyProcess.sendcontrol | def sendcontrol(self, char):
'''Helper method that wraps send() with mnemonic access for sending a control
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
child.sendcontrol('g')
See also sendintr() and sendeof().
'''
char = char.lower()
a = ord(char)
if 97 <= a <= 122:
a = a - ord('a') + 1
byte = bytes([a])
return self.pty.write(byte.decode('utf-8')), byte
d = {'@': 0, '`': 0,
'[': 27, '{': 27,
'\\': 28, '|': 28,
']': 29, '}': 29,
'^': 30, '~': 30,
'_': 31,
'?': 127}
if char not in d:
return 0, b''
byte = bytes([d[char]])
return self.pty.write(byte.decode('utf-8')), byte | python | def sendcontrol(self, char):
'''Helper method that wraps send() with mnemonic access for sending a control
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
child.sendcontrol('g')
See also sendintr() and sendeof().
'''
char = char.lower()
a = ord(char)
if 97 <= a <= 122:
a = a - ord('a') + 1
byte = bytes([a])
return self.pty.write(byte.decode('utf-8')), byte
d = {'@': 0, '`': 0,
'[': 27, '{': 27,
'\\': 28, '|': 28,
']': 29, '}': 29,
'^': 30, '~': 30,
'_': 31,
'?': 127}
if char not in d:
return 0, b''
byte = bytes([d[char]])
return self.pty.write(byte.decode('utf-8')), byte | Helper method that wraps send() with mnemonic access for sending a control
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
child.sendcontrol('g')
See also sendintr() and sendeof(). | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L264-L288
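The mnemonic mapping above is plain ASCII arithmetic; the same computation in isolation (a sketch, independent of any pty object):

def control_byte(char):
    # 'a'..'z' map to bytes 1..26; punctuation uses the table from sendcontrol().
    char = char.lower()
    a = ord(char)
    if 97 <= a <= 122:
        return bytes([a - ord('a') + 1])
    special = {'@': 0, '`': 0, '[': 27, '{': 27, '\\': 28, '|': 28,
               ']': 29, '}': 29, '^': 30, '~': 30, '_': 31, '?': 127}
    return bytes([special[char]]) if char in special else b''

assert control_byte('c') == b'\x03'  # Ctrl-C
assert control_byte('g') == b'\x07'  # Ctrl-G, the bell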
spyder-ide/pywinpty | winpty/ptyprocess.py | PtyProcess.setwinsize | def setwinsize(self, rows, cols):
"""Set the terminal window size of the child tty.
"""
self._winsize = (rows, cols)
self.pty.set_size(cols, rows) | python | def setwinsize(self, rows, cols):
"""Set the terminal window size of the child tty.
"""
self._winsize = (rows, cols)
self.pty.set_size(cols, rows) | Set the terminal window size of the child tty. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/ptyprocess.py#L318-L322 |
spyder-ide/pywinpty | winpty/winpty_wrapper.py | PTY.read | def read(self, length=1000, blocking=False):
"""
Read at most ``length`` bytes from the current process's output stream.
Note: this method is not fully non-blocking; however, it
behaves like one.
"""
size_p = PLARGE_INTEGER(LARGE_INTEGER(0))
if not blocking:
windll.kernel32.GetFileSizeEx(self.conout_pipe, size_p)
size = size_p[0]
length = min(size, length)
data = ctypes.create_string_buffer(length)
if length > 0:
num_bytes = PLARGE_INTEGER(LARGE_INTEGER(0))
ReadFile(self.conout_pipe, data, length, num_bytes, None)
return data.value | python | def read(self, length=1000, blocking=False):
"""
Read at most ``length`` bytes from the current process's output stream.
Note: this method is not fully non-blocking; however, it
behaves like one.
"""
size_p = PLARGE_INTEGER(LARGE_INTEGER(0))
if not blocking:
windll.kernel32.GetFileSizeEx(self.conout_pipe, size_p)
size = size_p[0]
length = min(size, length)
data = ctypes.create_string_buffer(length)
if length > 0:
num_bytes = PLARGE_INTEGER(LARGE_INTEGER(0))
ReadFile(self.conout_pipe, data, length, num_bytes, None)
return data.value | Read at most ``length`` bytes from the current process's output stream.
Note: this method is not fully non-blocking; however, it
behaves like one. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/winpty_wrapper.py#L60-L76
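A hypothetical polling loop built on the non-blocking read and iseof shown in these records (a sketch, assuming `pty` is a constructed PTY instance):

import time

def poll_output(pty, chunk=1000, interval=0.05):
    # Collect output without blocking until the console streams close.
    buf = b''
    while not pty.iseof():
        data = pty.read(chunk, blocking=False)  # b'' when nothing is queued
        if data:
            buf += data
        else:
            time.sleep(interval)  # avoid a busy loop
    return buf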
spyder-ide/pywinpty | winpty/winpty_wrapper.py | PTY.write | def write(self, data):
"""Write string data to current process input stream."""
data = data.encode('utf-8')
data_p = ctypes.create_string_buffer(data)
num_bytes = PLARGE_INTEGER(LARGE_INTEGER(0))
bytes_to_write = len(data)
success = WriteFile(self.conin_pipe, data_p,
bytes_to_write, num_bytes, None)
return success, num_bytes[0] | python | def write(self, data):
"""Write string data to current process input stream."""
data = data.encode('utf-8')
data_p = ctypes.create_string_buffer(data)
num_bytes = PLARGE_INTEGER(LARGE_INTEGER(0))
bytes_to_write = len(data)
success = WriteFile(self.conin_pipe, data_p,
bytes_to_write, num_bytes, None)
return success, num_bytes[0] | Write string data to the current process's input stream. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/winpty_wrapper.py#L78-L86
spyder-ide/pywinpty | winpty/winpty_wrapper.py | PTY.close | def close(self):
"""Close all communication process streams."""
windll.kernel32.CloseHandle(self.conout_pipe)
windll.kernel32.CloseHandle(self.conin_pipe) | python | def close(self):
"""Close all communication process streams."""
windll.kernel32.CloseHandle(self.conout_pipe)
windll.kernel32.CloseHandle(self.conin_pipe) | Close all communication process streams. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/winpty_wrapper.py#L88-L91 |
spyder-ide/pywinpty | winpty/winpty_wrapper.py | PTY.iseof | def iseof(self):
"""Check if current process streams are still open."""
succ = windll.kernel32.PeekNamedPipe(
self.conout_pipe, None, None, None, None, None
)
return not bool(succ) | python | def iseof(self):
"""Check if current process streams are still open."""
succ = windll.kernel32.PeekNamedPipe(
self.conout_pipe, None, None, None, None, None
)
return not bool(succ) | Check if current process streams are still open. | https://github.com/spyder-ide/pywinpty/blob/f4461cde9f0c53047e61e9eff7f7ec21ecbc4573/winpty/winpty_wrapper.py#L93-L98 |
marcopompili/django-instagram | django_instagram/scraper.py | instagram_scrap_profile | def instagram_scrap_profile(username):
"""
Scrape an Instagram profile page
:param username:
:return:
"""
try:
url = "https://www.instagram.com/{}/".format(username)
page = requests.get(url)
# Raise an error for a 404 caused by a bad profile name
page.raise_for_status()
return html.fromstring(page.content)
except HTTPError:
logging.exception('user profile "{}" not found'.format(username))
except (ConnectionError, socket_error) as e:
logging.exception("instagram.com unreachable") | python | def instagram_scrap_profile(username):
"""
Scrape an Instagram profile page
:param username:
:return:
"""
try:
url = "https://www.instagram.com/{}/".format(username)
page = requests.get(url)
# Raise an error for a 404 caused by a bad profile name
page.raise_for_status()
return html.fromstring(page.content)
except HTTPError:
logging.exception('user profile "{}" not found'.format(username))
except (ConnectionError, socket_error) as e:
logging.exception("instagram.com unreachable") | Scrap an instagram profile page
:param username:
:return: | https://github.com/marcopompili/django-instagram/blob/b237b52516b2408c862c791f37fa2bb950630a01/django_instagram/scraper.py#L18-L33 |
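Both except branches above only log, so the function implicitly returns None on any failure; a hypothetical caller should guard for that:

tree = instagram_scrap_profile('instagram')  # lxml HTML tree, or None on error
if tree is not None:
    scripts = tree.xpath('//script')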
marcopompili/django-instagram | django_instagram/scraper.py | instagram_profile_js | def instagram_profile_js(username):
"""
Retrieve the script tags from the parsed page.
:param username:
:return:
"""
try:
tree = instagram_scrap_profile(username)
return tree.xpath('//script')
except AttributeError:
logging.exception("scripts not found")
return None | python | def instagram_profile_js(username):
"""
Retrieve the script tags from the parsed page.
:param username:
:return:
"""
try:
tree = instagram_scrap_profile(username)
return tree.xpath('//script')
except AttributeError:
logging.exception("scripts not found")
return None | Retrieve the script tags from the parsed page.
:param username:
:return: | https://github.com/marcopompili/django-instagram/blob/b237b52516b2408c862c791f37fa2bb950630a01/django_instagram/scraper.py#L36-L47 |
marcopompili/django-instagram | django_instagram/scraper.py | instagram_profile_json | def instagram_profile_json(username):
"""
Get the JSON data string from the scripts.
:param username:
:return:
"""
scripts = instagram_profile_js(username)
source = None
if scripts:
for script in scripts:
if script.text:
if script.text[0:SCRIPT_JSON_PREFIX] == "window._sharedData":
source = script.text[SCRIPT_JSON_DATA_INDEX:-1]
return source | python | def instagram_profile_json(username):
"""
Get the JSON data string from the scripts.
:param username:
:return:
"""
scripts = instagram_profile_js(username)
source = None
if scripts:
for script in scripts:
if script.text:
if script.text[0:SCRIPT_JSON_PREFIX] == "window._sharedData":
source = script.text[SCRIPT_JSON_DATA_INDEX:-1]
return source | Get the JSON data string from the scripts.
:param username:
:return: | https://github.com/marcopompili/django-instagram/blob/b237b52516b2408c862c791f37fa2bb950630a01/django_instagram/scraper.py#L50-L65 |
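The string handed back is the JSON payload assigned to window._sharedData, so decoding is a plain json.loads away; a hypothetical usage sketch:

import json

source = instagram_profile_json('instagram')  # JSON text, or None
profile = json.loads(source) if source else {}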
marcopompili/django-instagram | django_instagram/templatetags/instagram_client.py | get_profile_media | def get_profile_media(profile, page = 0):
"""
Parse the media nodes out of a scraped profile object
:param profile:
:param page:
:return:
"""
try:
edges = profile['entry_data']['ProfilePage'][page]['graphql']['user']['edge_owner_to_timeline_media']['edges']
return [edge['node'] for edge in edges]
except KeyError:
logging.exception("path to profile media not found") | python | def get_profile_media(profile, page = 0):
"""
Parse the media nodes out of a scraped profile object
:param profile:
:param page:
:return:
"""
try:
edges = profile['entry_data']['ProfilePage'][page]['graphql']['user']['edge_owner_to_timeline_media']['edges']
return [edge['node'] for edge in edges]
except KeyError:
logging.exception("path to profile media not found") | Parse a generated media object
:param profile:
:param page:
:return: | https://github.com/marcopompili/django-instagram/blob/b237b52516b2408c862c791f37fa2bb950630a01/django_instagram/templatetags/instagram_client.py#L20-L31 |
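The same dictionary walk on its own, hardened against missing keys and short page lists, for a profile dict shaped like the window._sharedData payload (a sketch, not the library's exact code):

def recent_media(profile, page=0):
    # Return timeline media nodes, or [] if the expected path is absent.
    try:
        user = profile['entry_data']['ProfilePage'][page]['graphql']['user']
        return [e['node'] for e in user['edge_owner_to_timeline_media']['edges']]
    except (KeyError, IndexError):
        return []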
marcopompili/django-instagram | django_instagram/templatetags/instagram_client.py | instagram_user_recent_media | def instagram_user_recent_media(parser, token):
"""
Tag for getting data about the recent media of a user.
:param parser:
:param token:
:return:
"""
try:
tagname, username = token.split_contents()
return InstagramUserRecentMediaNode(username)
except ValueError:
raise template.TemplateSyntaxError(
"%r tag requires a single argument" % token.contents.split()[0]
) | python | def instagram_user_recent_media(parser, token):
"""
Tag for getting data about the recent media of a user.
:param parser:
:param token:
:return:
"""
try:
tagname, username = token.split_contents()
return InstagramUserRecentMediaNode(username)
except ValueError:
raise template.TemplateSyntaxError(
"%r tag requires a single argument" % token.contents.split()[0]
) | Tag for getting data about the recent media of a user.
:param parser:
:param token:
:return: | https://github.com/marcopompili/django-instagram/blob/b237b52516b2408c862c791f37fa2bb950630a01/django_instagram/templatetags/instagram_client.py#L53-L67 |
tehmaze/ipcalc | ipcalc.py | IP.bin | def bin(self):
"""Full-length binary representation of the IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.bin())
01111111000000000000000000000001
"""
bits = self.v == 4 and 32 or 128
return bin(self.ip).split('b')[1].rjust(bits, '0') | python | def bin(self):
"""Full-length binary representation of the IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.bin())
01111111000000000000000000000001
"""
bits = self.v == 4 and 32 or 128
return bin(self.ip).split('b')[1].rjust(bits, '0') | Full-length binary representation of the IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.bin())
01111111000000000000000000000001 | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L175-L183 |
tehmaze/ipcalc | ipcalc.py | IP.info | def info(self):
"""Show IANA allocation information for the current IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.info())
LOOPBACK
"""
b = self.bin()
for i in range(len(b), 0, -1):
if b[:i] in self._range[self.v]:
return self._range[self.v][b[:i]]
return 'UNKNOWN' | python | def info(self):
"""Show IANA allocation information for the current IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.info())
LOOPBACK
"""
b = self.bin()
for i in range(len(b), 0, -1):
if b[:i] in self._range[self.v]:
return self._range[self.v][b[:i]]
return 'UNKNOWN' | Show IANA allocation information for the current IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.info())
LOOPBACK | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L210-L221 |
tehmaze/ipcalc | ipcalc.py | IP._dqtoi | def _dqtoi(self, dq):
"""Convert dotquad or hextet to long."""
# hex notation
if dq.startswith('0x'):
return self._dqtoi_hex(dq)
# IPv6
if ':' in dq:
return self._dqtoi_ipv6(dq)
elif len(dq) == 32:
# Assume full hexadecimal notation
self.v = 6
return int(dq, 16)
# IPv4
if '.' in dq:
return self._dqtoi_ipv4(dq)
raise ValueError('Invalid address input') | python | def _dqtoi(self, dq):
"""Convert dotquad or hextet to long."""
# hex notation
if dq.startswith('0x'):
return self._dqtoi_hex(dq)
# IPv6
if ':' in dq:
return self._dqtoi_ipv6(dq)
elif len(dq) == 32:
# Assume full hexadecimal notation
self.v = 6
return int(dq, 16)
# IPv4
if '.' in dq:
return self._dqtoi_ipv4(dq)
raise ValueError('Invalid address input') | Convert dotquad or hextet to long. | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L223-L241 |
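For the IPv4 branch, the dotted-quad-to-long conversion is base-256 positional arithmetic; a standalone sketch of the same idea (not the module's _dqtoi_ipv4, which is not shown in these records):

def dq_to_int(dq):
    # '192.0.2.42' -> 3221226026, validating each octet along the way.
    octets = dq.split('.')
    if len(octets) != 4:
        raise ValueError('expected four octets')
    value = 0
    for octet in octets:
        n = int(octet)
        if not 0 <= n <= 255:
            raise ValueError('octet out of range: %s' % octet)
        value = value * 256 + n
    return value

assert dq_to_int('192.0.2.42') == 3221226026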
tehmaze/ipcalc | ipcalc.py | IP._itodq | def _itodq(self, n):
"""Convert long to dotquad or hextet."""
if self.v == 4:
return '.'.join(map(str, [
(n >> 24) & 0xff,
(n >> 16) & 0xff,
(n >> 8) & 0xff,
n & 0xff,
]))
else:
n = '%032x' % n
return ':'.join(n[4 * x:4 * x + 4] for x in range(0, 8)) | python | def _itodq(self, n):
"""Convert long to dotquad or hextet."""
if self.v == 4:
return '.'.join(map(str, [
(n >> 24) & 0xff,
(n >> 16) & 0xff,
(n >> 8) & 0xff,
n & 0xff,
]))
else:
n = '%032x' % n
return ':'.join(n[4 * x:4 * x + 4] for x in range(0, 8)) | Convert long to dotquad or hextet. | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L309-L320 |
tehmaze/ipcalc | ipcalc.py | IP.to_compressed | def to_compressed(self):
"""
Compress an IP address to its shortest possible compressed form.
>>> print(IP('127.0.0.1').to_compressed())
127.1
>>> print(IP('127.1.0.1').to_compressed())
127.1.1
>>> print(IP('127.0.1.1').to_compressed())
127.0.1.1
>>> print(IP('2001:1234:0000:0000:0000:0000:0000:5678').to_compressed())
2001:1234::5678
>>> print(IP('1234:0000:0000:beef:0000:0000:0000:5678').to_compressed())
1234:0:0:beef::5678
>>> print(IP('0000:0000:0000:0000:0000:0000:0000:0001').to_compressed())
::1
>>> print(IP('fe80:0000:0000:0000:0000:0000:0000:0000').to_compressed())
fe80::
"""
if self.v == 4:
quads = self.dq.split('.')
try:
zero = quads.index('0')
if zero == 1 and quads.index('0', zero + 1):
quads.pop(zero)
quads.pop(zero)
return '.'.join(quads)
elif zero == 2:
quads.pop(zero)
return '.'.join(quads)
except ValueError: # No zeroes
pass
return self.dq
else:
quads = map(lambda q: '%x' % (int(q, 16)), self.dq.split(':'))
quadc = ':%s:' % (':'.join(quads),)
zeros = [0, -1]
# Find the largest group of zeros
for match in re.finditer(r'(:[:0]+)', quadc):
count = len(match.group(1)) - 1
if count > zeros[0]:
zeros = [count, match.start(1)]
count, where = zeros
if count:
quadc = quadc[:where] + ':' + quadc[where + count:]
quadc = re.sub(r'((^:)|(:$))', '', quadc)
quadc = re.sub(r'((^:)|(:$))', '::', quadc)
return quadc | python | def to_compressed(self):
"""
Compress an IP address to its shortest possible compressed form.
>>> print(IP('127.0.0.1').to_compressed())
127.1
>>> print(IP('127.1.0.1').to_compressed())
127.1.1
>>> print(IP('127.0.1.1').to_compressed())
127.0.1.1
>>> print(IP('2001:1234:0000:0000:0000:0000:0000:5678').to_compressed())
2001:1234::5678
>>> print(IP('1234:0000:0000:beef:0000:0000:0000:5678').to_compressed())
1234:0:0:beef::5678
>>> print(IP('0000:0000:0000:0000:0000:0000:0000:0001').to_compressed())
::1
>>> print(IP('fe80:0000:0000:0000:0000:0000:0000:0000').to_compressed())
fe80::
"""
if self.v == 4:
quads = self.dq.split('.')
try:
zero = quads.index('0')
if zero == 1 and quads.index('0', zero + 1):
quads.pop(zero)
quads.pop(zero)
return '.'.join(quads)
elif zero == 2:
quads.pop(zero)
return '.'.join(quads)
except ValueError: # No zeroes
pass
return self.dq
else:
quads = map(lambda q: '%x' % (int(q, 16)), self.dq.split(':'))
quadc = ':%s:' % (':'.join(quads),)
zeros = [0, -1]
# Find the largest group of zeros
for match in re.finditer(r'(:[:0]+)', quadc):
count = len(match.group(1)) - 1
if count > zeros[0]:
zeros = [count, match.start(1)]
count, where = zeros
if count:
quadc = quadc[:where] + ':' + quadc[where + count:]
quadc = re.sub(r'((^:)|(:$))', '', quadc)
quadc = re.sub(r'((^:)|(:$))', '::', quadc)
return quadc | Compress an IP address to its shortest possible compressed form.
>>> print(IP('127.0.0.1').to_compressed())
127.1
>>> print(IP('127.1.0.1').to_compressed())
127.1.1
>>> print(IP('127.0.1.1').to_compressed())
127.0.1.1
>>> print(IP('2001:1234:0000:0000:0000:0000:0000:5678').to_compressed())
2001:1234::5678
>>> print(IP('1234:0000:0000:beef:0000:0000:0000:5678').to_compressed())
1234:0:0:beef::5678
>>> print(IP('0000:0000:0000:0000:0000:0000:0000:0001').to_compressed())
::1
>>> print(IP('fe80:0000:0000:0000:0000:0000:0000:0000').to_compressed())
fe80:: | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L420-L472 |
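The IPv6 branch hinges on locating the longest run of zero hextets; the same scan in isolation (a sketch mirroring the regex above):

import re

def longest_zero_run(quadc):
    # Return (length, start) of the longest ':0:0...' run in a ':'-wrapped address.
    best = (0, -1)
    for match in re.finditer(r'(:[:0]+)', quadc):
        count = len(match.group(1)) - 1
        if count > best[0]:
            best = (count, match.start(1))
    return best

assert longest_zero_run(':1234:0:0:beef:0:0:0:5678:') == (6, 14)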
tehmaze/ipcalc | ipcalc.py | IP.to_ipv4 | def to_ipv4(self):
"""
Convert (an IPv6) IP address to an IPv4 address, if possible.
Only works for IPv4-compat (::/96), IPv4-mapped (::ffff/96), and 6-to-4
(2002::/16) addresses.
>>> ip = IP('2002:c000:022a::')
>>> print(ip.to_ipv4())
192.0.2.42
"""
if self.v == 4:
return self
else:
if self.bin().startswith('0' * 96):
return IP(int(self), version=4)
elif self.bin().startswith('0' * 80 + '1' * 16):
return IP(int(self) & MAX_IPV4, version=4)
elif int(self) & BASE_6TO4:
return IP((int(self) - BASE_6TO4) >> 80, version=4)
else:
raise ValueError('%s: IPv6 address is not IPv4 compatible or mapped, '
'nor a 6-to-4 IP' % self.dq) | python | def to_ipv4(self):
"""
Convert (an IPv6) IP address to an IPv4 address, if possible.
Only works for IPv4-compat (::/96), IPv4-mapped (::ffff/96), and 6-to-4
(2002::/16) addresses.
>>> ip = IP('2002:c000:022a::')
>>> print(ip.to_ipv4())
192.0.2.42
"""
if self.v == 4:
return self
else:
if self.bin().startswith('0' * 96):
return IP(int(self), version=4)
elif self.bin().startswith('0' * 80 + '1' * 16):
return IP(int(self) & MAX_IPV4, version=4)
elif int(self) & BASE_6TO4:
return IP((int(self) - BASE_6TO4) >> 80, version=4)
else:
raise ValueError('%s: IPv6 address is not IPv4 compatible or mapped, '
'nor a 6-to-4 IP' % self.dq) | Convert (an IPv6) IP address to an IPv4 address, if possible.
Only works for IPv4-compat (::/96), IPv4-mapped (::ffff/96), and 6-to-4
(2002::/16) addresses.
>>> ip = IP('2002:c000:022a::')
>>> print(ip.to_ipv4())
192.0.2.42 | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L474-L496 |
tehmaze/ipcalc | ipcalc.py | IP.from_bin | def from_bin(cls, value):
"""Initialize a new network from binary notation."""
value = value.lstrip('b')
if len(value) == 32:
return cls(int(value, 2))
elif len(value) == 128:
return cls(int(value, 2))
else:
raise ValueError('%r: invalid binary notation' % (value,)) | python | def from_bin(cls, value):
"""Initialize a new network from binary notation."""
value = value.lstrip('b')
if len(value) == 32:
return cls(int(value, 2))
elif len(value) == 128:
return cls(int(value, 2))
else:
raise ValueError('%r: invalid binary notation' % (value,)) | Initialize a new network from binary notation. | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L499-L507
tehmaze/ipcalc | ipcalc.py | IP.from_hex | def from_hex(cls, value):
"""Initialize a new network from hexadecimal notation."""
if len(value) == 8:
return cls(int(value, 16))
elif len(value) == 32:
return cls(int(value, 16))
else:
raise ValueError('%r: invalid hexadecimal notation' % (value,)) | python | def from_hex(cls, value):
"""Initialize a new network from hexadecimal notation."""
if len(value) == 8:
return cls(int(value, 16))
elif len(value) == 32:
return cls(int(value, 16))
else:
raise ValueError('%r: invalid hexadecimal notation' % (value,)) | Initialize a new network from hexadecimal notation. | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L510-L517 |
tehmaze/ipcalc | ipcalc.py | IP.to_ipv6 | def to_ipv6(self, ip_type='6-to-4'):
"""
Convert (an IPv4) IP address to an IPv6 address.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_ipv6())
2002:c000:022a:0000:0000:0000:0000:0000
>>> print(ip.to_ipv6('compat'))
0000:0000:0000:0000:0000:0000:c000:022a
>>> print(ip.to_ipv6('mapped'))
0000:0000:0000:0000:0000:ffff:c000:022a
"""
assert ip_type in ['6-to-4', 'compat', 'mapped'], 'Conversion ip_type not supported'
if self.v == 4:
if ip_type == '6-to-4':
return IP(BASE_6TO4 | int(self) << 80, version=6)
elif ip_type == 'compat':
return IP(int(self), version=6)
elif ip_type == 'mapped':
return IP(0xffff << 32 | int(self), version=6)
else:
return self | python | def to_ipv6(self, ip_type='6-to-4'):
"""
Convert (an IPv4) IP address to an IPv6 address.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_ipv6())
2002:c000:022a:0000:0000:0000:0000:0000
>>> print(ip.to_ipv6('compat'))
0000:0000:0000:0000:0000:0000:c000:022a
>>> print(ip.to_ipv6('mapped'))
0000:0000:0000:0000:0000:ffff:c000:022a
"""
assert ip_type in ['6-to-4', 'compat', 'mapped'], 'Conversion ip_type not supported'
if self.v == 4:
if ip_type == '6-to-4':
return IP(BASE_6TO4 | int(self) << 80, version=6)
elif ip_type == 'compat':
return IP(int(self), version=6)
elif ip_type == 'mapped':
return IP(0xffff << 32 | int(self), version=6)
else:
return self | Convert (an IPv4) IP address to an IPv6 address.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_ipv6())
2002:c000:022a:0000:0000:0000:0000:0000
>>> print(ip.to_ipv6('compat'))
0000:0000:0000:0000:0000:0000:c000:022a
>>> print(ip.to_ipv6('mapped'))
0000:0000:0000:0000:0000:ffff:c000:022a | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L519-L542 |
tehmaze/ipcalc | ipcalc.py | IP.to_reverse | def to_reverse(self):
"""Convert the IP address to a PTR record.
Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_reverse())
42.2.0.192.in-addr.arpa
>>> print(ip.to_ipv6().to_reverse())
0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa
"""
if self.v == 4:
return '.'.join(list(self.dq.split('.')[::-1]) + ['in-addr', 'arpa'])
else:
return '.'.join(list(self.hex())[::-1] + ['ip6', 'arpa']) | python | def to_reverse(self):
"""Convert the IP address to a PTR record.
Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_reverse())
42.2.0.192.in-addr.arpa
>>> print(ip.to_ipv6().to_reverse())
0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa
"""
if self.v == 4:
return '.'.join(list(self.dq.split('.')[::-1]) + ['in-addr', 'arpa'])
else:
return '.'.join(list(self.hex())[::-1] + ['ip6', 'arpa']) | Convert the IP address to a PTR record.
Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_reverse())
42.2.0.192.in-addr.arpa
>>> print(ip.to_ipv6().to_reverse())
0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L544-L558 |
tehmaze/ipcalc | ipcalc.py | Network.netmask_long | def netmask_long(self):
"""
Network netmask derived from subnet size, as long.
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet.netmask_long())
4278190080
"""
if self.version() == 4:
return (MAX_IPV4 >> (32 - self.mask)) << (32 - self.mask)
else:
return (MAX_IPV6 >> (128 - self.mask)) << (128 - self.mask) | python | def netmask_long(self):
"""
Network netmask derived from subnet size, as long.
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet.netmask_long())
4278190080
"""
if self.version() == 4:
return (MAX_IPV4 >> (32 - self.mask)) << (32 - self.mask)
else:
return (MAX_IPV6 >> (128 - self.mask)) << (128 - self.mask) | Network netmask derived from subnet size, as long.
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet.netmask_long())
4278190080 | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L595-L606 |
tehmaze/ipcalc | ipcalc.py | Network.broadcast_long | def broadcast_long(self):
"""
Broadcast address, as long.
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet.broadcast_long())
2147483647
"""
if self.version() == 4:
return self.network_long() | (MAX_IPV4 - self.netmask_long())
else:
return self.network_long() \
| (MAX_IPV6 - self.netmask_long()) | python | def broadcast_long(self):
"""
Broadcast address, as long.
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet.broadcast_long())
2147483647
"""
if self.version() == 4:
return self.network_long() | (MAX_IPV4 - self.netmask_long())
else:
return self.network_long() \
| (MAX_IPV6 - self.netmask_long()) | Broadcast address, as long.
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet.broadcast_long())
2147483647 | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L640-L652 |
tehmaze/ipcalc | ipcalc.py | Network.host_first | def host_first(self):
"""First available host in this subnet."""
if (self.version() == 4 and self.mask > 30) or \
(self.version() == 6 and self.mask > 126):
return self
else:
return IP(self.network_long() + 1, version=self.version()) | python | def host_first(self):
"""First available host in this subnet."""
if (self.version() == 4 and self.mask > 30) or \
(self.version() == 6 and self.mask > 126):
return self
else:
return IP(self.network_long() + 1, version=self.version()) | First available host in this subnet. | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L654-L660 |
tehmaze/ipcalc | ipcalc.py | Network.host_last | def host_last(self):
"""Last available host in this subnet."""
if (self.version() == 4 and self.mask == 32) or \
(self.version() == 6 and self.mask == 128):
return self
elif (self.version() == 4 and self.mask == 31) or \
(self.version() == 6 and self.mask == 127):
return IP(int(self) + 1, version=self.version())
else:
return IP(self.broadcast_long() - 1, version=self.version()) | python | def host_last(self):
"""Last available host in this subnet."""
if (self.version() == 4 and self.mask == 32) or \
(self.version() == 6 and self.mask == 128):
return self
elif (self.version() == 4 and self.mask == 31) or \
(self.version() == 6 and self.mask == 127):
return IP(int(self) + 1, version=self.version())
else:
return IP(self.broadcast_long() - 1, version=self.version()) | Last available host in this subnet. | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L662-L671 |
tehmaze/ipcalc | ipcalc.py | Network.check_collision | def check_collision(self, other):
"""Check another network against the given network."""
other = Network(other)
return self.network_long() <= other.network_long() <= self.broadcast_long() or \
other.network_long() <= self.network_long() <= other.broadcast_long() | python | def check_collision(self, other):
"""Check another network against the given network."""
other = Network(other)
return self.network_long() <= other.network_long() <= self.broadcast_long() or \
other.network_long() <= self.network_long() <= other.broadcast_long() | Check another network against the given network. | https://github.com/tehmaze/ipcalc/blob/d436b95d2783347c3e0084d76ec3c52d1f5d2f0b/ipcalc.py#L673-L677 |
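The collision test is the standard interval-overlap predicate applied to the numeric address ranges; a worked example with hypothetical values:

a = (3221225984, 3221226239)  # 192.0.2.0/24 as (network, broadcast) longs
b = (3221226112, 3221226143)  # 192.0.2.128/27 lies inside it
collides = a[0] <= b[0] <= a[1] or b[0] <= a[0] <= b[1]
assert collides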
django-fluent/django-fluent-dashboard | fluent_dashboard/items.py | CmsModelList.init_with_context | def init_with_context(self, context):
"""
Initialize the menu.
"""
# Apply the include/exclude patterns:
listitems = self._visible_models(context['request'])
# Convert to a data structure similar to the one the dashboard icons use.
# This allows sorting the items identically.
models = [
{'name': model._meta.model_name,
'app_name': model._meta.app_label,
'title': capfirst(model._meta.verbose_name_plural),
'url': self._get_admin_change_url(model, context)
}
for model, perms in listitems if self.is_item_visible(model, perms)
]
# Sort models.
sort_cms_models(models)
# Convert to items
for model in models:
self.children.append(items.MenuItem(title=model['title'], url=model['url'])) | python | def init_with_context(self, context):
"""
Initialize the menu.
"""
# Apply the include/exclude patterns:
listitems = self._visible_models(context['request'])
# Convert to a data structure similar to the one the dashboard icons use.
# This allows sorting the items identically.
models = [
{'name': model._meta.model_name,
'app_name': model._meta.app_label,
'title': capfirst(model._meta.verbose_name_plural),
'url': self._get_admin_change_url(model, context)
}
for model, perms in listitems if self.is_item_visible(model, perms)
]
# Sort models.
sort_cms_models(models)
# Convert to items
for model in models:
self.children.append(items.MenuItem(title=model['title'], url=model['url'])) | Initialize the menu. | https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/items.py#L27-L50 |
django-fluent/django-fluent-dashboard | fluent_dashboard/items.py | ReturnToSiteItem.init_with_context | def init_with_context(self, context):
"""
Find the current URL based on the context.
It uses :func:`get_edited_object` to find the model,
and calls ``get_absolute_url()`` to get the frontend URL.
"""
super(ReturnToSiteItem, self).init_with_context(context)
# See if the current page is being edited, and update the URL accordingly.
edited_model = self.get_edited_object(context['request'])
if edited_model:
try:
url = edited_model.get_absolute_url()
except (AttributeError, urls.NoReverseMatch) as e:
pass
else:
if url:
self.url = url | python | def init_with_context(self, context):
"""
Find the current URL based on the context.
It uses :func:`get_edited_object` to find the model,
and calls ``get_absolute_url()`` to get the frontend URL.
"""
super(ReturnToSiteItem, self).init_with_context(context)
# See if the current page is being edited, and update the URL accordingly.
edited_model = self.get_edited_object(context['request'])
if edited_model:
try:
url = edited_model.get_absolute_url()
except (AttributeError, urls.NoReverseMatch) as e:
pass
else:
if url:
self.url = url | Find the current URL based on the context.
It uses :func:`get_edited_object` to find the model,
and calls ``get_absolute_url()`` to get the frontend URL. | https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/items.py#L82-L99 |
django-fluent/django-fluent-dashboard | fluent_dashboard/items.py | ReturnToSiteItem.get_edited_object | def get_edited_object(self, request):
"""
Return the object which is currently being edited.
Returns ``None`` if the match could not be made.
"""
resolvermatch = urls.resolve(request.path_info)
if resolvermatch.namespace == 'admin' and resolvermatch.url_name and resolvermatch.url_name.endswith('_change'):
# In "appname_modelname_change" view of the admin.
# Extract the appname and model from the url name.
# For some custom views, url_name might not be filled in (e.g. django-polymorphic's subclass_view)
match = RE_CHANGE_URL.match(resolvermatch.url_name)
if not match:
return None
# object_id can be a string (e.g. a country code as PK).
try:
object_id = resolvermatch.kwargs['object_id'] # Django 2.0+
except KeyError:
object_id = resolvermatch.args[0]
return self.get_object_by_natural_key(match.group(1), match.group(2), object_id)
return None | python | def get_edited_object(self, request):
"""
Return the object which is currently being edited.
Returns ``None`` if the match could not be made.
"""
resolvermatch = urls.resolve(request.path_info)
if resolvermatch.namespace == 'admin' and resolvermatch.url_name and resolvermatch.url_name.endswith('_change'):
# In "appname_modelname_change" view of the admin.
# Extract the appname and model from the url name.
# For some custom views, url_name might not be filled in (e.g. django-polymorphic's subclass_view)
match = RE_CHANGE_URL.match(resolvermatch.url_name)
if not match:
return None
# object_id can be a string (e.g. a country code as PK).
try:
object_id = resolvermatch.kwargs['object_id'] # Django 2.0+
except KeyError:
object_id = resolvermatch.args[0]
return self.get_object_by_natural_key(match.group(1), match.group(2), object_id)
return None | Return the object which is currently being edited.
Returns ``None`` if the match could not be made. | https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/items.py#L101-L122 |
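RE_CHANGE_URL itself is defined elsewhere in the module and is not shown in these records; assuming it follows the conventional shape of admin change-view url names, the extraction looks like this (pattern and sample name are illustrative assumptions):

import re

# Assumed shape; app labels may contain underscores, model names do not.
RE_CHANGE_URL = re.compile(r'^(.+)_([^_]+)_change$')

match = RE_CHANGE_URL.match('fluent_pages_page_change')
assert match.group(1) == 'fluent_pages' and match.group(2) == 'page'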
django-fluent/django-fluent-dashboard | fluent_dashboard/items.py | ReturnToSiteItem.get_object_by_natural_key | def get_object_by_natural_key(self, app_label, model_name, object_id):
"""
Return a model based on a natural key.
This is a utility function for :func:`get_edited_object`.
"""
try:
model_type = ContentType.objects.get_by_natural_key(app_label, model_name)
except ContentType.DoesNotExist:
return None
# Pointless to fetch the object if there is no URL to generate;
# avoid another database query.
ModelClass = model_type.model_class()
if not hasattr(ModelClass, 'get_absolute_url'):
return None
try:
return model_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
return None | python | def get_object_by_natural_key(self, app_label, model_name, object_id):
"""
Return a model based on a natural key.
This is a utility function for :func:`get_edited_object`.
"""
try:
model_type = ContentType.objects.get_by_natural_key(app_label, model_name)
except ContentType.DoesNotExist:
return None
# Pointless to fetch the object if there is no URL to generate;
# avoid another database query.
ModelClass = model_type.model_class()
if not hasattr(ModelClass, 'get_absolute_url'):
return None
try:
return model_type.get_object_for_this_type(pk=object_id)
except ObjectDoesNotExist:
return None | Return a model based on a natural key.
This is a utility function for :func:`get_edited_object`. | https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/items.py#L124-L143 |