index | package | name | docstring | code | signature |
---|---|---|---|---|---|
20,916 | precession.precession | tiler |
Repeat the quantity thing a number of times given by the shape of shaper.
Parameters
----------
thing: float
Quantity to be repeated
shaper: array
Quantity providing the shape.
Returns
-------
thing: float
Quantity to be repeated
Examples
--------
``thing = precession.tiler(thing,shaper)``
| def tiler(thing,shaper):
"""
Repeat the quantity thing a number of times given by the shape of shaper.
Parameters
----------
thing: float
Quantity to be repeated
shaper: array
Quantity providing the shape.
Returns
-------
thing: float
Quantity to be repeated
Examples
--------
``thing = precession.tiler(thing,shaper)``
"""
thing = np.atleast_1d(thing)
shaper = np.atleast_1d(shaper)
assert thing.ndim == 1 and shaper.ndim==1
return np.squeeze(np.tile(thing, np.shape(shaper)).reshape(len(shaper),len(thing)))
| (thing, shaper) |
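A minimal standalone sketch of the same logic in plain numpy (the name `tiler_sketch` is illustrative, not part of the package):

```python
import numpy as np

# Repeat `thing` once per element of `shaper`, grouping the copies
# row-wise, mirroring the tiler body above.
def tiler_sketch(thing, shaper):
    thing = np.atleast_1d(thing)
    shaper = np.atleast_1d(shaper)
    return np.squeeze(np.tile(thing, np.shape(shaper)).reshape(len(shaper), len(thing)))

print(tiler_sketch(0.5, [10.0, 20.0, 30.0]))  # -> [0.5 0.5 0.5]
```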
20,917 | precession.precession | tofdeltachi |
Time as a function of deltachi on the precessional timescale (without radiation reaction).
Parameters
----------
deltachi: float
Weighted spin difference.
kappa: float
Asymptotic angular momentum.
r: float
Binary separation.
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
cyclesign: integer, optional (default: 1)
Sign (either +1 or -1) to cover the two halves of a precession cycle.
precomputedroots: array, optional (default: None)
Pre-computed output of deltachiroots for computational efficiency.
Returns
-------
t: float
Time.
Examples
--------
``t = precession.tofdeltachi(deltachi,kappa,r,chieff,q,chi1,chi2,cyclesign=1)``
| def tofdeltachi(deltachi, kappa, r, chieff, q, chi1, chi2, cyclesign=1, precomputedroots=None):
"""
Time as a function of deltachi on the precessional timescale (without radiation reaction).
Parameters
----------
deltachi: float
Weighted spin difference.
kappa: float
Asymptotic angular momentum.
r: float
Binary separation.
chieff: float
Effective spin.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
cyclesign: integer, optional (default: 1)
Sign (either +1 or -1) to cover the two halves of a precession cycle.
precomputedroots: array, optional (default: None)
Pre-computed output of deltachiroots for computational efficiency.
Returns
-------
t: float
Time.
Examples
--------
``t = precession.tofdeltachi(deltachi,kappa,r,chieff,q,chi1,chi2,cyclesign=1)``
"""
u = eval_u(r=r, q=q)
deltachiminus,deltachiplus,deltachi3 = deltachiroots(kappa, u, chieff, q, chi1, chi2, precomputedroots=precomputedroots)
psiperiod = eval_tau(kappa, r, chieff, q, chi1, chi2, precomputedroots=np.stack([deltachiminus,deltachiplus,deltachi3]), return_psiperiod=True)
deltachitilde = affine(deltachi,deltachiminus,deltachiplus)
m = elliptic_parameter(kappa, u, chieff, q, chi1, chi2, precomputedroots=np.stack([deltachiminus,deltachiplus,deltachi3]))
t = np.sign(cyclesign) * psiperiod * scipy.special.ellipkinc(np.arcsin(deltachitilde**(1/2)), m)
return t
| (deltachi, kappa, r, chieff, q, chi1, chi2, cyclesign=1, precomputedroots=None) |
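Only the last line of `tofdeltachi` does the actual quadrature; a minimal sketch of just that step with placeholder numbers (the real inputs come from `deltachiroots`, `eval_tau`, and `elliptic_parameter`):

```python
import numpy as np
import scipy.special

# Time as an incomplete elliptic integral of the first kind in the
# rescaled spin variable; all values below are placeholders, not
# physical precomputed roots.
deltachitilde, m, psiperiod, cyclesign = 0.25, 0.5, 100.0, 1
t = np.sign(cyclesign) * psiperiod * scipy.special.ellipkinc(np.arcsin(deltachitilde**0.5), m)
print(t)
```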
20,918 | precession.precession | updown_endpoint |
Endpoint of the up-down precessional instability.
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
deltaphi: float
Angle between the projections of the two spins onto the orbital plane.
theta1: float
Angle between orbital angular momentum and primary spin.
theta2: float
Angle between orbital angular momentum and secondary spin.
Examples
--------
``theta1,theta2,deltaphi = precession.updown_endpoint(q,chi1,chi2)``
| def updown_endpoint(q, chi1, chi2):
"""
Endpoint of the up-down precessional instability.
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
deltaphi: float
Angle between the projections of the two spins onto the orbital plane.
theta1: float
Angle between orbital angular momentum and primary spin.
theta2: float
Angle between orbital angular momentum and secondary spin.
Examples
--------
``theta1,theta2,deltaphi = precession.updown_endpoint(q,chi1,chi2)``
"""
q = np.atleast_1d(q).astype(float)
chi1 = np.atleast_1d(chi1).astype(float)
chi2 = np.atleast_1d(chi2).astype(float)
costhetaupdown = (chi1 - q * chi2) / (chi1 + q * chi2)
theta1 = np.arccos(costhetaupdown)
theta2 = np.arccos(costhetaupdown)
deltaphi = np.zeros(len(theta1))
return np.stack([theta1, theta2, deltaphi])
| (q, chi1, chi2) |
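Since the endpoint is a single closed-form expression, it can be checked directly in plain numpy (sample values are illustrative):

```python
import numpy as np

# Up-down endpoint: both spins settle at the same tilt angle and their
# in-plane projections align (deltaphi = 0), as in the formula above.
q, chi1, chi2 = 0.8, 0.7, 0.4
costhetaupdown = (chi1 - q * chi2) / (chi1 + q * chi2)
theta1 = theta2 = np.arccos(costhetaupdown)
deltaphi = 0.0
print(theta1, theta2, deltaphi)
```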
20,919 | precession.precession | vectors_to_Jframe |
Rotate vectors of the three momenta onto a frame where J is along z and L lies in the x-z plane.
Parameters
----------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Returns
-------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Examples
--------
``Lvec,S1vec,S2vec = precession.vectors_to_Jframe(Lvec,S1vec,S2vec)``
| def vectors_to_Jframe(Lvec, S1vec, S2vec):
"""
Rotate vectors of the three momenta onto a frame where J is along z and L lies in the x-z plane.
Parameters
----------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Returns
-------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Examples
--------
``Lvec,S1vec,S2vec = precession.vectors_to_Jframe(Lvec,S1vec,S2vec)``
"""
Jvec = Lvec + S1vec + S2vec
rotation = lambda vec: rotate_nested(vec, Jvec, Lvec)
Lvecrot = rotation(Lvec)
S1vecrot = rotation(S1vec)
S2vecrot = rotation(S2vec)
return np.stack([Lvecrot, S1vecrot, S2vecrot])
| (Lvec, S1vec, S2vec) |
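The package delegates the rotation to its `rotate_nested` helper, which is not shown here; a self-contained sketch of the same geometry for a single triple of vectors:

```python
import numpy as np

# Build an orthonormal triad with z along J and y normal to the J-L
# plane, so the rotated L has no y component (it lies in x-z).
def to_Jframe(Lvec, S1vec, S2vec):
    Jvec = Lvec + S1vec + S2vec
    ez = Jvec / np.linalg.norm(Jvec)
    ey = np.cross(ez, Lvec)
    ey = ey / np.linalg.norm(ey)
    ex = np.cross(ey, ez)
    R = np.stack([ex, ey, ez])  # rows are the new basis vectors
    return R @ Lvec, R @ S1vec, R @ S2vec

Lvec = np.array([0.0, 0.0, 1.0])
S1vec = np.array([0.1, 0.0, 0.1])
S2vec = np.array([0.0, 0.1, 0.1])
print(to_Jframe(Lvec, S1vec, S2vec))  # rotated L has zero y component
```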
20,920 | precession.precession | vectors_to_Lframe |
Rotate vectors of the three momenta onto a frame where L is along z and S1 lies in the x-z plane.
Parameters
----------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Returns
-------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Examples
--------
``Lvec,S1vec,S2vec = precession.vectors_to_Lframe(Lvec,S1vec,S2vec)``
| def vectors_to_Lframe(Lvec, S1vec, S2vec):
"""
Rotate vectors of the three momenta onto a frame where L is along z and S1 lies in the x-z plane.
Parameters
----------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Returns
-------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Examples
--------
``Lvec,S1vec,S2vec = precession.vectors_to_Lframe(Lvec,S1vec,S2vec)``
"""
Jvec = Lvec + S1vec + S2vec
rotation = lambda vec: rotate_nested(vec, Lvec, S1vec)
Lvecrot = rotation(Lvec)
S1vecrot = rotation(S1vec)
S2vecrot = rotation(S2vec)
return np.stack([Lvecrot, S1vecrot, S2vecrot])
| (Lvec, S1vec, S2vec) |
20,921 | precession.precession | vectors_to_angles |
Convert Cartesian vectors (L,S1,S2) into angles (theta1,theta2,deltaphi).
Parameters
----------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Returns
-------
deltaphi: float
Angle between the projections of the two spins onto the orbital plane.
theta1: float
Angle between orbital angular momentum and primary spin.
theta2: float
Angle between orbital angular momentum and secondary spin.
Examples
--------
``theta1,theta2,deltaphi = precession.vectors_to_angles(Lvec,S1vec,S2vec)``
| def vectors_to_angles(Lvec, S1vec, S2vec):
"""
Convert Cartesian vectors (L,S1,S2) into angles (theta1,theta2,deltaphi).
Parameters
----------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
Returns
-------
deltaphi: float
Angle between the projections of the two spins onto the orbital plane.
theta1: float
Angle between orbital angular momentum and primary spin.
theta2: float
Angle between orbital angular momentum and secondary spin.
Examples
--------
``theta1,theta2,deltaphi = precession.vectors_to_angles(Lvec,S1vec,S2vec)``
"""
Lvec = np.atleast_2d(Lvec).astype(float)
S1vec = np.atleast_2d(S1vec).astype(float)
S2vec = np.atleast_2d(S2vec).astype(float)
S1vec = normalize_nested(S1vec)
S2vec = normalize_nested(S2vec)
Lvec = normalize_nested(Lvec)
theta1 = np.arccos(dot_nested(S1vec, Lvec))
theta2 = np.arccos(dot_nested(S2vec, Lvec))
S1crL = np.cross(S1vec, Lvec)
S2crL = np.cross(S2vec, Lvec)
absdeltaphi = np.arccos(dot_nested(normalize_nested(S1crL), normalize_nested(S2crL)))
cyclesign = eval_cyclesign(Lvec=Lvec, S1vec=S1vec, S2vec=S2vec)
deltaphi = absdeltaphi*cyclesign
return np.stack([theta1, theta2, deltaphi])
| (Lvec, S1vec, S2vec) |
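For a single binary the angle extraction reduces to a few dot and cross products; a plain-numpy sketch (`eval_cyclesign` is package-internal and not reproduced, so only the unsigned deltaphi is computed here):

```python
import numpy as np

unit = lambda v: v / np.linalg.norm(v)

Lvec = np.array([0.0, 0.0, 1.0])
S1vec = np.array([0.1, 0.0, 0.1])
S2vec = np.array([0.0, 0.1, 0.1])

# Tilt angles from dot products with L; |deltaphi| from the angle
# between the two in-plane normals, as in the function above.
theta1 = np.arccos(np.dot(unit(S1vec), unit(Lvec)))
theta2 = np.arccos(np.dot(unit(S2vec), unit(Lvec)))
absdeltaphi = np.arccos(np.dot(unit(np.cross(S1vec, Lvec)), unit(np.cross(S2vec, Lvec))))
print(theta1, theta2, absdeltaphi)
```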
20,922 | precession.precession | vectors_to_conserved |
Convert vectors (L,S1,S2) to conserved quantities (deltachi,kappa,chieff).
Parameters
----------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
q: float
Mass ratio: 0<=q<=1.
full_output: boolean, optional (default: False)
Return additional outputs.
Returns
-------
chieff: float
Effective spin.
cyclesign: integer, optional
Sign (either +1 or -1) to cover the two halves of a precession cycle.
deltachi: float
Weighted spin difference.
kappa: float
Asymptotic angular momentum.
Examples
--------
``deltachi,kappa,chieff = precession.vectors_to_conserved(Lvec,S1vec,S2vec,q)``
``deltachi,kappa,chieff,cyclesign = precession.vectors_to_conserved(Lvec,S1vec,S2vec,q,full_output=True)``
| def vectors_to_conserved(Lvec, S1vec, S2vec, q, full_output=False):
"""
Convert vectors (L,S1,S2) to conserved quantities (deltachi,kappa,chieff).
Parameters
----------
Lvec: array
Cartesian vector of the orbital angular momentum.
S1vec: array
Cartesian vector of the primary spin.
S2vec: array
Cartesian vector of the secondary spin.
q: float
Mass ratio: 0<=q<=1.
full_output: boolean, optional (default: False)
Return additional outputs.
Returns
-------
chieff: float
Effective spin.
cyclesign: integer, optional
Sign (either +1 or -1) to cover the two halves of a precession cycle.
deltachi: float
Weighted spin difference.
kappa: float
Asymptotic angular momentum.
Examples
--------
``deltachi,kappa,chieff = precession.vectors_to_conserved(Lvec,S1vec,S2vec,q)``
``deltachi,kappa,chieff,cyclesign = precession.vectors_to_conserved(Lvec,S1vec,S2vec,q,full_output=True)``
"""
L = norm_nested(Lvec)
S1 = norm_nested(S1vec)
S2 = norm_nested(S2vec)
r = eval_r(L=L,q=q)
chi1 = eval_chi1(q,S1)
chi2 = eval_chi2(q,S2)
theta1,theta2,deltaphi = vectors_to_angles(Lvec, S1vec, S2vec)
deltachi, kappa, chieff, cyclesign = angles_to_conserved(theta1, theta2, deltaphi, r, q, chi1, chi2, full_output=True)
if full_output:
return np.stack([deltachi, kappa, chieff, cyclesign])
else:
return np.stack([deltachi, kappa, chieff])
| (Lvec, S1vec, S2vec, q, full_output=False) |
20,924 | precession.precession | widenutation_condition |
Conditions for wide nutation to take place. The returned flag `which` is:
- `wide1` if wide nutation is allowed for the primary BH.
- `wide2` if wide nutation is allowed for the secondary BH.
- `nowide` if wide nutation is not allowed.
Parameters
----------
r: float
Binary separation.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
chieff: float
Effective spin.
kappa: float
Asymptotic angular momentum.
which: string
Flag indicating if wide nutation is allowed: `wide1` (primary), `wide2` (secondary), or `nowide`.
Examples
--------
``which,kappa,chieff = precession.widenutation_condition(r,q,chi1,chi2)``
| def widenutation_condition(r, q, chi1, chi2):
"""
Conditions for wide nutation to take place. The returned flag `which` is:
- `wide1` if wide nutation is allowed for the primary BH.
- `wide2` if wide nutation is allowed for the secondary BH.
- `nowide` if wide nutation is not allowed.
Parameters
----------
r: float
Binary separation.
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
chieff: float
Effective spin.
kappa: float
Asymptotic angular momentum.
which: string
Flag indicating if wide nutation is allowed: `wide1` (primary), `wide2` (secondary), or `nowide`.
Examples
--------
``which,kappa,chieff = precession.widenutation_condition(r,q,chi1,chi2)``
"""
r = np.atleast_1d(r).astype(float)
q = np.atleast_1d(q).astype(float)
chi1 = np.atleast_1d(chi1).astype(float)
chi2 = np.atleast_1d(chi2).astype(float)
rwide = widenutation_separation(q, chi1, chi2)
kappawide1 = (chi1**2 - 2*q*chi1**2 + q**4*chi2**2 - 2*q**2*(1-q)*r)/(2*q*(1+q)**2 * r**0.5)
chieffwide1 = -(1-q)*r**0.5/(1+q)
kappawide2 = (chi1**2 - 2*q**3*chi1**2 + q**4*chi2**2 + 2*q*(1-q)*r)/(2*q*(1+q)**2 * r**0.5)
chieffwide2 = (1-q)*r**0.5/(1+q)
which = np.where(r<=rwide, np.where(chi1<=chi2,"wide1","wide2"), "nowide")
kappa = np.where(r<=rwide, np.where(chi1<=chi2,kappawide1,kappawide2), np.nan)
chieff = np.where(r<=rwide, np.where(chi1<=chi2,chieffwide1,chieffwide2), np.nan)
return np.stack([which, kappa, chieff])
| (r, q, chi1, chi2) |
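The wide-nutation values of kappa and chieff are closed-form; a direct evaluation for the primary branch at sample values (numbers are illustrative; units following the package convention of total mass M=1 is an assumption here):

```python
# Closed-form wide-nutation values for the `wide1` branch, copied from
# the expressions above; sample values only.
q, r, chi1, chi2 = 0.8, 10.0, 0.9, 0.2
chieffwide1 = -(1 - q) * r**0.5 / (1 + q)
kappawide1 = (chi1**2 - 2*q*chi1**2 + q**4*chi2**2 - 2*q**2*(1 - q)*r) / (2*q*(1 + q)**2 * r**0.5)
print(kappawide1, chieffwide1)
```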
20,925 | precession.precession | widenutation_separation |
The critical separation r_wide below which the binary component with
smaller dimensionless spin may undergo wide nutations.
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
r_wide: float
Orbital separation where wide nutations become possible.
Examples
--------
``r_wide = precession.widenutation_separation(q,chi1,chi2)``
| def widenutation_separation(q, chi1, chi2):
"""
The critical separation r_wide below which the binary component with
smaller dimensionless spin may undergo wide nutations.
Parameters
----------
q: float
Mass ratio: 0<=q<=1.
chi1: float
Dimensionless spin of the primary (heavier) black hole: 0<=chi1<=1.
chi2: float
Dimensionless spin of the secondary (lighter) black hole: 0<=chi2<=1.
Returns
-------
r_wide: float
Orbital separation where wide nutations become possible.
Examples
--------
``r_wide = precession.widenutation_separation(q,chi1,chi2)``
"""
q = np.atleast_1d(q).astype(float)
chi1 = np.atleast_1d(chi1).astype(float)
chi2 = np.atleast_1d(chi2).astype(float)
rwide = ((chi1 - q*chi2) / (1-q))**2
return rwide
| (q, chi1, chi2) |
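The threshold itself is a one-liner and easy to evaluate by hand (sample values):

```python
# r_wide = ((chi1 - q*chi2) / (1 - q))**2, as in the function above.
q, chi1, chi2 = 0.5, 0.9, 0.2
r_wide = ((chi1 - q * chi2) / (1 - q)) ** 2
print(r_wide)  # wide nutation is only possible at separations r <= r_wide
```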
20,926 | precession.precession | wraproots |
Find roots of a polynomial given coefficients, ordered according to their real part. Complex roots are masked with nans. This is essentially a wrapper of numpy.roots.
Parameters
----------
coefficientfunction: callable
Function returning the polynomial coefficients ordered from highest to lowest degree.
*args, **kwargs:
Parameters of `coefficientfunction`.
Returns
-------
sols: array
Roots of the polynomial.
Examples
--------
``sols = precession.wraproots(coefficientfunction, *args, **kwargs)``
| def wraproots(coefficientfunction, *args, **kwargs):
"""
Find roots of a polynomial given coefficients, ordered according to their real part. Complex roots are masked with nans. This is essentially a wrapper of numpy.roots.
Parameters
----------
coefficientfunction: callable
Function returning the polynomial coefficients ordered from highest to lowest degree.
*args, **kwargs:
Parameters of `coefficientfunction`.
Returns
-------
sols: array
Roots of the polynomial.
Examples
--------
``sols = precession.wraproots(coefficientfunction, *args, **kwargs)``
"""
coeffs = coefficientfunction(*args, **kwargs)
sols = np.sort_complex(roots_vec(coeffs.T))
sols = np.real(np.where(np.isreal(sols), sols, np.nan))
return sols
| (coefficientfunction, *args, **kwargs) |
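A single-polynomial sketch of the same ordering and masking (the package version vectorizes over many coefficient sets via `roots_vec`; plain `np.roots` stands in here):

```python
import numpy as np

# Roots of x^3 - 1: one real root and a complex-conjugate pair.
coeffs = np.array([1.0, 0.0, 0.0, -1.0])
sols = np.sort_complex(np.roots(coeffs))                 # order by real part
sols = np.real(np.where(np.isreal(sols), sols, np.nan))  # mask complex roots
print(sols)  # -> [nan nan 1.]
```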
20,928 | pproxy.server | proxies_by_uri | null | def proxies_by_uri(uri_jumps):
jump = DIRECT
for uri in reversed(uri_jumps.split('__')):
jump = proxy_by_uri(uri, jump)
return jump
| (uri_jumps) |
20,929 | pproxy.server | compile_rule | null | def compile_rule(filename):
if filename.startswith("{") and filename.endswith("}"):
return re.compile(filename[1:-1]).match
with open(filename) as f:
# '(?:' opens a non-capturing group over the alternation of rule lines
return re.compile('(?:'+'|'.join(i.strip() for i in f if i.strip() and not i.startswith('#'))+')$').match
| (filename) |
20,934 | qt_api | example_function | null | def example_function():
return 1 + 1
| () |
20,935 | argufy.formatter | ArgufyHelpFormatter | Provide formatting for Argufy. | class ArgufyHelpFormatter(HelpFormatter):
"""Provide formatting for Argufy."""
# argparse.HelpFormatter(prog, max_help_position=80, width=130)
def add_usage(
self,
usage: Optional[str],
actions: Iterable[Action],
# groups: Iterable['ArgumentGroup'],
groups: Iterable['MutuallyExclusiveGroup'],
prefix: Optional[str] = 'usage: ',
) -> None:
"""Format usage message."""
if prefix is not None:
prefix = self.font(prefix)
super().add_usage(usage, actions, groups, prefix)
@staticmethod
def font(text: str, width: str = 'BRIGHT') -> str:
"""Set the string thickness."""
return getattr(Style, width) + text + Style.RESET_ALL
@staticmethod
def shade(text: str, color: str = 'CYAN') -> str:
"""Set the string color."""
return getattr(Fore, color.upper()) + text + Style.RESET_ALL
# def _format_action_invocation(self, action: Action) -> str:
# """Format arguments summary."""
# # TODO: find alternative that does not modify action
# if isinstance(action, argparse._SubParsersAction):
# if action.choices is not None:
# for choice in list(action.choices):
# parser = action.choices.pop(choice)
# choice = self.shade(choice)
# action.choices[choice] = parser
# return super(
# ArgufyHelpFormatter, self
# )._format_action_invocation(action)
def _expand_help(self, action: Action) -> str:
"""Format help message."""
if action.help:
return self.shade(
super()._expand_help(action).rstrip('.').lower(),
'YELLOW',
)
return ''
def _format_action(self, action: Action) -> str:
"""Format arguments."""
if isinstance(action, argparse._SubParsersAction._ChoicesPseudoAction):
subcommand = self.shade(
self.font(self._format_action_invocation(action))
)
help_text = self._expand_help(action)
# TODO: calculate correct spacing
return f" {subcommand.ljust(37)}{help_text}\n"
# action.option_strings = [
# self.font(self.shade(option))
# for option in action.option_strings
# ]
return super()._format_action(action)
| (prog, indent_increment=2, max_help_position=24, width=None) |
20,936 | argparse | __init__ | null | def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
import shutil
width = shutil.get_terminal_size().columns
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = min(max_help_position,
max(width - 20, indent_increment * 2))
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+', _re.ASCII)
self._long_break_matcher = _re.compile(r'\n\n\n+')
| (self, prog, indent_increment=2, max_help_position=24, width=None) |
20,939 | argufy.formatter | _expand_help | Format help message. | def _expand_help(self, action: Action) -> str:
"""Format help message."""
if action.help:
return self.shade(
super()._expand_help(action).rstrip('.').lower(),
'YELLOW',
)
return ''
| (self, action: argparse.Action) -> str |
20,940 | argparse | _fill_text | null | def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
import textwrap
return textwrap.fill(text, width,
initial_indent=indent,
subsequent_indent=indent)
| (self, text, width, indent) |
20,941 | argufy.formatter | _format_action | Format arguments. | def _format_action(self, action: Action) -> str:
"""Format arguments."""
if isinstance(action, argparse._SubParsersAction._ChoicesPseudoAction):
subcommand = self.shade(
self.font(self._format_action_invocation(action))
)
help_text = self._expand_help(action)
# TODO: calculate correct spacing
return f" {subcommand.ljust(37)}{help_text}\n"
# action.option_strings = [
# self.font(self.shade(option))
# for option in action.option_strings
# ]
return super()._format_action(action)
| (self, action: argparse.Action) -> str |
20,958 | argufy.formatter | add_usage | Format usage message. | def add_usage(
self,
usage: Optional[str],
actions: Iterable[Action],
# groups: Iterable['ArgumentGroup'],
groups: Iterable['MutuallyExclusiveGroup'],
prefix: Optional[str] = 'usage: ',
) -> None:
"""Format usage message."""
if prefix is not None:
prefix = self.font(prefix)
super().add_usage(usage, actions, groups, prefix)
| (self, usage: Optional[str], actions: Iterable[argparse.Action], groups: Iterable[ForwardRef('MutuallyExclusiveGroup')], prefix: Optional[str] = 'usage: ') -> None |
20,960 | argufy.formatter | font | Set the string thickness. | @staticmethod
def font(text: str, width: str = 'BRIGHT') -> str:
"""Set the string thickness."""
return getattr(Style, width) + text + Style.RESET_ALL
| (text: str, width: str = 'BRIGHT') -> str |
20,962 | argufy.formatter | shade | Set the string color. | @staticmethod
def shade(text: str, color: str = 'CYAN') -> str:
"""Set the string color."""
return getattr(Fore, color.upper()) + text + Style.RESET_ALL
| (text: str, color: str = 'CYAN') -> str |
20,964 | argufy.argument | Argument | Represent argparse arguments. | class Argument: # pylint: disable=too-many-instance-attributes
"""Represent argparse arguments."""
__short_flags: List[str] = ['-h']
def __init__(
self,
docstring: Optional[DocstringParam] = None,
parameters: Optional[Parameter] = None,
) -> None:
"""Initialize argparse argument."""
# self.attributes: Dict[Any, Any] = {}
# set parameter default
if parameters:
self.default = parameters.default
self.name = parameters # type: ignore
else:
self.default = None
# set parameter type
if parameters and parameters.annotation != empty:
self.__parse_parameters(parameters)
elif docstring and docstring.type_name:
self.__parse_docstring(docstring)
elif self.default is not None:
self.type = type(self.default)
# if hasattr(self, 'type'):
# self.metavar = (self.type.__name__)
# set parameter help message
if docstring and docstring.description:
self.help = docstring.description
def __parse_parameters(self, parameters: Parameter) -> None:
"""Get parameter types from type inspection."""
# if typing.get_origin(parameters.annotation) is Union:
# XXX need to handle types from typing
# print(
# '---', parameters.annotation,
# typing.get_origin(parameters.annotation),
# )
if hasattr(parameters.annotation, '__origin__'):
annotation = typing.get_args(parameters.annotation)
# print('annotation', annotation, parameters.annotation)
# check if annotation is optional
if type(None) in annotation:
self.nargs = '?'
else:
# self.type = annotation
self.type = typing.get_origin(parameters.annotation)
else:
self.type = parameters.annotation
def __parse_docstring(self, docstring: DocstringParam) -> None:
"""Get parameter types from docstring."""
# Parse docstring for parameter types and defaults
if docstring.type_name and ',' in docstring.type_name:
for arg in docstring.type_name.split(',', 1):
if not hasattr(self, 'type'):
# NOTE: Limit input that eval will parse
if arg.__class__.__module__ == 'builtins':
self.type = literal_eval(arg) if arg != 'str' else str
if arg.lower() == 'optional' and not hasattr(self, 'default'):
# XXX: should optional not exist instead of 'None'
self.default = None
# TODO: tighten regex
if re.search(r'^\s*\{.*\}\s*$', arg):
self.choices = literal_eval(arg.strip())
if not hasattr(self, 'type'):
# NOTE: Limit input that eval will parse
if docstring.type_name in (
('float', 'int', 'str', 'list', 'dict', 'tuple')
):
self.type = locate(docstring.type_name)
@property
def name(self) -> List[str]:
"""Get argparse command/argument name."""
return self.__name
@name.setter
def name(self, parameters: Parameter) -> None:
"""Set argparse command/argument name."""
name = parameters.name.replace('_', '-')
# parse positional argument
if not hasattr(self, 'default') and not str(parameters).startswith(
'**'
):
self.__name = [name]
if str(parameters).startswith('*'):
self.nargs = '*'
# parse optional argument
else:
if str(parameters).startswith('**'):
self.nargs = '*'
flags = [f"--{name}"]
# NOTE: check for conflicting flags
if '-' not in name:
# TODO: check if common short flag (ex: version)
n = name[:1]
if n not in Argument.__short_flags:
Argument.__short_flags.append(n)
flags.insert(0, f"-{n}")
elif n.upper() not in Argument.__short_flags:
Argument.__short_flags.append(n.upper())
flags.insert(0, f"-{n.upper()}")
self.__name = flags
@property
def type(self) -> Any:
"""Get argparse argument type."""
return self.__type # type: ignore
@type.setter
def type(self, annotation: Any) -> None:
"""Set argparse argument type."""
# log.debug('prematched annotation:', annotation)
# print(annotation)
if annotation == bool:
# NOTE: these store bool type internally
if self.default or not hasattr(self, 'default'):
self.action = 'store_false'
else:
self.action = 'store_true'
elif annotation == int:
self.__type = annotation
self.action = 'append'
elif annotation == list:
self.__type = lambda v: int(v) if v.isdigit() else v
self.action = 'append'
elif annotation == tuple:
self.__type = annotation
self.nargs = '+'
elif annotation == set:
self.__type = annotation
self.nargs = '+'
else:
# log.debug('unmatched annotation:', annotation)
self.__type = annotation
# self.nargs = 1
@property
def metavar(self) -> str:
"""Get argparse argument metavar."""
return self.__metavar
@metavar.setter
def metavar(self, metavar: str) -> None:
"""Set argparse argument metavar."""
# NOTE: Only positional arguments use metavars
if not hasattr(self, 'default'):
self.__metavar = metavar
# @property
# def const(self) -> str:
# """Get argparse argument const."""
# return self.__const
# @const.setter
# def const(self, const: str) -> None:
# """Set argparse argument const."""
# self.__const = const
# @property
# def dest(self) -> str:
# """Get argparse command/argument dest."""
# return self.__dest
# @dest.setter
# def dest(self, dest: str) -> None:
# """Set argparse command/argument dest."""
# self.__dest = dest
# @property
# def required(self) -> bool:
# """Get argparse required argument."""
# return self.__required
# @required.setter
# def required(self, required: bool) -> None:
# """Set argparse required argument."""
# self.__required = required
@property
def action(self) -> str:
"""Get argparse argument action."""
return self.__action
@action.setter
def action(self, action: str) -> None:
"""Set argparse argument action."""
self.__action = action
@property
def choices(self) -> List[str]:
"""Get argparse argument choices."""
return self.__choices
@choices.setter
def choices(self, choices: set) -> None:
"""Set argparse argument choices."""
self.__choices = list(choices)
@property
def nargs(self) -> Union[int, str]:
"""Get argparse argument nargs."""
return self.__nargs
@nargs.setter
def nargs(self, nargs: Union[int, str]) -> None:
"""Set argparse argument nargs."""
# TODO: map nargs to argparse with typing
# 1: set number of values
# ?: a single optional value
# *: a flexible list of values
# +: like * requiring at least one value
# REMAINDER: unused args
self.__nargs = nargs
@property
def default(self) -> Any:
"""Get argparse argument default."""
return self.__default
@default.setter
def default(self, default: Any) -> None:
"""Set argparse argument default."""
if default != empty:
self.__default = default
# XXX: this keeps conflicting with positional arguments
# else:
# self.__default = None
@property
def help(self) -> str:
"""Get argparse command/argument help message."""
return self.__help
@help.setter
def help(self, description: str) -> None:
"""Set argparse command/argument help message."""
self.__help = description
| (docstring: Optional[docstring_parser.common.DocstringParam] = None, parameters: Optional[inspect.Parameter] = None) -> None |
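`Argument` is built from an `inspect.Parameter` plus the matching `DocstringParam`; a sketch of how those two inputs are typically obtained (assumes the `docstring_parser` package; `greet` is a made-up example function):

```python
import inspect
from docstring_parser import parse

def greet(name: str, excited: bool = False):
    """Greet someone.

    Parameters
    ----------
    name: str
        Who to greet.
    excited: bool
        Add an exclamation mark.
    """

signature = inspect.signature(greet)
docstring = parse(greet.__doc__)
# Pair each signature parameter with its docstring entry: the same raw
# material Argument.__init__ consumes above.
for param, doc in zip(signature.parameters.values(), docstring.params):
    print(param.name, param.annotation, doc.type_name, doc.description)
```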
20,965 | argufy.argument | __parse_docstring | Get parameter types from docstring. | def __parse_docstring(self, docstring: DocstringParam) -> None:
"""Get parameter types from docstring."""
# Parse docstring for parameter types and defaults
if docstring.type_name and ',' in docstring.type_name:
for arg in docstring.type_name.split(',', 1):
if not hasattr(self, 'type'):
# NOTE: Limit input that eval will parse
if arg.__class__.__module__ == 'builtins':
self.type = literal_eval(arg) if arg != 'str' else str
if arg.lower() == 'optional' and not hasattr(self, 'default'):
# XXX: should optional not exist instead of 'None'
self.default = None
# TODO: tighten regex
if re.search(r'^\s*\{.*\}\s*$', arg):
self.choices = literal_eval(arg.strip())
if not hasattr(self, 'type'):
# NOTE: Limit input that eval will parse
if docstring.type_name in (
('float', 'int', 'str', 'list', 'dict', 'tuple')
):
self.type = locate(docstring.type_name)
| (self, docstring: docstring_parser.common.DocstringParam) -> NoneType |
20,966 | argufy.argument | __parse_parameters | Get parameter types from type inspection. | def __parse_parameters(self, parameters: Parameter) -> None:
"""Get parameter types from type inspection."""
# if typing.get_origin(parameters.annotation) is Union:
# XXX need to handle types from typing
# print(
# '---', parameters.annotation,
# typing.get_origin(parameters.annotation),
# )
if hasattr(parameters.annotation, '__origin__'):
annotation = typing.get_args(parameters.annotation)
# print('annotation', annotation, parameters.annotation)
# check if annotation is optional
if type(None) in annotation:
self.nargs = '?'
else:
# self.type = annotation
self.type = typing.get_origin(parameters.annotation)
else:
self.type = parameters.annotation
| (self, parameters: inspect.Parameter) -> NoneType |
20,967 | argufy.argument | __init__ | Initialize argparse argument. | def __init__(
self,
docstring: Optional[DocstringParam] = None,
parameters: Optional[Parameter] = None,
) -> None:
"""Initialize argparse argument."""
# self.attributes: Dict[Any, Any] = {}
# set parameter default
if parameters:
self.default = parameters.default
self.name = parameters # type: ignore
else:
self.default = None
# set parameter type
if parameters and parameters.annotation != empty:
self.__parse_parameters(parameters)
elif docstring and docstring.type_name:
self.__parse_docstring(docstring)
elif self.default is not None:
self.type = type(self.default)
# if hasattr(self, 'type'):
# self.metavar = (self.type.__name__)
# set parameter help message
if docstring and docstring.description:
self.help = docstring.description
| (self, docstring: Optional[docstring_parser.common.DocstringParam] = None, parameters: Optional[inspect.Parameter] = None) -> NoneType |
20,968 | argufy.parser | Parser | Provide CLI parser for function. | class Parser(ArgumentParser):
"""Provide CLI parser for function."""
exclude_prefixes = ('@', '_')
def __init__(self, **kwargs: Any) -> None:
"""Initialize parser.
Parameters
----------
prog: str
The name of the program
usage: str
The string describing the program usage
description: str
Text to display before the argument help
epilog: str
Text to display after the argument help
parents: list
A list of ArgumentParser objects whose arguments should also
be included
formatter_class: Object
A class for customizing the help output
prefix_chars: char
The set of characters that prefix optional arguments
fromfile_prefix_chars: None
The set of characters that prefix files from which additional
arguments should be read
argument_default: None
The global default value for arguments
conflict_handler: Object
The strategy for resolving conflicting optionals
add_help: bool
Add a -h/--help option to the parser
allow_abbrev: bool
Allows long options to be abbreviated if the abbreviation is
unambiguous
"""
# TODO: handle environment variables
module = self.__get_parent_module()
if module and module.__doc__:
docstring = docparse(module.__doc__)
if not kwargs.get('description'):
kwargs['description'] = docstring.short_description
if 'prog' not in kwargs:
kwargs['prog'] = module.__name__.split('.')[0]
if 'version' in kwargs:
self.prog_version = kwargs.pop('version')
# if 'prefix' in kwargs:
# self.prefix = kwargs.pop('prefix')
# else:
# self.prefix = kwargs['prog'].upper()
# log.debug(str(self.prefix))
if 'log_level' in kwargs:
log.setLevel(getattr(logging, kwargs.pop('log_level').upper()))
if 'log_handler' in kwargs:
log_handler = kwargs.pop('log_handler')
log.addHandler(logging.StreamHandler(log_handler))
self.use_module_args = kwargs.pop('use_module_args', False)
self.main_args_builder = kwargs.pop('main_args_builder', None)
self.command_type = kwargs.pop('command_type', None)
self.command_scheme = kwargs.pop('command_scheme', None)
if 'formatter_class' not in kwargs:
self.formatter_class = ArgufyHelpFormatter
super().__init__(**kwargs)
# NOTE: cannot move to formatter
self._positionals.title = ArgufyHelpFormatter.font(
self._positionals.title or 'arguments'
)
self._optionals.title = ArgufyHelpFormatter.font(
self._optionals.title or 'flags'
)
# XXX version lookup infinite loop when absent
if hasattr(self, 'prog_version'):
self.add_argument(
'--version',
action='version',
version=f"%(prog)s {self.prog_version}",
help='display application version',
)
@staticmethod
def __get_parent_module() -> Optional[ModuleType]:
"""Get name of module importing this module."""
stack = inspect.stack()
# TODO: need way to better identify parent module
stack_frame = stack[2]
result = inspect.getmodule(stack_frame[0]) or None
return result
@staticmethod
def __clean_args(argument: Argument) -> Dict[Any, Any]:
"""Retrieve cleaned parameters from an Argument."""
size = len('_Argument__')
return {
k[size:]: v
for k, v in vars(argument).items()
if k.startswith('_Argument__')
}
@staticmethod
def _get_excludes(exclude_prefixes: Tuple[str, ...] = tuple()) -> tuple:
"""Combine class excludes with instance."""
if exclude_prefixes != ():
return tuple(exclude_prefixes) + Parser.exclude_prefixes
return Parser.exclude_prefixes
@staticmethod
def __get_description(
name: str, docstring: 'Docstring'
) -> Optional['DocstringParam']:
"""Get argument description from docstring."""
return next((d for d in docstring.params if d.arg_name == name), None)
@staticmethod
def __get_keyword_args(
signature: 'Signature', docstring: 'Docstring'
) -> List[str]:
"""Get keyword arguments from docstring."""
return [
x.arg_name
for x in docstring.params
if x.arg_name not in list(signature.parameters)
]
@staticmethod
def __generate_parameter(name: str, module: ModuleType) -> Parameter:
"""Generate inspect parameter."""
parameter = Parameter(
name,
ParameterKind.POSITIONAL_OR_KEYWORD,
default=getattr(module, name),
annotation=empty,
)
return parameter
def add_commands( # pylint: disable=too-many-locals,too-many-branches
self,
module: ModuleType,
parser: Optional[ArgumentParser] = None,
exclude_prefixes: tuple = tuple(),
command_type: Optional[str] = None,
) -> 'Parser':
"""Add commands.
Parameters
----------
module: ModuleType,
Module used to import functions for CLI commands.
parser: ArgumentParser, optional
Parser used to append subparsers to create subcommands.
exclude_prefixes: tuple,
Methods from a module that should be excluded.
command_type: str, optional
Choose format type of commands to be created.
Returns
-------
self:
Return object itself to allow chaining functions.
"""
# use self or an existing parser
if not parser:
parser = self
parser.formatter_class = ArgufyHelpFormatter
module_name = module.__name__.split('.')[-1]
docstring = docparse(module.__doc__) if module.__doc__ else None
excludes = Parser._get_excludes(exclude_prefixes)
# use existing subparser or create a new one
if not any(isinstance(x, SubParsersAction) for x in parser._actions):
# TODO: use metavar for hidden commands
parser.add_subparsers(dest=module_name, parser_class=Parser)
# check if command exists
command = next(
(x for x in parser._actions if isinstance(x, SubParsersAction)),
None,
)
# set command name scheme
if command_type is None:
command_type = self.command_type
# create subcommand for command
if command_type == 'subcommand':
if command:
msg = docstring.short_description if docstring else None
subcommand = command.add_parser(
module_name.replace('_', '-'),
description=msg,
formatter_class=self.formatter_class,
help=msg,
)
subcommand.set_defaults(mod=module)
# append subcommand to existing command or create a new one
return self.add_commands(
module=module,
parser=subcommand,
exclude_prefixes=Parser._get_excludes(exclude_prefixes),
command_type='command',
)
# TODO: separate into method
# pylint: disable-next=too-many-nested-blocks
for name, value in inspect.getmembers(module):
# TODO: Possible singledispatch candidate
if not name.startswith(excludes):
# skip classes for now
if inspect.isclass(value):
# TODO: check if dataclass instance
# TODO: check if class instance
continue # pragma: no cover
# create commands from functions
if inspect.isfunction(value):
# TODO: Turn parameter-less function into switch
# merge builder function main_args into parser
if (
self.main_args_builder
and name == self.main_args_builder['function']
):
self.add_arguments(value, parser)
# create commands from functions
elif (
module.__name__ == value.__module__
and not name.startswith(', '.join(excludes))
or (
self.main_args_builder
and name == self.main_args_builder['function']
)
):
# create command from function
if command:
# control command name format
if self.command_scheme == 'chain':
cmd_name = f"{module_name}.{name}"
else:
cmd_name = name
msg = (
docparse(value.__doc__).short_description
if value.__doc__
else None
)
cmd = command.add_parser(
cmd_name.replace('_', '-'),
description=msg,
formatter_class=self.formatter_class,
help=msg,
)
cmd.set_defaults(mod=module, fn=value)
# add arguments from function
# log.debug("command %s %s %s", name, value, cmd)
self.add_arguments(value, cmd)
# create arguments from module variables
elif (
self.use_module_args
and not isinstance(value, ModuleType)
and not hasattr(typing, name)
and (
self.main_args_builder
and name != self.main_args_builder['instance']
)
):
# TODO: Reconcile inspect parameters with dict
# TODO: use argparse.SUPPRESS for hidden arguments
arguments = self.__clean_args(
Argument(
self.__get_description(name, docstring)
if docstring
else None,
self.__generate_parameter(name, module),
)
)
name = arguments.pop('name')
parser.add_argument(*name, **arguments)
return self
def add_arguments(
self, obj: Any, parser: Optional[ArgumentParser] = None
) -> 'Parser':
"""Add arguments to parser/subparser.
Parameters
----------
obj: Any
Various modules, functions, or arguments that can be inspected.
parser: ArgumentParser, optional
Parser/Subparser that arguments will be added.
Returns
-------
self:
Return object itself to allow chaining functions.
"""
if not parser:
parser = self
# prep object for inspection
docstring = docparse(obj.__doc__)
signature = inspect.signature(obj)
# populate subcommand with keyword arguments
for arg in signature.parameters:
param = signature.parameters[arg]
description = self.__get_description(arg, docstring)
log.debug("param: %s, %s", param, param.kind)
if not param.kind == Parameter.VAR_KEYWORD:
log.debug("param annotation: %s", param.annotation)
argument = self.__clean_args(Argument(description, param))
name = argument.pop('name')
# print(name, argument)
parser.add_argument(*name, **argument)
# populate options
# log.debug("params %s", params)
if docstring:
for arg in self.__get_keyword_args(signature, docstring):
description = self.__get_description(arg, docstring)
arguments = self.__clean_args(Argument(docstring=description))
parser.add_argument(f"--{arg.replace('_', '-')}", **arguments)
# log.debug("arguments %s", arguments)
# TODO for any docstring not collected parse here (args, kwargs)
# log.debug('docstring params', docstring.params)
return self
def __set_main_arguments(self, ns: 'Namespace') -> 'Namespace':
"""Separate and set main arguments from builder function.
Parameters
----------
ns: Namespace
Argparse namespace object for a command.
Returns
-------
Namespace:
Argparse namespace object with command arguments.
"""
# pass main arguments to builder function
if self.main_args_builder:
builder_mod = sys.modules[self.main_args_builder['module']]
builder = getattr(builder_mod, self.main_args_builder['function'])
builder_signature = inspect.signature(builder)
builder_args = {}
for param in builder_signature.parameters:
if param in vars(ns):
builder_args[param] = vars(ns).pop(param)
builder_mod.__dict__[self.main_args_builder['instance']] = builder(
**builder_args
)
return ns
def __set_module_arguments(
self, fn: Callable[[F], F], ns: 'Namespace'
) -> 'Namespace':
"""Separate and set module arguments from functions.
Parameters
----------
fn: Callable
Function used to separate module arguments from function.
ns: Namespace
Argparse namespace object for a command.
Returns
-------
Namespace:
Argparse namespace object with command arguments.
"""
# XXX: only works on subcommands that use 'mod'
if 'mod' in ns:
mod = vars(ns).pop('mod')
else:
mod = None
# separate namespace from other variables
signature = inspect.signature(fn)
docstring = docparse(fn.__doc__) if fn.__doc__ else None
# inspect non-signature keyword args
keywords = (
self.__get_keyword_args(signature, docstring)
if docstring
else list(signature.parameters)
)
args = [
{k: vars(ns).pop(k)}
for k in list(vars(ns).keys()).copy()
if not signature.parameters.get(k) and k not in keywords
]
log.debug("arguments %s, %s", args, keywords)
# set module variables
if mod and self.use_module_args:
for arg in args:
for k, v in arg.items():
mod.__dict__[k] = v
return ns
def retrieve(
self,
args: Sequence[str] = sys.argv[1:],
ns: Optional['Namespace'] = None,
) -> Tuple[List[str], 'Namespace']:
"""Retrieve parsed values from CLI input.
Parameters
----------
args: Sequence[str]
Command line arguments passed to the parser.
ns: Optional[Namespace]
Argparse namespace object for a command.
Returns
-------
List[str]:
Remaining unparsed argparse arguments.
Namespace:
Argparse namespace object with command arguments.
"""
# TODO: handle invalid argument
# show help when no arguments provided
if args == []:
args = ['--help'] # pragma: no cover
main_ns, main_args = self.parse_known_args(args, ns)
if main_args == [] and 'fn' in vars(main_ns):
return main_args, main_ns
# default to help message for subcommand
if 'mod' in vars(main_ns):
mod_args = []
mod_args.append(vars(main_ns)['mod'].__name__.split('.')[-1])
mod_args.append('--help')
self.parse_args(mod_args)
return main_args, main_ns
def dispatch(
self,
args: Sequence[str] = sys.argv[1:],
ns: Optional['Namespace'] = None,
) -> Optional[Callable[[F], F]]:
"""Call command with arguments.
Parameters
----------
args: Sequence[str]
Command line arguments passed to the parser.
ns: Optional[Namespace]
Argparse namespace object for a command.
Returns
-------
Optional[Callable[[F], F]]:
Call function with arguments.
"""
# parse variables
arguments, namespace = self.retrieve(args, ns)
log.debug("dispatch: %s, %s", arguments, namespace)
main_ns_result = self.__set_main_arguments(namespace)
# call function with variables
if 'fn' in namespace:
ns_vars = vars(namespace)
fn = ns_vars.pop('fn')
self.__set_module_arguments(fn, main_ns_result)
# XXX: only takes standard types
# attempt to plug parameters using inspect
splat = None
signature = inspect.signature(fn)
for arg in signature.parameters:
param = signature.parameters[arg]
if str(param).startswith('*') and not str(param).startswith(
'**'
):
splat = ns_vars.pop(arg)
# XXX: only works with splat and kwargs
if splat:
fn(*splat, **ns_vars)
else:
fn(**ns_vars)
return self.dispatch(arguments) if arguments != [] else None
| (**kwargs: Any) -> None |
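A hypothetical end-to-end use of `Parser` (the module name is illustrative, and the top-level `from argufy import Parser` import path is an assumption not confirmed by this snippet):

```python
# Sketch: build a CLI whose commands are the public functions of a
# module, then parse sys.argv and call the selected function.
import mypkg.commands  # illustrative module, not part of argufy

from argufy import Parser  # assumes Parser is re-exported by the package

parser = Parser(version='0.1.0')
parser.add_commands(mypkg.commands)
parser.dispatch()
```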
20,969 | argufy.parser | __clean_args | Retrieve cleaned parameters from an Argument. | @staticmethod
def __clean_args(argument: Argument) -> Dict[Any, Any]:
"""Retrieve cleaned parameters from an Argument."""
size = len('_Argument__')
return {
k[size:]: v
for k, v in vars(argument).items()
if k.startswith('_Argument__')
}
| (argument: argufy.argument.Argument) -> Dict[Any, Any] |
20,970 | argufy.parser | __generate_parameter | Generate inpect parameter. | @staticmethod
def __generate_parameter(name: str, module: ModuleType) -> Parameter:
"""Generate inpect parameter."""
parameter = Parameter(
name,
ParameterKind.POSITIONAL_OR_KEYWORD,
default=getattr(module, name),
annotation=empty,
)
return parameter
| (name: str, module: module) -> inspect.Parameter |
20,971 | argufy.parser | __get_description | Get argument description from docstring. | @staticmethod
def __get_description(
name: str, docstring: 'Docstring'
) -> Optional['DocstringParam']:
"""Get argument description from docstring."""
return next((d for d in docstring.params if d.arg_name == name), None)
| (name: str, docstring: 'Docstring') -> Optional[ForwardRef('DocstringParam')] |
20,972 | argufy.parser | __get_keyword_args | Get keyword arguments from docstring. | @staticmethod
def __get_keyword_args(
signature: 'Signature', docstring: 'Docstring'
) -> List[str]:
"""Get keyword arguments from docstring."""
return [
x.arg_name
for x in docstring.params
if x.arg_name not in list(signature.parameters)
]
| (signature: 'Signature', docstring: 'Docstring') -> List[str] |
20,973 | argufy.parser | __get_parent_module | Get name of module importing this module. | @staticmethod
def __get_parent_module() -> Optional[ModuleType]:
"""Get name of module importing this module."""
stack = inspect.stack()
# TODO: need way to better identify parent module
stack_frame = stack[2]
result = inspect.getmodule(stack_frame[0]) or None
return result
| () -> Optional[module] |
20,974 | argufy.parser | __set_main_arguments | Separate and set main arguments from builder function.
Parameters
----------
ns: Namespace
Argparse namespace object for a command.
Returns
-------
Namespace:
Argparse namespace object with command arguments.
| def __set_main_arguments(self, ns: 'Namespace') -> 'Namespace':
"""Separate and set main arguments from builder function.
Parameters
----------
ns: Namespace
Argparse namespace object for a command.
Returns
-------
Namespace:
Argparse namespace object with command arguments.
"""
# pass main arguments to builder function
if self.main_args_builder:
builder_mod = sys.modules[self.main_args_builder['module']]
builder = getattr(builder_mod, self.main_args_builder['function'])
builder_signature = inspect.signature(builder)
builder_args = {}
for param in builder_signature.parameters:
if param in vars(ns):
builder_args[param] = vars(ns).pop(param)
builder_mod.__dict__[self.main_args_builder['instance']] = builder(
**builder_args
)
return ns
| (self, ns: 'Namespace') -> 'Namespace' |
20,975 | argufy.parser | __set_module_arguments | Separate and set module arguments from functions.
Paramters
---------
fn: Callable
Function used to seperate module arguments from function.
ns: Namespace
Argparse namespace object for a command.
Returns
-------
Namespace:
Argparse namespace object with command arguments.
| def __set_module_arguments(
self, fn: Callable[[F], F], ns: 'Namespace'
) -> 'Namespace':
"""Separate and set module arguments from functions.
Parameters
----------
fn: Callable
Function used to separate module arguments from function.
ns: Namespace
Argparse namespace object for a command.
Returns
-------
Namespace:
Argparse namespace object with command arguments.
"""
# XXX: only works on subcommands that use 'mod'
if 'mod' in ns:
mod = vars(ns).pop('mod')
else:
mod = None
# separate namespace from other variables
signature = inspect.signature(fn)
docstring = docparse(fn.__doc__) if fn.__doc__ else None
# inspect non-signature keyword args
keywords = (
self.__get_keyword_args(signature, docstring)
if docstring
else list(signature.parameters)
)
args = [
{k: vars(ns).pop(k)}
for k in list(vars(ns).keys()).copy()
if not signature.parameters.get(k) and k not in keywords
]
log.debug("arguments %s, %s", args, keywords)
# set module variables
if mod and self.use_module_args:
for arg in args:
for k, v in arg.items():
mod.__dict__[k] = v
return ns
| (self, fn: Callable[[~F], ~F], ns: 'Namespace') -> 'Namespace' |
20,976 | argufy.parser | __init__ | Initialize parser.
Parameters
----------
prog: str
The name of the program
usage: str
The string describing the program usage
description: str
Text to display before the argument help
epilog: str
Text to display after the argument help
parents: list
A list of ArgumentParser objects whose arguments should also
be included
formatter_class: Object
A class for customizing the help output
prefix_chars: char
The set of characters that prefix optional arguments
fromfile_prefix_chars: None
The set of characters that prefix files from which additional
arguments should be read
argument_default: None
The global default value for arguments
conflict_handler: Object
The strategy for resolving conflicting optionals
add_help: bool
Add a -h/--help option to the parser
allow_abbrev: bool
Allows long options to be abbreviated if the abbreviation is
unambiguous
| def __init__(self, **kwargs: Any) -> None:
"""Initialize parser.
Parameters
----------
prog: str
The name of the program
usage: str
The string describing the program usage
description: str
Text to display before the argument help
epilog: str
Text to display after the argument help
parents: list
A list of ArgumentParser objects whose arguments should also
be included
formatter_class: Object
A class for customizing the help output
prefix_chars: char
The set of characters that prefix optional arguments
fromfile_prefix_chars: None
The set of characters that prefix files from which additional
arguments should be read
argument_default: None
The global default value for arguments
conflict_handler: Object
The strategy for resolving conflicting optionals
add_help: bool
Add a -h/--help option to the parser
allow_abbrev: bool
Allows long options to be abbreviated if the abbreviation is
unambiguous
"""
# TODO: handle environment variables
module = self.__get_parent_module()
if module and module.__doc__:
docstring = docparse(module.__doc__)
if not kwargs.get('description'):
kwargs['description'] = docstring.short_description
if 'prog' not in kwargs:
kwargs['prog'] = module.__name__.split('.')[0]
if 'version' in kwargs:
self.prog_version = kwargs.pop('version')
# if 'prefix' in kwargs:
# self.prefix = kwargs.pop('prefix')
# else:
# self.prefix = kwargs['prog'].upper()
# log.debug(str(self.prefix))
if 'log_level' in kwargs:
log.setLevel(getattr(logging, kwargs.pop('log_level').upper()))
if 'log_handler' in kwargs:
log_handler = kwargs.pop('log_handler')
log.addHandler(logging.StreamHandler(log_handler))
self.use_module_args = kwargs.pop('use_module_args', False)
self.main_args_builder = kwargs.pop('main_args_builder', None)
self.command_type = kwargs.pop('command_type', None)
self.command_scheme = kwargs.pop('command_scheme', None)
if 'formatter_class' not in kwargs:
self.formatter_class = ArgufyHelpFormatter
super().__init__(**kwargs)
# NOTE: cannot move to formatter
self._positionals.title = ArgufyHelpFormatter.font(
self._positionals.title or 'arguments'
)
self._optionals.title = ArgufyHelpFormatter.font(
self._optionals.title or 'flags'
)
# XXX version lookup infinite loop when absent
if hasattr(self, 'prog_version'):
self.add_argument(
'--version',
action='version',
version=f"%(prog)s {self.prog_version}",
help='display application version',
)
| (self, **kwargs: Any) -> NoneType |
20,983 | argufy.parser | _get_excludes | Combine class excludes with instance. | @staticmethod
def _get_excludes(exclude_prefixes: Tuple[str, ...] = tuple()) -> tuple:
"""Combine class excludes with instance."""
if exclude_prefixes != ():
return tuple(exclude_prefixes) + Parser.exclude_prefixes
return Parser.exclude_prefixes
| (exclude_prefixes: Tuple[str, ...] = ()) -> tuple |
21,006 | argparse | add_argument |
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
| def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not callable(action_class):
raise ValueError('unknown action "%s"' % (action_class,))
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not callable(type_func):
raise ValueError('%r is not callable' % (type_func,))
if type_func is FileType:
raise ValueError('%r is a FileType class object, instance of it'
' must be passed' % (type_func,))
# raise an error if the metavar does not match the type
if hasattr(self, "_get_formatter"):
try:
self._get_formatter()._format_args(action, None)
except TypeError:
raise ValueError("length of metavar tuple does not match nargs")
return self._add_action(action)
| (self, *args, **kwargs) |
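Both documented calling styles, in a minimal standard-library example:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('path')                                   # positional: dest inferred
parser.add_argument('-v', '--verbose', action='store_true')   # optional: option strings
print(parser.parse_args(['data.txt', '--verbose']))
# Namespace(path='data.txt', verbose=True)
```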
21,008 | argufy.parser | add_arguments | Add arguments to parser/subparser.
Parameters
----------
obj: Any
Various modules, functions, or arguments that can be inspected.
parser: ArgumentParser, optional
Parser/Subparser that arguments will be added.
Returns
-------
self:
Return object itself to allow chaining functions.
| def add_arguments(
self, obj: Any, parser: Optional[ArgumentParser] = None
) -> 'Parser':
"""Add arguments to parser/subparser.
Parameters
----------
obj: Any
Various modules, functions, or arguments that can be inspected.
parser: ArgumentParser, optional
Parser/Subparser that arguments will be added.
Returns
-------
self:
Return object itself to allow chaining functions.
"""
if not parser:
parser = self
# prep object for inspection
docstring = docparse(obj.__doc__)
signature = inspect.signature(obj)
# populate subcommand with keyword arguments
for arg in signature.parameters:
param = signature.parameters[arg]
description = self.__get_description(arg, docstring)
log.debug("param: %s, %s", param, param.kind)
if not param.kind == Parameter.VAR_KEYWORD:
log.debug("param annotation: %s", param.annotation)
argument = self.__clean_args(Argument(description, param))
name = argument.pop('name')
# print(name, argument)
parser.add_argument(*name, **argument)
# populate options
# log.debug("params %s", params)
if docstring:
for arg in self.__get_keyword_args(signature, docstring):
description = self.__get_description(arg, docstring)
arguments = self.__clean_args(Argument(docstring=description))
parser.add_argument(f"--{arg.replace('_', '-')}", **arguments)
# log.debug("arguments %s", arguments)
# TODO for any docstring not collected parse here (args, kwargs)
# log.debug('docstring params', docstring.params)
return self
| (self, obj: Any, parser: Optional[argparse.ArgumentParser] = None) -> argufy.parser.Parser |
21,009 | argufy.parser | add_commands | Add commands.
Parameters
----------
module: ModuleType,
Module used to import functions for CLI commands.
parser: ArgumentParser, optional
Parser used to append subparsers to create subcommands.
exclude_prefixes: tuple,
Methods from a module that should be excluded.
command_type: str, optional
Choose format type of commands to be created.
Returns
-------
self:
Return object itself to allow chaining functions.
| def add_commands( # pylint: disable=too-many-locals,too-many-branches
self,
module: ModuleType,
parser: Optional[ArgumentParser] = None,
exclude_prefixes: tuple = tuple(),
command_type: Optional[str] = None,
) -> 'Parser':
"""Add commands.
Parameters
----------
module: ModuleType,
Module used to import functions for CLI commands.
parser: ArgumentParser, optional
Parser used to append subparsers to create subcommands.
exclude_prefixes: tuple,
Methods from a module that should be excluded.
command_type: str, optional
Choose format type of commands to be created.
Returns
-------
self:
Return object itself to allow chaining functions.
"""
# use self or an existing parser
if not parser:
parser = self
parser.formatter_class = ArgufyHelpFormatter
module_name = module.__name__.split('.')[-1]
docstring = docparse(module.__doc__) if module.__doc__ else None
excludes = Parser._get_excludes(exclude_prefixes)
# use existing subparser or create a new one
if not any(isinstance(x, SubParsersAction) for x in parser._actions):
# TODO: use metavar for hidden commands
parser.add_subparsers(dest=module_name, parser_class=Parser)
# check if command exists
command = next(
(x for x in parser._actions if isinstance(x, SubParsersAction)),
None,
)
# set command name scheme
if command_type is None:
command_type = self.command_type
# create subcommand for command
if command_type == 'subcommand':
if command:
msg = docstring.short_description if docstring else None
subcommand = command.add_parser(
module_name.replace('_', '-'),
description=msg,
formatter_class=self.formatter_class,
help=msg,
)
subcommand.set_defaults(mod=module)
# append subcommand to existing command or create a new one
return self.add_commands(
module=module,
parser=subcommand,
exclude_prefixes=Parser._get_excludes(exclude_prefixes),
command_type='command',
)
# TODO: separate into method
# pylint: disable-next=too-many-nested-blocks
for name, value in inspect.getmembers(module):
# TODO: Possible singledispatch candidate
if not name.startswith(excludes):
# skip classes for now
if inspect.isclass(value):
# TODO: check if dataclass instance
# TODO: check if class instance
continue # pragma: no cover
# create commands from functions
if inspect.isfunction(value):
# TODO: Turn parameter-less function into switch
# merge builder function main_args into parser
if (
self.main_args_builder
and name == self.main_args_builder['function']
):
self.add_arguments(value, parser)
# create commands from functions
elif (
module.__name__ == value.__module__
and not name.startswith(excludes)
or (
self.main_args_builder
and name == self.main_args_builder['function']
)
):
# create command from function
if command:
# control command name format
if self.command_scheme == 'chain':
cmd_name = f"{module_name}.{name}"
else:
cmd_name = name
msg = (
docparse(value.__doc__).short_description
if value.__doc__
else None
)
cmd = command.add_parser(
cmd_name.replace('_', '-'),
description=msg,
formatter_class=self.formatter_class,
help=msg,
)
cmd.set_defaults(mod=module, fn=value)
# add arguments from function
# log.debug("command %s %s %s", name, value, cmd)
self.add_arguments(value, cmd)
# create arguments from module variables
elif (
self.use_module_args
and not isinstance(value, ModuleType)
and not hasattr(typing, name)
and (
self.main_args_builder
and name != self.main_args_builder['instance']
)
):
# TODO: Reconcile inspect parameters with dict
# TODO: use argparse.SUPPRESS for hidden arguments
arguments = self.__clean_args(
Argument(
self.__get_description(name, docstring)
if docstring
else None,
self.__generate_parameter(name, module),
)
)
name = arguments.pop('name')
parser.add_argument(*name, **arguments)
return self
| (self, module: module, parser: Optional[argparse.ArgumentParser] = None, exclude_prefixes: tuple = (), command_type: Optional[str] = None) -> argufy.parser.Parser |
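A sketch of turning a module's public functions into subcommands; `mymodule` is a hypothetical importable module:

import mymodule
from argufy import Parser

parser = Parser()
# each qualifying function in mymodule becomes a subcommand;
# names matching the excluded prefixes are skipped
parser.add_commands(mymodule, exclude_prefixes=("test_",))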
21,013 | argufy.parser | dispatch | Call command with arguments.
Parameters
---------
args: Sequence[str]
Command line arguments passed to the parser.
ns: Optional[Namespace]
Argparse namespace object for a command.
Returns
-------
Optional[Callable[[F], F]]:
Call function with arguments.
| def dispatch(
self,
args: Sequence[str] = sys.argv[1:],
ns: Optional['Namespace'] = None,
) -> Optional[Callable[[F], F]]:
"""Call command with arguments.
Parameters
---------
args: Sequence[str]
Command line arguments passed to the parser.
ns: Optional[Namespace]
Argparse namespace object for a command.
Returns
-------
Optional[Callable[[F], F]]:
Call function with arguments.
"""
# parse variables
arguments, namespace = self.retrieve(args, ns)
log.debug("dispatch: %s, %s", arguments, namespace)
main_ns_result = self.__set_main_arguments(namespace)
# call function with variables
if 'fn' in namespace:
ns_vars = vars(namespace)
fn = ns_vars.pop('fn')
self.__set_module_arguments(fn, main_ns_result)
# XXX: only takes standard types
# attempt to plug parameters using inspect
splat = None
signature = inspect.signature(fn)
for arg in signature.parameters:
param = signature.parameters[arg]
if str(param).startswith('*') and not str(param).startswith(
'**'
):
splat = ns_vars.pop(arg)
# XXX: only works with splat and kwargs
if splat:
fn(*splat, **ns_vars)
else:
fn(**ns_vars)
return self.dispatch(arguments) if arguments != [] else None
| (self, args: Sequence[str] = ['--package', 'argufy', '--s3_bucket', '/data'], ns: Optional[ForwardRef('Namespace')] = None) -> Optional[Callable[[~F], ~F]] |
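Combined with `add_commands`, a plausible CLI entry point; `dispatch` parses `sys.argv[1:]` by default and calls the matched function (continuing the hypothetical `mymodule` above):

def main() -> None:
    Parser().add_commands(mymodule).dispatch()

if __name__ == "__main__":
    main()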
21,026 | argufy.parser | retrieve | Retrieve parsed values from CLI input.
Parameters
---------
args: Sequence[str]
Command line arguments passed to the parser.
ns: Optional[Namespace]
Argparse namespace object for a command.
Returns
-------
List[str]:
Argparse remaining unparsed arguments.
Namespace:
Argparse namespace object with command arguments.
| def retrieve(
self,
args: Sequence[str] = sys.argv[1:],
ns: Optional['Namespace'] = None,
) -> Tuple[List[str], 'Namespace']:
"""Retrieve parsed values from CLI input.
Parameters
---------
args: Sequence[str]
Command line arguments passed to the parser.
ns: Optional[Namespace]
Argparse namespace object for a command.
Returns
-------
List[str]:
Argparse remaining unparsed arguments.
Namespace:
Argparse namespace object with command arguments.
"""
# TODO: handle invalid argument
# show help when no arguments provided
if args == []:
args = ['--help'] # pragma: no cover
main_ns, main_args = self.parse_known_args(args, ns)
if main_args == [] and 'fn' in vars(main_ns):
return main_args, main_ns
# default to help message for subcommand
if 'mod' in vars(main_ns):
mod_args = []
mod_args.append(vars(main_ns)['mod'].__name__.split('.')[-1])
mod_args.append('--help')
self.parse_args(mod_args)
return main_args, main_ns
| (self, args: Sequence[str] = ['--package', 'argufy', '--s3_bucket', '/data'], ns: Optional[ForwardRef('Namespace')] = None) -> Tuple[List[str], ForwardRef('Namespace')] |
21,032 | tfields.core | Container |
Store lists of tfields objects. Save mechanisms are provided
Examples:
>>> import numpy as np
>>> import tfields
>>> sphere = tfields.Mesh3D.grid(
... (1, 1, 1),
... (-np.pi, np.pi, 3),
... (-np.pi / 2, np.pi / 2, 3),
... coord_sys='spherical')
>>> sphere2 = sphere.copy() * 3
>>> c = tfields.Container([sphere, sphere2])
>>> c.save("~/tmp/spheres.npz")
>>> c1 = tfields.Container.load("~/tmp/spheres.npz")
| class Container(AbstractFields):
"""
Store lists of tfields objects. Save mechanisms are provided
Examples:
>>> import numpy as np
>>> import tfields
>>> sphere = tfields.Mesh3D.grid(
... (1, 1, 1),
... (-np.pi, np.pi, 3),
... (-np.pi / 2, np.pi / 2, 3),
... coord_sys='spherical')
>>> sphere2 = sphere.copy() * 3
>>> c = tfields.Container([sphere, sphere2])
>>> c.save("~/tmp/spheres.npz")
>>> c1 = tfields.Container.load("~/tmp/spheres.npz")
"""
def __init__(self, *items, labels=None):
if len(items) == 1 and issubclass(type(items[0]), list):
# Container([a, b, ...]) - includes copy constructor
items = items[0]
if labels is None and issubclass(type(items), Container):
labels = items.labels
super().__init__(items)
self.labels = labels
def __setstate__(self, state):
self.__dict__ = state
def copy(self):
return deepcopy(self)
@property
def items(self):
"""
items of the container as a list
"""
return list(self)
@items.setter
def items(self, items):
del self[:]
for item in items:
self.append(item)
def _kwargs(self):
return {"labels": self.labels}
| (*items, labels=None) |
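A hedged continuation of the doctest above, exercising the `items` accessor and the optional `labels` attribute:

>>> c.labels = ['unit', 'scaled']  # one label per stored item
>>> len(c.items)
2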
21,033 | tfields.core | __getitem__ | null | def __getitem__(self, index):
if isinstance(index, str):
for field in self:
if hasattr(field, "name") and field.name == index:
return field
return super().__getitem__(index)
| (self, index) |
21,034 | tfields.core | __init__ | null | def __init__(self, *items, labels=None):
if len(items) == 1 and issubclass(type(items[0]), list):
# Container([a, b, ...]) - includes copy constructor
items = items[0]
if labels is None and issubclass(type(items), Container):
labels = items.labels
super().__init__(items)
self.labels = labels
| (self, *items, labels=None) |
21,035 | tfields.core | __setitem__ | null | def __setitem__(self, index, value):
if isinstance(index, str):
if not hasattr(value, "name"):
raise TypeError(
f"Value type {type(value)} does not support the 'name' field"
)
# We could set value.name = index but that would be very confusing since we do not copy
assert (
value.name == index
), "We do not dynamically want to change the name of a tensor"
for i, field in enumerate(self):
if hasattr(field, "name") and field.name == index:
index = i
break
else:
self.append(value)
return
super().__setitem__(index, value)
| (self, index, value) |
21,036 | tfields.core | __setstate__ | null | def __setstate__(self, state):
self.__dict__ = state
| (self, state) |
21,037 | tfields.core | _args | null | def _args(self):
return super()._args() + tuple(self)
| (self) |
21,038 | tfields.core | _as_dict |
Get an object representation in a dict format. This is necessary e.g. for saving the full
file uniquely in the npz format
Args:
recurse: dict of {attribute: callable(iterable) -> dict}
Returns:
dict: object packed as nested dictionary
| def _as_dict(self, recurse: typing.Dict[str, typing.Callable] = None) -> dict:
"""
Get an object representation in a dict format. This is necessary e.g. for saving the full
file uniquely in the npz format
Args:
recurse: dict of {attribute: callable(iterable) -> dict}
Returns:
dict: object packed as nested dictionary
"""
content = {}
# type
content["type"] = type(self).__name__
# args and kwargs
for base_attr, iterable in [
("args", ((str(i), arg) for i, arg in enumerate(self._args()))),
("kwargs", self._kwargs().items()),
]:
for attr, value in iterable:
attr = base_attr + _HIERARCHY_SEPARATOR + attr
if (recurse is not None and attr in recurse) or hasattr(
value, "_as_dict"
):
if recurse is not None and attr in recurse:
part_dict = recurse[attr](value)
else:
part_dict = value._as_dict() # pylint: disable=protected-access
for part_attr, part_value in part_dict.items():
content[attr + _HIERARCHY_SEPARATOR + part_attr] = part_value
else:
content[attr] = value
return content
| (self, recurse: Optional[Dict[str, Callable]] = None) -> dict |
21,039 | tfields.core | _kwargs | null | def _kwargs(self):
return {"labels": self.labels}
| (self) |
21,040 | rna.polymorphism | _save_dill | null | def _save_dill(self, path):
import dill # pylint:disable = import-outside-toplevel
with open(path, "wb") as out_file:
dill.dump(self, out_file)
| (self, path) |
21,041 | tfields.core | _save_npz |
Args:
path (open file or str/unicode): destination to save file to.
Examples:
Build some dummies:
>>> import tfields
>>> from tempfile import NamedTemporaryFile
>>> out_file = NamedTemporaryFile(suffix='.npz')
>>> p = tfields.Points3D([[1., 2., 3.], [4., 5., 6.], [1, 2, -6]],
... name='my_points')
>>> scalars = tfields.Tensors([0, 1, 2], name=42)
>>> vectors = tfields.Tensors([[0, 0, 0], [0, 0, 1], [0, -1, 0]])
>>> maps = [tfields.TensorFields([[0, 1, 2], [0, 1, 2]], [42, 21]),
... tfields.TensorFields([[1], [2]], [-42, -21])]
>>> m = tfields.TensorMaps(vectors, scalars,
... maps=maps)
Simply give the file name to save
>>> p.save(out_file.name)
>>> _ = out_file.seek(0) # this is only necessary in the test
>>> p1 = tfields.Points3D.load(out_file.name)
>>> assert p.equal(p1)
>>> assert p.coord_sys == p1.coord_sys
The fully nested structure of a TensorMaps object is reconstructed
>>> out_file_maps = NamedTemporaryFile(suffix='.npz')
>>> m.save(out_file_maps.name)
>>> _ = out_file_maps.seek(0)
>>> m1 = tfields.TensorMaps.load(out_file_maps.name,
... allow_pickle=True)
>>> assert m.equal(m1)
>>> assert m.maps[3].dtype == m1.maps[3].dtype
Names are preserved
>>> assert p.name == 'my_points'
>>> m.names
[42]
| def _save_npz(self, path):
"""
Args:
path (open file or str/unicode): destination to save file to.
Examples:
Build some dummies:
>>> import tfields
>>> from tempfile import NamedTemporaryFile
>>> out_file = NamedTemporaryFile(suffix='.npz')
>>> p = tfields.Points3D([[1., 2., 3.], [4., 5., 6.], [1, 2, -6]],
... name='my_points')
>>> scalars = tfields.Tensors([0, 1, 2], name=42)
>>> vectors = tfields.Tensors([[0, 0, 0], [0, 0, 1], [0, -1, 0]])
>>> maps = [tfields.TensorFields([[0, 1, 2], [0, 1, 2]], [42, 21]),
... tfields.TensorFields([[1], [2]], [-42, -21])]
>>> m = tfields.TensorMaps(vectors, scalars,
... maps=maps)
Simply give the file name to save
>>> p.save(out_file.name)
>>> _ = out_file.seek(0) # this is only necessary in the test
>>> p1 = tfields.Points3D.load(out_file.name)
>>> assert p.equal(p1)
>>> assert p.coord_sys == p1.coord_sys
The fully nested structure of a TensorMaps object is reconstructed
>>> out_file_maps = NamedTemporaryFile(suffix='.npz')
>>> m.save(out_file_maps.name)
>>> _ = out_file_maps.seek(0)
>>> m1 = tfields.TensorMaps.load(out_file_maps.name,
... allow_pickle=True)
>>> assert m.equal(m1)
>>> assert m.maps[3].dtype == m1.maps[3].dtype
Names are preserved
>>> assert p.name == 'my_points'
>>> m.names
[42]
"""
content_dict = self._as_dict()
content_dict["tfields_version"] = tfields.__version__
np.savez(path, **content_dict)
| (self, path) |
21,042 | rna.polymorphism | _save_pickle | null | def _save_pickle(self, path):
import pickle # pylint:disable = import-outside-toplevel
with open(path, "wb") as out_file:
pickle.dump(self, out_file)
| (self, path) |
21,044 | rna.polymorphism | reload |
Update the object from path. This requires the inheritor of this polymorphism
to provide the 'def __update__(self, other):' method
| def reload(self, path):
"""
Update the object from path. This requires the inheritor of this polymorphism
to provide the 'def __update__(self, other):' method
"""
if not hasattr(self, "__update__"):
raise NotImplementedError("__update__ required")
other = type(self).load(path)
self.__update__(other)
| (self, path) |
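For `reload` to work, the inheriting class must define `__update__`; a minimal hypothetical sketch (class and base names are illustrative):

class State(Storable):  # Storable: any class mixing in this load/save/reload behavior
    def __update__(self, other):
        # adopt the freshly loaded object's state in place
        self.__dict__.update(other.__dict__)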
21,045 | rna.polymorphism | save |
Saving by redirecting to the correct save method depending on path.
Implement _save_{extension}(self, path, **kwargs) for saving to your extension of choice
Args:
path (str | buffer)
*args: joined with path
create_parents: create the parent dir(s) if required
**kwargs:
extension (str | List[str]): explicit extension, required if path is a buffer;
if a list of extensions is given, save in all of those formats.
... remaining: forwarded to the extension-specific method
| def save(
self,
path: typing.Union[str, io.IOBase],
*args: [str],
create_parents: bool = True,
extension: typing.Optional[typing.Union[str, typing.List[str]]] = None,
**kwargs
) -> None:
"""
Saving by redirecting to the correct save method depending on path.
Implement _save_{extension}(self, path, **kwargs) for saving to your extension of choice
Args:
path (str | buffer)
*args: joined with path
create_parents: create the parent dir(s) if required
**kwargs:
extension (str | List[str]): explicit extension, required if path is a buffer;
if a list of extensions is given, save in all of those formats.
... remaining: forwarded to the extension-specific method
"""
# get the extension
if isinstance(extension, list):
for ext in extension:
LOGGER.info("Saving to path '%s' with extension %s.", path, ext)
self.save(
path, *args, create_parents=create_parents, extension=ext, **kwargs
)
return
if isinstance(path, (str, pathlib.Path)):
path = rna.path.resolve(path, *args)
if extension is None:
extension = rna.path.extension(path)
if not path.endswith(extension):
path += "." + extension
if not extension:
raise ValueError(
"Path {path} requires extension for auto rooting.".format(**locals())
)
# get the save method
if (
self.EXPLICIT_SAVE_METHODS is not None
and extension
in self.EXPLICIT_SAVE_METHODS # pylint:disable=unsupported-membership-test
):
# pylint:disable=unsubscriptable-object
method_name = self.EXPLICIT_SAVE_METHODS[extension]
else:
method_name = "_save_" + extension
try:
save_method = getattr(self, method_name)
except AttributeError as err:
raise NotImplementedError(
"Save method {method_name} for extension: {extension} required.".format(
**locals()
)
) from err
if create_parents:
rna.path.mkdir(path)
log = logging.getLogger(__name__)
log.info("Saving to path '%s' with method %s.", path, save_method)
save_method(path, **kwargs)
| (self, path: Union[str, io.IOBase], *args: [<class 'str'>], create_parents: bool = True, extension: Union[str, List[str], NoneType] = None, **kwargs) -> NoneType |
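Usage sketch, assuming an object whose class provides `_save_npz` and `_save_pickle` as shown in the entries above:

obj.save("~/tmp/result", extension="npz")              # -> ~/tmp/result.npz
obj.save("~/tmp/result", extension=["npz", "pickle"])  # one file per listed format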
21,046 | tfields.core | Maps |
Container for TensorFields sorted by dimension, i.e. indexing by dimension
Args:
*args (
List(TensorFields):
| List(Tuple(int, TensorFields)):
| TensorFields:
| Tuple(Tensors, *Fields)):
TODO: document
)
**kwargs: forwarded to SortedDict
TODO: further documentation
| class Maps(sortedcontainers.SortedDict, AbstractObject):
"""
Container for TensorFields sorted by dimension, i.e. indexing by dimension
Args:
*args (
List(TensorFields):
| List(Tuple(int, TensorFields)):
| TensorFields:
| Tuple(Tensors, *Fields)):
TODO: document
)
**kwargs: forwarded to SortedDict
TODO: further documentation
"""
def __init__(self, *args, **kwargs):
if args and args[0] is None:
# None key passed e.g. by copy. We do not change keys here.
args = args[1:]
if len(args) == 1 and issubclass(type(args[0]), (list, dict)):
new_args = []
if issubclass(type(args[0]), list):
# Maps([...])
iterator = args[0]
elif issubclass(type(args[0]), dict):
# Maps({}), Maps(Maps(...)) - includes Maps i.e. copy
iterator = args[0].items()
for entry in iterator:
dimension = None
if issubclass(type(entry), tuple):
if np.issubdtype(type(entry[0]), np.integer):
# Maps([(key, value), ...]), Maps({key: value, ...})
maps = self.to_map(entry[1], copy=True)
dimension = entry[0]
else:
# Maps([(tensors, field1, field2), ...])
maps = self.to_map(*entry, copy=True)
else:
# Maps([mp, mp, ...])
maps = self.to_map(entry, copy=True)
if dimension is None:
dimension = dim(maps)
new_args.append((dimension, maps))
args = (new_args,)
super().__init__(*args, **kwargs)
@staticmethod
def to_map(map_, *fields, copy=False, **kwargs):
"""
Args:
map_ (TensorFields)
*fields (Tensors)
copy (bool)
**kwargs: passed to TensorFields constructor
"""
if not copy:
if isinstance(map_, TensorFields) and not fields:
if not np.issubdtype(map_.dtype, np.integer):
map_ = map_.astype(int)
else:
copy = True
if copy: # not else, because in case of wrong map_ type we initialize
kwargs.setdefault("dtype", int)
map_ = TensorFields(map_, *fields, **kwargs)
return map_
def __setitem__(self, dimension, map_):
map_ = self.to_map(map_)
super().__setitem__(dimension, map_)
def _args(self):
return super()._args() + (list(self.items()),)
def _as_dict(self, recurse: typing.Dict[str, typing.Callable] = None) -> dict:
if recurse is None:
recurse = {}
def recurse_args_0(
iterable: typing.List[typing.Tuple[int, typing.Any]]
) -> dict:
# iterable is list of tuple
part_dict = {"type": "list"}
for i, (dim, tensor) in enumerate(iterable):
content = tensor._as_dict()
tuple_key = _HIERARCHY_SEPARATOR.join(["args", str(i), ""])
part_dict[tuple_key + "type"] = "tuple"
args_key = tuple_key + _HIERARCHY_SEPARATOR.join(["args", ""])
part_dict[
args_key + _HIERARCHY_SEPARATOR.join(["0", "args", "0"])
] = dim
part_dict[args_key + _HIERARCHY_SEPARATOR.join(["0", "type"])] = "int"
for key, value in content.items():
part_dict[args_key + _HIERARCHY_SEPARATOR.join(["1", key])] = value
return part_dict
attr = "args" + _HIERARCHY_SEPARATOR + str(0)
recurse[attr] = recurse_args_0
return super()._as_dict(recurse=recurse)
def equal(self, other, **kwargs):
"""
Test equality with other object.
Args:
**kwargs: passed to each item on equality check
"""
if not self.keys() == other.keys():
return False
# pylint:disable=consider-using-dict-items
for dimension in self.keys():
if not self[dimension].equal(other[dimension], **kwargs):
return False
return True
| (*args, **kwargs) |
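A minimal construction sketch for the argument forms listed above; the dimension key is inferred from the map width unless given explicitly (doctest-style, a hedged illustration):

>>> import tfields
>>> from tfields.core import Maps
>>> triangles = tfields.TensorFields([[0, 1, 2], [1, 2, 3]])
>>> maps = Maps([triangles])             # key inferred from the map dimension
>>> 3 in maps
True
>>> maps = Maps({5: [[0, 1, 2, 3, 4]]})  # explicit dimension key
>>> 5 in maps
True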
21,047 | sortedcontainers.sorteddict | copy | Return a shallow copy of the sorted dict.
Runtime complexity: `O(n)`
:return: new sorted dict
| def copy(self):
"""Return a shallow copy of the sorted dict.
Runtime complexity: `O(n)`
:return: new sorted dict
"""
return self.__class__(self._key, self.items())
| (self) |
21,048 | sortedcontainers.sorteddict | __delitem__ | Remove item from sorted dict identified by `key`.
``sd.__delitem__(key)`` <==> ``del sd[key]``
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> del sd['b']
>>> sd
SortedDict({'a': 1, 'c': 3})
>>> del sd['z']
Traceback (most recent call last):
...
KeyError: 'z'
:param key: `key` for item lookup
:raises KeyError: if key not found
| def __delitem__(self, key):
"""Remove item from sorted dict identified by `key`.
``sd.__delitem__(key)`` <==> ``del sd[key]``
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> del sd['b']
>>> sd
SortedDict({'a': 1, 'c': 3})
>>> del sd['z']
Traceback (most recent call last):
...
KeyError: 'z'
:param key: `key` for item lookup
:raises KeyError: if key not found
"""
dict.__delitem__(self, key)
self._list_remove(key)
| (self, key) |
21,049 | tfields.core | __init__ | null | def __init__(self, *args, **kwargs):
if args and args[0] is None:
# None key passed e.g. by copy. We do not change keys here.
args = args[1:]
if len(args) == 1 and issubclass(type(args[0]), (list, dict)):
new_args = []
if issubclass(type(args[0]), list):
# Maps([...])
iterator = args[0]
elif issubclass(type(args[0]), dict):
# Maps({}), Maps(Maps(...)) - includes Maps i.e. copy
iterator = args[0].items()
for entry in iterator:
dimension = None
if issubclass(type(entry), tuple):
if np.issubdtype(type(entry[0]), np.integer):
# Maps([(key, value), ...]), Maps({key: value, ...})
maps = self.to_map(entry[1], copy=True)
dimension = entry[0]
else:
# Maps([(tensors, field1, field2), ...])
maps = self.to_map(*entry, copy=True)
else:
# Maps([mp, mp, ...])
maps = self.to_map(entry, copy=True)
if dimension is None:
dimension = dim(maps)
new_args.append((dimension, maps))
args = (new_args,)
super().__init__(*args, **kwargs)
| (self, *args, **kwargs) |
21,050 | sortedcontainers.sorteddict | __ior__ | null | def __ior__(self, other):
self._update(other)
return self
| (self, other) |
21,051 | sortedcontainers.sorteddict | __iter__ | Return an iterator over the keys of the sorted dict.
``sd.__iter__()`` <==> ``iter(sd)``
Iterating the sorted dict while adding or deleting items may raise a
:exc:`RuntimeError` or fail to iterate over all keys.
| def __iter__(self):
"""Return an iterator over the keys of the sorted dict.
``sd.__iter__()`` <==> ``iter(sd)``
Iterating the sorted dict while adding or deleting items may raise a
:exc:`RuntimeError` or fail to iterate over all keys.
"""
return self._list_iter()
| (self) |
21,052 | sortedcontainers.sorteddict | __or__ | null | def __or__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
items = chain(self.items(), other.items())
return self.__class__(self._key, items)
| (self, other) |
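The union operators mirror dict's PEP 584 behavior while keeping the result sorted:

>>> from sortedcontainers import SortedDict
>>> SortedDict({'b': 2}) | {'a': 1}
SortedDict({'a': 1, 'b': 2})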
21,053 | sortedcontainers.sorteddict | __reduce__ | Support for pickle.
The tricks played with caching references in
:func:`SortedDict.__init__` confuse pickle so customize the reducer.
| def __reduce__(self):
"""Support for pickle.
The tricks played with caching references in
:func:`SortedDict.__init__` confuse pickle so customize the reducer.
"""
items = dict.copy(self)
return (type(self), (self._key, items))
| (self) |
21,054 | sortedcontainers.sorteddict | __repr__ | Return string representation of sorted dict.
``sd.__repr__()`` <==> ``repr(sd)``
:return: string representation
| """Sorted Dict
==============
:doc:`Sorted Containers<index>` is an Apache2 licensed Python sorted
collections library, written in pure-Python, and fast as C-extensions. The
:doc:`introduction<introduction>` is the best way to get started.
Sorted dict implementations:
.. currentmodule:: sortedcontainers
* :class:`SortedDict`
* :class:`SortedKeysView`
* :class:`SortedItemsView`
* :class:`SortedValuesView`
"""
import sys
import warnings
from itertools import chain
from .sortedlist import SortedList, recursive_repr
from .sortedset import SortedSet
###############################################################################
# BEGIN Python 2/3 Shims
###############################################################################
try:
from collections.abc import (
ItemsView, KeysView, Mapping, ValuesView, Sequence
)
except ImportError:
from collections import ItemsView, KeysView, Mapping, ValuesView, Sequence
###############################################################################
# END Python 2/3 Shims
###############################################################################
class SortedDict(dict):
"""Sorted dict is a sorted mutable mapping.
Sorted dict keys are maintained in sorted order. The design of sorted dict
is simple: sorted dict inherits from dict to store items and maintains a
sorted list of keys.
Sorted dict keys must be hashable and comparable. The hash and total
ordering of keys must not change while they are stored in the sorted dict.
Mutable mapping methods:
* :func:`SortedDict.__getitem__` (inherited from dict)
* :func:`SortedDict.__setitem__`
* :func:`SortedDict.__delitem__`
* :func:`SortedDict.__iter__`
* :func:`SortedDict.__len__` (inherited from dict)
Methods for adding items:
* :func:`SortedDict.setdefault`
* :func:`SortedDict.update`
Methods for removing items:
* :func:`SortedDict.clear`
* :func:`SortedDict.pop`
* :func:`SortedDict.popitem`
Methods for looking up items:
* :func:`SortedDict.__contains__` (inherited from dict)
* :func:`SortedDict.get` (inherited from dict)
* :func:`SortedDict.peekitem`
Methods for views:
* :func:`SortedDict.keys`
* :func:`SortedDict.items`
* :func:`SortedDict.values`
Methods for miscellany:
* :func:`SortedDict.copy`
* :func:`SortedDict.fromkeys`
* :func:`SortedDict.__reversed__`
* :func:`SortedDict.__eq__` (inherited from dict)
* :func:`SortedDict.__ne__` (inherited from dict)
* :func:`SortedDict.__repr__`
* :func:`SortedDict._check`
Sorted list methods available (applies to keys):
* :func:`SortedList.bisect_left`
* :func:`SortedList.bisect_right`
* :func:`SortedList.count`
* :func:`SortedList.index`
* :func:`SortedList.irange`
* :func:`SortedList.islice`
* :func:`SortedList._reset`
Additional sorted list methods available, if key-function used:
* :func:`SortedKeyList.bisect_key_left`
* :func:`SortedKeyList.bisect_key_right`
* :func:`SortedKeyList.irange_key`
Sorted dicts may only be compared for equality and inequality.
"""
def __init__(self, *args, **kwargs):
"""Initialize sorted dict instance.
Optional key-function argument defines a callable that, like the `key`
argument to the built-in `sorted` function, extracts a comparison key
from each dictionary key. If no function is specified, the default
compares the dictionary keys directly. The key-function argument must
be provided as a positional argument and must come before all other
arguments.
Optional iterable argument provides an initial sequence of pairs to
initialize the sorted dict. Each pair in the sequence defines the key
and corresponding value. If a key is seen more than once, the last
value associated with it is stored in the new sorted dict.
Optional mapping argument provides an initial mapping of items to
initialize the sorted dict.
If keyword arguments are given, the keywords themselves, with their
associated values, are added as items to the dictionary. If a key is
specified both in the positional argument and as a keyword argument,
the value associated with the keyword is stored in the
sorted dict.
Sorted dict keys must be hashable, per the requirement for Python's
dictionaries. Keys (or the result of the key-function) must also be
comparable, per the requirement for sorted lists.
>>> d = {'alpha': 1, 'beta': 2}
>>> SortedDict([('alpha', 1), ('beta', 2)]) == d
True
>>> SortedDict({'alpha': 1, 'beta': 2}) == d
True
>>> SortedDict(alpha=1, beta=2) == d
True
"""
if args and (args[0] is None or callable(args[0])):
_key = self._key = args[0]
args = args[1:]
else:
_key = self._key = None
self._list = SortedList(key=_key)
# Reaching through ``self._list`` repeatedly adds unnecessary overhead
# so cache references to sorted list methods.
_list = self._list
self._list_add = _list.add
self._list_clear = _list.clear
self._list_iter = _list.__iter__
self._list_reversed = _list.__reversed__
self._list_pop = _list.pop
self._list_remove = _list.remove
self._list_update = _list.update
# Expose some sorted list methods publicly.
self.bisect_left = _list.bisect_left
self.bisect = _list.bisect_right
self.bisect_right = _list.bisect_right
self.index = _list.index
self.irange = _list.irange
self.islice = _list.islice
self._reset = _list._reset
if _key is not None:
self.bisect_key_left = _list.bisect_key_left
self.bisect_key_right = _list.bisect_key_right
self.bisect_key = _list.bisect_key
self.irange_key = _list.irange_key
self._update(*args, **kwargs)
@property
def key(self):
"""Function used to extract comparison key from keys.
Sorted dict compares keys directly when the key function is none.
"""
return self._key
@property
def iloc(self):
"""Cached reference of sorted keys view.
Deprecated in version 2 of Sorted Containers. Use
:func:`SortedDict.keys` instead.
"""
# pylint: disable=attribute-defined-outside-init
try:
return self._iloc
except AttributeError:
warnings.warn(
'sorted_dict.iloc is deprecated.'
' Use SortedDict.keys() instead.',
DeprecationWarning,
stacklevel=2,
)
_iloc = self._iloc = SortedKeysView(self)
return _iloc
def clear(self):
"""Remove all items from sorted dict.
Runtime complexity: `O(n)`
"""
dict.clear(self)
self._list_clear()
def __delitem__(self, key):
"""Remove item from sorted dict identified by `key`.
``sd.__delitem__(key)`` <==> ``del sd[key]``
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> del sd['b']
>>> sd
SortedDict({'a': 1, 'c': 3})
>>> del sd['z']
Traceback (most recent call last):
...
KeyError: 'z'
:param key: `key` for item lookup
:raises KeyError: if key not found
"""
dict.__delitem__(self, key)
self._list_remove(key)
def __iter__(self):
"""Return an iterator over the keys of the sorted dict.
``sd.__iter__()`` <==> ``iter(sd)``
Iterating the sorted dict while adding or deleting items may raise a
:exc:`RuntimeError` or fail to iterate over all keys.
"""
return self._list_iter()
def __reversed__(self):
"""Return a reverse iterator over the keys of the sorted dict.
``sd.__reversed__()`` <==> ``reversed(sd)``
Iterating the sorted dict while adding or deleting items may raise a
:exc:`RuntimeError` or fail to iterate over all keys.
"""
return self._list_reversed()
def __setitem__(self, key, value):
"""Store item in sorted dict with `key` and corresponding `value`.
``sd.__setitem__(key, value)`` <==> ``sd[key] = value``
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict()
>>> sd['c'] = 3
>>> sd['a'] = 1
>>> sd['b'] = 2
>>> sd
SortedDict({'a': 1, 'b': 2, 'c': 3})
:param key: key for item
:param value: value for item
"""
if key not in self:
self._list_add(key)
dict.__setitem__(self, key, value)
_setitem = __setitem__
def __or__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
items = chain(self.items(), other.items())
return self.__class__(self._key, items)
def __ror__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
items = chain(other.items(), self.items())
return self.__class__(self._key, items)
def __ior__(self, other):
self._update(other)
return self
def copy(self):
"""Return a shallow copy of the sorted dict.
Runtime complexity: `O(n)`
:return: new sorted dict
"""
return self.__class__(self._key, self.items())
__copy__ = copy
@classmethod
def fromkeys(cls, iterable, value=None):
"""Return a new sorted dict initailized from `iterable` and `value`.
Items in the sorted dict have keys from `iterable` and values equal to
`value`.
Runtime complexity: `O(n*log(n))`
:return: new sorted dict
"""
return cls((key, value) for key in iterable)
def keys(self):
"""Return new sorted keys view of the sorted dict's keys.
See :class:`SortedKeysView` for details.
:return: new sorted keys view
"""
return SortedKeysView(self)
def items(self):
"""Return new sorted items view of the sorted dict's items.
See :class:`SortedItemsView` for details.
:return: new sorted items view
"""
return SortedItemsView(self)
def values(self):
"""Return new sorted values view of the sorted dict's values.
See :class:`SortedValuesView` for details.
:return: new sorted values view
"""
return SortedValuesView(self)
if sys.hexversion < 0x03000000:
def __make_raise_attributeerror(original, alternate):
# pylint: disable=no-self-argument
message = (
'SortedDict.{original}() is not implemented.'
' Use SortedDict.{alternate}() instead.'
).format(original=original, alternate=alternate)
def method(self):
# pylint: disable=missing-docstring,unused-argument
raise AttributeError(message)
method.__name__ = original # pylint: disable=non-str-assignment-to-dunder-name
method.__doc__ = message
return property(method)
iteritems = __make_raise_attributeerror('iteritems', 'items')
iterkeys = __make_raise_attributeerror('iterkeys', 'keys')
itervalues = __make_raise_attributeerror('itervalues', 'values')
viewitems = __make_raise_attributeerror('viewitems', 'items')
viewkeys = __make_raise_attributeerror('viewkeys', 'keys')
viewvalues = __make_raise_attributeerror('viewvalues', 'values')
class _NotGiven(object):
# pylint: disable=too-few-public-methods
def __repr__(self):
return '<not-given>'
__not_given = _NotGiven()
def pop(self, key, default=__not_given):
"""Remove and return value for item identified by `key`.
If the `key` is not found then return `default` if given. If `default`
is not given then raise :exc:`KeyError`.
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> sd.pop('c')
3
>>> sd.pop('z', 26)
26
>>> sd.pop('y')
Traceback (most recent call last):
...
KeyError: 'y'
:param key: `key` for item
:param default: `default` value if key not found (optional)
:return: value for item
:raises KeyError: if `key` not found and `default` not given
"""
if key in self:
self._list_remove(key)
return dict.pop(self, key)
else:
if default is self.__not_given:
raise KeyError(key)
return default
def popitem(self, index=-1):
"""Remove and return ``(key, value)`` pair at `index` from sorted dict.
Optional argument `index` defaults to -1, the last item in the sorted
dict. Specify ``index=0`` for the first item in the sorted dict.
If the sorted dict is empty, raises :exc:`KeyError`.
If the `index` is out of range, raises :exc:`IndexError`.
Runtime complexity: `O(log(n))`
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> sd.popitem()
('c', 3)
>>> sd.popitem(0)
('a', 1)
>>> sd.popitem(100)
Traceback (most recent call last):
...
IndexError: list index out of range
:param int index: `index` of item (default -1)
:return: key and value pair
:raises KeyError: if sorted dict is empty
:raises IndexError: if `index` out of range
"""
if not self:
raise KeyError('popitem(): dictionary is empty')
key = self._list_pop(index)
value = dict.pop(self, key)
return (key, value)
def peekitem(self, index=-1):
"""Return ``(key, value)`` pair at `index` in sorted dict.
Optional argument `index` defaults to -1, the last item in the sorted
dict. Specify ``index=0`` for the first item in the sorted dict.
Unlike :func:`SortedDict.popitem`, the sorted dict is not modified.
If the `index` is out of range, raises :exc:`IndexError`.
Runtime complexity: `O(log(n))`
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> sd.peekitem()
('c', 3)
>>> sd.peekitem(0)
('a', 1)
>>> sd.peekitem(100)
Traceback (most recent call last):
...
IndexError: list index out of range
:param int index: index of item (default -1)
:return: key and value pair
:raises IndexError: if `index` out of range
"""
key = self._list[index]
return key, self[key]
def setdefault(self, key, default=None):
"""Return value for item identified by `key` in sorted dict.
If `key` is in the sorted dict then return its value. If `key` is not
in the sorted dict then insert `key` with value `default` and return
`default`.
Optional argument `default` defaults to none.
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict()
>>> sd.setdefault('a', 1)
1
>>> sd.setdefault('a', 10)
1
>>> sd
SortedDict({'a': 1})
:param key: key for item
:param default: value for item (default None)
:return: value for item identified by `key`
"""
if key in self:
return self[key]
dict.__setitem__(self, key, default)
self._list_add(key)
return default
def update(self, *args, **kwargs):
"""Update sorted dict with items from `args` and `kwargs`.
Overwrites existing items.
Optional arguments `args` and `kwargs` may be a mapping, an iterable of
pairs or keyword arguments. See :func:`SortedDict.__init__` for
details.
:param args: mapping or iterable of pairs
:param kwargs: keyword arguments mapping
"""
if not self:
dict.update(self, *args, **kwargs)
self._list_update(dict.__iter__(self))
return
if not kwargs and len(args) == 1 and isinstance(args[0], dict):
pairs = args[0]
else:
pairs = dict(*args, **kwargs)
if (10 * len(pairs)) > len(self):
dict.update(self, pairs)
self._list_clear()
self._list_update(dict.__iter__(self))
else:
for key in pairs:
self._setitem(key, pairs[key])
_update = update
def __reduce__(self):
"""Support for pickle.
The tricks played with caching references in
:func:`SortedDict.__init__` confuse pickle so customize the reducer.
"""
items = dict.copy(self)
return (type(self), (self._key, items))
@recursive_repr()
def __repr__(self):
"""Return string representation of sorted dict.
``sd.__repr__()`` <==> ``repr(sd)``
:return: string representation
"""
_key = self._key
type_name = type(self).__name__
key_arg = '' if _key is None else '{0!r}, '.format(_key)
item_format = '{0!r}: {1!r}'.format
items = ', '.join(item_format(key, self[key]) for key in self._list)
return '{0}({1}{{{2}}})'.format(type_name, key_arg, items)
def _check(self):
"""Check invariants of sorted dict.
Runtime complexity: `O(n)`
"""
_list = self._list
_list._check()
assert len(self) == len(_list)
assert all(key in self for key in _list)
| (self) |
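Keys render in sorted order regardless of insertion order, and a key-function, when set, is printed first:

>>> from sortedcontainers import SortedDict
>>> SortedDict({'b': 2, 'a': 1})
SortedDict({'a': 1, 'b': 2})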
21,055 | sortedcontainers.sorteddict | __reversed__ | Return a reverse iterator over the keys of the sorted dict.
``sd.__reversed__()`` <==> ``reversed(sd)``
Iterating the sorted dict while adding or deleting items may raise a
:exc:`RuntimeError` or fail to iterate over all keys.
| def __reversed__(self):
"""Return a reverse iterator over the keys of the sorted dict.
``sd.__reversed__()`` <==> ``reversed(sd)``
Iterating the sorted dict while adding or deleting items may raise a
:exc:`RuntimeError` or fail to iterate over all keys.
"""
return self._list_reversed()
| (self) |
21,056 | sortedcontainers.sorteddict | __ror__ | null | def __ror__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
items = chain(other.items(), self.items())
return self.__class__(self._key, items)
| (self, other) |
21,057 | tfields.core | __setitem__ | null | def __setitem__(self, dimension, map_):
map_ = self.to_map(map_)
super().__setitem__(dimension, map_)
| (self, dimension, map_) |
21,058 | tfields.core | _args | null | def _args(self):
return super()._args() + (list(self.items()),)
| (self) |
21,059 | tfields.core | _as_dict | null | def _as_dict(self, recurse: typing.Dict[str, typing.Callable] = None) -> dict:
if recurse is None:
recurse = {}
def recurse_args_0(
iterable: typing.List[typing.Tuple[int, typing.Any]]
) -> dict:
# iterable is list of tuple
part_dict = {"type": "list"}
for i, (dim, tensor) in enumerate(iterable):
content = tensor._as_dict()
tuple_key = _HIERARCHY_SEPARATOR.join(["args", str(i), ""])
part_dict[tuple_key + "type"] = "tuple"
args_key = tuple_key + _HIERARCHY_SEPARATOR.join(["args", ""])
part_dict[
args_key + _HIERARCHY_SEPARATOR.join(["0", "args", "0"])
] = dim
part_dict[args_key + _HIERARCHY_SEPARATOR.join(["0", "type"])] = "int"
for key, value in content.items():
part_dict[args_key + _HIERARCHY_SEPARATOR.join(["1", key])] = value
return part_dict
attr = "args" + _HIERARCHY_SEPARATOR + str(0)
recurse[attr] = recurse_args_0
return super()._as_dict(recurse=recurse)
| (self, recurse: Optional[Dict[str, Callable]] = None) -> dict |
21,060 | sortedcontainers.sorteddict | _check | Check invariants of sorted dict.
Runtime complexity: `O(n)`
| def _check(self):
"""Check invariants of sorted dict.
Runtime complexity: `O(n)`
"""
_list = self._list
_list._check()
assert len(self) == len(_list)
assert all(key in self for key in _list)
| (self) |
21,061 | tfields.core | _kwargs |
Used for allowing the polymorphic signature Class(obj) as a copy/casting constructor
| def _kwargs(self) -> dict: # pylint: disable=no-self-use
"""
Used for allowing the polymorphic signature Class(obj) as a copy/casting constructor
"""
return dict()
| (self) -> dict |
21,065 | sortedcontainers.sorteddict | __setitem__ | Store item in sorted dict with `key` and corresponding `value`.
``sd.__setitem__(key, value)`` <==> ``sd[key] = value``
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict()
>>> sd['c'] = 3
>>> sd['a'] = 1
>>> sd['b'] = 2
>>> sd
SortedDict({'a': 1, 'b': 2, 'c': 3})
:param key: key for item
:param value: value for item
| def __setitem__(self, key, value):
"""Store item in sorted dict with `key` and corresponding `value`.
``sd.__setitem__(key, value)`` <==> ``sd[key] = value``
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict()
>>> sd['c'] = 3
>>> sd['a'] = 1
>>> sd['b'] = 2
>>> sd
SortedDict({'a': 1, 'b': 2, 'c': 3})
:param key: key for item
:param value: value for item
"""
if key not in self:
self._list_add(key)
dict.__setitem__(self, key, value)
| (self, key, value) |
21,066 | sortedcontainers.sorteddict | update | Update sorted dict with items from `args` and `kwargs`.
Overwrites existing items.
Optional arguments `args` and `kwargs` may be a mapping, an iterable of
pairs or keyword arguments. See :func:`SortedDict.__init__` for
details.
:param args: mapping or iterable of pairs
:param kwargs: keyword arguments mapping
| def update(self, *args, **kwargs):
"""Update sorted dict with items from `args` and `kwargs`.
Overwrites existing items.
Optional arguments `args` and `kwargs` may be a mapping, an iterable of
pairs or keyword arguments. See :func:`SortedDict.__init__` for
details.
:param args: mapping or iterable of pairs
:param kwargs: keyword arguments mapping
"""
if not self:
dict.update(self, *args, **kwargs)
self._list_update(dict.__iter__(self))
return
if not kwargs and len(args) == 1 and isinstance(args[0], dict):
pairs = args[0]
else:
pairs = dict(*args, **kwargs)
if (10 * len(pairs)) > len(self):
dict.update(self, pairs)
self._list_clear()
self._list_update(dict.__iter__(self))
else:
for key in pairs:
self._setitem(key, pairs[key])
| (self, *args, **kwargs) |
21,067 | sortedcontainers.sorteddict | clear | Remove all items from sorted dict.
Runtime complexity: `O(n)`
| def clear(self):
"""Remove all items from sorted dict.
Runtime complexity: `O(n)`
"""
dict.clear(self)
self._list_clear()
| (self) |
21,069 | tfields.core | equal |
Test equality with other object.
Args:
**kwargs: passed to each item on equality check
| def equal(self, other, **kwargs):
"""
Test equality with other object.
Args:
**kwargs: passed to each item on equality check
"""
if not self.keys() == other.keys():
return False
# pylint:disable=consider-using-dict-items
for dimension in self.keys():
if not self[dimension].equal(other[dimension], **kwargs):
return False
return True
| (self, other, **kwargs) |
21,070 | sortedcontainers.sorteddict | items | Return new sorted items view of the sorted dict's items.
See :class:`SortedItemsView` for details.
:return: new sorted items view
| def items(self):
"""Return new sorted items view of the sorted dict's items.
See :class:`SortedItemsView` for details.
:return: new sorted items view
"""
return SortedItemsView(self)
| (self) |
21,071 | sortedcontainers.sorteddict | keys | Return new sorted keys view of the sorted dict's keys.
See :class:`SortedKeysView` for details.
:return: new sorted keys view
| def keys(self):
"""Return new sorted keys view of the sorted dict's keys.
See :class:`SortedKeysView` for details.
:return: new sorted keys view
"""
return SortedKeysView(self)
| (self) |
21,072 | sortedcontainers.sorteddict | peekitem | Return ``(key, value)`` pair at `index` in sorted dict.
Optional argument `index` defaults to -1, the last item in the sorted
dict. Specify ``index=0`` for the first item in the sorted dict.
Unlike :func:`SortedDict.popitem`, the sorted dict is not modified.
If the `index` is out of range, raises :exc:`IndexError`.
Runtime complexity: `O(log(n))`
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> sd.peekitem()
('c', 3)
>>> sd.peekitem(0)
('a', 1)
>>> sd.peekitem(100)
Traceback (most recent call last):
...
IndexError: list index out of range
:param int index: index of item (default -1)
:return: key and value pair
:raises IndexError: if `index` out of range
| def peekitem(self, index=-1):
"""Return ``(key, value)`` pair at `index` in sorted dict.
Optional argument `index` defaults to -1, the last item in the sorted
dict. Specify ``index=0`` for the first item in the sorted dict.
Unlike :func:`SortedDict.popitem`, the sorted dict is not modified.
If the `index` is out of range, raises :exc:`IndexError`.
Runtime complexity: `O(log(n))`
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> sd.peekitem()
('c', 3)
>>> sd.peekitem(0)
('a', 1)
>>> sd.peekitem(100)
Traceback (most recent call last):
...
IndexError: list index out of range
:param int index: index of item (default -1)
:return: key and value pair
:raises IndexError: if `index` out of range
"""
key = self._list[index]
return key, self[key]
| (self, index=-1) |
21,073 | sortedcontainers.sorteddict | pop | Remove and return value for item identified by `key`.
If the `key` is not found then return `default` if given. If `default`
is not given then raise :exc:`KeyError`.
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> sd.pop('c')
3
>>> sd.pop('z', 26)
26
>>> sd.pop('y')
Traceback (most recent call last):
...
KeyError: 'y'
:param key: `key` for item
:param default: `default` value if key not found (optional)
:return: value for item
:raises KeyError: if `key` not found and `default` not given
| def pop(self, key, default=__not_given):
"""Remove and return value for item identified by `key`.
If the `key` is not found then return `default` if given. If `default`
is not given then raise :exc:`KeyError`.
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> sd.pop('c')
3
>>> sd.pop('z', 26)
26
>>> sd.pop('y')
Traceback (most recent call last):
...
KeyError: 'y'
:param key: `key` for item
:param default: `default` value if key not found (optional)
:return: value for item
:raises KeyError: if `key` not found and `default` not given
"""
if key in self:
self._list_remove(key)
return dict.pop(self, key)
else:
if default is self.__not_given:
raise KeyError(key)
return default
| (self, key, default=<not-given>) |
21,074 | sortedcontainers.sorteddict | popitem | Remove and return ``(key, value)`` pair at `index` from sorted dict.
Optional argument `index` defaults to -1, the last item in the sorted
dict. Specify ``index=0`` for the first item in the sorted dict.
If the sorted dict is empty, raises :exc:`KeyError`.
If the `index` is out of range, raises :exc:`IndexError`.
Runtime complexity: `O(log(n))`
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> sd.popitem()
('c', 3)
>>> sd.popitem(0)
('a', 1)
>>> sd.popitem(100)
Traceback (most recent call last):
...
IndexError: list index out of range
:param int index: `index` of item (default -1)
:return: key and value pair
:raises KeyError: if sorted dict is empty
:raises IndexError: if `index` out of range
| def popitem(self, index=-1):
"""Remove and return ``(key, value)`` pair at `index` from sorted dict.
Optional argument `index` defaults to -1, the last item in the sorted
dict. Specify ``index=0`` for the first item in the sorted dict.
If the sorted dict is empty, raises :exc:`KeyError`.
If the `index` is out of range, raises :exc:`IndexError`.
Runtime complexity: `O(log(n))`
>>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
>>> sd.popitem()
('c', 3)
>>> sd.popitem(0)
('a', 1)
>>> sd.popitem(100)
Traceback (most recent call last):
...
IndexError: list index out of range
:param int index: `index` of item (default -1)
:return: key and value pair
:raises KeyError: if sorted dict is empty
:raises IndexError: if `index` out of range
"""
if not self:
raise KeyError('popitem(): dictionary is empty')
key = self._list_pop(index)
value = dict.pop(self, key)
return (key, value)
| (self, index=-1) |
21,077 | sortedcontainers.sorteddict | setdefault | Return value for item identified by `key` in sorted dict.
If `key` is in the sorted dict then return its value. If `key` is not
in the sorted dict then insert `key` with value `default` and return
`default`.
Optional argument `default` defaults to none.
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict()
>>> sd.setdefault('a', 1)
1
>>> sd.setdefault('a', 10)
1
>>> sd
SortedDict({'a': 1})
:param key: key for item
:param default: value for item (default None)
:return: value for item identified by `key`
| def setdefault(self, key, default=None):
"""Return value for item identified by `key` in sorted dict.
If `key` is in the sorted dict then return its value. If `key` is not
in the sorted dict then insert `key` with value `default` and return
`default`.
Optional argument `default` defaults to none.
Runtime complexity: `O(log(n))` -- approximate.
>>> sd = SortedDict()
>>> sd.setdefault('a', 1)
1
>>> sd.setdefault('a', 10)
1
>>> sd
SortedDict({'a': 1})
:param key: key for item
:param default: value for item (default None)
:return: value for item identified by `key`
"""
if key in self:
return self[key]
dict.__setitem__(self, key, default)
self._list_add(key)
return default
| (self, key, default=None) |
21,078 | tfields.core | to_map |
Args:
map_ (TensorFields)
*fields (Tensors)
copy (bool)
**kwargs: passed to TensorFields constructor
| @staticmethod
def to_map(map_, *fields, copy=False, **kwargs):
"""
Args:
map_ (TensorFields)
*fields (Tensors)
copy (bool)
**kwargs: passed to TensorFields constructor
"""
if not copy:
if isinstance(map_, TensorFields) and not fields:
if not np.issubdtype(map_.dtype, np.integer):
map_ = map_.astype(int)
else:
copy = True
if copy: # not else, because in case of wrong map_ type we initialize
kwargs.setdefault("dtype", int)
map_ = TensorFields(map_, *fields, **kwargs)
return map_
| (map_, *fields, copy=False, **kwargs) |
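Sketch: `to_map` coerces plain sequences (or non-integer `TensorFields`) into integer-typed `TensorFields`, attaching any given fields (doctest-style, a hedged illustration):

>>> import numpy as np
>>> import tfields
>>> from tfields.core import Maps
>>> mp = Maps.to_map([[0, 1, 2]], tfields.Tensors([0.5]))
>>> bool(np.issubdtype(mp.dtype, np.integer))
True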
21,080 | sortedcontainers.sorteddict | values | Return new sorted values view of the sorted dict's values.
See :class:`SortedValuesView` for details.
:return: new sorted values view
| def values(self):
"""Return new sorted values view of the sorted dict's values.
See :class:`SortedValuesView` for details.
:return: new sorted values view
"""
return SortedValuesView(self)
| (self) |
21,081 | tfields.mesh_3d | Mesh3D |
Points3D child used as vertices combined with faces to build a geometrical mesh of triangles
Examples:
>>> import tfields
>>> import numpy as np
>>> m = tfields.Mesh3D([[1,2,3], [3,3,3], [0,0,0], [5,6,7]], faces=[[0, 1, 2], [1, 2, 3]])
>>> m.equal([[1, 2, 3],
... [3, 3, 3],
... [0, 0, 0],
... [5, 6, 7]])
True
>>> np.array_equal(m.faces, [[0, 1, 2], [1, 2, 3]])
True
conversion to points only
>>> tfields.Points3D(m).equal([[1, 2, 3],
... [3, 3, 3],
... [0, 0, 0],
... [5, 6, 7]])
True
Empty instances
>>> m = tfields.Mesh3D([]);
going from Mesh3D to Triangles3D instance is easy and will be cached.
>>> m = tfields.Mesh3D([[1,0,0], [0,1,0], [0,0,0]], faces=[[0, 1, 2]]);
>>> assert m.triangles().equal(tfields.Triangles3D([[ 1., 0., 0.],
... [ 0., 1., 0.],
... [ 0., 0., 0.]]))
a list of scalars is assigned to each face
>>> mScalar = tfields.Mesh3D([[1,0,0], [0,1,0], [0,0,0]],
... faces=([[0, 1, 2]], [.5]));
>>> np.array_equal(mScalar.faces.fields, [[ 0.5]])
True
adding together two meshes:
>>> m2 = tfields.Mesh3D([[1,0,0],[2,0,0],[0,3,0]],
... faces=([[0,1,2]], [.7]))
>>> msum = tfields.Mesh3D.merged(mScalar, m2)
>>> msum.equal([[ 1., 0., 0.],
... [ 0., 1., 0.],
... [ 0., 0., 0.],
... [ 1., 0., 0.],
... [ 2., 0., 0.],
... [ 0., 3., 0.]])
True
>>> assert np.array_equal(msum.faces, [[0, 1, 2], [3, 4, 5]])
Saving and reading
>>> from tempfile import NamedTemporaryFile
>>> outFile = NamedTemporaryFile(suffix='.npz')
>>> m.save(outFile.name)
>>> _ = outFile.seek(0)
>>> m1 = tfields.Mesh3D.load(outFile.name, allow_pickle=True)
>>> bool(np.all(m == m1))
True
>>> assert np.array_equal(m1.faces, np.array([[0, 1, 2]]))
| class Mesh3D(tfields.TensorMaps):
# pylint: disable=R0904
"""
Points3D child used as vertices combined with faces to build a geometrical mesh of triangles
Examples:
>>> import tfields
>>> import numpy as np
>>> m = tfields.Mesh3D([[1,2,3], [3,3,3], [0,0,0], [5,6,7]], faces=[[0, 1, 2], [1, 2, 3]])
>>> m.equal([[1, 2, 3],
... [3, 3, 3],
... [0, 0, 0],
... [5, 6, 7]])
True
>>> np.array_equal(m.faces, [[0, 1, 2], [1, 2, 3]])
True
conversion to points only
>>> tfields.Points3D(m).equal([[1, 2, 3],
... [3, 3, 3],
... [0, 0, 0],
... [5, 6, 7]])
True
Empty instances
>>> m = tfields.Mesh3D([]);
going from Mesh3D to Triangles3D instance is easy and will be cached.
>>> m = tfields.Mesh3D([[1,0,0], [0,1,0], [0,0,0]], faces=[[0, 1, 2]]);
>>> assert m.triangles().equal(tfields.Triangles3D([[ 1., 0., 0.],
... [ 0., 1., 0.],
... [ 0., 0., 0.]]))
a list of scalars is assigned to each face
>>> mScalar = tfields.Mesh3D([[1,0,0], [0,1,0], [0,0,0]],
... faces=([[0, 1, 2]], [.5]));
>>> np.array_equal(mScalar.faces.fields, [[ 0.5]])
True
adding together two meshes:
>>> m2 = tfields.Mesh3D([[1,0,0],[2,0,0],[0,3,0]],
... faces=([[0,1,2]], [.7]))
>>> msum = tfields.Mesh3D.merged(mScalar, m2)
>>> msum.equal([[ 1., 0., 0.],
... [ 0., 1., 0.],
... [ 0., 0., 0.],
... [ 1., 0., 0.],
... [ 2., 0., 0.],
... [ 0., 3., 0.]])
True
>>> assert np.array_equal(msum.faces, [[0, 1, 2], [3, 4, 5]])
Saving and reading
>>> from tempfile import NamedTemporaryFile
>>> outFile = NamedTemporaryFile(suffix='.npz')
>>> m.save(outFile.name)
>>> _ = outFile.seek(0)
>>> m1 = tfields.Mesh3D.load(outFile.name, allow_pickle=True)
>>> bool(np.all(m == m1))
True
>>> assert np.array_equal(m1.faces, np.array([[0, 1, 2]]))
"""
def __new__(cls, tensors, *fields, **kwargs):
kwargs["dim"] = 3
if "maps" in kwargs and "faces" in kwargs:
raise ValueError("Conflicting options maps and faces")
faces = kwargs.pop("faces", None)
maps = kwargs.pop("maps", None)
if faces is not None:
if len(faces) == 0:
# faces = []
faces = np.empty((0, 3))
maps = [faces]
if maps is not None:
kwargs["maps"] = maps
obj = super(Mesh3D, cls).__new__(cls, tensors, *fields, **kwargs)
if len(obj.maps) > 1:
raise ValueError("Mesh3D only allows one map")
if obj.maps and (len(obj.maps) > 1 or obj.maps.keys()[0] != 3):
raise ValueError("Face dimension should be 3")
return obj
def _save_obj(self, path, **kwargs):
"""
Save obj as wavefront/.obj file
"""
obj = kwargs.pop("object", None)
group = kwargs.pop("group", None)
cmap = kwargs.pop("cmap", "viridis")
map_index = kwargs.pop("map_index", None)
path = path.replace(".obj", "")
directory, name = os.path.split(path)
if map_index is not None:
scalars = self.maps[3].fields[map_index]
min_scalar = scalars[~np.isnan(scalars)].min()
max_scalar = scalars[~np.isnan(scalars)].max()
vmin = kwargs.pop("vmin", min_scalar)
vmax = kwargs.pop("vmax", max_scalar)
if vmin == vmax:
if vmin == 0.0:
vmax = 1.0
else:
vmin = 0.0
import matplotlib.colors as colors
import matplotlib.pyplot as plt
norm = colors.Normalize(vmin, vmax)
color_map = plt.get_cmap(cmap)
else:
# norm = None switches off coloring the triangles and thus no
# materials are produced
norm = None
if len(kwargs) != 0:
raise ValueError("Unused arguments.")
if norm is not None:
mat_name = name + "_frame_{0}.mat".format(map_index)
scalars[np.isnan(scalars)] = min_scalar - 1
sorted_scalars = scalars[scalars.argsort()]
sorted_scalars[sorted_scalars == min_scalar - 1] = np.nan
sorted_faces = self.faces[scalars.argsort()]
scalar_set = np.unique(sorted_scalars)
scalar_set[scalar_set == min_scalar - 1] = np.nan
mat_path = os.path.join(directory, mat_name)
with open(mat_path, "w") as mf:
for s in scalar_set:
if np.isnan(s):
mf.write("newmtl nan")
mf.write("Kd 0 0 0\n\n")
else:
mf.write("newmtl mtl_{0}\n".format(s))
mf.write(
"Kd {c[0]} {c[1]} {c[2]}\n\n".format(c=color_map(norm(s)))
)
else:
sorted_faces = self.faces
# writing of the obj file
with open(path + ".obj", "w") as f:
f.write("# File saved with tfields Mesh3D._save_obj method\n\n")
if norm is not None:
f.write("mtllib ./{0}\n\n".format(mat_name))
if obj is not None:
f.write("o {0}\n".format(obj))
if group is not None:
f.write("g {0}\n".format(group))
for vertex in self:
f.write("v {v[0]} {v[1]} {v[2]}\n".format(v=vertex))
last_scalar = None
for i, face in enumerate(sorted_faces + 1):
if norm is not None:
if last_scalar != sorted_scalars[i]:
last_scalar = sorted_scalars[i]
f.write("usemtl mtl_{0}\n".format(last_scalar))
f.write("f {f[0]} {f[1]} {f[2]}\n".format(f=face))
@classmethod
def _load_obj(cls, path, *group_names):
"""
Factory method
Given a path to a obj/wavefront file, construct the object
"""
import csv
log = logging.getLogger()
with open(path, mode="r") as f:
reader = csv.reader(f, delimiter=" ")
groups = []
group = None
vertex_no = 1
for line in reader:
if not line:
continue
if line[0] == "#":
continue
if line[0] == "g":
if group:
groups.append(group)
group = dict(name=line[1], vertices={}, faces=[])
elif line[0] == "v":
if not group:
log.debug("No group found. Setting default 'Group'")
group = dict(name="Group", vertices={}, faces=[])
vertex = list(map(float, line[1:4]))
group["vertices"][vertex_no] = vertex
vertex_no += 1
elif line[0] == "f":
face = []
for v in line[1:]:
w = v.split("/")
face.append(int(w[0]))
group["faces"].append(face)
else:
groups.append(group)
vertices = []
for g in groups[:]:
vertices.extend(g["vertices"].values())
if len(group_names) != 0:
groups = [g for g in groups if g["name"] in group_names]
faces = []
for g in groups:
faces.extend(g["faces"])
faces = np.add(np.array(faces), -1).tolist()
"""
Building the class from retrieved vertices and faces
"""
if len(vertices) == 0:
return cls([])
face_lengths = [len(face) for face in faces]
for i in reversed(range(len(face_lengths))):
length = face_lengths[i]
if length == 3:
continue
if length == 4:
log.warning(
"Given a Rectangle. I will split it but "
"sometimes the order is different."
)
faces.insert(i + 1, faces[i][2:] + faces[i][:1])
faces[i] = faces[i][:3]
else:
raise NotImplementedError()
mesh = cls(vertices, faces=faces)
if group_names:
mesh = mesh.cleaned()
return mesh
def _save_stl(self, path, **kwargs):
"""
Saves the mesh in stl format
"""
self.triangles()._save_stl(path, **kwargs)
@classmethod
def _load_stl(cls, path):
"""
Factory method
Given a path to a stl file, construct the object
"""
return tfields.Triangles3D.load(path).mesh()
@classmethod
def plane(cls, *base_vectors, **kwargs):
"""
Alternative constructor for creating a plane from base vectors
Args:
*base_vectors: see grid constructors in core. One base_vector has
to be one-dimensional
**kwargs: forwarded to __new__
"""
vertices = tfields.Tensors.grid(*base_vectors, **kwargs)
base_vectors = tfields.lib.grid.ensure_complex(*base_vectors)
base_vectors = tfields.lib.grid.to_base_vectors(*base_vectors)
fix_coord = None
for coord in range(3):
if len(base_vectors[coord]) > 1:
continue
if len(base_vectors[coord]) == 0:
continue
fix_coord = coord
if fix_coord is None:
raise ValueError("Describe a plane with one variable fiexed")
var_coords = list(range(3))
var_coords.pop(var_coords.index(fix_coord))
faces = []
base0, base1 = base_vectors[var_coords[0]], base_vectors[var_coords[1]]
for i1 in range(len(base1) - 1):
for i0 in range(len(base0) - 1):
idx_top_left = len(base1) * (i0 + 0) + (i1 + 0)
idx_top_right = len(base1) * (i0 + 0) + (i1 + 1)
idx_bot_left = len(base1) * (i0 + 1) + (i1 + 0)
idx_bot_right = len(base1) * (i0 + 1) + (i1 + 1)
faces.append([idx_top_left, idx_top_right, idx_bot_left])
faces.append([idx_top_right, idx_bot_left, idx_bot_right])
inst = cls.__new__(cls, vertices, faces=faces)
return inst
@classmethod
def grid(cls, *base_vectors, **kwargs):
"""
Construct 'cuboid' along base_vectors
Examples:
Building symmetric geometries was never this easy:
Approximated sphere with radius 1, translated in y by 2 units
>>> import numpy as np
>>> import tfields
>>> sphere = tfields.Mesh3D.grid((1, 1, 1),
... (-np.pi, np.pi, 12),
... (-np.pi / 2, np.pi / 2, 12),
... coord_sys='spherical')
>>> sphere.transform('cartesian')
>>> sphere[:, 1] += 2
Octahedron
>>> oktaeder = tfields.Mesh3D.grid((1, 1, 1),
... (-np.pi, np.pi, 5),
... (-np.pi / 2, np.pi / 2, 3),
... coord_sys='spherical')
>>> oktaeder.transform('cartesian')
Cube with edge length of 2 units
>>> cube = tfields.Mesh3D.grid((-1, 1, 2),
... (-1, 1, 2),
... (-5, -3, 2))
Cylinder
>>> cylinder = tfields.Mesh3D.grid((1, 1, 1),
... (-np.pi, np.pi, 12),
... (-5, 3, 12),
... coord_sys='cylinder')
>>> cylinder.transform('cartesian')
"""
if not len(base_vectors) == 3:
raise AttributeError("3 base_vectors vectors required")
base_vectors = tfields.lib.grid.ensure_complex(*base_vectors)
base_vectors = tfields.lib.grid.to_base_vectors(*base_vectors)
indices = [0, -1]
coords = range(3)
base_lengths_above_1 = [len(b) > 1 for b in base_vectors]
# if one plane is given: rearrange indices and coords
if not all(base_lengths_above_1):
indices = [0]
for i, b in enumerate(base_lengths_above_1):
if not b:
coords = [i]
break
base_vectors = list(base_vectors)
planes = []
for ind in indices:
for coord in coords:
basePart = base_vectors[:]
basePart[coord] = np.array([base_vectors[coord][ind]], dtype=float)
planes.append(cls.plane(*basePart, **kwargs))
inst = cls.merged(*planes, **kwargs)
return inst
@property
def faces(self):
if self.maps:
return self.maps[3]
else:
logging.warning(
"No faces found. Mesh has {x} vertices.".format(x=len(self))
)
return tfields.Maps.to_map([], dim=3)
@faces.setter
def faces(self, faces):
mp = tfields.Maps.to_map(faces, dim=3)
self.maps[tfields.dim(mp)] = mp
@cached_property()
def _triangles(self):
"""
with the decorator, this should be handled like an attribute though it
is a method
"""
if self.faces.size == 0:
return tfields.Triangles3D([])
tris = tfields.Tensors(self[self.maps[3].flatten()])
fields = self.maps[3].fields
return tfields.Triangles3D(tris, *fields)
def triangles(self):
"""
Cached method to retrieve the triangles, belonging to this mesh
Examples:
>>> import tfields
>>> mesh = tfields.Mesh3D.grid((0, 1, 3), (1, 2, 3), (2, 3, 3))
>>> assert mesh.triangles() is mesh.triangles()
"""
return self._triangles
def centroids(self):
return self.triangles().centroids()
@cached_property()
def _planes(self):
if self.faces.size == 0:
return tfields.Planes3D([])
return tfields.Planes3D(self.centroids(), self.triangles().norms())
def planes(self):
return self._planes
def nfaces(self):
return self.faces.shape[0]
def in_faces(self, points, delta, **kwargs):
"""
Check whether points lie within the mesh triangles using the barycentric
technique (see Triangles3D.in_triangles). When many requests are made on
large meshes, this can be sped up considerably by requesting the property
self.tree, or by setting self.tree = <saved tree>, before calling in_faces.
"""
key = "mesh_tree"
if hasattr(self, "_cache") and key in self._cache:
log = logging.getLogger()
log.info("Using cached decision tree to speed up point - face mapping.")
indices = self.tree.in_faces(points, delta, **kwargs)
else:
indices = self.triangles().in_triangles(points, delta, **kwargs)
return indices
@property
def tree(self):
"""
Cached property to retrieve a bounding_box Searcher. This searcher can
greatly speed up 'in_faces' searches
Examples:
>>> import numpy as np
>>> import tfields
>>> mesh = tfields.Mesh3D.grid((0, 1, 3), (1, 2, 3), (2, 3, 3))
>>> _ = mesh.tree
>>> assert hasattr(mesh, '_cache')
>>> assert 'mesh_tree' in mesh._cache
>>> face_indices = mesh.in_faces(tfields.Points3D([[0.2, 1.2, 2.0]]),
... 0.00001)
You might want to know the number of points per face
>>> unique, counts = np.unique(face_indices, return_counts=True)
>>> dict(zip(unique, counts)) # one point on triangle number 16
{16: 1}
"""
if not hasattr(self, "_cache"):
self._cache = {}
key = "mesh_tree"
if key in self._cache:
tree = self._cache[key]
else:
tree = tfields.bounding_box.Searcher(self)
self._cache[key] = tree
return tree
@tree.setter
def tree(self, tree):
if not hasattr(self, "_cache"):
self._cache = {}
key = "mesh_tree"
self._cache[key] = tree
def remove_faces(self, face_delete_mask):
"""
Remove faces where face_delete_mask is True
"""
face_delete_mask = np.array(face_delete_mask, dtype=bool)
self.faces = self.faces[~face_delete_mask]
self.faces.fields = self.faces.fields[~face_delete_mask]
def template(self, sub_mesh):
"""
'Manual' way to build a template that can be used with self.cut
Returns:
Mesh3D: template (see cut), can be used as template to retrieve
sub_mesh from self instance
Examples:
>>> import tfields
>>> from sympy.abc import y
>>> mp = tfields.TensorFields([[0,1,2],[2,3,0],[3,2,5],[5,4,3]],
... [1, 2, 3, 4])
>>> m = tfields.Mesh3D([[0,0,0], [1,0,0], [1,1,0], [0,1,0], [0,2,0], [1,2,0]],
... maps=[mp])
>>> m_cut = m.cut(y < 1.5, at_intersection='split')
>>> template = m.template(m_cut)
>>> assert m_cut.equal(m.cut(template))
TODO:
fields template not yet implemented
"""
cents = tfields.Tensors(sub_mesh.centroids())
face_indices = self.in_faces(cents, delta=None)
inst = sub_mesh.copy()
if inst.maps:
inst.maps[3].fields = [tfields.Tensors(face_indices, dim=1)]
else:
inst.maps = [
tfields.TensorFields([], tfields.Tensors([], dim=1), dim=3, dtype=int)
]
return inst
def project(
self,
tensor_field,
delta=None,
merge_functions=None,
point_face_assignment=None,
return_point_face_assignment=False,
):
"""
Project the points of the tensor_field to a copy of the mesh
and set the face values according to the field in the maps fields.
If no field is present in tensor_field, the number of points
falling into each face is counted.
Args:
tensor_field (Tensors | TensorFields)
delta (float | None): forwarded to Mesh3D.in_faces
merge_functions (callable): if multiple Tensors lie in the same face,
they are mapped with the merge_function to one value
point_face_assignment (np.array, dtype=int): array assigning each
point to a face. Each entry position corresponds to a point of the
tensor, each entry value is the index of the assigned face
return_point_face_assignment (bool): if true, return the
point_face_assignment
Examples:
>>> import tfields
>>> import numpy as np
>>> mp = tfields.TensorFields([[0,1,2],[2,3,0],[3,2,5],[5,4,3]],
... [1, 2, 3, 4])
>>> m = tfields.Mesh3D([[0,0,0], [1,0,0], [1,1,0], [0,1,0], [0,2,0], [1,2,0]],
... maps=[mp])
>>> points = tfields.Tensors([[0.5, 0.2, 0.0],
... [0.5, 0.02, 0.0],
... [0.5, 0.8, 0.0],
... [0.5, 0.8, 0.1]]) # not contained
Projecting points onto the mesh gives the count
>>> m_points = m.project(points, delta=0.01)
>>> list(m_points.maps[3].fields[0])
[2, 1, 0, 0]
TensorFields of arbitrary size are projected,
combining the fields automatically
>>> fields = [tfields.Tensors([1,3,42, -1]),
... tfields.Tensors([[0,1,2], [2,3,4], [3,4,5], [-1] * 3]),
... tfields.Tensors([[[0, 0]] * 2,
... [[2, 2]] * 2,
... [[3, 3]] * 2,
... [[9, 9]] * 2])]
>>> tf = tfields.TensorFields(points, *fields)
>>> m_tf = m.project(tf, delta=0.01)
>>> assert m_tf.maps[3].fields[0].equal([2, 42, np.nan, np.nan], equal_nan=True)
>>> assert m_tf.maps[3].fields[1].equal([[1, 2, 3],
... [3, 4, 5],
... [np.nan] * 3,
... [np.nan] * 3],
... equal_nan=True)
>>> assert m_tf.maps[3].fields[2].equal([[[1, 1]] * 2,
... [[3, 3]] * 2,
... [[np.nan, np.nan]] * 2,
... [[np.nan, np.nan]] * 2],
... equal_nan=True)
Returning the calculated point_face_assignment can speed up
subsequent projections
>>> m_tf, point_face_assignment = m.project(tf, delta=0.01,
... return_point_face_assignment=True)
>>> m_tf_fast = m.project(tf, delta=0.01, point_face_assignment=point_face_assignment)
>>> assert m_tf.equal(m_tf_fast, equal_nan=True)
"""
if not issubclass(type(tensor_field), tfields.Tensors):
tensor_field = tfields.TensorFields(tensor_field)
inst = self.copy()
# setup empty map fields and collect fields
n_faces = len(self.maps[3])
point_indices = np.arange(len(tensor_field))
if not hasattr(tensor_field, "fields") or len(tensor_field.fields) == 0:
# if no fields exist, use int-type fields and empty_map_fields
# in order to generate a count
fields = [np.full(len(tensor_field), 1, dtype=int)]
empty_map_fields = [tfields.Tensors(np.full(n_faces, 0, dtype=int))]
if merge_functions is None:
merge_functions = [np.sum]
else:
fields = tensor_field.fields
empty_map_fields = []
for field in fields:
cls = type(field)
kwargs = {key: getattr(field, key) for key in cls.__slots__}
shape = (n_faces,) + field.shape[1:]
empty_map_fields.append(cls(np.full(shape, np.nan), **kwargs))
if merge_functions is None:
merge_functions = [lambda x: np.mean(x, axis=0)] * len(fields)
# build point_face_assignment if not given.
if point_face_assignment is not None:
if len(point_face_assignment) != len(tensor_field):
raise ValueError("Template needs same lenght as tensor_field")
else:
point_face_assignment = self.in_faces(tensor_field, delta=delta)
point_face_assignment_set = set(point_face_assignment)
# merge the fields according to point_face_assignment
map_fields = []
for field, map_field, merge_function in zip(
fields, empty_map_fields, merge_functions
):
for i, f_index in enumerate(point_face_assignment_set):
if f_index == -1:
# point could not be mapped
continue
point_in_face_indices = point_indices[point_face_assignment == f_index]
res = field[point_in_face_indices]
if len(res) == 1:
map_field[f_index] = res
else:
map_field[f_index] = merge_function(res)
map_fields.append(map_field)
inst.maps[3].fields = map_fields
if return_point_face_assignment:
return inst, point_face_assignment
return inst
def _cut_sympy(self, expression, at_intersection="remove", _in_recursion=False):
"""
Partition the mesh with the cuts given and return the template
"""
eps = 0.000000001
# direct return if self is empty
if len(self) == 0:
return self.copy(), self.copy()
inst = self.copy()
"""
add the indices of the vertices and maps to the fields. They will be
removed afterwards
"""
if not _in_recursion:
inst.fields.append(tfields.Tensors(np.arange(len(inst))))
for mp in inst.maps.values():
mp.fields.append(tfields.Tensors(np.arange(len(mp))))
# mask for points that fulfill the cut expression
mask = inst.evalf(expression)
# remove the points
if not any(~mask):
# all vertices are valid
inst = inst[mask]
elif all(~mask):
# no vertex is valid
inst = inst[mask]
elif at_intersection == "keep":
expression_parts = tfields.lib.symbolics.split_expression(expression)
if len(expression_parts) > 1:
new_mesh = inst.copy()
for expr_part in expression_parts:
inst, _ = inst._cut_sympy(
expr_part, at_intersection=at_intersection, _in_recursion=True
)
elif len(expression_parts) == 1:
face_delete_indices = set([])
for i, face in enumerate(inst.maps[3]):
"""
vertices_rejected is a mask for each face that is True, where
a Point is on the rejected side of the plane
"""
vertices_rejected = [~mask[f] for f in face]
if all(vertices_rejected):
# delete face
face_delete_indices.add(i)
mask = np.full(len(inst.maps[3]), True, dtype=bool)
for face_idx in range(len(inst.maps[3])):
if face_idx in face_delete_indices:
mask[face_idx] = False
inst.maps[3] = inst.maps[3][mask]
else:
raise ValueError("Sympy expression is not splitable.")
inst = inst.cleaned()
elif at_intersection == "split" or at_intersection == "split_rough":
"""
add vertices and faces that are at the border of the cuts
"""
expression_parts = tfields.lib.symbolics.split_expression(expression)
if len(expression_parts) > 1:
new_mesh = inst.copy()
if at_intersection == "split_rough":
"""
the following is meant to speed up the process. The problem is
that triangles can exist where all vertices lie outside the cut,
yet part of the triangle's area still overlaps with the cut.
These lie on the intersection line between two cuts.
"""
face_inters_mask = np.full((inst.faces.shape[0]), False, dtype=bool)
for i, face in enumerate(inst.faces):
vertices_rejected = [~mask[f] for f in face]
face_on_edge = any(vertices_rejected) and not all(
vertices_rejected
)
if face_on_edge:
face_inters_mask[i] = True
new_mesh.remove_faces(~face_inters_mask)
for expr_part in expression_parts:
inst, _ = inst._cut_sympy(
expr_part, at_intersection="split", _in_recursion=True
)
elif len(expression_parts) == 1:
# build plane from cut expression
plane_sympy = tfields.lib.symbolics.to_plane(expression)
norm_sympy = np.array(plane_sympy.normal_vector).astype(float)
d = -norm_sympy.dot(np.array(plane_sympy.p1).astype(float))
plane = {"normal": norm_sympy, "d": d}
# initialize empty containers
norm_vectors = inst.triangles().norms()
new_vertices = np.empty((0, 3))
new_faces = np.empty((0, 3))
new_fields = [
tfields.Tensors(
np.empty((0,) + field.shape[1:]), coord_sys=field.coord_sys
)
for field in inst.fields
]
new_map_fields = [[] for field in inst.maps[3].fields]
new_norm_vectors = []
# copy TODO?
vertices = np.array(inst)
faces = np.array(inst.maps[3])
fields = [np.array(field) for field in inst.fields]
faces_fields = [np.array(field) for field in inst.maps[3].fields]
face_delete_indices = set([]) # indices of faces that will be removed
for i, face in enumerate(inst.maps[3]):
"""
vertices_rejected is a mask for each face that is True
where a point is on the rejected side of the plane
"""
vertices_rejected = [~mask[f] for f in face]
if any(vertices_rejected):
# delete face
face_delete_indices.add(i)
if any(vertices_rejected) and not all(vertices_rejected):
# face on edge
n_true = vertices_rejected.count(True)
lonely_bool = n_true == 1
triangle_points = [vertices[f] for f in face]
"""
Add the intersection points and faces
"""
intersection = _intersect(
triangle_points, plane, vertices_rejected
)
last_idx = len(vertices)
for tri_list in intersection:
new_face = []
for item in tri_list:
if isinstance(item, int):
# reference to old vertex
new_face.append(face[item])
elif isinstance(item, complex):
# reference to new vertex that has been
# concatenated already
new_face.append(last_idx + int(item.imag))
else:
# new vertex
new_face.append(len(vertices))
vertices = np.append(
vertices, [[float(x) for x in item]], axis=0
)
fields = [
np.append(
field,
np.full((1,) + field.shape[1:], np.nan),
axis=0,
)
for field in fields
]
faces = np.append(faces, [new_face], axis=0)
faces_fields = [
np.append(field, [field[i]], axis=0)
for field in faces_fields
]
faces_fields[-1][-1] = i
face_map = tfields.TensorFields(
faces, *faces_fields, dtype=int, coord_sys=inst.maps[3].coord_sys
)
inst = tfields.Mesh3D(
vertices, *fields, maps=[face_map], coord_sys=inst.coord_sys
)
mask = np.full(len(inst.maps[3]), True, dtype=bool)
for face_idx in range(len(inst.maps[3])):
if face_idx in face_delete_indices:
mask[face_idx] = False
inst.maps[3] = inst.maps[3][mask]
else:
raise ValueError("Sympy expression is not splitable.")
inst = inst.cleaned()
elif at_intersection == "remove":
inst = inst[mask]
else:
raise AttributeError(
"No at_intersection method called {at_intersection} "
"implemented".format(**locals())
)
if _in_recursion:
template = None
else:
template_field = inst.fields.pop(-1)
template_maps = []
for mp in inst.maps.values():
t_mp = tfields.TensorFields(tfields.Tensors(mp), mp.fields.pop(-1))
template_maps.append(t_mp)
template = tfields.Mesh3D(
tfields.Tensors(inst), template_field, maps=template_maps
)
return inst, template
def _cut_template(self, template):
"""
Args:
template (tfields.Mesh3D)
Examples:
>>> import tfields
>>> import numpy as np
Build mesh
>>> mmap = tfields.TensorFields([[0, 1, 2], [0, 3, 4]],
... [[42, 21], [-42, -21]])
>>> m = tfields.Mesh3D([[0]*3, [1]*3, [2]*3, [3]*3, [4]*3],
... [0.0, 0.1, 0.2, 0.3, 0.4],
... [0.0, -0.1, -0.2, -0.3, -0.4],
... maps=[mmap])
Build template
>>> tmap = tfields.TensorFields([[0, 3, 4], [0, 1, 2]],
... [1, 0])
>>> t = tfields.Mesh3D([[0]*3, [-1]*3, [-2]*3, [-3]*3, [-4]*3],
... [1, 0, 3, 2, 4],
... maps=[tmap])
Use template as instruction to make a fast cut
>>> res = m._cut_template(t)
>>> assert np.array_equal(res.fields,
... [[0.1, 0.0, 0.3, 0.2, 0.4],
... [-0.1, 0.0, -0.3, -0.2, -0.4]])
>>> assert np.array_equal(res.maps[3].fields[0],
... [[-42, -21], [42, 21]])
"""
# Possible Extension (small todo): check: len(field(s)) == len(self/maps)
# Redirect fields
fields = []
if template.fields:
template_field = np.array(template.fields[0])
if len(self) > 0:
"""
if new vertices have been created in the template, it is
in principle unclear which fields they should refer to.
Thus, in creating the template, we assigned np.nan.
To keep it fast, we replace nan with 0 as a dummy and correct
the field entries afterwards back to np.nan.
"""
nan_mask = np.isnan(template_field)
template_field[nan_mask] = 0 # dummy reference to index 0.
template_field = template_field.astype(int)
for field in self.fields:
projected_field = field[template_field]
projected_field[nan_mask] = np.nan # correction for nan
fields.append(projected_field)
# Redirect maps and their fields
maps = []
for mp, template_mp in zip(self.maps.values(), template.maps.values()):
mp_fields = []
for field in mp.fields:
if len(template_mp) == 0 and len(template_mp.fields) == 0:
mp_fields.append(field[0:0]) # np.empty
else:
mp_fields.append(field[template_mp.fields[0].astype(int)])
new_mp = tfields.TensorFields(tfields.Tensors(template_mp), *mp_fields)
maps.append(new_mp)
inst = tfields.Mesh3D(tfields.Tensors(template), *fields, maps=maps)
return inst
def cut(self, *args, **kwargs):
"""
cut method for Mesh3D.
Args:
expression (sympy logical expression | Mesh3D):
sympy logical expression: Sympy expression that defines planes
in 3D
Mesh3D: A mesh3D will be interpreted as a template, i.e. a
fast instruction of how to cut the triangles.
It is the second part of the tuple, returned by a previous
cut with a sympy logical expression with 'return_template=True'.
We use the vertices and maps of the Mesh as the skeleton of
the returned mesh. The fields are mapped according to
indices in the template.maps[i].fields.
coord_sys (coordinate system to cut in):
at_intersection (str): instruction on what to do, when a cut will intersect a triangle.
Options: 'remove' (Default) - remove the faces that are on the edge
'keep' - keep the faces that are on the edge
'split' - Create new triangles that make up the old one.
return_template (bool): If True: return the template
to redo the same cut fast
Examples:
define the cut
>>> import numpy as np
>>> import tfields
>>> from sympy.abc import x,y,z
>>> cut_expr = x > 1.5
>>> m = tfields.Mesh3D.grid((0, 3, 4),
... (0, 3, 4),
... (0, 0, 1))
>>> m.fields.append(tfields.Tensors(np.linspace(0, len(m) - 1,
... len(m))))
>>> m.maps[3].fields.append(
... tfields.Tensors(np.linspace(0,
... len(m.maps[3]) - 1,
... len(m.maps[3]))))
>>> mNew = m.cut(cut_expr)
>>> len(mNew)
8
>>> mNew.nfaces()
6
>>> float(mNew[:, 0].min())
2.0
Cutting with the 'keep' option will leave triangles on the edge
untouched:
>>> m_keep = m.cut(cut_expr, at_intersection='keep')
>>> float(m_keep[:, 0].min())
1.0
>>> m_keep.nfaces()
12
Cutting with the 'split' option will create new triangles on the edge:
>>> m_split = m.cut(cut_expr, at_intersection='split')
>>> float(m_split[:, 0].min())
1.5
>>> len(m_split)
15
>>> m_split.nfaces()
15
Cut with 'return_template=True' will return the exact same mesh but
additionally an instruction to conduct the exact same cut fast (template)
>>> m_split_2, template = m.cut(cut_expr, at_intersection='split',
... return_template=True)
>>> m_split_template = m.cut(template)
>>> assert m_split.equal(m_split_2, equal_nan=True)
>>> assert m_split.equal(m_split_template, equal_nan=True)
>>> assert len(template.fields) == 1
>>> assert len(m_split.fields) == 1
>>> assert len(m_split_template.fields) == 1
>>> assert m_split.fields[0].equal(
... list(range(8, 16)) + [np.nan] * 7, equal_nan=True)
>>> assert m_split_template.fields[0].equal(
... list(range(8, 16)) + [np.nan] * 7, equal_nan=True)
This seems irrelevant at first, but consider the case where the
map field or the tensor field changes:
>>> m_altered_fields = m.copy()
>>> m_altered_fields[0] += 42
>>> assert not m_split.equal(m_altered_fields.cut(template))
>>> assert tfields.Tensors(m_split).equal(
... m_altered_fields.cut(template))
>>> assert tfields.Tensors(m_split.maps[3]).equal(
... m_altered_fields.cut(template).maps[3])
The cut expression may be a sympy.BooleanFunction:
>>> cut_expr_bool_fun = (x > 1.5) & (y < 1.5) & (y >0.2) & (z > -0.5)
>>> m_split_bool = m.cut(cut_expr_bool_fun,
... at_intersection='split')
Returns:
copy of cut mesh
* optional: template
"""
return super().cut(*args, **kwargs)
def disjoint_parts(self, return_template=False):
"""
Returns:
disjoint_parts(List(cls)), templates(List(cls))
>>> import tfields
>>> a = tfields.Mesh3D(
... [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]],
... maps=[[[0, 1, 2], [0, 2, 3]]])
>>> b = a.copy()
>>> b[:, 0] += 2
>>> m = tfields.Mesh3D.merged(a, b)
>>> parts = m.disjoint_parts()
>>> aa, ba = parts
>>> assert aa.maps[3].equal(ba.maps[3])
>>> assert aa.equal(a)
>>> assert ba.equal(b)
"""
mp_description = self.disjoint_map(3)
parts = self.parts(mp_description)
if not return_template:
return parts
else:
templates = []
for i, part in enumerate(parts):
template = part.copy()
template.maps[3].fields = [tfields.Tensors(mp_description[1][i])]
templates.append(template)
return parts, templates
| (tensors, *fields, **kwargs) |
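The plane() constructor in the Mesh3D code above builds its faces by splitting every grid quad into two triangles. A minimal standalone sketch of that indexing in plain numpy, as an illustration only (the variable names are not part of the tfields API):

import numpy as np

# a grid of n0 x n1 vertices, flattened row-major as in Mesh3D.plane
n0, n1 = 3, 4
faces = []
for i0 in range(n0 - 1):
    for i1 in range(n1 - 1):
        idx_top_left = n1 * i0 + i1
        idx_top_right = n1 * i0 + (i1 + 1)
        idx_bot_left = n1 * (i0 + 1) + i1
        idx_bot_right = n1 * (i0 + 1) + (i1 + 1)
        # every quad contributes two triangles
        faces.append([idx_top_left, idx_top_right, idx_bot_left])
        faces.append([idx_top_right, idx_bot_left, idx_bot_right])
faces = np.array(faces)
assert len(faces) == (n0 - 1) * (n1 - 1) * 2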
21,082 | tfields.core | __array_finalize__ | null | def __array_finalize__(self, obj):
if obj is None:
return
for attr in self._iter_slots():
setattr(self, attr, getattr(obj, attr, None))
| (self, obj) |
21,083 | tfields.core | __array_wrap__ | null | def __array_wrap__(self, out_arr, context=None): # pylint: disable=arguments-differ
return np.ndarray.__array_wrap__( # pylint: disable=too-many-function-args
self, out_arr, context
)
| (self, out_arr, context=None) |
21,084 | tfields.core | __getitem__ |
In addition to the usual, also slice fields
Examples:
>>> import tfields
>>> import numpy as np
>>> vectors = tfields.Tensors([[0, 0, 0], [0, 0, 1], [0, -1, 0],
... [1, 1, 1], [-1, -1, -1]])
>>> maps=[tfields.TensorFields([[0, 1, 2], [0, 1, 3], [2, 3, 4]],
... [[1, 2], [3, 4], [5, 6]]),
... tfields.TensorFields([[0], [1], [2], [3], [4]])]
>>> mesh = tfields.TensorMaps(vectors,
... [42, 21, 10.5, 1, 1],
... [1, 2, 3, 3, 3],
... maps=maps)
Slicing
>>> sliced = mesh[2:]
>>> assert isinstance(sliced, tfields.TensorMaps)
>>> assert isinstance(sliced.fields[0], tfields.Tensors)
>>> assert isinstance(sliced.maps[3], tfields.TensorFields)
>>> assert sliced.fields[0].equal([10.5, 1, 1])
>>> assert sliced.maps[3].equal([[0, 1, 2]])
>>> assert sliced.maps[3].fields[0].equal([[5, 6]])
Picking
>>> picked = mesh[1]
>>> assert np.array_equal(picked, [0, 0, 1])
>>> assert np.array_equal(picked.maps[3], np.empty((0, 3)))
>>> assert np.array_equal(picked.maps[1], [[0]])
Masking
>>> masked = mesh[np.array([True, False, True, True, True])]
>>> assert masked.equal([[0, 0, 0], [0, -1, 0],
... [1, 1, 1], [-1, -1, -1]])
>>> assert masked.fields[0].equal([42, 10.5, 1, 1])
>>> assert masked.fields[1].equal([1, 3, 3, 3])
>>> assert masked.maps[3].equal([[1, 2, 3]])
>>> assert masked.maps[1].equal([[0], [1], [2], [3]])
Iteration
>>> _ = [vertex for vertex in mesh]
| def __getitem__(self, index):
"""
In addition to the usual, also slice fields
Examples:
>>> import tfields
>>> import numpy as np
>>> vectors = tfields.Tensors([[0, 0, 0], [0, 0, 1], [0, -1, 0],
... [1, 1, 1], [-1, -1, -1]])
>>> maps=[tfields.TensorFields([[0, 1, 2], [0, 1, 3], [2, 3, 4]],
... [[1, 2], [3, 4], [5, 6]]),
... tfields.TensorFields([[0], [1], [2], [3], [4]])]
>>> mesh = tfields.TensorMaps(vectors,
... [42, 21, 10.5, 1, 1],
... [1, 2, 3, 3, 3],
... maps=maps)
Slicing
>>> sliced = mesh[2:]
>>> assert isinstance(sliced, tfields.TensorMaps)
>>> assert isinstance(sliced.fields[0], tfields.Tensors)
>>> assert isinstance(sliced.maps[3], tfields.TensorFields)
>>> assert sliced.fields[0].equal([10.5, 1, 1])
>>> assert sliced.maps[3].equal([[0, 1, 2]])
>>> assert sliced.maps[3].fields[0].equal([[5, 6]])
Picking
>>> picked = mesh[1]
>>> assert np.array_equal(picked, [0, 0, 1])
>>> assert np.array_equal(picked.maps[3], np.empty((0, 3)))
>>> assert np.array_equal(picked.maps[1], [[0]])
Masking
>>> masked = mesh[np.array([True, False, True, True, True])]
>>> assert masked.equal([[0, 0, 0], [0, -1, 0],
... [1, 1, 1], [-1, -1, -1]])
>>> assert masked.fields[0].equal([42, 10.5, 1, 1])
>>> assert masked.fields[1].equal([1, 3, 3, 3])
>>> assert masked.maps[3].equal([[1, 2, 3]])
>>> assert masked.maps[1].equal([[0], [1], [2], [3]])
Iteration
>>> _ = [vertex for vertex in mesh]
"""
item = super(TensorMaps, self).__getitem__(index)
if issubclass(type(item), TensorMaps): # pylint: disable=too-many-nested-blocks
if isinstance(index, tuple):
index = index[0]
if item.maps:
item.maps = Maps(item.maps)
indices = np.arange(len(self))
keep_indices = indices[index]
if isinstance(keep_indices, (int, np.integer)):
keep_indices = [keep_indices]
delete_indices = set(indices).difference(set(keep_indices))
# correct all maps that contain deleted indices
for map_dim in self.maps:
# build mask, where the map should be deleted
map_delete_mask = np.full(
(len(self.maps[map_dim]),), False, dtype=bool
)
for i, map_ in enumerate( # pylint: disable=invalid-name
self.maps[map_dim]
):
for node_index in map_:
if node_index in delete_indices:
map_delete_mask[i] = True
break
map_mask = ~map_delete_mask
# build the correction counters
move_up_counter = np.zeros(self.maps[map_dim].shape, dtype=int)
for delete_index in delete_indices:
move_up_counter[self.maps[map_dim] > delete_index] -= 1
item.maps[map_dim] = (self.maps[map_dim] + move_up_counter)[
map_mask
]
return item
| (self, index) |
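The map bookkeeping in __getitem__ above drops faces that reference a removed vertex and shifts the surviving indices down by the number of deleted vertices below them. A standalone sketch of the same bookkeeping in plain numpy, using the face layout of the docstring example:

import numpy as np

vertices = np.arange(5)                            # stand-in for 5 points
faces = np.array([[0, 1, 2], [0, 1, 3], [2, 3, 4]])
keep = np.array([True, False, True, True, True])   # drop vertex 1

delete_indices = set(np.arange(len(vertices))[~keep])
# drop faces that reference a deleted vertex
face_mask = ~np.array([bool(set(f) & delete_indices) for f in faces])
# shift surviving indices down by the number of deleted vertices below them
move_up = np.zeros(faces.shape, dtype=int)
for d in delete_indices:
    move_up[faces > d] -= 1
new_faces = (faces + move_up)[face_mask]
assert (new_faces == [[1, 2, 3]]).all()            # old face [2, 3, 4], re-indexed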
21,085 | tfields.core | __iter__ |
Forwarding iterations to the bulk array. Otherwise __getitem__ would
kick in and slow down immensely.
Examples:
>>> import tfields
>>> vectors = tfields.Tensors([[0, 0, 0], [0, 0, 1], [0, -1, 0]])
>>> scalar_field = tfields.TensorFields(
... vectors, [42, 21, 10.5], [1, 2, 3])
>>> [(point.rank, point.dim) for point in scalar_field]
[(0, 1), (0, 1), (0, 1)]
| def __iter__(self):
"""
Forwarding iterations to the bulk array. Otherwise __getitem__ would
kick in and slow down immensely.
Examples:
>>> import tfields
>>> vectors = tfields.Tensors([[0, 0, 0], [0, 0, 1], [0, -1, 0]])
>>> scalar_field = tfields.TensorFields(
... vectors, [42, 21, 10.5], [1, 2, 3])
>>> [(point.rank, point.dim) for point in scalar_field]
[(0, 1), (0, 1), (0, 1)]
"""
for index in range(len(self)):
yield super(Tensors, self).__getitem__(index).view(Tensors)
| (self) |
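A short usage sketch of the distinction documented above, assuming tfields is installed: iteration yields bare rows, while indexing also slices the attached fields.

import tfields

field = tfields.TensorFields([[0, 0, 0], [0, 0, 1], [0, -1, 0]], [42, 21, 10.5])
# iteration forwards to the bulk array and yields plain Tensors rows
for point in field:
    print(point)
# indexing runs the full __getitem__ machinery and slices fields as well
subset = field[1:]
print(subset.fields[0])  # the field values belonging to the kept rows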
21,086 | tfields.mesh_3d | __new__ | null | def __new__(cls, tensors, *fields, **kwargs):
kwargs["dim"] = 3
if "maps" in kwargs and "faces" in kwargs:
raise ValueError("Conflicting options maps and faces")
faces = kwargs.pop("faces", None)
maps = kwargs.pop("maps", None)
if faces is not None:
if len(faces) == 0:
# faces = []
faces = np.empty((0, 3))
maps = [faces]
if maps is not None:
kwargs["maps"] = maps
obj = super(Mesh3D, cls).__new__(cls, tensors, *fields, **kwargs)
if len(obj.maps) > 1:
raise ValueError("Mesh3D only allows one map")
if obj.maps and (len(obj.maps) > 1 or obj.maps.keys()[0] != 3):
raise ValueError("Face dimension should be 3")
return obj
| (cls, tensors, *fields, **kwargs) |
21,087 | tfields.core | __reduce__ |
important for pickling (see `here <https://stackoverflow.com/questions/26598109/preserve-custom-attributes-when-pickling-subclass-of-numpy-array>`_)
Examples:
>>> from tempfile import NamedTemporaryFile
>>> import pickle
>>> import tfields
Build a dummy scalar field
>>> scalars = tfields.Tensors([0, 1, 2])
>>> vectors = tfields.Tensors([[0, 0, 0], [0, 0, 1], [0, -1, 0]])
>>> scalar_field = tfields.TensorFields(
... vectors,
... scalars,
... coord_sys='cylinder')
Save it and restore it
>>> out_file = NamedTemporaryFile(suffix='.pickle')
>>> pickle.dump(scalar_field,
... out_file)
>>> _ = out_file.seek(0)
>>> sf = pickle.load(out_file)
>>> sf.coord_sys == 'cylinder'
True
>>> sf.fields[0][2] == 2.
True
| def __reduce__(self):
"""
important for pickling (see `here <https://stackoverflow.com/questions/\
26598109/preserve-custom-attributes-when-pickling-subclass-of-numpy-array>`_)
Examples:
>>> from tempfile import NamedTemporaryFile
>>> import pickle
>>> import tfields
Build a dummy scalar field
>>> scalars = tfields.Tensors([0, 1, 2])
>>> vectors = tfields.Tensors([[0, 0, 0], [0, 0, 1], [0, -1, 0]])
>>> scalar_field = tfields.TensorFields(
... vectors,
... scalars,
... coord_sys='cylinder')
Save it and restore it
>>> out_file = NamedTemporaryFile(suffix='.pickle')
>>> pickle.dump(scalar_field,
... out_file)
>>> _ = out_file.seek(0)
>>> sf = pickle.load(out_file)
>>> sf.coord_sys == 'cylinder'
True
>>> sf.fields[0][2] == 2.
True
"""
# Get the parent's __reduce__ tuple
pickled_state = super(AbstractNdarray, self).__reduce__()
# Create our own tuple to pass to __setstate__
new_state = pickled_state[2] + tuple(
getattr(self, slot) for slot in self._iter_slots()
)
# Return a tuple that replaces the parent's __setstate__
# tuple with our own
return (pickled_state[0], pickled_state[1], new_state)
| (self) |
21,088 | tfields.core | __setattr__ | null | def __setattr__(self, name, value):
if name in self.__slots__:
index = self.__slots__.index(name)
try:
setter = self.__slot_setters__[index]
except IndexError:
setter = None
if isinstance(setter, str):
setter = getattr(self, setter)
if setter is not None:
value = setter(value)
super(AbstractNdarray, self).__setattr__(name, value)
| (self, name, value) |
21,089 | tfields.core | __setitem__ |
In addition to the usual, also slice fields
Examples:
>>> import tfields
>>> import numpy as np
>>> original = tfields.TensorFields(
... [[0, 0, 0], [0, 0, 1], [0, -1, 0]],
... [42, 21, 10.5], [1, 2, 3])
>>> obj = tfields.TensorFields(
... [[0, 0, 0], [0, 0, np.nan],
... [0, -1, 0]], [42, 22, 10.5], [1, -1, 3])
>>> slice_obj = obj.copy()
>>> assert not obj.equal(original)
>>> obj[1] = original[1]
>>> assert obj[:2].equal(original[:2])
>>> assert not slice_obj.equal(original)
>>> slice_obj[:] = original[:]
>>> assert slice_obj.equal(original)
| def __setitem__(self, index, item):
"""
In addition to the usual, also slice fields
Examples:
>>> import tfields
>>> import numpy as np
>>> original = tfields.TensorFields(
... [[0, 0, 0], [0, 0, 1], [0, -1, 0]],
... [42, 21, 10.5], [1, 2, 3])
>>> obj = tfields.TensorFields(
... [[0, 0, 0], [0, 0, np.nan],
... [0, -1, 0]], [42, 22, 10.5], [1, -1, 3])
>>> slice_obj = obj.copy()
>>> assert not obj.equal(original)
>>> obj[1] = original[1]
>>> assert obj[:2].equal(original[:2])
>>> assert not slice_obj.equal(original)
>>> slice_obj[:] = original[:]
>>> assert slice_obj.equal(original)
"""
super(TensorFields, self).__setitem__(index, item)
if issubclass(type(item), TensorFields):
if isinstance(index, slice):
for i, field in enumerate(item.fields):
self.fields[i].__setitem__(index, field)
elif isinstance(index, tuple):
for i, field in enumerate(item.fields):
self.fields[i].__setitem__(index[0], field)
else:
for i, field in enumerate(item.fields):
self.fields[i].__setitem__(index, field)
| (self, index, item) |
21,090 | tfields.core | __setstate__ |
Counterpart to __reduce__. Important for unpickling.
| def __setstate__(self, state):
"""
Counterpart to __reduce__. Important for unpickling.
"""
# Call the parent's __setstate__ with the other tuple elements.
super(AbstractNdarray, self).__setstate__(state[0 : -len(self._iter_slots())])
# set the __slot__ attributes
valid_slot_attrs = list(self._iter_slots())
# attributes that have been added later have not been pickled with the full information
# and thus need to be excluded from __setstate__. They need to be in the same order
# as they have been added to __slots__
added_slot_attrs = ["name"]
n_np = 5 # number of numpy array states
n_old = len(valid_slot_attrs) - len(state[n_np:])
if n_old > 0:
for latest_index in range(n_old):
new_slot = added_slot_attrs[-latest_index]
warnings.warn(
"Slots with names '{new_slot}' appears to have "
"been added after the creation of the reduced "
"state. No corresponding state found in "
"__setstate__.".format(**locals())
)
valid_slot_attrs.pop(valid_slot_attrs.index(new_slot))
setattr(self, new_slot, None)
for slot_index, slot in enumerate(valid_slot_attrs):
state_index = n_np + slot_index
setattr(self, slot, state[state_index])
| (self, state) |
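The unpickling above relies on the state tuple being the parent ndarray state (n_np = 5 entries) followed by one entry per slot. A schematic of that split in plain Python; the tuple values below are illustrative, not a real pickled state:

n_np = 5  # number of numpy ndarray state entries
state = (1, (3,), "f8", False, b"...", "cylinder", None)  # illustrative values
ndarray_state = state[:n_np]  # consumed by np.ndarray.__setstate__
slot_state = state[n_np:]     # one entry per __slots__ attribute
for slot, value in zip(["coord_sys", "name"], slot_state):
    print(slot, "=", value)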
21,091 | tfields.core | _args | null | def _args(self) -> tuple:
return super()._args() + tuple(self.fields)
| (self) -> tuple |
21,093 | tfields.mesh_3d | _cut_sympy |
Partition the mesh with the cuts given and return the template
| def _cut_sympy(self, expression, at_intersection="remove", _in_recursion=False):
"""
Partition the mesh with the cuts given and return the template
"""
eps = 0.000000001
# direct return if self is empty
if len(self) == 0:
return self.copy(), self.copy()
inst = self.copy()
"""
add the indices of the vertices and maps to the fields. They will be
removed afterwards
"""
if not _in_recursion:
inst.fields.append(tfields.Tensors(np.arange(len(inst))))
for mp in inst.maps.values():
mp.fields.append(tfields.Tensors(np.arange(len(mp))))
# mask for points that fulfill the cut expression
mask = inst.evalf(expression)
# remove the points
if not any(~mask):
# all vertices are valid
inst = inst[mask]
elif all(~mask):
# no vertex is valid
inst = inst[mask]
elif at_intersection == "keep":
expression_parts = tfields.lib.symbolics.split_expression(expression)
if len(expression_parts) > 1:
new_mesh = inst.copy()
for expr_part in expression_parts:
inst, _ = inst._cut_sympy(
expr_part, at_intersection=at_intersection, _in_recursion=True
)
elif len(expression_parts) == 1:
face_delete_indices = set([])
for i, face in enumerate(inst.maps[3]):
"""
vertices_rejected is a mask for each face that is True, where
a Point is on the rejected side of the plane
"""
vertices_rejected = [~mask[f] for f in face]
if all(vertices_rejected):
# delete face
face_delete_indices.add(i)
mask = np.full(len(inst.maps[3]), True, dtype=bool)
for face_idx in range(len(inst.maps[3])):
if face_idx in face_delete_indices:
mask[face_idx] = False
inst.maps[3] = inst.maps[3][mask]
else:
raise ValueError("Sympy expression is not splitable.")
inst = inst.cleaned()
elif at_intersection == "split" or at_intersection == "split_rough":
"""
add vertices and faces that are at the border of the cuts
"""
expression_parts = tfields.lib.symbolics.split_expression(expression)
if len(expression_parts) > 1:
new_mesh = inst.copy()
if at_intersection == "split_rough":
"""
the following is meant to speed up the process. The problem is
that triangles can exist where all vertices lie outside the cut,
yet part of the triangle's area still overlaps with the cut.
These lie on the intersection line between two cuts.
"""
face_inters_mask = np.full((inst.faces.shape[0]), False, dtype=bool)
for i, face in enumerate(inst.faces):
vertices_rejected = [~mask[f] for f in face]
face_on_edge = any(vertices_rejected) and not all(
vertices_rejected
)
if face_on_edge:
face_inters_mask[i] = True
new_mesh.remove_faces(~face_inters_mask)
for expr_part in expression_parts:
inst, _ = inst._cut_sympy(
expr_part, at_intersection="split", _in_recursion=True
)
elif len(expression_parts) == 1:
# build plane from cut expression
plane_sympy = tfields.lib.symbolics.to_plane(expression)
norm_sympy = np.array(plane_sympy.normal_vector).astype(float)
d = -norm_sympy.dot(np.array(plane_sympy.p1).astype(float))
plane = {"normal": norm_sympy, "d": d}
# initialize empty containers
norm_vectors = inst.triangles().norms()
new_vertices = np.empty((0, 3))
new_faces = np.empty((0, 3))
new_fields = [
tfields.Tensors(
np.empty((0,) + field.shape[1:]), coord_sys=field.coord_sys
)
for field in inst.fields
]
new_map_fields = [[] for field in inst.maps[3].fields]
new_norm_vectors = []
# copy TODO?
vertices = np.array(inst)
faces = np.array(inst.maps[3])
fields = [np.array(field) for field in inst.fields]
faces_fields = [np.array(field) for field in inst.maps[3].fields]
face_delete_indices = set([]) # indices of faces that will be removed
for i, face in enumerate(inst.maps[3]):
"""
vertices_rejected is a mask for each face that is True
where a point is on the rejected side of the plane
"""
vertices_rejected = [~mask[f] for f in face]
if any(vertices_rejected):
# delete face
face_delete_indices.add(i)
if any(vertices_rejected) and not all(vertices_rejected):
# face on edge
n_true = vertices_rejected.count(True)
lonely_bool = n_true == 1
triangle_points = [vertices[f] for f in face]
"""
Add the intersection points and faces
"""
intersection = _intersect(
triangle_points, plane, vertices_rejected
)
last_idx = len(vertices)
for tri_list in intersection:
new_face = []
for item in tri_list:
if isinstance(item, int):
# reference to old vertex
new_face.append(face[item])
elif isinstance(item, complex):
# reference to new vertex that has been
# concatenated already
new_face.append(last_idx + int(item.imag))
else:
# new vertex
new_face.append(len(vertices))
vertices = np.append(
vertices, [[float(x) for x in item]], axis=0
)
fields = [
np.append(
field,
np.full((1,) + field.shape[1:], np.nan),
axis=0,
)
for field in fields
]
faces = np.append(faces, [new_face], axis=0)
faces_fields = [
np.append(field, [field[i]], axis=0)
for field in faces_fields
]
faces_fields[-1][-1] = i
face_map = tfields.TensorFields(
faces, *faces_fields, dtype=int, coord_sys=inst.maps[3].coord_sys
)
inst = tfields.Mesh3D(
vertices, *fields, maps=[face_map], coord_sys=inst.coord_sys
)
mask = np.full(len(inst.maps[3]), True, dtype=bool)
for face_idx in range(len(inst.maps[3])):
if face_idx in face_delete_indices:
mask[face_idx] = False
inst.maps[3] = inst.maps[3][mask]
else:
raise ValueError("Sympy expression is not splitable.")
inst = inst.cleaned()
elif at_intersection == "remove":
inst = inst[mask]
else:
raise AttributeError(
"No at_intersection method called {at_intersection} "
"implemented".format(**locals())
)
if _in_recursion:
template = None
else:
template_field = inst.fields.pop(-1)
template_maps = []
for mp in inst.maps.values():
t_mp = tfields.TensorFields(tfields.Tensors(mp), mp.fields.pop(-1))
template_maps.append(t_mp)
template = tfields.Mesh3D(
tfields.Tensors(inst), template_field, maps=template_maps
)
return inst, template
| (self, expression, at_intersection='remove', _in_recursion=False) |
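The plane dict built in the split branch above ({'normal': n, 'd': d}) encodes the plane n.x + d = 0, so classifying a vertex against the cut reduces to a signed-distance test. A standalone numpy sketch of that test (a simplification, not the tfields implementation itself):

import numpy as np

normal = np.array([1.0, 0.0, 0.0])  # plane of the cut expression x > 1.5
d = -1.5                            # d = -normal . p1 for a point p1 on the plane
points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
signed = points @ normal + d        # signed distance (up to |normal|)
mask = signed > 0                   # True where x > 1.5 holds
assert mask.tolist() == [False, False, True]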
21,094 | tfields.mesh_3d | _cut_template |
Args:
template (tfields.Mesh3D)
Examples:
>>> import tfields
>>> import numpy as np
Build mesh
>>> mmap = tfields.TensorFields([[0, 1, 2], [0, 3, 4]],
... [[42, 21], [-42, -21]])
>>> m = tfields.Mesh3D([[0]*3, [1]*3, [2]*3, [3]*3, [4]*3],
... [0.0, 0.1, 0.2, 0.3, 0.4],
... [0.0, -0.1, -0.2, -0.3, -0.4],
... maps=[mmap])
Build template
>>> tmap = tfields.TensorFields([[0, 3, 4], [0, 1, 2]],
... [1, 0])
>>> t = tfields.Mesh3D([[0]*3, [-1]*3, [-2]*3, [-3]*3, [-4]*3],
... [1, 0, 3, 2, 4],
... maps=[tmap])
Use template as instruction to make a fast cut
>>> res = m._cut_template(t)
>>> assert np.array_equal(res.fields,
... [[0.1, 0.0, 0.3, 0.2, 0.4],
... [-0.1, 0.0, -0.3, -0.2, -0.4]])
>>> assert np.array_equal(res.maps[3].fields[0],
... [[-42, -21], [42, 21]])
| def _cut_template(self, template):
"""
Args:
template (tfields.Mesh3D)
Examples:
>>> import tfields
>>> import numpy as np
Build mesh
>>> mmap = tfields.TensorFields([[0, 1, 2], [0, 3, 4]],
... [[42, 21], [-42, -21]])
>>> m = tfields.Mesh3D([[0]*3, [1]*3, [2]*3, [3]*3, [4]*3],
... [0.0, 0.1, 0.2, 0.3, 0.4],
... [0.0, -0.1, -0.2, -0.3, -0.4],
... maps=[mmap])
Build template
>>> tmap = tfields.TensorFields([[0, 3, 4], [0, 1, 2]],
... [1, 0])
>>> t = tfields.Mesh3D([[0]*3, [-1]*3, [-2]*3, [-3]*3, [-4]*3],
... [1, 0, 3, 2, 4],
... maps=[tmap])
Use template as instruction to make a fast cut
>>> res = m._cut_template(t)
>>> assert np.array_equal(res.fields,
... [[0.1, 0.0, 0.3, 0.2, 0.4],
... [-0.1, 0.0, -0.3, -0.2, -0.4]])
>>> assert np.array_equal(res.maps[3].fields[0],
... [[-42, -21], [42, 21]])
"""
# Possible Extension (small todo): check: len(field(s)) == len(self/maps)
# Redirect fields
fields = []
if template.fields:
template_field = np.array(template.fields[0])
if len(self) > 0:
"""
if new vertices have been created in the template, it is
in principle unclear which fields they should refer to.
Thus, in creating the template, we assigned np.nan.
To keep it fast, we replace nan with 0 as a dummy and correct
the field entries afterwards back to np.nan.
"""
nan_mask = np.isnan(template_field)
template_field[nan_mask] = 0 # dummy reference to index 0.
template_field = template_field.astype(int)
for field in self.fields:
projected_field = field[template_field]
projected_field[nan_mask] = np.nan # correction for nan
fields.append(projected_field)
# Redirect maps and their fields
maps = []
for mp, template_mp in zip(self.maps.values(), template.maps.values()):
mp_fields = []
for field in mp.fields:
if len(template_mp) == 0 and len(template_mp.fields) == 0:
mp_fields.append(field[0:0]) # np.empty
else:
mp_fields.append(field[template_mp.fields[0].astype(int)])
new_mp = tfields.TensorFields(tfields.Tensors(template_mp), *mp_fields)
maps.append(new_mp)
inst = tfields.Mesh3D(tfields.Tensors(template), *fields, maps=maps)
return inst
| (self, template) |
21,095 | tfields.core | _kwargs | null | def _kwargs(self) -> dict:
content = super()._kwargs()
content.pop("fields") # instantiated via _args
return content
| (self) -> dict |
21,098 | tfields.mesh_3d | _save_obj |
Save obj as wavefront/.obj file
| def _save_obj(self, path, **kwargs):
"""
Save obj as wavefront/.obj file
"""
obj = kwargs.pop("object", None)
group = kwargs.pop("group", None)
cmap = kwargs.pop("cmap", "viridis")
map_index = kwargs.pop("map_index", None)
path = path.replace(".obj", "")
directory, name = os.path.split(path)
if map_index is not None:
scalars = self.maps[3].fields[map_index]
min_scalar = scalars[~np.isnan(scalars)].min()
max_scalar = scalars[~np.isnan(scalars)].max()
vmin = kwargs.pop("vmin", min_scalar)
vmax = kwargs.pop("vmax", max_scalar)
if vmin == vmax:
if vmin == 0.0:
vmax = 1.0
else:
vmin = 0.0
import matplotlib.colors as colors
import matplotlib.pyplot as plt
norm = colors.Normalize(vmin, vmax)
color_map = plt.get_cmap(cmap)
else:
# norm = None switches off coloring the triangles and thus no
# materials are produced
norm = None
if len(kwargs) != 0:
raise ValueError("Unused arguments.")
if norm is not None:
mat_name = name + "_frame_{0}.mat".format(map_index)
scalars[np.isnan(scalars)] = min_scalar - 1
sorted_scalars = scalars[scalars.argsort()]
sorted_scalars[sorted_scalars == min_scalar - 1] = np.nan
sorted_faces = self.faces[scalars.argsort()]
scalar_set = np.unique(sorted_scalars)
scalar_set[scalar_set == min_scalar - 1] = np.nan
mat_path = os.path.join(directory, mat_name)
with open(mat_path, "w") as mf:
for s in scalar_set:
if np.isnan(s):
mf.write("newmtl nan")
mf.write("Kd 0 0 0\n\n")
else:
mf.write("newmtl mtl_{0}\n".format(s))
mf.write(
"Kd {c[0]} {c[1]} {c[2]}\n\n".format(c=color_map(norm(s)))
)
else:
sorted_faces = self.faces
# writing of the obj file
with open(path + ".obj", "w") as f:
f.write("# File saved with tfields Mesh3D._save_obj method\n\n")
if norm is not None:
f.write("mtllib ./{0}\n\n".format(mat_name))
if obj is not None:
f.write("o {0}\n".format(obj))
if group is not None:
f.write("g {0}\n".format(group))
for vertex in self:
f.write("v {v[0]} {v[1]} {v[2]}\n".format(v=vertex))
last_scalar = None
for i, face in enumerate(sorted_faces + 1):
if norm is not None:
if last_scalar != sorted_scalars[i]:
last_scalar = sorted_scalars[i]
f.write("usemtl mtl_{0}\n".format(last_scalar))
f.write("f {f[0]} {f[1]} {f[2]}\n".format(f=face))
| (self, path, **kwargs) |
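The material file written above maps each face scalar to a diffuse color (Kd) through a Normalize/colormap pair. A minimal standalone sketch of that mapping, assuming matplotlib is available; the output mirrors the newmtl blocks composed above:

import matplotlib.colors as colors
import matplotlib.pyplot as plt

scalars = [0.5, 0.7]
norm = colors.Normalize(min(scalars), max(scalars))
color_map = plt.get_cmap("viridis")
for s in scalars:
    r, g, b, _ = color_map(norm(s))  # RGBA tuple; alpha is discarded
    print("newmtl mtl_{0}".format(s))
    print("Kd {0} {1} {2}\n".format(r, g, b))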
21,100 | tfields.mesh_3d | _save_stl |
Saves the mesh in stl format
| def _save_stl(self, path, **kwargs):
"""
Saves the mesh in stl format
"""
self.triangles()._save_stl(path, **kwargs)
| (self, path, **kwargs) |
21,101 | tfields.core | _save_txt |
Save as text file.
Args:
**kwargs passed to np.savetxt.
| def _save_txt(self, path, **kwargs):
"""
Save as text file.
Args:
**kwargs passed to np.savetxt.
"""
header = kwargs.get("header", [])
if isinstance(header, dict):
header = [
f"{key}: {type(value).__name__} = {value}"
for key, value in header.items()
]
if isinstance(header, list):
# static-typing-like attribute saving
header = [
f"{key}: {type(getattr(self, key)).__name__} = {getattr(self, key)}"
for key in self._iter_slots()
] + header
kwargs["header"] = "\n".join(header)
np.savetxt(path, self, **kwargs)
| (self, path, **kwargs) |
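A usage sketch of the 'key: type = value' header convention composed above, in plain numpy; the file name is illustrative:

import numpy as np

data = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
header = "\n".join([
    "coord_sys: str = cartesian",  # slot attribute line, as composed above
    "name: NoneType = None",
])
np.savetxt("points.txt", data, header=header)
# np.savetxt prefixes every header line with '# ' by default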
21,102 | tfields.core | _weights |
Expansion of Tensors._weights with integer inputs
Args:
weights (np.ndarray | int | None):
if weights is int: use field at index <weights>
else: see Tensors._weights
| def _weights(self, weights, rigid=True):
"""
Expansion of Tensors._weights with integer inputs
Args:
weights (np.ndarray | int | None):
if weights is int: use field at index <weights>
else: see Tensors._weights
"""
if isinstance(weights, int):
weights = self.fields[weights]
return super(TensorFields, self)._weights(weights, rigid=rigid)
| (self, weights, rigid=True) |
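A sketch of what resolving integer weights to a field enables, e.g. a field-weighted mean of points; plain numpy mirroring the idea, not the tfields internals:

import numpy as np

points = np.array([[0.0, 0.0], [2.0, 0.0]])
fields = [np.array([1.0, 3.0])]  # field 0 doubles as the weights
weights = fields[0]              # what passing weights=0 resolves to
mean = (points * weights[:, None]).sum(axis=0) / weights.sum()
assert mean.tolist() == [1.5, 0.0]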
21,103 | tfields.mesh_3d | centroids | null | def centroids(self):
return self.triangles().centroids()
| (self) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.