def write_main(argv):
"""
write FILENAME
Write a local copy of FILENAME using FILENAME_tweaks for local tweaks.
"""
if len(argv) != 1:
print("Please provide the name of a file to write.")
return 1
filename = argv[0]
resource_name = "files/" + filename
tweaks_name = amend_filename(filename, "_tweaks")
if not pkg_resources.resource_exists("edx_lint", resource_name):
print(u"Don't have file %r to write." % filename)
return 2
if os.path.exists(filename):
print(u"Checking existing copy of %s" % filename)
tef = TamperEvidentFile(filename)
if not tef.validate():
bak_name = amend_filename(filename, "_backup")
print(u"Your copy of %s seems to have been edited, renaming it to %s" % (filename, bak_name))
if os.path.exists(bak_name):
print(u"A previous %s exists, deleting it" % bak_name)
os.remove(bak_name)
os.rename(filename, bak_name)
print(u"Reading edx_lint/files/%s" % filename)
cfg = configparser.RawConfigParser()
resource_string = pkg_resources.resource_string("edx_lint", resource_name).decode("utf8")
# pkg_resources always reads binary data (in both python2 and python3).
# ConfigParser.read_string only exists in python3, so we have to wrap the string
# from pkg_resources in a cStringIO so that we can pass it into ConfigParser.readfp.
if six.PY2:
cfg.readfp(cStringIO(resource_string), resource_name)
else:
cfg.read_string(resource_string, resource_name) # pylint: disable=no-member
if os.path.exists(tweaks_name):
print(u"Applying local tweaks from %s" % tweaks_name)
cfg_tweaks = configparser.RawConfigParser()
cfg_tweaks.read([tweaks_name])
merge_configs(cfg, cfg_tweaks)
print(u"Writing %s" % filename)
output_text = cStringIO()
output_text.write(WARNING_HEADER.format(filename=filename, tweaks_name=tweaks_name))
cfg.write(output_text)
out_tef = TamperEvidentFile(filename)
if six.PY2:
output_bytes = output_text.getvalue()
else:
output_bytes = output_text.getvalue().encode("utf8")
out_tef.write(output_bytes)
return 0
def amend_filename(filename, amend):
"""Amend a filename with a suffix.
amend_filename("foo.txt", "_tweak") --> "foo_tweak.txt"
"""
base, ext = os.path.splitext(filename)
amended_name = base + amend + ext
return amended_name
def check_main(argv):
"""
check FILENAME
Check that FILENAME has not been edited since writing.
"""
if len(argv) != 1:
print("Please provide the name of a file to check.")
return 1
filename = argv[0]
if os.path.exists(filename):
print(u"Checking existing copy of %s" % filename)
tef = TamperEvidentFile(filename)
if tef.validate():
print(u"Your copy of %s is good" % filename)
else:
print(u"Your copy of %s seems to have been edited" % filename)
else:
print(u"You don't have a copy of %s" % filename)
return 0
def merge_configs(main, tweaks):
"""Merge tweaks into a main config file."""
for section in tweaks.sections():
for option in tweaks.options(section):
value = tweaks.get(section, option)
if option.endswith("+"):
option = option[:-1]
value = main.get(section, option) + value
main.set(section, option, value)
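A minimal usage sketch for merge_configs (the section and option names are illustrative): an option whose name ends with "+" appends to the main config's value instead of replacing it.

import configparser

main_cfg = configparser.RawConfigParser()
main_cfg.read_string("[pylint]\ndisable = bad-name\n")
tweaks_cfg = configparser.RawConfigParser()
# "disable+" means: append this value to main's "disable" option.
tweaks_cfg.read_string("[pylint]\ndisable+ = ,missing-docstring\n")
merge_configs(main_cfg, tweaks_cfg)
print(main_cfg.get("pylint", "disable"))  # -> bad-name,missing-docstring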
def write(self, text, hashline=b"# {}"):
u"""
Write `text` to the file.
Writes the text to the file, with a final line checksumming the
contents. The entire file must be written with one `.write()` call.
The last line is written with the `hashline` format string, which can
be changed to accommodate different file syntaxes.
Both arguments are UTF8 byte strings.
Arguments:
text (UTF8 byte string): the contents of the file to write.
hashline (UTF8 byte string): the format of the last line to append
to the file, with "{}" replaced with the hash.
"""
if not text.endswith(b"\n"):
text += b"\n"
actual_hash = hashlib.sha1(text).hexdigest()
with open(self.filename, "wb") as f:
f.write(text)
f.write(hashline.decode("utf8").format(actual_hash).encode("utf8"))
f.write(b"\n")
def validate(self):
"""
Check if the file still has its original contents.
Returns True if the file is unchanged, False if it has been tampered
with.
"""
with open(self.filename, "rb") as f:
text = f.read()
start_last_line = text.rfind(b"\n", 0, -1)
if start_last_line == -1:
return False
original_text = text[:start_last_line+1]
last_line = text[start_last_line+1:]
expected_hash = hashlib.sha1(original_text).hexdigest().encode('utf8')
match = re.search(b"[0-9a-f]{40}", last_line)
if not match:
return False
actual_hash = match.group(0)
return actual_hash == expected_hash
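A small round-trip sketch of the tamper-evident API above (the filename is illustrative): write() appends a checksum line in the hashline format, and validate() recomputes the hash over everything before that line.

tef = TamperEvidentFile("pylintrc_local")
tef.write(b"[MASTER]\nignore = migrations\n")
assert tef.validate()              # untouched file -> True
with open("pylintrc_local", "ab") as f:
    f.write(b"jobs = 4\n")         # simulate a manual edit
assert not tef.validate()          # checksum no longer matches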
def check_visitors(cls):
"""Check that a checker's visitors are correctly named.
A checker has methods named visit_NODETYPE, but it's easy to mis-name
a visit method, and it will never be called. This decorator checks
the class to see that all of its visitors are named after an existing
node class.
"""
for name in dir(cls):
if name.startswith("visit_"):
if name[6:] not in CLASS_NAMES:
raise Exception(u"Method {} doesn't correspond to a node class".format(name))
return cls
def usable_class_name(node):
"""Make a reasonable class name for a class node."""
name = node.qname()
for prefix in ["__builtin__.", "builtins.", "."]:
if name.startswith(prefix):
name = name[len(prefix):]
return name
def parse_pylint_output(pylint_output):
"""
Parse the pylint output-format=parseable lines into PylintError tuples.
"""
for line in pylint_output:
if not line.strip():
continue
if line[0:5] in ("-"*5, "*"*5):
continue
parsed = PYLINT_PARSEABLE_REGEX.search(line)
if parsed is None:
LOG.warning(
u"Unable to parse %r. If this is a lint failure, please re-run pylint with the "
u"--output-format=parseable option, otherwise, you can ignore this message.",
line
)
continue
parsed_dict = parsed.groupdict()
parsed_dict['linenum'] = int(parsed_dict['linenum'])
yield PylintError(**parsed_dict)
def format_pylint_disables(error_names, tag=True):
"""
Format a list of error_names into a 'pylint: disable=' line.
"""
tag_str = "lint-amnesty, " if tag else ""
if error_names:
return u" # {tag}pylint: disable={disabled}".format(
disabled=", ".join(sorted(error_names)),
tag=tag_str,
)
else:
return ""
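For example (outputs assume the implementation above; note the two-space indent and the sorted error names):

format_pylint_disables({'unused-import', 'invalid-name'})
# -> '  # lint-amnesty, pylint: disable=invalid-name, unused-import'
format_pylint_disables({'unused-import'}, tag=False)
# -> '  # pylint: disable=unused-import'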
def fix_pylint(line, errors):
"""
Yield any modified versions of ``line`` needed to address the errors in ``errors``.
"""
if not errors:
yield line
return
current = PYLINT_EXCEPTION_REGEX.search(line)
if current:
original_errors = {disable.strip() for disable in current.group('disables').split(',')}
else:
original_errors = set()
disabled_errors = set(original_errors)
for error in errors:
if error.error_name == 'useless-suppression':
parsed = re.search("""Useless suppression of '(?P<error_name>[^']+)'""", error.error_msg)
disabled_errors.discard(parsed.group('error_name'))
elif error.error_name == 'missing-docstring' and error.error_msg == 'Missing module docstring':
yield format_pylint_disables({error.error_name}).strip() + '\n'
else:
disabled_errors.add(error.error_name)
disable_string = format_pylint_disables(disabled_errors, not disabled_errors <= original_errors)
if current:
yield PYLINT_EXCEPTION_REGEX.sub(disable_string, line)
else:
yield re.sub(r'($\s*)', disable_string + r'\1', line, count=1)
def pylint_amnesty(pylint_output):
"""
Add ``# pylint: disable`` clauses to add exceptions to all existing pylint errors in a codebase.
"""
errors = defaultdict(lambda: defaultdict(set))
for pylint_error in parse_pylint_output(pylint_output):
errors[pylint_error.filename][pylint_error.linenum].add(pylint_error)
for file_with_errors in sorted(errors):
try:
opened_file = open(file_with_errors)
except IOError:
LOG.warning(u"Unable to open %s for edits", file_with_errors, exc_info=True)
else:
with opened_file as input_file:
output_lines = []
for line_num, line in enumerate(input_file, start=1):
output_lines.extend(
fix_pylint(
line,
errors[file_with_errors][line_num]
)
)
with open(file_with_errors, 'w') as output_file:
output_file.writelines(output_lines)
def main(argv=None):
"""The edx_lint command entry point."""
if argv is None:
argv = sys.argv[1:]
if not argv or argv[0] == "help":
show_help()
return 0
elif argv[0] == "check":
return check_main(argv[1:])
elif argv[0] == "list":
return list_main(argv[1:])
elif argv[0] == "write":
return write_main(argv[1:])
else:
print(u"Don't understand {!r}".format(" ".join(argv)))
show_help()
return 1
def show_help():
"""Print the help string for the edx_lint command."""
print("""\
Manage local config files from masters in edx_lint.
Commands:
""")
for cmd in [write_main, check_main, list_main]:
print(cmd.__doc__.lstrip("\n"))
def parse_json_form(dictionary, prefix=''):
"""
Parse an HTML JSON form submission as per the W3C Draft spec
An implementation of "The application/json encoding algorithm"
http://www.w3.org/TR/html-json-forms/
"""
# Step 1: Initialize output object
output = {}
for name, value in get_all_items(dictionary):
# TODO: implement is_file flag
# Step 2: Compute steps array
steps = parse_json_path(name)
# Step 3: Initialize context
context = output
# Step 4: Iterate through steps
for step in steps:
# Step 4.1 Retrieve current value from context
current_value = get_value(context, step.key, Undefined())
# Steps 4.2, 4.3: Set JSON value on context
context = set_json_value(
context=context,
step=step,
current_value=current_value,
entry_value=value,
is_file=False,
)
# Convert any remaining Undefined array entries to None
output = clean_undefined(output)
output = clean_empty_string(output)
# Account for DRF prefix (not part of JSON form spec)
result = get_value(output, prefix, Undefined())
if isinstance(result, Undefined):
return output
else:
return result
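Two illustrative inputs, mirroring examples from the W3C draft (assumes the helper functions defined below in this module):

parse_json_form({'pet[species]': 'Dahut', 'pet[name]': 'Hypatia'})
# -> {'pet': {'species': 'Dahut', 'name': 'Hypatia'}}
parse_json_form({'highlander[]': 'one'})
# -> {'highlander': ['one']}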
def parse_json_path(path):
"""
Parse a string as a JSON path
An implementation of "steps to parse a JSON encoding path"
http://www.w3.org/TR/html-json-forms/#dfn-steps-to-parse-a-json-encoding-path
"""
# Steps 1, 2, 3
original_path = path
steps = []
# Step 11 (Failure)
failed = [
JsonStep(
type="object",
key=original_path,
last=True,
failed=True,
)
]
# Other variables for later use
digit_re = re.compile(r'^\[([0-9]+)\]')
key_re = re.compile(r'^\[([^\]]+)\]')
# Step 4 - Find characters before first [ (if any)
parts = path.split("[")
first_key = parts[0]
if parts[1:]:
path = "[" + "[".join(parts[1:])
else:
path = ""
# Step 5 - According to spec, keys cannot start with [
# NOTE: This was allowed in older DRF versions, so disabling rule for now
# if not first_key:
# return failed
# Step 6 - Save initial step
steps.append(JsonStep(
type="object",
key=first_key,
))
# Step 7 - Simple single-step case (no [ found)
if not path:
steps[-1].last = True
return steps
# Step 8 - Loop
while path:
# Step 8.1 - Check for single-item array
if path[:2] == "[]":
path = path[2:]
steps.append(JsonStep(
type="array",
key=0,
))
continue
# Step 8.2 - Check for array[index]
digit_match = digit_re.match(path)
if digit_match:
path = digit_re.sub("", path)
steps.append(JsonStep(
type="array",
key=int(digit_match.group(1)),
))
continue
# Step 8.3 - Check for object[key]
key_match = key_re.match(path)
if key_match:
path = key_re.sub("", path)
steps.append(JsonStep(
type="object",
key=key_match.group(1),
))
continue
# Step 8.4 - Invalid key format
return failed
# Step 9
next_step = None
for step in reversed(steps):
if next_step:
step.next_type = next_step.type
else:
step.last = True
next_step = step
return steps
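For example, the path 'pet[0][name]' parses into an object step, an array step, and a final object step (assuming the JsonStep fields used above):

steps = parse_json_path('pet[0][name]')
[(s.type, s.key) for s in steps]
# -> [('object', 'pet'), ('array', 0), ('object', 'name')]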
def set_json_value(context, step, current_value, entry_value, is_file):
"""
Apply a JSON value to a context object
An implementation of "steps to set a JSON encoding value"
http://www.w3.org/TR/html-json-forms/#dfn-steps-to-set-a-json-encoding-value
"""
# TODO: handle is_file
# Add empty values to array so indexing works like JavaScript
if isinstance(context, list) and isinstance(step.key, int):
undefined_count = step.key - len(context) + 1
if undefined_count > 1000:
raise ParseException("Too many consecutive empty values!")
elif undefined_count > 0:
context += [Undefined()] * undefined_count
# Step 7: Handle last step
if step.last:
if isinstance(current_value, Undefined):
# Step 7.1: No existing value
key = step.key
if isinstance(context, dict) and isinstance(key, int):
key = str(key)
if step.append:
context[key] = [entry_value]
else:
context[key] = entry_value
elif isinstance(current_value, list):
# Step 7.2: Existing value is an Array, assume multi-valued field
# and add entry to end.
# FIXME: What if the other items in the array had explicit keys and
# this one is supposed to be the "" value?
# (See step 8.4 and Example 7)
context[step.key].append(entry_value)
elif isinstance(current_value, dict) and not is_file:
# Step 7.3: Existing value is an Object
return set_json_value(
context=current_value,
step=JsonStep(type="object", key="", last=True),
current_value=current_value.get("", Undefined()),
entry_value=entry_value,
is_file=is_file,
)
else:
# Step 7.4: Existing value is a scalar; preserve both values
context[step.key] = [current_value, entry_value]
# Step 7.5
return context
# Step 8: Handle intermediate steps
if isinstance(current_value, Undefined):
# 8.1: No existing value
if step.next_type == "array":
context[step.key] = []
else:
context[step.key] = {}
return context[step.key]
elif isinstance(current_value, dict):
# Step 8.2: Existing value is an Object
return get_value(context, step.key, Undefined())
elif isinstance(current_value, list):
# Step 8.3: Existing value is an Array
if step.next_type == "array":
return current_value
# Convert array to object to facilitate mixed keys
obj = {}
for i, item in enumerate(current_value):
if not isinstance(item, Undefined):
obj[str(i)] = item
context[step.key] = obj
return obj
else:
# 8.4: Existing value is a scalar; convert to Object, preserving
# current value via an empty key
obj = {'': current_value}
context[step.key] = obj
return obj
def get_value(obj, key, default=None):
"""
Mimic JavaScript Object/Array behavior by allowing access to nonexistent
indexes.
"""
if isinstance(obj, dict):
return obj.get(key, default)
elif isinstance(obj, list):
try:
return obj[key]
except IndexError:
return default
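For example:

get_value({'a': 1}, 'a')         # -> 1
get_value([10, 20], 5, 'n/a')    # -> 'n/a' (out-of-range index)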
def clean_undefined(obj):
"""
Convert Undefined array entries to None (null)
"""
if isinstance(obj, list):
return [
None if isinstance(item, Undefined) else item
for item in obj
]
if isinstance(obj, dict):
for key in obj:
obj[key] = clean_undefined(obj[key])
return obj
def clean_empty_string(obj):
"""
Replace empty form values with None, since the is_html_input() check in
Field won't work after we convert to JSON.
(FIXME: What about allow_blank=True?)
"""
if obj == '':
return None
if isinstance(obj, list):
return [
None if item == '' else item
for item in obj
]
if isinstance(obj, dict):
for key in obj:
obj[key] = clean_empty_string(obj[key])
return obj
def get_all_items(obj):
"""
dict.items() but with a separate row for each value in a MultiValueDict
"""
if hasattr(obj, 'getlist'):
items = []
for key in obj:
for value in obj.getlist(key):
items.append((key, value))
return items
else:
return obj.items()
def trans_new(name, transform, inverse, breaks=None,
minor_breaks=None, _format=None,
domain=(-np.inf, np.inf), doc='', **kwargs):
"""
Create a transformation class object
Parameters
----------
name : str
Name of the transformation
transform : callable ``f(x)``
A function (preferably a `ufunc`) that computes
the transformation.
inverse : callable ``f(x)``
A function (preferably a `ufunc`) that computes
the inverse of the transformation.
breaks : callable ``f(limits)``
Function to compute the breaks for this transform.
If None, then a default good enough for a linear
domain is used.
minor_breaks : callable ``f(major, limits)``
Function to compute the minor breaks for this
transform. If None, then a default good enough for
a linear domain is used.
_format : callable ``f(breaks)``
Function to format the generated breaks.
domain : array_like
Domain over which the transformation is valid.
It should be of length 2.
doc : str
Docstring for the class.
**kwargs : dict
Attributes of the transform, e.g. if base is passed
in kwargs, then `t.base` would be a valid attribute.
Returns
-------
out : trans
Transform class
"""
def _get(func):
if isinstance(func, (classmethod, staticmethod, MethodType)):
return func
else:
return staticmethod(func)
klass_name = '{}_trans'.format(name)
d = {'transform': _get(transform),
'inverse': _get(inverse),
'domain': domain,
'__doc__': doc,
**kwargs}
if breaks:
d['breaks_'] = _get(breaks)
if minor_breaks:
d['minor_breaks'] = _get(minor_breaks)
if _format:
d['format'] = _get(_format)
return type(klass_name, (trans,), d)
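A minimal sketch of building a transform with trans_new: a square-root transform whose domain excludes negative values (assumes numpy is imported as np, as elsewhere in this module).

sqrt_trans = trans_new('sqrt', np.sqrt, np.square, domain=(0, np.inf))
t = sqrt_trans()
t.transform([1, 4, 9])   # -> array([1., 2., 3.])
t.inverse([1, 2, 3])     # -> array([1, 4, 9])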
def log_trans(base=None, **kwargs):
"""
Create a log transform class for *base*
Parameters
----------
base : float
Base for the logarithm. If None, then
the natural log is used.
kwargs : dict
Keyword arguments passed onto
:func:`trans_new`. Should not include
the `transform` or `inverse`.
Returns
-------
out : type
Log transform class
"""
# transform function
if base is None:
name = 'log'
base = np.exp(1)
transform = np.log
elif base == 10:
name = 'log10'
transform = np.log10
elif base == 2:
name = 'log2'
transform = np.log2
else:
name = 'log{}'.format(base)
def transform(x):
return np.log(x)/np.log(base)
# inverse function
def inverse(x):
try:
return base ** x
except TypeError:
return [base**val for val in x]
if 'domain' not in kwargs:
kwargs['domain'] = (sys.float_info.min, np.inf)
if 'breaks' not in kwargs:
kwargs['breaks'] = log_breaks(base=base)
kwargs['base'] = base
kwargs['_format'] = log_format(base)
_trans = trans_new(name, transform, inverse, **kwargs)
if 'minor_breaks' not in kwargs:
n = int(base) - 2
_trans.minor_breaks = trans_minor_breaks(_trans, n=n)
return _trans
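Usage sketch for a base-10 log transform:

t = log_trans(10)()
t.transform([1, 10, 100])   # -> array([0., 1., 2.])
t.inverse([0, 1, 2])        # -> [1, 10, 100]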
def exp_trans(base=None, **kwargs):
"""
Create an exponential transform class for *base*
This is the inverse of the log transform.
Parameters
----------
base : float
Base of the logarithm
kwargs : dict
Keyword arguments passed onto
:func:`trans_new`. Should not include
the `transform` or `inverse`.
Returns
-------
out : type
Exponential transform class
"""
# default to e
if base is None:
name = 'power_e'
base = np.exp(1)
else:
name = 'power_{}'.format(base)
# transform function
def transform(x):
return base ** x
# inverse function
def inverse(x):
return np.log(x)/np.log(base)
kwargs['base'] = base
return trans_new(name, transform, inverse, **kwargs)
def boxcox_trans(p, **kwargs):
"""
Boxcox Transformation
Parameters
----------
p : float
Power parameter, commonly denoted by
lower-case lambda in formulae
kwargs : dict
Keyword arguments passed onto
:func:`trans_new`. Should not include
the `transform` or `inverse`.
"""
if np.abs(p) < 1e-7:
return log_trans()
def transform(x):
return (x**p - 1) / (p * np.sign(x-1))
def inverse(x):
return (np.abs(x) * p + np.sign(x)) ** (1 / p)
kwargs['p'] = p
kwargs['name'] = kwargs.get('name', 'pow_{}'.format(p))
kwargs['transform'] = transform
kwargs['inverse'] = inverse
return trans_new(**kwargs)
def probability_trans(distribution, *args, **kwargs):
"""
Probability Transformation
Parameters
----------
distribution : str
Name of the distribution. Valid distributions are
listed at :mod:`scipy.stats`. Any of the continuous
or discrete distributions.
args : tuple
Arguments passed to the distribution functions.
kwargs : dict
Keyword arguments passed to the distribution functions.
Notes
-----
Make sure that the distribution is a good enough
approximation for the data. When this is not the case,
computations may run into errors. Absence of any errors
does not imply that the distribution fits the data.
"""
import scipy.stats as stats
cdists = {k for k in dir(stats)
if hasattr(getattr(stats, k), 'cdf')}
if distribution not in cdists:
msg = "Unknown distribution '{}'"
raise ValueError(msg.format(distribution))
try:
doc = kwargs.pop('_doc')
except KeyError:
doc = ''
try:
name = kwargs.pop('_name')
except KeyError:
name = 'prob_{}'.format(distribution)
def transform(x):
return getattr(stats, distribution).cdf(x, *args, **kwargs)
def inverse(x):
return getattr(stats, distribution).ppf(x, *args, **kwargs)
return trans_new(name,
transform, inverse, domain=(0, 1),
doc=doc)
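Usage sketch: a probit-style transform built from the normal distribution (assumes scipy is installed).

probit_trans = probability_trans('norm')
t = probit_trans()
t.transform([-1.96, 0.0, 1.96])   # -> approx. array([0.025, 0.5, 0.975])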
def gettrans(t):
"""
Return a trans object
Parameters
----------
t : str | callable | type | trans
name of transformation function
Returns
-------
out : trans
"""
obj = t
# Make sure trans object is instantiated
if isinstance(obj, str):
name = '{}_trans'.format(obj)
obj = globals()[name]()
if callable(obj):
obj = obj()
if isinstance(obj, type):
obj = obj()
if not isinstance(obj, trans):
raise ValueError("Could not get transform object.")
return obj
def breaks(self, limits):
"""
Calculate breaks in data space and return them
in transformed space.
Expects limits to be in *transform space*, this
is the same space as that where the domain is
specified.
This method wraps around :meth:`breaks_` to ensure
that the calculated breaks are within the domain of
the transform. This is helpful in cases where an
aesthetic requests breaks with limits expanded for
some padding, yet the expansion goes beyond the
domain of the transform. e.g for a probability
transform the breaks will be in the domain
``[0, 1]`` despite any outward limits.
Parameters
----------
limits : tuple
The scale limits. Size 2.
Returns
-------
out : array_like
Major breaks
"""
# clip the breaks to the domain,
# e.g. probabilities will be in [0, 1] domain
vmin = np.max([self.domain[0], limits[0]])
vmax = np.min([self.domain[1], limits[1]])
breaks = np.asarray(self.breaks_([vmin, vmax]))
# Some methods (mpl_breaks, extended_breaks) that
# calculate breaks take the limits as guide posts and
# not hard limits.
breaks = breaks.compress((breaks >= self.domain[0]) &
(breaks <= self.domain[1]))
return breaks
def transform(x):
"""
Transform from date to a numerical format
"""
try:
x = date2num(x)
except AttributeError:
# numpy datetime64
# This is not ideal because the operations do not
# preserve the np.datetime64 type. Maybe we need
# a datetime64_trans
x = [pd.Timestamp(item) for item in x]
x = date2num(x)
return x
def transform(x):
"""
Transform from Timedelta to numerical format
"""
# microseconds
try:
x = np.array([_x.total_seconds()*10**6 for _x in x])
except TypeError:
x = x.total_seconds()*10**6
return x
def inverse(x):
"""
Transform to Timedelta from numerical format
"""
try:
x = [datetime.timedelta(microseconds=i) for i in x]
except TypeError:
x = datetime.timedelta(microseconds=x)
return x
def transform(x):
"""
Transform from Timedelta to numerical format
"""
# nanoseconds
try:
x = np.array([_x.value for _x in x])
except TypeError:
x = x.value
return x
def inverse(x):
"""
Transform to Timedelta from numerical format
"""
try:
x = [pd.Timedelta(int(i)) for i in x]
except TypeError:
x = pd.Timedelta(int(x))
return x
def rescale(x, to=(0, 1), _from=None):
"""
Rescale numeric vector to have specified minimum and maximum.
Parameters
----------
x : array_like | numeric
1D vector of values to manipulate.
to : tuple
output range (numeric vector of length two)
_from : tuple
input range (numeric vector of length two).
If not given, is calculated from the range of x
Returns
-------
out : array_like
Rescaled values
Examples
--------
>>> x = [0, 2, 4, 6, 8, 10]
>>> rescale(x)
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
>>> rescale(x, to=(0, 2))
array([0. , 0.4, 0.8, 1.2, 1.6, 2. ])
>>> rescale(x, to=(0, 2), _from=(0, 20))
array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
"""
if _from is None:
_from = np.min(x), np.max(x)
return np.interp(x, _from, to)
def rescale_mid(x, to=(0, 1), _from=None, mid=0):
"""
Rescale numeric vector to have specified minimum, midpoint,
and maximum.
Parameters
----------
x : array_like | numeric
1D vector of values to manipulate.
to : tuple
output range (numeric vector of length two)
_from : tuple
input range (numeric vector of length two).
If not given, is calculated from the range of x
mid : numeric
mid-point of input range
Returns
-------
out : array_like
Rescaled values
Examples
--------
>>> rescale_mid([1, 2, 3], mid=1)
array([0.5 , 0.75, 1. ])
>>> rescale_mid([1, 2, 3], mid=2)
array([0. , 0.5, 1. ])
"""
array_like = True
try:
len(x)
except TypeError:
array_like = False
x = [x]
if not hasattr(x, 'dtype'):
x = np.asarray(x)
if _from is None:
_from = np.array([np.min(x), np.max(x)])
else:
_from = np.asarray(_from)
if (zero_range(_from) or zero_range(to)):
out = np.repeat(np.mean(to), len(x))
else:
extent = 2 * np.max(np.abs(_from - mid))
out = (x - mid) / extent * np.diff(to) + np.mean(to)
if not array_like:
out = out[0]
return out
def rescale_max(x, to=(0, 1), _from=None):
"""
Rescale numeric vector to have specified maximum.
Parameters
----------
x : array_like | numeric
1D vector of values to manipulate.
to : tuple
output range (numeric vector of length two)
_from : tuple
input range (numeric vector of length two).
If not given, is calculated from the range of x.
Only the 2nd (max) element is essential to the
output.
Returns
-------
out : array_like
Rescaled values
Examples
--------
>>> x = [0, 2, 4, 6, 8, 10]
>>> rescale_max(x, (0, 3))
array([0. , 0.6, 1.2, 1.8, 2.4, 3. ])
Only the 2nd (max) element of the parameters ``to``
and ``_from`` are essential to the output.
>>> rescale_max(x, (1, 3))
array([0. , 0.6, 1.2, 1.8, 2.4, 3. ])
>>> rescale_max(x, (0, 20))
array([ 0., 4., 8., 12., 16., 20.])
If :python:`max(x) < _from[1]` then values will be
scaled beyond the requested (:python:`to[1]`) maximum.
>>> rescale_max(x, to=(1, 3), _from=(-1, 6))
array([0., 1., 2., 3., 4., 5.])
"""
array_like = True
try:
len(x)
except TypeError:
array_like = False
x = [x]
if not hasattr(x, 'dtype'):
x = np.asarray(x)
if _from is None:
_from = np.array([np.min(x), np.max(x)])
out = x/_from[1] * to[1]
if not array_like:
out = out[0]
return out
def squish_infinite(x, range=(0, 1)):
"""
Truncate infinite values to a range.
Parameters
----------
x : array_like
Values that should have infinities squished.
range : tuple
The range onto which to squish the infinites.
Must be of size 2.
Returns
-------
out : array_like
Values with infinites squished.
Examples
--------
>>> squish_infinite([0, .5, .25, np.inf, .44])
[0.0, 0.5, 0.25, 1.0, 0.44]
>>> squish_infinite([0, -np.inf, .5, .25, np.inf], (-10, 9))
[0.0, -10.0, 0.5, 0.25, 9.0]
"""
xtype = type(x)
if not hasattr(x, 'dtype'):
x = np.asarray(x)
x[x == -np.inf] = range[0]
x[x == np.inf] = range[1]
if not isinstance(x, xtype):
x = xtype(x)
return x
def squish(x, range=(0, 1), only_finite=True):
"""
Squish values into range.
Parameters
----------
x : array_like
Values that should have out of range values squished.
range : tuple
The range onto which to squish the values.
only_finite: boolean
When true, only squishes finite values.
Returns
-------
out : array_like
Values with out of range values squished.
Examples
--------
>>> squish([-1.5, 0.2, 0.5, 0.8, 1.0, 1.2])
[0.0, 0.2, 0.5, 0.8, 1.0, 1.0]
>>> squish([-np.inf, -1.5, 0.2, 0.5, 0.8, 1.0, np.inf], only_finite=False)
[0.0, 0.0, 0.2, 0.5, 0.8, 1.0, 1.0]
"""
xtype = type(x)
if not hasattr(x, 'dtype'):
x = np.asarray(x)
finite = np.isfinite(x) if only_finite else True
x[np.logical_and(x < range[0], finite)] = range[0]
x[np.logical_and(x > range[1], finite)] = range[1]
if not isinstance(x, xtype):
x = xtype(x)
return x
def censor(x, range=(0, 1), only_finite=True):
"""
Convert any values outside of range to a **NULL** type object.
Parameters
----------
x : array_like
Values to manipulate
range : tuple
(min, max) giving desired output range
only_finite : bool
If True (the default), will only modify
finite values.
Returns
-------
x : array_like
Censored array
Examples
--------
>>> a = [1, 2, np.inf, 3, 4, -np.inf, 5]
>>> censor(a, (0, 10))
[1, 2, inf, 3, 4, -inf, 5]
>>> censor(a, (0, 10), False)
[1, 2, nan, 3, 4, nan, 5]
>>> censor(a, (2, 4))
[nan, 2, inf, 3, 4, -inf, nan]
Notes
-----
All values in ``x`` should be of the same type. ``only_finite`` parameter
is not considered for Datetime and Timedelta types.
The **NULL** type object depends on the type of values in **x**.
- :class:`float` - :py:`float('nan')`
- :class:`int` - :py:`float('nan')`
- :class:`datetime.datetime` : :py:`np.datetime64(NaT)`
- :class:`datetime.timedelta` : :py:`np.timedelta64(NaT)`
"""
if not len(x):
return x
py_time_types = (datetime.datetime, datetime.timedelta)
np_pd_time_types = (pd.Timestamp, pd.Timedelta,
np.datetime64, np.timedelta64)
x0 = first_element(x)
# Yes, we want type not isinstance
if type(x0) in py_time_types:
return _censor_with(x, range, 'NaT')
if not hasattr(x, 'dtype') and isinstance(x0, np_pd_time_types):
return _censor_with(x, range, type(x0)('NaT'))
x_array = np.asarray(x)
if pdtypes.is_number(x0) and not isinstance(x0, np.timedelta64):
null = float('nan')
elif com.is_datetime_arraylike(x_array):
null = pd.Timestamp('NaT')
elif pdtypes.is_datetime64_dtype(x_array):
null = np.datetime64('NaT')
elif isinstance(x0, pd.Timedelta):
null = pd.Timedelta('NaT')
elif pdtypes.is_timedelta64_dtype(x_array):
null = np.timedelta64('NaT')
else:
raise ValueError(
"Do not know how to censor values of type "
"{}".format(type(x0)))
if only_finite:
try:
finite = np.isfinite(x)
except TypeError:
finite = np.repeat(True, len(x))
else:
finite = np.repeat(True, len(x))
if hasattr(x, 'dtype'):
outside = (x < range[0]) | (x > range[1])
bool_idx = finite & outside
x = x.copy()
x[bool_idx] = null
else:
x = [null if not range[0] <= val <= range[1] and f else val
for val, f in zip(x, finite)]
return x
def _censor_with(x, range, value=None):
"""
Censor any values outside of range with ``None``
"""
return [val if range[0] <= val <= range[1] else value
for val in x]
def zero_range(x, tol=np.finfo(float).eps * 100):
"""
Determine if range of vector is close to zero.
Parameters
----------
x : array_like | numeric
Value(s) to check. If it is an array_like, it
should be of length 2.
tol : float
Tolerance. Default tolerance is the `machine epsilon`_
times :math:`10^2`.
Returns
-------
out : bool
Whether ``x`` has zero range.
Examples
--------
>>> zero_range([1, 1])
True
>>> zero_range([1, 2])
False
>>> zero_range([1, 2], tol=2)
True
.. _machine epsilon: https://en.wikipedia.org/wiki/Machine_epsilon
"""
try:
if len(x) == 1:
return True
except TypeError:
return True
if len(x) != 2:
raise ValueError('x must be length 1 or 2')
# Deals with array_likes that have non-standard indices
x = tuple(x)
# datetime - pandas, cpython
if isinstance(x[0], (pd.Timestamp, datetime.datetime)):
# date2num includes timezone info, .toordinal() does not
x = date2num(x)
# datetime - numpy
elif isinstance(x[0], np.datetime64):
return x[0] == x[1]
# timedelta - pandas, cpython
elif isinstance(x[0], (pd.Timedelta, datetime.timedelta)):
x = x[0].total_seconds(), x[1].total_seconds()
# timedelta - numpy
elif isinstance(x[0], np.timedelta64):
return x[0] == x[1]
elif not isinstance(x[0], (float, int, np.number)):
raise TypeError(
"zero_range objects cannot work with objects "
"of type '{}'".format(type(x[0])))
if any(np.isnan(x)):
return np.nan
if x[0] == x[1]:
return True
if all(np.isinf(x)):
return False
m = np.abs(x).min()
if m == 0:
return False
return np.abs((x[0] - x[1]) / m) < tol
def expand_range(range, mul=0, add=0, zero_width=1):
"""
Expand a range with a multiplicative or additive constant
Parameters
----------
range : tuple
Range of data. Size 2.
mul : int | float
Multiplicative constant
add : int | float | timedelta
Additive constant
zero_width : int | float | timedelta
Distance to use if range has zero width
Returns
-------
out : tuple
Expanded range
Examples
--------
>>> expand_range((3, 8))
(3, 8)
>>> expand_range((0, 10), mul=0.1)
(-1.0, 11.0)
>>> expand_range((0, 10), add=2)
(-2, 12)
>>> expand_range((0, 10), mul=.1, add=2)
(-3.0, 13.0)
>>> expand_range((0, 1))
(0, 1)
When the range has zero width
>>> expand_range((5, 5))
(4.5, 5.5)
Notes
-----
If expanding *datetime* or *timedelta* types, **add** and
**zero_width** must be suitable *timedeltas*, i.e. you should
not mix types between **Numpy**, **Pandas** and the
:mod:`datetime` module.
In Python 2, the multiplicative constant **mul** cannot be
a :class:`float`.
"""
x = range
# Enforce tuple
try:
x[0]
except TypeError:
x = (x, x)
# The expansion cases
if zero_range(x):
new = x[0]-zero_width/2, x[0]+zero_width/2
else:
dx = (x[1] - x[0]) * mul + add
new = x[0]-dx, x[1]+dx
return new
def expand_range_distinct(range, expand=(0, 0, 0, 0), zero_width=1):
"""
Expand a range with multiplicative or additive constants
Similar to :func:`expand_range` but both sides of the range
expanded using different constants
Parameters
----------
range : tuple
Range of data. Size 2
expand : tuple
Length 2 or 4. If length is 2, then the same constants
are used for both sides. If length is 4 then the first
two are the multiplicative (*mul*) and additive (*add*)
constants for the lower limit, and the second two are
the constants for the upper limit.
zero_width : int | float | timedelta
Distance to use if range has zero width
Returns
-------
out : tuple
Expanded range
Examples
--------
>>> expand_range_distinct((3, 8))
(3, 8)
>>> expand_range_distinct((0, 10), (0.1, 0))
(-1.0, 11.0)
>>> expand_range_distinct((0, 10), (0.1, 0, 0.1, 0))
(-1.0, 11.0)
>>> expand_range_distinct((0, 10), (0.1, 0, 0, 0))
(-1.0, 10)
>>> expand_range_distinct((0, 10), (0, 2))
(-2, 12)
>>> expand_range_distinct((0, 10), (0, 2, 0, 2))
(-2, 12)
>>> expand_range_distinct((0, 10), (0, 0, 0, 2))
(0, 12)
>>> expand_range_distinct((0, 10), (.1, 2))
(-3.0, 13.0)
>>> expand_range_distinct((0, 10), (.1, 2, .1, 2))
(-3.0, 13.0)
>>> expand_range_distinct((0, 10), (0, 0, .1, 2))
(0, 13.0)
"""
if len(expand) == 2:
expand = tuple(expand) * 2
lower = expand_range(range, expand[0], expand[1], zero_width)[0]
upper = expand_range(range, expand[2], expand[3], zero_width)[1]
return (lower, upper)
def _extend_breaks(self, major):
"""
Append 2 extra breaks at either end of major
If breaks of transform space are non-equidistant,
:func:`minor_breaks` adds minor breaks beyond the first
and last major breaks. The solution is to extend those
breaks (in transformed space) before the minor break call
is made. How the breaks are extended depends on the type of transform.
"""
trans = self.trans
trans = trans if isinstance(trans, type) else trans.__class__
# so far we are only certain about this extending stuff
# making sense for log transform
is_log = trans.__name__.startswith('log')
diff = np.diff(major)
step = diff[0]
if is_log and all(diff == step):
major = np.hstack([major[0]-step, major, major[-1]+step])
return major
def best_units(self, sequence):
"""
Determine good units for representing a sequence of timedeltas
"""
# Read
# [(0.9, 's'),
# (9, 'm')]
# as: break ranges between 0.9 seconds (inclusive)
# and 9 minutes are represented in seconds. And so on.
ts_range = self.value(max(sequence)) - self.value(min(sequence))
package = self.determine_package(sequence[0])
if package == 'pandas':
cuts = [
(0.9, 'us'),
(0.9, 'ms'),
(0.9, 's'),
(9, 'm'),
(6, 'h'),
(4, 'd'),
(4, 'w'),
(4, 'M'),
(3, 'y')]
denomination = NANOSECONDS
base_units = 'ns'
else:
cuts = [
(0.9, 's'),
(9, 'm'),
(6, 'h'),
(4, 'd'),
(4, 'w'),
(4, 'M'),
(3, 'y')]
denomination = SECONDS
base_units = 'ms'
for size, units in reversed(cuts):
if ts_range >= size*denomination[units]:
return units
return base_units
def scaled_limits(self):
"""
Minimum and Maximum to use for computing breaks
"""
_min = self.limits[0]/self.factor
_max = self.limits[1]/self.factor
return _min, _max
def numeric_to_timedelta(self, numerics):
"""
Convert sequence of numerics to timedelta
"""
if self.package == 'pandas':
return [self.type(int(x*self.factor), units='ns')
for x in numerics]
else:
return [self.type(seconds=x*self.factor)
for x in numerics]
def to_numeric(self, td):
"""
Convert timedelta to a number corresponding to the
appropriate units. The appropriate units are those
determined when the object is initialised.
"""
if self.package == 'pandas':
return td.value/NANOSECONDS[self.units]
else:
return td.total_seconds()/SECONDS[self.units]
def round_any(x, accuracy, f=np.round):
"""
Round to multiple of any number.
"""
if not hasattr(x, 'dtype'):
x = np.asarray(x)
return f(x / accuracy) * accuracy
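For example, rounding to the nearest 0.5, or flooring to the nearest 10:

round_any([1.1, 1.3, 1.8], 0.5)           # -> array([1. , 1.5, 2. ])
round_any([12, 47, 88], 10, f=np.floor)   # -> array([10., 40., 80.])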
def min_max(x, na_rm=False, finite=True):
"""
Return the minimum and maximum of x
Parameters
----------
x : array_like
Sequence
na_rm : bool
Whether to remove ``nan`` values.
finite : bool
Whether to consider only finite values.
Returns
-------
out : tuple
(minimum, maximum) of x
"""
if not hasattr(x, 'dtype'):
x = np.asarray(x)
if na_rm and finite:
x = x[np.isfinite(x)]
elif not na_rm and np.any(np.isnan(x)):
return np.nan, np.nan
elif na_rm:
x = x[~np.isnan(x)]
elif finite:
x = x[~np.isinf(x)]
if (len(x)):
return np.min(x), np.max(x)
else:
return float('-inf'), float('inf')
def match(v1, v2, nomatch=-1, incomparables=None, start=0):
"""
Return a vector of the positions of (first)
matches of its first argument in its second.
Parameters
----------
v1: array_like
Values to be matched
v2: array_like
Values to be matched against
nomatch: int
Value to be returned in the case when
no match is found.
incomparables: array_like
Values that cannot be matched. Any value in ``v1``
matching a value in this list is assigned the nomatch
value.
start: int
Type of indexing to use. Most likely 0 or 1
"""
v2_indices = {}
for i, x in enumerate(v2):
if x not in v2_indices:
v2_indices[x] = i
v1_to_v2_map = [nomatch] * len(v1)
skip = set(incomparables) if incomparables else set()
for i, x in enumerate(v1):
if x in skip:
continue
try:
v1_to_v2_map[i] = v2_indices[x] + start
except KeyError:
pass
return v1_to_v2_map
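For example:

match(['b', 'x', 'a'], ['a', 'b', 'c'])   # -> [1, -1, 0]
match(['b', 'a'], ['a', 'b'], start=1)    # -> [2, 1]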
def precision(x):
"""
Return the precision of x
Parameters
----------
x : array_like | numeric
Value(s) for which to compute the precision.
Returns
-------
out : numeric
The precision of ``x`` or that of the values in ``x``.
Notes
-----
The precision is computed in base 10.
Examples
--------
>>> precision(0.08)
0.01
>>> precision(9)
1
>>> precision(16)
10
"""
from .bounds import zero_range
rng = min_max(x, na_rm=True)
if zero_range(rng):
span = np.abs(rng[0])
else:
span = np.diff(rng)[0]
if span == 0:
return 1
else:
return 10 ** int(np.floor(np.log10(span)))
def multitype_sort(a):
"""
Sort elements of multiple types
``a`` is assumed to contain elements of different types, such that
plain sort would raise a `TypeError`.
Parameters
----------
a : array-like
Array of items to be sorted
Returns
-------
out : list
Items sorted within their type groups.
"""
types = defaultdict(list)
numbers = {int, float, complex}
for x in a:
t = type(x)
if t in numbers:
types['number'].append(x)
else:
types[t].append(x)
for t in types:
types[t] = np.sort(types[t])
return list(chain(*(types[t] for t in types)))
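For example, numbers are grouped and sorted together, and so are strings (group order follows first appearance):

multitype_sort([3, 'b', 1.5, 'a', 2])   # -> [1.5, 2.0, 3.0, 'a', 'b']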
def nearest_int(x):
"""
Return nearest long integer to x
"""
if x == 0:
return np.int64(0)
elif x > 0:
return np.int64(x + 0.5)
else:
return np.int64(x - 0.5)
def is_close_to_int(x):
"""
Check if value is close to an integer
Parameters
----------
x : float
Numeric value to check
Returns
-------
out : bool
"""
if not np.isfinite(x):
return False
return abs(x - nearest_int(x)) < 1e-10
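For example:

nearest_int(2.4)                 # -> 2
nearest_int(-2.6)                # -> -3
is_close_to_int(3.00000000001)   # -> True
is_close_to_int(3.01)            # -> False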
def same_log10_order_of_magnitude(x, delta=0.1):
"""
Return True if the range is approximately in the same order of magnitude
For example these sequences are in the same order of magnitude:
- [1, 8, 5] # [1, 10)
- [35, 20, 80] # [10, 100)
- [232, 730] # [100, 1000)
Parameters
----------
x : array-like
Values in base 10. Must be size 2 and
``x[0] <= x[1]``.
delta : float
Fuzz factor for approximation. It is multiplicative.
"""
dmin = np.log10(np.min(x)*(1-delta))
dmax = np.log10(np.max(x)*(1+delta))
return np.floor(dmin) == np.floor(dmax)
def _format(formatter, x):
"""
Helper to format and tidy up
"""
# For MPL to play nice
formatter.create_dummy_axis()
# For sensible decimal places
formatter.set_locs([val for val in x if ~np.isnan(val)])
try:
oom = int(formatter.orderOfMagnitude)
except AttributeError:
oom = 0
labels = [formatter(tick) for tick in x]
# Remove unnecessary decimals
pattern = re.compile(r'\.0+$')
for i, label in enumerate(labels):
match = pattern.search(label)
if match:
labels[i] = pattern.sub('', label)
# MPL does not add the exponential component
if oom:
labels = ['{}e{}'.format(s, oom) if s != '0' else s
for s in labels]
return labels
def _tidyup_labels(self, labels):
"""
Make all labels uniform in format and remove redundant zeros
for labels in exponential format.
Parameters
----------
labels : list-like
Labels to be tidied.
Returns
-------
out : list-like
Labels
"""
def remove_zeroes(s):
"""
Remove unnecessary zeros for float string s
"""
tup = s.split('e')
if len(tup) == 2:
mantissa = tup[0].rstrip('0').rstrip('.')
exponent = int(tup[1])
if exponent:
s = '%se%d' % (mantissa, exponent)
else:
s = mantissa
return s
def as_exp(s):
"""
Return float string s in exponential format
"""
return s if 'e' in s else '{:1.0e}'.format(float(s))
# If any are in exponential format, make all of
# them exponential
has_e = np.array(['e' in x for x in labels])
if not np.all(has_e) and not np.all(~has_e):
labels = [as_exp(x) for x in labels]
labels = [remove_zeroes(x) for x in labels]
return labels
def hls_palette(n_colors=6, h=.01, l=.6, s=.65):
"""
Get a set of evenly spaced colors in HLS hue space.
h, l, and s should be between 0 and 1
Parameters
----------
n_colors : int
number of colors in the palette
h : float
first hue
l : float
lightness
s : float
saturation
Returns
-------
palette : list
List of colors as RGB tuples.
See Also
--------
husl_palette : Make a palette using evenly spaced circular
hues in the HUSL system.
Examples
--------
>>> len(hls_palette(2))
2
>>> len(hls_palette(9))
9
"""
hues = np.linspace(0, 1, n_colors + 1)[:-1]
hues += h
hues %= 1
hues -= hues.astype(int)
palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]
return palette
def husl_palette(n_colors=6, h=.01, s=.9, l=.65):
"""
Get a set of evenly spaced colors in HUSL hue space.
h, s, and l should be between 0 and 1
Parameters
----------
n_colors : int
number of colors in the palette
h : float
first hue
s : float
saturation
l : float
lightness
Returns
-------
palette : list
List of colors as RGB tuples.
See Also
--------
hls_palette : Make a palette using evenly spaced circular
hues in the HSL system.
Examples
--------
>>> len(husl_palette(3))
3
>>> len(husl_palette(11))
11
"""
hues = np.linspace(0, 1, n_colors + 1)[:-1]
hues += h
hues %= 1
hues *= 359
s *= 99
l *= 99
palette = [husl.husl_to_rgb(h_i, s, l) for h_i in hues]
return palette
def area_pal(range=(1, 6)):
"""
Point area palette (continuous).
Parameters
----------
range : tuple
Numeric vector of length two, giving range of possible sizes.
Should be greater than 0.
Returns
-------
out : function
Palette function that takes a sequence of values
in the range ``[0, 1]`` and returns values in
the specified range.
Examples
--------
>>> x = np.arange(0, .6, .1)**2
>>> palette = area_pal()
>>> palette(x)
array([1. , 1.5, 2. , 2.5, 3. , 3.5])
The results are equidistant because the input ``x`` is in
area space, i.e. it is squared.
"""
def area_palette(x):
return rescale(np.sqrt(x), to=range, _from=(0, 1))
return area_palette
def abs_area(max):
"""
Point area palette (continuous), with area proportional to value.
Parameters
----------
max : float
A number representing the maximum size
Returns
-------
out : function
Palette function that takes a sequence of values
in the range ``[0, 1]`` and returns values in the range
``[0, max]``.
Examples
--------
>>> x = np.arange(0, .8, .1)**2
>>> palette = abs_area(5)
>>> palette(x)
array([0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5])
Compared to :func:`area_pal`, :func:`abs_area` will handle values
in the range ``[-1, 0]`` without returning ``np.nan``. And values
whose absolute value is greater than 1 will be clipped to the
maximum.
"""
def abs_area_palette(x):
return rescale(np.sqrt(np.abs(x)), to=(0, max), _from=(0, 1))
return abs_area_palette
def grey_pal(start=0.2, end=0.8):
"""
Utility for creating continuous grey scale palette
Parameters
----------
start : float
grey value at low end of palette
end : float
grey value at high end of palette
Returns
-------
out : function
Continuous color palette that takes a single
:class:`int` parameter ``n`` and returns ``n``
equally spaced colors.
Examples
--------
>>> palette = grey_pal()
>>> palette(5)
['#333333', '#737373', '#989898', '#b5b5b5', '#cccccc']
"""
gamma = 2.2
ends = ((0.0, start, start), (1.0, end, end))
cdict = {'red': ends, 'green': ends, 'blue': ends}
grey_cmap = mcolors.LinearSegmentedColormap('grey', cdict)
def continuous_grey_palette(n):
colors = []
# The grey scale points are linearly separated in
# gamma encoded space
for x in np.linspace(start**gamma, end**gamma, n):
# Map points onto the [0, 1] palette domain
x = (x ** (1./gamma) - start) / (end - start)
colors.append(mcolors.rgb2hex(grey_cmap(x)))
return colors
return continuous_grey_palette
def hue_pal(h=.01, l=.6, s=.65, color_space='hls'):
"""
Utility for making hue palettes for color schemes.
Parameters
----------
h : float
first hue. In the [0, 1] range
l : float
lightness. In the [0, 1] range
s : float
saturation. In the [0, 1] range
color_space : 'hls' | 'husl'
Color space to use for the palette
Returns
-------
out : function
A discrete color palette that takes a single
:class:`int` parameter ``n`` and returns ``n``
equally spaced colors. Though the palette
is continuous, since it varies the hue it
is good for categorical data. However, if ``n``
is large enough the colors show continuity.
Examples
--------
>>> hue_pal()(5)
['#db5f57', '#b9db57', '#57db94', '#5784db', '#c957db']
>>> hue_pal(color_space='husl')(5)
['#e0697e', '#9b9054', '#569d79', '#5b98ab', '#b675d7']
"""
if not all([0 <= val <= 1 for val in (h, l, s)]):
msg = ("hue_pal expects values to be between 0 and 1. "
" I got h={}, l={}, s={}".format(h, l, s))
raise ValueError(msg)
if color_space not in ('hls', 'husl'):
msg = "color_space should be one of ['hls', 'husl']"
raise ValueError(msg)
name = '{}_palette'.format(color_space)
palette = globals()[name]
def _hue_pal(n):
colors = palette(n, h=h, l=l, s=s)
return [mcolors.rgb2hex(c) for c in colors]
return _hue_pal
def brewer_pal(type='seq', palette=1):
"""
Utility for making a brewer palette
Parameters
----------
type : 'sequential' | 'qualitative' | 'diverging'
Type of palette. Sequential, Qualitative or
Diverging. The following abbreviations may
be used, ``seq``, ``qual`` or ``div``.
palette : int | str
Which palette to choose from. If it is an integer,
it must be in the range ``[0, m]``, where ``m``
depends on the number of sequential, qualitative or
diverging palettes. If it is a string, then it
is the name of the palette.
Returns
-------
out : function
A color palette that takes a single
:class:`int` parameter ``n`` and returns ``n``
colors. The maximum value of ``n`` varies
depending on the parameters.
Examples
--------
>>> brewer_pal()(5)
['#EFF3FF', '#BDD7E7', '#6BAED6', '#3182BD', '#08519C']
>>> brewer_pal('qual')(5)
['#7FC97F', '#BEAED4', '#FDC086', '#FFFF99', '#386CB0']
>>> brewer_pal('qual', 2)(5)
['#1B9E77', '#D95F02', '#7570B3', '#E7298A', '#66A61E']
>>> brewer_pal('seq', 'PuBuGn')(5)
['#F6EFF7', '#BDC9E1', '#67A9CF', '#1C9099', '#016C59']
The available color names for each palette type can be
obtained using the following code::
import palettable.colorbrewer as brewer
print([k for k in brewer.COLOR_MAPS['Sequential'].keys()])
print([k for k in brewer.COLOR_MAPS['Qualitative'].keys()])
print([k for k in brewer.COLOR_MAPS['Diverging'].keys()])
"""
def full_type_name(text):
abbrevs = {
'seq': 'Sequential',
'qual': 'Qualitative',
'div': 'Diverging'
}
text = abbrevs.get(text, text)
return text.title()
def number_to_palette_name(ctype, n):
"""
Return palette name that corresponds to a given number
Uses alphabetical ordering
"""
n -= 1
palettes = sorted(colorbrewer.COLOR_MAPS[ctype].keys())
if n < len(palettes):
return palettes[n]
raise ValueError(
"There are only '{}' palettes of type {}. "
"You requested palette no. {}".format(len(palettes),
ctype, n+1))
def max_palette_colors(type, palette_name):
"""
Return the number of colors in the brewer palette
"""
if type == 'Sequential':
return 9
elif type == 'Diverging':
return 11
else:
# Qualitative palettes have different limits
qlimit = {'Accent': 8, 'Dark2': 8, 'Paired': 12,
'Pastel1': 9, 'Pastel2': 8, 'Set1': 9,
'Set2': 8, 'Set3': 12}
return qlimit[palette_name]
type = full_type_name(type)
if isinstance(palette, int):
palette_name = number_to_palette_name(type, palette)
else:
palette_name = palette
nmax = max_palette_colors(type, palette_name)
def _brewer_pal(n):
# Only draw the maximum allowable colors from the palette
# and fill any remaining spots with None
_n = n if n <= nmax else nmax
try:
bmap = colorbrewer.get_map(palette_name, type, _n)
except ValueError as err:
# Some palettes have a minimum no. of colors set at 3
# We get around that restriction.
if 0 <= _n < 3:
bmap = colorbrewer.get_map(palette_name, type, 3)
else:
raise err
hex_colors = bmap.hex_colors[:n]
if n > nmax:
msg = ("Warning message: "
"Brewer palette {} has a maximum of {} colors. "
"Returning the palette you asked for, with "
"that many colors".format(palette_name, nmax))
warnings.warn(msg)
hex_colors = hex_colors + [None] * (n - nmax)
return hex_colors
return _brewer_pal
def ratios_to_colors(values, colormap):
"""
Map values in the range [0, 1] onto colors
Parameters
----------
values : array_like | float
Numeric(s) in the range [0, 1]
colormap : cmap
Matplotlib colormap to use for the mapping
Returns
-------
out : list | float
Color(s) corresponding to the values
"""
iterable = True
try:
iter(values)
except TypeError:
iterable = False
values = [values]
color_tuples = colormap(values)
try:
hex_colors = [mcolors.rgb2hex(t) for t in color_tuples]
except IndexError:
hex_colors = mcolors.rgb2hex(color_tuples)
return hex_colors if iterable else hex_colors[0]
def gradient_n_pal(colors, values=None, name='gradientn'):
"""
Create an n-color gradient palette
Parameters
----------
colors : list
list of colors
values : list, optional
list of points in the range [0, 1] at which to
place each color. Must be the same size as
`colors`. The default is to space the colors evenly
name : str
Name to call the resultant MPL colormap
Returns
-------
out : function
Continuous color palette that takes a single
parameter, either a :class:`float` or a sequence
of floats, maps those value(s) onto the palette
and returns the color(s). The float(s) must be
in the range [0, 1].
Examples
--------
>>> palette = gradient_n_pal(['red', 'blue'])
>>> palette([0, .25, .5, .75, 1])
['#ff0000', '#bf0040', '#7f0080', '#3f00c0', '#0000ff']
"""
# Note: For better results across devices and media types,
# it would be better to do the interpolation in
# Lab color space.
if values is None:
colormap = mcolors.LinearSegmentedColormap.from_list(
name, colors)
else:
colormap = mcolors.LinearSegmentedColormap.from_list(
name, list(zip(values, colors)))
def _gradient_n_pal(vals):
return ratios_to_colors(vals, colormap)
return _gradient_n_pal
def cmap_pal(name=None, lut=None):
"""
Create a continuous palette using an MPL colormap
Parameters
----------
name : str
Name of colormap
lut : None | int
This is the number of entries desired in the lookup table.
Default is ``None``, leave it up to Matplotlib.
Returns
-------
out : function
Continuous color palette that takes a single
parameter, either a :class:`float` or a sequence
of floats, maps those value(s) onto the palette
and returns the color(s). The float(s) must be
in the range [0, 1].
Examples
--------
>>> palette = cmap_pal('viridis')
>>> palette([.1, .2, .3, .4, .5])
['#482475', '#414487', '#355f8d', '#2a788e', '#21918c']
"""
colormap = get_cmap(name, lut)
def _cmap_pal(vals):
return ratios_to_colors(vals, colormap)
return _cmap_pal
def cmap_d_pal(name=None, lut=None):
"""
Create a discrete palette using an MPL Listed colormap
Parameters
----------
name : str
Name of colormap
lut : None | int
This is the number of entries desired in the lookup table.
Default is ``None``, leave it up to Matplotlib.
Returns
-------
out : function
A discrete color palette that takes a single
:class:`int` parameter ``n`` and returns ``n``
colors. The maximum value of ``n`` varies
depending on the parameters.
Examples
--------
>>> palette = cmap_d_pal('viridis')
>>> palette(5)
['#440154', '#3b528b', '#21918c', '#5cc863', '#fde725']
"""
colormap = get_cmap(name, lut)
if not isinstance(colormap, mcolors.ListedColormap):
raise ValueError(
"For a discrete palette, cmap must be of type "
"matplotlib.colors.ListedColormap")
ncolors = len(colormap.colors)
def _cmap_d_pal(n):
if n > ncolors:
raise ValueError(
"cmap `{}` has {} colors you requested {} "
"colors.".format(name, ncolors, n))
if ncolors < 256:
return [mcolors.rgb2hex(c) for c in colormap.colors[:n]]
else:
# Assume these are continuous and get colors at equally spaced
# intervals, e.g. viridis is defined with 256 colors
idx = np.linspace(0, ncolors-1, n).round().astype(int)
return [mcolors.rgb2hex(colormap.colors[i]) for i in idx]
return _cmap_d_pal
def desaturate_pal(color, prop, reverse=False):
"""
Create a palette that desaturates a color by some proportion
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
prop : float
saturation channel of color will be multiplied by
this value
reverse : bool
Whether to reverse the palette.
Returns
-------
out : function
Continuous color palette that takes a single
parameter, either a :class:`float` or a sequence
of floats, maps those value(s) onto the palette
and returns the color(s). The float(s) must be
in the range [0, 1].
Examples
--------
>>> palette = desaturate_pal('red', .1)
>>> palette([0, .25, .5, .75, 1])
['#ff0000', '#e21d1d', '#c53a3a', '#a95656', '#8c7373']
"""
if not 0 <= prop <= 1:
raise ValueError("prop must be between 0 and 1")
# Get rgb tuple rep
# Convert to hls
# Desaturate the saturation channel
# Convert back to rgb
rgb = mcolors.colorConverter.to_rgb(color)
h, l, s = colorsys.rgb_to_hls(*rgb)
s *= prop
desaturated_color = colorsys.hls_to_rgb(h, l, s)
colors = [color, desaturated_color]
if reverse:
colors = colors[::-1]
return gradient_n_pal(colors, name='desaturated')
def manual_pal(values):
"""
Create a palette from a list of values
Parameters
----------
values : sequence
Values that will be returned by the palette function.
Returns
-------
out : function
A function palette that takes a single
:class:`int` parameter ``n`` and returns ``n`` values.
Examples
--------
>>> palette = manual_pal(['a', 'b', 'c', 'd', 'e'])
>>> palette(3)
['a', 'b', 'c']
"""
max_n = len(values)
def _manual_pal(n):
if n > max_n:
msg = ("Palette can return a maximum of {} values. "
"{} were requested from it.")
warnings.warn(msg.format(max_n, n))
return values[:n]
return _manual_pal
def cubehelix_pal(start=0, rot=.4, gamma=1.0, hue=0.8,
light=.85, dark=.15, reverse=False):
"""
Utility for creating continuous palette from the cubehelix system.
This produces a colormap with linearly-decreasing (or increasing)
brightness. That means that information will be preserved if printed to
black and white or viewed by someone who is colorblind.
Parameters
----------
start : float (0 <= start <= 3)
The hue at the start of the helix.
rot : float
Rotations around the hue wheel over the range of the palette.
gamma : float (0 <= gamma)
Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1)
colors.
hue : float (0 <= hue <= 1)
Saturation of the colors.
dark : float (0 <= dark <= 1)
Intensity of the darkest color in the palette.
light : float (0 <= light <= 1)
Intensity of the lightest color in the palette.
reverse : bool
If True, the palette will go from dark to light.
Returns
-------
out : function
Continuous color palette that takes a single
:class:`int` parameter ``n`` and returns ``n``
equally spaced colors.
References
----------
Green, D. A. (2011). "A colour scheme for the display of astronomical
intensity images". Bulletin of the Astronomical Society of India, Vol. 39,
p. 289-295.
Examples
--------
>>> palette = cubehelix_pal()
>>> palette(5)
['#edd1cb', '#d499a7', '#aa688f', '#6e4071', '#2d1e3e']
"""
cdict = mpl._cm.cubehelix(gamma, start, rot, hue)
cubehelix_cmap = mpl.colors.LinearSegmentedColormap('cubehelix', cdict)
def cubehelix_palette(n):
values = np.linspace(light, dark, n)
return [mcolors.rgb2hex(cubehelix_cmap(x)) for x in values]
return cubehelix_palette
def apply(cls, x, palette, na_value=None, trans=None):
"""
Scale data continuously
Parameters
----------
x : array_like
Continuous values to scale
palette : callable ``f(x)``
Palette to use
na_value : object
Value to use for missing values.
trans : trans
How to transform the data before scaling. If
``None``, no transformation is done.
Returns
-------
out : array_like
Scaled values
"""
if trans is not None:
x = trans.transform(x)
limits = cls.train(x)
return cls.map(x, palette, limits, na_value)
def train(cls, new_data, old=None):
"""
Train a continuous scale
Parameters
----------
new_data : array_like
New values
old : array_like
Old range. Most likely a tuple of length 2.
Returns
-------
out : tuple
Limits(range) of the scale
"""
if not len(new_data):
return old
if not hasattr(new_data, 'dtype'):
new_data = np.asarray(new_data)
if new_data.dtype.kind not in CONTINUOUS_KINDS:
raise TypeError(
"Discrete value supplied to continuous scale")
if old is not None:
new_data = np.hstack([new_data, old])
return min_max(new_data, na_rm=True, finite=True) |
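# Hedged sketch of the continuous train() above, with a plain-numpy stand-in
# for the mizani min_max helper: old limits are stacked with the new data
# before taking (min, max), so a trained range can only widen.
def _demo_continuous_train():
    import numpy as np
    old = (2.0, 5.0)
    new = np.array([1.0, 4.0])
    stacked = np.hstack([new, old])
    assert (stacked.min(), stacked.max()) == (1.0, 5.0) |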
def map(cls, x, palette, limits, na_value=None, oob=censor):
"""
Map values to a continuous palette
Parameters
----------
    x : array_like
        Continuous values to scale
    palette : callable ``f(x)``
        Palette to use
    limits : tuple
        Range (min, max) over which to rescale ``x``
    na_value : object
        Value to use for missing values.
oob : callable ``f(x)``
Function to deal with values that are
beyond the limits
Returns
-------
out : array_like
Values mapped onto a palette
"""
x = oob(rescale(x, _from=limits))
pal = palette(x)
try:
pal[pd.isnull(x)] = na_value
except TypeError:
pal = [v if not pd.isnull(v) else na_value for v in pal]
return pal |
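# Hedged sketch of the apply/train/map pipeline above, with plain-numpy
# stand-ins for the mizani helpers: rescale squeezes x into [0, 1] using the
# trained limits, and the palette is evaluated at those rescaled positions.
def _demo_continuous_map():
    import numpy as np
    x = np.array([0.0, 5.0, 10.0])
    limits = (x.min(), x.max())                           # what train() returns
    rescaled = (x - limits[0]) / (limits[1] - limits[0])  # what rescale() does
    def gray(t):                                          # stand-in palette
        v = int(255 * t)
        return '#%02x%02x%02x' % (v, v, v)
    return [gray(t) for t in rescaled]     # ['#000000', '#7f7f7f', '#ffffff'] |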
def train(cls, new_data, old=None, drop=False, na_rm=False):
"""
    Train a discrete scale
    Parameters
    ----------
    new_data : array_like
        New values
    old : array_like
        Old values known to the scale.
    drop : bool
        Whether to drop (not include) unused categories
na_rm : bool
If ``True``, remove missing values. Missing values
are either ``NaN`` or ``None``.
Returns
-------
out : list
Values covered by the scale
"""
if not len(new_data):
return old
if old is None:
old = []
# Get the missing values (NaN & Nones) locations and remove them
nan_bool_idx = pd.isnull(new_data)
has_na = np.any(nan_bool_idx)
if not hasattr(new_data, 'dtype'):
new_data = np.asarray(new_data)
new_data = new_data[~nan_bool_idx]
if new_data.dtype.kind not in DISCRETE_KINDS:
raise TypeError(
"Continuous value supplied to discrete scale")
# Train i.e. get the new values
if pdtypes.is_categorical_dtype(new_data):
try:
new = list(new_data.cat.categories) # series
except AttributeError:
new = list(new_data.categories) # plain categorical
if drop:
present = set(new_data.drop_duplicates())
new = [i for i in new if i in present]
else:
try:
new = np.unique(new_data)
new.sort()
except TypeError:
# new_data probably has nans and other types
new = list(set(new_data))
new = multitype_sort(new)
# Add nan if required
if has_na and not na_rm:
new = np.hstack([new, np.nan])
# update old
old_set = set(old)
return list(old) + [i for i in new if (i not in old_set)] |
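# Hedged sketch of the discrete train() above: new categories are sorted,
# then appended to `old` while preserving the order of values already seen.
def _demo_discrete_train():
    import numpy as np
    old = ['b', 'a']
    new = np.unique(np.array(['c', 'a', 'b']))   # array(['a', 'b', 'c'])
    merged = list(old) + [i for i in new if i not in set(old)]
    assert merged == ['b', 'a', 'c'] |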
def map(cls, x, palette, limits, na_value=None):
"""
Map values to a discrete palette
Parameters
----------
    x : array_like
        Discrete values to scale
    palette : callable ``f(n)``
        Palette to use
    limits : array_like
        Values covered by the scale, as returned by ``train``
    na_value : object
        Value to use for missing values.
Returns
-------
out : array_like
Values mapped onto a palette
"""
n = len(limits)
pal = palette(n)[match(x, limits)]
try:
pal[pd.isnull(x)] = na_value
except TypeError:
pal = [v if not pd.isnull(v) else na_value for v in pal]
return pal |
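# Hedged sketch of the discrete map() above: `match` finds each value's
# position in `limits`, and palette(n) is indexed at those positions; the
# list below mimics what the mizani match helper computes.
def _demo_discrete_map():
    limits = ['low', 'mid', 'high']
    palette = lambda n: ['#111111', '#888888', '#eeeeee'][:n]
    x = ['mid', 'high', 'low']
    idx = [limits.index(v) for v in x]           # what match(x, limits) computes
    mapped = [palette(len(limits))[i] for i in idx]
    assert mapped == ['#888888', '#eeeeee', '#111111'] |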
def parse(type: Type):
"""
    Register a parser for an attribute type.
    Parsers will be used to parse `str` type objects from either
    the command-line arguments or environment variables.
    Args:
        type: the type the decorated function will be responsible
            for parsing an environment variable to.
"""
def decorator(parser):
EnvVar.parsers[type] = parser
return parser
return decorator |
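# Hedged sketch using the decorator above: registers a converter in
# EnvVar.parsers keyed by bool, so a str pulled from the environment can be
# coerced. The truthy-string logic here is illustrative, not from the source.
@parse(bool)
def _parse_bool(value: str) -> bool:
    return value.strip().lower() in ('1', 'true', 'yes', 'on') |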
def _patched_run_hook(hook_name, project_dir, context):
"""Used to patch cookiecutter's ``run_hook`` function.
This patched version ensures that the temple.yaml file is created before
any cookiecutter hooks are executed
"""
if hook_name == 'post_gen_project':
with temple.utils.cd(project_dir):
temple.utils.write_temple_config(context['cookiecutter'],
context['template'],
context['version'])
return cc_hooks.run_hook(hook_name, project_dir, context) |
def _generate_files(repo_dir, config, template, version):
"""Uses cookiecutter to generate files for the project.
Monkeypatches cookiecutter's "run_hook" to ensure that the temple.yaml file is
generated before any hooks run. This is important to ensure that hooks can also
perform any actions involving temple.yaml
"""
with unittest.mock.patch('cookiecutter.generate.run_hook', side_effect=_patched_run_hook):
cc_generate.generate_files(repo_dir=repo_dir,
context={'cookiecutter': config,
'template': template,
'version': version},
overwrite_if_exists=False,
output_dir='.') |
def setup(template, version=None):
"""Sets up a new project from a template
    Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'setup' for the duration
of this function.
Args:
template (str): The git SSH path to a template
version (str, optional): The version of the template to use when updating. Defaults
to the latest version
"""
temple.check.is_git_ssh_path(template)
temple.check.not_in_git_repo()
repo_path = temple.utils.get_repo_path(template)
msg = (
'You will be prompted for the parameters of your new project.'
' Please read the docs at https://github.com/{} before entering parameters.'
).format(repo_path)
print(msg)
cc_repo_dir, config = temple.utils.get_cookiecutter_config(template, version=version)
if not version:
with temple.utils.cd(cc_repo_dir):
ret = temple.utils.shell('git rev-parse HEAD', stdout=subprocess.PIPE)
version = ret.stdout.decode('utf-8').strip()
_generate_files(repo_dir=cc_repo_dir, config=config, template=template, version=version) |
def _parse_link_header(headers):
"""Parses Github's link header for pagination.
TODO eventually use a github client for this
"""
links = {}
if 'link' in headers:
link_headers = headers['link'].split(', ')
for link_header in link_headers:
(url, rel) = link_header.split('; ')
url = url[1:-1]
rel = rel[5:-1]
links[rel] = url
return links |
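# Hedged sketch of the slicing in _parse_link_header above: url[1:-1] drops
# the angle brackets and rel[5:-1] drops the 'rel="' prefix and closing quote.
def _demo_parse_link_header():
    headers = {'link': '<https://api.github.com/search/code?page=2>; rel="next", '
                       '<https://api.github.com/search/code?page=5>; rel="last"'}
    links = _parse_link_header(headers)
    assert links == {'next': 'https://api.github.com/search/code?page=2',
                     'last': 'https://api.github.com/search/code?page=5'} |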
def _code_search(query, github_user=None):
"""Performs a Github API code search
Args:
query (str): The query sent to Github's code search
github_user (str, optional): The Github user being searched in the query string
Returns:
dict: A dictionary of repository information keyed on the git SSH url
Raises:
`InvalidGithubUserError`: When ``github_user`` is invalid
"""
github_client = temple.utils.GithubClient()
headers = {'Accept': 'application/vnd.github.v3.text-match+json'}
resp = github_client.get('/search/code',
params={'q': query, 'per_page': 100},
headers=headers)
if resp.status_code == requests.codes.unprocessable_entity and github_user:
raise temple.exceptions.InvalidGithubUserError(
'Invalid Github user or org - "{}"'.format(github_user))
resp.raise_for_status()
resp_data = resp.json()
repositories = collections.defaultdict(dict)
while True:
repositories.update({
            'git@github.com:{}.git'.format(repo['repository']['full_name']): repo['repository']
for repo in resp_data['items']
})
next_url = _parse_link_header(resp.headers).get('next')
if next_url:
resp = requests.get(next_url, headers=headers)
resp.raise_for_status()
resp_data = resp.json()
else:
break
return repositories |
def ls(github_user, template=None):
"""Lists all temple templates and packages associated with those templates
If ``template`` is None, returns the available templates for the configured
Github org.
If ``template`` is a Github path to a template, returns all projects spun
up with that template.
``ls`` uses the github search API to find results.
Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'ls' for the duration of this
function.
Args:
github_user (str): The github user or org being searched.
template (str, optional): The template git repo path. If provided, lists
all projects that have been created with the provided template. Note
that the template path is the SSH path
            (e.g. git@github.com:CloverHealth/temple.git)
Returns:
dict: A dictionary of repository information keyed on the SSH Github url
Raises:
`InvalidGithubUserError`: When ``github_user`` is invalid
"""
temple.check.has_env_vars(temple.constants.GITHUB_API_TOKEN_ENV_VAR)
if template:
temple.check.is_git_ssh_path(template)
search_q = 'user:{} filename:{} {}'.format(
github_user,
temple.constants.TEMPLE_CONFIG_FILE,
template)
else:
search_q = 'user:{} cookiecutter.json in:path'.format(github_user)
results = _code_search(search_q, github_user)
return collections.OrderedDict(sorted(results.items())) |
def update(check, enter_parameters, version):
"""
Update package with latest template. Must be inside of the project
folder to run.
Using "-e" will prompt for re-entering the template parameters again
even if the project is up to date.
Use "-v" to update to a particular version of a template.
Using "-c" will perform a check that the project is up to date
with the latest version of the template (or the version specified by "-v").
No updating will happen when using this option.
"""
if check:
if temple.update.up_to_date(version=version):
print('Temple package is up to date')
else:
msg = (
'This temple package is out of date with the latest template.'
            ' Update your package by running "temple update" and committing changes.'
)
raise temple.exceptions.NotUpToDateWithTemplateError(msg)
else:
temple.update.update(new_version=version, enter_parameters=enter_parameters) |
def ls(github_user, template, long_format):
"""
List packages created with temple. Enter a github user or
organization to list all templates under the user or org.
Using a template path as the second argument will list all projects
that have been started with that template.
Use "-l" to print the Github repository descriptions of templates
or projects.
"""
github_urls = temple.ls.ls(github_user, template=template)
for ssh_path, info in github_urls.items():
if long_format:
print(ssh_path, '-', info['description'] or '(no project description found)')
else:
print(ssh_path) |
def switch(template, version):
"""
Switch a project's template to a different template.
"""
temple.update.update(new_template=template, new_version=version) |
def _in_git_repo():
"""Returns True if inside a git repo, False otherwise"""
ret = temple.utils.shell('git rev-parse', stderr=subprocess.DEVNULL, check=False)
return ret.returncode == 0 |
def _has_branch(branch):
"""Return True if the target branch exists."""
ret = temple.utils.shell('git rev-parse --verify {}'.format(branch),
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
check=False)
return ret.returncode == 0 |
def not_has_branch(branch):
"""Raises `ExistingBranchError` if the specified branch exists."""
if _has_branch(branch):
msg = 'Cannot proceed while {} branch exists; remove and try again.'.format(branch)
raise temple.exceptions.ExistingBranchError(msg) |
def has_env_vars(*env_vars):
"""Raises `InvalidEnvironmentError` when one isnt set"""
for env_var in env_vars:
if not os.environ.get(env_var):
msg = (
'Must set {} environment variable. View docs for setting up environment at {}'
).format(env_var, temple.constants.TEMPLE_DOCS_URL)
raise temple.exceptions.InvalidEnvironmentError(msg) |
def is_temple_project():
"""Raises `InvalidTempleProjectError` if repository is not a temple project"""
if not os.path.exists(temple.constants.TEMPLE_CONFIG_FILE):
msg = 'No {} file found in repository.'.format(temple.constants.TEMPLE_CONFIG_FILE)
raise temple.exceptions.InvalidTempleProjectError(msg) |
def _get_current_branch():
"""Determine the current git branch"""
result = temple.utils.shell('git rev-parse --abbrev-ref HEAD', stdout=subprocess.PIPE)
return result.stdout.decode('utf8').strip() |
def clean():
"""Cleans up temporary resources
Tries to clean up:
1. The temporary update branch used during ``temple update``
2. The primary update branch used during ``temple update``
"""
temple.check.in_git_repo()
current_branch = _get_current_branch()
update_branch = temple.constants.UPDATE_BRANCH_NAME
temp_update_branch = temple.constants.TEMP_UPDATE_BRANCH_NAME
if current_branch in (update_branch, temp_update_branch):
err_msg = (
'You must change from the "{}" branch since it will be deleted during cleanup'
).format(current_branch)
raise temple.exceptions.InvalidCurrentBranchError(err_msg)
if temple.check._has_branch(update_branch):
temple.utils.shell('git branch -D {}'.format(update_branch))
if temple.check._has_branch(temp_update_branch):
temple.utils.shell('git branch -D {}'.format(temp_update_branch)) |
def _cookiecutter_configs_have_changed(template, old_version, new_version):
"""Given an old version and new version, check if the cookiecutter.json files have changed
When the cookiecutter.json files change, it means the user will need to be prompted for
new context
Args:
template (str): The git SSH path to the template
old_version (str): The git SHA of the old version
new_version (str): The git SHA of the new version
Returns:
bool: True if the cookiecutter.json files have been changed in the old and new versions
"""
temple.check.is_git_ssh_path(template)
repo_path = temple.utils.get_repo_path(template)
github_client = temple.utils.GithubClient()
api = '/repos/{}/contents/cookiecutter.json'.format(repo_path)
old_config_resp = github_client.get(api, params={'ref': old_version})
old_config_resp.raise_for_status()
new_config_resp = github_client.get(api, params={'ref': new_version})
new_config_resp.raise_for_status()
return old_config_resp.json()['content'] != new_config_resp.json()['content'] |
def _apply_template(template, target, *, checkout, extra_context):
"""Apply a template to a temporary directory and then copy results to target."""
with tempfile.TemporaryDirectory() as tempdir:
repo_dir = cc_main.cookiecutter(
template,
checkout=checkout,
no_input=True,
output_dir=tempdir,
extra_context=extra_context)
for item in os.listdir(repo_dir):
src = os.path.join(repo_dir, item)
dst = os.path.join(target, item)
if os.path.isdir(src):
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
else:
if os.path.exists(dst):
os.remove(dst)
shutil.copy2(src, dst) |
def up_to_date(version=None):
"""Checks if a temple project is up to date with the repo
Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'update' for the duration of this
function.
Args:
version (str, optional): Update against this git SHA or branch of the template
Returns:
boolean: True if up to date with ``version`` (or latest version), False otherwise
Raises:
`NotInGitRepoError`: When running outside of a git repo
`InvalidTempleProjectError`: When not inside a valid temple repository
"""
temple.check.in_git_repo()
temple.check.is_temple_project()
temple_config = temple.utils.read_temple_config()
old_template_version = temple_config['_version']
new_template_version = version or _get_latest_template_version(temple_config['_template'])
return new_template_version == old_template_version |
def _needs_new_cc_config_for_update(old_template, old_version, new_template, new_version):
"""
Given two templates and their respective versions, return True if a new cookiecutter
config needs to be obtained from the user
"""
if old_template != new_template:
return True
else:
return _cookiecutter_configs_have_changed(new_template,
old_version,
new_version) |
def update(old_template=None, old_version=None, new_template=None, new_version=None,
enter_parameters=False):
"""Updates the temple project to the latest template
    Proceeds in the following steps:
1. Ensure we are inside the project repository
2. Obtain the latest version of the package template
3. If the package is up to date with the latest template, return
4. If not, create an empty template branch with a new copy of the old template
5. Create an update branch from HEAD and merge in the new template copy
6. Create a new copy of the new template and merge into the empty template branch
7. Merge the updated empty template branch into the update branch
8. Ensure temple.yaml reflects what is in the template branch
9. Remove the empty template branch
Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'update' for the
duration of this function.
Two branches will be created during the update process, one named
``_temple_update`` and one named ``_temple_update_temp``. At the end of
the process, ``_temple_update_temp`` will be removed automatically. The
work will be left in ``_temple_update`` in an uncommitted state for
review. The update will fail early if either of these branches exist
before the process starts.
Args:
old_template (str, default=None): The old template from which to update. Defaults
to the template in temple.yaml
old_version (str, default=None): The old version of the template. Defaults to
the version in temple.yaml
new_template (str, default=None): The new template for updating. Defaults to the
template in temple.yaml
new_version (str, default=None): The new version of the new template to update.
Defaults to the latest version of the new template
enter_parameters (bool, default=False): Force entering template parameters for the project
Raises:
`NotInGitRepoError`: When not inside of a git repository
`InvalidTempleProjectError`: When not inside a valid temple repository
`InDirtyRepoError`: When an update is triggered while the repo is in a dirty state
`ExistingBranchError`: When an update is triggered and there is an existing
update branch
Returns:
boolean: True if update was performed or False if template was already up to date
"""
update_branch = temple.constants.UPDATE_BRANCH_NAME
temp_update_branch = temple.constants.TEMP_UPDATE_BRANCH_NAME
temple.check.in_git_repo()
temple.check.in_clean_repo()
temple.check.is_temple_project()
temple.check.not_has_branch(update_branch)
temple.check.not_has_branch(temp_update_branch)
temple.check.has_env_vars(temple.constants.GITHUB_API_TOKEN_ENV_VAR)
temple_config = temple.utils.read_temple_config()
old_template = old_template or temple_config['_template']
new_template = new_template or temple_config['_template']
old_version = old_version or temple_config['_version']
new_version = new_version or _get_latest_template_version(new_template)
if new_template == old_template and new_version == old_version and not enter_parameters:
print('No updates have happened to the template, so no files were updated')
return False
print('Creating branch {} for processing the update'.format(update_branch))
temple.utils.shell('git checkout -b {}'.format(update_branch),
stderr=subprocess.DEVNULL)
print('Creating temporary working branch {}'.format(temp_update_branch))
temple.utils.shell('git checkout --orphan {}'.format(temp_update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell('git rm -rf .',
stdout=subprocess.DEVNULL)
_apply_template(old_template,
'.',
checkout=old_version,
extra_context=temple_config)
temple.utils.shell('git add .')
temple.utils.shell(
'git commit --no-verify -m "Initialize template from version {}"'.format(old_version),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Merge old template history into update branch.')
temple.utils.shell('git checkout {}'.format(update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell(
'git merge -s ours --no-edit --allow-unrelated-histories {}'.format(temp_update_branch),
stderr=subprocess.DEVNULL)
print('Update template in temporary branch.')
temple.utils.shell('git checkout {}'.format(temp_update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell('git rm -rf .',
stdout=subprocess.DEVNULL)
# If the cookiecutter.json files have changed or the templates have changed,
# the user will need to re-enter the cookiecutter config
needs_new_cc_config = _needs_new_cc_config_for_update(old_template, old_version,
new_template, new_version)
if needs_new_cc_config:
if old_template != new_template:
cc_config_input_msg = (
'You will be prompted for the parameters of the new template.'
' Please read the docs at https://github.com/{} before entering parameters.'
' Press enter to continue'
).format(temple.utils.get_repo_path(new_template))
else:
cc_config_input_msg = (
'A new template variable has been defined in the updated template.'
' You will be prompted to enter all of the variables again. Variables'
' already configured in your project will have their values set as'
' defaults. Press enter to continue'
)
input(cc_config_input_msg)
# Even if there is no detected need to re-enter the cookiecutter config, the user
# can still re-enter config parameters with the "enter_parameters" flag
if needs_new_cc_config or enter_parameters:
_, temple_config = (
temple.utils.get_cookiecutter_config(new_template,
default_config=temple_config,
version=new_version))
_apply_template(new_template,
'.',
checkout=new_version,
extra_context=temple_config)
temple.utils.write_temple_config(temple_config, new_template, new_version)
temple.utils.shell('git add .')
temple.utils.shell(
'git commit --no-verify -m "Update template to version {}"'.format(new_version),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Merge updated template into update branch.')
temple.utils.shell('git checkout {}'.format(update_branch),
stderr=subprocess.DEVNULL)
temple.utils.shell('git merge --no-commit {}'.format(temp_update_branch),
check=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
# The temple.yaml file should always reflect what is in the new template
temple.utils.shell('git checkout --theirs {}'.format(temple.constants.TEMPLE_CONFIG_FILE),
check=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
print('Remove temporary template branch {}'.format(temp_update_branch))
temple.utils.shell('git branch -D {}'.format(temp_update_branch),
stdout=subprocess.DEVNULL)
print(textwrap.dedent("""\
Updating complete!
Please review the changes with "git status" for any errors or
conflicts. Once you are satisfied with the changes, add, commit,
push, and open a PR with the branch {}
""").format(update_branch))
return True |
def shell(cmd, check=True, stdin=None, stdout=None, stderr=None):
"""Runs a subprocess shell with check=True by default"""
return subprocess.run(cmd, shell=True, check=check, stdin=stdin, stdout=stdout, stderr=stderr) |
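# Hedged usage sketch for shell() above: capturing output means passing
# stdout=subprocess.PIPE and decoding, as _get_current_branch does.
import subprocess
def _demo_shell():
    ret = shell('echo hello', stdout=subprocess.PIPE)
    assert ret.stdout.decode('utf-8').strip() == 'hello' |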