Unnamed: 0 (int64, 0-10k) | repository_name (string, 7-54 chars) | func_path_in_repository (string, 5-223 chars) | func_name (string, 1-134 chars) | whole_func_string (string, 100-30.3k chars) | language (1 class) | func_code_string (string, 100-30.3k chars) | func_code_tokens (string, 138-33.2k chars) | func_documentation_string (string, 1-15k chars) | func_documentation_tokens (string, 5-5.14k chars) | split_name (1 class) | func_code_url (string, 91-315 chars)
---|---|---|---|---|---|---|---|---|---|---|---
1,400 | orbingol/NURBS-Python | geomdl/linalg.py | vector_dot | def vector_dot(vector1, vector2):
""" Computes the dot-product of the input vectors.
:param vector1: input vector 1
:type vector1: list, tuple
:param vector2: input vector 2
:type vector2: list, tuple
:return: result of the dot product
:rtype: float
"""
try:
if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0:
raise ValueError("Input vectors cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Input must be a list or tuple")
except Exception:
raise
# Compute dot product
prod = 0.0
for v1, v2 in zip(vector1, vector2):
prod += v1 * v2
# Return the dot product of the input vectors
return prod | python | def vector_dot(vector1, vector2):
""" Computes the dot-product of the input vectors.
:param vector1: input vector 1
:type vector1: list, tuple
:param vector2: input vector 2
:type vector2: list, tuple
:return: result of the dot product
:rtype: float
"""
try:
if vector1 is None or len(vector1) == 0 or vector2 is None or len(vector2) == 0:
raise ValueError("Input vectors cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Input must be a list or tuple")
except Exception:
raise
# Compute dot product
prod = 0.0
for v1, v2 in zip(vector1, vector2):
prod += v1 * v2
# Return the dot product of the input vectors
return prod | ['def', 'vector_dot', '(', 'vector1', ',', 'vector2', ')', ':', 'try', ':', 'if', 'vector1', 'is', 'None', 'or', 'len', '(', 'vector1', ')', '==', '0', 'or', 'vector2', 'is', 'None', 'or', 'len', '(', 'vector2', ')', '==', '0', ':', 'raise', 'ValueError', '(', '"Input vectors cannot be empty"', ')', 'except', 'TypeError', 'as', 'e', ':', 'print', '(', '"An error occurred: {}"', '.', 'format', '(', 'e', '.', 'args', '[', '-', '1', ']', ')', ')', 'raise', 'TypeError', '(', '"Input must be a list or tuple"', ')', 'except', 'Exception', ':', 'raise', '# Compute dot product', 'prod', '=', '0.0', 'for', 'v1', ',', 'v2', 'in', 'zip', '(', 'vector1', ',', 'vector2', ')', ':', 'prod', '+=', 'v1', '*', 'v2', '# Return the dot product of the input vectors', 'return', 'prod'] | Computes the dot-product of the input vectors.
:param vector1: input vector 1
:type vector1: list, tuple
:param vector2: input vector 2
:type vector2: list, tuple
:return: result of the dot product
:rtype: float | ['Computes', 'the', 'dot', '-', 'product', 'of', 'the', 'input', 'vectors', '.'] | train | https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L62-L87 |
1,401 | TissueMAPS/TmDeploy | elasticluster/elasticluster/cluster.py | Node.connect | def connect(self, keyfile=None):
"""Connect to the node via ssh using the paramiko library.
:return: :py:class:`paramiko.SSHClient` - ssh connection or None on
failure
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if keyfile and os.path.exists(keyfile):
ssh.load_host_keys(keyfile)
# Try connecting using the `preferred_ip`, if
# present. Otherwise, try all of them and set `preferred_ip`
# using the first that is working.
ips = self.ips[:]
# This is done in order to "sort" the IPs and put the preferred_ip first.
if self.preferred_ip:
if self.preferred_ip in ips:
ips.remove(self.preferred_ip)
else:
# Preferred is changed?
log.debug("IP %s does not seem to belong to %s anymore. Ignoring!", self.preferred_ip, self.name)
self.preferred_ip = ips[0]
for ip in itertools.chain([self.preferred_ip], ips):
if not ip:
continue
try:
log.debug("Trying to connect to host %s (%s)",
self.name, ip)
addr, port = parse_ip_address_and_port(ip, SSH_PORT)
ssh.connect(str(addr),
username=self.image_user,
allow_agent=True,
key_filename=self.user_key_private,
timeout=Node.connection_timeout,
port=port)
log.debug("Connection to %s succeeded on port %d!", ip, port)
if ip != self.preferred_ip:
log.debug("Setting `preferred_ip` to %s", ip)
self.preferred_ip = ip
# Connection successful.
return ssh
except socket.error as ex:
log.debug("Host %s (%s) not reachable: %s.",
self.name, ip, ex)
except paramiko.BadHostKeyException as ex:
log.error("Invalid host key: host %s (%s); check keyfile: %s",
self.name, ip, keyfile)
except paramiko.SSHException as ex:
log.debug("Ignoring error %s connecting to %s",
str(ex), self.name)
return None | python | def connect(self, keyfile=None):
"""Connect to the node via ssh using the paramiko library.
:return: :py:class:`paramiko.SSHClient` - ssh connection or None on
failure
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if keyfile and os.path.exists(keyfile):
ssh.load_host_keys(keyfile)
# Try connecting using the `preferred_ip`, if
# present. Otherwise, try all of them and set `preferred_ip`
# using the first that is working.
ips = self.ips[:]
# This is done in order to "sort" the IPs and put the preferred_ip first.
if self.preferred_ip:
if self.preferred_ip in ips:
ips.remove(self.preferred_ip)
else:
# Preferred is changed?
log.debug("IP %s does not seem to belong to %s anymore. Ignoring!", self.preferred_ip, self.name)
self.preferred_ip = ips[0]
for ip in itertools.chain([self.preferred_ip], ips):
if not ip:
continue
try:
log.debug("Trying to connect to host %s (%s)",
self.name, ip)
addr, port = parse_ip_address_and_port(ip, SSH_PORT)
ssh.connect(str(addr),
username=self.image_user,
allow_agent=True,
key_filename=self.user_key_private,
timeout=Node.connection_timeout,
port=port)
log.debug("Connection to %s succeeded on port %d!", ip, port)
if ip != self.preferred_ip:
log.debug("Setting `preferred_ip` to %s", ip)
self.preferred_ip = ip
# Connection successful.
return ssh
except socket.error as ex:
log.debug("Host %s (%s) not reachable: %s.",
self.name, ip, ex)
except paramiko.BadHostKeyException as ex:
log.error("Invalid host key: host %s (%s); check keyfile: %s",
self.name, ip, keyfile)
except paramiko.SSHException as ex:
log.debug("Ignoring error %s connecting to %s",
str(ex), self.name)
return None | ['def', 'connect', '(', 'self', ',', 'keyfile', '=', 'None', ')', ':', 'ssh', '=', 'paramiko', '.', 'SSHClient', '(', ')', 'ssh', '.', 'set_missing_host_key_policy', '(', 'paramiko', '.', 'AutoAddPolicy', '(', ')', ')', 'if', 'keyfile', 'and', 'os', '.', 'path', '.', 'exists', '(', 'keyfile', ')', ':', 'ssh', '.', 'load_host_keys', '(', 'keyfile', ')', '# Try connecting using the `preferred_ip`, if', '# present. Otherwise, try all of them and set `preferred_ip`', '# using the first that is working.', 'ips', '=', 'self', '.', 'ips', '[', ':', ']', '# This is done in order to "sort" the IPs and put the preferred_ip first.', 'if', 'self', '.', 'preferred_ip', ':', 'if', 'self', '.', 'preferred_ip', 'in', 'ips', ':', 'ips', '.', 'remove', '(', 'self', '.', 'preferred_ip', ')', 'else', ':', '# Preferred is changed?', 'log', '.', 'debug', '(', '"IP %s does not seem to belong to %s anymore. Ignoring!"', ',', 'self', '.', 'preferred_ip', ',', 'self', '.', 'name', ')', 'self', '.', 'preferred_ip', '=', 'ips', '[', '0', ']', 'for', 'ip', 'in', 'itertools', '.', 'chain', '(', '[', 'self', '.', 'preferred_ip', ']', ',', 'ips', ')', ':', 'if', 'not', 'ip', ':', 'continue', 'try', ':', 'log', '.', 'debug', '(', '"Trying to connect to host %s (%s)"', ',', 'self', '.', 'name', ',', 'ip', ')', 'addr', ',', 'port', '=', 'parse_ip_address_and_port', '(', 'ip', ',', 'SSH_PORT', ')', 'ssh', '.', 'connect', '(', 'str', '(', 'addr', ')', ',', 'username', '=', 'self', '.', 'image_user', ',', 'allow_agent', '=', 'True', ',', 'key_filename', '=', 'self', '.', 'user_key_private', ',', 'timeout', '=', 'Node', '.', 'connection_timeout', ',', 'port', '=', 'port', ')', 'log', '.', 'debug', '(', '"Connection to %s succeeded on port %d!"', ',', 'ip', ',', 'port', ')', 'if', 'ip', '!=', 'self', '.', 'preferred_ip', ':', 'log', '.', 'debug', '(', '"Setting `preferred_ip` to %s"', ',', 'ip', ')', 'self', '.', 'preferred_ip', '=', 'ip', '# Connection successful.', 'return', 'ssh', 'except', 'socket', '.', 'error', 'as', 'ex', ':', 'log', '.', 'debug', '(', '"Host %s (%s) not reachable: %s."', ',', 'self', '.', 'name', ',', 'ip', ',', 'ex', ')', 'except', 'paramiko', '.', 'BadHostKeyException', 'as', 'ex', ':', 'log', '.', 'error', '(', '"Invalid host key: host %s (%s); check keyfile: %s"', ',', 'self', '.', 'name', ',', 'ip', ',', 'keyfile', ')', 'except', 'paramiko', '.', 'SSHException', 'as', 'ex', ':', 'log', '.', 'debug', '(', '"Ignoring error %s connecting to %s"', ',', 'str', '(', 'ex', ')', ',', 'self', '.', 'name', ')', 'return', 'None'] | Connect to the node via ssh using the paramiko library.
:return: :py:class:`paramiko.SSHClient` - ssh connection or None on
failure | ['Connect', 'to', 'the', 'node', 'via', 'ssh', 'using', 'the', 'paramiko', 'library', '.'] | train | https://github.com/TissueMAPS/TmDeploy/blob/f891b4ffb21431988bc4a063ae871da3bf284a45/elasticluster/elasticluster/cluster.py#L1141-L1194 |
1,402 | assamite/creamas | creamas/core/simulation.py | Simulation.end | def end(self, folder=None):
"""End the simulation and destroy the current simulation environment.
"""
ret = self.env.destroy(folder=folder)
self._end_time = time.time()
self._log(logging.DEBUG, "Simulation run with {} steps took {:.3f}s to"
" complete, while actual processing time was {:.3f}s."
.format(self.age, self._end_time - self._start_time,
self._processing_time))
return ret | python | def end(self, folder=None):
"""End the simulation and destroy the current simulation environment.
"""
ret = self.env.destroy(folder=folder)
self._end_time = time.time()
self._log(logging.DEBUG, "Simulation run with {} steps took {:.3f}s to"
" complete, while actual processing time was {:.3f}s."
.format(self.age, self._end_time - self._start_time,
self._processing_time))
return ret | ['def', 'end', '(', 'self', ',', 'folder', '=', 'None', ')', ':', 'ret', '=', 'self', '.', 'env', '.', 'destroy', '(', 'folder', '=', 'folder', ')', 'self', '.', '_end_time', '=', 'time', '.', 'time', '(', ')', 'self', '.', '_log', '(', 'logging', '.', 'DEBUG', ',', '"Simulation run with {} steps took {:.3f}s to"', '" complete, while actual processing time was {:.3f}s."', '.', 'format', '(', 'self', '.', 'age', ',', 'self', '.', '_end_time', '-', 'self', '.', '_start_time', ',', 'self', '.', '_processing_time', ')', ')', 'return', 'ret'] | End the simulation and destroy the current simulation environment. | ['End', 'the', 'simulation', 'and', 'destroy', 'the', 'current', 'simulation', 'environment', '.'] | train | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L282-L291 |
1,403 | sprockets/sprockets-influxdb | sprockets_influxdb.py | Measurement.marshall | def marshall(self):
"""Return the measurement in the line protocol format.
:rtype: str
"""
return '{},{} {} {}'.format(
self._escape(self.name),
','.join(['{}={}'.format(self._escape(k), self._escape(v))
for k, v in self.tags.items()]),
self._marshall_fields(),
int(self.timestamp * 1000)) | python | def marshall(self):
"""Return the measurement in the line protocol format.
:rtype: str
"""
return '{},{} {} {}'.format(
self._escape(self.name),
','.join(['{}={}'.format(self._escape(k), self._escape(v))
for k, v in self.tags.items()]),
self._marshall_fields(),
int(self.timestamp * 1000)) | ['def', 'marshall', '(', 'self', ')', ':', 'return', "'{},{} {} {}'", '.', 'format', '(', 'self', '.', '_escape', '(', 'self', '.', 'name', ')', ',', "','", '.', 'join', '(', '[', "'{}={}'", '.', 'format', '(', 'self', '.', '_escape', '(', 'k', ')', ',', 'self', '.', '_escape', '(', 'v', ')', ')', 'for', 'k', ',', 'v', 'in', 'self', '.', 'tags', '.', 'items', '(', ')', ']', ')', ',', 'self', '.', '_marshall_fields', '(', ')', ',', 'int', '(', 'self', '.', 'timestamp', '*', '1000', ')', ')'] | Return the measurement in the line protocol format.
:rtype: str | ['Return', 'the', 'measurement', 'in', 'the', 'line', 'protocol', 'format', '.'] | train | https://github.com/sprockets/sprockets-influxdb/blob/cce73481b8f26b02e65e3f9914a9a22eceff3063/sprockets_influxdb.py#L858-L869 |
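The `marshall` method above emits InfluxDB line protocol (`<measurement>,<tags> <fields> <timestamp>`). A standalone sketch of that layout, without using `sprockets_influxdb`; the measurement name, tags and field values are made up:

```python
# Standalone illustration of the line-protocol layout produced by marshall().
import time

name = "request_duration"
tags = {"host": "web01", "handler": "UserHandler"}
fields = {"value": 0.125}

line = "{},{} {} {}".format(
    name,
    ",".join("{}={}".format(k, v) for k, v in sorted(tags.items())),
    ",".join("{}={}".format(k, v) for k, v in sorted(fields.items())),
    int(time.time() * 1000),  # millisecond timestamp, as in the method above
)
print(line)
# request_duration,handler=UserHandler,host=web01 value=0.125 1700000000000
```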
1,404 | lpantano/seqcluster | seqcluster/seqbuster/__init__.py | _download_mirbase | def _download_mirbase(args, version="CURRENT"):
"""
Download files from mirbase
"""
if not args.hairpin or not args.mirna:
logger.info("Working with version %s" % version)
hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz")
mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz")
if not file_exists(hairpin_fn):
cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$" % (version, hairpin_fn)
do.run(cmd_h, "download hairpin")
if not file_exists(mirna_fn):
cmd_m = "wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f !$" % (version, mirna_fn)
do.run(cmd_m, "download mirna")
else:
return args.hairpin, args.mirna | python | def _download_mirbase(args, version="CURRENT"):
"""
Download files from mirbase
"""
if not args.hairpin or not args.mirna:
logger.info("Working with version %s" % version)
hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz")
mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz")
if not file_exists(hairpin_fn):
cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$" % (version, hairpin_fn)
do.run(cmd_h, "download hairpin")
if not file_exists(mirna_fn):
cmd_m = "wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f !$" % (version, mirna_fn)
do.run(cmd_m, "download mirna")
else:
return args.hairpin, args.mirna | ['def', '_download_mirbase', '(', 'args', ',', 'version', '=', '"CURRENT"', ')', ':', 'if', 'not', 'args', '.', 'hairpin', 'or', 'not', 'args', '.', 'mirna', ':', 'logger', '.', 'info', '(', '"Working with version %s"', '%', 'version', ')', 'hairpin_fn', '=', 'op', '.', 'join', '(', 'op', '.', 'abspath', '(', 'args', '.', 'out', ')', ',', '"hairpin.fa.gz"', ')', 'mirna_fn', '=', 'op', '.', 'join', '(', 'op', '.', 'abspath', '(', 'args', '.', 'out', ')', ',', '"miRNA.str.gz"', ')', 'if', 'not', 'file_exists', '(', 'hairpin_fn', ')', ':', 'cmd_h', '=', '"wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$"', '%', '(', 'version', ',', 'hairpin_fn', ')', 'do', '.', 'run', '(', 'cmd_h', ',', '"download hairpin"', ')', 'if', 'not', 'file_exists', '(', 'mirna_fn', ')', ':', 'cmd_m', '=', '"wget ftp://mirbase.org/pub/mirbase/%s/miRNA.str.gz -O %s && gunzip -f !$"', '%', '(', 'version', ',', 'mirna_fn', ')', 'do', '.', 'run', '(', 'cmd_m', ',', '"download mirna"', ')', 'else', ':', 'return', 'args', '.', 'hairpin', ',', 'args', '.', 'mirna'] | Download files from mirbase | ['Download', 'files', 'from', 'mirbase'] | train | https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/seqbuster/__init__.py#L23-L38 |
1,405 | skioo/django-customer-billing | billing/models.py | total_amount | def total_amount(qs) -> Total:
"""Sums the amounts of the objects in the queryset, keeping each currency separate.
:param qs: A querystring containing objects that have an amount field of type Money.
:return: A Total object.
"""
aggregate = qs.values('amount_currency').annotate(sum=Sum('amount'))
return Total(Money(amount=r['sum'], currency=r['amount_currency']) for r in aggregate) | python | def total_amount(qs) -> Total:
"""Sums the amounts of the objects in the queryset, keeping each currency separate.
:param qs: A querystring containing objects that have an amount field of type Money.
:return: A Total object.
"""
aggregate = qs.values('amount_currency').annotate(sum=Sum('amount'))
return Total(Money(amount=r['sum'], currency=r['amount_currency']) for r in aggregate) | ['def', 'total_amount', '(', 'qs', ')', '->', 'Total', ':', 'aggregate', '=', 'qs', '.', 'values', '(', "'amount_currency'", ')', '.', 'annotate', '(', 'sum', '=', 'Sum', '(', "'amount'", ')', ')', 'return', 'Total', '(', 'Money', '(', 'amount', '=', 'r', '[', "'sum'", ']', ',', 'currency', '=', 'r', '[', "'amount_currency'", ']', ')', 'for', 'r', 'in', 'aggregate', ')'] | Sums the amounts of the objects in the queryset, keeping each currency separate.
:param qs: A querystring containing objects that have an amount field of type Money.
:return: A Total object. | ['Sums', 'the', 'amounts', 'of', 'the', 'objects', 'in', 'the', 'queryset', 'keeping', 'each', 'currency', 'separate', '.', ':', 'param', 'qs', ':', 'A', 'querystring', 'containing', 'objects', 'that', 'have', 'an', 'amount', 'field', 'of', 'type', 'Money', '.', ':', 'return', ':', 'A', 'Total', 'object', '.'] | train | https://github.com/skioo/django-customer-billing/blob/6ac1ed9ef9d1d7eee0379de7f0c4b76919ae1f2d/billing/models.py#L22-L28 |
1,406 | biocommons/bioutils | src/bioutils/sequences.py | aa3_to_aa1 | def aa3_to_aa1(seq):
"""convert string of 3-letter amino acids to 1-letter amino acids
>>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu")
'CATSARELAME'
>>> aa3_to_aa1(None)
"""
if seq is None:
return None
return "".join(aa3_to_aa1_lut[aa3]
for aa3 in [seq[i:i + 3] for i in range(0, len(seq), 3)]) | python | def aa3_to_aa1(seq):
"""convert string of 3-letter amino acids to 1-letter amino acids
>>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu")
'CATSARELAME'
>>> aa3_to_aa1(None)
"""
if seq is None:
return None
return "".join(aa3_to_aa1_lut[aa3]
for aa3 in [seq[i:i + 3] for i in range(0, len(seq), 3)]) | ['def', 'aa3_to_aa1', '(', 'seq', ')', ':', 'if', 'seq', 'is', 'None', ':', 'return', 'None', 'return', '""', '.', 'join', '(', 'aa3_to_aa1_lut', '[', 'aa3', ']', 'for', 'aa3', 'in', '[', 'seq', '[', 'i', ':', 'i', '+', '3', ']', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'seq', ')', ',', '3', ')', ']', ')'] | convert string of 3-letter amino acids to 1-letter amino acids
>>> aa3_to_aa1("CysAlaThrSerAlaArgGluLeuAlaMetGlu")
'CATSARELAME'
>>> aa3_to_aa1(None) | ['convert', 'string', 'of', '3', '-', 'letter', 'amino', 'acids', 'to', '1', '-', 'letter', 'amino', 'acids'] | train | https://github.com/biocommons/bioutils/blob/88bcbdfa707268fed1110800e91b6d4f8e9475a0/src/bioutils/sequences.py#L158-L170 |
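`aa3_to_aa1` depends on a module-level `aa3_to_aa1_lut` mapping that is not shown in the row; a self-contained sketch with a few standard IUPAC three-letter to one-letter codes:

```python
# Self-contained sketch; only a handful of standard IUPAC codes are included.
aa3_to_aa1_lut = {
    "Ala": "A", "Arg": "R", "Cys": "C", "Glu": "E",
    "Leu": "L", "Met": "M", "Ser": "S", "Thr": "T",
}

def aa3_to_aa1(seq):
    if seq is None:
        return None
    # Walk the string three characters at a time and map each triplet.
    return "".join(aa3_to_aa1_lut[aa3]
                   for aa3 in [seq[i:i + 3] for i in range(0, len(seq), 3)])

print(aa3_to_aa1("CysAlaThrSer"))  # 'CATS'
```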
1,407 | exekias/droplet | droplet/web/wizards.py | next | def next(transport, wizard, step, data):
"""
Validate step and go to the next one (or finish the wizard)
:param transport: Transport object
:param wizard: Wizard block name
:param step: Current step number
:param data: form data for the step
"""
step = int(step)
wizard = blocks.get(wizard)
# Retrieve form block
form = wizard.next(step)
valid = forms.send(transport, form.register_name, data=data)
if valid:
if wizard.next(step+1) is None:
# It was last step
wizard.finish(transport)
return
# Next step
wizard.step = step+1
wizard.update(transport) | python | def next(transport, wizard, step, data):
"""
Validate step and go to the next one (or finish the wizard)
:param transport: Transport object
:param wizard: Wizard block name
:param step: Current step number
:param data: form data for the step
"""
step = int(step)
wizard = blocks.get(wizard)
# Retrieve form block
form = wizard.next(step)
valid = forms.send(transport, form.register_name, data=data)
if valid:
if wizard.next(step+1) is None:
# It was last step
wizard.finish(transport)
return
# Next step
wizard.step = step+1
wizard.update(transport) | ['def', 'next', '(', 'transport', ',', 'wizard', ',', 'step', ',', 'data', ')', ':', 'step', '=', 'int', '(', 'step', ')', 'wizard', '=', 'blocks', '.', 'get', '(', 'wizard', ')', '# Retrieve form block', 'form', '=', 'wizard', '.', 'next', '(', 'step', ')', 'valid', '=', 'forms', '.', 'send', '(', 'transport', ',', 'form', '.', 'register_name', ',', 'data', '=', 'data', ')', 'if', 'valid', ':', 'if', 'wizard', '.', 'next', '(', 'step', '+', '1', ')', 'is', 'None', ':', '# It was last step', 'wizard', '.', 'finish', '(', 'transport', ')', 'return', '# Next step', 'wizard', '.', 'step', '=', 'step', '+', '1', 'wizard', '.', 'update', '(', 'transport', ')'] | Validate step and go to the next one (or finish the wizard)
:param transport: Transport object
:param wizard: Wizard block name
:param step: Current step number
:param data: form data for the step | ['Validate', 'step', 'and', 'go', 'to', 'the', 'next', 'one', '(', 'or', 'finish', 'the', 'wizard', ')'] | train | https://github.com/exekias/droplet/blob/aeac573a2c1c4b774e99d5414a1c79b1bb734941/droplet/web/wizards.py#L86-L111 |
1,408 | calmjs/calmjs | src/calmjs/dist.py | validate_line_list | def validate_line_list(dist, attr, value):
"""
Validate that the value is compatible
"""
# does not work as reliably in Python 2.
if isinstance(value, str):
value = value.split()
value = list(value)
try:
check = (' '.join(value)).split()
if check == value:
return True
except Exception:
pass
raise DistutilsSetupError("%r must be a list of valid identifiers" % attr) | python | def validate_line_list(dist, attr, value):
"""
Validate that the value is compatible
"""
# does not work as reliably in Python 2.
if isinstance(value, str):
value = value.split()
value = list(value)
try:
check = (' '.join(value)).split()
if check == value:
return True
except Exception:
pass
raise DistutilsSetupError("%r must be a list of valid identifiers" % attr) | ['def', 'validate_line_list', '(', 'dist', ',', 'attr', ',', 'value', ')', ':', '# does not work as reliably in Python 2.', 'if', 'isinstance', '(', 'value', ',', 'str', ')', ':', 'value', '=', 'value', '.', 'split', '(', ')', 'value', '=', 'list', '(', 'value', ')', 'try', ':', 'check', '=', '(', "' '", '.', 'join', '(', 'value', ')', ')', '.', 'split', '(', ')', 'if', 'check', '==', 'value', ':', 'return', 'True', 'except', 'Exception', ':', 'pass', 'raise', 'DistutilsSetupError', '(', '"%r must be a list of valid identifiers"', '%', 'attr', ')'] | Validate that the value is compatible | ['Validate', 'that', 'the', 'value', 'is', 'compatible'] | train | https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L79-L95 |
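A usage sketch for the setuptools keyword validator above, assuming the calmjs package is installed; the keyword name `extras_calmjs` is illustrative, and the first argument (the setuptools `Distribution`) is unused by the check, so `None` is passed:

```python
# Usage sketch; assumes calmjs is installed.
from calmjs.dist import validate_line_list

validate_line_list(None, "extras_calmjs", "module1 module2")       # True (string is split)
validate_line_list(None, "extras_calmjs", ["module1", "module2"])  # True
# Elements containing whitespace (or non-strings) are rejected:
# validate_line_list(None, "extras_calmjs", ["bad value"])  # -> DistutilsSetupError
```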
1,409 | apache/incubator-superset | superset/db_engine_specs.py | BaseEngineSpec.modify_url_for_impersonation | def modify_url_for_impersonation(cls, url, impersonate_user, username):
"""
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
"""
if impersonate_user is not None and username is not None:
url.username = username | python | def modify_url_for_impersonation(cls, url, impersonate_user, username):
"""
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
"""
if impersonate_user is not None and username is not None:
url.username = username | ['def', 'modify_url_for_impersonation', '(', 'cls', ',', 'url', ',', 'impersonate_user', ',', 'username', ')', ':', 'if', 'impersonate_user', 'is', 'not', 'None', 'and', 'username', 'is', 'not', 'None', ':', 'url', '.', 'username', '=', 'username'] | Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username | ['Modify', 'the', 'SQL', 'Alchemy', 'URL', 'object', 'with', 'the', 'user', 'to', 'impersonate', 'if', 'applicable', '.', ':', 'param', 'url', ':', 'SQLAlchemy', 'URL', 'object', ':', 'param', 'impersonate_user', ':', 'Bool', 'indicating', 'if', 'impersonation', 'is', 'enabled', ':', 'param', 'username', ':', 'Effective', 'username'] | train | https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/db_engine_specs.py#L395-L403 |
1,410 | google/grr | grr/server/grr_response_server/check_lib/checks.py | Matcher.Detect | def Detect(self, baseline, host_data):
"""Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise.
"""
result = CheckResult()
for detector in self.detectors:
finding = detector(baseline, host_data)
if finding:
result.ExtendAnomalies([finding])
if result:
return result | python | def Detect(self, baseline, host_data):
"""Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise.
"""
result = CheckResult()
for detector in self.detectors:
finding = detector(baseline, host_data)
if finding:
result.ExtendAnomalies([finding])
if result:
return result | ['def', 'Detect', '(', 'self', ',', 'baseline', ',', 'host_data', ')', ':', 'result', '=', 'CheckResult', '(', ')', 'for', 'detector', 'in', 'self', '.', 'detectors', ':', 'finding', '=', 'detector', '(', 'baseline', ',', 'host_data', ')', 'if', 'finding', ':', 'result', '.', 'ExtendAnomalies', '(', '[', 'finding', ']', ')', 'if', 'result', ':', 'return', 'result'] | Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise. | ['Run', 'host_data', 'through', 'detectors', 'and', 'return', 'them', 'if', 'a', 'detector', 'triggers', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/check_lib/checks.py#L455-L473 |
1,411 | senaite/senaite.core | bika/lims/browser/analysisrequest/add2.py | AnalysisRequestAddView.get_batch | def get_batch(self):
"""Returns the Batch
"""
context = self.context
parent = api.get_parent(context)
if context.portal_type == "Batch":
return context
elif parent.portal_type == "Batch":
return parent
return None | python | def get_batch(self):
"""Returns the Batch
"""
context = self.context
parent = api.get_parent(context)
if context.portal_type == "Batch":
return context
elif parent.portal_type == "Batch":
return parent
return None | ['def', 'get_batch', '(', 'self', ')', ':', 'context', '=', 'self', '.', 'context', 'parent', '=', 'api', '.', 'get_parent', '(', 'context', ')', 'if', 'context', '.', 'portal_type', '==', '"Batch"', ':', 'return', 'context', 'elif', 'parent', '.', 'portal_type', '==', '"Batch"', ':', 'return', 'parent', 'return', 'None'] | Returns the Batch | ['Returns', 'the', 'Batch'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/add2.py#L348-L357 |
1,412 | limodou/uliweb | uliweb/lib/werkzeug/contrib/jsrouting.py | generate_map | def generate_map(map, name='url_map'):
"""
Generates a JavaScript function containing the rules defined in
this map, to be used with a MapAdapter's generate_javascript
method. If you don't pass a name the returned JavaScript code is
an expression that returns a function. Otherwise it's a standalone
script that assigns the function with that name. Dotted names are
resolved (so you an use a name like 'obj.url_for')
In order to use JavaScript generation, simplejson must be installed.
Note that using this feature will expose the rules
defined in your map to users. If your rules contain sensitive
information, don't use JavaScript generation!
"""
from warnings import warn
warn(DeprecationWarning('This module is deprecated'))
map.update()
rules = []
converters = []
for rule in map.iter_rules():
trace = [{
'is_dynamic': is_dynamic,
'data': data
} for is_dynamic, data in rule._trace]
rule_converters = {}
for key, converter in iteritems(rule._converters):
js_func = js_to_url_function(converter)
try:
index = converters.index(js_func)
except ValueError:
converters.append(js_func)
index = len(converters) - 1
rule_converters[key] = index
rules.append({
u'endpoint': rule.endpoint,
u'arguments': list(rule.arguments),
u'converters': rule_converters,
u'trace': trace,
u'defaults': rule.defaults
})
return render_template(name_parts=name and name.split('.') or [],
rules=dumps(rules),
converters=converters) | python | def generate_map(map, name='url_map'):
"""
Generates a JavaScript function containing the rules defined in
this map, to be used with a MapAdapter's generate_javascript
method. If you don't pass a name the returned JavaScript code is
an expression that returns a function. Otherwise it's a standalone
script that assigns the function with that name. Dotted names are
resolved (so you an use a name like 'obj.url_for')
In order to use JavaScript generation, simplejson must be installed.
Note that using this feature will expose the rules
defined in your map to users. If your rules contain sensitive
information, don't use JavaScript generation!
"""
from warnings import warn
warn(DeprecationWarning('This module is deprecated'))
map.update()
rules = []
converters = []
for rule in map.iter_rules():
trace = [{
'is_dynamic': is_dynamic,
'data': data
} for is_dynamic, data in rule._trace]
rule_converters = {}
for key, converter in iteritems(rule._converters):
js_func = js_to_url_function(converter)
try:
index = converters.index(js_func)
except ValueError:
converters.append(js_func)
index = len(converters) - 1
rule_converters[key] = index
rules.append({
u'endpoint': rule.endpoint,
u'arguments': list(rule.arguments),
u'converters': rule_converters,
u'trace': trace,
u'defaults': rule.defaults
})
return render_template(name_parts=name and name.split('.') or [],
rules=dumps(rules),
converters=converters) | ['def', 'generate_map', '(', 'map', ',', 'name', '=', "'url_map'", ')', ':', 'from', 'warnings', 'import', 'warn', 'warn', '(', 'DeprecationWarning', '(', "'This module is deprecated'", ')', ')', 'map', '.', 'update', '(', ')', 'rules', '=', '[', ']', 'converters', '=', '[', ']', 'for', 'rule', 'in', 'map', '.', 'iter_rules', '(', ')', ':', 'trace', '=', '[', '{', "'is_dynamic'", ':', 'is_dynamic', ',', "'data'", ':', 'data', '}', 'for', 'is_dynamic', ',', 'data', 'in', 'rule', '.', '_trace', ']', 'rule_converters', '=', '{', '}', 'for', 'key', ',', 'converter', 'in', 'iteritems', '(', 'rule', '.', '_converters', ')', ':', 'js_func', '=', 'js_to_url_function', '(', 'converter', ')', 'try', ':', 'index', '=', 'converters', '.', 'index', '(', 'js_func', ')', 'except', 'ValueError', ':', 'converters', '.', 'append', '(', 'js_func', ')', 'index', '=', 'len', '(', 'converters', ')', '-', '1', 'rule_converters', '[', 'key', ']', '=', 'index', 'rules', '.', 'append', '(', '{', "u'endpoint'", ':', 'rule', '.', 'endpoint', ',', "u'arguments'", ':', 'list', '(', 'rule', '.', 'arguments', ')', ',', "u'converters'", ':', 'rule_converters', ',', "u'trace'", ':', 'trace', ',', "u'defaults'", ':', 'rule', '.', 'defaults', '}', ')', 'return', 'render_template', '(', 'name_parts', '=', 'name', 'and', 'name', '.', 'split', '(', "'.'", ')', 'or', '[', ']', ',', 'rules', '=', 'dumps', '(', 'rules', ')', ',', 'converters', '=', 'converters', ')'] | Generates a JavaScript function containing the rules defined in
this map, to be used with a MapAdapter's generate_javascript
method. If you don't pass a name the returned JavaScript code is
an expression that returns a function. Otherwise it's a standalone
script that assigns the function with that name. Dotted names are
resolved (so you an use a name like 'obj.url_for')
In order to use JavaScript generation, simplejson must be installed.
Note that using this feature will expose the rules
defined in your map to users. If your rules contain sensitive
information, don't use JavaScript generation! | ['Generates', 'a', 'JavaScript', 'function', 'containing', 'the', 'rules', 'defined', 'in', 'this', 'map', 'to', 'be', 'used', 'with', 'a', 'MapAdapter', 's', 'generate_javascript', 'method', '.', 'If', 'you', 'don', 't', 'pass', 'a', 'name', 'the', 'returned', 'JavaScript', 'code', 'is', 'an', 'expression', 'that', 'returns', 'a', 'function', '.', 'Otherwise', 'it', 's', 'a', 'standalone', 'script', 'that', 'assigns', 'the', 'function', 'with', 'that', 'name', '.', 'Dotted', 'names', 'are', 'resolved', '(', 'so', 'you', 'an', 'use', 'a', 'name', 'like', 'obj', '.', 'url_for', ')'] | train | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/jsrouting.py#L170-L214 |
1,413 | opendatateam/udata | udata/entrypoints.py | known_dists | def known_dists():
'''Return a list of all Distributions exporting udata.* entrypoints'''
return (
dist for dist in pkg_resources.working_set
if any(k in ENTRYPOINTS for k in dist.get_entry_map().keys())
) | python | def known_dists():
'''Return a list of all Distributions exporting udata.* entrypoints'''
return (
dist for dist in pkg_resources.working_set
if any(k in ENTRYPOINTS for k in dist.get_entry_map().keys())
) | ['def', 'known_dists', '(', ')', ':', 'return', '(', 'dist', 'for', 'dist', 'in', 'pkg_resources', '.', 'working_set', 'if', 'any', '(', 'k', 'in', 'ENTRYPOINTS', 'for', 'k', 'in', 'dist', '.', 'get_entry_map', '(', ')', '.', 'keys', '(', ')', ')', ')'] | Return a list of all Distributions exporting udata.* entrypoints | ['Return', 'a', 'list', 'of', 'all', 'Distributions', 'exporting', 'udata', '.', '*', 'entrypoints'] | train | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/entrypoints.py#L56-L61 |
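A usage sketch, assuming udata is installed; it lists each distribution in the working set that exports any `udata.*` entry point together with its entry-point groups:

```python
# Usage sketch; assumes udata is installed.
from udata.entrypoints import known_dists

for dist in known_dists():
    # dist is a pkg_resources Distribution; show its name and entry-point groups.
    print(dist.project_name, sorted(dist.get_entry_map().keys()))
```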
1,414 | saltstack/salt | salt/modules/boto3_elasticache.py | modify_cache_parameter_group | def modify_cache_parameter_group(name, region=None, key=None, keyid=None, profile=None,
**args):
'''
Update a cache parameter group in place.
Note that due to a design limitation in AWS, this function is not atomic -- a maximum of 20
params may be modified in one underlying boto call. This means that if more than 20 params
need to be changed, the update is performed in blocks of 20, which in turns means that if a
later sub-call fails after an earlier one has succeeded, the overall update will be left
partially applied.
CacheParameterGroupName
The name of the cache parameter group to modify.
ParameterNameValues
A [list] of {dicts}, each composed of a parameter name and a value, for the parameter
update. At least one parameter/value pair is required.
.. code-block:: yaml
ParameterNameValues:
- ParameterName: timeout
# Amazon requires ALL VALUES to be strings...
ParameterValue: "30"
- ParameterName: appendonly
# The YAML parser will turn a bare `yes` into a bool, which Amazon will then throw on...
ParameterValue: "yes"
Example:
.. code-block:: bash
salt myminion boto3_elasticache.modify_cache_parameter_group \
CacheParameterGroupName=myParamGroup \
ParameterNameValues='[ { ParameterName: timeout,
ParameterValue: "30" },
{ ParameterName: appendonly,
ParameterValue: "yes" } ]'
'''
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
try:
Params = args['ParameterNameValues']
except ValueError as e:
raise SaltInvocationError('Invalid `ParameterNameValues` structure passed.')
while Params:
args.update({'ParameterNameValues': Params[:20]})
Params = Params[20:]
if not _modify_resource(name, name_param='CacheParameterGroupName',
desc='cache parameter group', res_type='cache_parameter_group',
region=region, key=key, keyid=keyid, profile=profile, **args):
return False
return True | python | def modify_cache_parameter_group(name, region=None, key=None, keyid=None, profile=None,
**args):
'''
Update a cache parameter group in place.
Note that due to a design limitation in AWS, this function is not atomic -- a maximum of 20
params may be modified in one underlying boto call. This means that if more than 20 params
need to be changed, the update is performed in blocks of 20, which in turns means that if a
later sub-call fails after an earlier one has succeeded, the overall update will be left
partially applied.
CacheParameterGroupName
The name of the cache parameter group to modify.
ParameterNameValues
A [list] of {dicts}, each composed of a parameter name and a value, for the parameter
update. At least one parameter/value pair is required.
.. code-block:: yaml
ParameterNameValues:
- ParameterName: timeout
# Amazon requires ALL VALUES to be strings...
ParameterValue: "30"
- ParameterName: appendonly
# The YAML parser will turn a bare `yes` into a bool, which Amazon will then throw on...
ParameterValue: "yes"
Example:
.. code-block:: bash
salt myminion boto3_elasticache.modify_cache_parameter_group \
CacheParameterGroupName=myParamGroup \
ParameterNameValues='[ { ParameterName: timeout,
ParameterValue: "30" },
{ ParameterName: appendonly,
ParameterValue: "yes" } ]'
'''
args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
try:
Params = args['ParameterNameValues']
except ValueError as e:
raise SaltInvocationError('Invalid `ParameterNameValues` structure passed.')
while Params:
args.update({'ParameterNameValues': Params[:20]})
Params = Params[20:]
if not _modify_resource(name, name_param='CacheParameterGroupName',
desc='cache parameter group', res_type='cache_parameter_group',
region=region, key=key, keyid=keyid, profile=profile, **args):
return False
return True | ['def', 'modify_cache_parameter_group', '(', 'name', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ',', '*', '*', 'args', ')', ':', 'args', '=', 'dict', '(', '[', '(', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'args', '.', 'items', '(', ')', 'if', 'not', 'k', '.', 'startswith', '(', "'_'", ')', ']', ')', 'try', ':', 'Params', '=', 'args', '[', "'ParameterNameValues'", ']', 'except', 'ValueError', 'as', 'e', ':', 'raise', 'SaltInvocationError', '(', "'Invalid `ParameterNameValues` structure passed.'", ')', 'while', 'Params', ':', 'args', '.', 'update', '(', '{', "'ParameterNameValues'", ':', 'Params', '[', ':', '20', ']', '}', ')', 'Params', '=', 'Params', '[', '20', ':', ']', 'if', 'not', '_modify_resource', '(', 'name', ',', 'name_param', '=', "'CacheParameterGroupName'", ',', 'desc', '=', "'cache parameter group'", ',', 'res_type', '=', "'cache_parameter_group'", ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ',', '*', '*', 'args', ')', ':', 'return', 'False', 'return', 'True'] | Update a cache parameter group in place.
Note that due to a design limitation in AWS, this function is not atomic -- a maximum of 20
params may be modified in one underlying boto call. This means that if more than 20 params
need to be changed, the update is performed in blocks of 20, which in turns means that if a
later sub-call fails after an earlier one has succeeded, the overall update will be left
partially applied.
CacheParameterGroupName
The name of the cache parameter group to modify.
ParameterNameValues
A [list] of {dicts}, each composed of a parameter name and a value, for the parameter
update. At least one parameter/value pair is required.
.. code-block:: yaml
ParameterNameValues:
- ParameterName: timeout
# Amazon requires ALL VALUES to be strings...
ParameterValue: "30"
- ParameterName: appendonly
# The YAML parser will turn a bare `yes` into a bool, which Amazon will then throw on...
ParameterValue: "yes"
Example:
.. code-block:: bash
salt myminion boto3_elasticache.modify_cache_parameter_group \
CacheParameterGroupName=myParamGroup \
ParameterNameValues='[ { ParameterName: timeout,
ParameterValue: "30" },
{ ParameterName: appendonly,
ParameterValue: "yes" } ]' | ['Update', 'a', 'cache', 'parameter', 'group', 'in', 'place', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto3_elasticache.py#L977-L1028 |
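Two notes on the function above: the `try`/`except ValueError` around `args['ParameterNameValues']` would not catch the `KeyError` raised when the key is missing, and updates are applied in slices of 20 because (per the docstring) at most 20 parameters may be modified per underlying boto call. A standalone sketch of that slicing; no AWS call is made and the parameter names are made up:

```python
# Standalone sketch of the 20-at-a-time slicing used above.
params = [{"ParameterName": "param%d" % i, "ParameterValue": str(i)} for i in range(45)]

batches = []
while params:
    batches.append(params[:20])  # at most 20 parameter changes per API call
    params = params[20:]

print([len(b) for b in batches])  # [20, 20, 5]
```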
1,415 | pyroscope/pyrocore | pavement.py | release | def release():
"check release before upload to PyPI"
sh("paver bdist_wheel")
wheels = path("dist").files("*.whl")
if not wheels:
error("\n*** ERROR: No release wheel was built!")
sys.exit(1)
if any(".dev" in i for i in wheels):
error("\n*** ERROR: You're still using a 'dev' version!")
sys.exit(1)
# Check that source distribution can be built and is complete
print
print "~~~ TESTING SOURCE BUILD".ljust(78, '~')
sh( "{ command cd dist/ && unzip -q %s-%s.zip && command cd %s-%s/"
" && /usr/bin/python setup.py sdist >/dev/null"
" && if { unzip -ql ../%s-%s.zip; unzip -ql dist/%s-%s.zip; }"
" | cut -b26- | sort | uniq -c| egrep -v '^ +2 +' ; then"
" echo '^^^ Difference in file lists! ^^^'; false;"
" else true; fi; } 2>&1"
% tuple([project["name"], version] * 4)
)
path("dist/%s-%s" % (project["name"], version)).rmtree()
print "~" * 78
print
print "~~~ sdist vs. git ".ljust(78, '~')
subprocess.call(
"unzip -v dist/pyrocore-*.zip | egrep '^ .+/' | cut -f2- -d/ | sort >./build/ls-sdist.txt"
" && git ls-files | sort >./build/ls-git.txt"
" && $(which colordiff || echo diff) -U0 ./build/ls-sdist.txt ./build/ls-git.txt || true", shell=True)
print "~" * 78
print
print "Created", " ".join([str(i) for i in path("dist").listdir()])
print "Use 'paver sdist bdist_wheel' to build the release and"
print " 'twine upload dist/*.{zip,whl}' to upload to PyPI"
print "Use 'paver dist_docs' to prepare an API documentation upload" | python | def release():
"check release before upload to PyPI"
sh("paver bdist_wheel")
wheels = path("dist").files("*.whl")
if not wheels:
error("\n*** ERROR: No release wheel was built!")
sys.exit(1)
if any(".dev" in i for i in wheels):
error("\n*** ERROR: You're still using a 'dev' version!")
sys.exit(1)
# Check that source distribution can be built and is complete
print
print "~~~ TESTING SOURCE BUILD".ljust(78, '~')
sh( "{ command cd dist/ && unzip -q %s-%s.zip && command cd %s-%s/"
" && /usr/bin/python setup.py sdist >/dev/null"
" && if { unzip -ql ../%s-%s.zip; unzip -ql dist/%s-%s.zip; }"
" | cut -b26- | sort | uniq -c| egrep -v '^ +2 +' ; then"
" echo '^^^ Difference in file lists! ^^^'; false;"
" else true; fi; } 2>&1"
% tuple([project["name"], version] * 4)
)
path("dist/%s-%s" % (project["name"], version)).rmtree()
print "~" * 78
print
print "~~~ sdist vs. git ".ljust(78, '~')
subprocess.call(
"unzip -v dist/pyrocore-*.zip | egrep '^ .+/' | cut -f2- -d/ | sort >./build/ls-sdist.txt"
" && git ls-files | sort >./build/ls-git.txt"
" && $(which colordiff || echo diff) -U0 ./build/ls-sdist.txt ./build/ls-git.txt || true", shell=True)
print "~" * 78
print
print "Created", " ".join([str(i) for i in path("dist").listdir()])
print "Use 'paver sdist bdist_wheel' to build the release and"
print " 'twine upload dist/*.{zip,whl}' to upload to PyPI"
print "Use 'paver dist_docs' to prepare an API documentation upload" | ['def', 'release', '(', ')', ':', 'sh', '(', '"paver bdist_wheel"', ')', 'wheels', '=', 'path', '(', '"dist"', ')', '.', 'files', '(', '"*.whl"', ')', 'if', 'not', 'wheels', ':', 'error', '(', '"\\n*** ERROR: No release wheel was built!"', ')', 'sys', '.', 'exit', '(', '1', ')', 'if', 'any', '(', '".dev"', 'in', 'i', 'for', 'i', 'in', 'wheels', ')', ':', 'error', '(', '"\\n*** ERROR: You\'re still using a \'dev\' version!"', ')', 'sys', '.', 'exit', '(', '1', ')', '# Check that source distribution can be built and is complete', 'print', 'print', '"~~~ TESTING SOURCE BUILD"', '.', 'ljust', '(', '78', ',', "'~'", ')', 'sh', '(', '"{ command cd dist/ && unzip -q %s-%s.zip && command cd %s-%s/"', '" && /usr/bin/python setup.py sdist >/dev/null"', '" && if { unzip -ql ../%s-%s.zip; unzip -ql dist/%s-%s.zip; }"', '" | cut -b26- | sort | uniq -c| egrep -v \'^ +2 +\' ; then"', '" echo \'^^^ Difference in file lists! ^^^\'; false;"', '" else true; fi; } 2>&1"', '%', 'tuple', '(', '[', 'project', '[', '"name"', ']', ',', 'version', ']', '*', '4', ')', ')', 'path', '(', '"dist/%s-%s"', '%', '(', 'project', '[', '"name"', ']', ',', 'version', ')', ')', '.', 'rmtree', '(', ')', 'print', '"~"', '*', '78', 'print', 'print', '"~~~ sdist vs. git "', '.', 'ljust', '(', '78', ',', "'~'", ')', 'subprocess', '.', 'call', '(', '"unzip -v dist/pyrocore-*.zip | egrep \'^ .+/\' | cut -f2- -d/ | sort >./build/ls-sdist.txt"', '" && git ls-files | sort >./build/ls-git.txt"', '" && $(which colordiff || echo diff) -U0 ./build/ls-sdist.txt ./build/ls-git.txt || true"', ',', 'shell', '=', 'True', ')', 'print', '"~"', '*', '78', 'print', 'print', '"Created"', ',', '" "', '.', 'join', '(', '[', 'str', '(', 'i', ')', 'for', 'i', 'in', 'path', '(', '"dist"', ')', '.', 'listdir', '(', ')', ']', ')', 'print', '"Use \'paver sdist bdist_wheel\' to build the release and"', 'print', '" \'twine upload dist/*.{zip,whl}\' to upload to PyPI"', 'print', '"Use \'paver dist_docs\' to prepare an API documentation upload"'] | check release before upload to PyPI | ['check', 'release', 'before', 'upload', 'to', 'PyPI'] | train | https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/pavement.py#L370-L407 |
1,416 | licenses/lice | lice/core.py | extract_vars | def extract_vars(template):
""" Extract variables from template. Variables are enclosed in
double curly braces.
"""
keys = set()
for match in re.finditer(r"\{\{ (?P<key>\w+) \}\}", template.getvalue()):
keys.add(match.groups()[0])
return sorted(list(keys)) | python | def extract_vars(template):
""" Extract variables from template. Variables are enclosed in
double curly braces.
"""
keys = set()
for match in re.finditer(r"\{\{ (?P<key>\w+) \}\}", template.getvalue()):
keys.add(match.groups()[0])
return sorted(list(keys)) | ['def', 'extract_vars', '(', 'template', ')', ':', 'keys', '=', 'set', '(', ')', 'for', 'match', 'in', 're', '.', 'finditer', '(', 'r"\\{\\{ (?P<key>\\w+) \\}\\}"', ',', 'template', '.', 'getvalue', '(', ')', ')', ':', 'keys', '.', 'add', '(', 'match', '.', 'groups', '(', ')', '[', '0', ']', ')', 'return', 'sorted', '(', 'list', '(', 'keys', ')', ')'] | Extract variables from template. Variables are enclosed in
double curly braces. | ['Extract', 'variables', 'from', 'template', '.', 'Variables', 'are', 'enclosed', 'in', 'double', 'curly', 'braces', '.'] | train | https://github.com/licenses/lice/blob/71635c2544d5edf9e93af4141467763916a86624/lice/core.py#L149-L156 |
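A usage sketch: `extract_vars` expects an object with a `getvalue()` method, so the template text is wrapped in `StringIO` here; the import path follows the row's `lice/core.py` and assumes the package is installed:

```python
# Usage sketch; assumes the lice package is installed.
from io import StringIO
from lice.core import extract_vars

template = StringIO("Copyright (c) {{ year }} {{ organization }} <{{ email }}>")
print(extract_vars(template))  # ['email', 'organization', 'year']
```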
1,417 | brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py | brocade_interface_ext.get_interface_switchport_output_switchport_acceptable_frame_type | def get_interface_switchport_output_switchport_acceptable_frame_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_switchport = ET.Element("get_interface_switchport")
config = get_interface_switchport
output = ET.SubElement(get_interface_switchport, "output")
switchport = ET.SubElement(output, "switchport")
interface_type_key = ET.SubElement(switchport, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(switchport, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
acceptable_frame_type = ET.SubElement(switchport, "acceptable-frame-type")
acceptable_frame_type.text = kwargs.pop('acceptable_frame_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | python | def get_interface_switchport_output_switchport_acceptable_frame_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_switchport = ET.Element("get_interface_switchport")
config = get_interface_switchport
output = ET.SubElement(get_interface_switchport, "output")
switchport = ET.SubElement(output, "switchport")
interface_type_key = ET.SubElement(switchport, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(switchport, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
acceptable_frame_type = ET.SubElement(switchport, "acceptable-frame-type")
acceptable_frame_type.text = kwargs.pop('acceptable_frame_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | ['def', 'get_interface_switchport_output_switchport_acceptable_frame_type', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_interface_switchport', '=', 'ET', '.', 'Element', '(', '"get_interface_switchport"', ')', 'config', '=', 'get_interface_switchport', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_interface_switchport', ',', '"output"', ')', 'switchport', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"switchport"', ')', 'interface_type_key', '=', 'ET', '.', 'SubElement', '(', 'switchport', ',', '"interface-type"', ')', 'interface_type_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'interface_type'", ')', 'interface_name_key', '=', 'ET', '.', 'SubElement', '(', 'switchport', ',', '"interface-name"', ')', 'interface_name_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'interface_name'", ')', 'acceptable_frame_type', '=', 'ET', '.', 'SubElement', '(', 'switchport', ',', '"acceptable-frame-type"', ')', 'acceptable_frame_type', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'acceptable_frame_type'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')'] | Auto Generated Code | ['Auto', 'Generated', 'Code'] | train | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L362-L378 |
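The auto-generated method above assembles a NETCONF-style request with `xml.etree.ElementTree` and hands it to a callback. A standalone sketch of the same tree construction, with the callback replaced by plain serialization and made-up interface values:

```python
# Standalone sketch of the XML request body assembled by the method above.
import xml.etree.ElementTree as ET

config = ET.Element("get_interface_switchport")
output = ET.SubElement(config, "output")
switchport = ET.SubElement(output, "switchport")
ET.SubElement(switchport, "interface-type").text = "tengigabitethernet"
ET.SubElement(switchport, "interface-name").text = "1/0/1"
ET.SubElement(switchport, "acceptable-frame-type").text = "vlan-tagged-only"

print(ET.tostring(config, encoding="unicode"))
# <get_interface_switchport><output><switchport><interface-type>...</switchport></output></get_interface_switchport>
```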
1,418 | Erotemic/utool | utool/util_alg.py | upper_diag_self_prodx | def upper_diag_self_prodx(list_):
"""
upper diagnoal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)]
"""
return [(item1, item2)
for n1, item1 in enumerate(list_)
for n2, item2 in enumerate(list_) if n1 < n2] | python | def upper_diag_self_prodx(list_):
"""
upper diagnoal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)]
"""
return [(item1, item2)
for n1, item1 in enumerate(list_)
for n2, item2 in enumerate(list_) if n1 < n2] | ['def', 'upper_diag_self_prodx', '(', 'list_', ')', ':', 'return', '[', '(', 'item1', ',', 'item2', ')', 'for', 'n1', ',', 'item1', 'in', 'enumerate', '(', 'list_', ')', 'for', 'n2', ',', 'item2', 'in', 'enumerate', '(', 'list_', ')', 'if', 'n1', '<', 'n2', ']'] | upper diagnoal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)] | ['upper', 'diagnoal', 'of', 'cartesian', 'product', 'of', 'self', 'and', 'self', '.', 'Weird', 'name', '.', 'fixme'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L487-L511 |
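The function above enumerates the strict upper triangle of the self cross-product, i.e. all unordered pairs; a quick cross-check of that equivalence against `itertools.combinations`:

```python
# Cross-check: the upper-diagonal pairs equal itertools.combinations(list_, 2).
import itertools

list_ = [1, 2, 3, 4]
pairs = [(a, b) for i, a in enumerate(list_)
         for j, b in enumerate(list_) if i < j]

assert pairs == list(itertools.combinations(list_, 2))
print(pairs)  # [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
```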
1,419 | Robpol86/libnl | libnl/nl.py | nl_complete_msg | def nl_complete_msg(sk, msg):
"""Finalize Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450
This function finalizes a Netlink message by completing the message with desirable flags and values depending on the
socket configuration.
- If not yet filled out, the source address of the message (`nlmsg_pid`) will be set to the local port number of the
socket.
- If not yet specified, the next available sequence number is assigned to the message (`nlmsg_seq`).
- If not yet specified, the protocol field of the message will be set to the protocol field of the socket.
- The `NLM_F_REQUEST` Netlink message flag will be set.
- The `NLM_F_ACK` flag will be set if Auto-ACK mode is enabled on the socket.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
msg -- Netlink message (nl_msg class instance).
"""
nlh = msg.nm_nlh
if nlh.nlmsg_pid == NL_AUTO_PORT:
nlh.nlmsg_pid = nl_socket_get_local_port(sk)
if nlh.nlmsg_seq == NL_AUTO_SEQ:
nlh.nlmsg_seq = sk.s_seq_next
sk.s_seq_next += 1
if msg.nm_protocol == -1:
msg.nm_protocol = sk.s_proto
nlh.nlmsg_flags |= NLM_F_REQUEST
if not sk.s_flags & NL_NO_AUTO_ACK:
nlh.nlmsg_flags |= NLM_F_ACK | python | def nl_complete_msg(sk, msg):
"""Finalize Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450
This function finalizes a Netlink message by completing the message with desirable flags and values depending on the
socket configuration.
- If not yet filled out, the source address of the message (`nlmsg_pid`) will be set to the local port number of the
socket.
- If not yet specified, the next available sequence number is assigned to the message (`nlmsg_seq`).
- If not yet specified, the protocol field of the message will be set to the protocol field of the socket.
- The `NLM_F_REQUEST` Netlink message flag will be set.
- The `NLM_F_ACK` flag will be set if Auto-ACK mode is enabled on the socket.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
msg -- Netlink message (nl_msg class instance).
"""
nlh = msg.nm_nlh
if nlh.nlmsg_pid == NL_AUTO_PORT:
nlh.nlmsg_pid = nl_socket_get_local_port(sk)
if nlh.nlmsg_seq == NL_AUTO_SEQ:
nlh.nlmsg_seq = sk.s_seq_next
sk.s_seq_next += 1
if msg.nm_protocol == -1:
msg.nm_protocol = sk.s_proto
nlh.nlmsg_flags |= NLM_F_REQUEST
if not sk.s_flags & NL_NO_AUTO_ACK:
nlh.nlmsg_flags |= NLM_F_ACK | ['def', 'nl_complete_msg', '(', 'sk', ',', 'msg', ')', ':', 'nlh', '=', 'msg', '.', 'nm_nlh', 'if', 'nlh', '.', 'nlmsg_pid', '==', 'NL_AUTO_PORT', ':', 'nlh', '.', 'nlmsg_pid', '=', 'nl_socket_get_local_port', '(', 'sk', ')', 'if', 'nlh', '.', 'nlmsg_seq', '==', 'NL_AUTO_SEQ', ':', 'nlh', '.', 'nlmsg_seq', '=', 'sk', '.', 's_seq_next', 'sk', '.', 's_seq_next', '+=', '1', 'if', 'msg', '.', 'nm_protocol', '==', '-', '1', ':', 'msg', '.', 'nm_protocol', '=', 'sk', '.', 's_proto', 'nlh', '.', 'nlmsg_flags', '|=', 'NLM_F_REQUEST', 'if', 'not', 'sk', '.', 's_flags', '&', 'NL_NO_AUTO_ACK', ':', 'nlh', '.', 'nlmsg_flags', '|=', 'NLM_F_ACK'] | Finalize Netlink message.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450
This function finalizes a Netlink message by completing the message with desirable flags and values depending on the
socket configuration.
- If not yet filled out, the source address of the message (`nlmsg_pid`) will be set to the local port number of the
socket.
- If not yet specified, the next available sequence number is assigned to the message (`nlmsg_seq`).
- If not yet specified, the protocol field of the message will be set to the protocol field of the socket.
- The `NLM_F_REQUEST` Netlink message flag will be set.
- The `NLM_F_ACK` flag will be set if Auto-ACK mode is enabled on the socket.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
msg -- Netlink message (nl_msg class instance). | ['Finalize', 'Netlink', 'message', '.'] | train | https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/nl.py#L213-L242 |
1,420 | obulpathi/cdn-fastly-python | fastly/__init__.py | FastlyConnection.update_user | def update_user(self, user_id, **kwargs):
"""Update a user."""
body = self._formdata(kwargs, FastlyUser.FIELDS)
content = self._fetch("/user/%s" % user_id, method="PUT", body=body)
return FastlyUser(self, content) | python | def update_user(self, user_id, **kwargs):
"""Update a user."""
body = self._formdata(kwargs, FastlyUser.FIELDS)
content = self._fetch("/user/%s" % user_id, method="PUT", body=body)
return FastlyUser(self, content) | ['def', 'update_user', '(', 'self', ',', 'user_id', ',', '*', '*', 'kwargs', ')', ':', 'body', '=', 'self', '.', '_formdata', '(', 'kwargs', ',', 'FastlyUser', '.', 'FIELDS', ')', 'content', '=', 'self', '.', '_fetch', '(', '"/user/%s"', '%', 'user_id', ',', 'method', '=', '"PUT"', ',', 'body', '=', 'body', ')', 'return', 'FastlyUser', '(', 'self', ',', 'content', ')'] | Update a user. | ['Update', 'a', 'user', '.'] | train | https://github.com/obulpathi/cdn-fastly-python/blob/db2564b047e8af4bce72c3b88d6c27d3d0291425/fastly/__init__.py#L839-L843 |
1,421 | fermiPy/fermipy | fermipy/utils.py | make_psf_kernel | def make_psf_kernel(psf, npix, cdelt, xpix, ypix, psf_scale_fn=None, normalize=False):
"""
Generate a kernel for a point-source.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
npix : int
Number of pixels in X and Y dimensions.
cdelt : float
Pixel size in degrees.
"""
egy = psf.energies
x = make_pixel_distance(npix, xpix, ypix)
x *= cdelt
k = np.zeros((len(egy), npix, npix))
for i in range(len(egy)):
k[i] = psf.eval(i, x, scale_fn=psf_scale_fn)
if normalize:
k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
return k | python | def make_psf_kernel(psf, npix, cdelt, xpix, ypix, psf_scale_fn=None, normalize=False):
"""
Generate a kernel for a point-source.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
npix : int
Number of pixels in X and Y dimensions.
cdelt : float
Pixel size in degrees.
"""
egy = psf.energies
x = make_pixel_distance(npix, xpix, ypix)
x *= cdelt
k = np.zeros((len(egy), npix, npix))
for i in range(len(egy)):
k[i] = psf.eval(i, x, scale_fn=psf_scale_fn)
if normalize:
k /= (np.sum(k, axis=0)[np.newaxis, ...] * np.radians(cdelt) ** 2)
return k | ['def', 'make_psf_kernel', '(', 'psf', ',', 'npix', ',', 'cdelt', ',', 'xpix', ',', 'ypix', ',', 'psf_scale_fn', '=', 'None', ',', 'normalize', '=', 'False', ')', ':', 'egy', '=', 'psf', '.', 'energies', 'x', '=', 'make_pixel_distance', '(', 'npix', ',', 'xpix', ',', 'ypix', ')', 'x', '*=', 'cdelt', 'k', '=', 'np', '.', 'zeros', '(', '(', 'len', '(', 'egy', ')', ',', 'npix', ',', 'npix', ')', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'egy', ')', ')', ':', 'k', '[', 'i', ']', '=', 'psf', '.', 'eval', '(', 'i', ',', 'x', ',', 'scale_fn', '=', 'psf_scale_fn', ')', 'if', 'normalize', ':', 'k', '/=', '(', 'np', '.', 'sum', '(', 'k', ',', 'axis', '=', '0', ')', '[', 'np', '.', 'newaxis', ',', '...', ']', '*', 'np', '.', 'radians', '(', 'cdelt', ')', '**', '2', ')', 'return', 'k'] | Generate a kernel for a point-source.
Parameters
----------
psf : `~fermipy.irfs.PSFModel`
npix : int
Number of pixels in X and Y dimensions.
cdelt : float
Pixel size in degrees. | ['Generate', 'a', 'kernel', 'for', 'a', 'point', '-', 'source', '.'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/utils.py#L1777-L1805 |
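The loop above evaluates the PSF on a map of pixel distances from (xpix, ypix) and optionally normalises the planes. A self-contained sketch of the same pattern, with a Gaussian standing in for psf.eval and the distance map built inline (fermipy itself is not imported, and the geometry values are made up):
import numpy as np

npix, cdelt, xpix, ypix = 51, 0.1, 25.0, 25.0                  # hypothetical geometry
yy, xx = np.mgrid[0:npix, 0:npix]
dist = np.sqrt((xx - xpix) ** 2 + (yy - ypix) ** 2) * cdelt    # pixel distance in degrees
sigma = 0.3                                                    # stand-in PSF width, degrees
k = np.exp(-0.5 * (dist / sigma) ** 2)                         # one energy plane of the kernel
k /= k.sum() * np.radians(cdelt) ** 2                          # same normalisation idea as above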
1,422 | googleapis/gax-python | google/gax/api_callable.py | _bundleable | def _bundleable(desc):
"""Creates a function that transforms an API call into a bundling call.
It transforms a_func from an API call that receives the requests and returns
the response into a callable that receives the same request, and
returns a :class:`bundling.Event`.
The returned Event object can be used to obtain the eventual result of the
bundled call.
Args:
desc (gax.BundleDescriptor): describes the bundling that a_func
supports.
Returns:
Callable: takes the API call's request and keyword args and returns a
bundling.Event object.
"""
def inner(a_func, settings, request, **kwargs):
"""Schedules execution of a bundling task."""
if not settings.bundler:
return a_func(request, **kwargs)
the_id = bundling.compute_bundle_id(
request, desc.request_discriminator_fields)
return settings.bundler.schedule(a_func, the_id, desc, request, kwargs)
return inner | python | def _bundleable(desc):
"""Creates a function that transforms an API call into a bundling call.
It transforms a_func from an API call that receives the requests and returns
the response into a callable that receives the same request, and
returns a :class:`bundling.Event`.
The returned Event object can be used to obtain the eventual result of the
bundled call.
Args:
desc (gax.BundleDescriptor): describes the bundling that a_func
supports.
Returns:
Callable: takes the API call's request and keyword args and returns a
bundling.Event object.
"""
def inner(a_func, settings, request, **kwargs):
"""Schedules execution of a bundling task."""
if not settings.bundler:
return a_func(request, **kwargs)
the_id = bundling.compute_bundle_id(
request, desc.request_discriminator_fields)
return settings.bundler.schedule(a_func, the_id, desc, request, kwargs)
return inner | ['def', '_bundleable', '(', 'desc', ')', ':', 'def', 'inner', '(', 'a_func', ',', 'settings', ',', 'request', ',', '*', '*', 'kwargs', ')', ':', '"""Schedules execution of a bundling task."""', 'if', 'not', 'settings', '.', 'bundler', ':', 'return', 'a_func', '(', 'request', ',', '*', '*', 'kwargs', ')', 'the_id', '=', 'bundling', '.', 'compute_bundle_id', '(', 'request', ',', 'desc', '.', 'request_discriminator_fields', ')', 'return', 'settings', '.', 'bundler', '.', 'schedule', '(', 'a_func', ',', 'the_id', ',', 'desc', ',', 'request', ',', 'kwargs', ')', 'return', 'inner'] | Creates a function that transforms an API call into a bundling call.
It transforms a_func from an API call that receives the requests and returns
the response into a callable that receives the same request, and
returns a :class:`bundling.Event`.
The returned Event object can be used to obtain the eventual result of the
bundled call.
Args:
desc (gax.BundleDescriptor): describes the bundling that a_func
supports.
Returns:
Callable: takes the API call's request and keyword args and returns a
bundling.Event object. | ['Creates', 'a', 'function', 'that', 'transforms', 'an', 'API', 'call', 'into', 'a', 'bundling', 'call', '.'] | train | https://github.com/googleapis/gax-python/blob/309aedfcfd48e4c8fa22dd60e9c84c3cc71bb20e/google/gax/api_callable.py#L43-L71 |
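A conceptual sketch of how the wrapper returned by _bundleable is driven; every name other than _bundleable below is a placeholder rather than a real GAX object:
bundled = _bundleable(bundle_descriptor)             # descriptor says how requests are grouped
event = bundled(api_func, call_settings, request)    # with a bundler configured, this only schedules the request
# the returned bundling.Event later yields the response once the bundler flushes the batch;
# without a bundler on call_settings, api_func(request) is invoked immediately instead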
1,423 | trevisanj/f311 | f311/hapi.py | getStickXY | def getStickXY(TableName):
"""
Get X and Y for fine plotting of a stick spectrum.
Usage: X,Y = getStickXY(TableName).
"""
cent,intens = getColumns(TableName,('nu','sw'))
n = len(cent)
cent_ = zeros(n*3)
intens_ = zeros(n*3)
for i in range(n):
intens_[3*i] = 0
intens_[3*i+1] = intens[i]
intens_[3*i+2] = 0
cent_[(3*i):(3*i+3)] = cent[i]
return cent_,intens_ | python | def getStickXY(TableName):
"""
Get X and Y for fine plotting of a stick spectrum.
Usage: X,Y = getStickXY(TableName).
"""
cent,intens = getColumns(TableName,('nu','sw'))
n = len(cent)
cent_ = zeros(n*3)
intens_ = zeros(n*3)
for i in range(n):
intens_[3*i] = 0
intens_[3*i+1] = intens[i]
intens_[3*i+2] = 0
cent_[(3*i):(3*i+3)] = cent[i]
return cent_,intens_ | ['def', 'getStickXY', '(', 'TableName', ')', ':', 'cent', ',', 'intens', '=', 'getColumns', '(', 'TableName', ',', '(', "'nu'", ',', "'sw'", ')', ')', 'n', '=', 'len', '(', 'cent', ')', 'cent_', '=', 'zeros', '(', 'n', '*', '3', ')', 'intens_', '=', 'zeros', '(', 'n', '*', '3', ')', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', 'intens_', '[', '3', '*', 'i', ']', '=', '0', 'intens_', '[', '3', '*', 'i', '+', '1', ']', '=', 'intens', '[', 'i', ']', 'intens_', '[', '3', '*', 'i', '+', '2', ']', '=', '0', 'cent_', '[', '(', '3', '*', 'i', ')', ':', '(', '3', '*', 'i', '+', '3', ')', ']', '=', 'cent', '[', 'i', ']', 'return', 'cent_', ',', 'intens_'] | Get X and Y for fine plotting of a stick spectrum.
Usage: X,Y = getStickXY(TableName). | ['Get', 'X', 'and', 'Y', 'for', 'fine', 'plotting', 'of', 'a', 'stick', 'spectrum', '.', 'Usage', ':', 'X', 'Y', '=', 'getStickXY', '(', 'TableName', ')', '.'] | train | https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/hapi.py#L11730-L11744 |
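Each line centre contributes three points (zero, intensity, zero) at the same wavenumber, so plotting the two arrays with straight segments draws vertical sticks. A hypothetical follow-up, assuming a HITRAN table has already been fetched under the name used below:
import matplotlib.pyplot as plt

X, Y = getStickXY('H2O')         # 'H2O' is a made-up table name
plt.plot(X, Y)
plt.xlabel('wavenumber, cm-1')
plt.ylabel('line intensity')
plt.show()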
1,424 | santosjorge/cufflinks | cufflinks/datagen.py | sinwave | def sinwave(n=4,inc=.25):
"""
Returns a DataFrame with the required format for
a surface (sine wave) plot
Parameters:
-----------
n : int
Ranges for X and Y axis (-n,n)
inc : float
Size of increment along the axis
"""
x=np.arange(-n,n,inc)
y=np.arange(-n,n,inc)
X,Y=np.meshgrid(x,y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)/(.5*R)
return pd.DataFrame(Z,index=x,columns=y) | python | def sinwave(n=4,inc=.25):
"""
Returns a DataFrame with the required format for
a surface (sine wave) plot
Parameters:
-----------
n : int
Ranges for X and Y axis (-n,n)
inc : float
Size of increment along the axis
"""
x=np.arange(-n,n,inc)
y=np.arange(-n,n,inc)
X,Y=np.meshgrid(x,y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)/(.5*R)
return pd.DataFrame(Z,index=x,columns=y) | ['def', 'sinwave', '(', 'n', '=', '4', ',', 'inc', '=', '.25', ')', ':', 'x', '=', 'np', '.', 'arange', '(', '-', 'n', ',', 'n', ',', 'inc', ')', 'y', '=', 'np', '.', 'arange', '(', '-', 'n', ',', 'n', ',', 'inc', ')', 'X', ',', 'Y', '=', 'np', '.', 'meshgrid', '(', 'x', ',', 'y', ')', 'R', '=', 'np', '.', 'sqrt', '(', 'X', '**', '2', '+', 'Y', '**', '2', ')', 'Z', '=', 'np', '.', 'sin', '(', 'R', ')', '/', '(', '.5', '*', 'R', ')', 'return', 'pd', '.', 'DataFrame', '(', 'Z', ',', 'index', '=', 'x', ',', 'columns', '=', 'y', ')'] | Returns a DataFrame with the required format for
a surface (sine wave) plot
Parameters:
-----------
n : int
Ranges for X and Y axis (-n,n)
inc : float
Size of increment along the axis | ['Returns', 'a', 'DataFrame', 'with', 'the', 'required', 'format', 'for', 'a', 'surface', '(', 'sine', 'wave', ')', 'plot'] | train | https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/datagen.py#L364-L381 |
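A usage sketch; in a notebook with cufflinks hooked into plotly (offline mode is assumed), the generated frame is typically rendered as a surface:
import cufflinks as cf

cf.go_offline()                          # assumes offline plotting is acceptable
df = cf.datagen.sinwave(n=4, inc=.25)
df.iplot(kind='surface', colorscale='rdylbu')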
1,425 | thespacedoctor/neddy | neddy/conesearch.py | conesearch._oversized_subqueries | def _oversized_subqueries(
self,
coordinate,
radiusArcsec):
"""
*subdivide oversized query*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
- @review: when complete, clean _oversized_subqueries method
- @review: when complete add logging
"""
self.log.info('starting the ``_oversized_subqueries`` method')
import math
smallerRadiusArcsec = radiusArcsec / 2.
print "Calculating 7 sub-disks for coordinates %(coordinate)s, with smaller search radius of %(smallerRadiusArcsec)s arcsec" % locals()
ra = coordinate[0]
dec = coordinate[1]
shifts = [
(0, 0),
(0, math.sqrt(3.) / 2.),
(3. / 4., math.sqrt(3.) / 4.),
(3. / 4., -math.sqrt(3.) / 4.),
(0, -math.sqrt(3.) / 2.),
(-3. / 4., -math.sqrt(3.) / 4.),
(-3. / 4., math.sqrt(3.) / 4.)
]
subDiskCoordinates = []
count = 0
for s in shifts:
x1 = ra + s[0] * radiusArcsec / (60 * 60)
y1 = dec + s[1] * radiusArcsec / (60 * 60)
subDiskCoordinates.append((x1, y1))
names, searchParams = self.get_crossmatch_names(
listOfCoordinates=subDiskCoordinates,
radiusArcsec=smallerRadiusArcsec
)
self.log.info('completed the ``_oversized_subqueries`` method')
return names, searchParams | python | def _oversized_subqueries(
self,
coordinate,
radiusArcsec):
"""
*subdivide oversized query*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
- @review: when complete, clean _oversized_subqueries method
- @review: when complete add logging
"""
self.log.info('starting the ``_oversized_subqueries`` method')
import math
smallerRadiusArcsec = radiusArcsec / 2.
print "Calculating 7 sub-disks for coordinates %(coordinate)s, with smaller search radius of %(smallerRadiusArcsec)s arcsec" % locals()
ra = coordinate[0]
dec = coordinate[1]
shifts = [
(0, 0),
(0, math.sqrt(3.) / 2.),
(3. / 4., math.sqrt(3.) / 4.),
(3. / 4., -math.sqrt(3.) / 4.),
(0, -math.sqrt(3.) / 2.),
(-3. / 4., -math.sqrt(3.) / 4.),
(-3. / 4., math.sqrt(3.) / 4.)
]
subDiskCoordinates = []
count = 0
for s in shifts:
x1 = ra + s[0] * radiusArcsec / (60 * 60)
y1 = dec + s[1] * radiusArcsec / (60 * 60)
subDiskCoordinates.append((x1, y1))
names, searchParams = self.get_crossmatch_names(
listOfCoordinates=subDiskCoordinates,
radiusArcsec=smallerRadiusArcsec
)
self.log.info('completed the ``_oversized_subqueries`` method')
return names, searchParams | ['def', '_oversized_subqueries', '(', 'self', ',', 'coordinate', ',', 'radiusArcsec', ')', ':', 'self', '.', 'log', '.', 'info', '(', "'starting the ``_oversized_subqueries`` method'", ')', 'import', 'math', 'smallerRadiusArcsec', '=', 'radiusArcsec', '/', '2.', 'print', '"Calculating 7 sub-disks for coordinates %(coordinate)s, with smaller search radius of %(smallerRadiusArcsec)s arcsec"', '%', 'locals', '(', ')', 'ra', '=', 'coordinate', '[', '0', ']', 'dec', '=', 'coordinate', '[', '1', ']', 'shifts', '=', '[', '(', '0', ',', '0', ')', ',', '(', '0', ',', 'math', '.', 'sqrt', '(', '3.', ')', '/', '2.', ')', ',', '(', '3.', '/', '4.', ',', 'math', '.', 'sqrt', '(', '3.', ')', '/', '4.', ')', ',', '(', '3.', '/', '4.', ',', '-', 'math', '.', 'sqrt', '(', '3.', ')', '/', '4.', ')', ',', '(', '0', ',', '-', 'math', '.', 'sqrt', '(', '3.', ')', '/', '2.', ')', ',', '(', '-', '3.', '/', '4.', ',', '-', 'math', '.', 'sqrt', '(', '3.', ')', '/', '4.', ')', ',', '(', '-', '3.', '/', '4.', ',', 'math', '.', 'sqrt', '(', '3.', ')', '/', '4.', ')', ']', 'subDiskCoordinates', '=', '[', ']', 'count', '=', '0', 'for', 's', 'in', 'shifts', ':', 'x1', '=', 'ra', '+', 's', '[', '0', ']', '*', 'radiusArcsec', '/', '(', '60', '*', '60', ')', 'y1', '=', 'dec', '+', 's', '[', '1', ']', '*', 'radiusArcsec', '/', '(', '60', '*', '60', ')', 'subDiskCoordinates', '.', 'append', '(', '(', 'x1', ',', 'y1', ')', ')', 'names', ',', 'searchParams', '=', 'self', '.', 'get_crossmatch_names', '(', 'listOfCoordinates', '=', 'subDiskCoordinates', ',', 'radiusArcsec', '=', 'smallerRadiusArcsec', ')', 'self', '.', 'log', '.', 'info', '(', "'completed the ``_oversized_subqueries`` method'", ')', 'return', 'names', ',', 'searchParams'] | *subdivide oversized query*
**Key Arguments:**
# -
**Return:**
- None
.. todo::
- @review: when complete, clean _oversized_subqueries method
- @review: when complete add logging | ['*', 'subdivide', 'oversized', 'query', '*'] | train | https://github.com/thespacedoctor/neddy/blob/f32653b7d6a39a2c46c5845f83b3a29056311e5e/neddy/conesearch.py#L296-L347 |
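The seven shift vectors above place one half-radius disk at the centre and six around it in a hexagonal pattern; note the RA offsets are applied directly in degrees with no cos(dec) correction, mirroring the code. A stand-alone sketch of just that geometry, with no NED query involved:
import math

def sub_disk_centres(ra, dec, radius_arcsec):
    # same shift table as above, scaled by the original search radius
    shifts = [(0, 0), (0, math.sqrt(3.) / 2.), (3. / 4., math.sqrt(3.) / 4.),
              (3. / 4., -math.sqrt(3.) / 4.), (0, -math.sqrt(3.) / 2.),
              (-3. / 4., -math.sqrt(3.) / 4.), (-3. / 4., math.sqrt(3.) / 4.)]
    return [(ra + dx * radius_arcsec / 3600., dec + dy * radius_arcsec / 3600.)
            for dx, dy in shifts]

print(sub_disk_centres(150.0, 2.2, 600.))   # each centre is then searched with half the original radius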
1,426 | jopohl/urh | src/urh/signalprocessing/ProtocolAnalyzer.py | ProtocolAnalyzer.estimate_frequency_for_zero | def estimate_frequency_for_zero(self, sample_rate: float, nbits=42) -> float:
"""
Calculates the frequency of at most nbits logical zeros and returns the mean of these frequencies
:param nbits:
:return:
"""
return self.__estimate_frequency_for_bit(False, sample_rate, nbits) | python | def estimate_frequency_for_zero(self, sample_rate: float, nbits=42) -> float:
"""
Calculates the frequency of at most nbits logical zeros and returns the mean of these frequencies
:param nbits:
:return:
"""
return self.__estimate_frequency_for_bit(False, sample_rate, nbits) | ['def', 'estimate_frequency_for_zero', '(', 'self', ',', 'sample_rate', ':', 'float', ',', 'nbits', '=', '42', ')', '->', 'float', ':', 'return', 'self', '.', '__estimate_frequency_for_bit', '(', 'False', ',', 'sample_rate', ',', 'nbits', ')'] | Calculates the frequency of at most nbits logical zeros and returns the mean of these frequencies
:param nbits:
:return: | ['Calculates', 'the', 'frequency', 'of', 'at', 'most', 'nbits', 'logical', 'zeros', 'and', 'returns', 'the', 'mean', 'of', 'these', 'frequencies'] | train | https://github.com/jopohl/urh/blob/2eb33b125c8407964cd1092843cde5010eb88aae/src/urh/signalprocessing/ProtocolAnalyzer.py#L496-L503 |
1,427 | cbclab/MOT | mot/lib/cl_environments.py | CLEnvironmentFactory.all_devices | def all_devices(cl_device_type=None, platform=None):
"""Get multiple device environments, optionally only of the indicated type.
This will only fetch devices that support double point precision.
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be a opencl device type or a string matching 'GPU' or 'CPU'.
platform (opencl platform): The opencl platform to select the devices from
Returns:
list of CLEnvironment: List with the CL device environments.
"""
if isinstance(cl_device_type, str):
cl_device_type = device_type_from_string(cl_device_type)
runtime_list = []
if platform is None:
platforms = cl.get_platforms()
else:
platforms = [platform]
for platform in platforms:
if cl_device_type:
devices = platform.get_devices(device_type=cl_device_type)
else:
devices = platform.get_devices()
for device in devices:
if device_supports_double(device):
env = CLEnvironment(platform, device)
runtime_list.append(env)
return runtime_list | python | def all_devices(cl_device_type=None, platform=None):
"""Get multiple device environments, optionally only of the indicated type.
This will only fetch devices that support double point precision.
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be a opencl device type or a string matching 'GPU' or 'CPU'.
platform (opencl platform): The opencl platform to select the devices from
Returns:
list of CLEnvironment: List with the CL device environments.
"""
if isinstance(cl_device_type, str):
cl_device_type = device_type_from_string(cl_device_type)
runtime_list = []
if platform is None:
platforms = cl.get_platforms()
else:
platforms = [platform]
for platform in platforms:
if cl_device_type:
devices = platform.get_devices(device_type=cl_device_type)
else:
devices = platform.get_devices()
for device in devices:
if device_supports_double(device):
env = CLEnvironment(platform, device)
runtime_list.append(env)
return runtime_list | ['def', 'all_devices', '(', 'cl_device_type', '=', 'None', ',', 'platform', '=', 'None', ')', ':', 'if', 'isinstance', '(', 'cl_device_type', ',', 'str', ')', ':', 'cl_device_type', '=', 'device_type_from_string', '(', 'cl_device_type', ')', 'runtime_list', '=', '[', ']', 'if', 'platform', 'is', 'None', ':', 'platforms', '=', 'cl', '.', 'get_platforms', '(', ')', 'else', ':', 'platforms', '=', '[', 'platform', ']', 'for', 'platform', 'in', 'platforms', ':', 'if', 'cl_device_type', ':', 'devices', '=', 'platform', '.', 'get_devices', '(', 'device_type', '=', 'cl_device_type', ')', 'else', ':', 'devices', '=', 'platform', '.', 'get_devices', '(', ')', 'for', 'device', 'in', 'devices', ':', 'if', 'device_supports_double', '(', 'device', ')', ':', 'env', '=', 'CLEnvironment', '(', 'platform', ',', 'device', ')', 'runtime_list', '.', 'append', '(', 'env', ')', 'return', 'runtime_list'] | Get multiple device environments, optionally only of the indicated type.
This will only fetch devices that support double point precision.
Args:
cl_device_type (cl.device_type.* or string): The type of the device we want,
can be a opencl device type or a string matching 'GPU' or 'CPU'.
platform (opencl platform): The opencl platform to select the devices from
Returns:
list of CLEnvironment: List with the CL device environments. | ['Get', 'multiple', 'device', 'environments', 'optionally', 'only', 'of', 'the', 'indicated', 'type', '.'] | train | https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_environments.py#L210-L244 |
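A rough equivalent using pyopencl directly, restricted to GPUs for brevity; the double-precision test from device_supports_double is approximated here by a cl_khr_fp64 extension lookup, which is an assumption about how that helper works:
import pyopencl as cl

envs = []
for platform in cl.get_platforms():
    for device in platform.get_devices(device_type=cl.device_type.GPU):
        if 'cl_khr_fp64' in device.extensions:      # crude double-precision check
            envs.append((platform, device))
print(envs)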
1,428 | theislab/anndata | anndata/readwrite/read.py | read_umi_tools | def read_umi_tools(filename: PathLike, dtype: str='float32') -> AnnData:
"""Read a gzipped condensed count matrix from umi_tools.
Parameters
----------
filename
File name to read from.
"""
# import pandas for conversion of a dict of dicts into a matrix
# import gzip to read a gzipped file :-)
import gzip
from pandas import DataFrame
dod = {} # this will contain basically everything
fh = gzip.open(fspath(filename))
header = fh.readline() # read the first line
for line in fh:
t = line.decode('ascii').split('\t') # gzip read bytes, hence the decoding
try:
dod[t[1]].update({t[0]:int(t[2])})
except KeyError:
dod[t[1]] = {t[0]:int(t[2])}
df = DataFrame.from_dict(dod, orient='index') # build the matrix
df.fillna(value=0., inplace=True) # many NaN, replace with zeros
return AnnData(np.array(df), {'obs_names': df.index}, {'var_names': df.columns}, dtype=dtype) | python | def read_umi_tools(filename: PathLike, dtype: str='float32') -> AnnData:
"""Read a gzipped condensed count matrix from umi_tools.
Parameters
----------
filename
File name to read from.
"""
# import pandas for conversion of a dict of dicts into a matrix
# import gzip to read a gzipped file :-)
import gzip
from pandas import DataFrame
dod = {} # this will contain basically everything
fh = gzip.open(fspath(filename))
header = fh.readline() # read the first line
for line in fh:
t = line.decode('ascii').split('\t') # gzip read bytes, hence the decoding
try:
dod[t[1]].update({t[0]:int(t[2])})
except KeyError:
dod[t[1]] = {t[0]:int(t[2])}
df = DataFrame.from_dict(dod, orient='index') # build the matrix
df.fillna(value=0., inplace=True) # many NaN, replace with zeros
return AnnData(np.array(df), {'obs_names': df.index}, {'var_names': df.columns}, dtype=dtype) | ['def', 'read_umi_tools', '(', 'filename', ':', 'PathLike', ',', 'dtype', ':', 'str', '=', "'float32'", ')', '->', 'AnnData', ':', '# import pandas for conversion of a dict of dicts into a matrix', '# import gzip to read a gzipped file :-)', 'import', 'gzip', 'from', 'pandas', 'import', 'DataFrame', 'dod', '=', '{', '}', '# this will contain basically everything', 'fh', '=', 'gzip', '.', 'open', '(', 'fspath', '(', 'filename', ')', ')', 'header', '=', 'fh', '.', 'readline', '(', ')', '# read the first line', 'for', 'line', 'in', 'fh', ':', 't', '=', 'line', '.', 'decode', '(', "'ascii'", ')', '.', 'split', '(', "'\\t'", ')', '# gzip read bytes, hence the decoding', 'try', ':', 'dod', '[', 't', '[', '1', ']', ']', '.', 'update', '(', '{', 't', '[', '0', ']', ':', 'int', '(', 't', '[', '2', ']', ')', '}', ')', 'except', 'KeyError', ':', 'dod', '[', 't', '[', '1', ']', ']', '=', '{', 't', '[', '0', ']', ':', 'int', '(', 't', '[', '2', ']', ')', '}', 'df', '=', 'DataFrame', '.', 'from_dict', '(', 'dod', ',', 'orient', '=', "'index'", ')', '# build the matrix', 'df', '.', 'fillna', '(', 'value', '=', '0.', ',', 'inplace', '=', 'True', ')', '# many NaN, replace with zeros', 'return', 'AnnData', '(', 'np', '.', 'array', '(', 'df', ')', ',', '{', "'obs_names'", ':', 'df', '.', 'index', '}', ',', '{', "'var_names'", ':', 'df', '.', 'columns', '}', ',', 'dtype', '=', 'dtype', ')'] | Read a gzipped condensed count matrix from umi_tools.
Parameters
----------
filename
File name to read from. | ['Read', 'a', 'gzipped', 'condensed', 'count', 'matrix', 'from', 'umi_tools', '.'] | train | https://github.com/theislab/anndata/blob/34f4eb63710628fbc15e7050e5efcac1d7806062/anndata/readwrite/read.py#L68-L94 |
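A usage sketch, assuming the reader is re-exported at package level as anndata does for its other read_* helpers; the file name is invented and stands for a gzipped gene/cell/count table produced by umi_tools count:
import anndata as ad

adata = ad.read_umi_tools('counts.tsv.gz')   # hypothetical file name
print(adata.shape)                           # observations x variables built from the dict of dicts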
1,429 | twilio/twilio-python | twilio/rest/api/v2010/account/usage/trigger.py | TriggerInstance.update | def update(self, callback_method=values.unset, callback_url=values.unset,
friendly_name=values.unset):
"""
Update the TriggerInstance
:param unicode callback_method: The HTTP method to use to call callback_url
:param unicode callback_url: The URL we call when the trigger fires
:param unicode friendly_name: A string to describe the resource
:returns: Updated TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
return self._proxy.update(
callback_method=callback_method,
callback_url=callback_url,
friendly_name=friendly_name,
) | python | def update(self, callback_method=values.unset, callback_url=values.unset,
friendly_name=values.unset):
"""
Update the TriggerInstance
:param unicode callback_method: The HTTP method to use to call callback_url
:param unicode callback_url: The URL we call when the trigger fires
:param unicode friendly_name: A string to describe the resource
:returns: Updated TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance
"""
return self._proxy.update(
callback_method=callback_method,
callback_url=callback_url,
friendly_name=friendly_name,
) | ['def', 'update', '(', 'self', ',', 'callback_method', '=', 'values', '.', 'unset', ',', 'callback_url', '=', 'values', '.', 'unset', ',', 'friendly_name', '=', 'values', '.', 'unset', ')', ':', 'return', 'self', '.', '_proxy', '.', 'update', '(', 'callback_method', '=', 'callback_method', ',', 'callback_url', '=', 'callback_url', ',', 'friendly_name', '=', 'friendly_name', ',', ')'] | Update the TriggerInstance
:param unicode callback_method: The HTTP method to use to call callback_url
:param unicode callback_url: The URL we call when the trigger fires
:param unicode friendly_name: A string to describe the resource
:returns: Updated TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerInstance | ['Update', 'the', 'TriggerInstance'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/usage/trigger.py#L789-L805 |
1,430 | Karaage-Cluster/python-tldap | tldap/database/__init__.py | _python_to_mod_new | def _python_to_mod_new(changes: Changeset) -> Dict[str, List[List[bytes]]]:
""" Convert a LdapChanges object to a modlist for add operation. """
table: LdapObjectClass = type(changes.src)
fields = table.get_fields()
result: Dict[str, List[List[bytes]]] = {}
for name, field in fields.items():
if field.db_field:
try:
value = field.to_db(changes.get_value_as_list(name))
if len(value) > 0:
result[name] = value
except ValidationError as e:
raise ValidationError(f"{name}: {e}.")
return result | python | def _python_to_mod_new(changes: Changeset) -> Dict[str, List[List[bytes]]]:
""" Convert a LdapChanges object to a modlist for add operation. """
table: LdapObjectClass = type(changes.src)
fields = table.get_fields()
result: Dict[str, List[List[bytes]]] = {}
for name, field in fields.items():
if field.db_field:
try:
value = field.to_db(changes.get_value_as_list(name))
if len(value) > 0:
result[name] = value
except ValidationError as e:
raise ValidationError(f"{name}: {e}.")
return result | ['def', '_python_to_mod_new', '(', 'changes', ':', 'Changeset', ')', '->', 'Dict', '[', 'str', ',', 'List', '[', 'List', '[', 'bytes', ']', ']', ']', ':', 'table', ':', 'LdapObjectClass', '=', 'type', '(', 'changes', '.', 'src', ')', 'fields', '=', 'table', '.', 'get_fields', '(', ')', 'result', ':', 'Dict', '[', 'str', ',', 'List', '[', 'List', '[', 'bytes', ']', ']', ']', '=', '{', '}', 'for', 'name', ',', 'field', 'in', 'fields', '.', 'items', '(', ')', ':', 'if', 'field', '.', 'db_field', ':', 'try', ':', 'value', '=', 'field', '.', 'to_db', '(', 'changes', '.', 'get_value_as_list', '(', 'name', ')', ')', 'if', 'len', '(', 'value', ')', '>', '0', ':', 'result', '[', 'name', ']', '=', 'value', 'except', 'ValidationError', 'as', 'e', ':', 'raise', 'ValidationError', '(', 'f"{name}: {e}."', ')', 'return', 'result'] | Convert a LdapChanges object to a modlist for add operation. | ['Convert', 'a', 'LdapChanges', 'object', 'to', 'a', 'modlist', 'for', 'add', 'operation', '.'] | train | https://github.com/Karaage-Cluster/python-tldap/blob/61f1af74a3648cb6491e7eeb1ee2eb395d67bf59/tldap/database/__init__.py#L407-L423 |
1,431 | modin-project/modin | modin/pandas/base.py | BasePandasDataset.pow | def pow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
return self._binary_op(
"pow", other, axis=axis, level=level, fill_value=fill_value
) | python | def pow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
return self._binary_op(
"pow", other, axis=axis, level=level, fill_value=fill_value
) | ['def', 'pow', '(', 'self', ',', 'other', ',', 'axis', '=', '"columns"', ',', 'level', '=', 'None', ',', 'fill_value', '=', 'None', ')', ':', 'return', 'self', '.', '_binary_op', '(', '"pow"', ',', 'other', ',', 'axis', '=', 'axis', ',', 'level', '=', 'level', ',', 'fill_value', '=', 'fill_value', ')'] | Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied. | ['Pow', 'this', 'DataFrame', 'against', 'another', 'DataFrame', '/', 'Series', '/', 'scalar', '.', 'Args', ':', 'other', ':', 'The', 'object', 'to', 'use', 'to', 'apply', 'the', 'pow', 'against', 'this', '.', 'axis', ':', 'The', 'axis', 'to', 'pow', 'over', '.', 'level', ':', 'The', 'Multilevel', 'index', 'level', 'to', 'apply', 'pow', 'over', '.', 'fill_value', ':', 'The', 'value', 'to', 'fill', 'NaNs', 'with', '.', 'Returns', ':', 'A', 'new', 'DataFrame', 'with', 'the', 'Pow', 'applied', '.'] | train | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L1738-L1752 |
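Usage mirrors pandas; a small sketch where a missing value is filled before squaring (modin needs one of its execution engines installed):
import modin.pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, None, 6]})
print(df.pow(2, fill_value=0))   # the NaN in column b is treated as 0, so it squares to 0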
1,432 | mikekatz04/BOWIE | snr_calculator_folder/gwsnrcalc/genconutils/forminput.py | SensitivityInput.set_wd_noise | def set_wd_noise(self, wd_noise):
"""Add White Dwarf Background Noise
This adds the White Dwarf (WD) Background noise. This can either do calculations with,
without, or with and without WD noise.
Args:
wd_noise (bool or str, optional): Add or remove WD background noise. First option is to
have only calculations with the wd_noise. For this, use `yes` or True.
Second option is no WD noise. For this, use `no` or False. For both calculations
with and without WD noise, use `both`.
Raises:
ValueError: Input value is not one of the options.
"""
if isinstance(wd_noise, bool):
wd_noise = str(wd_noise)
if wd_noise.lower() == 'yes' or wd_noise.lower() == 'true':
wd_noise = 'True'
elif wd_noise.lower() == 'no' or wd_noise.lower() == 'false':
wd_noise = 'False'
elif wd_noise.lower() == 'both':
wd_noise = 'Both'
else:
raise ValueError('wd_noise must be yes, no, True, False, or Both.')
self.sensitivity_input.add_wd_noise = wd_noise
return | python | def set_wd_noise(self, wd_noise):
"""Add White Dwarf Background Noise
This adds the White Dwarf (WD) Background noise. This can either do calculations with,
without, or with and without WD noise.
Args:
wd_noise (bool or str, optional): Add or remove WD background noise. First option is to
have only calculations with the wd_noise. For this, use `yes` or True.
Second option is no WD noise. For this, use `no` or False. For both calculations
with and without WD noise, use `both`.
Raises:
ValueError: Input value is not one of the options.
"""
if isinstance(wd_noise, bool):
wd_noise = str(wd_noise)
if wd_noise.lower() == 'yes' or wd_noise.lower() == 'true':
wd_noise = 'True'
elif wd_noise.lower() == 'no' or wd_noise.lower() == 'false':
wd_noise = 'False'
elif wd_noise.lower() == 'both':
wd_noise = 'Both'
else:
raise ValueError('wd_noise must be yes, no, True, False, or Both.')
self.sensitivity_input.add_wd_noise = wd_noise
return | ['def', 'set_wd_noise', '(', 'self', ',', 'wd_noise', ')', ':', 'if', 'isinstance', '(', 'wd_noise', ',', 'bool', ')', ':', 'wd_noise', '=', 'str', '(', 'wd_noise', ')', 'if', 'wd_noise', '.', 'lower', '(', ')', '==', "'yes'", 'or', 'wd_noise', '.', 'lower', '(', ')', '==', "'true'", ':', 'wd_noise', '=', "'True'", 'elif', 'wd_noise', '.', 'lower', '(', ')', '==', "'no'", 'or', 'wd_noise', '.', 'lower', '(', ')', '==', "'false'", ':', 'wd_noise', '=', "'False'", 'elif', 'wd_noise', '.', 'lower', '(', ')', '==', "'both'", ':', 'wd_noise', '=', "'Both'", 'else', ':', 'raise', 'ValueError', '(', "'wd_noise must be yes, no, True, False, or Both.'", ')', 'self', '.', 'sensitivity_input', '.', 'add_wd_noise', '=', 'wd_noise', 'return'] | Add White Dwarf Background Noise
This adds the White Dwarf (WD) Background noise. This can either do calculations with,
without, or with and without WD noise.
Args:
wd_noise (bool or str, optional): Add or remove WD background noise. First option is to
have only calculations with the wd_noise. For this, use `yes` or True.
Second option is no WD noise. For this, use `no` or False. For both calculations
with and without WD noise, use `both`.
Raises:
ValueError: Input value is not one of the options. | ['Add', 'White', 'Dwarf', 'Background', 'Noise'] | train | https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/genconutils/forminput.py#L200-L229 |
1,433 | beregond/jsonmodels | jsonmodels/models.py | Base.populate | def populate(self, **values):
"""Populate values to fields. Skip non-existing."""
values = values.copy()
fields = list(self.iterate_with_name())
for _, structure_name, field in fields:
if structure_name in values:
field.__set__(self, values.pop(structure_name))
for name, _, field in fields:
if name in values:
field.__set__(self, values.pop(name)) | python | def populate(self, **values):
"""Populate values to fields. Skip non-existing."""
values = values.copy()
fields = list(self.iterate_with_name())
for _, structure_name, field in fields:
if structure_name in values:
field.__set__(self, values.pop(structure_name))
for name, _, field in fields:
if name in values:
field.__set__(self, values.pop(name)) | ['def', 'populate', '(', 'self', ',', '*', '*', 'values', ')', ':', 'values', '=', 'values', '.', 'copy', '(', ')', 'fields', '=', 'list', '(', 'self', '.', 'iterate_with_name', '(', ')', ')', 'for', '_', ',', 'structure_name', ',', 'field', 'in', 'fields', ':', 'if', 'structure_name', 'in', 'values', ':', 'field', '.', '__set__', '(', 'self', ',', 'values', '.', 'pop', '(', 'structure_name', ')', ')', 'for', 'name', ',', '_', ',', 'field', 'in', 'fields', ':', 'if', 'name', 'in', 'values', ':', 'field', '.', '__set__', '(', 'self', ',', 'values', '.', 'pop', '(', 'name', ')', ')'] | Populate values to fields. Skip non-existing. | ['Populate', 'values', 'to', 'fields', '.', 'Skip', 'non', '-', 'existing', '.'] | train | https://github.com/beregond/jsonmodels/blob/97a1a6b90a49490fc5a6078f49027055d2e13541/jsonmodels/models.py#L36-L45 |
1,434 | chaoss/grimoirelab-sortinghat | sortinghat/cmd/load.py | Load.__reset_unique_identities | def __reset_unique_identities(self):
"""Clear identities relationships and enrollments data"""
self.log("Reseting unique identities...")
self.log("Clearing identities relationships")
nids = 0
uidentities = api.unique_identities(self.db)
for uidentity in uidentities:
for identity in uidentity.identities:
api.move_identity(self.db, identity.id, identity.id)
nids += 1
self.log("Relationships cleared for %s identities" % nids)
self.log("Clearing enrollments")
with self.db.connect() as session:
enrollments = session.query(Enrollment).all()
for enr in enrollments:
session.delete(enr)
self.log("Enrollments cleared") | python | def __reset_unique_identities(self):
"""Clear identities relationships and enrollments data"""
self.log("Reseting unique identities...")
self.log("Clearing identities relationships")
nids = 0
uidentities = api.unique_identities(self.db)
for uidentity in uidentities:
for identity in uidentity.identities:
api.move_identity(self.db, identity.id, identity.id)
nids += 1
self.log("Relationships cleared for %s identities" % nids)
self.log("Clearing enrollments")
with self.db.connect() as session:
enrollments = session.query(Enrollment).all()
for enr in enrollments:
session.delete(enr)
self.log("Enrollments cleared") | ['def', '__reset_unique_identities', '(', 'self', ')', ':', 'self', '.', 'log', '(', '"Reseting unique identities..."', ')', 'self', '.', 'log', '(', '"Clearing identities relationships"', ')', 'nids', '=', '0', 'uidentities', '=', 'api', '.', 'unique_identities', '(', 'self', '.', 'db', ')', 'for', 'uidentity', 'in', 'uidentities', ':', 'for', 'identity', 'in', 'uidentity', '.', 'identities', ':', 'api', '.', 'move_identity', '(', 'self', '.', 'db', ',', 'identity', '.', 'id', ',', 'identity', '.', 'id', ')', 'nids', '+=', '1', 'self', '.', 'log', '(', '"Relationships cleared for %s identities"', '%', 'nids', ')', 'self', '.', 'log', '(', '"Clearing enrollments"', ')', 'with', 'self', '.', 'db', '.', 'connect', '(', ')', 'as', 'session', ':', 'enrollments', '=', 'session', '.', 'query', '(', 'Enrollment', ')', '.', 'all', '(', ')', 'for', 'enr', 'in', 'enrollments', ':', 'session', '.', 'delete', '(', 'enr', ')', 'self', '.', 'log', '(', '"Enrollments cleared"', ')'] | Clear identities relationships and enrollments data | ['Clear', 'identities', 'relationships', 'and', 'enrollments', 'data'] | train | https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/load.py#L325-L350 |
1,435 | bukun/TorCMS | torcms/model/post_model.py | MPost.get_all | def get_all(kind='2'):
'''
Get All the records.
'''
return TabPost.select().where(
(TabPost.kind == kind) &
(TabPost.valid == 1)
).order_by(
TabPost.time_update.desc()
) | python | def get_all(kind='2'):
'''
Get All the records.
'''
return TabPost.select().where(
(TabPost.kind == kind) &
(TabPost.valid == 1)
).order_by(
TabPost.time_update.desc()
) | ['def', 'get_all', '(', 'kind', '=', "'2'", ')', ':', 'return', 'TabPost', '.', 'select', '(', ')', '.', 'where', '(', '(', 'TabPost', '.', 'kind', '==', 'kind', ')', '&', '(', 'TabPost', '.', 'valid', '==', '1', ')', ')', '.', 'order_by', '(', 'TabPost', '.', 'time_update', '.', 'desc', '(', ')', ')'] | Get All the records. | ['Get', 'All', 'the', 'records', '.'] | train | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_model.py#L560-L569 |
1,436 | artefactual-labs/mets-reader-writer | metsrw/mets.py | METSDocument.remove_entry | def remove_entry(self, fs_entry):
"""Removes an FSEntry object from this METS document.
Any children of this FSEntry will also be removed. This will be removed
as a child of its parent, if any.
:param metsrw.mets.FSEntry fs_entry: FSEntry to remove from the METS
"""
try:
self._root_elements.remove(fs_entry)
except ValueError: # fs_entry may not be in the root elements
pass
if fs_entry.parent:
fs_entry.parent.remove_child(fs_entry)
# Reset file lists so they get regenerated without the removed file(s)
self._all_files = None | python | def remove_entry(self, fs_entry):
"""Removes an FSEntry object from this METS document.
Any children of this FSEntry will also be removed. This will be removed
as a child of its parent, if any.
:param metsrw.mets.FSEntry fs_entry: FSEntry to remove from the METS
"""
try:
self._root_elements.remove(fs_entry)
except ValueError: # fs_entry may not be in the root elements
pass
if fs_entry.parent:
fs_entry.parent.remove_child(fs_entry)
# Reset file lists so they get regenerated without the removed file(s)
self._all_files = None | ['def', 'remove_entry', '(', 'self', ',', 'fs_entry', ')', ':', 'try', ':', 'self', '.', '_root_elements', '.', 'remove', '(', 'fs_entry', ')', 'except', 'ValueError', ':', '# fs_entry may not be in the root elements', 'pass', 'if', 'fs_entry', '.', 'parent', ':', 'fs_entry', '.', 'parent', '.', 'remove_child', '(', 'fs_entry', ')', '# Reset file lists so they get regenerated without the removed file(s)', 'self', '.', '_all_files', '=', 'None'] | Removes an FSEntry object from this METS document.
Any children of this FSEntry will also be removed. This will be removed
as a child of its parent, if any.
:param metsrw.mets.FSEntry fs_entry: FSEntry to remove from the METS | ['Removes', 'an', 'FSEntry', 'object', 'from', 'this', 'METS', 'document', '.'] | train | https://github.com/artefactual-labs/mets-reader-writer/blob/d95939cabdfdc25cb1bf67df0c84bd0d6e6a73ff/metsrw/mets.py#L123-L138 |
1,437 | adamziel/python_translate | python_translate/translations.py | MessageCatalogue.get | def get(self, id, domain='messages'):
"""
Gets a message translation.
@rtype: str
@return: The message translation
"""
assert isinstance(id, (str, unicode))
assert isinstance(domain, (str, unicode))
if self.defines(id, domain):
return self.messages[domain][id]
if self.fallback_catalogue is not None:
return self.fallback_catalogue.get(id, domain)
return id | python | def get(self, id, domain='messages'):
"""
Gets a message translation.
@rtype: str
@return: The message translation
"""
assert isinstance(id, (str, unicode))
assert isinstance(domain, (str, unicode))
if self.defines(id, domain):
return self.messages[domain][id]
if self.fallback_catalogue is not None:
return self.fallback_catalogue.get(id, domain)
return id | ['def', 'get', '(', 'self', ',', 'id', ',', 'domain', '=', "'messages'", ')', ':', 'assert', 'isinstance', '(', 'id', ',', '(', 'str', ',', 'unicode', ')', ')', 'assert', 'isinstance', '(', 'domain', ',', '(', 'str', ',', 'unicode', ')', ')', 'if', 'self', '.', 'defines', '(', 'id', ',', 'domain', ')', ':', 'return', 'self', '.', 'messages', '[', 'domain', ']', '[', 'id', ']', 'if', 'self', '.', 'fallback_catalogue', 'is', 'not', 'None', ':', 'return', 'self', '.', 'fallback_catalogue', '.', 'get', '(', 'id', ',', 'domain', ')', 'return', 'id'] | Gets a message translation.
@rtype: str
@return: The message translation | ['Gets', 'a', 'message', 'translation', '.'] | train | https://github.com/adamziel/python_translate/blob/0aee83f434bd2d1b95767bcd63adb7ac7036c7df/python_translate/translations.py#L114-L130 |
1,438 | digidotcom/python-devicecloud | devicecloud/devicecore.py | Device.is_connected | def is_connected(self, use_cached=True):
"""Return True if the device is currently connected and False if not"""
device_json = self.get_device_json(use_cached)
return int(device_json.get("dpConnectionStatus")) > 0 | python | def is_connected(self, use_cached=True):
"""Return True if the device is currently connected and False if not"""
device_json = self.get_device_json(use_cached)
return int(device_json.get("dpConnectionStatus")) > 0 | ['def', 'is_connected', '(', 'self', ',', 'use_cached', '=', 'True', ')', ':', 'device_json', '=', 'self', '.', 'get_device_json', '(', 'use_cached', ')', 'return', 'int', '(', 'device_json', '.', 'get', '(', '"dpConnectionStatus"', ')', ')', '>', '0'] | Return True if the device is currrently connect and False if not | ['Return', 'True', 'if', 'the', 'device', 'is', 'currrently', 'connect', 'and', 'False', 'if', 'not'] | train | https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/devicecore.py#L428-L431 |
1,439 | pytroll/posttroll | posttroll/address_receiver.py | AddressReceiver._check_age | def _check_age(self, pub, min_interval=timedelta(seconds=0)):
"""Check the age of the receiver.
"""
now = datetime.utcnow()
if (now - self._last_age_check) <= min_interval:
return
LOGGER.debug("%s - checking addresses", str(datetime.utcnow()))
self._last_age_check = now
to_del = []
with self._address_lock:
for addr, metadata in self._addresses.items():
atime = metadata["receive_time"]
if now - atime > self._max_age:
mda = {'status': False,
'URI': addr,
'service': metadata['service']}
msg = Message('/address/' + metadata['name'], 'info', mda)
to_del.append(addr)
LOGGER.info("publish remove '%s'", str(msg))
pub.send(msg.encode())
for addr in to_del:
del self._addresses[addr] | python | def _check_age(self, pub, min_interval=timedelta(seconds=0)):
"""Check the age of the receiver.
"""
now = datetime.utcnow()
if (now - self._last_age_check) <= min_interval:
return
LOGGER.debug("%s - checking addresses", str(datetime.utcnow()))
self._last_age_check = now
to_del = []
with self._address_lock:
for addr, metadata in self._addresses.items():
atime = metadata["receive_time"]
if now - atime > self._max_age:
mda = {'status': False,
'URI': addr,
'service': metadata['service']}
msg = Message('/address/' + metadata['name'], 'info', mda)
to_del.append(addr)
LOGGER.info("publish remove '%s'", str(msg))
pub.send(msg.encode())
for addr in to_del:
del self._addresses[addr] | ['def', '_check_age', '(', 'self', ',', 'pub', ',', 'min_interval', '=', 'timedelta', '(', 'seconds', '=', '0', ')', ')', ':', 'now', '=', 'datetime', '.', 'utcnow', '(', ')', 'if', '(', 'now', '-', 'self', '.', '_last_age_check', ')', '<=', 'min_interval', ':', 'return', 'LOGGER', '.', 'debug', '(', '"%s - checking addresses"', ',', 'str', '(', 'datetime', '.', 'utcnow', '(', ')', ')', ')', 'self', '.', '_last_age_check', '=', 'now', 'to_del', '=', '[', ']', 'with', 'self', '.', '_address_lock', ':', 'for', 'addr', ',', 'metadata', 'in', 'self', '.', '_addresses', '.', 'items', '(', ')', ':', 'atime', '=', 'metadata', '[', '"receive_time"', ']', 'if', 'now', '-', 'atime', '>', 'self', '.', '_max_age', ':', 'mda', '=', '{', "'status'", ':', 'False', ',', "'URI'", ':', 'addr', ',', "'service'", ':', 'metadata', '[', "'service'", ']', '}', 'msg', '=', 'Message', '(', "'/address/'", '+', 'metadata', '[', "'name'", ']', ',', "'info'", ',', 'mda', ')', 'to_del', '.', 'append', '(', 'addr', ')', 'LOGGER', '.', 'info', '(', '"publish remove \'%s\'"', ',', 'str', '(', 'msg', ')', ')', 'pub', '.', 'send', '(', 'msg', '.', 'encode', '(', ')', ')', 'for', 'addr', 'in', 'to_del', ':', 'del', 'self', '.', '_addresses', '[', 'addr', ']'] | Check the age of the receiver. | ['Check', 'the', 'age', 'of', 'the', 'receiver', '.'] | train | https://github.com/pytroll/posttroll/blob/8e811a0544b5182c4a72aed074b2ff8c4324e94d/posttroll/address_receiver.py#L114-L136 |
1,440 | croscon/fleaker | fleaker/marshmallow/fields/arrow.py | ArrowField._serialize | def _serialize(self, value, attr, obj):
"""Convert the Arrow object into a string."""
if isinstance(value, arrow.arrow.Arrow):
value = value.datetime
return super(ArrowField, self)._serialize(value, attr, obj) | python | def _serialize(self, value, attr, obj):
"""Convert the Arrow object into a string."""
if isinstance(value, arrow.arrow.Arrow):
value = value.datetime
return super(ArrowField, self)._serialize(value, attr, obj) | ['def', '_serialize', '(', 'self', ',', 'value', ',', 'attr', ',', 'obj', ')', ':', 'if', 'isinstance', '(', 'value', ',', 'arrow', '.', 'arrow', '.', 'Arrow', ')', ':', 'value', '=', 'value', '.', 'datetime', 'return', 'super', '(', 'ArrowField', ',', 'self', ')', '.', '_serialize', '(', 'value', ',', 'attr', ',', 'obj', ')'] | Convert the Arrow object into a string. | ['Convert', 'the', 'Arrow', 'object', 'into', 'a', 'string', '.'] | train | https://github.com/croscon/fleaker/blob/046b026b79c9912bceebb17114bc0c5d2d02e3c7/fleaker/marshmallow/fields/arrow.py#L42-L47 |
1,441 | openbermuda/ripl | ripl/caption.py | SlideShow.create_image | def create_image(self, image_file, caption):
""" Create an image with a caption """
suffix = 'png'
if image_file:
img = Image.open(os.path.join(self.gallery, image_file))
width, height = img.size
ratio = width/WIDTH
img = img.resize((int(width // ratio),
int(height // ratio)),
Image.ANTIALIAS)
else:
img = Image.new('RGB', (WIDTH, HEIGHT), 'black')
image = self.add_caption(img, caption)
image = img
return image | python | def create_image(self, image_file, caption):
""" Create an image with a caption """
suffix = 'png'
if image_file:
img = Image.open(os.path.join(self.gallery, image_file))
width, height = img.size
ratio = width/WIDTH
img = img.resize((int(width // ratio),
int(height // ratio)),
Image.ANTIALIAS)
else:
img = Image.new('RGB', (WIDTH, HEIGHT), 'black')
image = self.add_caption(img, caption)
image = img
return image | ['def', 'create_image', '(', 'self', ',', 'image_file', ',', 'caption', ')', ':', 'suffix', '=', "'png'", 'if', 'image_file', ':', 'img', '=', 'Image', '.', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'gallery', ',', 'image_file', ')', ')', 'width', ',', 'height', '=', 'img', '.', 'size', 'ratio', '=', 'width', '/', 'WIDTH', 'img', '=', 'img', '.', 'resize', '(', '(', 'int', '(', 'width', '//', 'ratio', ')', ',', 'int', '(', 'height', '//', 'ratio', ')', ')', ',', 'Image', '.', 'ANTIALIAS', ')', 'else', ':', 'img', '=', 'Image', '.', 'new', '(', "'RGB'", ',', '(', 'WIDTH', ',', 'HEIGHT', ')', ',', "'black'", ')', 'image', '=', 'self', '.', 'add_caption', '(', 'img', ',', 'caption', ')', 'image', '=', 'img', 'return', 'image'] | Create an image with a caption | ['Create', 'an', 'image', 'with', 'a', 'caption'] | train | https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/caption.py#L65-L81 |
1,442 | ranaroussi/ezibpy | ezibpy/ezibpy.py | ezIBpy.requestAccountUpdates | def requestAccountUpdates(self, subscribe=True):
"""
Register to account updates
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqaccountupdates.htm
"""
if self.subscribeAccount != subscribe:
self.subscribeAccount = subscribe
self.ibConn.reqAccountUpdates(subscribe, 0) | python | def requestAccountUpdates(self, subscribe=True):
"""
Register to account updates
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqaccountupdates.htm
"""
if self.subscribeAccount != subscribe:
self.subscribeAccount = subscribe
self.ibConn.reqAccountUpdates(subscribe, 0) | ['def', 'requestAccountUpdates', '(', 'self', ',', 'subscribe', '=', 'True', ')', ':', 'if', 'self', '.', 'subscribeAccount', '!=', 'subscribe', ':', 'self', '.', 'subscribeAccount', '=', 'subscribe', 'self', '.', 'ibConn', '.', 'reqAccountUpdates', '(', 'subscribe', ',', '0', ')'] | Register to account updates
https://www.interactivebrokers.com/en/software/api/apiguide/java/reqaccountupdates.htm | ['Register', 'to', 'account', 'updates', 'https', ':', '//', 'www', '.', 'interactivebrokers', '.', 'com', '/', 'en', '/', 'software', '/', 'api', '/', 'apiguide', '/', 'java', '/', 'reqaccountupdates', '.', 'htm'] | train | https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/ezibpy.py#L1954-L1961 |
1,443 | wheeler-microfluidics/dmf-control-board-firmware | dmf_control_board_firmware/__init__.py | feedback_results_to_measurements_frame | def feedback_results_to_measurements_frame(feedback_result):
'''
Extract measured data from `FeedbackResults` instance into
`pandas.DataFrame`.
'''
index = pd.Index(feedback_result.time * 1e-3, name='seconds')
df_feedback = pd.DataFrame(np.column_stack([feedback_result.V_fb,
feedback_result.V_hv,
feedback_result.fb_resistor,
feedback_result.hv_resistor]),
columns=['V_fb', 'V_hv', 'fb_resistor',
'hv_resistor'],
index=index)
df_feedback.insert(0, 'frequency', feedback_result.frequency)
return df_feedback | python | def feedback_results_to_measurements_frame(feedback_result):
'''
Extract measured data from `FeedbackResults` instance into
`pandas.DataFrame`.
'''
index = pd.Index(feedback_result.time * 1e-3, name='seconds')
df_feedback = pd.DataFrame(np.column_stack([feedback_result.V_fb,
feedback_result.V_hv,
feedback_result.fb_resistor,
feedback_result.hv_resistor]),
columns=['V_fb', 'V_hv', 'fb_resistor',
'hv_resistor'],
index=index)
df_feedback.insert(0, 'frequency', feedback_result.frequency)
return df_feedback | ['def', 'feedback_results_to_measurements_frame', '(', 'feedback_result', ')', ':', 'index', '=', 'pd', '.', 'Index', '(', 'feedback_result', '.', 'time', '*', '1e-3', ',', 'name', '=', "'seconds'", ')', 'df_feedback', '=', 'pd', '.', 'DataFrame', '(', 'np', '.', 'column_stack', '(', '[', 'feedback_result', '.', 'V_fb', ',', 'feedback_result', '.', 'V_hv', ',', 'feedback_result', '.', 'fb_resistor', ',', 'feedback_result', '.', 'hv_resistor', ']', ')', ',', 'columns', '=', '[', "'V_fb'", ',', "'V_hv'", ',', "'fb_resistor'", ',', "'hv_resistor'", ']', ',', 'index', '=', 'index', ')', 'df_feedback', '.', 'insert', '(', '0', ',', "'frequency'", ',', 'feedback_result', '.', 'frequency', ')', 'return', 'df_feedback'] | Extract measured data from `FeedbackResults` instance into
`pandas.DataFrame`. | ['Extract', 'measured', 'data', 'from', 'FeedbackResults', 'instance', 'into', 'pandas', '.', 'DataFrame', '.'] | train | https://github.com/wheeler-microfluidics/dmf-control-board-firmware/blob/1cd8cc9a148d530f9a11f634f2dbfe73f08aa27c/dmf_control_board_firmware/__init__.py#L158-L172 |
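The conversion is ordinary pandas column stacking onto a seconds index; a generic illustration with made-up arrays in place of the FeedbackResults fields:
import numpy as np
import pandas as pd

time_ms = np.array([0., 10., 20.])                         # stand-in for feedback_result.time
index = pd.Index(time_ms * 1e-3, name='seconds')
df = pd.DataFrame(np.column_stack([np.ones(3), np.zeros(3)]),
                  columns=['V_fb', 'V_hv'], index=index)
df.insert(0, 'frequency', 1e4)                             # broadcast a scalar, as above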
1,444 | ioos/compliance-checker | compliance_checker/acdd.py | ACDDBaseCheck.check_acknowledgment | def check_acknowledgment(self, ds):
'''
Check if acknowledgment/acknowledgement attribute is present. Because
acknowledgement has its own check, we are keeping it out of the Global
Attributes (even though it is a Global Attr).
:param netCDF4.Dataset ds: An open netCDF dataset
'''
check = False
messages = []
if hasattr(ds, 'acknowledgment') or hasattr(ds, 'acknowledgement'):
check = True
else:
messages.append("acknowledgment/acknowledgement not present")
# name="Global Attributes" so gets grouped with Global Attributes
return Result(BaseCheck.MEDIUM, check, "Global Attributes", msgs=messages) | python | def check_acknowledgment(self, ds):
'''
Check if acknowledgment/acknowledgement attribute is present. Because
acknowledgement has its own check, we are keeping it out of the Global
Attributes (even though it is a Global Attr).
:param netCDF4.Dataset ds: An open netCDF dataset
'''
check = False
messages = []
if hasattr(ds, 'acknowledgment') or hasattr(ds, 'acknowledgement'):
check = True
else:
messages.append("acknowledgment/acknowledgement not present")
# name="Global Attributes" so gets grouped with Global Attributes
return Result(BaseCheck.MEDIUM, check, "Global Attributes", msgs=messages) | ['def', 'check_acknowledgment', '(', 'self', ',', 'ds', ')', ':', 'check', '=', 'False', 'messages', '=', '[', ']', 'if', 'hasattr', '(', 'ds', ',', "'acknowledgment'", ')', 'or', 'hasattr', '(', 'ds', ',', "'acknowledgement'", ')', ':', 'check', '=', 'True', 'else', ':', 'messages', '.', 'append', '(', '"acknowledgment/acknowledgement not present"', ')', '# name="Global Attributes" so gets grouped with Global Attributes', 'return', 'Result', '(', 'BaseCheck', '.', 'MEDIUM', ',', 'check', ',', '"Global Attributes"', ',', 'msgs', '=', 'messages', ')'] | Check if acknowledgment/acknowledgment attribute is present. Because
acknowledgement has its own check, we are keeping it out of the Global
Attributes (even though it is a Global Attr).
:param netCDF4.Dataset ds: An open netCDF dataset | ['Check', 'if', 'acknowledgment', '/', 'acknowledgment', 'attribute', 'is', 'present', '.', 'Because', 'acknowledgement', 'has', 'its', 'own', 'check', 'we', 'are', 'keeping', 'it', 'out', 'of', 'the', 'Global', 'Attributes', '(', 'even', 'though', 'it', 'is', 'a', 'Global', 'Attr', ')', '.'] | train | https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/acdd.py#L203-L219 |
1,445 | enkore/i3pystatus | i3pystatus/core/color.py | ColorRangeModule.get_hex_color_range | def get_hex_color_range(start_color, end_color, quantity):
"""
Generates a list of quantity Hex colors from start_color to end_color.
:param start_color: Hex or plain English color for start of range
:param end_color: Hex or plain English color for end of range
:param quantity: Number of colours to return
:return: A list of Hex color values
"""
raw_colors = [c.hex for c in list(Color(start_color).range_to(Color(end_color), quantity))]
colors = []
for color in raw_colors:
# i3bar expects the full Hex value but for some colors the colour
# module only returns partial values. So we need to convert these colors to the full
# Hex value.
if len(color) == 4:
fixed_color = "#"
for c in color[1:]:
fixed_color += c * 2
colors.append(fixed_color)
else:
colors.append(color)
return colors | python | def get_hex_color_range(start_color, end_color, quantity):
"""
Generates a list of quantity Hex colors from start_color to end_color.
:param start_color: Hex or plain English color for start of range
:param end_color: Hex or plain English color for end of range
:param quantity: Number of colours to return
:return: A list of Hex color values
"""
raw_colors = [c.hex for c in list(Color(start_color).range_to(Color(end_color), quantity))]
colors = []
for color in raw_colors:
# i3bar expects the full Hex value but for some colors the colour
# module only returns partial values. So we need to convert these colors to the full
# Hex value.
if len(color) == 4:
fixed_color = "#"
for c in color[1:]:
fixed_color += c * 2
colors.append(fixed_color)
else:
colors.append(color)
return colors | ['def', 'get_hex_color_range', '(', 'start_color', ',', 'end_color', ',', 'quantity', ')', ':', 'raw_colors', '=', '[', 'c', '.', 'hex', 'for', 'c', 'in', 'list', '(', 'Color', '(', 'start_color', ')', '.', 'range_to', '(', 'Color', '(', 'end_color', ')', ',', 'quantity', ')', ')', ']', 'colors', '=', '[', ']', 'for', 'color', 'in', 'raw_colors', ':', '# i3bar expects the full Hex value but for some colors the colour', '# module only returns partial values. So we need to convert these colors to the full', '# Hex value.', 'if', 'len', '(', 'color', ')', '==', '4', ':', 'fixed_color', '=', '"#"', 'for', 'c', 'in', 'color', '[', '1', ':', ']', ':', 'fixed_color', '+=', 'c', '*', '2', 'colors', '.', 'append', '(', 'fixed_color', ')', 'else', ':', 'colors', '.', 'append', '(', 'color', ')', 'return', 'colors'] | Generates a list of quantity Hex colors from start_color to end_color.
:param start_color: Hex or plain English color for start of range
:param end_color: Hex or plain English color for end of range
:param quantity: Number of colours to return
:return: A list of Hex color values | ['Generates', 'a', 'list', 'of', 'quantity', 'Hex', 'colors', 'from', 'start_color', 'to', 'end_color', '.'] | train | https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/core/color.py#L15-L38 |
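Since the method takes no self it behaves as a static helper; a usage sketch, where the import path follows the file location above and the colour package the module builds on must be installed:
from i3pystatus.core.color import ColorRangeModule

colors = ColorRangeModule.get_hex_color_range('#00FF00', '#FF0000', 10)
print(colors[0], colors[-1])   # ten full six-digit hex values from green to red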
1,446 | tonybaloney/retox | retox/ui.py | VirtualEnvironmentFrame.start | def start(self, activity, action):
'''
Mark an action as started
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action`
'''
try:
self._start_action(activity, action)
except ValueError:
retox_log.debug("Could not find action %s in env %s" % (activity, self.name))
self.refresh() | python | def start(self, activity, action):
'''
Mark an action as started
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action`
'''
try:
self._start_action(activity, action)
except ValueError:
retox_log.debug("Could not find action %s in env %s" % (activity, self.name))
self.refresh() | ['def', 'start', '(', 'self', ',', 'activity', ',', 'action', ')', ':', 'try', ':', 'self', '.', '_start_action', '(', 'activity', ',', 'action', ')', 'except', 'ValueError', ':', 'retox_log', '.', 'debug', '(', '"Could not find action %s in env %s"', '%', '(', 'activity', ',', 'self', '.', 'name', ')', ')', 'self', '.', 'refresh', '(', ')'] | Mark an action as started
:param activity: The virtualenv activity name
:type activity: ``str``
:param action: The virtualenv action
:type action: :class:`tox.session.Action` | ['Mark', 'an', 'action', 'as', 'started'] | train | https://github.com/tonybaloney/retox/blob/4635e31001d2ac083423f46766249ac8daca7c9c/retox/ui.py#L233-L247 |
1,447 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray._get_index_nd | def _get_index_nd(self, key):
"""Returns an index array for use in scatter_nd and gather_nd."""
def _is_advanced_index(index):
"""The definition of advanced index here includes integers as well, while
integers are considered as basic index type when the key contains only
slices and integers."""
return not isinstance(index, py_slice)
if isinstance(key, (NDArray, np.ndarray, list, integer_types, py_slice)):
key = (key,)
assert isinstance(key, tuple),\
'index=%s must be a NDArray, or np.ndarray, or list, or tuple ' \
' type to use advanced indexing, received type=%s' % (str(key), str(type(key)))
assert len(key) > 0, "Cannot slice with empty indices"
shape = self.shape
assert len(shape) >= len(key),\
"Slicing dimensions exceeds array dimensions, %d vs %d" % (len(key), len(shape))
indices = []
dtype = 'int32' # index data type passed to gather_nd op
need_broadcast = (len(key) != 1)
advanced_indices = [] # include list, NDArray, np.ndarray, integer
basic_indices = [] # include only slices
advanced_index_bshape = None # final advanced index shape
for i, idx_i in enumerate(key):
is_advanced_index = True
if isinstance(idx_i, (np.ndarray, list, tuple)):
idx_i = array(idx_i, ctx=self.context, dtype=dtype)
advanced_indices.append(i)
elif isinstance(idx_i, py_slice):
start, stop, step = _get_index_range(idx_i.start, idx_i.stop, shape[i], idx_i.step)
idx_i = arange(start, stop, step, ctx=self.context, dtype=dtype)
basic_indices.append(i)
is_advanced_index = False
elif isinstance(idx_i, integer_types):
start, stop, step = _get_index_range(idx_i, idx_i+1, shape[i], 1)
idx_i = arange(start, stop, step, ctx=self.context, dtype=dtype)
advanced_indices.append(i)
elif isinstance(idx_i, NDArray):
if dtype != idx_i.dtype:
idx_i = idx_i.astype(dtype)
advanced_indices.append(i)
else:
raise IndexError('Indexing NDArray with index=%s of type=%s is not supported'
% (str(key), str(type(key))))
if is_advanced_index:
if advanced_index_bshape is None:
advanced_index_bshape = idx_i.shape
elif advanced_index_bshape != idx_i.shape:
need_broadcast = True
advanced_index_bshape = _get_broadcast_shape(advanced_index_bshape, idx_i.shape)
indices.append(idx_i)
# Get final index shape for gather_nd. See the following reference
# for determining the output array shape.
# https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#combining-advanced-and-basic-indexing # pylint: disable=line-too-long
if len(advanced_indices) == 0:
raise ValueError('Advanced index tuple must contain at least one of the following types:'
' list, tuple, NDArray, np.ndarray, integer, received index=%s' % key)
# determine the output array's shape by checking whether advanced_indices are all adjacent
# or separated by slices
advanced_indices_adjacent = True
for i in range(0, len(advanced_indices)-1):
if advanced_indices[i] + 1 != advanced_indices[i+1]:
advanced_indices_adjacent = False
break
index_bshape_list = [] # index broadcasted shape
if advanced_indices_adjacent:
for i in range(0, advanced_indices[0]):
index_bshape_list.extend(indices[i].shape)
if not need_broadcast and indices[i].shape != advanced_index_bshape:
need_broadcast = True
index_bshape_list.extend(advanced_index_bshape)
for i in range(advanced_indices[-1]+1, len(indices)):
if not need_broadcast and indices[i].shape != advanced_index_bshape:
need_broadcast = True
index_bshape_list.extend(indices[i].shape)
else:
index_bshape_list.extend(advanced_index_bshape)
for i in basic_indices:
index_bshape_list.extend(indices[i].shape)
if not need_broadcast and indices[i].shape != advanced_index_bshape:
need_broadcast = True
index_bshape = tuple(index_bshape_list)
# Need to broadcast all ndarrays in indices to the final shape.
# For example, suppose an array has shape=(5, 6, 7, 8) and
# key=(slice(1, 5), [[1, 2]], slice(2, 5), [1]).
# Since key[1] and key[3] are two advanced indices here and they are
# separated by basic indices key[0] and key[2], the output shape
# is (1, 2, 4, 3), where the first two elements come from the shape
# that key[1] and key[3] should broadcast to, which is (1, 2), and
# the last two elements come from the shape of two basic indices.
# In order to broadcast all basic and advanced indices to the output shape,
# we need to reshape them based on their axis. For example, to broadcast key[0],
# with shape=(4,), we first need to reshape it into (1, 1, 4, 1), and then
# broadcast the reshaped array to (1, 2, 4, 3); to broadcast key[1], we first
# reshape it into (1, 2, 1, 1), then broadcast the reshaped array to (1, 2, 4, 3).
if need_broadcast:
broadcasted_indices = []
idx_rshape = [1] * len(index_bshape)
if advanced_indices_adjacent:
advanced_index_bshape_start = advanced_indices[0] # start index of advanced_index_bshape in index_shape
advanced_index_bshape_stop = advanced_index_bshape_start + len(advanced_index_bshape)
for i, idx in enumerate(key):
if _is_advanced_index(idx):
k = advanced_index_bshape_stop
# find the reshaped shape for indices[i]
for dim_size in indices[i].shape[::-1]:
k -= 1
idx_rshape[k] = dim_size
else:
if i < advanced_indices[0]: # slice is on the left side of advanced indices
idx_rshape[i] = indices[i].shape[0]
elif i > advanced_indices[-1]: # slice is on the right side of advanced indices
idx_rshape[i-len(key)] = indices[i].shape[0]
else:
raise ValueError('basic index i=%d cannot be between advanced index i=%d and i=%d'
% (i, advanced_indices[0], advanced_indices[-1]))
# broadcast current index to the final shape
broadcasted_indices.append(indices[i].reshape(tuple(idx_rshape)).broadcast_to(index_bshape))
# reset idx_rshape to ones
for j, _ in enumerate(idx_rshape):
idx_rshape[j] = 1
else:
basic_index_offset = len(advanced_index_bshape)
for i, idx in enumerate(key):
if _is_advanced_index(idx):
k = len(advanced_index_bshape)
for dim_size in indices[i].shape[::-1]:
k -= 1
idx_rshape[k] = dim_size
else:
idx_rshape[basic_index_offset] = indices[i].shape[0]
basic_index_offset += 1
# broadcast current index to the final shape
broadcasted_indices.append(indices[i].reshape(tuple(idx_rshape)).broadcast_to(index_bshape))
# reset idx_rshape to ones
for j, _ in enumerate(idx_rshape):
idx_rshape[j] = 1
indices = broadcasted_indices
return op.stack(*indices) | python | def _get_index_nd(self, key):
"""Returns an index array for use in scatter_nd and gather_nd."""
def _is_advanced_index(index):
"""The definition of advanced index here includes integers as well, while
integers are considered as basic index type when the key contains only
slices and integers."""
return not isinstance(index, py_slice)
if isinstance(key, (NDArray, np.ndarray, list, integer_types, py_slice)):
key = (key,)
assert isinstance(key, tuple),\
'index=%s must be a NDArray, or np.ndarray, or list, or tuple ' \
' type to use advanced indexing, received type=%s' % (str(key), str(type(key)))
assert len(key) > 0, "Cannot slice with empty indices"
shape = self.shape
assert len(shape) >= len(key),\
"Slicing dimensions exceeds array dimensions, %d vs %d" % (len(key), len(shape))
indices = []
dtype = 'int32' # index data type passed to gather_nd op
need_broadcast = (len(key) != 1)
advanced_indices = [] # include list, NDArray, np.ndarray, integer
basic_indices = [] # include only slices
advanced_index_bshape = None # final advanced index shape
for i, idx_i in enumerate(key):
is_advanced_index = True
if isinstance(idx_i, (np.ndarray, list, tuple)):
idx_i = array(idx_i, ctx=self.context, dtype=dtype)
advanced_indices.append(i)
elif isinstance(idx_i, py_slice):
start, stop, step = _get_index_range(idx_i.start, idx_i.stop, shape[i], idx_i.step)
idx_i = arange(start, stop, step, ctx=self.context, dtype=dtype)
basic_indices.append(i)
is_advanced_index = False
elif isinstance(idx_i, integer_types):
start, stop, step = _get_index_range(idx_i, idx_i+1, shape[i], 1)
idx_i = arange(start, stop, step, ctx=self.context, dtype=dtype)
advanced_indices.append(i)
elif isinstance(idx_i, NDArray):
if dtype != idx_i.dtype:
idx_i = idx_i.astype(dtype)
advanced_indices.append(i)
else:
raise IndexError('Indexing NDArray with index=%s of type=%s is not supported'
% (str(key), str(type(key))))
if is_advanced_index:
if advanced_index_bshape is None:
advanced_index_bshape = idx_i.shape
elif advanced_index_bshape != idx_i.shape:
need_broadcast = True
advanced_index_bshape = _get_broadcast_shape(advanced_index_bshape, idx_i.shape)
indices.append(idx_i)
# Get final index shape for gather_nd. See the following reference
# for determining the output array shape.
# https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#combining-advanced-and-basic-indexing # pylint: disable=line-too-long
if len(advanced_indices) == 0:
raise ValueError('Advanced index tuple must contain at least one of the following types:'
' list, tuple, NDArray, np.ndarray, integer, received index=%s' % key)
# determine the output array's shape by checking whether advanced_indices are all adjacent
# or separated by slices
advanced_indices_adjacent = True
for i in range(0, len(advanced_indices)-1):
if advanced_indices[i] + 1 != advanced_indices[i+1]:
advanced_indices_adjacent = False
break
index_bshape_list = [] # index broadcasted shape
if advanced_indices_adjacent:
for i in range(0, advanced_indices[0]):
index_bshape_list.extend(indices[i].shape)
if not need_broadcast and indices[i].shape != advanced_index_bshape:
need_broadcast = True
index_bshape_list.extend(advanced_index_bshape)
for i in range(advanced_indices[-1]+1, len(indices)):
if not need_broadcast and indices[i].shape != advanced_index_bshape:
need_broadcast = True
index_bshape_list.extend(indices[i].shape)
else:
index_bshape_list.extend(advanced_index_bshape)
for i in basic_indices:
index_bshape_list.extend(indices[i].shape)
if not need_broadcast and indices[i].shape != advanced_index_bshape:
need_broadcast = True
index_bshape = tuple(index_bshape_list)
# Need to broadcast all ndarrays in indices to the final shape.
# For example, suppose an array has shape=(5, 6, 7, 8) and
# key=(slice(1, 5), [[1, 2]], slice(2, 5), [1]).
# Since key[1] and key[3] are two advanced indices here and they are
# separated by basic indices key[0] and key[2], the output shape
# is (1, 2, 4, 3), where the first two elements come from the shape
# that key[1] and key[3] should broadcast to, which is (1, 2), and
# the last two elements come from the shape of two basic indices.
# In order to broadcast all basic and advanced indices to the output shape,
# we need to reshape them based on their axis. For example, to broadcast key[0],
# with shape=(4,), we first need to reshape it into (1, 1, 4, 1), and then
# broadcast the reshaped array to (1, 2, 4, 3); to broadcast key[1], we first
# reshape it into (1, 2, 1, 1), then broadcast the reshaped array to (1, 2, 4, 3).
if need_broadcast:
broadcasted_indices = []
idx_rshape = [1] * len(index_bshape)
if advanced_indices_adjacent:
advanced_index_bshape_start = advanced_indices[0] # start index of advanced_index_bshape in index_shape
advanced_index_bshape_stop = advanced_index_bshape_start + len(advanced_index_bshape)
for i, idx in enumerate(key):
if _is_advanced_index(idx):
k = advanced_index_bshape_stop
# find the reshaped shape for indices[i]
for dim_size in indices[i].shape[::-1]:
k -= 1
idx_rshape[k] = dim_size
else:
if i < advanced_indices[0]: # slice is on the left side of advanced indices
idx_rshape[i] = indices[i].shape[0]
elif i > advanced_indices[-1]: # slice is on the right side of advanced indices
idx_rshape[i-len(key)] = indices[i].shape[0]
else:
raise ValueError('basic index i=%d cannot be between advanced index i=%d and i=%d'
% (i, advanced_indices[0], advanced_indices[-1]))
# broadcast current index to the final shape
broadcasted_indices.append(indices[i].reshape(tuple(idx_rshape)).broadcast_to(index_bshape))
# reset idx_rshape to ones
for j, _ in enumerate(idx_rshape):
idx_rshape[j] = 1
else:
basic_index_offset = len(advanced_index_bshape)
for i, idx in enumerate(key):
if _is_advanced_index(idx):
k = len(advanced_index_bshape)
for dim_size in indices[i].shape[::-1]:
k -= 1
idx_rshape[k] = dim_size
else:
idx_rshape[basic_index_offset] = indices[i].shape[0]
basic_index_offset += 1
# broadcast current index to the final shape
broadcasted_indices.append(indices[i].reshape(tuple(idx_rshape)).broadcast_to(index_bshape))
# reset idx_rshape to ones
for j, _ in enumerate(idx_rshape):
idx_rshape[j] = 1
indices = broadcasted_indices
return op.stack(*indices) | ['def', '_get_index_nd', '(', 'self', ',', 'key', ')', ':', 'def', '_is_advanced_index', '(', 'index', ')', ':', '"""The definition of advanced index here includes integers as well, while\n integers are considered as basic index type when the key contains only\n slices and integers."""', 'return', 'not', 'isinstance', '(', 'index', ',', 'py_slice', ')', 'if', 'isinstance', '(', 'key', ',', '(', 'NDArray', ',', 'np', '.', 'ndarray', ',', 'list', ',', 'integer_types', ',', 'py_slice', ')', ')', ':', 'key', '=', '(', 'key', ',', ')', 'assert', 'isinstance', '(', 'key', ',', 'tuple', ')', ',', "'index=%s must be a NDArray, or np.ndarray, or list, or tuple '", "' type to use advanced indexing, received type=%s'", '%', '(', 'str', '(', 'key', ')', ',', 'str', '(', 'type', '(', 'key', ')', ')', ')', 'assert', 'len', '(', 'key', ')', '>', '0', ',', '"Cannot slice with empty indices"', 'shape', '=', 'self', '.', 'shape', 'assert', 'len', '(', 'shape', ')', '>=', 'len', '(', 'key', ')', ',', '"Slicing dimensions exceeds array dimensions, %d vs %d"', '%', '(', 'len', '(', 'key', ')', ',', 'len', '(', 'shape', ')', ')', 'indices', '=', '[', ']', 'dtype', '=', "'int32'", '# index data type passed to gather_nd op', 'need_broadcast', '=', '(', 'len', '(', 'key', ')', '!=', '1', ')', 'advanced_indices', '=', '[', ']', '# include list, NDArray, np.ndarray, integer', 'basic_indices', '=', '[', ']', '# include only slices', 'advanced_index_bshape', '=', 'None', '# final advanced index shape', 'for', 'i', ',', 'idx_i', 'in', 'enumerate', '(', 'key', ')', ':', 'is_advanced_index', '=', 'True', 'if', 'isinstance', '(', 'idx_i', ',', '(', 'np', '.', 'ndarray', ',', 'list', ',', 'tuple', ')', ')', ':', 'idx_i', '=', 'array', '(', 'idx_i', ',', 'ctx', '=', 'self', '.', 'context', ',', 'dtype', '=', 'dtype', ')', 'advanced_indices', '.', 'append', '(', 'i', ')', 'elif', 'isinstance', '(', 'idx_i', ',', 'py_slice', ')', ':', 'start', ',', 'stop', ',', 'step', '=', '_get_index_range', '(', 'idx_i', '.', 'start', ',', 'idx_i', '.', 'stop', ',', 'shape', '[', 'i', ']', ',', 'idx_i', '.', 'step', ')', 'idx_i', '=', 'arange', '(', 'start', ',', 'stop', ',', 'step', ',', 'ctx', '=', 'self', '.', 'context', ',', 'dtype', '=', 'dtype', ')', 'basic_indices', '.', 'append', '(', 'i', ')', 'is_advanced_index', '=', 'False', 'elif', 'isinstance', '(', 'idx_i', ',', 'integer_types', ')', ':', 'start', ',', 'stop', ',', 'step', '=', '_get_index_range', '(', 'idx_i', ',', 'idx_i', '+', '1', ',', 'shape', '[', 'i', ']', ',', '1', ')', 'idx_i', '=', 'arange', '(', 'start', ',', 'stop', ',', 'step', ',', 'ctx', '=', 'self', '.', 'context', ',', 'dtype', '=', 'dtype', ')', 'advanced_indices', '.', 'append', '(', 'i', ')', 'elif', 'isinstance', '(', 'idx_i', ',', 'NDArray', ')', ':', 'if', 'dtype', '!=', 'idx_i', '.', 'dtype', ':', 'idx_i', '=', 'idx_i', '.', 'astype', '(', 'dtype', ')', 'advanced_indices', '.', 'append', '(', 'i', ')', 'else', ':', 'raise', 'IndexError', '(', "'Indexing NDArray with index=%s of type=%s is not supported'", '%', '(', 'str', '(', 'key', ')', ',', 'str', '(', 'type', '(', 'key', ')', ')', ')', ')', 'if', 'is_advanced_index', ':', 'if', 'advanced_index_bshape', 'is', 'None', ':', 'advanced_index_bshape', '=', 'idx_i', '.', 'shape', 'elif', 'advanced_index_bshape', '!=', 'idx_i', '.', 'shape', ':', 'need_broadcast', '=', 'True', 'advanced_index_bshape', '=', '_get_broadcast_shape', '(', 'advanced_index_bshape', ',', 'idx_i', '.', 'shape', ')', 'indices', '.', 'append', '(', 
'idx_i', ')', '# Get final index shape for gather_nd. See the following reference', '# for determining the output array shape.', '# https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#combining-advanced-and-basic-indexing # pylint: disable=line-too-long', 'if', 'len', '(', 'advanced_indices', ')', '==', '0', ':', 'raise', 'ValueError', '(', "'Advanced index tuple must contain at least one of the following types:'", "' list, tuple, NDArray, np.ndarray, integer, received index=%s'", '%', 'key', ')', "# determine the output array's shape by checking whether advanced_indices are all adjacent", '# or separated by slices', 'advanced_indices_adjacent', '=', 'True', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'advanced_indices', ')', '-', '1', ')', ':', 'if', 'advanced_indices', '[', 'i', ']', '+', '1', '!=', 'advanced_indices', '[', 'i', '+', '1', ']', ':', 'advanced_indices_adjacent', '=', 'False', 'break', 'index_bshape_list', '=', '[', ']', '# index broadcasted shape', 'if', 'advanced_indices_adjacent', ':', 'for', 'i', 'in', 'range', '(', '0', ',', 'advanced_indices', '[', '0', ']', ')', ':', 'index_bshape_list', '.', 'extend', '(', 'indices', '[', 'i', ']', '.', 'shape', ')', 'if', 'not', 'need_broadcast', 'and', 'indices', '[', 'i', ']', '.', 'shape', '!=', 'advanced_index_bshape', ':', 'need_broadcast', '=', 'True', 'index_bshape_list', '.', 'extend', '(', 'advanced_index_bshape', ')', 'for', 'i', 'in', 'range', '(', 'advanced_indices', '[', '-', '1', ']', '+', '1', ',', 'len', '(', 'indices', ')', ')', ':', 'if', 'not', 'need_broadcast', 'and', 'indices', '[', 'i', ']', '.', 'shape', '!=', 'advanced_index_bshape', ':', 'need_broadcast', '=', 'True', 'index_bshape_list', '.', 'extend', '(', 'indices', '[', 'i', ']', '.', 'shape', ')', 'else', ':', 'index_bshape_list', '.', 'extend', '(', 'advanced_index_bshape', ')', 'for', 'i', 'in', 'basic_indices', ':', 'index_bshape_list', '.', 'extend', '(', 'indices', '[', 'i', ']', '.', 'shape', ')', 'if', 'not', 'need_broadcast', 'and', 'indices', '[', 'i', ']', '.', 'shape', '!=', 'advanced_index_bshape', ':', 'need_broadcast', '=', 'True', 'index_bshape', '=', 'tuple', '(', 'index_bshape_list', ')', '# Need to broadcast all ndarrays in indices to the final shape.', '# For example, suppose an array has shape=(5, 6, 7, 8) and', '# key=(slice(1, 5), [[1, 2]], slice(2, 5), [1]).', '# Since key[1] and key[3] are two advanced indices here and they are', '# separated by basic indices key[0] and key[2], the output shape', '# is (1, 2, 4, 3), where the first two elements come from the shape', '# that key[1] and key[3] should broadcast to, which is (1, 2), and', '# the last two elements come from the shape of two basic indices.', '# In order to broadcast all basic and advanced indices to the output shape,', '# we need to reshape them based on their axis. 
For example, to broadcast key[0],', '# with shape=(4,), we first need to reshape it into (1, 1, 4, 1), and then', '# broadcast the reshaped array to (1, 2, 4, 3); to broadcast key[1], we first', '# reshape it into (1, 2, 1, 1), then broadcast the reshaped array to (1, 2, 4, 3).', 'if', 'need_broadcast', ':', 'broadcasted_indices', '=', '[', ']', 'idx_rshape', '=', '[', '1', ']', '*', 'len', '(', 'index_bshape', ')', 'if', 'advanced_indices_adjacent', ':', 'advanced_index_bshape_start', '=', 'advanced_indices', '[', '0', ']', '# start index of advanced_index_bshape in index_shape', 'advanced_index_bshape_stop', '=', 'advanced_index_bshape_start', '+', 'len', '(', 'advanced_index_bshape', ')', 'for', 'i', ',', 'idx', 'in', 'enumerate', '(', 'key', ')', ':', 'if', '_is_advanced_index', '(', 'idx', ')', ':', 'k', '=', 'advanced_index_bshape_stop', '# find the reshaped shape for indices[i]', 'for', 'dim_size', 'in', 'indices', '[', 'i', ']', '.', 'shape', '[', ':', ':', '-', '1', ']', ':', 'k', '-=', '1', 'idx_rshape', '[', 'k', ']', '=', 'dim_size', 'else', ':', 'if', 'i', '<', 'advanced_indices', '[', '0', ']', ':', '# slice is on the left side of advanced indices', 'idx_rshape', '[', 'i', ']', '=', 'indices', '[', 'i', ']', '.', 'shape', '[', '0', ']', 'elif', 'i', '>', 'advanced_indices', '[', '-', '1', ']', ':', '# slice is on the right side of advanced indices', 'idx_rshape', '[', 'i', '-', 'len', '(', 'key', ')', ']', '=', 'indices', '[', 'i', ']', '.', 'shape', '[', '0', ']', 'else', ':', 'raise', 'ValueError', '(', "'basic index i=%d cannot be between advanced index i=%d and i=%d'", '%', '(', 'i', ',', 'advanced_indices', '[', '0', ']', ',', 'advanced_indices', '[', '-', '1', ']', ')', ')', '# broadcast current index to the final shape', 'broadcasted_indices', '.', 'append', '(', 'indices', '[', 'i', ']', '.', 'reshape', '(', 'tuple', '(', 'idx_rshape', ')', ')', '.', 'broadcast_to', '(', 'index_bshape', ')', ')', '# reset idx_rshape to ones', 'for', 'j', ',', '_', 'in', 'enumerate', '(', 'idx_rshape', ')', ':', 'idx_rshape', '[', 'j', ']', '=', '1', 'else', ':', 'basic_index_offset', '=', 'len', '(', 'advanced_index_bshape', ')', 'for', 'i', ',', 'idx', 'in', 'enumerate', '(', 'key', ')', ':', 'if', '_is_advanced_index', '(', 'idx', ')', ':', 'k', '=', 'len', '(', 'advanced_index_bshape', ')', 'for', 'dim_size', 'in', 'indices', '[', 'i', ']', '.', 'shape', '[', ':', ':', '-', '1', ']', ':', 'k', '-=', '1', 'idx_rshape', '[', 'k', ']', '=', 'dim_size', 'else', ':', 'idx_rshape', '[', 'basic_index_offset', ']', '=', 'indices', '[', 'i', ']', '.', 'shape', '[', '0', ']', 'basic_index_offset', '+=', '1', '# broadcast current index to the final shape', 'broadcasted_indices', '.', 'append', '(', 'indices', '[', 'i', ']', '.', 'reshape', '(', 'tuple', '(', 'idx_rshape', ')', ')', '.', 'broadcast_to', '(', 'index_bshape', ')', ')', '# reset idx_rshape to ones', 'for', 'j', ',', '_', 'in', 'enumerate', '(', 'idx_rshape', ')', ':', 'idx_rshape', '[', 'j', ']', '=', '1', 'indices', '=', 'broadcasted_indices', 'return', 'op', '.', 'stack', '(', '*', 'indices', ')'] | Returns an index array for use in scatter_nd and gather_nd. | ['Returns', 'an', 'index', 'array', 'for', 'use', 'in', 'scatter_nd', 'and', 'gather_nd', '.'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L518-L662 |
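The shape rule walked through in the comments above (key = (slice(1, 5), [[1, 2]], slice(2, 5), [1]) on a (5, 6, 7, 8) array yielding a (1, 2, 4, 3) result) matches NumPy's documented combined basic/advanced indexing behaviour, so it can be cross-checked with a short, illustrative snippet:

import numpy as np

# Advanced indices [[1, 2]] and [1] are separated by basic slices, so their broadcast
# shape (1, 2) moves to the front and the sliced dimensions (lengths 4 and 3) follow.
a = np.arange(5 * 6 * 7 * 8).reshape(5, 6, 7, 8)
out = a[1:5, [[1, 2]], 2:5, [1]]
print(out.shape)  # (1, 2, 4, 3)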
1,448 | tariqdaouda/pyGeno | pyGeno/tools/parsers/FastqTools.py | FastqFile.newEntry | def newEntry(self, ident = "", seq = "", plus = "", qual = "") :
"""Appends an empty entry at the end of the CSV and returns it"""
e = FastqEntry()
self.data.append(e)
return e | python | def newEntry(self, ident = "", seq = "", plus = "", qual = "") :
"""Appends an empty entry at the end of the CSV and returns it"""
e = FastqEntry()
self.data.append(e)
return e | ['def', 'newEntry', '(', 'self', ',', 'ident', '=', '""', ',', 'seq', '=', '""', ',', 'plus', '=', '""', ',', 'qual', '=', '""', ')', ':', 'e', '=', 'FastqEntry', '(', ')', 'self', '.', 'data', '.', 'append', '(', 'e', ')', 'return', 'e'] | Appends an empty entry at the end of the CSV and returns it | ['Appends', 'an', 'empty', 'entry', 'at', 'the', 'end', 'of', 'the', 'CSV', 'and', 'returns', 'it'] | train | https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/tools/parsers/FastqTools.py#L76-L80 |
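A minimal usage sketch, assuming pyGeno is installed and that a FastqFile can be constructed without arguments (an assumption — the constructor is not shown in this record):

from pyGeno.tools.parsers.FastqTools import FastqFile  # import path taken from the record

fq = FastqFile()       # assumed: an empty FastqFile object
entry = fq.newEntry()  # appends a blank FastqEntry to fq.data and returns it for filling in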
1,449 | materialsproject/pymatgen | pymatgen/util/num.py | monotonic | def monotonic(values, mode="<", atol=1.e-8):
"""
Returns False if values are not monotonic (decreasing|increasing).
mode is "<" for a decreasing sequence, ">" for an increasing sequence.
Two numbers are considered equal if they differ less than atol.
.. warning:
Not very efficient for large data sets.
>>> values = [1.2, 1.3, 1.4]
>>> monotonic(values, mode="<")
False
>>> monotonic(values, mode=">")
True
"""
if len(values) == 1:
return True
if mode == ">":
for i in range(len(values)-1):
v, vp = values[i], values[i+1]
if abs(vp - v) > atol and vp <= v:
return False
elif mode == "<":
for i in range(len(values)-1):
v, vp = values[i], values[i+1]
if abs(vp - v) > atol and vp >= v:
return False
else:
raise ValueError("Wrong mode %s" % str(mode))
return True | python | def monotonic(values, mode="<", atol=1.e-8):
"""
Returns False if values are not monotonic (decreasing|increasing).
mode is "<" for a decreasing sequence, ">" for an increasing sequence.
Two numbers are considered equal if they differ less than atol.
.. warning:
Not very efficient for large data sets.
>>> values = [1.2, 1.3, 1.4]
>>> monotonic(values, mode="<")
False
>>> monotonic(values, mode=">")
True
"""
if len(values) == 1:
return True
if mode == ">":
for i in range(len(values)-1):
v, vp = values[i], values[i+1]
if abs(vp - v) > atol and vp <= v:
return False
elif mode == "<":
for i in range(len(values)-1):
v, vp = values[i], values[i+1]
if abs(vp - v) > atol and vp >= v:
return False
else:
raise ValueError("Wrong mode %s" % str(mode))
return True | ['def', 'monotonic', '(', 'values', ',', 'mode', '=', '"<"', ',', 'atol', '=', '1.e-8', ')', ':', 'if', 'len', '(', 'values', ')', '==', '1', ':', 'return', 'True', 'if', 'mode', '==', '">"', ':', 'for', 'i', 'in', 'range', '(', 'len', '(', 'values', ')', '-', '1', ')', ':', 'v', ',', 'vp', '=', 'values', '[', 'i', ']', ',', 'values', '[', 'i', '+', '1', ']', 'if', 'abs', '(', 'vp', '-', 'v', ')', '>', 'atol', 'and', 'vp', '<=', 'v', ':', 'return', 'False', 'elif', 'mode', '==', '"<"', ':', 'for', 'i', 'in', 'range', '(', 'len', '(', 'values', ')', '-', '1', ')', ':', 'v', ',', 'vp', '=', 'values', '[', 'i', ']', ',', 'values', '[', 'i', '+', '1', ']', 'if', 'abs', '(', 'vp', '-', 'v', ')', '>', 'atol', 'and', 'vp', '>=', 'v', ':', 'return', 'False', 'else', ':', 'raise', 'ValueError', '(', '"Wrong mode %s"', '%', 'str', '(', 'mode', ')', ')', 'return', 'True'] | Returns False if values are not monotonic (decreasing|increasing).
mode is "<" for a decreasing sequence, ">" for an increasing sequence.
Two numbers are considered equal if they differ less than atol.
.. warning:
Not very efficient for large data sets.
>>> values = [1.2, 1.3, 1.4]
>>> monotonic(values, mode="<")
False
>>> monotonic(values, mode=">")
True | ['Returns', 'False', 'if', 'values', 'are', 'not', 'monotonic', '(', 'decreasing|increasing', ')', '.', 'mode', 'is', '<', 'for', 'a', 'decreasing', 'sequence', '>', 'for', 'an', 'increasing', 'sequence', '.', 'Two', 'numbers', 'are', 'considered', 'equal', 'if', 'they', 'differ', 'less', 'that', 'atol', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/util/num.py#L104-L137 |
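The doctest above covers the two modes; the illustrative sketch below (assuming monotonic is importable from pymatgen.util.num, the module path given in the record) shows the atol clause — a dip smaller than atol does not break monotonicity:

from pymatgen.util.num import monotonic

print(monotonic([1.0, 1.0 - 1e-10, 2.0], mode=">"))  # True: the 1e-10 dip is below the default atol=1e-8, so it counts as equality
print(monotonic([1.0, 0.5, 2.0], mode=">"))          # False: a real decrease fails the increasing check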
1,450 | mushkevych/scheduler | synergy/db/dao/job_dao.py | JobDao.run_query | def run_query(self, collection_name, query):
""" method runs query on a specified collection and return a list of filtered Job records """
cursor = self.ds.filter(collection_name, query)
return [Job.from_json(document) for document in cursor] | python | def run_query(self, collection_name, query):
""" method runs query on a specified collection and return a list of filtered Job records """
cursor = self.ds.filter(collection_name, query)
return [Job.from_json(document) for document in cursor] | ['def', 'run_query', '(', 'self', ',', 'collection_name', ',', 'query', ')', ':', 'cursor', '=', 'self', '.', 'ds', '.', 'filter', '(', 'collection_name', ',', 'query', ')', 'return', '[', 'Job', '.', 'from_json', '(', 'document', ')', 'for', 'document', 'in', 'cursor', ']'] | method runs query on a specified collection and return a list of filtered Job records | ['method', 'runs', 'query', 'on', 'a', 'specified', 'collection', 'and', 'return', 'a', 'list', 'of', 'filtered', 'Job', 'records'] | train | https://github.com/mushkevych/scheduler/blob/6740331360f49083c208085fb5a60ce80ebf418b/synergy/db/dao/job_dao.py#L94-L97 |
1,451 | python-diamond/Diamond | src/collectors/users/users.py | UsersCollector.get_default_config_help | def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help | python | def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help | ['def', 'get_default_config_help', '(', 'self', ')', ':', 'config_help', '=', 'super', '(', 'UsersCollector', ',', 'self', ')', '.', 'get_default_config_help', '(', ')', 'config_help', '.', 'update', '(', '{', '}', ')', 'return', 'config_help'] | Returns the default collector help text | ['Returns', 'the', 'default', 'collector', 'help', 'text'] | train | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/users/users.py#L29-L36 |
1,452 | chemlab/chemlab | chemlab/utils/pbc.py | noperiodic | def noperiodic(r_array, periodic, reference=None):
'''Rearrange the array of coordinates *r_array* in a way that doesn't
cross the periodic boundary.
Parameters
----------
r_array : :class:`numpy.ndarray`, (Nx3)
Array of 3D coordinates.
periodic: :class:`numpy.ndarray`, (3)
Periodic boundary dimensions.
reference: ``None`` or :class:`numpy.ndarray` (3)
The points will be moved to be in the periodic image centered on the reference.
If None, the first point will be taken as a reference
Returns
-------
A (N, 3) array of coordinates, all in the same periodic image.
Example
-------
>>> coordinates = np.array([[0.1, 0.0, 0.0], [0.9, 0.0, 0.0]])
>>> periodic = np.array([1, 1, 1])
>>> noperiodic(coordinates, periodic)
[[ 0.1, 0.0, 0.0],
[-0.1, 0.0, 0.0]]
'''
if reference is None:
center = r_array[0]
else:
center = reference
# Find the displacements
dr = (center - r_array)
drsign = np.sign(dr)
# Move things when the displacement is more than half the box size
tomove = np.abs(dr) >= periodic / 2.0
r_array[tomove] += (drsign * periodic)[tomove]
return r_array | python | def noperiodic(r_array, periodic, reference=None):
'''Rearrange the array of coordinates *r_array* in a way that doesn't
cross the periodic boundary.
Parameters
----------
r_array : :class:`numpy.ndarray`, (Nx3)
Array of 3D coordinates.
periodic: :class:`numpy.ndarray`, (3)
Periodic boundary dimensions.
reference: ``None`` or :class:`numpy.ndarray` (3)
The points will be moved to be in the periodic image centered on the reference.
If None, the first point will be taken as a reference
Returns
-------
A (N, 3) array of coordinates, all in the same periodic image.
Example
-------
>>> coordinates = np.array([[0.1, 0.0, 0.0], [0.9, 0.0, 0.0]])
>>> periodic = np.array([1, 1, 1])
>>> noperiodic(coordinates, periodic)
[[ 0.1, 0.0, 0.0],
[-0.1, 0.0, 0.0]]
'''
if reference is None:
center = r_array[0]
else:
center = reference
# Find the displacements
dr = (center - r_array)
drsign = np.sign(dr)
# Move things when the displacement is more than half the box size
tomove = np.abs(dr) >= periodic / 2.0
r_array[tomove] += (drsign * periodic)[tomove]
return r_array | ['def', 'noperiodic', '(', 'r_array', ',', 'periodic', ',', 'reference', '=', 'None', ')', ':', 'if', 'reference', 'is', 'None', ':', 'center', '=', 'r_array', '[', '0', ']', 'else', ':', 'center', '=', 'reference', '# Find the displacements', 'dr', '=', '(', 'center', '-', 'r_array', ')', 'drsign', '=', 'np', '.', 'sign', '(', 'dr', ')', '# Move things when the displacement is more than half the box size', 'tomove', '=', 'np', '.', 'abs', '(', 'dr', ')', '>=', 'periodic', '/', '2.0', 'r_array', '[', 'tomove', ']', '+=', '(', 'drsign', '*', 'periodic', ')', '[', 'tomove', ']', 'return', 'r_array'] | Rearrange the array of coordinates *r_array* in a way that doensn't
cross the periodic boundary.
Parameters
----------
r_array : :class:`numpy.ndarray`, (Nx3)
Array of 3D coordinates.
periodic: :class:`numpy.ndarray`, (3)
Periodic boundary dimensions.
reference: ``None`` or :class:`numpy.ndarray` (3)
The points will be moved to be in the periodic image centered on the reference.
If None, the first point will be taken as a reference
Returns
-------
A (N, 3) array of coordinates, all in the same periodic image.
Example
-------
>>> coordinates = np.array([[0.1, 0.0, 0.0], [0.9, 0.0, 0.0]])
>>> periodic = np.array([1, 1, 1])
>>> noperiodic(coordinates, periodic)
[[ 0.1, 0.0, 0.0],
[-0.1, 0.0, 0.0]] | ['Rearrange', 'the', 'array', 'of', 'coordinates', '*', 'r_array', '*', 'in', 'a', 'way', 'that', 'doensn', 't', 'cross', 'the', 'periodic', 'boundary', '.'] | train | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/utils/pbc.py#L34-L77 |
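The record's own example keeps the first point as the reference; the illustrative sketch below exercises the reference argument instead (assuming noperiodic is importable from chemlab.utils.pbc, the module path given in the record). Note that the input array is modified in place as well as returned:

import numpy as np
from chemlab.utils.pbc import noperiodic

coords = np.array([[0.1, 0.0, 0.0], [0.9, 0.0, 0.0]])
periodic = np.array([1.0, 1.0, 1.0])

# Center the periodic image on the second point rather than the first:
print(noperiodic(coords, periodic, reference=np.array([0.9, 0.0, 0.0])))
# [[ 1.1  0.   0. ]
#  [ 0.9  0.   0. ]]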
1,453 | ga4gh/ga4gh-server | ga4gh/server/gff3.py | Gff3Set._linkFeature | def _linkFeature(self, feature):
"""
Link a feature with its parents.
"""
parentNames = feature.attributes.get("Parent")
if parentNames is None:
self.roots.add(feature)
else:
for parentName in parentNames:
self._linkToParent(feature, parentName) | python | def _linkFeature(self, feature):
"""
Link a feature with its parents.
"""
parentNames = feature.attributes.get("Parent")
if parentNames is None:
self.roots.add(feature)
else:
for parentName in parentNames:
self._linkToParent(feature, parentName) | ['def', '_linkFeature', '(', 'self', ',', 'feature', ')', ':', 'parentNames', '=', 'feature', '.', 'attributes', '.', 'get', '(', '"Parent"', ')', 'if', 'parentNames', 'is', 'None', ':', 'self', '.', 'roots', '.', 'add', '(', 'feature', ')', 'else', ':', 'for', 'parentName', 'in', 'parentNames', ':', 'self', '.', '_linkToParent', '(', 'feature', ',', 'parentName', ')'] | Link a feature with its parents. | ['Link', 'a', 'feature', 'with', 'its', 'parents', '.'] | train | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/gff3.py#L165-L174 |
1,454 | ericsuh/dirichlet | dirichlet/simplex.py | _contour | def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
'''Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.'''
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1','2','3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
return plt.gcf() | python | def _contour(f, vertexlabels=None, contourfunc=None, **kwargs):
'''Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting.'''
if contourfunc is None:
contourfunc = plt.tricontour
if vertexlabels is None:
vertexlabels = ('1','2','3')
x = np.linspace(0, 1, 100)
y = np.linspace(0, np.sqrt(3.0)/2.0, 100)
points2d = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
points3d = barycentric(points2d)
valid = (points3d.sum(axis=1) == 1.0) & ((0.0 <= points3d).all(axis=1))
points2d = points2d[np.where(valid),:][0]
points3d = points3d[np.where(valid),:][0]
z = f(points3d)
contourfunc(points2d[:,0], points2d[:,1], z, **kwargs)
_draw_axes(vertexlabels)
return plt.gcf() | ['def', '_contour', '(', 'f', ',', 'vertexlabels', '=', 'None', ',', 'contourfunc', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'contourfunc', 'is', 'None', ':', 'contourfunc', '=', 'plt', '.', 'tricontour', 'if', 'vertexlabels', 'is', 'None', ':', 'vertexlabels', '=', '(', "'1'", ',', "'2'", ',', "'3'", ')', 'x', '=', 'np', '.', 'linspace', '(', '0', ',', '1', ',', '100', ')', 'y', '=', 'np', '.', 'linspace', '(', '0', ',', 'np', '.', 'sqrt', '(', '3.0', ')', '/', '2.0', ',', '100', ')', 'points2d', '=', 'np', '.', 'transpose', '(', '[', 'np', '.', 'tile', '(', 'x', ',', 'len', '(', 'y', ')', ')', ',', 'np', '.', 'repeat', '(', 'y', ',', 'len', '(', 'x', ')', ')', ']', ')', 'points3d', '=', 'barycentric', '(', 'points2d', ')', 'valid', '=', '(', 'points3d', '.', 'sum', '(', 'axis', '=', '1', ')', '==', '1.0', ')', '&', '(', '(', '0.0', '<=', 'points3d', ')', '.', 'all', '(', 'axis', '=', '1', ')', ')', 'points2d', '=', 'points2d', '[', 'np', '.', 'where', '(', 'valid', ')', ',', ':', ']', '[', '0', ']', 'points3d', '=', 'points3d', '[', 'np', '.', 'where', '(', 'valid', ')', ',', ':', ']', '[', '0', ']', 'z', '=', 'f', '(', 'points3d', ')', 'contourfunc', '(', 'points2d', '[', ':', ',', '0', ']', ',', 'points2d', '[', ':', ',', '1', ']', ',', 'z', ',', '*', '*', 'kwargs', ')', '_draw_axes', '(', 'vertexlabels', ')', 'return', 'plt', '.', 'gcf', '(', ')'] | Workhorse function for the above, where ``contourfunc`` is the contour
plotting function to use for actual plotting. | ['Workhorse', 'function', 'for', 'the', 'above', 'where', 'contourfunc', 'is', 'the', 'contour', 'plotting', 'function', 'to', 'use', 'for', 'actual', 'plotting', '.'] | train | https://github.com/ericsuh/dirichlet/blob/bf39a6d219348cbb4ed95dc195587a9c55c633b9/dirichlet/simplex.py#L96-L114 |
1,455 | hyperledger/indy-plenum | plenum/server/node.py | Node.handleOneNodeMsg | def handleOneNodeMsg(self, wrappedMsg):
"""
Validate and process one message from a node.
:param wrappedMsg: Tuple of message and the name of the node that sent
the message
"""
try:
vmsg = self.validateNodeMsg(wrappedMsg)
if vmsg:
logger.trace("{} msg validated {}".format(self, wrappedMsg),
extra={"tags": ["node-msg-validation"]})
self.unpackNodeMsg(*vmsg)
else:
logger.debug("{} invalidated msg {}".format(self, wrappedMsg),
extra={"tags": ["node-msg-validation"]})
except SuspiciousNode as ex:
self.reportSuspiciousNodeEx(ex)
except Exception as ex:
msg, frm = wrappedMsg
self.discard(msg, ex, logger.info) | python | def handleOneNodeMsg(self, wrappedMsg):
"""
Validate and process one message from a node.
:param wrappedMsg: Tuple of message and the name of the node that sent
the message
"""
try:
vmsg = self.validateNodeMsg(wrappedMsg)
if vmsg:
logger.trace("{} msg validated {}".format(self, wrappedMsg),
extra={"tags": ["node-msg-validation"]})
self.unpackNodeMsg(*vmsg)
else:
logger.debug("{} invalidated msg {}".format(self, wrappedMsg),
extra={"tags": ["node-msg-validation"]})
except SuspiciousNode as ex:
self.reportSuspiciousNodeEx(ex)
except Exception as ex:
msg, frm = wrappedMsg
self.discard(msg, ex, logger.info) | ['def', 'handleOneNodeMsg', '(', 'self', ',', 'wrappedMsg', ')', ':', 'try', ':', 'vmsg', '=', 'self', '.', 'validateNodeMsg', '(', 'wrappedMsg', ')', 'if', 'vmsg', ':', 'logger', '.', 'trace', '(', '"{} msg validated {}"', '.', 'format', '(', 'self', ',', 'wrappedMsg', ')', ',', 'extra', '=', '{', '"tags"', ':', '[', '"node-msg-validation"', ']', '}', ')', 'self', '.', 'unpackNodeMsg', '(', '*', 'vmsg', ')', 'else', ':', 'logger', '.', 'debug', '(', '"{} invalidated msg {}"', '.', 'format', '(', 'self', ',', 'wrappedMsg', ')', ',', 'extra', '=', '{', '"tags"', ':', '[', '"node-msg-validation"', ']', '}', ')', 'except', 'SuspiciousNode', 'as', 'ex', ':', 'self', '.', 'reportSuspiciousNodeEx', '(', 'ex', ')', 'except', 'Exception', 'as', 'ex', ':', 'msg', ',', 'frm', '=', 'wrappedMsg', 'self', '.', 'discard', '(', 'msg', ',', 'ex', ',', 'logger', '.', 'info', ')'] | Validate and process one message from a node.
:param wrappedMsg: Tuple of message and the name of the node that sent
the message | ['Validate', 'and', 'process', 'one', 'message', 'from', 'a', 'node', '.'] | train | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L1867-L1887 |
1,456 | rm-hull/luma.core | luma/core/sprite_system.py | framerate_regulator.effective_FPS | def effective_FPS(self):
"""
Calculates the effective frames-per-second - this should largely
correlate to the desired FPS supplied in the constructor, but no
guarantees are given.
:returns: The effective frame rate.
:rtype: float
"""
if self.start_time is None:
self.start_time = 0
elapsed = monotonic() - self.start_time
return self.called / elapsed | python | def effective_FPS(self):
"""
Calculates the effective frames-per-second - this should largely
correlate to the desired FPS supplied in the constructor, but no
guarantees are given.
:returns: The effective frame rate.
:rtype: float
"""
if self.start_time is None:
self.start_time = 0
elapsed = monotonic() - self.start_time
return self.called / elapsed | ['def', 'effective_FPS', '(', 'self', ')', ':', 'if', 'self', '.', 'start_time', 'is', 'None', ':', 'self', '.', 'start_time', '=', '0', 'elapsed', '=', 'monotonic', '(', ')', '-', 'self', '.', 'start_time', 'return', 'self', '.', 'called', '/', 'elapsed'] | Calculates the effective frames-per-second - this should largely
correlate to the desired FPS supplied in the constructor, but no
guarantees are given.
:returns: The effective frame rate.
:rtype: float | ['Calculates', 'the', 'effective', 'frames', '-', 'per', '-', 'second', '-', 'this', 'should', 'largely', 'correlate', 'to', 'the', 'desired', 'FPS', 'supplied', 'in', 'the', 'constructor', 'but', 'no', 'guarantees', 'are', 'given', '.'] | train | https://github.com/rm-hull/luma.core/blob/034b628fb304a01e77732a299c0b42e94d6443db/luma/core/sprite_system.py#L216-L228 |
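A minimal sketch of how effective_FPS is typically read back, assuming framerate_regulator is used as a per-frame context manager (its usual pattern in luma.core) and accepts an fps argument:

import time
from luma.core.sprite_system import framerate_regulator

regulator = framerate_regulator(fps=30)

for _ in range(60):
    with regulator:        # each with-block is counted as one rendered frame
        time.sleep(0.001)  # stand-in for the actual drawing work

print(regulator.effective_FPS())  # should land close to the requested 30 FPS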
1,457 | DLR-RM/RAFCON | source/rafcon/core/states/container_state.py | ContainerState._check_transition_target | def _check_transition_target(self, transition):
"""Checks the validity of a transition target
Checks whether the transition target is valid.
:param rafcon.core.transition.Transition transition: The transition to be checked
:return bool validity, str message: validity is True when the transition is valid, False otherwise. message gives
more information especially if the transition is not valid
"""
to_state_id = transition.to_state
to_outcome_id = transition.to_outcome
if to_state_id == self.state_id:
if to_outcome_id not in self.outcomes:
return False, "to_outcome is not existing"
else:
if to_state_id not in self.states:
return False, "to_state is not existing"
if to_outcome_id is not None:
return False, "to_outcome must be None as transition goes to child state"
return True, "valid" | python | def _check_transition_target(self, transition):
"""Checks the validity of a transition target
Checks whether the transition target is valid.
:param rafcon.core.transition.Transition transition: The transition to be checked
:return bool validity, str message: validity is True when the transition is valid, False otherwise. message gives
more information especially if the transition is not valid
"""
to_state_id = transition.to_state
to_outcome_id = transition.to_outcome
if to_state_id == self.state_id:
if to_outcome_id not in self.outcomes:
return False, "to_outcome is not existing"
else:
if to_state_id not in self.states:
return False, "to_state is not existing"
if to_outcome_id is not None:
return False, "to_outcome must be None as transition goes to child state"
return True, "valid" | ['def', '_check_transition_target', '(', 'self', ',', 'transition', ')', ':', 'to_state_id', '=', 'transition', '.', 'to_state', 'to_outcome_id', '=', 'transition', '.', 'to_outcome', 'if', 'to_state_id', '==', 'self', '.', 'state_id', ':', 'if', 'to_outcome_id', 'not', 'in', 'self', '.', 'outcomes', ':', 'return', 'False', ',', '"to_outcome is not existing"', 'else', ':', 'if', 'to_state_id', 'not', 'in', 'self', '.', 'states', ':', 'return', 'False', ',', '"to_state is not existing"', 'if', 'to_outcome_id', 'is', 'not', 'None', ':', 'return', 'False', ',', '"to_outcome must be None as transition goes to child state"', 'return', 'True', ',', '"valid"'] | Checks the validity of a transition target
Checks whether the transition target is valid.
:param rafcon.core.transition.Transition transition: The transition to be checked
:return bool validity, str message: validity is True when the transition is valid, False otherwise. message gives
more information especially if the transition is not valid | ['Checks', 'the', 'validity', 'of', 'a', 'transition', 'target'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1978-L2000 |
1,458 | EpistasisLab/tpot | tpot/builtins/feature_set_selector.py | FeatureSetSelector.fit | def fit(self, X, y=None):
"""Fit FeatureSetSelector for feature selection
Parameters
----------
X: array-like of shape (n_samples, n_features)
The training input samples.
y: array-like, shape (n_samples,)
The target values (integers that correspond to classes in classification, real numbers in regression).
Returns
-------
self: object
Returns a copy of the estimator
"""
subset_df = pd.read_csv(self.subset_list, header=0, index_col=0)
if isinstance(self.sel_subset, int):
self.sel_subset_name = subset_df.index[self.sel_subset]
elif isinstance(self.sel_subset, str):
self.sel_subset_name = self.sel_subset
else: # list or tuple
self.sel_subset_name = []
for s in self.sel_subset:
if isinstance(s, int):
self.sel_subset_name.append(subset_df.index[s])
else:
self.sel_subset_name.append(s)
sel_features = subset_df.loc[self.sel_subset_name, 'Features']
if not isinstance(sel_features, str):
sel_features = ";".join(sel_features.tolist())
sel_uniq_features = set(sel_features.split(';'))
if isinstance(X, pd.DataFrame): # use columns' names
self.feature_names = list(X.columns.values)
self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
self.feat_list_idx = [list(X.columns).index(feat_name) for feat_name in self.feat_list]
elif isinstance(X, np.ndarray): # use index
self.feature_names = list(range(X.shape[1]))
sel_uniq_features = [int(val) for val in sel_uniq_features]
self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
self.feat_list_idx = self.feat_list
if not len(self.feat_list):
raise ValueError('No feature is found on the subset list!')
return self | python | def fit(self, X, y=None):
"""Fit FeatureSetSelector for feature selection
Parameters
----------
X: array-like of shape (n_samples, n_features)
The training input samples.
y: array-like, shape (n_samples,)
The target values (integers that correspond to classes in classification, real numbers in regression).
Returns
-------
self: object
Returns a copy of the estimator
"""
subset_df = pd.read_csv(self.subset_list, header=0, index_col=0)
if isinstance(self.sel_subset, int):
self.sel_subset_name = subset_df.index[self.sel_subset]
elif isinstance(self.sel_subset, str):
self.sel_subset_name = self.sel_subset
else: # list or tuple
self.sel_subset_name = []
for s in self.sel_subset:
if isinstance(s, int):
self.sel_subset_name.append(subset_df.index[s])
else:
self.sel_subset_name.append(s)
sel_features = subset_df.loc[self.sel_subset_name, 'Features']
if not isinstance(sel_features, str):
sel_features = ";".join(sel_features.tolist())
sel_uniq_features = set(sel_features.split(';'))
if isinstance(X, pd.DataFrame): # use columns' names
self.feature_names = list(X.columns.values)
self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
self.feat_list_idx = [list(X.columns).index(feat_name) for feat_name in self.feat_list]
elif isinstance(X, np.ndarray): # use index
self.feature_names = list(range(X.shape[1]))
sel_uniq_features = [int(val) for val in sel_uniq_features]
self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
self.feat_list_idx = self.feat_list
if not len(self.feat_list):
raise ValueError('No feature is found on the subset list!')
return self | ['def', 'fit', '(', 'self', ',', 'X', ',', 'y', '=', 'None', ')', ':', 'subset_df', '=', 'pd', '.', 'read_csv', '(', 'self', '.', 'subset_list', ',', 'header', '=', '0', ',', 'index_col', '=', '0', ')', 'if', 'isinstance', '(', 'self', '.', 'sel_subset', ',', 'int', ')', ':', 'self', '.', 'sel_subset_name', '=', 'subset_df', '.', 'index', '[', 'self', '.', 'sel_subset', ']', 'elif', 'isinstance', '(', 'self', '.', 'sel_subset', ',', 'str', ')', ':', 'self', '.', 'sel_subset_name', '=', 'self', '.', 'sel_subset', 'else', ':', '# list or tuple', 'self', '.', 'sel_subset_name', '=', '[', ']', 'for', 's', 'in', 'self', '.', 'sel_subset', ':', 'if', 'isinstance', '(', 's', ',', 'int', ')', ':', 'self', '.', 'sel_subset_name', '.', 'append', '(', 'subset_df', '.', 'index', '[', 's', ']', ')', 'else', ':', 'self', '.', 'sel_subset_name', '.', 'append', '(', 's', ')', 'sel_features', '=', 'subset_df', '.', 'loc', '[', 'self', '.', 'sel_subset_name', ',', "'Features'", ']', 'if', 'not', 'isinstance', '(', 'sel_features', ',', 'str', ')', ':', 'sel_features', '=', '";"', '.', 'join', '(', 'sel_features', '.', 'tolist', '(', ')', ')', 'sel_uniq_features', '=', 'set', '(', 'sel_features', '.', 'split', '(', "';'", ')', ')', 'if', 'isinstance', '(', 'X', ',', 'pd', '.', 'DataFrame', ')', ':', "# use columns' names", 'self', '.', 'feature_names', '=', 'list', '(', 'X', '.', 'columns', '.', 'values', ')', 'self', '.', 'feat_list', '=', 'sorted', '(', 'list', '(', 'set', '(', 'sel_uniq_features', ')', '.', 'intersection', '(', 'set', '(', 'self', '.', 'feature_names', ')', ')', ')', ')', 'self', '.', 'feat_list_idx', '=', '[', 'list', '(', 'X', '.', 'columns', ')', '.', 'index', '(', 'feat_name', ')', 'for', 'feat_name', 'in', 'self', '.', 'feat_list', ']', 'elif', 'isinstance', '(', 'X', ',', 'np', '.', 'ndarray', ')', ':', '# use index', 'self', '.', 'feature_names', '=', 'list', '(', 'range', '(', 'X', '.', 'shape', '[', '1', ']', ')', ')', 'sel_uniq_features', '=', '[', 'int', '(', 'val', ')', 'for', 'val', 'in', 'sel_uniq_features', ']', 'self', '.', 'feat_list', '=', 'sorted', '(', 'list', '(', 'set', '(', 'sel_uniq_features', ')', '.', 'intersection', '(', 'set', '(', 'self', '.', 'feature_names', ')', ')', ')', ')', 'self', '.', 'feat_list_idx', '=', 'self', '.', 'feat_list', 'if', 'not', 'len', '(', 'self', '.', 'feat_list', ')', ':', 'raise', 'ValueError', '(', "'No feature is found on the subset list!'", ')', 'return', 'self'] | Fit FeatureSetSelector for feature selection
Parameters
----------
X: array-like of shape (n_samples, n_features)
The training input samples.
y: array-like, shape (n_samples,)
The target values (integers that correspond to classes in classification, real numbers in regression).
Returns
-------
self: object
Returns a copy of the estimator | ['Fit', 'FeatureSetSelector', 'for', 'feature', 'selection'] | train | https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/feature_set_selector.py#L66-L114 |
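A small end-to-end sketch of the subset-list mechanism described above. The CSV layout (an index column plus a 'Features' column of ';'-separated names) is inferred from the code, and the constructor arguments are assumed to mirror the attributes used in fit:

import numpy as np
import pandas as pd
from tpot.builtins import FeatureSetSelector  # assumed public import path

# A toy subset list: one row per named feature set.
pd.DataFrame({"Features": ["f0;f2", "f1;f3"]}, index=["set_a", "set_b"]).to_csv("subsets.csv")

X = pd.DataFrame(np.random.rand(10, 4), columns=["f0", "f1", "f2", "f3"])

selector = FeatureSetSelector(subset_list="subsets.csv", sel_subset="set_a")
selector.fit(X)
print(selector.feat_list)  # ['f0', 'f2']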
1,459 | numenta/htmresearch | projects/speech_commands/analyze_experiment.py | findOptimalResults | def findOptimalResults(expName, suite, outFile):
"""
Go through every experiment in the specified folder. For each experiment, find
the iteration with the best validation score, and return the metrics
associated with that iteration.
"""
writer = csv.writer(outFile)
headers = ["testAccuracy", "bgAccuracy", "maxTotalAccuracy", "experiment path"]
writer.writerow(headers)
info = []
print("\n================",expName,"=====================")
try:
# Retrieve the last totalCorrect from each experiment
# Print them sorted from best to worst
values, params = suite.get_values_fix_params(
expName, 0, "testerror", "last")
for p in params:
expPath = p["name"]
if not "results" in expPath:
expPath = os.path.join("results", expPath)
maxTestAccuracy, maxValidationAccuracy, maxBGAccuracy, maxIter, maxTotalAccuracy = bestScore(expPath, suite)
row = [maxTestAccuracy, maxBGAccuracy, maxTotalAccuracy, expPath]
info.append(row)
writer.writerow(row)
print(tabulate(info, headers=headers, tablefmt="grid"))
except:
print("Couldn't analyze experiment",expName) | python | def findOptimalResults(expName, suite, outFile):
"""
Go through every experiment in the specified folder. For each experiment, find
the iteration with the best validation score, and return the metrics
associated with that iteration.
"""
writer = csv.writer(outFile)
headers = ["testAccuracy", "bgAccuracy", "maxTotalAccuracy", "experiment path"]
writer.writerow(headers)
info = []
print("\n================",expName,"=====================")
try:
# Retrieve the last totalCorrect from each experiment
# Print them sorted from best to worst
values, params = suite.get_values_fix_params(
expName, 0, "testerror", "last")
for p in params:
expPath = p["name"]
if not "results" in expPath:
expPath = os.path.join("results", expPath)
maxTestAccuracy, maxValidationAccuracy, maxBGAccuracy, maxIter, maxTotalAccuracy = bestScore(expPath, suite)
row = [maxTestAccuracy, maxBGAccuracy, maxTotalAccuracy, expPath]
info.append(row)
writer.writerow(row)
print(tabulate(info, headers=headers, tablefmt="grid"))
except:
print("Couldn't analyze experiment",expName) | ['def', 'findOptimalResults', '(', 'expName', ',', 'suite', ',', 'outFile', ')', ':', 'writer', '=', 'csv', '.', 'writer', '(', 'outFile', ')', 'headers', '=', '[', '"testAccuracy"', ',', '"bgAccuracy"', ',', '"maxTotalAccuracy"', ',', '"experiment path"', ']', 'writer', '.', 'writerow', '(', 'headers', ')', 'info', '=', '[', ']', 'print', '(', '"\\n================"', ',', 'expName', ',', '"====================="', ')', 'try', ':', '# Retrieve the last totalCorrect from each experiment', '# Print them sorted from best to worst', 'values', ',', 'params', '=', 'suite', '.', 'get_values_fix_params', '(', 'expName', ',', '0', ',', '"testerror"', ',', '"last"', ')', 'for', 'p', 'in', 'params', ':', 'expPath', '=', 'p', '[', '"name"', ']', 'if', 'not', '"results"', 'in', 'expPath', ':', 'expPath', '=', 'os', '.', 'path', '.', 'join', '(', '"results"', ',', 'expPath', ')', 'maxTestAccuracy', ',', 'maxValidationAccuracy', ',', 'maxBGAccuracy', ',', 'maxIter', ',', 'maxTotalAccuracy', '=', 'bestScore', '(', 'expPath', ',', 'suite', ')', 'row', '=', '[', 'maxTestAccuracy', ',', 'maxBGAccuracy', ',', 'maxTotalAccuracy', ',', 'expPath', ']', 'info', '.', 'append', '(', 'row', ')', 'writer', '.', 'writerow', '(', 'row', ')', 'print', '(', 'tabulate', '(', 'info', ',', 'headers', '=', 'headers', ',', 'tablefmt', '=', '"grid"', ')', ')', 'except', ':', 'print', '(', '"Couldn\'t analyze experiment"', ',', 'expName', ')'] | Go through every experiment in the specified folder. For each experiment, find
the iteration with the best validation score, and return the metrics
associated with that iteration. | ['Go', 'through', 'every', 'experiment', 'in', 'the', 'specified', 'folder', '.', 'For', 'each', 'experiment', 'find', 'the', 'iteration', 'with', 'the', 'best', 'validation', 'score', 'and', 'return', 'the', 'metrics', 'associated', 'with', 'that', 'iteration', '.'] | train | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/speech_commands/analyze_experiment.py#L189-L216 |
1,460 | bitprophet/ssh | ssh/sftp_client.py | SFTPClient.readlink | def readlink(self, path):
"""
Return the target of a symbolic link (shortcut). You can use
L{symlink} to create these. The result may be either an absolute or
relative pathname.
@param path: path of the symbolic link file
@type path: str
@return: target path
@rtype: str
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'readlink(%r)' % path)
t, msg = self._request(CMD_READLINK, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count == 0:
return None
if count != 1:
raise SFTPError('Readlink returned %d results' % count)
return _to_unicode(msg.get_string()) | python | def readlink(self, path):
"""
Return the target of a symbolic link (shortcut). You can use
L{symlink} to create these. The result may be either an absolute or
relative pathname.
@param path: path of the symbolic link file
@type path: str
@return: target path
@rtype: str
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'readlink(%r)' % path)
t, msg = self._request(CMD_READLINK, path)
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
if count == 0:
return None
if count != 1:
raise SFTPError('Readlink returned %d results' % count)
return _to_unicode(msg.get_string()) | ['def', 'readlink', '(', 'self', ',', 'path', ')', ':', 'path', '=', 'self', '.', '_adjust_cwd', '(', 'path', ')', 'self', '.', '_log', '(', 'DEBUG', ',', "'readlink(%r)'", '%', 'path', ')', 't', ',', 'msg', '=', 'self', '.', '_request', '(', 'CMD_READLINK', ',', 'path', ')', 'if', 't', '!=', 'CMD_NAME', ':', 'raise', 'SFTPError', '(', "'Expected name response'", ')', 'count', '=', 'msg', '.', 'get_int', '(', ')', 'if', 'count', '==', '0', ':', 'return', 'None', 'if', 'count', '!=', '1', ':', 'raise', 'SFTPError', '(', "'Readlink returned %d results'", '%', 'count', ')', 'return', '_to_unicode', '(', 'msg', '.', 'get_string', '(', ')', ')'] | Return the target of a symbolic link (shortcut). You can use
L{symlink} to create these. The result may be either an absolute or
relative pathname.
@param path: path of the symbolic link file
@type path: str
@return: target path
@rtype: str | ['Return', 'the', 'target', 'of', 'a', 'symbolic', 'link', '(', 'shortcut', ')', '.', 'You', 'can', 'use', 'L', '{', 'symlink', '}', 'to', 'create', 'these', '.', 'The', 'result', 'may', 'be', 'either', 'an', 'absolute', 'or', 'relative', 'pathname', '.'] | train | https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/sftp_client.py#L453-L474 |
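A hedged usage sketch for readlink: it assumes a reachable SFTP server with an existing symlink, and uses the paramiko-style client API that this library mirrors (host, credentials and the link path are placeholders, not real values).

import paramiko  # the `ssh` package exposes the same SFTPClient surface

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("sftp.example.com", username="user", password="secret")  # placeholders
sftp = client.open_sftp()
# readlink returns the target path (absolute or relative), or None when the
# server reports no names for the link.
print(sftp.readlink("/home/user/current"))  # hypothetical symlink
sftp.close()
client.close()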
1,461 | moonso/loqusdb | loqusdb/utils/load.py | load_variants | def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None,
max_window=3000, variant_type='snv'):
"""Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int)
"""
if variant_type == 'snv':
nr_variants = case_obj['nr_variants']
else:
nr_variants = case_obj['nr_sv_variants']
nr_inserted = 0
case_id = case_obj['case_id']
if skip_case_id:
case_id = None
# Loop over the variants in the vcf
with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar:
variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)
if variant_type == 'sv':
for sv_variant in variants:
if not sv_variant:
continue
adapter.add_structural_variant(variant=sv_variant, max_window=max_window)
nr_inserted += 1
if variant_type == 'snv':
nr_inserted = adapter.add_variants(variants)
LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
return nr_inserted | python | def load_variants(adapter, vcf_obj, case_obj, skip_case_id=False, gq_treshold=None,
max_window=3000, variant_type='snv'):
"""Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int)
"""
if variant_type == 'snv':
nr_variants = case_obj['nr_variants']
else:
nr_variants = case_obj['nr_sv_variants']
nr_inserted = 0
case_id = case_obj['case_id']
if skip_case_id:
case_id = None
# Loop over the variants in the vcf
with click.progressbar(vcf_obj, label="Inserting variants",length=nr_variants) as bar:
variants = (build_variant(variant,case_obj,case_id, gq_treshold) for variant in bar)
if variant_type == 'sv':
for sv_variant in variants:
if not sv_variant:
continue
adapter.add_structural_variant(variant=sv_variant, max_window=max_window)
nr_inserted += 1
if variant_type == 'snv':
nr_inserted = adapter.add_variants(variants)
LOG.info("Inserted %s variants of type %s", nr_inserted, variant_type)
return nr_inserted | ['def', 'load_variants', '(', 'adapter', ',', 'vcf_obj', ',', 'case_obj', ',', 'skip_case_id', '=', 'False', ',', 'gq_treshold', '=', 'None', ',', 'max_window', '=', '3000', ',', 'variant_type', '=', "'snv'", ')', ':', 'if', 'variant_type', '==', "'snv'", ':', 'nr_variants', '=', 'case_obj', '[', "'nr_variants'", ']', 'else', ':', 'nr_variants', '=', 'case_obj', '[', "'nr_sv_variants'", ']', 'nr_inserted', '=', '0', 'case_id', '=', 'case_obj', '[', "'case_id'", ']', 'if', 'skip_case_id', ':', 'case_id', '=', 'None', '# Loop over the variants in the vcf', 'with', 'click', '.', 'progressbar', '(', 'vcf_obj', ',', 'label', '=', '"Inserting variants"', ',', 'length', '=', 'nr_variants', ')', 'as', 'bar', ':', 'variants', '=', '(', 'build_variant', '(', 'variant', ',', 'case_obj', ',', 'case_id', ',', 'gq_treshold', ')', 'for', 'variant', 'in', 'bar', ')', 'if', 'variant_type', '==', "'sv'", ':', 'for', 'sv_variant', 'in', 'variants', ':', 'if', 'not', 'sv_variant', ':', 'continue', 'adapter', '.', 'add_structural_variant', '(', 'variant', '=', 'sv_variant', ',', 'max_window', '=', 'max_window', ')', 'nr_inserted', '+=', '1', 'if', 'variant_type', '==', "'snv'", ':', 'nr_inserted', '=', 'adapter', '.', 'add_variants', '(', 'variants', ')', 'LOG', '.', 'info', '(', '"Inserted %s variants of type %s"', ',', 'nr_inserted', ',', 'variant_type', ')', 'return', 'nr_inserted'] | Load variants for a family into the database.
Args:
adapter (loqusdb.plugins.Adapter): initialized plugin
case_obj(Case): dict with case information
nr_variants(int)
skip_case_id (bool): whether to include the case id on variant level
or not
gq_treshold(int)
max_window(int): Specify the max size for sv windows
variant_type(str): 'sv' or 'snv'
Returns:
nr_inserted(int) | ['Load', 'variants', 'for', 'a', 'family', 'into', 'the', 'database', '.'] | train | https://github.com/moonso/loqusdb/blob/792dcd0d461aff5adc703c49eebf58964913a513/loqusdb/utils/load.py#L179-L222 |
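The core pattern in load_variants — wrapping the input in click.progressbar and building documents lazily with a generator expression — can be sketched on its own. click is assumed to be installed; the variant list and builder below are stand-ins for the VCF iterator and build_variant.

import click

raw_variants = [{"pos": i} for i in range(1000)]  # stand-in for a parsed VCF

def build_variant(variant, case_id):
    # stand-in builder; the real one may return None for filtered records
    return {"case_id": case_id, "pos": variant["pos"]}

nr_inserted = 0
with click.progressbar(raw_variants, label="Inserting variants",
                       length=len(raw_variants)) as bar:
    variants = (build_variant(v, "case_1") for v in bar)  # built as the bar advances
    for variant in variants:
        if not variant:
            continue
        nr_inserted += 1
print("inserted:", nr_inserted)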
1,462 | PSPC-SPAC-buyandsell/von_anchor | von_anchor/wallet/wallet.py | Wallet.reseed_init | async def reseed_init(self, next_seed: str = None) -> str:
"""
Begin reseed operation: generate new key. Raise WalletState if wallet is closed.
:param next_seed: incoming replacement seed (default random)
:return: new verification key
"""
LOGGER.debug('Wallet.reseed_init >>> next_seed: [SEED]')
if not self.handle:
LOGGER.debug('Wallet.reseed_init <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await did.replace_keys_start(self.handle, self.did, json.dumps({'seed': next_seed} if next_seed else {}))
LOGGER.debug('Wallet.reseed_init <<< %s', rv)
return rv | python | async def reseed_init(self, next_seed: str = None) -> str:
"""
Begin reseed operation: generate new key. Raise WalletState if wallet is closed.
:param next_seed: incoming replacement seed (default random)
:return: new verification key
"""
LOGGER.debug('Wallet.reseed_init >>> next_seed: [SEED]')
if not self.handle:
LOGGER.debug('Wallet.reseed_init <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await did.replace_keys_start(self.handle, self.did, json.dumps({'seed': next_seed} if next_seed else {}))
LOGGER.debug('Wallet.reseed_init <<< %s', rv)
return rv | ['async', 'def', 'reseed_init', '(', 'self', ',', 'next_seed', ':', 'str', '=', 'None', ')', '->', 'str', ':', 'LOGGER', '.', 'debug', '(', "'Wallet.reseed_init >>> next_seed: [SEED]'", ')', 'if', 'not', 'self', '.', 'handle', ':', 'LOGGER', '.', 'debug', '(', "'Wallet.reseed_init <!< Wallet %s is closed'", ',', 'self', '.', 'name', ')', 'raise', 'WalletState', '(', "'Wallet {} is closed'", '.', 'format', '(', 'self', '.', 'name', ')', ')', 'rv', '=', 'await', 'did', '.', 'replace_keys_start', '(', 'self', '.', 'handle', ',', 'self', '.', 'did', ',', 'json', '.', 'dumps', '(', '{', "'seed'", ':', 'next_seed', '}', 'if', 'next_seed', 'else', '{', '}', ')', ')', 'LOGGER', '.', 'debug', '(', "'Wallet.reseed_init <<< %s'", ',', 'rv', ')', 'return', 'rv'] | Begin reseed operation: generate new key. Raise WalletState if wallet is closed.
:param next_seed: incoming replacement seed (default random)
:return: new verification key | ['Begin', 'reseed', 'operation', ':', 'generate', 'new', 'key', '.', 'Raise', 'WalletState', 'if', 'wallet', 'is', 'closed', '.'] | train | https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/wallet/wallet.py#L1254-L1270 |
1,463 | ThreatConnect-Inc/tcex | tcex/tcex_ti_group.py | Group.tag | def tag(self, name, formatter=None):
"""Return instance of Tag.
Args:
name (str): The value for this tag.
formatter (method, optional): A method that take a tag value and returns a
formatted tag.
Returns:
obj: An instance of Tag.
"""
tag = Tag(name, formatter)
for tag_data in self._tags:
if tag_data.name == name:
tag = tag_data
break
else:
self._tags.append(tag)
return tag | python | def tag(self, name, formatter=None):
"""Return instance of Tag.
Args:
name (str): The value for this tag.
formatter (method, optional): A method that take a tag value and returns a
formatted tag.
Returns:
obj: An instance of Tag.
"""
tag = Tag(name, formatter)
for tag_data in self._tags:
if tag_data.name == name:
tag = tag_data
break
else:
self._tags.append(tag)
return tag | ['def', 'tag', '(', 'self', ',', 'name', ',', 'formatter', '=', 'None', ')', ':', 'tag', '=', 'Tag', '(', 'name', ',', 'formatter', ')', 'for', 'tag_data', 'in', 'self', '.', '_tags', ':', 'if', 'tag_data', '.', 'name', '==', 'name', ':', 'tag', '=', 'tag_data', 'break', 'else', ':', 'self', '.', '_tags', '.', 'append', '(', 'tag', ')', 'return', 'tag'] | Return instance of Tag.
Args:
name (str): The value for this tag.
formatter (method, optional): A method that take a tag value and returns a
formatted tag.
Returns:
obj: An instance of Tag. | ['Return', 'instance', 'of', 'Tag', '.'] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_group.py#L239-L257 |
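The tag method above is a return-existing-or-append lookup keyed on the tag name; the same pattern in a self-contained sketch with a toy Tag class (not the real tcex objects).

class Tag:
    def __init__(self, name, formatter=None):
        self.name = name
        self.formatter = formatter

_tags = []

def tag(name, formatter=None):
    """Return the Tag already registered under `name`, or create and register one."""
    candidate = Tag(name, formatter)
    for tag_data in _tags:
        if tag_data.name == name:
            return tag_data  # reuse the existing instance
    _tags.append(candidate)  # first occurrence of this name
    return candidate

assert tag("apt") is tag("apt")  # repeated lookups hand back the same object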
1,464 | pandas-dev/pandas | pandas/util/_validators.py | validate_bool_kwarg | def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError('For argument "{arg}" expected type bool, received '
'type {typ}.'.format(arg=arg_name,
typ=type(value).__name__))
return value | python | def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError('For argument "{arg}" expected type bool, received '
'type {typ}.'.format(arg=arg_name,
typ=type(value).__name__))
return value | ['def', 'validate_bool_kwarg', '(', 'value', ',', 'arg_name', ')', ':', 'if', 'not', '(', 'is_bool', '(', 'value', ')', 'or', 'value', 'is', 'None', ')', ':', 'raise', 'ValueError', '(', '\'For argument "{arg}" expected type bool, received \'', "'type {typ}.'", '.', 'format', '(', 'arg', '=', 'arg_name', ',', 'typ', '=', 'type', '(', 'value', ')', '.', '__name__', ')', ')', 'return', 'value'] | Ensures that argument passed in arg_name is of type bool. | ['Ensures', 'that', 'argument', 'passed', 'in', 'arg_name', 'is', 'of', 'type', 'bool', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L221-L227 |
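A standalone sketch of the same bool-or-None validation; pandas' is_bool helper is replaced with isinstance so the snippet runs without pandas (numpy bools are therefore not covered here).

def validate_bool_kwarg(value, arg_name):
    """Accept True, False or None; raise ValueError for anything else."""
    if not (isinstance(value, bool) or value is None):
        raise ValueError('For argument "{arg}" expected type bool, received '
                         'type {typ}.'.format(arg=arg_name, typ=type(value).__name__))
    return value

validate_bool_kwarg(True, "inplace")   # returns True
validate_bool_kwarg(None, "inplace")   # returns None
try:
    validate_bool_kwarg(1, "inplace")  # rejected: int is not bool
except ValueError as exc:
    print(exc)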
1,465 | smarie/python-parsyfiles | parsyfiles/plugins_base/support_for_primitive_types.py | _can_construct_from_str | def _can_construct_from_str(strict_mode: bool, from_type: Type, to_type: Type) -> bool:
"""
Returns true if the provided types are valid for constructor_with_str_arg conversion
Explicitly declare that we are not able to convert primitive types (they already have their own converters)
:param strict_mode:
:param from_type:
:param to_type:
:return:
"""
return to_type not in {int, float, bool} | python | def _can_construct_from_str(strict_mode: bool, from_type: Type, to_type: Type) -> bool:
"""
Returns true if the provided types are valid for constructor_with_str_arg conversion
Explicitly declare that we are not able to convert primitive types (they already have their own converters)
:param strict_mode:
:param from_type:
:param to_type:
:return:
"""
return to_type not in {int, float, bool} | ['def', '_can_construct_from_str', '(', 'strict_mode', ':', 'bool', ',', 'from_type', ':', 'Type', ',', 'to_type', ':', 'Type', ')', '->', 'bool', ':', 'return', 'to_type', 'not', 'in', '{', 'int', ',', 'float', ',', 'bool', '}'] | Returns true if the provided types are valid for constructor_with_str_arg conversion
Explicitly declare that we are not able to convert primitive types (they already have their own converters)
:param strict_mode:
:param from_type:
:param to_type:
:return: | ['Returns', 'true', 'if', 'the', 'provided', 'types', 'are', 'valid', 'for', 'constructor_with_str_arg', 'conversion', 'Explicitly', 'declare', 'that', 'we', 'are', 'not', 'able', 'to', 'convert', 'primitive', 'types', '(', 'they', 'already', 'have', 'their', 'own', 'converters', ')'] | train | https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/plugins_base/support_for_primitive_types.py#L121-L131 |
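In practice the check above only excludes the primitives that already have dedicated converters; a two-line illustration:

def can_construct_from_str(to_type) -> bool:
    return to_type not in {int, float, bool}  # primitives keep their own converters

print(can_construct_from_str(int))      # False: int has its own converter
print(can_construct_from_str(complex))  # True: construction from str is allowed here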
1,466 | boriel/zxbasic | arch/zx48k/optimizer.py | BasicBlock.is_used | def is_used(self, regs, i, top=None):
""" Checks whether any of the given regs are required from the given point
to the end or not.
"""
if i < 0:
i = 0
if self.lock:
return True
regs = list(regs) # make a copy
if top is None:
top = len(self)
else:
top -= 1
for ii in range(i, top):
for r in self.mem[ii].requires:
if r in regs:
return True
for r in self.mem[ii].destroys:
if r in regs:
regs.remove(r)
if not regs:
return False
self.lock = True
result = self.goes_requires(regs)
self.lock = False
return result | python | def is_used(self, regs, i, top=None):
""" Checks whether any of the given regs are required from the given point
to the end or not.
"""
if i < 0:
i = 0
if self.lock:
return True
regs = list(regs) # make a copy
if top is None:
top = len(self)
else:
top -= 1
for ii in range(i, top):
for r in self.mem[ii].requires:
if r in regs:
return True
for r in self.mem[ii].destroys:
if r in regs:
regs.remove(r)
if not regs:
return False
self.lock = True
result = self.goes_requires(regs)
self.lock = False
return result | ['def', 'is_used', '(', 'self', ',', 'regs', ',', 'i', ',', 'top', '=', 'None', ')', ':', 'if', 'i', '<', '0', ':', 'i', '=', '0', 'if', 'self', '.', 'lock', ':', 'return', 'True', 'regs', '=', 'list', '(', 'regs', ')', '# make a copy', 'if', 'top', 'is', 'None', ':', 'top', '=', 'len', '(', 'self', ')', 'else', ':', 'top', '-=', '1', 'for', 'ii', 'in', 'range', '(', 'i', ',', 'top', ')', ':', 'for', 'r', 'in', 'self', '.', 'mem', '[', 'ii', ']', '.', 'requires', ':', 'if', 'r', 'in', 'regs', ':', 'return', 'True', 'for', 'r', 'in', 'self', '.', 'mem', '[', 'ii', ']', '.', 'destroys', ':', 'if', 'r', 'in', 'regs', ':', 'regs', '.', 'remove', '(', 'r', ')', 'if', 'not', 'regs', ':', 'return', 'False', 'self', '.', 'lock', '=', 'True', 'result', '=', 'self', '.', 'goes_requires', '(', 'regs', ')', 'self', '.', 'lock', '=', 'False', 'return', 'result'] | Checks whether any of the given regs are required from the given point
to the end or not. | ['Checks', 'whether', 'any', 'of', 'the', 'given', 'regs', 'are', 'required', 'from', 'the', 'given', 'point', 'to', 'the', 'end', 'or', 'not', '.'] | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/optimizer.py#L1531-L1563 |
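A simplified, self-contained sketch of the liveness test behind is_used: scanning forward, a register counts as used if some instruction requires it before every watched register has been overwritten. Instructions are toy dicts here, and the fall-through query to successor blocks is dropped.

def is_used(instructions, regs, start=0):
    """True when any register in `regs` is read at or after `start` before being clobbered."""
    regs = list(regs)  # work on a copy
    for ins in instructions[start:]:
        if any(r in regs for r in ins["requires"]):
            return True
        for r in ins["destroys"]:
            if r in regs:
                regs.remove(r)
        if not regs:
            return False
    return False  # the real code would ask the successor blocks here

block = [
    {"requires": [], "destroys": ["a"]},   # 'a' is overwritten first...
    {"requires": ["a"], "destroys": []},   # ...then read
]
print(is_used(block, ["a"], 0))  # False: clobbered before any read
print(is_used(block, ["a"], 1))  # True: starting later, 'a' is read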
1,467 | welchbj/sublemon | sublemon/subprocess.py | SublemonSubprocess.wait_done | async def wait_done(self) -> int:
"""Coroutine to wait for subprocess run completion.
Returns:
The exit code of the subprocess.
"""
await self._done_running_evt.wait()
if self._exit_code is None:
raise SublemonLifetimeError(
'Subprocess exited abnormally with `None` exit code')
return self._exit_code | python | async def wait_done(self) -> int:
"""Coroutine to wait for subprocess run completion.
Returns:
The exit code of the subprocess.
"""
await self._done_running_evt.wait()
if self._exit_code is None:
raise SublemonLifetimeError(
'Subprocess exited abnormally with `None` exit code')
return self._exit_code | ['async', 'def', 'wait_done', '(', 'self', ')', '->', 'int', ':', 'await', 'self', '.', '_done_running_evt', '.', 'wait', '(', ')', 'if', 'self', '.', '_exit_code', 'is', 'None', ':', 'raise', 'SublemonLifetimeError', '(', "'Subprocess exited abnormally with `None` exit code'", ')', 'return', 'self', '.', '_exit_code'] | Coroutine to wait for subprocess run completion.
Returns:
The exit code of the subprocess. | ['Coroutine', 'to', 'wait', 'for', 'subprocess', 'run', 'completion', '.'] | train | https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/subprocess.py#L67-L78 |
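A small asyncio sketch of the same wait-for-completion contract — await an event, then surface the recorded exit code — with the real subprocess machinery replaced by a coroutine that sets the event.

import asyncio

class FakeSubprocess:
    def __init__(self):
        self._done_running_evt = asyncio.Event()
        self._exit_code = None

    async def _run(self):
        await asyncio.sleep(0.1)  # pretend to do work
        self._exit_code = 0
        self._done_running_evt.set()

    async def wait_done(self) -> int:
        await self._done_running_evt.wait()
        if self._exit_code is None:
            raise RuntimeError("subprocess exited abnormally with `None` exit code")
        return self._exit_code

async def main():
    proc = FakeSubprocess()            # created inside the running loop
    asyncio.create_task(proc._run())
    print("exit code:", await proc.wait_done())

asyncio.run(main())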
1,468 | frmdstryr/enamlx | enamlx/qt/qt_plot_area.py | AbstractQtPlotItem._refresh_multi_axis | def _refresh_multi_axis(self):
""" If linked axis' are used, setup and link them """
d = self.declaration
#: Create a separate viewbox
self.viewbox = pg.ViewBox()
#: If this is the first nested plot, use the parent right axis
_plots = [c for c in self.parent().children() if isinstance(c,AbstractQtPlotItem)]
i = _plots.index(self)
if i==0:
self.axis = self.widget.getAxis('right')
self.widget.showAxis('right')
else:
self.axis = pg.AxisItem('right')
self.axis.setZValue(-10000)
#: Add new axis to scene
self.widget.layout.addItem(self.axis,2,i+2)
#: Link x axis to the parent axis
self.viewbox.setXLink(self.widget.vb)
#: Link y axis to the view
self.axis.linkToView(self.viewbox)
#: Set axis label
self.axis.setLabel(d.label_right)
#: Add Viewbox to parent scene
self.parent().parent_widget().scene().addItem(self.viewbox) | python | def _refresh_multi_axis(self):
""" If linked axis' are used, setup and link them """
d = self.declaration
#: Create a separate viewbox
self.viewbox = pg.ViewBox()
#: If this is the first nested plot, use the parent right axis
_plots = [c for c in self.parent().children() if isinstance(c,AbstractQtPlotItem)]
i = _plots.index(self)
if i==0:
self.axis = self.widget.getAxis('right')
self.widget.showAxis('right')
else:
self.axis = pg.AxisItem('right')
self.axis.setZValue(-10000)
#: Add new axis to scene
self.widget.layout.addItem(self.axis,2,i+2)
#: Link x axis to the parent axis
self.viewbox.setXLink(self.widget.vb)
#: Link y axis to the view
self.axis.linkToView(self.viewbox)
#: Set axis label
self.axis.setLabel(d.label_right)
#: Add Viewbox to parent scene
self.parent().parent_widget().scene().addItem(self.viewbox) | ['def', '_refresh_multi_axis', '(', 'self', ')', ':', 'd', '=', 'self', '.', 'declaration', '#: Create a separate viewbox', 'self', '.', 'viewbox', '=', 'pg', '.', 'ViewBox', '(', ')', '#: If this is the first nested plot, use the parent right axis', '_plots', '=', '[', 'c', 'for', 'c', 'in', 'self', '.', 'parent', '(', ')', '.', 'children', '(', ')', 'if', 'isinstance', '(', 'c', ',', 'AbstractQtPlotItem', ')', ']', 'i', '=', '_plots', '.', 'index', '(', 'self', ')', 'if', 'i', '==', '0', ':', 'self', '.', 'axis', '=', 'self', '.', 'widget', '.', 'getAxis', '(', "'right'", ')', 'self', '.', 'widget', '.', 'showAxis', '(', "'right'", ')', 'else', ':', 'self', '.', 'axis', '=', 'pg', '.', 'AxisItem', '(', "'right'", ')', 'self', '.', 'axis', '.', 'setZValue', '(', '-', '10000', ')', '#: Add new axis to scene', 'self', '.', 'widget', '.', 'layout', '.', 'addItem', '(', 'self', '.', 'axis', ',', '2', ',', 'i', '+', '2', ')', '#: Link x axis to the parent axis', 'self', '.', 'viewbox', '.', 'setXLink', '(', 'self', '.', 'widget', '.', 'vb', ')', '#: Link y axis to the view', 'self', '.', 'axis', '.', 'linkToView', '(', 'self', '.', 'viewbox', ')', '#: Set axis label', 'self', '.', 'axis', '.', 'setLabel', '(', 'd', '.', 'label_right', ')', '#: Add Viewbox to parent scene', 'self', '.', 'parent', '(', ')', '.', 'parent_widget', '(', ')', '.', 'scene', '(', ')', '.', 'addItem', '(', 'self', '.', 'viewbox', ')'] | If linked axis' are used, setup and link them | ['If', 'linked', 'axis', 'are', 'used', 'setup', 'and', 'link', 'them'] | train | https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_plot_area.py#L174-L204 |
1,469 | fhcrc/seqmagick | seqmagick/subcommands/quality_filter.py | moving_average | def moving_average(iterable, n):
"""
From Python collections module documentation
moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
"""
it = iter(iterable)
d = collections.deque(itertools.islice(it, n - 1))
d.appendleft(0)
s = sum(d)
for elem in it:
s += elem - d.popleft()
d.append(elem)
yield s / float(n) | python | def moving_average(iterable, n):
"""
From Python collections module documentation
moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
"""
it = iter(iterable)
d = collections.deque(itertools.islice(it, n - 1))
d.appendleft(0)
s = sum(d)
for elem in it:
s += elem - d.popleft()
d.append(elem)
yield s / float(n) | ['def', 'moving_average', '(', 'iterable', ',', 'n', ')', ':', 'it', '=', 'iter', '(', 'iterable', ')', 'd', '=', 'collections', '.', 'deque', '(', 'itertools', '.', 'islice', '(', 'it', ',', 'n', '-', '1', ')', ')', 'd', '.', 'appendleft', '(', '0', ')', 's', '=', 'sum', '(', 'd', ')', 'for', 'elem', 'in', 'it', ':', 's', '+=', 'elem', '-', 'd', '.', 'popleft', '(', ')', 'd', '.', 'append', '(', 'elem', ')', 'yield', 's', '/', 'float', '(', 'n', ')'] | From Python collections module documentation
moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0 | ['From', 'Python', 'collections', 'module', 'documentation'] | train | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L204-L217 |
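The generator above only needs two stdlib imports to run on its own; a copy with those imports and the documented example:

import collections
import itertools

def moving_average(iterable, n):
    it = iter(iterable)
    d = collections.deque(itertools.islice(it, n - 1))
    d.appendleft(0)
    s = sum(d)
    for elem in it:
        s += elem - d.popleft()
        d.append(elem)
        yield s / float(n)

print(list(moving_average([40, 30, 50, 46, 39, 44], 3)))  # [40.0, 42.0, 45.0, 43.0]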
1,470 | threeML/astromodels | astromodels/functions/priors.py | Gaussian.from_unit_cube | def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
mu = self.mu.value
sigma = self.sigma.value
sqrt_two = 1.414213562
if x < 1e-16 or (1 - x) < 1e-16:
res = -1e32
else:
res = mu + sigma * sqrt_two * erfcinv(2 * (1 - x))
return res | python | def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
mu = self.mu.value
sigma = self.sigma.value
sqrt_two = 1.414213562
if x < 1e-16 or (1 - x) < 1e-16:
res = -1e32
else:
res = mu + sigma * sqrt_two * erfcinv(2 * (1 - x))
return res | ['def', 'from_unit_cube', '(', 'self', ',', 'x', ')', ':', 'mu', '=', 'self', '.', 'mu', '.', 'value', 'sigma', '=', 'self', '.', 'sigma', '.', 'value', 'sqrt_two', '=', '1.414213562', 'if', 'x', '<', '1e-16', 'or', '(', '1', '-', 'x', ')', '<', '1e-16', ':', 'res', '=', '-', '1e32', 'else', ':', 'res', '=', 'mu', '+', 'sigma', '*', 'sqrt_two', '*', 'erfcinv', '(', '2', '*', '(', '1', '-', 'x', ')', ')', 'return', 'res'] | Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return: | ['Used', 'by', 'multinest'] | train | https://github.com/threeML/astromodels/blob/9aac365a372f77603039533df9a6b694c1e360d5/astromodels/functions/priors.py#L75-L98 |
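The expression mu + sigma*sqrt(2)*erfcinv(2*(1 - x)) is the inverse CDF of a normal distribution, so for well-behaved x it matches the standard library's NormalDist.inv_cdf; a quick cross-check (scipy is assumed for erfcinv):

from math import sqrt
from statistics import NormalDist
from scipy.special import erfcinv

mu, sigma = 1.0, 2.0
for x in (0.1, 0.5, 0.9):
    via_erfcinv = mu + sigma * sqrt(2) * erfcinv(2 * (1 - x))
    via_inv_cdf = NormalDist(mu, sigma).inv_cdf(x)
    print(x, round(via_erfcinv, 6), round(via_inv_cdf, 6))  # the two values agree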
1,471 | django-fluent/django-fluent-dashboard | fluent_dashboard/modules.py | CacheStatusGroup.init_with_context | def init_with_context(self, context):
"""
Initializes the status list.
"""
super(CacheStatusGroup, self).init_with_context(context)
if 'dashboardmods' in settings.INSTALLED_APPS:
import dashboardmods
memcache_mods = dashboardmods.get_memcache_dash_modules()
try:
varnish_mods = dashboardmods.get_varnish_dash_modules()
except (socket.error, KeyError) as e:
# dashboardmods 2.2 throws KeyError for 'cache_misses' when the Varnish cache is empty.
# Socket errors are also ignored, to work similar to the memcache stats.
logger.exception("Unable to request Varnish stats: {0}".format(str(e)))
varnish_mods = []
except ImportError:
varnish_mods = []
self.children = memcache_mods + varnish_mods | python | def init_with_context(self, context):
"""
Initializes the status list.
"""
super(CacheStatusGroup, self).init_with_context(context)
if 'dashboardmods' in settings.INSTALLED_APPS:
import dashboardmods
memcache_mods = dashboardmods.get_memcache_dash_modules()
try:
varnish_mods = dashboardmods.get_varnish_dash_modules()
except (socket.error, KeyError) as e:
# dashboardmods 2.2 throws KeyError for 'cache_misses' when the Varnish cache is empty.
# Socket errors are also ignored, to work similar to the memcache stats.
logger.exception("Unable to request Varnish stats: {0}".format(str(e)))
varnish_mods = []
except ImportError:
varnish_mods = []
self.children = memcache_mods + varnish_mods | ['def', 'init_with_context', '(', 'self', ',', 'context', ')', ':', 'super', '(', 'CacheStatusGroup', ',', 'self', ')', '.', 'init_with_context', '(', 'context', ')', 'if', "'dashboardmods'", 'in', 'settings', '.', 'INSTALLED_APPS', ':', 'import', 'dashboardmods', 'memcache_mods', '=', 'dashboardmods', '.', 'get_memcache_dash_modules', '(', ')', 'try', ':', 'varnish_mods', '=', 'dashboardmods', '.', 'get_varnish_dash_modules', '(', ')', 'except', '(', 'socket', '.', 'error', ',', 'KeyError', ')', 'as', 'e', ':', "# dashboardmods 2.2 throws KeyError for 'cache_misses' when the Varnish cache is empty.", '# Socket errors are also ignored, to work similar to the memcache stats.', 'logger', '.', 'exception', '(', '"Unable to request Varnish stats: {0}"', '.', 'format', '(', 'str', '(', 'e', ')', ')', ')', 'varnish_mods', '=', '[', ']', 'except', 'ImportError', ':', 'varnish_mods', '=', '[', ']', 'self', '.', 'children', '=', 'memcache_mods', '+', 'varnish_mods'] | Initializes the status list. | ['Initializes', 'the', 'status', 'list', '.'] | train | https://github.com/django-fluent/django-fluent-dashboard/blob/aee7ef39e0586cd160036b13b7944b69cd2b4b8c/fluent_dashboard/modules.py#L250-L270 |
1,472 | hydraplatform/hydra-base | hydra_base/util/hdb.py | add_resource_types | def add_resource_types(resource_i, types):
"""
Save a reference to the types used for this resource.
@returns a list of type_ids representing the type ids
on the resource.
"""
if types is None:
return []
existing_type_ids = []
if resource_i.types:
for t in resource_i.types:
existing_type_ids.append(t.type_id)
new_type_ids = []
for templatetype in types:
if templatetype.id in existing_type_ids:
continue
rt_i = ResourceType()
rt_i.type_id = templatetype.id
rt_i.ref_key = resource_i.ref_key
if resource_i.ref_key == 'NODE':
rt_i.node_id = resource_i.id
elif resource_i.ref_key == 'LINK':
rt_i.link_id = resource_i.id
elif resource_i.ref_key == 'GROUP':
rt_i.group_id = resource_i.id
resource_i.types.append(rt_i)
new_type_ids.append(templatetype.id)
return new_type_ids | python | def add_resource_types(resource_i, types):
"""
Save a reference to the types used for this resource.
@returns a list of type_ids representing the type ids
on the resource.
"""
if types is None:
return []
existing_type_ids = []
if resource_i.types:
for t in resource_i.types:
existing_type_ids.append(t.type_id)
new_type_ids = []
for templatetype in types:
if templatetype.id in existing_type_ids:
continue
rt_i = ResourceType()
rt_i.type_id = templatetype.id
rt_i.ref_key = resource_i.ref_key
if resource_i.ref_key == 'NODE':
rt_i.node_id = resource_i.id
elif resource_i.ref_key == 'LINK':
rt_i.link_id = resource_i.id
elif resource_i.ref_key == 'GROUP':
rt_i.group_id = resource_i.id
resource_i.types.append(rt_i)
new_type_ids.append(templatetype.id)
return new_type_ids | ['def', 'add_resource_types', '(', 'resource_i', ',', 'types', ')', ':', 'if', 'types', 'is', 'None', ':', 'return', '[', ']', 'existing_type_ids', '=', '[', ']', 'if', 'resource_i', '.', 'types', ':', 'for', 't', 'in', 'resource_i', '.', 'types', ':', 'existing_type_ids', '.', 'append', '(', 't', '.', 'type_id', ')', 'new_type_ids', '=', '[', ']', 'for', 'templatetype', 'in', 'types', ':', 'if', 'templatetype', '.', 'id', 'in', 'existing_type_ids', ':', 'continue', 'rt_i', '=', 'ResourceType', '(', ')', 'rt_i', '.', 'type_id', '=', 'templatetype', '.', 'id', 'rt_i', '.', 'ref_key', '=', 'resource_i', '.', 'ref_key', 'if', 'resource_i', '.', 'ref_key', '==', "'NODE'", ':', 'rt_i', '.', 'node_id', '=', 'resource_i', '.', 'id', 'elif', 'resource_i', '.', 'ref_key', '==', "'LINK'", ':', 'rt_i', '.', 'link_id', '=', 'resource_i', '.', 'id', 'elif', 'resource_i', '.', 'ref_key', '==', "'GROUP'", ':', 'rt_i', '.', 'group_id', '=', 'resource_i', '.', 'id', 'resource_i', '.', 'types', '.', 'append', '(', 'rt_i', ')', 'new_type_ids', '.', 'append', '(', 'templatetype', '.', 'id', ')', 'return', 'new_type_ids'] | Save a reference to the types used for this resource.
@returns a list of type_ids representing the type ids
on the resource. | ['Save', 'a', 'reference', 'to', 'the', 'types', 'used', 'for', 'this', 'resource', '.'] | train | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/util/hdb.py#L37-L71 |
1,473 | sorgerlab/indra | indra/preassembler/__init__.py | Preassembler._get_stmt_by_group | def _get_stmt_by_group(self, stmt_type, stmts_this_type, eh):
"""Group Statements of `stmt_type` by their hierarchical relations."""
# Dict of stmt group key tuples, indexed by their first Agent
stmt_by_first = collections.defaultdict(lambda: [])
# Dict of stmt group key tuples, indexed by their second Agent
stmt_by_second = collections.defaultdict(lambda: [])
# Dict of statements with None first, with second Agent as keys
none_first = collections.defaultdict(lambda: [])
# Dict of statements with None second, with first Agent as keys
none_second = collections.defaultdict(lambda: [])
# The dict of all statement groups, with tuples of components
# or entity_matches_keys as keys
stmt_by_group = collections.defaultdict(lambda: [])
# Here we group Statements according to the hierarchy graph
# components that their agents are part of
for stmt_tuple in stmts_this_type:
_, stmt = stmt_tuple
entities = self._get_entities(stmt, stmt_type, eh)
# At this point we have an entity list
# If we're dealing with Complexes, sort the entities and use
# as dict key
if stmt_type == Complex:
# There shouldn't be any statements of the type
# e.g., Complex([Foo, None, Bar])
assert None not in entities
assert len(entities) > 0
entities.sort()
key = tuple(entities)
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
elif stmt_type == Conversion:
assert len(entities) > 0
key = (entities[0],
tuple(sorted(entities[1:len(stmt.obj_from)+1])),
tuple(sorted(entities[-len(stmt.obj_to):])))
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
# Now look at all other statement types
# All other statements will have one or two entities
elif len(entities) == 1:
# If only one entity, we only need the one key
# It should not be None!
assert None not in entities
key = tuple(entities)
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
else:
# Make sure we only have two entities, and they are not both
# None
key = tuple(entities)
assert len(key) == 2
assert key != (None, None)
# First agent is None; add in the statements, indexed by
# 2nd
if key[0] is None and stmt_tuple not in none_first[key[1]]:
none_first[key[1]].append(stmt_tuple)
# Second agent is None; add in the statements, indexed by
# 1st
elif key[1] is None and stmt_tuple not in none_second[key[0]]:
none_second[key[0]].append(stmt_tuple)
# Neither entity is None!
elif None not in key:
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
if key not in stmt_by_first[key[0]]:
stmt_by_first[key[0]].append(key)
if key not in stmt_by_second[key[1]]:
stmt_by_second[key[1]].append(key)
# When we've gotten here, we should have stmt_by_group entries, and
# we may or may not have stmt_by_first/second dicts filled out
# (depending on the statement type).
if none_first:
# Get the keys associated with stmts having a None first
# argument
for second_arg, stmts in none_first.items():
# Look for any statements with this second arg
second_arg_keys = stmt_by_second[second_arg]
# If there are no more specific statements matching this
# set of statements with a None first arg, then the
# statements with the None first arg deserve to be in
# their own group.
if not second_arg_keys:
stmt_by_group[(None, second_arg)] = stmts
# On the other hand, if there are statements with a matching
# second arg component, we need to add the None first
# statements to all groups with the matching second arg
for second_arg_key in second_arg_keys:
stmt_by_group[second_arg_key] += stmts
# Now do the corresponding steps for the statements with None as the
# second argument:
if none_second:
for first_arg, stmts in none_second.items():
# Look for any statements with this first arg
first_arg_keys = stmt_by_first[first_arg]
# If there are no more specific statements matching this
# set of statements with a None second arg, then the
# statements with the None second arg deserve to be in
# their own group.
if not first_arg_keys:
stmt_by_group[(first_arg, None)] = stmts
# On the other hand, if there are statements with a matching
# first arg component, we need to add the None second
# statements to all groups with the matching first arg
for first_arg_key in first_arg_keys:
stmt_by_group[first_arg_key] += stmts
return stmt_by_group | python | def _get_stmt_by_group(self, stmt_type, stmts_this_type, eh):
"""Group Statements of `stmt_type` by their hierarchical relations."""
# Dict of stmt group key tuples, indexed by their first Agent
stmt_by_first = collections.defaultdict(lambda: [])
# Dict of stmt group key tuples, indexed by their second Agent
stmt_by_second = collections.defaultdict(lambda: [])
# Dict of statements with None first, with second Agent as keys
none_first = collections.defaultdict(lambda: [])
# Dict of statements with None second, with first Agent as keys
none_second = collections.defaultdict(lambda: [])
# The dict of all statement groups, with tuples of components
# or entity_matches_keys as keys
stmt_by_group = collections.defaultdict(lambda: [])
# Here we group Statements according to the hierarchy graph
# components that their agents are part of
for stmt_tuple in stmts_this_type:
_, stmt = stmt_tuple
entities = self._get_entities(stmt, stmt_type, eh)
# At this point we have an entity list
# If we're dealing with Complexes, sort the entities and use
# as dict key
if stmt_type == Complex:
# There shouldn't be any statements of the type
# e.g., Complex([Foo, None, Bar])
assert None not in entities
assert len(entities) > 0
entities.sort()
key = tuple(entities)
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
elif stmt_type == Conversion:
assert len(entities) > 0
key = (entities[0],
tuple(sorted(entities[1:len(stmt.obj_from)+1])),
tuple(sorted(entities[-len(stmt.obj_to):])))
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
# Now look at all other statement types
# All other statements will have one or two entities
elif len(entities) == 1:
# If only one entity, we only need the one key
# It should not be None!
assert None not in entities
key = tuple(entities)
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
else:
# Make sure we only have two entities, and they are not both
# None
key = tuple(entities)
assert len(key) == 2
assert key != (None, None)
# First agent is None; add in the statements, indexed by
# 2nd
if key[0] is None and stmt_tuple not in none_first[key[1]]:
none_first[key[1]].append(stmt_tuple)
# Second agent is None; add in the statements, indexed by
# 1st
elif key[1] is None and stmt_tuple not in none_second[key[0]]:
none_second[key[0]].append(stmt_tuple)
# Neither entity is None!
elif None not in key:
if stmt_tuple not in stmt_by_group[key]:
stmt_by_group[key].append(stmt_tuple)
if key not in stmt_by_first[key[0]]:
stmt_by_first[key[0]].append(key)
if key not in stmt_by_second[key[1]]:
stmt_by_second[key[1]].append(key)
# When we've gotten here, we should have stmt_by_group entries, and
# we may or may not have stmt_by_first/second dicts filled out
# (depending on the statement type).
if none_first:
# Get the keys associated with stmts having a None first
# argument
for second_arg, stmts in none_first.items():
# Look for any statements with this second arg
second_arg_keys = stmt_by_second[second_arg]
# If there are no more specific statements matching this
# set of statements with a None first arg, then the
# statements with the None first arg deserve to be in
# their own group.
if not second_arg_keys:
stmt_by_group[(None, second_arg)] = stmts
# On the other hand, if there are statements with a matching
# second arg component, we need to add the None first
# statements to all groups with the matching second arg
for second_arg_key in second_arg_keys:
stmt_by_group[second_arg_key] += stmts
# Now do the corresponding steps for the statements with None as the
# second argument:
if none_second:
for first_arg, stmts in none_second.items():
# Look for any statements with this first arg
first_arg_keys = stmt_by_first[first_arg]
# If there are no more specific statements matching this
# set of statements with a None second arg, then the
# statements with the None second arg deserve to be in
# their own group.
if not first_arg_keys:
stmt_by_group[(first_arg, None)] = stmts
# On the other hand, if there are statements with a matching
# first arg component, we need to add the None second
# statements to all groups with the matching first arg
for first_arg_key in first_arg_keys:
stmt_by_group[first_arg_key] += stmts
return stmt_by_group | ['def', '_get_stmt_by_group', '(', 'self', ',', 'stmt_type', ',', 'stmts_this_type', ',', 'eh', ')', ':', '# Dict of stmt group key tuples, indexed by their first Agent', 'stmt_by_first', '=', 'collections', '.', 'defaultdict', '(', 'lambda', ':', '[', ']', ')', '# Dict of stmt group key tuples, indexed by their second Agent', 'stmt_by_second', '=', 'collections', '.', 'defaultdict', '(', 'lambda', ':', '[', ']', ')', '# Dict of statements with None first, with second Agent as keys', 'none_first', '=', 'collections', '.', 'defaultdict', '(', 'lambda', ':', '[', ']', ')', '# Dict of statements with None second, with first Agent as keys', 'none_second', '=', 'collections', '.', 'defaultdict', '(', 'lambda', ':', '[', ']', ')', '# The dict of all statement groups, with tuples of components', '# or entity_matches_keys as keys', 'stmt_by_group', '=', 'collections', '.', 'defaultdict', '(', 'lambda', ':', '[', ']', ')', '# Here we group Statements according to the hierarchy graph', '# components that their agents are part of', 'for', 'stmt_tuple', 'in', 'stmts_this_type', ':', '_', ',', 'stmt', '=', 'stmt_tuple', 'entities', '=', 'self', '.', '_get_entities', '(', 'stmt', ',', 'stmt_type', ',', 'eh', ')', '# At this point we have an entity list', "# If we're dealing with Complexes, sort the entities and use", '# as dict key', 'if', 'stmt_type', '==', 'Complex', ':', "# There shouldn't be any statements of the type", '# e.g., Complex([Foo, None, Bar])', 'assert', 'None', 'not', 'in', 'entities', 'assert', 'len', '(', 'entities', ')', '>', '0', 'entities', '.', 'sort', '(', ')', 'key', '=', 'tuple', '(', 'entities', ')', 'if', 'stmt_tuple', 'not', 'in', 'stmt_by_group', '[', 'key', ']', ':', 'stmt_by_group', '[', 'key', ']', '.', 'append', '(', 'stmt_tuple', ')', 'elif', 'stmt_type', '==', 'Conversion', ':', 'assert', 'len', '(', 'entities', ')', '>', '0', 'key', '=', '(', 'entities', '[', '0', ']', ',', 'tuple', '(', 'sorted', '(', 'entities', '[', '1', ':', 'len', '(', 'stmt', '.', 'obj_from', ')', '+', '1', ']', ')', ')', ',', 'tuple', '(', 'sorted', '(', 'entities', '[', '-', 'len', '(', 'stmt', '.', 'obj_to', ')', ':', ']', ')', ')', ')', 'if', 'stmt_tuple', 'not', 'in', 'stmt_by_group', '[', 'key', ']', ':', 'stmt_by_group', '[', 'key', ']', '.', 'append', '(', 'stmt_tuple', ')', '# Now look at all other statement types', '# All other statements will have one or two entities', 'elif', 'len', '(', 'entities', ')', '==', '1', ':', '# If only one entity, we only need the one key', '# It should not be None!', 'assert', 'None', 'not', 'in', 'entities', 'key', '=', 'tuple', '(', 'entities', ')', 'if', 'stmt_tuple', 'not', 'in', 'stmt_by_group', '[', 'key', ']', ':', 'stmt_by_group', '[', 'key', ']', '.', 'append', '(', 'stmt_tuple', ')', 'else', ':', '# Make sure we only have two entities, and they are not both', '# None', 'key', '=', 'tuple', '(', 'entities', ')', 'assert', 'len', '(', 'key', ')', '==', '2', 'assert', 'key', '!=', '(', 'None', ',', 'None', ')', '# First agent is None; add in the statements, indexed by', '# 2nd', 'if', 'key', '[', '0', ']', 'is', 'None', 'and', 'stmt_tuple', 'not', 'in', 'none_first', '[', 'key', '[', '1', ']', ']', ':', 'none_first', '[', 'key', '[', '1', ']', ']', '.', 'append', '(', 'stmt_tuple', ')', '# Second agent is None; add in the statements, indexed by', '# 1st', 'elif', 'key', '[', '1', ']', 'is', 'None', 'and', 'stmt_tuple', 'not', 'in', 'none_second', '[', 'key', '[', '0', ']', ']', ':', 'none_second', '[', 'key', '[', '0', ']', 
']', '.', 'append', '(', 'stmt_tuple', ')', '# Neither entity is None!', 'elif', 'None', 'not', 'in', 'key', ':', 'if', 'stmt_tuple', 'not', 'in', 'stmt_by_group', '[', 'key', ']', ':', 'stmt_by_group', '[', 'key', ']', '.', 'append', '(', 'stmt_tuple', ')', 'if', 'key', 'not', 'in', 'stmt_by_first', '[', 'key', '[', '0', ']', ']', ':', 'stmt_by_first', '[', 'key', '[', '0', ']', ']', '.', 'append', '(', 'key', ')', 'if', 'key', 'not', 'in', 'stmt_by_second', '[', 'key', '[', '1', ']', ']', ':', 'stmt_by_second', '[', 'key', '[', '1', ']', ']', '.', 'append', '(', 'key', ')', "# When we've gotten here, we should have stmt_by_group entries, and", '# we may or may not have stmt_by_first/second dicts filled out', '# (depending on the statement type).', 'if', 'none_first', ':', '# Get the keys associated with stmts having a None first', '# argument', 'for', 'second_arg', ',', 'stmts', 'in', 'none_first', '.', 'items', '(', ')', ':', '# Look for any statements with this second arg', 'second_arg_keys', '=', 'stmt_by_second', '[', 'second_arg', ']', '# If there are no more specific statements matching this', '# set of statements with a None first arg, then the', '# statements with the None first arg deserve to be in', '# their own group.', 'if', 'not', 'second_arg_keys', ':', 'stmt_by_group', '[', '(', 'None', ',', 'second_arg', ')', ']', '=', 'stmts', '# On the other hand, if there are statements with a matching', '# second arg component, we need to add the None first', '# statements to all groups with the matching second arg', 'for', 'second_arg_key', 'in', 'second_arg_keys', ':', 'stmt_by_group', '[', 'second_arg_key', ']', '+=', 'stmts', '# Now do the corresponding steps for the statements with None as the', '# second argument:', 'if', 'none_second', ':', 'for', 'first_arg', ',', 'stmts', 'in', 'none_second', '.', 'items', '(', ')', ':', '# Look for any statements with this first arg', 'first_arg_keys', '=', 'stmt_by_first', '[', 'first_arg', ']', '# If there are no more specific statements matching this', '# set of statements with a None second arg, then the', '# statements with the None second arg deserve to be in', '# their own group.', 'if', 'not', 'first_arg_keys', ':', 'stmt_by_group', '[', '(', 'first_arg', ',', 'None', ')', ']', '=', 'stmts', '# On the other hand, if there are statements with a matching', '# first arg component, we need to add the None second', '# statements to all groups with the matching first arg', 'for', 'first_arg_key', 'in', 'first_arg_keys', ':', 'stmt_by_group', '[', 'first_arg_key', ']', '+=', 'stmts', 'return', 'stmt_by_group'] | Group Statements of `stmt_type` by their hierarchical relations. | ['Group', 'Statements', 'of', 'stmt_type', 'by', 'their', 'hierarchical', 'relations', '.'] | train | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/__init__.py#L220-L326 |
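Stripped of hierarchy components, the grouping step keys fully specified two-agent statements by an (agent1, agent2) tuple and then folds statements with a None agent into every group that matches on the other agent, or into their own group when nothing more specific exists. A toy sketch with plain tuples:

import collections

stmts = [
    ("MAP2K1", "MAPK1"),   # fully specified
    ("MAP2K1", None),      # missing object
    (None, "MAPK1"),       # missing subject
]

stmt_by_group = collections.defaultdict(list)
by_first = collections.defaultdict(list)
by_second = collections.defaultdict(list)
none_first = collections.defaultdict(list)
none_second = collections.defaultdict(list)

for key in stmts:
    if key[0] is None:
        none_first[key[1]].append(key)
    elif key[1] is None:
        none_second[key[0]].append(key)
    else:
        stmt_by_group[key].append(key)
        by_first[key[0]].append(key)
        by_second[key[1]].append(key)

for second, group in none_first.items():
    matches = by_second[second]
    if not matches:
        stmt_by_group[(None, second)].extend(group)
    for k in matches:
        stmt_by_group[k].extend(group)   # fold into every matching group

for first, group in none_second.items():
    matches = by_first[first]
    if not matches:
        stmt_by_group[(first, None)].extend(group)
    for k in matches:
        stmt_by_group[k].extend(group)

print(dict(stmt_by_group))
# {('MAP2K1', 'MAPK1'): [('MAP2K1', 'MAPK1'), (None, 'MAPK1'), ('MAP2K1', None)]}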
1,474 | saltstack/salt | salt/states/file.py | _gen_keep_files | def _gen_keep_files(name, require, walk_d=None):
'''
Generate the list of files that need to be kept when a dir based function
like directory or recurse has a clean.
'''
def _is_child(path, directory):
'''
Check whether ``path`` is child of ``directory``
'''
path = os.path.abspath(path)
directory = os.path.abspath(directory)
relative = os.path.relpath(path, directory)
return not relative.startswith(os.pardir)
def _add_current_path(path):
_ret = set()
if os.path.isdir(path):
dirs, files = walk_d.get(path, ((), ()))
_ret.add(path)
for _name in files:
_ret.add(os.path.join(path, _name))
for _name in dirs:
_ret.add(os.path.join(path, _name))
return _ret
def _process_by_walk_d(name, ret):
if os.path.isdir(name):
walk_ret.update(_add_current_path(name))
dirs, _ = walk_d.get(name, ((), ()))
for _d in dirs:
p = os.path.join(name, _d)
walk_ret.update(_add_current_path(p))
_process_by_walk_d(p, ret)
def _process(name):
ret = set()
if os.path.isdir(name):
for root, dirs, files in salt.utils.path.os_walk(name):
ret.add(name)
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return ret
keep = set()
if isinstance(require, list):
required_files = [comp for comp in require if 'file' in comp]
for comp in required_files:
for low in __lowstate__:
# A requirement should match either the ID and the name of
# another state.
if low['name'] == comp['file'] or low['__id__'] == comp['file']:
fn = low['name']
fun = low['fun']
if os.path.isdir(fn):
if _is_child(fn, name):
if fun == 'recurse':
fkeep = _gen_recurse_managed_files(**low)[3]
log.debug('Keep from %s: %s', fn, fkeep)
keep.update(fkeep)
elif walk_d:
walk_ret = set()
_process_by_walk_d(fn, walk_ret)
keep.update(walk_ret)
else:
keep.update(_process(fn))
else:
keep.add(fn)
log.debug('Files to keep from required states: %s', list(keep))
return list(keep) | python | def _gen_keep_files(name, require, walk_d=None):
'''
Generate the list of files that need to be kept when a dir based function
like directory or recurse has a clean.
'''
def _is_child(path, directory):
'''
Check whether ``path`` is child of ``directory``
'''
path = os.path.abspath(path)
directory = os.path.abspath(directory)
relative = os.path.relpath(path, directory)
return not relative.startswith(os.pardir)
def _add_current_path(path):
_ret = set()
if os.path.isdir(path):
dirs, files = walk_d.get(path, ((), ()))
_ret.add(path)
for _name in files:
_ret.add(os.path.join(path, _name))
for _name in dirs:
_ret.add(os.path.join(path, _name))
return _ret
def _process_by_walk_d(name, ret):
if os.path.isdir(name):
walk_ret.update(_add_current_path(name))
dirs, _ = walk_d.get(name, ((), ()))
for _d in dirs:
p = os.path.join(name, _d)
walk_ret.update(_add_current_path(p))
_process_by_walk_d(p, ret)
def _process(name):
ret = set()
if os.path.isdir(name):
for root, dirs, files in salt.utils.path.os_walk(name):
ret.add(name)
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return ret
keep = set()
if isinstance(require, list):
required_files = [comp for comp in require if 'file' in comp]
for comp in required_files:
for low in __lowstate__:
# A requirement should match either the ID and the name of
# another state.
if low['name'] == comp['file'] or low['__id__'] == comp['file']:
fn = low['name']
fun = low['fun']
if os.path.isdir(fn):
if _is_child(fn, name):
if fun == 'recurse':
fkeep = _gen_recurse_managed_files(**low)[3]
log.debug('Keep from %s: %s', fn, fkeep)
keep.update(fkeep)
elif walk_d:
walk_ret = set()
_process_by_walk_d(fn, walk_ret)
keep.update(walk_ret)
else:
keep.update(_process(fn))
else:
keep.add(fn)
log.debug('Files to keep from required states: %s', list(keep))
return list(keep) | ['def', '_gen_keep_files', '(', 'name', ',', 'require', ',', 'walk_d', '=', 'None', ')', ':', 'def', '_is_child', '(', 'path', ',', 'directory', ')', ':', "'''\n Check whether ``path`` is child of ``directory``\n '''", 'path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'path', ')', 'directory', '=', 'os', '.', 'path', '.', 'abspath', '(', 'directory', ')', 'relative', '=', 'os', '.', 'path', '.', 'relpath', '(', 'path', ',', 'directory', ')', 'return', 'not', 'relative', '.', 'startswith', '(', 'os', '.', 'pardir', ')', 'def', '_add_current_path', '(', 'path', ')', ':', '_ret', '=', 'set', '(', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'path', ')', ':', 'dirs', ',', 'files', '=', 'walk_d', '.', 'get', '(', 'path', ',', '(', '(', ')', ',', '(', ')', ')', ')', '_ret', '.', 'add', '(', 'path', ')', 'for', '_name', 'in', 'files', ':', '_ret', '.', 'add', '(', 'os', '.', 'path', '.', 'join', '(', 'path', ',', '_name', ')', ')', 'for', '_name', 'in', 'dirs', ':', '_ret', '.', 'add', '(', 'os', '.', 'path', '.', 'join', '(', 'path', ',', '_name', ')', ')', 'return', '_ret', 'def', '_process_by_walk_d', '(', 'name', ',', 'ret', ')', ':', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'name', ')', ':', 'walk_ret', '.', 'update', '(', '_add_current_path', '(', 'name', ')', ')', 'dirs', ',', '_', '=', 'walk_d', '.', 'get', '(', 'name', ',', '(', '(', ')', ',', '(', ')', ')', ')', 'for', '_d', 'in', 'dirs', ':', 'p', '=', 'os', '.', 'path', '.', 'join', '(', 'name', ',', '_d', ')', 'walk_ret', '.', 'update', '(', '_add_current_path', '(', 'p', ')', ')', '_process_by_walk_d', '(', 'p', ',', 'ret', ')', 'def', '_process', '(', 'name', ')', ':', 'ret', '=', 'set', '(', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'name', ')', ':', 'for', 'root', ',', 'dirs', ',', 'files', 'in', 'salt', '.', 'utils', '.', 'path', '.', 'os_walk', '(', 'name', ')', ':', 'ret', '.', 'add', '(', 'name', ')', 'for', 'name', 'in', 'files', ':', 'ret', '.', 'add', '(', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'name', ')', ')', 'for', 'name', 'in', 'dirs', ':', 'ret', '.', 'add', '(', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'name', ')', ')', 'return', 'ret', 'keep', '=', 'set', '(', ')', 'if', 'isinstance', '(', 'require', ',', 'list', ')', ':', 'required_files', '=', '[', 'comp', 'for', 'comp', 'in', 'require', 'if', "'file'", 'in', 'comp', ']', 'for', 'comp', 'in', 'required_files', ':', 'for', 'low', 'in', '__lowstate__', ':', '# A requirement should match either the ID and the name of', '# another state.', 'if', 'low', '[', "'name'", ']', '==', 'comp', '[', "'file'", ']', 'or', 'low', '[', "'__id__'", ']', '==', 'comp', '[', "'file'", ']', ':', 'fn', '=', 'low', '[', "'name'", ']', 'fun', '=', 'low', '[', "'fun'", ']', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'fn', ')', ':', 'if', '_is_child', '(', 'fn', ',', 'name', ')', ':', 'if', 'fun', '==', "'recurse'", ':', 'fkeep', '=', '_gen_recurse_managed_files', '(', '*', '*', 'low', ')', '[', '3', ']', 'log', '.', 'debug', '(', "'Keep from %s: %s'", ',', 'fn', ',', 'fkeep', ')', 'keep', '.', 'update', '(', 'fkeep', ')', 'elif', 'walk_d', ':', 'walk_ret', '=', 'set', '(', ')', '_process_by_walk_d', '(', 'fn', ',', 'walk_ret', ')', 'keep', '.', 'update', '(', 'walk_ret', ')', 'else', ':', 'keep', '.', 'update', '(', '_process', '(', 'fn', ')', ')', 'else', ':', 'keep', '.', 'add', '(', 'fn', ')', 'log', '.', 'debug', '(', "'Files to keep from required states: %s'", ',', 'list', '(', 'keep', ')', ')', 'return', 'list', '(', 'keep', 
')'] | Generate the list of files that need to be kept when a dir based function
like directory or recurse has a clean. | ['Generate', 'the', 'list', 'of', 'files', 'that', 'need', 'to', 'be', 'kept', 'when', 'a', 'dir', 'based', 'function', 'like', 'directory', 'or', 'recurse', 'has', 'a', 'clean', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L559-L631 |
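The inner _is_child helper is a reusable check on its own: a path is a child of a directory when the relative path between their absolute forms does not start with os.pardir.

import os

def is_child(path, directory):
    """True when `path` lives inside `directory`."""
    relative = os.path.relpath(os.path.abspath(path), os.path.abspath(directory))
    return not relative.startswith(os.pardir)

print(is_child("/srv/salt/files/app.conf", "/srv/salt"))  # True
print(is_child("/etc/app.conf", "/srv/salt"))             # False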
1,475 | ngmarchant/oasis | oasis/kad.py | KadaneSampler._sample_item | def _sample_item(self, **kwargs):
"""Sample an item from the pool according to the instrumental
distribution
"""
t = self.t_
if 'fixed_stratum' in kwargs:
stratum_idx = kwargs['fixed_stratum']
else:
stratum_idx = None
if stratum_idx is not None:
# Sample in given stratum
loc = self.strata._sample_in_stratum(stratum_idx, replace=False)
# Record instrumental distribution
if self.record_inst_hist:
self.inst_pmf_[stratum_idx,t] = 1
else:
# Choose stratum based on instrumental distribution
self._calc_inst_pmf()
if self.record_inst_hist:
inst_pmf = self.inst_pmf_[:,t]
else:
inst_pmf = self.inst_pmf_
loc, stratum_idx = self.strata.sample(pmf = inst_pmf, replace=False)
return loc, 1, {'stratum': stratum_idx} | python | def _sample_item(self, **kwargs):
"""Sample an item from the pool according to the instrumental
distribution
"""
t = self.t_
if 'fixed_stratum' in kwargs:
stratum_idx = kwargs['fixed_stratum']
else:
stratum_idx = None
if stratum_idx is not None:
# Sample in given stratum
loc = self.strata._sample_in_stratum(stratum_idx, replace=False)
# Record instrumental distribution
if self.record_inst_hist:
self.inst_pmf_[stratum_idx,t] = 1
else:
# Choose stratum based on instrumental distribution
self._calc_inst_pmf()
if self.record_inst_hist:
inst_pmf = self.inst_pmf_[:,t]
else:
inst_pmf = self.inst_pmf_
loc, stratum_idx = self.strata.sample(pmf = inst_pmf, replace=False)
return loc, 1, {'stratum': stratum_idx} | ['def', '_sample_item', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 't', '=', 'self', '.', 't_', 'if', "'fixed_stratum'", 'in', 'kwargs', ':', 'stratum_idx', '=', 'kwargs', '[', "'fixed_stratum'", ']', 'else', ':', 'stratum_idx', '=', 'None', 'if', 'stratum_idx', 'is', 'not', 'None', ':', '# Sample in given stratum', 'loc', '=', 'self', '.', 'strata', '.', '_sample_in_stratum', '(', 'stratum_idx', ',', 'replace', '=', 'False', ')', '# Record instrumental distribution', 'if', 'self', '.', 'record_inst_hist', ':', 'self', '.', 'inst_pmf_', '[', 'stratum_idx', ',', 't', ']', '=', '1', 'else', ':', '# Choose stratum based on instrumental distribution', 'self', '.', '_calc_inst_pmf', '(', ')', 'if', 'self', '.', 'record_inst_hist', ':', 'inst_pmf', '=', 'self', '.', 'inst_pmf_', '[', ':', ',', 't', ']', 'else', ':', 'inst_pmf', '=', 'self', '.', 'inst_pmf_', 'loc', ',', 'stratum_idx', '=', 'self', '.', 'strata', '.', 'sample', '(', 'pmf', '=', 'inst_pmf', ',', 'replace', '=', 'False', ')', 'return', 'loc', ',', '1', ',', '{', "'stratum'", ':', 'stratum_idx', '}'] | Sample an item from the pool according to the instrumental
distribution | ['Sample', 'an', 'item', 'from', 'the', 'pool', 'according', 'to', 'the', 'instrumental', 'distribution'] | train | https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/kad.py#L105-L131 |
1,476 | Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | uyirmei_constructed | def uyirmei_constructed( mei_idx, uyir_idx):
""" construct uyirmei letter give mei index and uyir index """
idx,idy = mei_idx,uyir_idx
assert ( idy >= 0 and idy < uyir_len() )
assert ( idx >= 0 and idx < 6+mei_len() )
return grantha_agaram_letters[mei_idx]+accent_symbols[uyir_idx] | python | def uyirmei_constructed( mei_idx, uyir_idx):
""" construct uyirmei letter give mei index and uyir index """
idx,idy = mei_idx,uyir_idx
assert ( idy >= 0 and idy < uyir_len() )
assert ( idx >= 0 and idx < 6+mei_len() )
return grantha_agaram_letters[mei_idx]+accent_symbols[uyir_idx] | ['def', 'uyirmei_constructed', '(', 'mei_idx', ',', 'uyir_idx', ')', ':', 'idx', ',', 'idy', '=', 'mei_idx', ',', 'uyir_idx', 'assert', '(', 'idy', '>=', '0', 'and', 'idy', '<', 'uyir_len', '(', ')', ')', 'assert', '(', 'idx', '>=', '0', 'and', 'idx', '<', '6', '+', 'mei_len', '(', ')', ')', 'return', 'grantha_agaram_letters', '[', 'mei_idx', ']', '+', 'accent_symbols', '[', 'uyir_idx', ']'] | construct uyirmei letter give mei index and uyir index | ['construct', 'uyirmei', 'letter', 'give', 'mei', 'index', 'and', 'uyir', 'index'] | train | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L265-L270 |
1,477 | openstack/networking-cisco | networking_cisco/ml2_drivers/ucsm/deprecated_network_driver.py | CiscoUcsmDriver._delete_vlan_profile | def _delete_vlan_profile(self, handle, vlan_id, ucsm_ip):
"""Deletes VLAN Profile from UCS Manager."""
vlan_name = self.make_vlan_name(vlan_id)
vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
vlan_name)
try:
handle.StartTransaction()
obj = handle.GetManagedObject(
None,
self.ucsmsdk.FabricVlan.ClassId(),
{self.ucsmsdk.FabricVlan.DN: vlan_profile_dest})
if obj:
handle.RemoveManagedObject(obj)
handle.CompleteTransaction()
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigFailed(config=vlan_id,
ucsm_ip=ucsm_ip, exc=e) | python | def _delete_vlan_profile(self, handle, vlan_id, ucsm_ip):
"""Deletes VLAN Profile from UCS Manager."""
vlan_name = self.make_vlan_name(vlan_id)
vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
vlan_name)
try:
handle.StartTransaction()
obj = handle.GetManagedObject(
None,
self.ucsmsdk.FabricVlan.ClassId(),
{self.ucsmsdk.FabricVlan.DN: vlan_profile_dest})
if obj:
handle.RemoveManagedObject(obj)
handle.CompleteTransaction()
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigFailed(config=vlan_id,
ucsm_ip=ucsm_ip, exc=e) | ['def', '_delete_vlan_profile', '(', 'self', ',', 'handle', ',', 'vlan_id', ',', 'ucsm_ip', ')', ':', 'vlan_name', '=', 'self', '.', 'make_vlan_name', '(', 'vlan_id', ')', 'vlan_profile_dest', '=', '(', 'const', '.', 'VLAN_PATH', '+', 'const', '.', 'VLAN_PROFILE_PATH_PREFIX', '+', 'vlan_name', ')', 'try', ':', 'handle', '.', 'StartTransaction', '(', ')', 'obj', '=', 'handle', '.', 'GetManagedObject', '(', 'None', ',', 'self', '.', 'ucsmsdk', '.', 'FabricVlan', '.', 'ClassId', '(', ')', ',', '{', 'self', '.', 'ucsmsdk', '.', 'FabricVlan', '.', 'DN', ':', 'vlan_profile_dest', '}', ')', 'if', 'obj', ':', 'handle', '.', 'RemoveManagedObject', '(', 'obj', ')', 'handle', '.', 'CompleteTransaction', '(', ')', 'except', 'Exception', 'as', 'e', ':', '# Raise a Neutron exception. Include a description of', '# the original exception.', 'raise', 'cexc', '.', 'UcsmConfigFailed', '(', 'config', '=', 'vlan_id', ',', 'ucsm_ip', '=', 'ucsm_ip', ',', 'exc', '=', 'e', ')'] | Deletes VLAN Profile from UCS Manager. | ['Deletes', 'VLAN', 'Profile', 'from', 'UCS', 'Manager', '.'] | train | https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/ucsm/deprecated_network_driver.py#L706-L727 |
1,478 | fedora-infra/fedmsg | fedmsg/core.py | FedMsgContext.tail_messages | def tail_messages(self, topic="", passive=False, **kw):
"""
Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`.
Args:
topic (six.text_type): The topic to subscribe to. The default is to
subscribe to all topics.
passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets
instead of connecting to them. Defaults to ``False``.
**kw: Additional keyword arguments. Currently none are used.
Yields:
tuple: A 4-tuple in the form (name, endpoint, topic, message).
"""
if not self.c.get('zmq_enabled', True):
raise ValueError("fedmsg.tail_messages() is only available for "
"zeromq. Use the hub-consumer approach for "
"STOMP or AMQP support.")
poller, subs = self._create_poller(topic=topic, passive=False, **kw)
try:
for msg in self._poll(poller, subs):
yield msg
finally:
self._close_subs(subs) | python | def tail_messages(self, topic="", passive=False, **kw):
"""
Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`.
Args:
topic (six.text_type): The topic to subscribe to. The default is to
subscribe to all topics.
passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets
instead of connecting to them. Defaults to ``False``.
**kw: Additional keyword arguments. Currently none are used.
Yields:
tuple: A 4-tuple in the form (name, endpoint, topic, message).
"""
if not self.c.get('zmq_enabled', True):
raise ValueError("fedmsg.tail_messages() is only available for "
"zeromq. Use the hub-consumer approach for "
"STOMP or AMQP support.")
poller, subs = self._create_poller(topic=topic, passive=False, **kw)
try:
for msg in self._poll(poller, subs):
yield msg
finally:
self._close_subs(subs) | ['def', 'tail_messages', '(', 'self', ',', 'topic', '=', '""', ',', 'passive', '=', 'False', ',', '*', '*', 'kw', ')', ':', 'if', 'not', 'self', '.', 'c', '.', 'get', '(', "'zmq_enabled'", ',', 'True', ')', ':', 'raise', 'ValueError', '(', '"fedmsg.tail_messages() is only available for "', '"zeromq. Use the hub-consumer approach for "', '"STOMP or AMQP support."', ')', 'poller', ',', 'subs', '=', 'self', '.', '_create_poller', '(', 'topic', '=', 'topic', ',', 'passive', '=', 'False', ',', '*', '*', 'kw', ')', 'try', ':', 'for', 'msg', 'in', 'self', '.', '_poll', '(', 'poller', ',', 'subs', ')', ':', 'yield', 'msg', 'finally', ':', 'self', '.', '_close_subs', '(', 'subs', ')'] | Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`.
Args:
topic (six.text_type): The topic to subscribe to. The default is to
subscribe to all topics.
passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets
instead of connecting to them. Defaults to ``False``.
**kw: Additional keyword arguments. Currently none are used.
Yields:
tuple: A 4-tuple in the form (name, endpoint, topic, message). | ['Subscribe', 'to', 'messages', 'published', 'on', 'the', 'sockets', 'listed', 'in', ':', 'ref', ':', 'conf', '-', 'endpoints', '.'] | train | https://github.com/fedora-infra/fedmsg/blob/c21d6b3ce023fc3c0e881c704f5b55fb6e6392d7/fedmsg/core.py#L345-L370 |
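A hedged usage sketch for the generator documented above, through fedmsg's module-level proxy; it assumes a working fedmsg/ZeroMQ configuration with reachable endpoints and blocks until messages arrive.

```python
import fedmsg

# Yields (name, endpoint, topic, message) tuples as messages are published
for name, endpoint, topic, msg in fedmsg.tail_messages():
    print(endpoint, topic)
    print(msg)
```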
1,479 | TAPPGuild/bitjws | bitjws/jws.py | _jws_header | def _jws_header(keyid, algorithm):
"""Produce a base64-encoded JWS header."""
data = {
'typ': 'JWT',
'alg': algorithm.name,
# 'kid' is used to indicate the public part of the key
# used during signing.
'kid': keyid
}
datajson = json.dumps(data, sort_keys=True).encode('utf8')
return base64url_encode(datajson) | python | def _jws_header(keyid, algorithm):
"""Produce a base64-encoded JWS header."""
data = {
'typ': 'JWT',
'alg': algorithm.name,
# 'kid' is used to indicate the public part of the key
# used during signing.
'kid': keyid
}
datajson = json.dumps(data, sort_keys=True).encode('utf8')
return base64url_encode(datajson) | ['def', '_jws_header', '(', 'keyid', ',', 'algorithm', ')', ':', 'data', '=', '{', "'typ'", ':', "'JWT'", ',', "'alg'", ':', 'algorithm', '.', 'name', ',', "# 'kid' is used to indicate the public part of the key", '# used during signing.', "'kid'", ':', 'keyid', '}', 'datajson', '=', 'json', '.', 'dumps', '(', 'data', ',', 'sort_keys', '=', 'True', ')', '.', 'encode', '(', "'utf8'", ')', 'return', 'base64url_encode', '(', 'datajson', ')'] | Produce a base64-encoded JWS header. | ['Produce', 'a', 'base64', '-', 'encoded', 'JWS', 'header', '.'] | train | https://github.com/TAPPGuild/bitjws/blob/bcf943e0c60985da11fb7895a416525e63728c35/bitjws/jws.py#L70-L81 |
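What `_jws_header` emits can be reproduced with the standard library alone; the key id and algorithm name below are illustrative placeholders, and `base64url_encode` is assumed to be the usual padding-stripped urlsafe base64.

```python
import base64
import json

def base64url_encode(data: bytes) -> bytes:
    # urlsafe base64 with the trailing '=' padding removed, as JWS expects
    return base64.urlsafe_b64encode(data).rstrip(b"=")

header = {
    "typ": "JWT",
    "alg": "CUSTOM-BITCOIN-SIGN",                 # placeholder algorithm name
    "kid": "1ExampleBitcoinAddressXXXXXXXXXXXXX"  # public part of the signing key
}
print(base64url_encode(json.dumps(header, sort_keys=True).encode("utf8")))
```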
1,480 | gwastro/pycbc | pycbc/pnutils.py | energy_coefficients | def energy_coefficients(m1, m2, s1z=0, s2z=0, phase_order=-1, spin_order=-1):
""" Return the energy coefficients. This assumes that the system has aligned spins only.
"""
implemented_phase_order = 7
implemented_spin_order = 7
if phase_order > implemented_phase_order:
raise ValueError("pN coeffiecients of that order have not been implemented")
elif phase_order == -1:
phase_order = implemented_phase_order
if spin_order > implemented_spin_order:
raise ValueError("pN coeffiecients of that order have not been implemented")
elif spin_order == -1:
spin_order = implemented_spin_order
qmdef1 = 1.0
qmdef2 = 1.0
M = m1 + m2
dm = (m1-m2)/M
m1M = m1 / M
m2M = m2 / M
s1z = s1z * m1M * m1M
s2z = s2z * m2M * m2M
_, eta = mass1_mass2_to_mchirp_eta(m1, m2)
ecof = numpy.zeros(phase_order+1)
# Orbital terms
if phase_order >= 0:
ecof[0] = 1.0
if phase_order >= 1:
ecof[1] = 0
if phase_order >= 2:
ecof[2] = -(1.0/12.0) * (9.0 + eta)
if phase_order >= 3:
ecof[3] = 0
if phase_order >= 4:
ecof[4] = (-81.0 + 57.0*eta - eta*eta) / 24.0
if phase_order >= 5:
ecof[5] = 0
if phase_order >= 6:
ecof[6] = - 675.0/64.0 + ( 34445.0/576.0 \
- 205.0/96.0 * lal.PI * lal.PI ) * eta \
- (155.0/96.0) *eta * eta - 35.0/5184.0 * eta * eta
# Spin terms
ESO15s1 = 8.0/3.0 + 2.0*m2/m1
ESO15s2 = 8.0/3.0 + 2.0*m1/m2
ESS2 = 1.0 / eta
EQM2s1 = qmdef1/2.0/m1M/m1M
EQM2s1L = -qmdef1*3.0/2.0/m1M/m1M
#EQM2s2 = qmdef2/2.0/m2M/m2M
EQM2s2L = -qmdef2*3.0/2.0/m2M/m2M
ESO25s1 = 11.0 - 61.0*eta/9.0 + (dm/m1M) * (-3.0 + 10.*eta/3.0)
ESO25s2 = 11.0 - 61.0*eta/9.0 + (dm/m2M) * (3.0 - 10.*eta/3.0)
ESO35s1 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 + (dm/m1M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0)
ESO35s2 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 - (dm/m2M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0)
if spin_order >=3:
ecof[3] += ESO15s1 * s1z + ESO15s2 * s2z
if spin_order >=4:
ecof[4] += ESS2 * (s1z*s2z - 3.0*s1z*s2z)
ecof[4] += EQM2s1*s1z*s1z + EQM2s1*s2z*s2z + EQM2s1L*s1z*s1z + EQM2s2L*s2z*s2z
if spin_order >=5:
ecof[5] = ESO25s1*s1z + ESO25s2*s2z
if spin_order >=7:
ecof[7] += ESO35s1*s1z + ESO35s2*s2z
return ecof | python | def energy_coefficients(m1, m2, s1z=0, s2z=0, phase_order=-1, spin_order=-1):
""" Return the energy coefficients. This assumes that the system has aligned spins only.
"""
implemented_phase_order = 7
implemented_spin_order = 7
if phase_order > implemented_phase_order:
raise ValueError("pN coeffiecients of that order have not been implemented")
elif phase_order == -1:
phase_order = implemented_phase_order
if spin_order > implemented_spin_order:
raise ValueError("pN coeffiecients of that order have not been implemented")
elif spin_order == -1:
spin_order = implemented_spin_order
qmdef1 = 1.0
qmdef2 = 1.0
M = m1 + m2
dm = (m1-m2)/M
m1M = m1 / M
m2M = m2 / M
s1z = s1z * m1M * m1M
s2z = s2z * m2M * m2M
_, eta = mass1_mass2_to_mchirp_eta(m1, m2)
ecof = numpy.zeros(phase_order+1)
# Orbital terms
if phase_order >= 0:
ecof[0] = 1.0
if phase_order >= 1:
ecof[1] = 0
if phase_order >= 2:
ecof[2] = -(1.0/12.0) * (9.0 + eta)
if phase_order >= 3:
ecof[3] = 0
if phase_order >= 4:
ecof[4] = (-81.0 + 57.0*eta - eta*eta) / 24.0
if phase_order >= 5:
ecof[5] = 0
if phase_order >= 6:
ecof[6] = - 675.0/64.0 + ( 34445.0/576.0 \
- 205.0/96.0 * lal.PI * lal.PI ) * eta \
- (155.0/96.0) *eta * eta - 35.0/5184.0 * eta * eta
# Spin terms
ESO15s1 = 8.0/3.0 + 2.0*m2/m1
ESO15s2 = 8.0/3.0 + 2.0*m1/m2
ESS2 = 1.0 / eta
EQM2s1 = qmdef1/2.0/m1M/m1M
EQM2s1L = -qmdef1*3.0/2.0/m1M/m1M
#EQM2s2 = qmdef2/2.0/m2M/m2M
EQM2s2L = -qmdef2*3.0/2.0/m2M/m2M
ESO25s1 = 11.0 - 61.0*eta/9.0 + (dm/m1M) * (-3.0 + 10.*eta/3.0)
ESO25s2 = 11.0 - 61.0*eta/9.0 + (dm/m2M) * (3.0 - 10.*eta/3.0)
ESO35s1 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 + (dm/m1M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0)
ESO35s2 = 135.0/4.0 - 367.0*eta/4.0 + 29.0*eta*eta/12.0 - (dm/m2M) * (-27.0/4.0 + 39.0*eta - 5.0*eta*eta/4.0)
if spin_order >=3:
ecof[3] += ESO15s1 * s1z + ESO15s2 * s2z
if spin_order >=4:
ecof[4] += ESS2 * (s1z*s2z - 3.0*s1z*s2z)
ecof[4] += EQM2s1*s1z*s1z + EQM2s1*s2z*s2z + EQM2s1L*s1z*s1z + EQM2s2L*s2z*s2z
if spin_order >=5:
ecof[5] = ESO25s1*s1z + ESO25s2*s2z
if spin_order >=7:
ecof[7] += ESO35s1*s1z + ESO35s2*s2z
return ecof | ['def', 'energy_coefficients', '(', 'm1', ',', 'm2', ',', 's1z', '=', '0', ',', 's2z', '=', '0', ',', 'phase_order', '=', '-', '1', ',', 'spin_order', '=', '-', '1', ')', ':', 'implemented_phase_order', '=', '7', 'implemented_spin_order', '=', '7', 'if', 'phase_order', '>', 'implemented_phase_order', ':', 'raise', 'ValueError', '(', '"pN coeffiecients of that order have not been implemented"', ')', 'elif', 'phase_order', '==', '-', '1', ':', 'phase_order', '=', 'implemented_phase_order', 'if', 'spin_order', '>', 'implemented_spin_order', ':', 'raise', 'ValueError', '(', '"pN coeffiecients of that order have not been implemented"', ')', 'elif', 'spin_order', '==', '-', '1', ':', 'spin_order', '=', 'implemented_spin_order', 'qmdef1', '=', '1.0', 'qmdef2', '=', '1.0', 'M', '=', 'm1', '+', 'm2', 'dm', '=', '(', 'm1', '-', 'm2', ')', '/', 'M', 'm1M', '=', 'm1', '/', 'M', 'm2M', '=', 'm2', '/', 'M', 's1z', '=', 's1z', '*', 'm1M', '*', 'm1M', 's2z', '=', 's2z', '*', 'm2M', '*', 'm2M', '_', ',', 'eta', '=', 'mass1_mass2_to_mchirp_eta', '(', 'm1', ',', 'm2', ')', 'ecof', '=', 'numpy', '.', 'zeros', '(', 'phase_order', '+', '1', ')', '# Orbital terms', 'if', 'phase_order', '>=', '0', ':', 'ecof', '[', '0', ']', '=', '1.0', 'if', 'phase_order', '>=', '1', ':', 'ecof', '[', '1', ']', '=', '0', 'if', 'phase_order', '>=', '2', ':', 'ecof', '[', '2', ']', '=', '-', '(', '1.0', '/', '12.0', ')', '*', '(', '9.0', '+', 'eta', ')', 'if', 'phase_order', '>=', '3', ':', 'ecof', '[', '3', ']', '=', '0', 'if', 'phase_order', '>=', '4', ':', 'ecof', '[', '4', ']', '=', '(', '-', '81.0', '+', '57.0', '*', 'eta', '-', 'eta', '*', 'eta', ')', '/', '24.0', 'if', 'phase_order', '>=', '5', ':', 'ecof', '[', '5', ']', '=', '0', 'if', 'phase_order', '>=', '6', ':', 'ecof', '[', '6', ']', '=', '-', '675.0', '/', '64.0', '+', '(', '34445.0', '/', '576.0', '-', '205.0', '/', '96.0', '*', 'lal', '.', 'PI', '*', 'lal', '.', 'PI', ')', '*', 'eta', '-', '(', '155.0', '/', '96.0', ')', '*', 'eta', '*', 'eta', '-', '35.0', '/', '5184.0', '*', 'eta', '*', 'eta', '# Spin terms', 'ESO15s1', '=', '8.0', '/', '3.0', '+', '2.0', '*', 'm2', '/', 'm1', 'ESO15s2', '=', '8.0', '/', '3.0', '+', '2.0', '*', 'm1', '/', 'm2', 'ESS2', '=', '1.0', '/', 'eta', 'EQM2s1', '=', 'qmdef1', '/', '2.0', '/', 'm1M', '/', 'm1M', 'EQM2s1L', '=', '-', 'qmdef1', '*', '3.0', '/', '2.0', '/', 'm1M', '/', 'm1M', '#EQM2s2 = qmdef2/2.0/m2M/m2M', 'EQM2s2L', '=', '-', 'qmdef2', '*', '3.0', '/', '2.0', '/', 'm2M', '/', 'm2M', 'ESO25s1', '=', '11.0', '-', '61.0', '*', 'eta', '/', '9.0', '+', '(', 'dm', '/', 'm1M', ')', '*', '(', '-', '3.0', '+', '10.', '*', 'eta', '/', '3.0', ')', 'ESO25s2', '=', '11.0', '-', '61.0', '*', 'eta', '/', '9.0', '+', '(', 'dm', '/', 'm2M', ')', '*', '(', '3.0', '-', '10.', '*', 'eta', '/', '3.0', ')', 'ESO35s1', '=', '135.0', '/', '4.0', '-', '367.0', '*', 'eta', '/', '4.0', '+', '29.0', '*', 'eta', '*', 'eta', '/', '12.0', '+', '(', 'dm', '/', 'm1M', ')', '*', '(', '-', '27.0', '/', '4.0', '+', '39.0', '*', 'eta', '-', '5.0', '*', 'eta', '*', 'eta', '/', '4.0', ')', 'ESO35s2', '=', '135.0', '/', '4.0', '-', '367.0', '*', 'eta', '/', '4.0', '+', '29.0', '*', 'eta', '*', 'eta', '/', '12.0', '-', '(', 'dm', '/', 'm2M', ')', '*', '(', '-', '27.0', '/', '4.0', '+', '39.0', '*', 'eta', '-', '5.0', '*', 'eta', '*', 'eta', '/', '4.0', ')', 'if', 'spin_order', '>=', '3', ':', 'ecof', '[', '3', ']', '+=', 'ESO15s1', '*', 's1z', '+', 'ESO15s2', '*', 's2z', 'if', 'spin_order', '>=', '4', ':', 'ecof', '[', '4', ']', '+=', 'ESS2', '*', 
'(', 's1z', '*', 's2z', '-', '3.0', '*', 's1z', '*', 's2z', ')', 'ecof', '[', '4', ']', '+=', 'EQM2s1', '*', 's1z', '*', 's1z', '+', 'EQM2s1', '*', 's2z', '*', 's2z', '+', 'EQM2s1L', '*', 's1z', '*', 's1z', '+', 'EQM2s2L', '*', 's2z', '*', 's2z', 'if', 'spin_order', '>=', '5', ':', 'ecof', '[', '5', ']', '=', 'ESO25s1', '*', 's1z', '+', 'ESO25s2', '*', 's2z', 'if', 'spin_order', '>=', '7', ':', 'ecof', '[', '7', ']', '+=', 'ESO35s1', '*', 's1z', '+', 'ESO35s2', '*', 's2z', 'return', 'ecof'] | Return the energy coefficients. This assumes that the system has aligned spins only. | ['Return', 'the', 'energy', 'coefficients', '.', 'This', 'assumes', 'that', 'the', 'system', 'has', 'aligned', 'spins', 'only', '.'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pnutils.py#L689-L762 |
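A schematic of how a coefficient array like the one returned above is typically consumed: the energy is expanded as a series in the PN velocity parameter v and evaluated term by term (the coefficient values and overall normalisation here are illustrative, not pycbc's exact convention).

```python
import numpy as np

def pn_series(ecof, v):
    """Evaluate sum_k ecof[k] * v**k for one or more velocities v."""
    v = np.atleast_1d(np.asarray(v, dtype=float))
    powers = np.vander(v, N=len(ecof), increasing=True)  # columns are v**0 .. v**(len-1)
    return powers @ np.asarray(ecof)

# Hypothetical coefficient array with the shape energy_coefficients returns
ecof = np.array([1.0, 0.0, -0.77, 0.0, -2.5, 0.0, -8.3, 0.0])
print(pn_series(ecof, [0.1, 0.2, 0.3]))
```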
1,481 | manolomartinez/greg | greg/classes.py | Session.retrieve_data_directory | def retrieve_data_directory(self):
"""
Retrieve the data directory
Look first into config_filename_global
then into config_filename_user. The latter takes preeminence.
"""
args = self.args
try:
if args['datadirectory']:
aux.ensure_dir(args['datadirectory'])
return args['datadirectory']
except KeyError:
pass
config = configparser.ConfigParser()
config.read([config_filename_global, self.config_filename_user])
section = config.default_section
data_path = config.get(section, 'Data directory',
fallback='~/.local/share/greg')
data_path_expanded = os.path.expanduser(data_path)
aux.ensure_dir(data_path_expanded)
return os.path.expanduser(data_path_expanded) | python | def retrieve_data_directory(self):
"""
Retrieve the data directory
Look first into config_filename_global
then into config_filename_user. The latter takes preeminence.
"""
args = self.args
try:
if args['datadirectory']:
aux.ensure_dir(args['datadirectory'])
return args['datadirectory']
except KeyError:
pass
config = configparser.ConfigParser()
config.read([config_filename_global, self.config_filename_user])
section = config.default_section
data_path = config.get(section, 'Data directory',
fallback='~/.local/share/greg')
data_path_expanded = os.path.expanduser(data_path)
aux.ensure_dir(data_path_expanded)
return os.path.expanduser(data_path_expanded) | ['def', 'retrieve_data_directory', '(', 'self', ')', ':', 'args', '=', 'self', '.', 'args', 'try', ':', 'if', 'args', '[', "'datadirectory'", ']', ':', 'aux', '.', 'ensure_dir', '(', 'args', '[', "'datadirectory'", ']', ')', 'return', 'args', '[', "'datadirectory'", ']', 'except', 'KeyError', ':', 'pass', 'config', '=', 'configparser', '.', 'ConfigParser', '(', ')', 'config', '.', 'read', '(', '[', 'config_filename_global', ',', 'self', '.', 'config_filename_user', ']', ')', 'section', '=', 'config', '.', 'default_section', 'data_path', '=', 'config', '.', 'get', '(', 'section', ',', "'Data directory'", ',', 'fallback', '=', "'~/.local/share/greg'", ')', 'data_path_expanded', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'data_path', ')', 'aux', '.', 'ensure_dir', '(', 'data_path_expanded', ')', 'return', 'os', '.', 'path', '.', 'expanduser', '(', 'data_path_expanded', ')'] | Retrieve the data directory
Look first into config_filename_global
then into config_filename_user. The latter takes preeminence. | ['Retrieve', 'the', 'data', 'directory', 'Look', 'first', 'into', 'config_filename_global', 'then', 'into', 'config_filename_user', '.', 'The', 'latter', 'takes', 'preeminence', '.'] | train | https://github.com/manolomartinez/greg/blob/63bb24197c13087a01963ac439cd8380007d9467/greg/classes.py#L71-L91 |
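The lookup order the docstring describes, as a standalone helper: an explicit command-line value wins, otherwise the merged global and user config files are read with a hard-coded fallback, and the chosen path is expanded and created (the config file names below are placeholders).

```python
import configparser
import os

def data_directory(cli_value=None,
                   config_files=("/etc/greg.conf",
                                 os.path.expanduser("~/.config/greg/greg.conf"))):
    if cli_value:  # a command-line argument takes priority
        os.makedirs(cli_value, exist_ok=True)
        return cli_value
    config = configparser.ConfigParser()
    config.read(config_files)  # later files override earlier ones
    path = config.get(config.default_section, "Data directory",
                      fallback="~/.local/share/greg")
    path = os.path.expanduser(path)
    os.makedirs(path, exist_ok=True)
    return path

print(data_directory())
```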
1,482 | Robin8Put/pmes | ams/utils/pagination.py | Paginator.get_range | def get_range(self):
""" Get range """
if not self.page:
return (1, self.last_blocks[self.coinid])
# Get start of the range
start = self.page * self.limit
# Get finish of the range
end = (self.page + 1) * self.limit
if start > self.last_blocks[self.coinid]:
return (1,1)
if end > self.last_blocks[self.coinid]:
return (start, self.last_blocks[self.coinid])
return (start, end) | python | def get_range(self):
""" Get range """
if not self.page:
return (1, self.last_blocks[self.coinid])
# Get start of the range
start = self.page * self.limit
# Get finish of the range
end = (self.page + 1) * self.limit
if start > self.last_blocks[self.coinid]:
return (1,1)
if end > self.last_blocks[self.coinid]:
return (start, self.last_blocks[self.coinid])
return (start, end) | ['def', 'get_range', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'page', ':', 'return', '(', '1', ',', 'self', '.', 'last_blocks', '[', 'self', '.', 'coinid', ']', ')', '# Get start of the range', 'start', '=', 'self', '.', 'page', '*', 'self', '.', 'limit', '# Get finish of the range', 'end', '=', '(', 'self', '.', 'page', '+', '1', ')', '*', 'self', '.', 'limit', 'if', 'start', '>', 'self', '.', 'last_blocks', '[', 'self', '.', 'coinid', ']', ':', 'return', '(', '1', ',', '1', ')', 'if', 'end', '>', 'self', '.', 'last_blocks', '[', 'self', '.', 'coinid', ']', ':', 'return', '(', 'start', ',', 'self', '.', 'last_blocks', '[', 'self', '.', 'coinid', ']', ')', 'return', '(', 'start', ',', 'end', ')'] | Get range | ['Get', 'range'] | train | https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/ams/utils/pagination.py#L28-L44 |
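The same page/limit arithmetic as a pure function, with a hypothetical last-block number standing in for the per-coin lookup; a falsy page returns the full range and an out-of-range page collapses to (1, 1).

```python
def block_range(page, limit, last_block):
    """Return the (start, end) block range for the given page, clamped to last_block."""
    if not page:
        return (1, last_block)
    start = page * limit
    end = (page + 1) * limit
    if start > last_block:
        return (1, 1)
    if end > last_block:
        return (start, last_block)
    return (start, end)

print(block_range(0, 50, 730))   # (1, 730)
print(block_range(3, 50, 730))   # (150, 200)
print(block_range(14, 50, 730))  # (700, 730)
print(block_range(20, 50, 730))  # (1, 1)
```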
1,483 | hvac/hvac | hvac/v1/__init__.py | Client.create_vault_ec2_client_configuration | def create_vault_ec2_client_configuration(self, access_key, secret_key, endpoint=None, mount_point='aws-ec2'):
"""POST /auth/<mount_point>/config/client
Configure the credentials required to perform API calls to AWS as well as custom endpoints to talk to AWS APIs.
The instance identity document fetched from the PKCS#7 signature will provide the EC2 instance ID. The
credentials configured using this endpoint will be used to query the status of the instances via
DescribeInstances API. If static credentials are not provided using this endpoint, then the credentials will be
retrieved from the environment variables AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION respectively. If the
credentials are still not found and if the method is configured on an EC2 instance with metadata querying
capabilities, the credentials are fetched automatically
:param access_key: AWS Access key with permissions to query AWS APIs. The permissions required depend on the
specific configurations. If using the iam auth method without inferencing, then no credentials are
necessary. If using the ec2 auth method or using the iam auth method with inferencing, then these
credentials need access to ec2:DescribeInstances. If additionally a bound_iam_role is specified, then these
credentials also need access to iam:GetInstanceProfile. If, however, an alternate sts configuration is set
for the target account, then the credentials must be permissioned to call sts:AssumeRole on the configured
role, and that role must have the permissions described here.
:type access_key: str|unicode
:param secret_key: AWS Secret key with permissions to query AWS APIs.
:type secret_key: str|unicode
:param endpoint: URL to override the default generated endpoint for making AWS EC2 API calls.
:type endpoint: str|unicode
:param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2"
is the default argument for backwards comparability within this module.
:type mount_point: str|unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {
'access_key': access_key,
'secret_key': secret_key
}
if endpoint is not None:
params['endpoint'] = endpoint
return self._adapter.post('/v1/auth/{0}/config/client'.format(mount_point), json=params) | python | def create_vault_ec2_client_configuration(self, access_key, secret_key, endpoint=None, mount_point='aws-ec2'):
"""POST /auth/<mount_point>/config/client
Configure the credentials required to perform API calls to AWS as well as custom endpoints to talk to AWS APIs.
The instance identity document fetched from the PKCS#7 signature will provide the EC2 instance ID. The
credentials configured using this endpoint will be used to query the status of the instances via
DescribeInstances API. If static credentials are not provided using this endpoint, then the credentials will be
retrieved from the environment variables AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION respectively. If the
credentials are still not found and if the method is configured on an EC2 instance with metadata querying
capabilities, the credentials are fetched automatically
:param access_key: AWS Access key with permissions to query AWS APIs. The permissions required depend on the
specific configurations. If using the iam auth method without inferencing, then no credentials are
necessary. If using the ec2 auth method or using the iam auth method with inferencing, then these
credentials need access to ec2:DescribeInstances. If additionally a bound_iam_role is specified, then these
credentials also need access to iam:GetInstanceProfile. If, however, an alternate sts configuration is set
for the target account, then the credentials must be permissioned to call sts:AssumeRole on the configured
role, and that role must have the permissions described here.
:type access_key: str|unicode
:param secret_key: AWS Secret key with permissions to query AWS APIs.
:type secret_key: str|unicode
:param endpoint: URL to override the default generated endpoint for making AWS EC2 API calls.
:type endpoint: str|unicode
:param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2"
is the default argument for backwards comparability within this module.
:type mount_point: str|unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {
'access_key': access_key,
'secret_key': secret_key
}
if endpoint is not None:
params['endpoint'] = endpoint
return self._adapter.post('/v1/auth/{0}/config/client'.format(mount_point), json=params) | ['def', 'create_vault_ec2_client_configuration', '(', 'self', ',', 'access_key', ',', 'secret_key', ',', 'endpoint', '=', 'None', ',', 'mount_point', '=', "'aws-ec2'", ')', ':', 'params', '=', '{', "'access_key'", ':', 'access_key', ',', "'secret_key'", ':', 'secret_key', '}', 'if', 'endpoint', 'is', 'not', 'None', ':', 'params', '[', "'endpoint'", ']', '=', 'endpoint', 'return', 'self', '.', '_adapter', '.', 'post', '(', "'/v1/auth/{0}/config/client'", '.', 'format', '(', 'mount_point', ')', ',', 'json', '=', 'params', ')'] | POST /auth/<mount_point>/config/client
Configure the credentials required to perform API calls to AWS as well as custom endpoints to talk to AWS APIs.
The instance identity document fetched from the PKCS#7 signature will provide the EC2 instance ID. The
credentials configured using this endpoint will be used to query the status of the instances via
DescribeInstances API. If static credentials are not provided using this endpoint, then the credentials will be
retrieved from the environment variables AWS_ACCESS_KEY, AWS_SECRET_KEY and AWS_REGION respectively. If the
credentials are still not found and if the method is configured on an EC2 instance with metadata querying
capabilities, the credentials are fetched automatically
:param access_key: AWS Access key with permissions to query AWS APIs. The permissions required depend on the
specific configurations. If using the iam auth method without inferencing, then no credentials are
necessary. If using the ec2 auth method or using the iam auth method with inferencing, then these
credentials need access to ec2:DescribeInstances. If additionally a bound_iam_role is specified, then these
credentials also need access to iam:GetInstanceProfile. If, however, an alternate sts configuration is set
for the target account, then the credentials must be permissioned to call sts:AssumeRole on the configured
role, and that role must have the permissions described here.
:type access_key: str|unicode
:param secret_key: AWS Secret key with permissions to query AWS APIs.
:type secret_key: str|unicode
:param endpoint: URL to override the default generated endpoint for making AWS EC2 API calls.
:type endpoint: str|unicode
:param mount_point: The "path" the AWS auth backend was mounted on. Vault currently defaults to "aws". "aws-ec2"
is the default argument for backwards comparability within this module.
:type mount_point: str|unicode
:return: The response of the request.
:rtype: requests.Response | ['POST', '/', 'auth', '/', '<mount_point', '>', '/', 'config', '/', 'client'] | train | https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/v1/__init__.py#L906-L942 |
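A hedged call sketch for the method documented above; the Vault address, token and AWS credentials are placeholders, and a reachable Vault server with the aws-ec2 auth backend mounted is assumed.

```python
import hvac

client = hvac.Client(url="https://vault.example.com:8200", token="s.placeholder-token")

# Store the AWS credentials the aws-ec2 backend will use for DescribeInstances checks
response = client.create_vault_ec2_client_configuration(
    access_key="AKIAEXAMPLEPLACEHOLDER",
    secret_key="example-secret-key",
    mount_point="aws-ec2",
)
print(response)
```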
1,484 | eonpatapon/contrail-api-cli | contrail_api_cli/resource.py | Resource.parent | def parent(self, resource):
"""Set parent resource
:param resource: parent resource
:type resource: Resource
:raises ResourceNotFound: resource not found on the API
"""
resource.check()
self['parent_type'] = resource.type
self['parent_uuid'] = resource.uuid | python | def parent(self, resource):
"""Set parent resource
:param resource: parent resource
:type resource: Resource
:raises ResourceNotFound: resource not found on the API
"""
resource.check()
self['parent_type'] = resource.type
self['parent_uuid'] = resource.uuid | ['def', 'parent', '(', 'self', ',', 'resource', ')', ':', 'resource', '.', 'check', '(', ')', 'self', '[', "'parent_type'", ']', '=', 'resource', '.', 'type', 'self', '[', "'parent_uuid'", ']', '=', 'resource', '.', 'uuid'] | Set parent resource
:param resource: parent resource
:type resource: Resource
:raises ResourceNotFound: resource not found on the API | ['Set', 'parent', 'resource'] | train | https://github.com/eonpatapon/contrail-api-cli/blob/1571bf523fa054f3d6bf83dba43a224fea173a73/contrail_api_cli/resource.py#L550-L560 |
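Sketch of the setter above in use; the resource types and fq_names are hypothetical, an already-connected contrail-api-cli session is assumed, and both resources must exist on the API server since `check()` is called.

```python
from contrail_api_cli.resource import Resource

project = Resource('project', fq_name='default-domain:admin')
vn = Resource('virtual-network', fq_name='default-domain:admin:my-net')

vn.parent = project  # verifies the project exists, then records parent_type/parent_uuid
print(vn['parent_type'], vn['parent_uuid'])
```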
1,485 | cloudant/python-cloudant | src/cloudant/database.py | CouchDatabase._get_view_result | def _get_view_result(view, raw_result, **kwargs):
""" Get view results helper. """
if raw_result:
return view(**kwargs)
if kwargs:
return Result(view, **kwargs)
return view.result | python | def _get_view_result(view, raw_result, **kwargs):
""" Get view results helper. """
if raw_result:
return view(**kwargs)
if kwargs:
return Result(view, **kwargs)
return view.result | ['def', '_get_view_result', '(', 'view', ',', 'raw_result', ',', '*', '*', 'kwargs', ')', ':', 'if', 'raw_result', ':', 'return', 'view', '(', '*', '*', 'kwargs', ')', 'if', 'kwargs', ':', 'return', 'Result', '(', 'view', ',', '*', '*', 'kwargs', ')', 'return', 'view', '.', 'result'] | Get view results helper. | ['Get', 'view', 'results', 'helper', '.'] | train | https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/database.py#L400-L407 |
1,486 | wesyoung/pyzyre | zyre/_zyre_ctypes.py | Zyre.set_header | def set_header(self, name, format, *args):
"""
Set node header; these are provided to other nodes during discovery
and come in each ENTER message.
"""
return lib.zyre_set_header(self._as_parameter_, name, format, *args) | python | def set_header(self, name, format, *args):
"""
Set node header; these are provided to other nodes during discovery
and come in each ENTER message.
"""
return lib.zyre_set_header(self._as_parameter_, name, format, *args) | ['def', 'set_header', '(', 'self', ',', 'name', ',', 'format', ',', '*', 'args', ')', ':', 'return', 'lib', '.', 'zyre_set_header', '(', 'self', '.', '_as_parameter_', ',', 'name', ',', 'format', ',', '*', 'args', ')'] | Set node header; these are provided to other nodes during discovery
and come in each ENTER message. | ['Set', 'node', 'header', ';', 'these', 'are', 'provided', 'to', 'other', 'nodes', 'during', 'discovery', 'and', 'come', 'in', 'each', 'ENTER', 'message', '.'] | train | https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/zyre/_zyre_ctypes.py#L226-L231 |
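A usage sketch against the ctypes binding above; the header name and value are arbitrary, byte strings are used because arguments go straight to the C API, and headers must be set before the node is started so peers see them in the ENTER message.

```python
from zyre import Zyre

node = Zyre(b"node-1")
node.set_header(b"X-ROLE", b"%s", b"worker")  # printf-style format plus value
node.start()
# ... exchange messages with peers ...
node.stop()
```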
1,487 | KrishnaswamyLab/PHATE | Python/phate/plot.py | scatter2d | def scatter2d(data, **kwargs):
"""Create a 2D scatter plot
Builds upon `matplotlib.pyplot.scatter` with nice defaults
and handles categorical colors / legends better.
Parameters
----------
data : array-like, shape=[n_samples, n_features]
Input data. Only the first two components will be used.
c : list-like or None, optional (default: None)
Color vector. Can be a single color value (RGB, RGBA, or named
matplotlib colors), an array of these of length n_samples, or a list of
discrete or continuous values of any data type. If `c` is not a single
or list of matplotlib colors, the values in `c` will be used to
populate the legend / colorbar with colors from `cmap`
cmap : `matplotlib` colormap, str, dict or None, optional (default: None)
matplotlib colormap. If None, uses `tab20` for discrete data and
`inferno` for continuous data. If a dictionary, expects one key
for every unique value in `c`, where values are valid matplotlib colors
(hsv, rbg, rgba, or named colors)
s : float, optional (default: 1)
Point size.
discrete : bool or None, optional (default: None)
If True, the legend is categorical. If False, the legend is a colorbar.
If None, discreteness is detected automatically. Data containing
non-numeric `c` is always discrete, and numeric data with 20 or less
unique values is discrete.
ax : `matplotlib.Axes` or None, optional (default: None)
axis on which to plot. If None, an axis is created
legend : bool, optional (default: True)
States whether or not to create a legend. If data is continuous,
the legend is a colorbar.
figsize : tuple, optional (default: None)
Tuple of floats for creation of new `matplotlib` figure. Only used if
`ax` is None.
xticks : True, False, or list-like (default: False)
If True, keeps default x ticks. If False, removes x ticks.
If a list, sets custom x ticks
yticks : True, False, or list-like (default: False)
If True, keeps default y ticks. If False, removes y ticks.
If a list, sets custom y ticks
zticks : True, False, or list-like (default: False)
If True, keeps default z ticks. If False, removes z ticks.
If a list, sets custom z ticks. Only used for 3D plots.
xticklabels : True, False, or list-like (default: True)
If True, keeps default x tick labels. If False, removes x tick labels.
If a list, sets custom x tick labels
yticklabels : True, False, or list-like (default: True)
If True, keeps default y tick labels. If False, removes y tick labels.
If a list, sets custom y tick labels
zticklabels : True, False, or list-like (default: True)
If True, keeps default z tick labels. If False, removes z tick labels.
If a list, sets custom z tick labels. Only used for 3D plots.
label_prefix : str or None (default: "PHATE")
Prefix for all axis labels. Axes will be labelled `label_prefix`1,
`label_prefix`2, etc. Can be overriden by setting `xlabel`,
`ylabel`, and `zlabel`.
xlabel : str or None (default : None)
Label for the x axis. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set.
ylabel : str or None (default : None)
Label for the y axis. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set.
zlabel : str or None (default : None)
Label for the z axis. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set.
Only used for 3D plots.
title : str or None (default: None)
axis title. If None, no title is set.
legend_title : str (default: "")
title for the colorbar of legend
legend_loc : int or string or pair of floats, default: 'best'
Matplotlib legend location. Only used for discrete data.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
filename : str or None (default: None)
file to which the output is saved
dpi : int or None, optional (default: None)
The resolution in dots per inch. If None it will default to the value
savefig.dpi in the matplotlibrc file. If 'figure' it will set the dpi
to be the value of the figure. Only used if filename is not None.
**plot_kwargs : keyword arguments
Extra arguments passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : `matplotlib.Axes`
axis on which plot was drawn
Examples
--------
>>> import phate
>>> import matplotlib.pyplot as plt
>>> ###
>>> # Running PHATE
>>> ###
>>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=100, n_branch=20,
... branch_length=100)
>>> tree_data.shape
(2000, 100)
>>> phate_operator = phate.PHATE(k=5, a=20, t=150)
>>> tree_phate = phate_operator.fit_transform(tree_data)
>>> tree_phate.shape
(2000, 2)
>>> ###
>>> # Plotting using phate.plot
>>> ###
>>> phate.plot.scatter2d(tree_phate, c=tree_clusters)
>>> # You can also pass the PHATE operator instead of data
>>> phate.plot.scatter2d(phate_operator, c=tree_clusters)
>>> phate.plot.scatter3d(phate_operator, c=tree_clusters)
>>> ###
>>> # Using a cmap dictionary
>>> ###
>>> import numpy as np
>>> X = np.random.normal(0,1,[1000,2])
>>> c = np.random.choice(['a','b'], 1000, replace=True)
>>> X[c=='a'] += 10
>>> phate.plot.scatter2d(X, c=c, cmap={'a' : [1,0,0,1], 'b' : 'xkcd:sky blue'})
"""
warnings.warn("`phate.plot.scatter2d` is deprecated. "
"Use `scprep.plot.scatter2d` instead.",
FutureWarning)
data = _get_plot_data(data, ndim=2)
return scprep.plot.scatter2d(data, **kwargs) | python | def scatter2d(data, **kwargs):
"""Create a 2D scatter plot
Builds upon `matplotlib.pyplot.scatter` with nice defaults
and handles categorical colors / legends better.
Parameters
----------
data : array-like, shape=[n_samples, n_features]
Input data. Only the first two components will be used.
c : list-like or None, optional (default: None)
Color vector. Can be a single color value (RGB, RGBA, or named
matplotlib colors), an array of these of length n_samples, or a list of
discrete or continuous values of any data type. If `c` is not a single
or list of matplotlib colors, the values in `c` will be used to
populate the legend / colorbar with colors from `cmap`
cmap : `matplotlib` colormap, str, dict or None, optional (default: None)
matplotlib colormap. If None, uses `tab20` for discrete data and
`inferno` for continuous data. If a dictionary, expects one key
for every unique value in `c`, where values are valid matplotlib colors
(hsv, rbg, rgba, or named colors)
s : float, optional (default: 1)
Point size.
discrete : bool or None, optional (default: None)
If True, the legend is categorical. If False, the legend is a colorbar.
If None, discreteness is detected automatically. Data containing
non-numeric `c` is always discrete, and numeric data with 20 or less
unique values is discrete.
ax : `matplotlib.Axes` or None, optional (default: None)
axis on which to plot. If None, an axis is created
legend : bool, optional (default: True)
States whether or not to create a legend. If data is continuous,
the legend is a colorbar.
figsize : tuple, optional (default: None)
Tuple of floats for creation of new `matplotlib` figure. Only used if
`ax` is None.
xticks : True, False, or list-like (default: False)
If True, keeps default x ticks. If False, removes x ticks.
If a list, sets custom x ticks
yticks : True, False, or list-like (default: False)
If True, keeps default y ticks. If False, removes y ticks.
If a list, sets custom y ticks
zticks : True, False, or list-like (default: False)
If True, keeps default z ticks. If False, removes z ticks.
If a list, sets custom z ticks. Only used for 3D plots.
xticklabels : True, False, or list-like (default: True)
If True, keeps default x tick labels. If False, removes x tick labels.
If a list, sets custom x tick labels
yticklabels : True, False, or list-like (default: True)
If True, keeps default y tick labels. If False, removes y tick labels.
If a list, sets custom y tick labels
zticklabels : True, False, or list-like (default: True)
If True, keeps default z tick labels. If False, removes z tick labels.
If a list, sets custom z tick labels. Only used for 3D plots.
label_prefix : str or None (default: "PHATE")
Prefix for all axis labels. Axes will be labelled `label_prefix`1,
`label_prefix`2, etc. Can be overriden by setting `xlabel`,
`ylabel`, and `zlabel`.
xlabel : str or None (default : None)
Label for the x axis. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set.
ylabel : str or None (default : None)
Label for the y axis. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set.
zlabel : str or None (default : None)
Label for the z axis. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set.
Only used for 3D plots.
title : str or None (default: None)
axis title. If None, no title is set.
legend_title : str (default: "")
title for the colorbar of legend
legend_loc : int or string or pair of floats, default: 'best'
Matplotlib legend location. Only used for discrete data.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
filename : str or None (default: None)
file to which the output is saved
dpi : int or None, optional (default: None)
The resolution in dots per inch. If None it will default to the value
savefig.dpi in the matplotlibrc file. If 'figure' it will set the dpi
to be the value of the figure. Only used if filename is not None.
**plot_kwargs : keyword arguments
Extra arguments passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : `matplotlib.Axes`
axis on which plot was drawn
Examples
--------
>>> import phate
>>> import matplotlib.pyplot as plt
>>> ###
>>> # Running PHATE
>>> ###
>>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=100, n_branch=20,
... branch_length=100)
>>> tree_data.shape
(2000, 100)
>>> phate_operator = phate.PHATE(k=5, a=20, t=150)
>>> tree_phate = phate_operator.fit_transform(tree_data)
>>> tree_phate.shape
(2000, 2)
>>> ###
>>> # Plotting using phate.plot
>>> ###
>>> phate.plot.scatter2d(tree_phate, c=tree_clusters)
>>> # You can also pass the PHATE operator instead of data
>>> phate.plot.scatter2d(phate_operator, c=tree_clusters)
>>> phate.plot.scatter3d(phate_operator, c=tree_clusters)
>>> ###
>>> # Using a cmap dictionary
>>> ###
>>> import numpy as np
>>> X = np.random.normal(0,1,[1000,2])
>>> c = np.random.choice(['a','b'], 1000, replace=True)
>>> X[c=='a'] += 10
>>> phate.plot.scatter2d(X, c=c, cmap={'a' : [1,0,0,1], 'b' : 'xkcd:sky blue'})
"""
warnings.warn("`phate.plot.scatter2d` is deprecated. "
"Use `scprep.plot.scatter2d` instead.",
FutureWarning)
data = _get_plot_data(data, ndim=2)
return scprep.plot.scatter2d(data, **kwargs) | ['def', 'scatter2d', '(', 'data', ',', '*', '*', 'kwargs', ')', ':', 'warnings', '.', 'warn', '(', '"`phate.plot.scatter2d` is deprecated. "', '"Use `scprep.plot.scatter2d` instead."', ',', 'FutureWarning', ')', 'data', '=', '_get_plot_data', '(', 'data', ',', 'ndim', '=', '2', ')', 'return', 'scprep', '.', 'plot', '.', 'scatter2d', '(', 'data', ',', '*', '*', 'kwargs', ')'] | Create a 2D scatter plot
Builds upon `matplotlib.pyplot.scatter` with nice defaults
and handles categorical colors / legends better.
Parameters
----------
data : array-like, shape=[n_samples, n_features]
Input data. Only the first two components will be used.
c : list-like or None, optional (default: None)
Color vector. Can be a single color value (RGB, RGBA, or named
matplotlib colors), an array of these of length n_samples, or a list of
discrete or continuous values of any data type. If `c` is not a single
or list of matplotlib colors, the values in `c` will be used to
populate the legend / colorbar with colors from `cmap`
cmap : `matplotlib` colormap, str, dict or None, optional (default: None)
matplotlib colormap. If None, uses `tab20` for discrete data and
`inferno` for continuous data. If a dictionary, expects one key
for every unique value in `c`, where values are valid matplotlib colors
(hsv, rbg, rgba, or named colors)
s : float, optional (default: 1)
Point size.
discrete : bool or None, optional (default: None)
If True, the legend is categorical. If False, the legend is a colorbar.
If None, discreteness is detected automatically. Data containing
non-numeric `c` is always discrete, and numeric data with 20 or less
unique values is discrete.
ax : `matplotlib.Axes` or None, optional (default: None)
axis on which to plot. If None, an axis is created
legend : bool, optional (default: True)
States whether or not to create a legend. If data is continuous,
the legend is a colorbar.
figsize : tuple, optional (default: None)
Tuple of floats for creation of new `matplotlib` figure. Only used if
`ax` is None.
xticks : True, False, or list-like (default: False)
If True, keeps default x ticks. If False, removes x ticks.
If a list, sets custom x ticks
yticks : True, False, or list-like (default: False)
If True, keeps default y ticks. If False, removes y ticks.
If a list, sets custom y ticks
zticks : True, False, or list-like (default: False)
If True, keeps default z ticks. If False, removes z ticks.
If a list, sets custom z ticks. Only used for 3D plots.
xticklabels : True, False, or list-like (default: True)
If True, keeps default x tick labels. If False, removes x tick labels.
If a list, sets custom x tick labels
yticklabels : True, False, or list-like (default: True)
If True, keeps default y tick labels. If False, removes y tick labels.
If a list, sets custom y tick labels
zticklabels : True, False, or list-like (default: True)
If True, keeps default z tick labels. If False, removes z tick labels.
If a list, sets custom z tick labels. Only used for 3D plots.
label_prefix : str or None (default: "PHATE")
Prefix for all axis labels. Axes will be labelled `label_prefix`1,
`label_prefix`2, etc. Can be overriden by setting `xlabel`,
`ylabel`, and `zlabel`.
xlabel : str or None (default : None)
Label for the x axis. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set.
ylabel : str or None (default : None)
Label for the y axis. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set.
zlabel : str or None (default : None)
Label for the z axis. Overrides the automatic label given by
label_prefix. If None and label_prefix is None, no label is set.
Only used for 3D plots.
title : str or None (default: None)
axis title. If None, no title is set.
legend_title : str (default: "")
title for the colorbar of legend
legend_loc : int or string or pair of floats, default: 'best'
Matplotlib legend location. Only used for discrete data.
See <https://matplotlib.org/api/_as_gen/matplotlib.pyplot.legend.html>
for details.
filename : str or None (default: None)
file to which the output is saved
dpi : int or None, optional (default: None)
The resolution in dots per inch. If None it will default to the value
savefig.dpi in the matplotlibrc file. If 'figure' it will set the dpi
to be the value of the figure. Only used if filename is not None.
**plot_kwargs : keyword arguments
Extra arguments passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : `matplotlib.Axes`
axis on which plot was drawn
Examples
--------
>>> import phate
>>> import matplotlib.pyplot as plt
>>> ###
>>> # Running PHATE
>>> ###
>>> tree_data, tree_clusters = phate.tree.gen_dla(n_dim=100, n_branch=20,
... branch_length=100)
>>> tree_data.shape
(2000, 100)
>>> phate_operator = phate.PHATE(k=5, a=20, t=150)
>>> tree_phate = phate_operator.fit_transform(tree_data)
>>> tree_phate.shape
(2000, 2)
>>> ###
>>> # Plotting using phate.plot
>>> ###
>>> phate.plot.scatter2d(tree_phate, c=tree_clusters)
>>> # You can also pass the PHATE operator instead of data
>>> phate.plot.scatter2d(phate_operator, c=tree_clusters)
>>> phate.plot.scatter3d(phate_operator, c=tree_clusters)
>>> ###
>>> # Using a cmap dictionary
>>> ###
>>> import numpy as np
>>> X = np.random.normal(0,1,[1000,2])
>>> c = np.random.choice(['a','b'], 1000, replace=True)
>>> X[c=='a'] += 10
>>> phate.plot.scatter2d(X, c=c, cmap={'a' : [1,0,0,1], 'b' : 'xkcd:sky blue'}) | ['Create', 'a', '2D', 'scatter', 'plot'] | train | https://github.com/KrishnaswamyLab/PHATE/blob/346a4597dcfc523f8bef99bce482e677282b6719/Python/phate/plot.py#L220-L345 |
1,488 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavwp.py | MAVWPLoader._read_waypoints_v110 | def _read_waypoints_v110(self, file):
'''read a version 110 waypoint'''
comment = ''
for line in file:
if line.startswith('#'):
comment = line[1:].lstrip()
continue
line = line.strip()
if not line:
continue
a = line.split()
if len(a) != 12:
raise MAVWPError("invalid waypoint line with %u values" % len(a))
if mavutil.mavlink10():
fn = mavutil.mavlink.MAVLink_mission_item_message
else:
fn = mavutil.mavlink.MAVLink_waypoint_message
w = fn(self.target_system, self.target_component,
int(a[0]), # seq
int(a[2]), # frame
int(a[3]), # command
int(a[1]), # current
int(a[11]), # autocontinue
float(a[4]), # param1,
float(a[5]), # param2,
float(a[6]), # param3
float(a[7]), # param4
float(a[8]), # x (latitude)
float(a[9]), # y (longitude)
float(a[10]) # z (altitude)
)
if w.command == 0 and w.seq == 0 and self.count() == 0:
# special handling for Mission Planner created home wp
w.command = mavutil.mavlink.MAV_CMD_NAV_WAYPOINT
self.add(w, comment)
comment = '' | python | def _read_waypoints_v110(self, file):
'''read a version 110 waypoint'''
comment = ''
for line in file:
if line.startswith('#'):
comment = line[1:].lstrip()
continue
line = line.strip()
if not line:
continue
a = line.split()
if len(a) != 12:
raise MAVWPError("invalid waypoint line with %u values" % len(a))
if mavutil.mavlink10():
fn = mavutil.mavlink.MAVLink_mission_item_message
else:
fn = mavutil.mavlink.MAVLink_waypoint_message
w = fn(self.target_system, self.target_component,
int(a[0]), # seq
int(a[2]), # frame
int(a[3]), # command
int(a[1]), # current
int(a[11]), # autocontinue
float(a[4]), # param1,
float(a[5]), # param2,
float(a[6]), # param3
float(a[7]), # param4
float(a[8]), # x (latitude)
float(a[9]), # y (longitude)
float(a[10]) # z (altitude)
)
if w.command == 0 and w.seq == 0 and self.count() == 0:
# special handling for Mission Planner created home wp
w.command = mavutil.mavlink.MAV_CMD_NAV_WAYPOINT
self.add(w, comment)
comment = '' | ['def', '_read_waypoints_v110', '(', 'self', ',', 'file', ')', ':', 'comment', '=', "''", 'for', 'line', 'in', 'file', ':', 'if', 'line', '.', 'startswith', '(', "'#'", ')', ':', 'comment', '=', 'line', '[', '1', ':', ']', '.', 'lstrip', '(', ')', 'continue', 'line', '=', 'line', '.', 'strip', '(', ')', 'if', 'not', 'line', ':', 'continue', 'a', '=', 'line', '.', 'split', '(', ')', 'if', 'len', '(', 'a', ')', '!=', '12', ':', 'raise', 'MAVWPError', '(', '"invalid waypoint line with %u values"', '%', 'len', '(', 'a', ')', ')', 'if', 'mavutil', '.', 'mavlink10', '(', ')', ':', 'fn', '=', 'mavutil', '.', 'mavlink', '.', 'MAVLink_mission_item_message', 'else', ':', 'fn', '=', 'mavutil', '.', 'mavlink', '.', 'MAVLink_waypoint_message', 'w', '=', 'fn', '(', 'self', '.', 'target_system', ',', 'self', '.', 'target_component', ',', 'int', '(', 'a', '[', '0', ']', ')', ',', '# seq', 'int', '(', 'a', '[', '2', ']', ')', ',', '# frame', 'int', '(', 'a', '[', '3', ']', ')', ',', '# command', 'int', '(', 'a', '[', '1', ']', ')', ',', '# current', 'int', '(', 'a', '[', '11', ']', ')', ',', '# autocontinue', 'float', '(', 'a', '[', '4', ']', ')', ',', '# param1,', 'float', '(', 'a', '[', '5', ']', ')', ',', '# param2,', 'float', '(', 'a', '[', '6', ']', ')', ',', '# param3', 'float', '(', 'a', '[', '7', ']', ')', ',', '# param4', 'float', '(', 'a', '[', '8', ']', ')', ',', '# x (latitude)', 'float', '(', 'a', '[', '9', ']', ')', ',', '# y (longitude)', 'float', '(', 'a', '[', '10', ']', ')', '# z (altitude)', ')', 'if', 'w', '.', 'command', '==', '0', 'and', 'w', '.', 'seq', '==', '0', 'and', 'self', '.', 'count', '(', ')', '==', '0', ':', '# special handling for Mission Planner created home wp', 'w', '.', 'command', '=', 'mavutil', '.', 'mavlink', '.', 'MAV_CMD_NAV_WAYPOINT', 'self', '.', 'add', '(', 'w', ',', 'comment', ')', 'comment', '=', "''"] | read a version 110 waypoint | ['read', 'a', 'version', '110', 'waypoint'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavwp.py#L170-L205 |
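The twelve-column text format the loader expects, parsed in isolation; the sample line is made up, and command 16 is MAV_CMD_NAV_WAYPOINT.

```python
sample = "0\t1\t0\t16\t0\t0\t0\t0\t-35.3632610\t149.1652300\t584.0\t1"

a = sample.split()
assert len(a) == 12, "invalid waypoint line"
wp = {
    "seq": int(a[0]), "current": int(a[1]), "frame": int(a[2]), "command": int(a[3]),
    "param1": float(a[4]), "param2": float(a[5]), "param3": float(a[6]), "param4": float(a[7]),
    "lat": float(a[8]), "lon": float(a[9]), "alt": float(a[10]), "autocontinue": int(a[11]),
}
print(wp)
```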
1,489 | noahbenson/neuropythy | neuropythy/geometry/util.py | cartesian_to_barycentric_3D | def cartesian_to_barycentric_3D(tri, xy):
'''
cartesian_to_barycentric_3D(tri,xy) is identical to cartesian_to_barycentric_2D(tri,xy) except
it works on 3D data. Note that if tri is a 3 x 3 x n, a 3 x n x 3 or an n x 3 x 3 matrix, the
first dimension must always be the triangle vertices and the second 3-sized dimension must be
the (x,y,z) coordinates.
'''
xy = np.asarray(xy)
tri = np.asarray(tri)
if len(xy.shape) == 1:
return cartesian_to_barycentric_3D(np.transpose(np.asarray([tri]), (1,2,0)),
np.asarray([xy]).T)[:,0]
xy = xy if xy.shape[0] == 3 else xy.T
if tri.shape[0] == 3:
tri = tri if tri.shape[1] == 3 else np.transpose(tri, (0,2,1))
elif tri.shape[1] == 3:
tri = tri.T if tri.shape[0] == 3 else np.transpose(tri, (1,2,0))
elif tri.shape[2] == 3:
tri = np.transpose(tri, (2,1,0) if tri.shape[1] == 3 else (2,0,1))
if tri.shape[0] != 3 or tri.shape[1] != 3:
raise ValueError('Triangle array did not have dimensions of sizes 3 and 3')
if xy.shape[0] != 3:
raise ValueError('coordinate matrix did not have a dimension of size 3')
if tri.shape[2] != xy.shape[1]:
raise ValueError('number of triangles and coordinates must match')
# The algorithm here is borrowed from this stack-exchange post:
# http://gamedev.stackexchange.com/questions/23743
# in which it is attributed to Christer Ericson's book Real-Time Collision Detection.
v0 = tri[1] - tri[0]
v1 = tri[2] - tri[0]
v2 = xy - tri[0]
d00 = np.sum(v0 * v0, axis=0)
d01 = np.sum(v0 * v1, axis=0)
d11 = np.sum(v1 * v1, axis=0)
d20 = np.sum(v2 * v0, axis=0)
d21 = np.sum(v2 * v1, axis=0)
den = d00*d11 - d01*d01
zero = np.isclose(den, 0)
unit = 1 - zero
den += zero
l2 = unit * (d11 * d20 - d01 * d21) / den
l3 = unit * (d00 * d21 - d01 * d20) / den
return np.asarray([1.0 - l2 - l3, l2]) | python | def cartesian_to_barycentric_3D(tri, xy):
'''
cartesian_to_barycentric_3D(tri,xy) is identical to cartesian_to_barycentric_2D(tri,xy) except
it works on 3D data. Note that if tri is a 3 x 3 x n, a 3 x n x 3 or an n x 3 x 3 matrix, the
first dimension must always be the triangle vertices and the second 3-sized dimension must be
the (x,y,z) coordinates.
'''
xy = np.asarray(xy)
tri = np.asarray(tri)
if len(xy.shape) == 1:
return cartesian_to_barycentric_3D(np.transpose(np.asarray([tri]), (1,2,0)),
np.asarray([xy]).T)[:,0]
xy = xy if xy.shape[0] == 3 else xy.T
if tri.shape[0] == 3:
tri = tri if tri.shape[1] == 3 else np.transpose(tri, (0,2,1))
elif tri.shape[1] == 3:
tri = tri.T if tri.shape[0] == 3 else np.transpose(tri, (1,2,0))
elif tri.shape[2] == 3:
tri = np.transpose(tri, (2,1,0) if tri.shape[1] == 3 else (2,0,1))
if tri.shape[0] != 3 or tri.shape[1] != 3:
raise ValueError('Triangle array did not have dimensions of sizes 3 and 3')
if xy.shape[0] != 3:
raise ValueError('coordinate matrix did not have a dimension of size 3')
if tri.shape[2] != xy.shape[1]:
raise ValueError('number of triangles and coordinates must match')
# The algorithm here is borrowed from this stack-exchange post:
# http://gamedev.stackexchange.com/questions/23743
# in which it is attributed to Christer Ericson's book Real-Time Collision Detection.
v0 = tri[1] - tri[0]
v1 = tri[2] - tri[0]
v2 = xy - tri[0]
d00 = np.sum(v0 * v0, axis=0)
d01 = np.sum(v0 * v1, axis=0)
d11 = np.sum(v1 * v1, axis=0)
d20 = np.sum(v2 * v0, axis=0)
d21 = np.sum(v2 * v1, axis=0)
den = d00*d11 - d01*d01
zero = np.isclose(den, 0)
unit = 1 - zero
den += zero
l2 = unit * (d11 * d20 - d01 * d21) / den
l3 = unit * (d00 * d21 - d01 * d20) / den
return np.asarray([1.0 - l2 - l3, l2]) | ['def', 'cartesian_to_barycentric_3D', '(', 'tri', ',', 'xy', ')', ':', 'xy', '=', 'np', '.', 'asarray', '(', 'xy', ')', 'tri', '=', 'np', '.', 'asarray', '(', 'tri', ')', 'if', 'len', '(', 'xy', '.', 'shape', ')', '==', '1', ':', 'return', 'cartesian_to_barycentric_3D', '(', 'np', '.', 'transpose', '(', 'np', '.', 'asarray', '(', '[', 'tri', ']', ')', ',', '(', '1', ',', '2', ',', '0', ')', ')', ',', 'np', '.', 'asarray', '(', '[', 'xy', ']', ')', '.', 'T', ')', '[', ':', ',', '0', ']', 'xy', '=', 'xy', 'if', 'xy', '.', 'shape', '[', '0', ']', '==', '3', 'else', 'xy', '.', 'T', 'if', 'tri', '.', 'shape', '[', '0', ']', '==', '3', ':', 'tri', '=', 'tri', 'if', 'tri', '.', 'shape', '[', '1', ']', '==', '3', 'else', 'np', '.', 'transpose', '(', 'tri', ',', '(', '0', ',', '2', ',', '1', ')', ')', 'elif', 'tri', '.', 'shape', '[', '1', ']', '==', '3', ':', 'tri', '=', 'tri', '.', 'T', 'if', 'tri', '.', 'shape', '[', '0', ']', '==', '3', 'else', 'np', '.', 'transpose', '(', 'tri', ',', '(', '1', ',', '2', ',', '0', ')', ')', 'elif', 'tri', '.', 'shape', '[', '2', ']', '==', '3', ':', 'tri', '=', 'np', '.', 'transpose', '(', 'tri', ',', '(', '2', ',', '1', ',', '0', ')', 'if', 'tri', '.', 'shape', '[', '1', ']', '==', '3', 'else', '(', '2', ',', '0', ',', '1', ')', ')', 'if', 'tri', '.', 'shape', '[', '0', ']', '!=', '3', 'or', 'tri', '.', 'shape', '[', '1', ']', '!=', '3', ':', 'raise', 'ValueError', '(', "'Triangle array did not have dimensions of sizes 3 and 3'", ')', 'if', 'xy', '.', 'shape', '[', '0', ']', '!=', '3', ':', 'raise', 'ValueError', '(', "'coordinate matrix did not have a dimension of size 3'", ')', 'if', 'tri', '.', 'shape', '[', '2', ']', '!=', 'xy', '.', 'shape', '[', '1', ']', ':', 'raise', 'ValueError', '(', "'number of triangles and coordinates must match'", ')', '# The algorithm here is borrowed from this stack-exchange post:', '# http://gamedev.stackexchange.com/questions/23743', "# in which it is attributed to Christer Ericson's book Real-Time Collision Detection.", 'v0', '=', 'tri', '[', '1', ']', '-', 'tri', '[', '0', ']', 'v1', '=', 'tri', '[', '2', ']', '-', 'tri', '[', '0', ']', 'v2', '=', 'xy', '-', 'tri', '[', '0', ']', 'd00', '=', 'np', '.', 'sum', '(', 'v0', '*', 'v0', ',', 'axis', '=', '0', ')', 'd01', '=', 'np', '.', 'sum', '(', 'v0', '*', 'v1', ',', 'axis', '=', '0', ')', 'd11', '=', 'np', '.', 'sum', '(', 'v1', '*', 'v1', ',', 'axis', '=', '0', ')', 'd20', '=', 'np', '.', 'sum', '(', 'v2', '*', 'v0', ',', 'axis', '=', '0', ')', 'd21', '=', 'np', '.', 'sum', '(', 'v2', '*', 'v1', ',', 'axis', '=', '0', ')', 'den', '=', 'd00', '*', 'd11', '-', 'd01', '*', 'd01', 'zero', '=', 'np', '.', 'isclose', '(', 'den', ',', '0', ')', 'unit', '=', '1', '-', 'zero', 'den', '+=', 'zero', 'l2', '=', 'unit', '*', '(', 'd11', '*', 'd20', '-', 'd01', '*', 'd21', ')', '/', 'den', 'l3', '=', 'unit', '*', '(', 'd00', '*', 'd21', '-', 'd01', '*', 'd20', ')', '/', 'den', 'return', 'np', '.', 'asarray', '(', '[', '1.0', '-', 'l2', '-', 'l3', ',', 'l2', ']', ')'] | cartesian_to_barycentric_3D(tri,xy) is identical to cartesian_to_barycentric_2D(tri,xy) except
it works on 3D data. Note that if tri is a 3 x 3 x n, a 3 x n x 3 or an n x 3 x 3 matrix, the
first dimension must always be the triangle vertices and the second 3-sized dimension must be
the (x,y,z) coordinates. | ['cartesian_to_barycentric_3D', '(', 'tri', 'xy', ')', 'is', 'identical', 'to', 'cartesian_to_barycentric_2D', '(', 'tri', 'xy', ')', 'except', 'it', 'works', 'on', '3D', 'data', '.', 'Note', 'that', 'if', 'tri', 'is', 'a', '3', 'x', '3', 'x', 'n', 'a', '3', 'x', 'n', 'x', '3', 'or', 'an', 'n', 'x', '3', 'x', '3', 'matrix', 'the', 'first', 'dimension', 'must', 'always', 'be', 'the', 'triangle', 'vertices', 'and', 'the', 'second', '3', '-', 'sized', 'dimension', 'must', 'be', 'the', '(', 'x', 'y', 'z', ')', 'coordinates', '.'] | train | https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/geometry/util.py#L371-L413 |
1,490 | fhcrc/seqmagick | seqmagick/subcommands/quality_filter.py | RecordReportHandler._found_barcode | def _found_barcode(self, record, sample, barcode=None):
"""Hook called when barcode is found"""
assert record.id == self.current_record['sequence_name']
self.current_record['sample'] = sample | python | def _found_barcode(self, record, sample, barcode=None):
"""Hook called when barcode is found"""
assert record.id == self.current_record['sequence_name']
self.current_record['sample'] = sample | ['def', '_found_barcode', '(', 'self', ',', 'record', ',', 'sample', ',', 'barcode', '=', 'None', ')', ':', 'assert', 'record', '.', 'id', '==', 'self', '.', 'current_record', '[', "'sequence_name'", ']', 'self', '.', 'current_record', '[', "'sample'", ']', '=', 'sample'] | Hook called when barcode is found | ['Hook', 'called', 'when', 'barcode', 'is', 'found'] | train | https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L327-L330 |
1,491 | kalefranz/auxlib | auxlib/_vendor/boltons/timeutils.py | daterange | def daterange(start, stop, step=1, inclusive=False):
"""In the spirit of :func:`range` and :func:`xrange`, the `daterange`
generator that yields a sequence of :class:`~datetime.date`
objects, starting at *start*, incrementing by *step*, until *stop*
is reached.
When *inclusive* is True, the final date may be *stop*, **if**
*step* falls evenly on it. By default, *step* is one day. See
details below for many more details.
Args:
start (datetime.date): The starting date The first value in
the sequence.
stop (datetime.date): The stopping date. By default not
included in return. Can be `None` to yield an infinite
sequence.
step (int): The value to increment *start* by to reach
*stop*. Can be an :class:`int` number of days, a
:class:`datetime.timedelta`, or a :class:`tuple` of integers,
`(year, month, day)`. Positive and negative *step* values
are supported.
inclusive (bool): Whether or not the *stop* date can be
returned. *stop* is only returned when a *step* falls evenly
on it.
>>> christmas = date(year=2015, month=12, day=25)
>>> boxing_day = date(year=2015, month=12, day=26)
>>> new_year = date(year=2016, month=1, day=1)
>>> for day in daterange(christmas, new_year):
... print(repr(day))
datetime.date(2015, 12, 25)
datetime.date(2015, 12, 26)
datetime.date(2015, 12, 27)
datetime.date(2015, 12, 28)
datetime.date(2015, 12, 29)
datetime.date(2015, 12, 30)
datetime.date(2015, 12, 31)
>>> for day in daterange(christmas, boxing_day):
... print(repr(day))
datetime.date(2015, 12, 25)
>>> for day in daterange(date(2017, 5, 1), date(2017, 8, 1),
... step=(0, 1, 0), inclusive=True):
... print(repr(day))
datetime.date(2017, 5, 1)
datetime.date(2017, 6, 1)
datetime.date(2017, 7, 1)
datetime.date(2017, 8, 1)
*Be careful when using stop=None, as this will yield an infinite
sequence of dates.*
"""
if not isinstance(start, date):
raise TypeError("start expected datetime.date instance")
if stop and not isinstance(stop, date):
raise TypeError("stop expected datetime.date instance or None")
try:
y_step, m_step, d_step = step
except TypeError:
y_step, m_step, d_step = 0, 0, step
else:
y_step, m_step = int(y_step), int(m_step)
if isinstance(d_step, int):
d_step = timedelta(days=int(d_step))
elif isinstance(d_step, timedelta):
pass
else:
raise ValueError('step expected int, timedelta, or tuple'
' (year, month, day), not: %r' % step)
if stop is None:
finished = lambda t: False
elif start < stop:
finished = operator.gt if inclusive else operator.ge
else:
finished = operator.lt if inclusive else operator.le
now = start
while not finished(now, stop):
yield now
if y_step or m_step:
m_y_step, cur_month = divmod(now.month + m_step, 12)
now = now.replace(year=now.year + y_step + m_y_step,
month=cur_month or 12)
now = now + d_step
return | python | def daterange(start, stop, step=1, inclusive=False):
"""In the spirit of :func:`range` and :func:`xrange`, the `daterange`
generator that yields a sequence of :class:`~datetime.date`
objects, starting at *start*, incrementing by *step*, until *stop*
is reached.
When *inclusive* is True, the final date may be *stop*, **if**
*step* falls evenly on it. By default, *step* is one day. See
details below for many more details.
Args:
start (datetime.date): The starting date The first value in
the sequence.
stop (datetime.date): The stopping date. By default not
included in return. Can be `None` to yield an infinite
sequence.
step (int): The value to increment *start* by to reach
*stop*. Can be an :class:`int` number of days, a
:class:`datetime.timedelta`, or a :class:`tuple` of integers,
`(year, month, day)`. Positive and negative *step* values
are supported.
inclusive (bool): Whether or not the *stop* date can be
returned. *stop* is only returned when a *step* falls evenly
on it.
>>> christmas = date(year=2015, month=12, day=25)
>>> boxing_day = date(year=2015, month=12, day=26)
>>> new_year = date(year=2016, month=1, day=1)
>>> for day in daterange(christmas, new_year):
... print(repr(day))
datetime.date(2015, 12, 25)
datetime.date(2015, 12, 26)
datetime.date(2015, 12, 27)
datetime.date(2015, 12, 28)
datetime.date(2015, 12, 29)
datetime.date(2015, 12, 30)
datetime.date(2015, 12, 31)
>>> for day in daterange(christmas, boxing_day):
... print(repr(day))
datetime.date(2015, 12, 25)
>>> for day in daterange(date(2017, 5, 1), date(2017, 8, 1),
... step=(0, 1, 0), inclusive=True):
... print(repr(day))
datetime.date(2017, 5, 1)
datetime.date(2017, 6, 1)
datetime.date(2017, 7, 1)
datetime.date(2017, 8, 1)
*Be careful when using stop=None, as this will yield an infinite
sequence of dates.*
"""
if not isinstance(start, date):
raise TypeError("start expected datetime.date instance")
if stop and not isinstance(stop, date):
raise TypeError("stop expected datetime.date instance or None")
try:
y_step, m_step, d_step = step
except TypeError:
y_step, m_step, d_step = 0, 0, step
else:
y_step, m_step = int(y_step), int(m_step)
if isinstance(d_step, int):
d_step = timedelta(days=int(d_step))
elif isinstance(d_step, timedelta):
pass
else:
raise ValueError('step expected int, timedelta, or tuple'
' (year, month, day), not: %r' % step)
if stop is None:
finished = lambda t: False
elif start < stop:
finished = operator.gt if inclusive else operator.ge
else:
finished = operator.lt if inclusive else operator.le
now = start
while not finished(now, stop):
yield now
if y_step or m_step:
m_y_step, cur_month = divmod(now.month + m_step, 12)
now = now.replace(year=now.year + y_step + m_y_step,
month=cur_month or 12)
now = now + d_step
return | ['def', 'daterange', '(', 'start', ',', 'stop', ',', 'step', '=', '1', ',', 'inclusive', '=', 'False', ')', ':', 'if', 'not', 'isinstance', '(', 'start', ',', 'date', ')', ':', 'raise', 'TypeError', '(', '"start expected datetime.date instance"', ')', 'if', 'stop', 'and', 'not', 'isinstance', '(', 'stop', ',', 'date', ')', ':', 'raise', 'TypeError', '(', '"stop expected datetime.date instance or None"', ')', 'try', ':', 'y_step', ',', 'm_step', ',', 'd_step', '=', 'step', 'except', 'TypeError', ':', 'y_step', ',', 'm_step', ',', 'd_step', '=', '0', ',', '0', ',', 'step', 'else', ':', 'y_step', ',', 'm_step', '=', 'int', '(', 'y_step', ')', ',', 'int', '(', 'm_step', ')', 'if', 'isinstance', '(', 'd_step', ',', 'int', ')', ':', 'd_step', '=', 'timedelta', '(', 'days', '=', 'int', '(', 'd_step', ')', ')', 'elif', 'isinstance', '(', 'd_step', ',', 'timedelta', ')', ':', 'pass', 'else', ':', 'raise', 'ValueError', '(', "'step expected int, timedelta, or tuple'", "' (year, month, day), not: %r'", '%', 'step', ')', 'if', 'stop', 'is', 'None', ':', 'finished', '=', 'lambda', 't', ':', 'False', 'elif', 'start', '<', 'stop', ':', 'finished', '=', 'operator', '.', 'gt', 'if', 'inclusive', 'else', 'operator', '.', 'ge', 'else', ':', 'finished', '=', 'operator', '.', 'lt', 'if', 'inclusive', 'else', 'operator', '.', 'le', 'now', '=', 'start', 'while', 'not', 'finished', '(', 'now', ',', 'stop', ')', ':', 'yield', 'now', 'if', 'y_step', 'or', 'm_step', ':', 'm_y_step', ',', 'cur_month', '=', 'divmod', '(', 'now', '.', 'month', '+', 'm_step', ',', '12', ')', 'now', '=', 'now', '.', 'replace', '(', 'year', '=', 'now', '.', 'year', '+', 'y_step', '+', 'm_y_step', ',', 'month', '=', 'cur_month', 'or', '12', ')', 'now', '=', 'now', '+', 'd_step', 'return'] | In the spirit of :func:`range` and :func:`xrange`, the `daterange`
generator that yields a sequence of :class:`~datetime.date`
objects, starting at *start*, incrementing by *step*, until *stop*
is reached.
When *inclusive* is True, the final date may be *stop*, **if**
*step* falls evenly on it. By default, *step* is one day. See
details below for many more details.
Args:
start (datetime.date): The starting date The first value in
the sequence.
stop (datetime.date): The stopping date. By default not
included in return. Can be `None` to yield an infinite
sequence.
step (int): The value to increment *start* by to reach
*stop*. Can be an :class:`int` number of days, a
:class:`datetime.timedelta`, or a :class:`tuple` of integers,
`(year, month, day)`. Positive and negative *step* values
are supported.
inclusive (bool): Whether or not the *stop* date can be
returned. *stop* is only returned when a *step* falls evenly
on it.
>>> christmas = date(year=2015, month=12, day=25)
>>> boxing_day = date(year=2015, month=12, day=26)
>>> new_year = date(year=2016, month=1, day=1)
>>> for day in daterange(christmas, new_year):
... print(repr(day))
datetime.date(2015, 12, 25)
datetime.date(2015, 12, 26)
datetime.date(2015, 12, 27)
datetime.date(2015, 12, 28)
datetime.date(2015, 12, 29)
datetime.date(2015, 12, 30)
datetime.date(2015, 12, 31)
>>> for day in daterange(christmas, boxing_day):
... print(repr(day))
datetime.date(2015, 12, 25)
>>> for day in daterange(date(2017, 5, 1), date(2017, 8, 1),
... step=(0, 1, 0), inclusive=True):
... print(repr(day))
datetime.date(2017, 5, 1)
datetime.date(2017, 6, 1)
datetime.date(2017, 7, 1)
datetime.date(2017, 8, 1)
*Be careful when using stop=None, as this will yield an infinite
sequence of dates.* | ['In', 'the', 'spirit', 'of', ':', 'func', ':', 'range', 'and', ':', 'func', ':', 'xrange', 'the', 'daterange', 'generator', 'that', 'yields', 'a', 'sequence', 'of', ':', 'class', ':', '~datetime', '.', 'date', 'objects', 'starting', 'at', '*', 'start', '*', 'incrementing', 'by', '*', 'step', '*', 'until', '*', 'stop', '*', 'is', 'reached', '.'] | train | https://github.com/kalefranz/auxlib/blob/6ff2d6b57d128d0b9ed8f01ad83572e938da064f/auxlib/_vendor/boltons/timeutils.py#L280-L364 |
1,492 | pygobject/pgi | pgi/clib/gir/gibaseinfo.py | GIBaseInfo._take_ownership | def _take_ownership(self):
"""Make the Python instance take ownership of the GIBaseInfo. i.e.
unref if the python instance gets gc'ed.
"""
if self:
ptr = cast(self.value, GIBaseInfo)
_UnrefFinalizer.track(self, ptr)
self.__owns = True | python | def _take_ownership(self):
"""Make the Python instance take ownership of the GIBaseInfo. i.e.
unref if the python instance gets gc'ed.
"""
if self:
ptr = cast(self.value, GIBaseInfo)
_UnrefFinalizer.track(self, ptr)
self.__owns = True | ['def', '_take_ownership', '(', 'self', ')', ':', 'if', 'self', ':', 'ptr', '=', 'cast', '(', 'self', '.', 'value', ',', 'GIBaseInfo', ')', '_UnrefFinalizer', '.', 'track', '(', 'self', ',', 'ptr', ')', 'self', '.', '__owns', '=', 'True'] | Make the Python instance take ownership of the GIBaseInfo. i.e.
unref if the python instance gets gc'ed. | ['Make', 'the', 'Python', 'instance', 'take', 'ownership', 'of', 'the', 'GIBaseInfo', '.', 'i', '.', 'e', '.', 'unref', 'if', 'the', 'python', 'instance', 'gets', 'gc', 'ed', '.'] | train | https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/clib/gir/gibaseinfo.py#L61-L69 |
1,493 | jepegit/cellpy | cellpy/readers/cellreader.py | just_load_srno | def just_load_srno(srno, prm_filename=None):
"""Simply load an dataset based on serial number (srno).
This convenience function reads a dataset based on a serial number. This
serial number (srno) must then be defined in your database. It is mainly
used to check that things are set up correctly.
Args:
prm_filename: name of parameter file (optional).
srno (int): serial number
Example:
>>> srno = 918
>>> just_load_srno(srno)
srno: 918
read prms
....
"""
from cellpy import dbreader, filefinder
print("just_load_srno: srno: %i" % srno)
# ------------reading parameters--------------------------------------------
# print "just_load_srno: read prms"
# prm = prmreader.read(prm_filename)
#
# print prm
print("just_load_srno: making class and setting prms")
d = CellpyData()
# ------------reading db----------------------------------------------------
print()
print("just_load_srno: starting to load reader")
# reader = dbreader.reader(prm_filename)
reader = dbreader.Reader()
print("------ok------")
run_name = reader.get_cell_name(srno)
print("just_load_srno: run_name:")
print(run_name)
m = reader.get_mass(srno)
print("just_load_srno: mass: %f" % m)
print()
# ------------loadcell------------------------------------------------------
print("just_load_srno: getting file_names")
raw_files, cellpy_file = filefinder.search_for_files(run_name)
print("raw_files:", raw_files)
print("cellpy_file:", cellpy_file)
print("just_load_srno: running loadcell")
d.loadcell(raw_files, cellpy_file, mass=m)
print("------ok------")
# ------------do stuff------------------------------------------------------
print("just_load_srno: getting step_numbers for charge")
v = d.get_step_numbers("charge")
print(v)
print()
print("just_load_srno: finding C-rates")
d.find_C_rates(v, silent=False)
print()
print("just_load_srno: OK")
return True | python | def just_load_srno(srno, prm_filename=None):
"""Simply load an dataset based on serial number (srno).
This convenience function reads a dataset based on a serial number. This
serial number (srno) must then be defined in your database. It is mainly
used to check that things are set up correctly.
Args:
prm_filename: name of parameter file (optional).
srno (int): serial number
Example:
>>> srno = 918
>>> just_load_srno(srno)
srno: 918
read prms
....
"""
from cellpy import dbreader, filefinder
print("just_load_srno: srno: %i" % srno)
# ------------reading parameters--------------------------------------------
# print "just_load_srno: read prms"
# prm = prmreader.read(prm_filename)
#
# print prm
print("just_load_srno: making class and setting prms")
d = CellpyData()
# ------------reading db----------------------------------------------------
print()
print("just_load_srno: starting to load reader")
# reader = dbreader.reader(prm_filename)
reader = dbreader.Reader()
print("------ok------")
run_name = reader.get_cell_name(srno)
print("just_load_srno: run_name:")
print(run_name)
m = reader.get_mass(srno)
print("just_load_srno: mass: %f" % m)
print()
# ------------loadcell------------------------------------------------------
print("just_load_srno: getting file_names")
raw_files, cellpy_file = filefinder.search_for_files(run_name)
print("raw_files:", raw_files)
print("cellpy_file:", cellpy_file)
print("just_load_srno: running loadcell")
d.loadcell(raw_files, cellpy_file, mass=m)
print("------ok------")
# ------------do stuff------------------------------------------------------
print("just_load_srno: getting step_numbers for charge")
v = d.get_step_numbers("charge")
print(v)
print()
print("just_load_srno: finding C-rates")
d.find_C_rates(v, silent=False)
print()
print("just_load_srno: OK")
return True | ['def', 'just_load_srno', '(', 'srno', ',', 'prm_filename', '=', 'None', ')', ':', 'from', 'cellpy', 'import', 'dbreader', ',', 'filefinder', 'print', '(', '"just_load_srno: srno: %i"', '%', 'srno', ')', '# ------------reading parameters--------------------------------------------', '# print "just_load_srno: read prms"', '# prm = prmreader.read(prm_filename)', '#', '# print prm', 'print', '(', '"just_load_srno: making class and setting prms"', ')', 'd', '=', 'CellpyData', '(', ')', '# ------------reading db----------------------------------------------------', 'print', '(', ')', 'print', '(', '"just_load_srno: starting to load reader"', ')', '# reader = dbreader.reader(prm_filename)', 'reader', '=', 'dbreader', '.', 'Reader', '(', ')', 'print', '(', '"------ok------"', ')', 'run_name', '=', 'reader', '.', 'get_cell_name', '(', 'srno', ')', 'print', '(', '"just_load_srno: run_name:"', ')', 'print', '(', 'run_name', ')', 'm', '=', 'reader', '.', 'get_mass', '(', 'srno', ')', 'print', '(', '"just_load_srno: mass: %f"', '%', 'm', ')', 'print', '(', ')', '# ------------loadcell------------------------------------------------------', 'print', '(', '"just_load_srno: getting file_names"', ')', 'raw_files', ',', 'cellpy_file', '=', 'filefinder', '.', 'search_for_files', '(', 'run_name', ')', 'print', '(', '"raw_files:"', ',', 'raw_files', ')', 'print', '(', '"cellpy_file:"', ',', 'cellpy_file', ')', 'print', '(', '"just_load_srno: running loadcell"', ')', 'd', '.', 'loadcell', '(', 'raw_files', ',', 'cellpy_file', ',', 'mass', '=', 'm', ')', 'print', '(', '"------ok------"', ')', '# ------------do stuff------------------------------------------------------', 'print', '(', '"just_load_srno: getting step_numbers for charge"', ')', 'v', '=', 'd', '.', 'get_step_numbers', '(', '"charge"', ')', 'print', '(', 'v', ')', 'print', '(', ')', 'print', '(', '"just_load_srno: finding C-rates"', ')', 'd', '.', 'find_C_rates', '(', 'v', ',', 'silent', '=', 'False', ')', 'print', '(', ')', 'print', '(', '"just_load_srno: OK"', ')', 'return', 'True'] | Simply load an dataset based on serial number (srno).
This convenience function reads a dataset based on a serial number. This
serial number (srno) must then be defined in your database. It is mainly
used to check that things are set up correctly.
Args:
prm_filename: name of parameter file (optional).
srno (int): serial number
Example:
>>> srno = 918
>>> just_load_srno(srno)
srno: 918
read prms
.... | ['Simply', 'load', 'an', 'dataset', 'based', 'on', 'serial', 'number', '(', 'srno', ')', '.'] | train | https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/readers/cellreader.py#L4223-L4290 |
1,494 | thisfred/val | val/_val.py | BaseSchema.validate | def validate(self, data):
"""Validate data. Raise NotValid error for invalid data."""
validated = self._validated(data)
errors = []
for validator in self.additional_validators:
if not validator(validated):
errors.append(
"%s invalidated by '%s'" % (
validated, _get_repr(validator)))
if errors:
raise NotValid(*errors)
if self.default is UNSPECIFIED:
return validated
if self.null_values is not UNSPECIFIED\
and validated in self.null_values:
return self.default
if validated is None:
return self.default
return validated | python | def validate(self, data):
"""Validate data. Raise NotValid error for invalid data."""
validated = self._validated(data)
errors = []
for validator in self.additional_validators:
if not validator(validated):
errors.append(
"%s invalidated by '%s'" % (
validated, _get_repr(validator)))
if errors:
raise NotValid(*errors)
if self.default is UNSPECIFIED:
return validated
if self.null_values is not UNSPECIFIED\
and validated in self.null_values:
return self.default
if validated is None:
return self.default
return validated | ['def', 'validate', '(', 'self', ',', 'data', ')', ':', 'validated', '=', 'self', '.', '_validated', '(', 'data', ')', 'errors', '=', '[', ']', 'for', 'validator', 'in', 'self', '.', 'additional_validators', ':', 'if', 'not', 'validator', '(', 'validated', ')', ':', 'errors', '.', 'append', '(', '"%s invalidated by \'%s\'"', '%', '(', 'validated', ',', '_get_repr', '(', 'validator', ')', ')', ')', 'if', 'errors', ':', 'raise', 'NotValid', '(', '*', 'errors', ')', 'if', 'self', '.', 'default', 'is', 'UNSPECIFIED', ':', 'return', 'validated', 'if', 'self', '.', 'null_values', 'is', 'not', 'UNSPECIFIED', 'and', 'validated', 'in', 'self', '.', 'null_values', ':', 'return', 'self', '.', 'default', 'if', 'validated', 'is', 'None', ':', 'return', 'self', '.', 'default', 'return', 'validated'] | Validate data. Raise NotValid error for invalid data. | ['Validate', 'data', '.', 'Raise', 'NotValid', 'error', 'for', 'invalid', 'data', '.'] | train | https://github.com/thisfred/val/blob/ba022e0c6c47acb3b8a45e7c44c84cc0f495c41c/val/_val.py#L243-L265 |
1,495 | wright-group/WrightTools | WrightTools/data/_spcm.py | from_spcm | def from_spcm(filepath, name=None, *, delimiter=",", parent=None, verbose=True) -> Data:
"""Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``).
If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object.
See the `spcm`__ software homepage for more info.
__ http://www.becker-hickl.com/software/spcm.htm
Parameters
----------
filepath : path-like
Path to SPC-xxx .asc file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
delimiter : string (optional)
The string used to separate values. Default is ','.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.data.Data object
"""
filestr = os.fspath(filepath)
filepath = pathlib.Path(filepath)
# check filepath
if not ".asc" in filepath.suffixes:
wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
# parse name
if not name:
name = filepath.name.split(".")[0]
# create headers dictionary
headers = collections.OrderedDict()
header_lines = 0
ds = np.DataSource(None)
f = ds.open(filestr, "rt")
while True:
line = f.readline().strip()
header_lines += 1
if len(line) == 0:
break
else:
key, value = line.split(":", 1)
if key.strip() == "Revision":
headers["resolution"] = int(value.strip(" bits ADC"))
else:
headers[key.strip()] = value.strip()
line = f.readline().strip()
while "_BEGIN" in line:
header_lines += 1
section = line.split("_BEGIN")[0]
while True:
line = f.readline().strip()
header_lines += 1
if section + "_END" in line:
break
if section == "SYS_PARA":
use_type = {
"B": lambda b: int(b) == 1,
"C": str, # e.g. #SP [SP_OVERFL,C,N]
"F": float,
"I": int,
"L": int, # e.g. #DI [DI_MAXCNT,L,128]
"S": str,
"U": int, # unsigned int?
}
item = line[line.find("[") + 1 : line.find("]")].split(",")
key = item[0]
value = use_type[item[1]](item[2])
headers[key] = value
else:
splitted = line.split()
value = splitted[-1][1:-1].split(",")
key = " ".join(splitted[:-1])
headers[key] = value
line = f.readline().strip()
if "END" in line:
header_lines += 1
break
if "Date" in headers.keys() and "Time" in headers.keys():
# NOTE: reports created in local time, no-way to calculate absolute time
created = " ".join([headers["Date"], headers["Time"]])
created = time.strptime(created, "%Y-%m-%d %H:%M:%S")
created = timestamp.TimeStamp(time.mktime(created)).RFC3339
headers["created"] = created
# initialize data object
kwargs = {"name": name, "kind": "spcm", "source": filestr, **headers}
if parent:
data = parent.create_data(**kwargs)
else:
data = Data(**kwargs)
# import data
f.seek(0)
arr = np.genfromtxt(
f, skip_header=(header_lines + 1), skip_footer=1, delimiter=delimiter, unpack=True
)
f.close()
# construct data
data.create_variable(name="time", values=arr[0], units="ns")
data.create_channel(name="counts", values=arr[1])
data.transform("time")
# finish
if verbose:
print("data created at {0}".format(data.fullpath))
print(" kind: {0}".format(data.kind))
print(" range: {0} to {1} (ns)".format(data.time[0], data.time[-1]))
print(" size: {0}".format(data.size))
if "SP_COL_T" in data.attrs.keys():
print(" collection time: {0} sec".format(data.attrs["SP_COL_T"]))
return data | python | def from_spcm(filepath, name=None, *, delimiter=",", parent=None, verbose=True) -> Data:
"""Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``).
If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object.
See the `spcm`__ software homepage for more info.
__ http://www.becker-hickl.com/software/spcm.htm
Parameters
----------
filepath : path-like
Path to SPC-xxx .asc file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
delimiter : string (optional)
The string used to separate values. Default is ','.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.data.Data object
"""
filestr = os.fspath(filepath)
filepath = pathlib.Path(filepath)
# check filepath
if not ".asc" in filepath.suffixes:
wt_exceptions.WrongFileTypeWarning.warn(filepath, ".asc")
# parse name
if not name:
name = filepath.name.split(".")[0]
# create headers dictionary
headers = collections.OrderedDict()
header_lines = 0
ds = np.DataSource(None)
f = ds.open(filestr, "rt")
while True:
line = f.readline().strip()
header_lines += 1
if len(line) == 0:
break
else:
key, value = line.split(":", 1)
if key.strip() == "Revision":
headers["resolution"] = int(value.strip(" bits ADC"))
else:
headers[key.strip()] = value.strip()
line = f.readline().strip()
while "_BEGIN" in line:
header_lines += 1
section = line.split("_BEGIN")[0]
while True:
line = f.readline().strip()
header_lines += 1
if section + "_END" in line:
break
if section == "SYS_PARA":
use_type = {
"B": lambda b: int(b) == 1,
"C": str, # e.g. #SP [SP_OVERFL,C,N]
"F": float,
"I": int,
"L": int, # e.g. #DI [DI_MAXCNT,L,128]
"S": str,
"U": int, # unsigned int?
}
item = line[line.find("[") + 1 : line.find("]")].split(",")
key = item[0]
value = use_type[item[1]](item[2])
headers[key] = value
else:
splitted = line.split()
value = splitted[-1][1:-1].split(",")
key = " ".join(splitted[:-1])
headers[key] = value
line = f.readline().strip()
if "END" in line:
header_lines += 1
break
if "Date" in headers.keys() and "Time" in headers.keys():
# NOTE: reports created in local time, no-way to calculate absolute time
created = " ".join([headers["Date"], headers["Time"]])
created = time.strptime(created, "%Y-%m-%d %H:%M:%S")
created = timestamp.TimeStamp(time.mktime(created)).RFC3339
headers["created"] = created
# initialize data object
kwargs = {"name": name, "kind": "spcm", "source": filestr, **headers}
if parent:
data = parent.create_data(**kwargs)
else:
data = Data(**kwargs)
# import data
f.seek(0)
arr = np.genfromtxt(
f, skip_header=(header_lines + 1), skip_footer=1, delimiter=delimiter, unpack=True
)
f.close()
# construct data
data.create_variable(name="time", values=arr[0], units="ns")
data.create_channel(name="counts", values=arr[1])
data.transform("time")
# finish
if verbose:
print("data created at {0}".format(data.fullpath))
print(" kind: {0}".format(data.kind))
print(" range: {0} to {1} (ns)".format(data.time[0], data.time[-1]))
print(" size: {0}".format(data.size))
if "SP_COL_T" in data.attrs.keys():
print(" collection time: {0} sec".format(data.attrs["SP_COL_T"]))
return data | ['def', 'from_spcm', '(', 'filepath', ',', 'name', '=', 'None', ',', '*', ',', 'delimiter', '=', '","', ',', 'parent', '=', 'None', ',', 'verbose', '=', 'True', ')', '->', 'Data', ':', 'filestr', '=', 'os', '.', 'fspath', '(', 'filepath', ')', 'filepath', '=', 'pathlib', '.', 'Path', '(', 'filepath', ')', '# check filepath', 'if', 'not', '".asc"', 'in', 'filepath', '.', 'suffixes', ':', 'wt_exceptions', '.', 'WrongFileTypeWarning', '.', 'warn', '(', 'filepath', ',', '".asc"', ')', '# parse name', 'if', 'not', 'name', ':', 'name', '=', 'filepath', '.', 'name', '.', 'split', '(', '"."', ')', '[', '0', ']', '# create headers dictionary', 'headers', '=', 'collections', '.', 'OrderedDict', '(', ')', 'header_lines', '=', '0', 'ds', '=', 'np', '.', 'DataSource', '(', 'None', ')', 'f', '=', 'ds', '.', 'open', '(', 'filestr', ',', '"rt"', ')', 'while', 'True', ':', 'line', '=', 'f', '.', 'readline', '(', ')', '.', 'strip', '(', ')', 'header_lines', '+=', '1', 'if', 'len', '(', 'line', ')', '==', '0', ':', 'break', 'else', ':', 'key', ',', 'value', '=', 'line', '.', 'split', '(', '":"', ',', '1', ')', 'if', 'key', '.', 'strip', '(', ')', '==', '"Revision"', ':', 'headers', '[', '"resolution"', ']', '=', 'int', '(', 'value', '.', 'strip', '(', '" bits ADC"', ')', ')', 'else', ':', 'headers', '[', 'key', '.', 'strip', '(', ')', ']', '=', 'value', '.', 'strip', '(', ')', 'line', '=', 'f', '.', 'readline', '(', ')', '.', 'strip', '(', ')', 'while', '"_BEGIN"', 'in', 'line', ':', 'header_lines', '+=', '1', 'section', '=', 'line', '.', 'split', '(', '"_BEGIN"', ')', '[', '0', ']', 'while', 'True', ':', 'line', '=', 'f', '.', 'readline', '(', ')', '.', 'strip', '(', ')', 'header_lines', '+=', '1', 'if', 'section', '+', '"_END"', 'in', 'line', ':', 'break', 'if', 'section', '==', '"SYS_PARA"', ':', 'use_type', '=', '{', '"B"', ':', 'lambda', 'b', ':', 'int', '(', 'b', ')', '==', '1', ',', '"C"', ':', 'str', ',', '# e.g. #SP [SP_OVERFL,C,N]', '"F"', ':', 'float', ',', '"I"', ':', 'int', ',', '"L"', ':', 'int', ',', '# e.g. 
#DI [DI_MAXCNT,L,128]', '"S"', ':', 'str', ',', '"U"', ':', 'int', ',', '# unsigned int?', '}', 'item', '=', 'line', '[', 'line', '.', 'find', '(', '"["', ')', '+', '1', ':', 'line', '.', 'find', '(', '"]"', ')', ']', '.', 'split', '(', '","', ')', 'key', '=', 'item', '[', '0', ']', 'value', '=', 'use_type', '[', 'item', '[', '1', ']', ']', '(', 'item', '[', '2', ']', ')', 'headers', '[', 'key', ']', '=', 'value', 'else', ':', 'splitted', '=', 'line', '.', 'split', '(', ')', 'value', '=', 'splitted', '[', '-', '1', ']', '[', '1', ':', '-', '1', ']', '.', 'split', '(', '","', ')', 'key', '=', '" "', '.', 'join', '(', 'splitted', '[', ':', '-', '1', ']', ')', 'headers', '[', 'key', ']', '=', 'value', 'line', '=', 'f', '.', 'readline', '(', ')', '.', 'strip', '(', ')', 'if', '"END"', 'in', 'line', ':', 'header_lines', '+=', '1', 'break', 'if', '"Date"', 'in', 'headers', '.', 'keys', '(', ')', 'and', '"Time"', 'in', 'headers', '.', 'keys', '(', ')', ':', '# NOTE: reports created in local time, no-way to calculate absolute time', 'created', '=', '" "', '.', 'join', '(', '[', 'headers', '[', '"Date"', ']', ',', 'headers', '[', '"Time"', ']', ']', ')', 'created', '=', 'time', '.', 'strptime', '(', 'created', ',', '"%Y-%m-%d %H:%M:%S"', ')', 'created', '=', 'timestamp', '.', 'TimeStamp', '(', 'time', '.', 'mktime', '(', 'created', ')', ')', '.', 'RFC3339', 'headers', '[', '"created"', ']', '=', 'created', '# initialize data object', 'kwargs', '=', '{', '"name"', ':', 'name', ',', '"kind"', ':', '"spcm"', ',', '"source"', ':', 'filestr', ',', '*', '*', 'headers', '}', 'if', 'parent', ':', 'data', '=', 'parent', '.', 'create_data', '(', '*', '*', 'kwargs', ')', 'else', ':', 'data', '=', 'Data', '(', '*', '*', 'kwargs', ')', '# import data', 'f', '.', 'seek', '(', '0', ')', 'arr', '=', 'np', '.', 'genfromtxt', '(', 'f', ',', 'skip_header', '=', '(', 'header_lines', '+', '1', ')', ',', 'skip_footer', '=', '1', ',', 'delimiter', '=', 'delimiter', ',', 'unpack', '=', 'True', ')', 'f', '.', 'close', '(', ')', '# construct data', 'data', '.', 'create_variable', '(', 'name', '=', '"time"', ',', 'values', '=', 'arr', '[', '0', ']', ',', 'units', '=', '"ns"', ')', 'data', '.', 'create_channel', '(', 'name', '=', '"counts"', ',', 'values', '=', 'arr', '[', '1', ']', ')', 'data', '.', 'transform', '(', '"time"', ')', '# finish', 'if', 'verbose', ':', 'print', '(', '"data created at {0}"', '.', 'format', '(', 'data', '.', 'fullpath', ')', ')', 'print', '(', '" kind: {0}"', '.', 'format', '(', 'data', '.', 'kind', ')', ')', 'print', '(', '" range: {0} to {1} (ns)"', '.', 'format', '(', 'data', '.', 'time', '[', '0', ']', ',', 'data', '.', 'time', '[', '-', '1', ']', ')', ')', 'print', '(', '" size: {0}"', '.', 'format', '(', 'data', '.', 'size', ')', ')', 'if', '"SP_COL_T"', 'in', 'data', '.', 'attrs', '.', 'keys', '(', ')', ':', 'print', '(', '" collection time: {0} sec"', '.', 'format', '(', 'data', '.', 'attrs', '[', '"SP_COL_T"', ']', ')', ')', 'return', 'data'] | Create a ``Data`` object from a Becker & Hickl spcm file (ASCII-exported, ``.asc``).
If provided, setup parameters are stored in the ``attrs`` dictionary of the ``Data`` object.
See the `spcm`__ software homepage for more info.
__ http://www.becker-hickl.com/software/spcm.htm
Parameters
----------
filepath : path-like
Path to SPC-xxx .asc file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
delimiter : string (optional)
The string used to separate values. Default is ','.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
WrightTools.data.Data object | ['Create', 'a', 'Data', 'object', 'from', 'a', 'Becker', '&', 'Hickl', 'spcm', 'file', '(', 'ASCII', '-', 'exported', '.', 'asc', ')', '.'] | train | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_spcm.py#L28-L145 |
1,496 | Unidata/MetPy | metpy/calc/basic.py | sigma_to_pressure | def sigma_to_pressure(sigma, psfc, ptop):
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
"""
if np.any(sigma < 0) or np.any(sigma > 1):
raise ValueError('Sigma values should be bounded by 0 and 1')
if psfc.magnitude < 0 or ptop.magnitude < 0:
raise ValueError('Pressure input should be non-negative')
return sigma * (psfc - ptop) + ptop | python | def sigma_to_pressure(sigma, psfc, ptop):
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
"""
if np.any(sigma < 0) or np.any(sigma > 1):
raise ValueError('Sigma values should be bounded by 0 and 1')
if psfc.magnitude < 0 or ptop.magnitude < 0:
raise ValueError('Pressure input should be non-negative')
return sigma * (psfc - ptop) + ptop | ['def', 'sigma_to_pressure', '(', 'sigma', ',', 'psfc', ',', 'ptop', ')', ':', 'if', 'np', '.', 'any', '(', 'sigma', '<', '0', ')', 'or', 'np', '.', 'any', '(', 'sigma', '>', '1', ')', ':', 'raise', 'ValueError', '(', "'Sigma values should be bounded by 0 and 1'", ')', 'if', 'psfc', '.', 'magnitude', '<', '0', 'or', 'ptop', '.', 'magnitude', '<', '0', ':', 'raise', 'ValueError', '(', "'Pressure input should be non-negative'", ')', 'return', 'sigma', '*', '(', 'psfc', '-', 'ptop', ')', '+', 'ptop'] | r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain | ['r', 'Calculate', 'pressure', 'from', 'sigma', 'values', '.'] | train | https://github.com/Unidata/MetPy/blob/16f68a94919b9a82dcf9cada2169cf039129e67b/metpy/calc/basic.py#L583-L620 |
1,497 | holgern/pyedflib | demo/stacklineplot.py | stackplot_t | def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None):
"""
will plot a stack of traces one above the other assuming
tarray.shape = numSamples, numRows
"""
data = tarray
numSamples, numRows = tarray.shape
# data = np.random.randn(numSamples,numRows) # test data
# data.shape = numSamples, numRows
if seconds:
t = seconds * np.arange(numSamples, dtype=float)/numSamples
# import pdb
# pdb.set_trace()
if start_time:
t = t+start_time
xlm = (start_time, start_time+seconds)
else:
xlm = (0,seconds)
else:
t = np.arange(numSamples, dtype=float)
xlm = (0,numSamples)
ticklocs = []
ax = plt.subplot(111)
plt.xlim(*xlm)
# xticks(np.linspace(xlm, 10))
dmin = data.min()
dmax = data.max()
dr = (dmax - dmin)*0.7 # Crowd them a bit.
y0 = dmin
y1 = (numRows-1) * dr + dmax
plt.ylim(y0, y1)
segs = []
for i in range(numRows):
segs.append(np.hstack((t[:,np.newaxis], data[:,i,np.newaxis])))
# print "segs[-1].shape:", segs[-1].shape
ticklocs.append(i*dr)
offsets = np.zeros((numRows,2), dtype=float)
offsets[:,1] = ticklocs
lines = LineCollection(segs, offsets=offsets,
transOffset=None,
)
ax.add_collection(lines)
# set the yticks to use axes coords on the y axis
ax.set_yticks(ticklocs)
# ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])
# if not plt.ylabels:
plt.ylabels = ["%d" % ii for ii in range(numRows)]
ax.set_yticklabels(ylabels)
plt.xlabel('time (s)') | python | def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None):
"""
will plot a stack of traces one above the other assuming
tarray.shape = numSamples, numRows
"""
data = tarray
numSamples, numRows = tarray.shape
# data = np.random.randn(numSamples,numRows) # test data
# data.shape = numSamples, numRows
if seconds:
t = seconds * np.arange(numSamples, dtype=float)/numSamples
# import pdb
# pdb.set_trace()
if start_time:
t = t+start_time
xlm = (start_time, start_time+seconds)
else:
xlm = (0,seconds)
else:
t = np.arange(numSamples, dtype=float)
xlm = (0,numSamples)
ticklocs = []
ax = plt.subplot(111)
plt.xlim(*xlm)
# xticks(np.linspace(xlm, 10))
dmin = data.min()
dmax = data.max()
dr = (dmax - dmin)*0.7 # Crowd them a bit.
y0 = dmin
y1 = (numRows-1) * dr + dmax
plt.ylim(y0, y1)
segs = []
for i in range(numRows):
segs.append(np.hstack((t[:,np.newaxis], data[:,i,np.newaxis])))
# print "segs[-1].shape:", segs[-1].shape
ticklocs.append(i*dr)
offsets = np.zeros((numRows,2), dtype=float)
offsets[:,1] = ticklocs
lines = LineCollection(segs, offsets=offsets,
transOffset=None,
)
ax.add_collection(lines)
# set the yticks to use axes coords on the y axis
ax.set_yticks(ticklocs)
# ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])
# if not plt.ylabels:
plt.ylabels = ["%d" % ii for ii in range(numRows)]
ax.set_yticklabels(ylabels)
plt.xlabel('time (s)') | ['def', 'stackplot_t', '(', 'tarray', ',', 'seconds', '=', 'None', ',', 'start_time', '=', 'None', ',', 'ylabels', '=', 'None', ')', ':', 'data', '=', 'tarray', 'numSamples', ',', 'numRows', '=', 'tarray', '.', 'shape', '# data = np.random.randn(numSamples,numRows) # test data', '# data.shape = numSamples, numRows', 'if', 'seconds', ':', 't', '=', 'seconds', '*', 'np', '.', 'arange', '(', 'numSamples', ',', 'dtype', '=', 'float', ')', '/', 'numSamples', '# import pdb', '# pdb.set_trace()', 'if', 'start_time', ':', 't', '=', 't', '+', 'start_time', 'xlm', '=', '(', 'start_time', ',', 'start_time', '+', 'seconds', ')', 'else', ':', 'xlm', '=', '(', '0', ',', 'seconds', ')', 'else', ':', 't', '=', 'np', '.', 'arange', '(', 'numSamples', ',', 'dtype', '=', 'float', ')', 'xlm', '=', '(', '0', ',', 'numSamples', ')', 'ticklocs', '=', '[', ']', 'ax', '=', 'plt', '.', 'subplot', '(', '111', ')', 'plt', '.', 'xlim', '(', '*', 'xlm', ')', '# xticks(np.linspace(xlm, 10))', 'dmin', '=', 'data', '.', 'min', '(', ')', 'dmax', '=', 'data', '.', 'max', '(', ')', 'dr', '=', '(', 'dmax', '-', 'dmin', ')', '*', '0.7', '# Crowd them a bit.', 'y0', '=', 'dmin', 'y1', '=', '(', 'numRows', '-', '1', ')', '*', 'dr', '+', 'dmax', 'plt', '.', 'ylim', '(', 'y0', ',', 'y1', ')', 'segs', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'numRows', ')', ':', 'segs', '.', 'append', '(', 'np', '.', 'hstack', '(', '(', 't', '[', ':', ',', 'np', '.', 'newaxis', ']', ',', 'data', '[', ':', ',', 'i', ',', 'np', '.', 'newaxis', ']', ')', ')', ')', '# print "segs[-1].shape:", segs[-1].shape', 'ticklocs', '.', 'append', '(', 'i', '*', 'dr', ')', 'offsets', '=', 'np', '.', 'zeros', '(', '(', 'numRows', ',', '2', ')', ',', 'dtype', '=', 'float', ')', 'offsets', '[', ':', ',', '1', ']', '=', 'ticklocs', 'lines', '=', 'LineCollection', '(', 'segs', ',', 'offsets', '=', 'offsets', ',', 'transOffset', '=', 'None', ',', ')', 'ax', '.', 'add_collection', '(', 'lines', ')', '# set the yticks to use axes coords on the y axis', 'ax', '.', 'set_yticks', '(', 'ticklocs', ')', "# ax.set_yticklabels(['PG3', 'PG5', 'PG7', 'PG9'])", '# if not plt.ylabels:', 'plt', '.', 'ylabels', '=', '[', '"%d"', '%', 'ii', 'for', 'ii', 'in', 'range', '(', 'numRows', ')', ']', 'ax', '.', 'set_yticklabels', '(', 'ylabels', ')', 'plt', '.', 'xlabel', '(', "'time (s)'", ')'] | will plot a stack of traces one above the other assuming
tarray.shape = numSamples, numRows | ['will', 'plot', 'a', 'stack', 'of', 'traces', 'one', 'above', 'the', 'other', 'assuming', 'tarray', '.', 'shape', '=', 'numSamples', 'numRows'] | train | https://github.com/holgern/pyedflib/blob/0f787fc1202b84a6f30d098296acf72666eaeeb4/demo/stacklineplot.py#L20-L76 |
1,498 | cdgriffith/Reusables | reusables/file_operations.py | find_files | def find_files(directory=".", ext=None, name=None,
match_case=False, disable_glob=False, depth=None,
abspath=False, enable_scandir=False):
"""
Walk through a file directory and return an iterator of files
that match requirements. Will autodetect if name has glob as magic
characters.
Note: For the example below, you can use find_files_list to return as a
list, this is simply an easy way to show the output.
.. code:: python
list(reusables.find_files(name="ex", match_case=True))
# ['C:\\example.pdf',
# 'C:\\My_exam_score.txt']
list(reusables.find_files(name="*free*"))
# ['C:\\my_stuff\\Freedom_fight.pdf']
list(reusables.find_files(ext=".pdf"))
# ['C:\\Example.pdf',
# 'C:\\how_to_program.pdf',
# 'C:\\Hunks_and_Chicks.pdf']
list(reusables.find_files(name="*chris*"))
# ['C:\\Christmas_card.docx',
# 'C:\\chris_stuff.zip']
:param directory: Top location to recursively search for matching files
:param ext: Extensions of the file you are looking for
:param name: Part of the file name
:param match_case: If name or ext has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:param abspath: Return files with their absolute paths
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generator of all files in the specified directory
"""
if ext or not name:
disable_glob = True
if not disable_glob:
disable_glob = not glob.has_magic(name)
if ext and isinstance(ext, str):
ext = [ext]
elif ext and not isinstance(ext, (list, tuple)):
raise TypeError("extension must be either one extension or a list")
if abspath:
directory = os.path.abspath(directory)
starting_depth = directory.count(os.sep)
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
if depth and root.count(os.sep) - starting_depth >= depth:
continue
if not disable_glob:
if match_case:
raise ValueError("Cannot use glob and match case, please "
"either disable glob or not set match_case")
glob_generator = glob.iglob(os.path.join(root, name))
for item in glob_generator:
yield item
continue
for file_name in files:
if ext:
for end in ext:
if file_name.lower().endswith(end.lower() if not
match_case else end):
break
else:
continue
if name:
if match_case and name not in file_name:
continue
elif name.lower() not in file_name.lower():
continue
yield os.path.join(root, file_name) | python | def find_files(directory=".", ext=None, name=None,
match_case=False, disable_glob=False, depth=None,
abspath=False, enable_scandir=False):
"""
Walk through a file directory and return an iterator of files
that match requirements. Will autodetect if name has glob as magic
characters.
Note: For the example below, you can use find_files_list to return as a
list, this is simply an easy way to show the output.
.. code:: python
list(reusables.find_files(name="ex", match_case=True))
# ['C:\\example.pdf',
# 'C:\\My_exam_score.txt']
list(reusables.find_files(name="*free*"))
# ['C:\\my_stuff\\Freedom_fight.pdf']
list(reusables.find_files(ext=".pdf"))
# ['C:\\Example.pdf',
# 'C:\\how_to_program.pdf',
# 'C:\\Hunks_and_Chicks.pdf']
list(reusables.find_files(name="*chris*"))
# ['C:\\Christmas_card.docx',
# 'C:\\chris_stuff.zip']
:param directory: Top location to recursively search for matching files
:param ext: Extensions of the file you are looking for
:param name: Part of the file name
:param match_case: If name or ext has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:param abspath: Return files with their absolute paths
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generator of all files in the specified directory
"""
if ext or not name:
disable_glob = True
if not disable_glob:
disable_glob = not glob.has_magic(name)
if ext and isinstance(ext, str):
ext = [ext]
elif ext and not isinstance(ext, (list, tuple)):
raise TypeError("extension must be either one extension or a list")
if abspath:
directory = os.path.abspath(directory)
starting_depth = directory.count(os.sep)
for root, dirs, files in _walk(directory, enable_scandir=enable_scandir):
if depth and root.count(os.sep) - starting_depth >= depth:
continue
if not disable_glob:
if match_case:
raise ValueError("Cannot use glob and match case, please "
"either disable glob or not set match_case")
glob_generator = glob.iglob(os.path.join(root, name))
for item in glob_generator:
yield item
continue
for file_name in files:
if ext:
for end in ext:
if file_name.lower().endswith(end.lower() if not
match_case else end):
break
else:
continue
if name:
if match_case and name not in file_name:
continue
elif name.lower() not in file_name.lower():
continue
yield os.path.join(root, file_name) | ['def', 'find_files', '(', 'directory', '=', '"."', ',', 'ext', '=', 'None', ',', 'name', '=', 'None', ',', 'match_case', '=', 'False', ',', 'disable_glob', '=', 'False', ',', 'depth', '=', 'None', ',', 'abspath', '=', 'False', ',', 'enable_scandir', '=', 'False', ')', ':', 'if', 'ext', 'or', 'not', 'name', ':', 'disable_glob', '=', 'True', 'if', 'not', 'disable_glob', ':', 'disable_glob', '=', 'not', 'glob', '.', 'has_magic', '(', 'name', ')', 'if', 'ext', 'and', 'isinstance', '(', 'ext', ',', 'str', ')', ':', 'ext', '=', '[', 'ext', ']', 'elif', 'ext', 'and', 'not', 'isinstance', '(', 'ext', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'raise', 'TypeError', '(', '"extension must be either one extension or a list"', ')', 'if', 'abspath', ':', 'directory', '=', 'os', '.', 'path', '.', 'abspath', '(', 'directory', ')', 'starting_depth', '=', 'directory', '.', 'count', '(', 'os', '.', 'sep', ')', 'for', 'root', ',', 'dirs', ',', 'files', 'in', '_walk', '(', 'directory', ',', 'enable_scandir', '=', 'enable_scandir', ')', ':', 'if', 'depth', 'and', 'root', '.', 'count', '(', 'os', '.', 'sep', ')', '-', 'starting_depth', '>=', 'depth', ':', 'continue', 'if', 'not', 'disable_glob', ':', 'if', 'match_case', ':', 'raise', 'ValueError', '(', '"Cannot use glob and match case, please "', '"either disable glob or not set match_case"', ')', 'glob_generator', '=', 'glob', '.', 'iglob', '(', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'name', ')', ')', 'for', 'item', 'in', 'glob_generator', ':', 'yield', 'item', 'continue', 'for', 'file_name', 'in', 'files', ':', 'if', 'ext', ':', 'for', 'end', 'in', 'ext', ':', 'if', 'file_name', '.', 'lower', '(', ')', '.', 'endswith', '(', 'end', '.', 'lower', '(', ')', 'if', 'not', 'match_case', 'else', 'end', ')', ':', 'break', 'else', ':', 'continue', 'if', 'name', ':', 'if', 'match_case', 'and', 'name', 'not', 'in', 'file_name', ':', 'continue', 'elif', 'name', '.', 'lower', '(', ')', 'not', 'in', 'file_name', '.', 'lower', '(', ')', ':', 'continue', 'yield', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'file_name', ')'] | Walk through a file directory and return an iterator of files
that match requirements. Will autodetect if name has glob as magic
characters.
Note: For the example below, you can use find_files_list to return as a
list, this is simply an easy way to show the output.
.. code:: python
list(reusables.find_files(name="ex", match_case=True))
# ['C:\\example.pdf',
# 'C:\\My_exam_score.txt']
list(reusables.find_files(name="*free*"))
# ['C:\\my_stuff\\Freedom_fight.pdf']
list(reusables.find_files(ext=".pdf"))
# ['C:\\Example.pdf',
# 'C:\\how_to_program.pdf',
# 'C:\\Hunks_and_Chicks.pdf']
list(reusables.find_files(name="*chris*"))
# ['C:\\Christmas_card.docx',
# 'C:\\chris_stuff.zip']
:param directory: Top location to recursively search for matching files
:param ext: Extensions of the file you are looking for
:param name: Part of the file name
:param match_case: If name or ext has to be a direct match or not
:param disable_glob: Do not look for globable names or use glob magic check
:param depth: How many directories down to search
:param abspath: Return files with their absolute paths
:param enable_scandir: on python < 3.5 enable external scandir package
:return: generator of all files in the specified directory | ['Walk', 'through', 'a', 'file', 'directory', 'and', 'return', 'an', 'iterator', 'of', 'files', 'that', 'match', 'requirements', '.', 'Will', 'autodetect', 'if', 'name', 'has', 'glob', 'as', 'magic', 'characters', '.'] | train | https://github.com/cdgriffith/Reusables/blob/bc32f72e4baee7d76a6d58b88fcb23dd635155cd/reusables/file_operations.py#L463-L541 |
1,499 | sbg/sevenbridges-python | sevenbridges/models/app.py | App.sync | def sync(self):
"""
Syncs the parent app changes with the current app instance.
:return: Synced App object.
"""
app = self._api.post(url=self._URL['sync'].format(id=self.id)).json()
return App(api=self._api, **app) | python | def sync(self):
"""
Syncs the parent app changes with the current app instance.
:return: Synced App object.
"""
app = self._api.post(url=self._URL['sync'].format(id=self.id)).json()
return App(api=self._api, **app) | ['def', 'sync', '(', 'self', ')', ':', 'app', '=', 'self', '.', '_api', '.', 'post', '(', 'url', '=', 'self', '.', '_URL', '[', "'sync'", ']', '.', 'format', '(', 'id', '=', 'self', '.', 'id', ')', ')', '.', 'json', '(', ')', 'return', 'App', '(', 'api', '=', 'self', '.', '_api', ',', '*', '*', 'app', ')'] | Syncs the parent app changes with the current app instance.
:return: Synced App object. | ['Syncs', 'the', 'parent', 'app', 'changes', 'with', 'the', 'current', 'app', 'instance', '.', ':', 'return', ':', 'Synced', 'App', 'object', '.'] | train | https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/models/app.py#L208-L214 |