code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def htmlDocContentDumpFormatOutput(self, cur, encoding, format):
"""Dump an HTML document. """
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.htmlDocContentDumpFormatOutput(self._o, cur__o, encoding, format) | Dump an HTML document. | Below is the instruction that describes the task:
### Input:
Dump an HTML document.
### Response:
def htmlDocContentDumpFormatOutput(self, cur, encoding, format):
"""Dump an HTML document. """
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.htmlDocContentDumpFormatOutput(self._o, cur__o, encoding, format) |
def _su_scripts_regex(self):
"""
:return:
[compiled regex, function]
"""
sups = re.escape(''.join([k for k in self.superscripts.keys()]))
subs = re.escape(''.join([k for k in self.subscripts.keys()])) # language=PythonRegExp
su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' +
r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format(
su_=subs + sups, sub=subs, sup=sups)
su_regex = re.compile(su_regex)
def su_replace(m):
esc, sub, root_sup, sup = m.groups()
if esc is not None:
return esc
elif sub is not None:
return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c] for c in sub]) + '}'
elif root_sup is not None:
return ''.join([self.superscripts[c] for c in root_sup])
elif sup is not None:
return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c] for c in sup]) + '}'
else:
raise TypeError("Regex bug: this should never be reached")
return [su_regex, su_replace] | :return:
[compiled regex, function] | Below is the instruction that describes the task:
### Input:
:return:
[compiled regex, function]
### Response:
def _su_scripts_regex(self):
"""
:return:
[compiled regex, function]
"""
sups = re.escape(''.join([k for k in self.superscripts.keys()]))
subs = re.escape(''.join([k for k in self.subscripts.keys()])) # language=PythonRegExp
su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' +
r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format(
su_=subs + sups, sub=subs, sup=sups)
su_regex = re.compile(su_regex)
def su_replace(m):
esc, sub, root_sup, sup = m.groups()
if esc is not None:
return esc
elif sub is not None:
return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c] for c in sub]) + '}'
elif root_sup is not None:
return ''.join([self.superscripts[c] for c in root_sup])
elif sup is not None:
return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c] for c in sup]) + '}'
else:
raise TypeError("Regex bug: this should never be reached")
return [su_regex, su_replace] |
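An illustrative, self-contained sketch of what the regex/replacer pair returned above does: it turns runs of Unicode sub- and superscript characters into TeX-style _{...} and ^{...} markup. The real superscripts/subscripts dictionaries are not shown in the row, so the tiny mappings below are hypothetical stand-ins, and the pattern is simplified (no ‹›/˹˺ bracket handling).

import re

superscripts = {'²': '2', '³': '3'}   # hypothetical stand-in mapping: char -> plain digit
subscripts = {'₀': '0', '₁': '1'}     # hypothetical stand-in mapping: char -> plain digit

sups = re.escape(''.join(superscripts))
subs = re.escape(''.join(subscripts))
su_regex = re.compile(
    r'\\([{su_}])|([{sub}]+)|([{sup}]+)(?=√)|([{sup}]+(?!√))'.format(
        su_=subs + sups, sub=subs, sup=sups))

def su_replace(m):
    esc, sub, root_sup, sup = m.groups()
    if esc is not None:            # escaped script character: keep it verbatim
        return esc
    if sub is not None:            # run of subscript characters -> _{...}
        return '_{' + ''.join(subscripts[c] for c in sub) + '}'
    if root_sup is not None:       # superscript run directly before a root sign
        return ''.join(superscripts[c] for c in root_sup)
    return '^{' + ''.join(superscripts[c] for c in sup) + '}'

print(su_regex.sub(su_replace, 'x² + y₁₀'))   # -> x^{2} + y_{10}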
def mni2tal(xin):
"""
mni2tal for converting from ch2/mni space to tal - very approximate.
This is a standard approach but it's not very accurate.
ANTsR function: `mni2tal`
Arguments
---------
xin : tuple
point in mni152 space.
Returns
-------
tuple
Example
-------
>>> import ants
>>> ants.mni2tal( (10,12,14) )
References
----------
http://bioimagesuite.yale.edu/mni2tal/501_95733_More\%20Accurate\%20Talairach\%20Coordinates\%20SLIDES.pdf
http://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach
"""
if (not isinstance(xin, (tuple,list))) or (len(xin) != 3):
raise ValueError('xin must be tuple/list with 3 coordinates')
x = list(xin)
# The input image is in RAS coordinates but we use ITK which returns LPS
# coordinates. So we need to flip the coordinates such that L => R and P => A to
# get RAS (MNI) coordinates
x[0] = x[0] * (-1) # flip X
x[1] = x[1] * (-1) # flip Y
xout = x
if (x[2] >= 0):
xout[0] = x[0] * 0.99
xout[1] = x[1] * 0.9688 + 0.046 * x[2]
xout[2] = x[1] * (-0.0485) + 0.9189 * x[2]
if (x[2] < 0):
xout[0] = x[0] * 0.99
xout[1] = x[1] * 0.9688 + 0.042 * x[2]
xout[2] = x[1] * (-0.0485) + 0.839 * x[2]
return(xout) | mni2tal for converting from ch2/mni space to tal - very approximate.
This is a standard approach but it's not very accurate.
ANTsR function: `mni2tal`
Arguments
---------
xin : tuple
point in mni152 space.
Returns
-------
tuple
Example
-------
>>> import ants
>>> ants.mni2tal( (10,12,14) )
References
----------
http://bioimagesuite.yale.edu/mni2tal/501_95733_More\%20Accurate\%20Talairach\%20Coordinates\%20SLIDES.pdf
http://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach | Below is the instruction that describes the task:
### Input:
mni2tal for converting from ch2/mni space to tal - very approximate.
This is a standard approach but it's not very accurate.
ANTsR function: `mni2tal`
Arguments
---------
xin : tuple
point in mni152 space.
Returns
-------
tuple
Example
-------
>>> import ants
>>> ants.mni2tal( (10,12,14) )
References
----------
http://bioimagesuite.yale.edu/mni2tal/501_95733_More\%20Accurate\%20Talairach\%20Coordinates\%20SLIDES.pdf
http://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach
### Response:
def mni2tal(xin):
"""
mni2tal for converting from ch2/mni space to tal - very approximate.
This is a standard approach but it's not very accurate.
ANTsR function: `mni2tal`
Arguments
---------
xin : tuple
point in mni152 space.
Returns
-------
tuple
Example
-------
>>> import ants
>>> ants.mni2tal( (10,12,14) )
References
----------
http://bioimagesuite.yale.edu/mni2tal/501_95733_More\%20Accurate\%20Talairach\%20Coordinates\%20SLIDES.pdf
http://imaging.mrc-cbu.cam.ac.uk/imaging/MniTalairach
"""
if (not isinstance(xin, (tuple,list))) or (len(xin) != 3):
raise ValueError('xin must be tuple/list with 3 coordinates')
x = list(xin)
# The input image is in RAS coordinates but we use ITK which returns LPS
# coordinates. So we need to flip the coordinates such that L => R and P => A to
# get RAS (MNI) coordinates
x[0] = x[0] * (-1) # flip X
x[1] = x[1] * (-1) # flip Y
xout = x
if (x[2] >= 0):
xout[0] = x[0] * 0.99
xout[1] = x[1] * 0.9688 + 0.046 * x[2]
xout[2] = x[1] * (-0.0485) + 0.9189 * x[2]
if (x[2] < 0):
xout[0] = x[0] * 0.99
xout[1] = x[1] * 0.9688 + 0.042 * x[2]
xout[2] = x[1] * (-0.0485) + 0.839 * x[2]
return(xout) |
def calculate_checksum(isbn):
"""Calculate ISBN checksum.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
Returns:
``str``: Checksum for given ISBN or SBN
"""
isbn = [int(i) for i in _isbn_cleanse(isbn, checksum=False)]
if len(isbn) == 9:
products = [x * y for x, y in zip(isbn, range(1, 10))]
check = sum(products) % 11
if check == 10:
check = 'X'
else:
# As soon as Python 2.4 support is dumped
# [(isbn[i] if i % 2 == 0 else isbn[i] * 3) for i in range(12)]
products = []
for i in range(12):
if i % 2 == 0:
products.append(isbn[i])
else:
products.append(isbn[i] * 3)
check = 10 - sum(products) % 10
if check == 10:
check = 0
return str(check) | Calculate ISBN checksum.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
Returns:
``str``: Checksum for given ISBN or SBN | Below is the instruction that describes the task:
### Input:
Calculate ISBN checksum.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
Returns:
``str``: Checksum for given ISBN or SBN
### Response:
def calculate_checksum(isbn):
"""Calculate ISBN checksum.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
Returns:
``str``: Checksum for given ISBN or SBN
"""
isbn = [int(i) for i in _isbn_cleanse(isbn, checksum=False)]
if len(isbn) == 9:
products = [x * y for x, y in zip(isbn, range(1, 10))]
check = sum(products) % 11
if check == 10:
check = 'X'
else:
# As soon as Python 2.4 support is dumped
# [(isbn[i] if i % 2 == 0 else isbn[i] * 3) for i in range(12)]
products = []
for i in range(12):
if i % 2 == 0:
products.append(isbn[i])
else:
products.append(isbn[i] * 3)
check = 10 - sum(products) % 10
if check == 10:
check = 0
return str(check) |
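An illustrative worked example of the two rules implemented above. _isbn_cleanse is not shown in the row, so these helpers assume already-cleaned digit lists. SBN/ISBN-10: weights 1..9, checksum = sum mod 11, with 10 written as 'X'. ISBN-13: weights alternate 1 and 3 over the first twelve digits, checksum = (10 - sum mod 10) mod 10.

def checksum10(digits9):
    # ISBN-10/SBN rule: positional weights 1..9, remainder mod 11, 10 -> 'X'
    total = sum(d * w for d, w in zip(digits9, range(1, 10)))
    check = total % 11
    return 'X' if check == 10 else str(check)

def checksum13(digits12):
    # ISBN-13 rule: alternating weights 1 and 3 over the first 12 digits
    total = sum(d if i % 2 == 0 else d * 3 for i, d in enumerate(digits12))
    return str((10 - total % 10) % 10)

print(checksum10([int(c) for c in '030640615']))     # -> '2' (ISBN 0-306-40615-2)
print(checksum13([int(c) for c in '978030640615']))  # -> '7' (ISBN 978-0-306-40615-7)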
def begin_transaction(self, transaction_type, trace_parent=None):
"""Register the start of a transaction on the client
"""
return self.tracer.begin_transaction(transaction_type, trace_parent=trace_parent) | Register the start of a transaction on the client | Below is the instruction that describes the task:
### Input:
Register the start of a transaction on the client
### Response:
def begin_transaction(self, transaction_type, trace_parent=None):
"""Register the start of a transaction on the client
"""
return self.tracer.begin_transaction(transaction_type, trace_parent=trace_parent) |
def modify(self, fd, eventmask):
"""
Change the bit-mask of events associated with a previously-registered
descriptor.
:param fd:
The descriptor to modify.
:param eventmask:
New bit-mask of events that will be monitored.
:raises ValueError:
If :meth:`closed()` is True
:raises OSError:
If the underlying ``epoll_ctl(2)`` fails. The error message matches
those found in the manual page.
"""
if self._epfd < 0:
_err_closed()
ev = epoll_event()
ev.events = eventmask
ev.data.fd = fd
epoll_ctl(self._epfd, EPOLL_CTL_MOD, fd, byref(ev)) | Change the bit-mask of events associated with a previously-registered
descriptor.
:param fd:
The descriptor to modify.
:param eventmask:
New bit-mask of events that will be monitored.
:raises ValueError:
If :meth:`closed()` is True
:raises OSError:
If the underlying ``epoll_ctl(2)`` fails. The error message matches
those found in the manual page. | Below is the instruction that describes the task:
### Input:
Change the bit-mask of events associated with a previously-registered
descriptor.
:param fd:
The descriptor to modify.
:param eventmask:
New bit-mask of events that will be monitored.
:raises ValueError:
If :meth:`closed()` is True
:raises OSError:
If the underlying ``epoll_ctl(2)`` fails. The error message matches
those found in the manual page.
### Response:
def modify(self, fd, eventmask):
"""
Change the bit-mask of events associated with a previously-registered
descriptor.
:param fd:
The descriptor to modify.
:param eventmask:
New bit-mask of events that will be monitored.
:raises ValueError:
If :meth:`closed()` is True
:raises OSError:
If the underlying ``epoll_ctl(2)`` fails. The error message matches
those found in the manual page.
"""
if self._epfd < 0:
_err_closed()
ev = epoll_event()
ev.events = eventmask
ev.data.fd = fd
epoll_ctl(self._epfd, EPOLL_CTL_MOD, fd, byref(ev)) |
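For comparison, the standard library's select.epoll exposes the same register/modify semantics as the ctypes-based wrapper above; a minimal, Linux-only sketch:

import select
import socket

# sketch only: the wrapper above calls epoll_ctl(EPOLL_CTL_MOD, ...) directly,
# which is what select.epoll.modify() maps to here
srv = socket.socket()
srv.bind(('127.0.0.1', 0))
srv.listen(1)

ep = select.epoll()
ep.register(srv.fileno(), select.EPOLLIN)                    # initial event mask
ep.modify(srv.fileno(), select.EPOLLIN | select.EPOLLOUT)    # change the mask later
print(ep.poll(timeout=0))                                    # non-blocking poll
ep.close()
srv.close()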
def _kafka_failure(self, item, spider, response):
'''
Callback for failed send
'''
item['success'] = False
item['exception'] = traceback.format_exc()
item['spiderid'] = spider.name
item = self._clean_item(item)
self.logger.error("Failed to send page to Kafka", item) | Callback for failed send | Below is the instruction that describes the task:
### Input:
Callback for failed send
### Response:
def _kafka_failure(self, item, spider, response):
'''
Callback for failed send
'''
item['success'] = False
item['exception'] = traceback.format_exc()
item['spiderid'] = spider.name
item = self._clean_item(item)
self.logger.error("Failed to send page to Kafka", item) |
def _categorize_file_diffs(self, file_diffs):
"""Partition file changes into admissible and inadmissible changes"""
# TODO move this into a new validator
candidate_feature_diffs = []
valid_init_diffs = []
inadmissible_files = []
for diff in file_diffs:
valid, failures = check_from_class(
ProjectStructureCheck, diff, self.project)
if valid:
if pathlib.Path(diff.b_path).parts[-1] != '__init__.py':
candidate_feature_diffs.append(diff)
logger.debug(
'Categorized {file} as CANDIDATE FEATURE MODULE'
.format(file=diff.b_path))
else:
valid_init_diffs.append(diff)
logger.debug(
'Categorized {file} as VALID INIT MODULE'
.format(file=diff.b_path))
else:
inadmissible_files.append(diff)
logger.debug(
'Categorized {file} as INADMISSIBLE; '
'failures were {failures}'
.format(file=diff.b_path, failures=failures))
logger.info(
'Admitted {} candidate feature{} '
'and {} __init__ module{} '
'and rejected {} file{}'
.format(len(candidate_feature_diffs),
make_plural_suffix(candidate_feature_diffs),
len(valid_init_diffs),
make_plural_suffix(valid_init_diffs),
len(inadmissible_files),
make_plural_suffix(inadmissible_files)))
return candidate_feature_diffs, valid_init_diffs, inadmissible_files | Partition file changes into admissible and inadmissible changes | Below is the instruction that describes the task:
### Input:
Partition file changes into admissible and inadmissible changes
### Response:
def _categorize_file_diffs(self, file_diffs):
"""Partition file changes into admissible and inadmissible changes"""
# TODO move this into a new validator
candidate_feature_diffs = []
valid_init_diffs = []
inadmissible_files = []
for diff in file_diffs:
valid, failures = check_from_class(
ProjectStructureCheck, diff, self.project)
if valid:
if pathlib.Path(diff.b_path).parts[-1] != '__init__.py':
candidate_feature_diffs.append(diff)
logger.debug(
'Categorized {file} as CANDIDATE FEATURE MODULE'
.format(file=diff.b_path))
else:
valid_init_diffs.append(diff)
logger.debug(
'Categorized {file} as VALID INIT MODULE'
.format(file=diff.b_path))
else:
inadmissible_files.append(diff)
logger.debug(
'Categorized {file} as INADMISSIBLE; '
'failures were {failures}'
.format(file=diff.b_path, failures=failures))
logger.info(
'Admitted {} candidate feature{} '
'and {} __init__ module{} '
'and rejected {} file{}'
.format(len(candidate_feature_diffs),
make_plural_suffix(candidate_feature_diffs),
len(valid_init_diffs),
make_plural_suffix(valid_init_diffs),
len(inadmissible_files),
make_plural_suffix(inadmissible_files)))
return candidate_feature_diffs, valid_init_diffs, inadmissible_files |
def caller_path(steps=1):
"""Return the path to the source file of the current frames' caller."""
frame = sys._getframe(steps + 1)
try:
path = os.path.dirname(frame.f_code.co_filename)
finally:
del frame
if not path:
path = os.getcwd()
return os.path.realpath(path) | Return the path to the source file of the current frames' caller. | Below is the instruction that describes the task:
### Input:
Return the path to the source file of the current frames' caller.
### Response:
def caller_path(steps=1):
"""Return the path to the source file of the current frames' caller."""
frame = sys._getframe(steps + 1)
try:
path = os.path.dirname(frame.f_code.co_filename)
finally:
del frame
if not path:
path = os.getcwd()
return os.path.realpath(path) |
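A minimal usage sketch built on the function above; the helper name is hypothetical. It resolves a file relative to whichever module called the helper.

import os

def resource_path(name):
    # steps=1 makes caller_path skip this helper's own frame, so the returned
    # directory belongs to the module that called resource_path()
    return os.path.join(caller_path(steps=1), name)

# Called from /home/user/project/app.py, resource_path('data.json') would give
# /home/user/project/data.json; interactive callers fall back to os.getcwd().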
def delete(self, count=1):
"""
Delete specified number of characters and Return the deleted text.
"""
if self.cursor_position < len(self.text):
deleted = self.document.text_after_cursor[:count]
self.text = self.text[:self.cursor_position] + \
self.text[self.cursor_position + len(deleted):]
return deleted
else:
return '' | Delete specified number of characters and Return the deleted text. | Below is the instruction that describes the task:
### Input:
Delete specified number of characters and Return the deleted text.
### Response:
def delete(self, count=1):
"""
Delete specified number of characters and Return the deleted text.
"""
if self.cursor_position < len(self.text):
deleted = self.document.text_after_cursor[:count]
self.text = self.text[:self.cursor_position] + \
self.text[self.cursor_position + len(deleted):]
return deleted
else:
return '' |
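The slicing logic on its own, detached from the class above (a minimal sketch; the document/cursor bookkeeping of the real class is omitted):

def delete_after(text, cursor_position, count=1):
    # remove up to `count` characters after the cursor; return the rest plus the cut
    deleted = text[cursor_position:cursor_position + count]
    remaining = text[:cursor_position] + text[cursor_position + len(deleted):]
    return remaining, deleted

print(delete_after('hello world', 5, 6))   # -> ('hello', ' world')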
def create_from_config(self, config):
"""Create a template object file defined in the config object
"""
configService = ConfigService()
template = TemplateService()
template.output = config["output"]["location"]
template_file = configService.get_template_from_config(config)
template.input = os.path.basename(template_file)
template.env = Environment(loader=FileSystemLoader(os.path.dirname(template_file)))
return template | Create a template object file defined in the config object | Below is the instruction that describes the task:
### Input:
Create a template object file defined in the config object
### Response:
def create_from_config(self, config):
"""Create a template object file defined in the config object
"""
configService = ConfigService()
template = TemplateService()
template.output = config["output"]["location"]
template_file = configService.get_template_from_config(config)
template.input = os.path.basename(template_file)
template.env = Environment(loader=FileSystemLoader(os.path.dirname(template_file)))
return template |
def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent) | Deep Copy of a LabeledTree | Below is the instruction that describes the task:
### Input:
Deep Copy of a LabeledTree
### Response:
def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent) |
def jac(self, xy=None):
"""Get the Jacobian at (x, y).
"""
if xy is not None:
self.set_xy(xy)
ux = numpy.dot(self.ax, self.xy_list[: len(self.ax)])
vx = numpy.dot(self.bx, self.xy_list[: len(self.bx)])
uy = numpy.dot(self.ay, self.xy_list[: len(self.ay)])
vy = numpy.dot(self.by, self.xy_list[: len(self.by)])
ux_dx = numpy.dot(self.ax, self.dx_list[: len(self.ax)])
vx_dx = numpy.dot(self.bx, self.dx_list[: len(self.bx)])
uy_dx = numpy.dot(self.ay, self.dx_list[: len(self.ay)])
vy_dx = numpy.dot(self.by, self.dx_list[: len(self.by)])
ux_dy = numpy.dot(self.ax, self.dy_list[: len(self.ax)])
vx_dy = numpy.dot(self.bx, self.dy_list[: len(self.bx)])
uy_dy = numpy.dot(self.ay, self.dy_list[: len(self.ay)])
vy_dy = numpy.dot(self.by, self.dy_list[: len(self.by)])
jac = numpy.array(
[
[
(ux_dx * vx - vx_dx * ux) / vx ** 2,
(ux_dy * vx - vx_dy * ux) / vx ** 2,
],
[
(uy_dx * vy - vy_dx * uy) / vy ** 2,
(uy_dy * vy - vy_dy * uy) / vy ** 2,
],
]
)
return jac | Get the Jacobian at (x, y). | Below is the instruction that describes the task:
### Input:
Get the Jacobian at (x, y).
### Response:
def jac(self, xy=None):
"""Get the Jacobian at (x, y).
"""
if xy is not None:
self.set_xy(xy)
ux = numpy.dot(self.ax, self.xy_list[: len(self.ax)])
vx = numpy.dot(self.bx, self.xy_list[: len(self.bx)])
uy = numpy.dot(self.ay, self.xy_list[: len(self.ay)])
vy = numpy.dot(self.by, self.xy_list[: len(self.by)])
ux_dx = numpy.dot(self.ax, self.dx_list[: len(self.ax)])
vx_dx = numpy.dot(self.bx, self.dx_list[: len(self.bx)])
uy_dx = numpy.dot(self.ay, self.dx_list[: len(self.ay)])
vy_dx = numpy.dot(self.by, self.dx_list[: len(self.by)])
ux_dy = numpy.dot(self.ax, self.dy_list[: len(self.ax)])
vx_dy = numpy.dot(self.bx, self.dy_list[: len(self.bx)])
uy_dy = numpy.dot(self.ay, self.dy_list[: len(self.ay)])
vy_dy = numpy.dot(self.by, self.dy_list[: len(self.by)])
jac = numpy.array(
[
[
(ux_dx * vx - vx_dx * ux) / vx ** 2,
(ux_dy * vx - vx_dy * ux) / vx ** 2,
],
[
(uy_dx * vy - vy_dx * uy) / vy ** 2,
(uy_dy * vy - vy_dy * uy) / vy ** 2,
],
]
)
return jac |
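Each entry computed above is the quotient rule applied to the rational map (x, y) -> (u_x/v_x, u_y/v_y), with u and v the linear combinations built from ax, bx, ay, by; written out:

\[
J \;=\;
\begin{pmatrix}
\dfrac{\partial_x u_x\, v_x - \partial_x v_x\, u_x}{v_x^{2}} &
\dfrac{\partial_y u_x\, v_x - \partial_y v_x\, u_x}{v_x^{2}} \\[1.2ex]
\dfrac{\partial_x u_y\, v_y - \partial_x v_y\, u_y}{v_y^{2}} &
\dfrac{\partial_y u_y\, v_y - \partial_y v_y\, u_y}{v_y^{2}}
\end{pmatrix},
\qquad
\frac{\partial}{\partial t}\,\frac{u}{v} \;=\; \frac{u_t\, v - v_t\, u}{v^{2}}.
\]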
def add(self, effect=None, act=None, obj=None,
policy=None, policies=None):
"""Insert an individual (effect, action, object) triple or all
triples for a policy or list of policies.
"""
if policies is not None:
for p in policies:
self.add(policy=p)
elif policy is not None:
for e, a, o in policy:
self.add(e, a, o)
else:
objc = obj.components if obj is not None else []
self.tree[act.components + objc] = effect | Insert an individual (effect, action, object) triple or all
triples for a policy or list of policies. | Below is the instruction that describes the task:
### Input:
Insert an individual (effect, action, object) triple or all
triples for a policy or list of policies.
### Response:
def add(self, effect=None, act=None, obj=None,
policy=None, policies=None):
"""Insert an individual (effect, action, object) triple or all
triples for a policy or list of policies.
"""
if policies is not None:
for p in policies:
self.add(policy=p)
elif policy is not None:
for e, a, o in policy:
self.add(e, a, o)
else:
objc = obj.components if obj is not None else []
self.tree[act.components + objc] = effect |
def sendmail(subject, text, mailto, sender=None):
"""
Sends an e-mail with unix sendmail.
Args:
subject: String with the subject of the mail.
text: String with the body of the mail.
mailto: String or list of string with the recipients.
sender: string with the sender address.
If sender is None, username@hostname is used.
Returns:
Exit status
"""
def user_at_host():
from socket import gethostname
return os.getlogin() + "@" + gethostname()
# Body of the message.
try:
sender = user_at_host() if sender is None else sender
except OSError:
sender = 'abipyscheduler@youknowwhere'
if is_string(mailto): mailto = [mailto]
from email.mime.text import MIMEText
mail = MIMEText(text)
mail["Subject"] = subject
mail["From"] = sender
mail["To"] = ", ".join(mailto)
msg = mail.as_string()
# sendmail works much better than the python interface.
# Note that sendmail is available only on Unix-like OS.
from subprocess import Popen, PIPE
import sys
sendmail = which("sendmail")
if sendmail is None: return -1
if sys.version_info[0] < 3:
p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE)
else:
# msg is string not bytes so must use universal_newlines
p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE, universal_newlines=True)
outdata, errdata = p.communicate(msg)
return len(errdata) | Sends an e-mail with unix sendmail.
Args:
subject: String with the subject of the mail.
text: String with the body of the mail.
mailto: String or list of string with the recipients.
sender: string with the sender address.
If sender is None, username@hostname is used.
Returns:
Exit status | Below is the instruction that describes the task:
### Input:
Sends an e-mail with unix sendmail.
Args:
subject: String with the subject of the mail.
text: String with the body of the mail.
mailto: String or list of string with the recipients.
sender: string with the sender address.
If sender is None, username@hostname is used.
Returns:
Exit status
### Response:
def sendmail(subject, text, mailto, sender=None):
"""
Sends an e-mail with unix sendmail.
Args:
subject: String with the subject of the mail.
text: String with the body of the mail.
mailto: String or list of string with the recipients.
sender: string with the sender address.
If sender is None, username@hostname is used.
Returns:
Exit status
"""
def user_at_host():
from socket import gethostname
return os.getlogin() + "@" + gethostname()
# Body of the message.
try:
sender = user_at_host() if sender is None else sender
except OSError:
sender = 'abipyscheduler@youknowwhere'
if is_string(mailto): mailto = [mailto]
from email.mime.text import MIMEText
mail = MIMEText(text)
mail["Subject"] = subject
mail["From"] = sender
mail["To"] = ", ".join(mailto)
msg = mail.as_string()
# sendmail works much better than the python interface.
# Note that sendmail is available only on Unix-like OS.
from subprocess import Popen, PIPE
import sys
sendmail = which("sendmail")
if sendmail is None: return -1
if sys.version_info[0] < 3:
p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE)
else:
# msg is string not bytes so must use universal_newlines
p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE, universal_newlines=True)
outdata, errdata = p.communicate(msg)
return len(errdata) |
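A usage sketch for the helper above; the addresses are placeholders. Per the function body, 0 means sendmail wrote nothing to stderr and -1 means no sendmail binary was found.

# hypothetical addresses; return-value semantics follow the function above
status = sendmail(
    subject="Scheduler finished",
    text="All tasks completed without errors.",
    mailto=["ops@example.com", "oncall@example.com"],
    sender="scheduler@example.com",
)
if status != 0:
    print("mail delivery problem, status:", status)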
def offset(self, offset):
"""
Apply an OFFSET to the query and return the newly resulting Query.
"""
query = self._copy()
query._offset = offset
return query | Apply an OFFSET to the query and return the newly resulting Query. | Below is the instruction that describes the task:
### Input:
Apply an OFFSET to the query and return the newly resulting Query.
### Response:
def offset(self, offset):
"""
Apply an OFFSET to the query and return the newly resulting Query.
"""
query = self._copy()
query._offset = offset
return query |
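A usage sketch of the copy-on-modify pattern above; the Query constructor and Model are assumed purely for illustration.

page_size = 25
page = 3

base_query = Query(Model)                                # hypothetical constructor
page_query = base_query.offset((page - 1) * page_size)  # returns a new Query

# base_query is untouched; page_query._offset == 50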
def inq_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
num_bits=4, inq_iterations=(), selection_algorithm='random',
seed=-1, w_init=None, i_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None,
with_bias=True):
"""Incremental Network Quantization Convolution Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if i_init is None:
i_init = ConstantInitializer()
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
w_init, True, not fix_parameters)
i = get_parameter_or_create(
"I", (outmaps, inp.shape[base_axis]) + tuple(kernel),
i_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
return F.inq_convolution(inp, w, i, b, base_axis, pad, stride, dilation, group, num_bits, inq_iterations, selection_algorithm, seed) | Incremental Network Quantization Convolution Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable` | Below is the instruction that describes the task:
### Input:
Incremental Network Quantization Convolution Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
### Response:
def inq_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
num_bits=4, inq_iterations=(), selection_algorithm='random',
seed=-1, w_init=None, i_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None,
with_bias=True):
"""Incremental Network Quantization Convolution Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if i_init is None:
i_init = ConstantInitializer()
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
w_init, True, not fix_parameters)
i = get_parameter_or_create(
"I", (outmaps, inp.shape[base_axis]) + tuple(kernel),
i_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
return F.inq_convolution(inp, w, i, b, base_axis, pad, stride, dilation, group, num_bits, inq_iterations, selection_algorithm, seed) |
def wrap_handler(cls, handler, protocol, **kwargs):
''' Wrap a request handler with the matching protocol handler '''
def _wrapper(request, *args, **kwargs):
instance = cls(request=request, **kwargs)
if protocol == Resource.Protocol.http:
return instance._wrap_http(handler, request=request, **kwargs)
elif protocol == Resource.Protocol.websocket:
return instance._wrap_ws(handler, request=request, **kwargs)
elif protocol == Resource.Protocol.amqp:
return instance._wrap_amqp(view_type, *args, **kwargs)
else:
raise Exception('Communication protocol not specified')
return _wrapper | Wrap a request handler with the matching protocol handler | Below is the instruction that describes the task:
### Input:
Wrap a request handler with the matching protocol handler
### Response:
def wrap_handler(cls, handler, protocol, **kwargs):
''' Wrap a request handler with the matching protocol handler '''
def _wrapper(request, *args, **kwargs):
instance = cls(request=request, **kwargs)
if protocol == Resource.Protocol.http:
return instance._wrap_http(handler, request=request, **kwargs)
elif protocol == Resource.Protocol.websocket:
return instance._wrap_ws(handler, request=request, **kwargs)
elif protocol == Resource.Protocol.amqp:
return instance._wrap_amqp(view_type, *args, **kwargs)
else:
raise Exception('Communication protocol not specified')
return _wrapper |
def _compute_anom_score(self, lag_window_points, point):
"""
Compute anomaly score for a single data point.
Anomaly score for a single data point(t,v) equals: abs(v - ema(lagging window)).
:param list lag_window_points: values in the lagging window.
:param float point: data point value.
:return float: the anomaly score.
"""
ema = utils.compute_ema(self.smoothing_factor, lag_window_points)[-1]
return abs(point - ema) | Compute anomaly score for a single data point.
Anomaly score for a single data point(t,v) equals: abs(v - ema(lagging window)).
:param list lag_window_points: values in the lagging window.
:param float point: data point value.
:return float: the anomaly score. | Below is the instruction that describes the task:
### Input:
Compute anomaly score for a single data point.
Anomaly score for a single data point(t,v) equals: abs(v - ema(lagging window)).
:param list lag_window_points: values in the lagging window.
:param float point: data point value.
:return float: the anomaly score.
### Response:
def _compute_anom_score(self, lag_window_points, point):
"""
Compute anomaly score for a single data point.
Anomaly score for a single data point(t,v) equals: abs(v - ema(lagging window)).
:param list lag_window_points: values in the lagging window.
:param float point: data point value.
:return float: the anomaly score.
"""
ema = utils.compute_ema(self.smoothing_factor, lag_window_points)[-1]
return abs(point - ema) |
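A self-contained sketch of the score computed above; the helper below is a plain exponentially weighted moving average and may differ in detail from the utils.compute_ema used by the class.

def ema(smoothing_factor, points):
    # plain exponentially weighted moving average over the lagging window
    value = points[0]
    for p in points[1:]:
        value = smoothing_factor * p + (1 - smoothing_factor) * value
    return value

lag_window = [10.0, 10.2, 9.9, 10.1, 10.0]
point = 14.5
print(abs(point - ema(0.2, lag_window)))   # large deviation from the EMA -> high score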
def question(title="", text="", width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
"""
Display a question, possible answer are yes/no.
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
:return: The answer as a boolean
:rtype: bool
"""
response = _simple_dialog(Gtk.MessageType.QUESTION, text, title, width, height, timeout)
if response == Gtk.ResponseType.YES:
return True
elif response == Gtk.ResponseType.NO:
return False
return None | Display a question, possible answer are yes/no.
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
:return: The answer as a boolean
:rtype: bool | Below is the instruction that describes the task:
### Input:
Display a question, possible answer are yes/no.
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
:return: The answer as a boolean
:rtype: bool
### Response:
def question(title="", text="", width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
"""
Display a question, possible answer are yes/no.
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
:return: The answer as a boolean
:rtype: bool
"""
response = _simple_dialog(Gtk.MessageType.QUESTION, text, title, width, height, timeout)
if response == Gtk.ResponseType.YES:
return True
elif response == Gtk.ResponseType.NO:
return False
return None |
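A usage sketch; per the return paths above, True means "yes", False means "no", and None covers any other way the dialog was dismissed (for example, the timeout expiring).

answer = question(title="Confirm removal",
                  text="Delete the selected files?",
                  timeout=30)
if answer is True:
    print("removing files")
elif answer is False:
    print("keeping files")
else:
    print("no decision (dialog closed or timed out)")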
async def populate(self, agent_cls, *args, **kwargs):
'''Populate all the slave grid environments with agents. Assumes that
no agents have been spawned yet to the slave environment grids. This
excludes the slave environment managers as they are not in the grids.)
'''
n = self.gs[0] * self.gs[1]
tasks = []
for addr in self.addrs:
task = asyncio.ensure_future(self._populate_slave(addr, agent_cls,
n, *args,
**kwargs))
tasks.append(task)
rets = await asyncio.gather(*tasks)
return rets | Populate all the slave grid environments with agents. Assumes that
no agents have been spawned yet to the slave environment grids. This
excludes the slave environment managers as they are not in the grids.) | Below is the instruction that describes the task:
### Input:
Populate all the slave grid environments with agents. Assumes that
no agents have been spawned yet to the slave environment grids. This
excludes the slave environment managers as they are not in the grids.)
### Response:
async def populate(self, agent_cls, *args, **kwargs):
'''Populate all the slave grid environments with agents. Assumes that
no agents have been spawned yet to the slave environment grids. This
excludes the slave environment managers as they are not in the grids.)
'''
n = self.gs[0] * self.gs[1]
tasks = []
for addr in self.addrs:
task = asyncio.ensure_future(self._populate_slave(addr, agent_cls,
n, *args,
**kwargs))
tasks.append(task)
rets = await asyncio.gather(*tasks)
return rets |
def _update_deferred(self, event):
"""
This does the actual work of updating channel metadata. This is called
by the update(), and runs this method in another thread.
"""
if isinstance(event, ChannelCreated):
i = event.channel[u'id']
event.channel[u'is_archived'] = event.channel[u'is_member'] = False
self.channels[i] = event.channel
elif isinstance(event, ChannelArchive):
self.channels[event.channel][u'is_archived'] = True
elif isinstance(event, GroupArchive):
self.groups[event.channel][u'is_archived'] = True
elif isinstance(event, ChannelDeleted):
# FIXME: Handle delete events properly.
# Channels don't really get deleted, they're more just archived.
self.channels[event.channel][u'is_archived'] = True
self.channels[event.channel][u'is_open'] = False
elif isinstance(event, GroupClose):
# When you close a group, it isn't open to you anymore, but it might
# still exist. Treat it like ChannelDeleted
self.groups[event.channel][u'is_archived'] = True
self.groups[event.channel][u'is_open'] = False
elif isinstance(event, ChannelJoined):
cid = event.channel[u'id']
self.channels[cid] = event.channel
elif isinstance(event, GroupJoined):
gid = event.channel[u'id']
self.groups[gid] = event.channel
elif isinstance(event, ChannelLeft):
self.channels[event.channel][u'is_member'] = False
elif isinstance(event, GroupLeft):
self.groups[event.channel][u'is_member'] = False
elif isinstance(event, ChannelMarked):
# TODO: implement datetime handler properly
self.channels[event.channel][u'last_read'] = event._b[u'ts']
elif isinstance(event, GroupMarked):
self.groups[event.channel][u'last_read'] = event._b[u'ts']
elif isinstance(event, ChannelRename):
self.channels[event.channel[u'id']][u'name'] = event.channel[u'name']
elif isinstance(event, GroupRename):
self.groups[event.channel[u'id']][u'name'] = event.channel[u'name']
elif isinstance(event, ChannelUnarchive):
self.channels[event.channel][u'is_archived'] = False
elif isinstance(event, GroupUnarchive):
self.groups[event.channel][u'is_archived'] = False
elif isinstance(event, ImClose):
self.ims[event.channel][u'is_open'] = False
elif isinstance(event, ImCreated):
i = event.channel[u'id']
event.channel[u'user'] = event.user
self.ims[i] = event.channel
elif isinstance(event, ImMarked):
# TODO: implement datetime handler properly
self.ims[event.channel][u'last_read'] = event._b[u'ts']
elif isinstance(event, ImOpen):
self.ims[event.channel][u'is_open'] = True
elif isinstance(event, PresenceChange):
self.users[event.user][u'presence'] = event.presence
elif isinstance(event, UserChange):
# Everything but the status is provided
# Copy this out of the existing object
uid = event.user[u'id']
if event.user.get(u'status') is None and u'presence' in self.users[uid]:
event.user[u'status'] = self.users[uid][u'presence']
self.users[uid] = event.user
elif isinstance(event, TeamPrefChange):
self.team[u'prefs'][event.name] = event.value
elif isinstance(event, TeamJoin):
uid = event.user[u'id']
self.users[uid] = event.user
elif isinstance(event, BotAdded) or isinstance(event, BotChanged):
bid = event.bot[u'id']
self.bots[bid] = event.bot | This does the actual work of updating channel metadata. This is called
by the update(), and runs this method in another thread. | Below is the instruction that describes the task:
### Input:
This does the actual work of updating channel metadata. This is called
by the update(), and runs this method in another thread.
### Response:
def _update_deferred(self, event):
"""
This does the actual work of updating channel metadata. This is called
by the update(), and runs this method in another thread.
"""
if isinstance(event, ChannelCreated):
i = event.channel[u'id']
event.channel[u'is_archived'] = event.channel[u'is_member'] = False
self.channels[i] = event.channel
elif isinstance(event, ChannelArchive):
self.channels[event.channel][u'is_archived'] = True
elif isinstance(event, GroupArchive):
self.groups[event.channel][u'is_archived'] = True
elif isinstance(event, ChannelDeleted):
# FIXME: Handle delete events properly.
# Channels don't really get deleted, they're more just archived.
self.channels[event.channel][u'is_archived'] = True
self.channels[event.channel][u'is_open'] = False
elif isinstance(event, GroupClose):
# When you close a group, it isn't open to you anymore, but it might
# still exist. Treat it like ChannelDeleted
self.groups[event.channel][u'is_archived'] = True
self.groups[event.channel][u'is_open'] = False
elif isinstance(event, ChannelJoined):
cid = event.channel[u'id']
self.channels[cid] = event.channel
elif isinstance(event, GroupJoined):
gid = event.channel[u'id']
self.groups[gid] = event.channel
elif isinstance(event, ChannelLeft):
self.channels[event.channel][u'is_member'] = False
elif isinstance(event, GroupLeft):
self.groups[event.channel][u'is_member'] = False
elif isinstance(event, ChannelMarked):
# TODO: implement datetime handler properly
self.channels[event.channel][u'last_read'] = event._b[u'ts']
elif isinstance(event, GroupMarked):
self.groups[event.channel][u'last_read'] = event._b[u'ts']
elif isinstance(event, ChannelRename):
self.channels[event.channel[u'id']][u'name'] = event.channel[u'name']
elif isinstance(event, GroupRename):
self.groups[event.channel[u'id']][u'name'] = event.channel[u'name']
elif isinstance(event, ChannelUnarchive):
self.channels[event.channel][u'is_archived'] = False
elif isinstance(event, GroupUnarchive):
self.groups[event.channel][u'is_archived'] = False
elif isinstance(event, ImClose):
self.ims[event.channel][u'is_open'] = False
elif isinstance(event, ImCreated):
i = event.channel[u'id']
event.channel[u'user'] = event.user
self.ims[i] = event.channel
elif isinstance(event, ImMarked):
# TODO: implement datetime handler properly
self.ims[event.channel][u'last_read'] = event._b[u'ts']
elif isinstance(event, ImOpen):
self.ims[event.channel][u'is_open'] = True
elif isinstance(event, PresenceChange):
self.users[event.user][u'presence'] = event.presence
elif isinstance(event, UserChange):
# Everything but the status is provided
# Copy this out of the existing object
uid = event.user[u'id']
if event.user.get(u'status') is None and u'presence' in self.users[uid]:
event.user[u'status'] = self.users[uid][u'presence']
self.users[uid] = event.user
elif isinstance(event, TeamPrefChange):
self.team[u'prefs'][event.name] = event.value
elif isinstance(event, TeamJoin):
uid = event.user[u'id']
self.users[uid] = event.user
elif isinstance(event, BotAdded) or isinstance(event, BotChanged):
bid = event.bot[u'id']
self.bots[bid] = event.bot |
def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY):
"""List projects for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
:type max_results: int
:param max_results: (Optional) maximum number of projects to return,
If not passed, defaults to a value set by the API.
:type page_token: str
:param page_token:
(Optional) Token representing a cursor into the projects. If
not passed, the API will return the first page of projects.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.bigquery.client.Project`
accessible to the current client.
"""
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path="/projects",
item_to_value=_item_to_project,
items_key="projects",
page_token=page_token,
max_results=max_results,
) | List projects for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
:type max_results: int
:param max_results: (Optional) maximum number of projects to return,
If not passed, defaults to a value set by the API.
:type page_token: str
:param page_token:
(Optional) Token representing a cursor into the projects. If
not passed, the API will return the first page of projects.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.bigquery.client.Project`
accessible to the current client. | Below is the instruction that describes the task:
### Input:
List projects for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
:type max_results: int
:param max_results: (Optional) maximum number of projects to return,
If not passed, defaults to a value set by the API.
:type page_token: str
:param page_token:
(Optional) Token representing a cursor into the projects. If
not passed, the API will return the first page of projects.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.bigquery.client.Project`
accessible to the current client.
### Response:
def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY):
"""List projects for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list
:type max_results: int
:param max_results: (Optional) maximum number of projects to return,
If not passed, defaults to a value set by the API.
:type page_token: str
:param page_token:
(Optional) Token representing a cursor into the projects. If
not passed, the API will return the first page of projects.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
:type retry: :class:`google.api_core.retry.Retry`
:param retry: (Optional) How to retry the RPC.
:rtype: :class:`~google.api_core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.bigquery.client.Project`
accessible to the current client.
"""
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path="/projects",
item_to_value=_item_to_project,
items_key="projects",
page_token=page_token,
max_results=max_results,
) |
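A usage sketch against the public google-cloud-bigquery client; credentials and project setup are assumed.

from google.cloud import bigquery

client = bigquery.Client()
for project in client.list_projects(max_results=10):
    # each item is a Project exposing identifiers such as project_id and friendly_name
    print(project.project_id, project.friendly_name)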
def add_option(self, group):
""" Add option for self to the parser group object. """
group.add_argument(
"--{0}".format(self.option), action="store_true", help=self.name) | Add option for self to the parser group object. | Below is the instruction that describes the task:
### Input:
Add option for self to the parser group object.
### Response:
def add_option(self, group):
""" Add option for self to the parser group object. """
group.add_argument(
"--{0}".format(self.option), action="store_true", help=self.name) |
def get_ngrams(self, minimum, maximum, skip_sizes=None):
"""Returns a generator supplying the n-grams (`minimum` <= n
<= `maximum`) for this text.
Each iteration of the generator supplies a tuple consisting of
the size of the n-grams and a `collections.Counter` of the
n-grams.
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
:param skip_sizes: sizes to not generate n-grams for
:type skip_sizes: `list` of `int`
:rtype: `generator`
"""
skip_sizes = skip_sizes or []
tokens = self.get_tokens()
for size in range(minimum, maximum + 1):
if size not in skip_sizes:
ngrams = collections.Counter(self._ngrams(tokens, size))
yield (size, ngrams) | Returns a generator supplying the n-grams (`minimum` <= n
<= `maximum`) for this text.
Each iteration of the generator supplies a tuple consisting of
the size of the n-grams and a `collections.Counter` of the
n-grams.
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
:param skip_sizes: sizes to not generate n-grams for
:type skip_sizes: `list` of `int`
:rtype: `generator` | Below is the instruction that describes the task:
### Input:
Returns a generator supplying the n-grams (`minimum` <= n
<= `maximum`) for this text.
Each iteration of the generator supplies a tuple consisting of
the size of the n-grams and a `collections.Counter` of the
n-grams.
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
:param skip_sizes: sizes to not generate n-grams for
:type skip_sizes: `list` of `int`
:rtype: `generator`
### Response:
def get_ngrams(self, minimum, maximum, skip_sizes=None):
"""Returns a generator supplying the n-grams (`minimum` <= n
<= `maximum`) for this text.
Each iteration of the generator supplies a tuple consisting of
the size of the n-grams and a `collections.Counter` of the
n-grams.
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
:param skip_sizes: sizes to not generate n-grams for
:type skip_sizes: `list` of `int`
:rtype: `generator`
"""
skip_sizes = skip_sizes or []
tokens = self.get_tokens()
for size in range(minimum, maximum + 1):
if size not in skip_sizes:
ngrams = collections.Counter(self._ngrams(tokens, size))
yield (size, ngrams) |
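A self-contained sketch of the per-size counting done above; the class's own _ngrams helper is not shown in the row, so a simple sliding window is assumed here.

import collections

def ngrams(tokens, size):
    # sliding window of `size` consecutive tokens
    return zip(*(tokens[i:] for i in range(size)))

tokens = 'the cat sat on the mat'.split()
for size in range(2, 4):
    counts = collections.Counter(ngrams(tokens, size))
    print(size, counts.most_common(2))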
def get_yarn_applications(self, start_time, end_time, filter_str="", limit=100,
offset=0):
"""
Returns a list of YARN applications that satisfy the filter
@type start_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param start_time: Applications must have ended after this time
@type end_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param filter_str: A filter to apply to the applications. For example:
'user = root and applicationDuration > 5s'
@param limit: The maximum number of results to return
@param offset: The offset into the return list
@since: API v6
"""
params = {
'from': start_time.isoformat(),
'to': end_time.isoformat(),
'filter': filter_str,
'limit': limit,
'offset': offset
}
return self._get("yarnApplications", ApiYarnApplicationResponse,
params=params, api_version=6) | Returns a list of YARN applications that satisfy the filter
@type start_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param start_time: Applications must have ended after this time
@type end_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param filter_str: A filter to apply to the applications. For example:
'user = root and applicationDuration > 5s'
@param limit: The maximum number of results to return
@param offset: The offset into the return list
@since: API v6 | Below is the instruction that describes the task:
### Input:
Returns a list of YARN applications that satisfy the filter
@type start_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param start_time: Applications must have ended after this time
@type end_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param filter_str: A filter to apply to the applications. For example:
'user = root and applicationDuration > 5s'
@param limit: The maximum number of results to return
@param offset: The offset into the return list
@since: API v6
### Response:
def get_yarn_applications(self, start_time, end_time, filter_str="", limit=100,
offset=0):
"""
Returns a list of YARN applications that satisfy the filter
@type start_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param start_time: Applications must have ended after this time
@type end_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param filter_str: A filter to apply to the applications. For example:
'user = root and applicationDuration > 5s'
@param limit: The maximum number of results to return
@param offset: The offset into the return list
@since: API v6
"""
params = {
'from': start_time.isoformat(),
'to': end_time.isoformat(),
'filter': filter_str,
'limit': limit,
'offset': offset
}
return self._get("yarnApplications", ApiYarnApplicationResponse,
params=params, api_version=6) |
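A hedged usage sketch: the key constraint from the docstring is that both datetimes are timezone-aware (or expressed in the server time zone). The yarn_service name is an assumption for however the caller obtained the service object from the API client.

import datetime

# Timezone-aware one-day window ending now (UTC), as the docstring requires.
end_time = datetime.datetime.now(datetime.timezone.utc)
start_time = end_time - datetime.timedelta(days=1)

# With a service object in hand (name assumed for illustration):
# apps = yarn_service.get_yarn_applications(
#     start_time, end_time,
#     filter_str='user = root and applicationDuration > 5s',
#     limit=50)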
def serialise_to_nrml(self, filename, use_defaults=False):
'''
Writes the source model to a nrml source model file given by the
filename
:param str filename:
Path to output file
:param bool use_defaults:
Boolean to indicate whether to use default values (True) or not.
If set to False, ValueErrors will be raised when an essential
attribute is missing.
'''
source_model = self.convert_to_oqhazardlib(
PoissonTOM(1.0), 2.0, 2.0, 10.0, use_defaults=use_defaults)
write_source_model(filename, source_model, name=self.name) | Writes the source model to a nrml source model file given by the
filename
:param str filename:
Path to output file
:param bool use_defaults:
Boolean to indicate whether to use default values (True) or not.
If set to False, ValueErrors will be raised when an essential
attribute is missing. | Below is the the instruction that describes the task:
### Input:
Writes the source model to a nrml source model file given by the
filename
:param str filename:
Path to output file
:param bool use_defaults:
Boolean to indicate whether to use default values (True) or not.
If set to False, ValueErrors will be raised when an essential
attribute is missing.
### Response:
def serialise_to_nrml(self, filename, use_defaults=False):
'''
Writes the source model to a nrml source model file given by the
filename
:param str filename:
Path to output file
:param bool use_defaults:
Boolean to indicate whether to use default values (True) or not.
If set to False, ValueErrors will be raised when an essential
attribute is missing.
'''
source_model = self.convert_to_oqhazardlib(
PoissonTOM(1.0), 2.0, 2.0, 10.0, use_defaults=use_defaults)
write_source_model(filename, source_model, name=self.name) |
def sentence2freqt(docgraph, root, successors=None, include_pos=False,
escape_func=FREQT_ESCAPE_FUNC):
"""convert a sentence subgraph into a FREQT string."""
if successors is None:
successors = sorted_bfs_successors(docgraph, root)
if root in successors: # root node has children / subgraphs
embed_str = u"".join(sentence2freqt(docgraph, child, successors,
include_pos=include_pos,
escape_func=escape_func)
for child in successors[root])
return node2freqt(
docgraph, root, embed_str, include_pos=include_pos,
escape_func=escape_func)
else: # root node has no children / subgraphs
return node2freqt(docgraph, root, include_pos=include_pos,
escape_func=escape_func) | convert a sentence subgraph into a FREQT string. | Below is the the instruction that describes the task:
### Input:
convert a sentence subgraph into a FREQT string.
### Response:
def sentence2freqt(docgraph, root, successors=None, include_pos=False,
escape_func=FREQT_ESCAPE_FUNC):
"""convert a sentence subgraph into a FREQT string."""
if successors is None:
successors = sorted_bfs_successors(docgraph, root)
if root in successors: # root node has children / subgraphs
embed_str = u"".join(sentence2freqt(docgraph, child, successors,
include_pos=include_pos,
escape_func=escape_func)
for child in successors[root])
return node2freqt(
docgraph, root, embed_str, include_pos=include_pos,
escape_func=escape_func)
else: # root node has no children / subgraphs
return node2freqt(docgraph, root, include_pos=include_pos,
escape_func=escape_func) |
def _load(self, scale=1.0):
"""Load the ABI relative spectral responses
"""
LOG.debug("File: %s", str(self.requested_band_filename))
data = np.genfromtxt(self.requested_band_filename,
unpack=True,
names=['wavelength',
'wavenumber',
'response'],
skip_header=2)
wvl = data['wavelength'] * scale
resp = data['response']
self.rsr = {'wavelength': wvl, 'response': resp} | Load the ABI relative spectral responses | Below is the the instruction that describes the task:
### Input:
Load the ABI relative spectral responses
### Response:
def _load(self, scale=1.0):
"""Load the ABI relative spectral responses
"""
LOG.debug("File: %s", str(self.requested_band_filename))
data = np.genfromtxt(self.requested_band_filename,
unpack=True,
names=['wavelength',
'wavenumber',
'response'],
skip_header=2)
wvl = data['wavelength'] * scale
resp = data['response']
self.rsr = {'wavelength': wvl, 'response': resp} |
def calculate_permute_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, C, H, W] ---> [N', C', H', W']
Note that here [N', C', H', W'] means all possible permutations of [N, C, H, W]
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType, StringTensorType],
good_output_types=[FloatTensorType, Int64TensorType, StringTensorType])
input = operator.inputs[0]
output = operator.outputs[0]
axes = [int(i) for i in operator.raw_operator.permute.axis]
input_shape = copy.deepcopy(input.type.shape)
output.type.shape = [input_shape[a] for a in axes] | Allowed input/output patterns are
1. [N, C, H, W] ---> [N', C', H', W']
Note that here [N', C', H', W'] means all possible permutations of [N, C, H, W] | Below is the the instruction that describes the task:
### Input:
Allowed input/output patterns are
1. [N, C, H, W] ---> [N', C', H', W']
Note that here [N', C', H', W'] means all possible permutations of [N, C, H, W]
### Response:
def calculate_permute_output_shapes(operator):
'''
Allowed input/output patterns are
1. [N, C, H, W] ---> [N', C', H', W']
Note that here [N', C', H', W'] means all possible permutations of [N, C, H, W]
'''
check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType, StringTensorType],
good_output_types=[FloatTensorType, Int64TensorType, StringTensorType])
input = operator.inputs[0]
output = operator.outputs[0]
axes = [int(i) for i in operator.raw_operator.permute.axis]
input_shape = copy.deepcopy(input.type.shape)
output.type.shape = [input_shape[a] for a in axes] |
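The shape arithmetic in the last line is a plain index permutation; a minimal sketch with lists only (no converter objects involved):

input_shape = ['N', 'C', 'H', 'W']
axes = [0, 2, 3, 1]                    # e.g. NCHW -> NHWC
output_shape = [input_shape[a] for a in axes]
print(output_shape)                    # ['N', 'H', 'W', 'C']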
def copy_file(self, filepath):
"""
Returns flag which says to copy rather than link a file.
"""
copy_file = False
try:
copy_file = self.data[filepath]['copy']
except KeyError:
return False
return copy_file | Returns flag which says to copy rather than link a file. | Below is the the instruction that describes the task:
### Input:
Returns flag which says to copy rather than link a file.
### Response:
def copy_file(self, filepath):
"""
Returns flag which says to copy rather than link a file.
"""
copy_file = False
try:
copy_file = self.data[filepath]['copy']
except KeyError:
return False
return copy_file |
def crypto_sign_keypair():
"""
Returns a randomly generated public key and secret key.
:rtype: (bytes(public_key), bytes(secret_key))
"""
pk = ffi.new("unsigned char[]", crypto_sign_PUBLICKEYBYTES)
sk = ffi.new("unsigned char[]", crypto_sign_SECRETKEYBYTES)
rc = lib.crypto_sign_keypair(pk, sk)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return (
ffi.buffer(pk, crypto_sign_PUBLICKEYBYTES)[:],
ffi.buffer(sk, crypto_sign_SECRETKEYBYTES)[:],
) | Returns a randomly generated public key and secret key.
:rtype: (bytes(public_key), bytes(secret_key)) | Below is the the instruction that describes the task:
### Input:
Returns a randomly generated public key and secret key.
:rtype: (bytes(public_key), bytes(secret_key))
### Response:
def crypto_sign_keypair():
"""
Returns a randomly generated public key and secret key.
:rtype: (bytes(public_key), bytes(secret_key))
"""
pk = ffi.new("unsigned char[]", crypto_sign_PUBLICKEYBYTES)
sk = ffi.new("unsigned char[]", crypto_sign_SECRETKEYBYTES)
rc = lib.crypto_sign_keypair(pk, sk)
ensure(rc == 0,
'Unexpected library error',
raising=exc.RuntimeError)
return (
ffi.buffer(pk, crypto_sign_PUBLICKEYBYTES)[:],
ffi.buffer(sk, crypto_sign_SECRETKEYBYTES)[:],
) |
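An illustrative call of the function above; the length checks use the same module-level constants, so this is a sketch in the context of that bindings module rather than a standalone script.

public_key, secret_key = crypto_sign_keypair()
# Ed25519 key sizes: 32-byte public key, 64-byte secret key.
assert len(public_key) == crypto_sign_PUBLICKEYBYTES
assert len(secret_key) == crypto_sign_SECRETKEYBYTES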
def _transition_stage(self, step, total_steps, brightness=None):
"""
Get a transition stage at a specific step.
:param step: The current step.
:param total_steps: The total number of steps.
:param brightness: The brightness to transition to (0.0-1.0).
:return: The stage at the specific step.
"""
if brightness is not None:
self._assert_is_brightness(brightness)
brightness = self._interpolate(self.brightness, brightness,
step, total_steps)
return {'brightness': brightness} | Get a transition stage at a specific step.
:param step: The current step.
:param total_steps: The total number of steps.
:param brightness: The brightness to transition to (0.0-1.0).
:return: The stage at the specific step. | Below is the the instruction that describes the task:
### Input:
Get a transition stage at a specific step.
:param step: The current step.
:param total_steps: The total number of steps.
:param brightness: The brightness to transition to (0.0-1.0).
:return: The stage at the specific step.
### Response:
def _transition_stage(self, step, total_steps, brightness=None):
"""
Get a transition stage at a specific step.
:param step: The current step.
:param total_steps: The total number of steps.
:param brightness: The brightness to transition to (0.0-1.0).
:return: The stage at the specific step.
"""
if brightness is not None:
self._assert_is_brightness(brightness)
brightness = self._interpolate(self.brightness, brightness,
step, total_steps)
return {'brightness': brightness} |
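The heart of the transition is linear interpolation between the current and target brightness; a standalone sketch of that step (the _interpolate signature is inferred from the call above, so treat it as an assumption):

def interpolate(start, end, step, total_steps):
    # Linear interpolation from `start` to `end` over `total_steps` steps.
    return start + (end - start) * step / total_steps

# Fading brightness from 0.2 to 1.0 in 4 steps.
print([round(interpolate(0.2, 1.0, s, 4), 2) for s in range(5)])
# [0.2, 0.4, 0.6, 0.8, 1.0]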
def _index_fs(self):
"""Returns a deque object full of local file system items.
:returns: ``deque``
"""
indexed_objects = self._return_deque()
directory = self.job_args.get('directory')
if directory:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._drectory_local_files(
directory=directory
)
)
object_names = self.job_args.get('object')
if object_names:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._named_local_files(
object_names=object_names
)
)
return indexed_objects | Returns a deque object full of local file system items.
:returns: ``deque`` | Below is the the instruction that describes the task:
### Input:
Returns a deque object full of local file system items.
:returns: ``deque``
### Response:
def _index_fs(self):
"""Returns a deque object full of local file system items.
:returns: ``deque``
"""
indexed_objects = self._return_deque()
directory = self.job_args.get('directory')
if directory:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._drectory_local_files(
directory=directory
)
)
object_names = self.job_args.get('object')
if object_names:
indexed_objects = self._return_deque(
deque=indexed_objects,
item=self._named_local_files(
object_names=object_names
)
)
return indexed_objects |
def delete(self, list_id, webhook_id):
"""
Delete a specific webhook in a list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param webhook_id: The unique id for the webhook.
:type webhook_id: :py:class:`str`
"""
self.list_id = list_id
self.webhook_id = webhook_id
return self._mc_client._delete(url=self._build_path(list_id, 'webhooks', webhook_id)) | Delete a specific webhook in a list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param webhook_id: The unique id for the webhook.
:type webhook_id: :py:class:`str` | Below is the the instruction that describes the task:
### Input:
Delete a specific webhook in a list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param webhook_id: The unique id for the webhook.
:type webhook_id: :py:class:`str`
### Response:
def delete(self, list_id, webhook_id):
"""
Delete a specific webhook in a list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param webhook_id: The unique id for the webhook.
:type webhook_id: :py:class:`str`
"""
self.list_id = list_id
self.webhook_id = webhook_id
return self._mc_client._delete(url=self._build_path(list_id, 'webhooks', webhook_id)) |
def main():
"""Event display for an event of station 503
Date Time Timestamp Nanoseconds
2012-03-29 10:51:36 1333018296 870008589
Number of MIPs
35.0 51.9 35.8 78.9
Arrival time
15.0 17.5 20.0 27.5
"""
# Detector positions in ENU relative to the station GPS
x = [-6.34, -2.23, -3.6, 3.46]
y = [6.34, 2.23, -3.6, 3.46]
# Scale mips to fit the graph
n = [35.0, 51.9, 35.8, 78.9]
# Make times relative to first detection
t = [15., 17.5, 20., 27.5]
dt = [ti - min(t) for ti in t]
plot = Plot()
plot.scatter([0], [0], mark='triangle')
plot.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below')
plot.scatter_table(x, y, dt, n)
plot.set_scalebar(location="lower right")
plot.set_colorbar('$\Delta$t [ns]')
plot.set_axis_equal()
plot.set_mlimits(max=16.)
plot.set_slimits(min=10., max=100.)
plot.set_xlabel('x [m]')
plot.set_ylabel('y [m]')
plot.save('event_display')
# Add event by Station 508
# Detector positions in ENU relative to the station GPS
x508 = [6.12, 0.00, -3.54, 3.54]
y508 = [-6.12, -13.23, -3.54, 3.54]
# Event GPS timestamp: 1371498167.016412100
# MIPS
n508 = [5.6, 16.7, 36.6, 9.0]
# Arrival Times
t508 = [15., 22.5, 22.5, 30.]
dt508 = [ti - min(t508) for ti in t508]
plot = MultiPlot(1, 2, width=r'.33\linewidth')
plot.set_xlimits_for_all(min=-10, max=15)
plot.set_ylimits_for_all(min=-15, max=10)
plot.set_mlimits_for_all(min=0., max=16.)
plot.set_colorbar('$\Delta$t [ns]', False)
plot.set_colormap('blackwhite')
plot.set_scalebar_for_all(location="upper right")
p = plot.get_subplot_at(0, 0)
p.scatter([0], [0], mark='triangle')
p.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below')
p.scatter_table(x, y, dt, n)
p.set_axis_equal()
p = plot.get_subplot_at(0, 1)
p.scatter([0], [0], mark='triangle')
p.add_pin_at_xy(0, 0, 'Station 508', use_arrow=False, location='below')
p.scatter_table(x508, y508, dt508, n508)
p.set_axis_equal()
plot.show_yticklabels_for_all([(0, 0)])
plot.show_xticklabels_for_all([(0, 0), (0, 1)])
plot.set_xlabel('x [m]')
plot.set_ylabel('y [m]')
plot.save('multi_event_display') | Event display for an event of station 503
Date Time Timestamp Nanoseconds
2012-03-29 10:51:36 1333018296 870008589
Number of MIPs
35.0 51.9 35.8 78.9
Arrival time
15.0 17.5 20.0 27.5 | Below is the the instruction that describes the task:
### Input:
Event display for an event of station 503
Date Time Timestamp Nanoseconds
2012-03-29 10:51:36 1333018296 870008589
Number of MIPs
35.0 51.9 35.8 78.9
Arrival time
15.0 17.5 20.0 27.5
### Response:
def main():
"""Event display for an event of station 503
Date Time Timestamp Nanoseconds
2012-03-29 10:51:36 1333018296 870008589
Number of MIPs
35.0 51.9 35.8 78.9
Arrival time
15.0 17.5 20.0 27.5
"""
# Detector positions in ENU relative to the station GPS
x = [-6.34, -2.23, -3.6, 3.46]
y = [6.34, 2.23, -3.6, 3.46]
# Scale mips to fit the graph
n = [35.0, 51.9, 35.8, 78.9]
# Make times relative to first detection
t = [15., 17.5, 20., 27.5]
dt = [ti - min(t) for ti in t]
plot = Plot()
plot.scatter([0], [0], mark='triangle')
plot.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below')
plot.scatter_table(x, y, dt, n)
plot.set_scalebar(location="lower right")
plot.set_colorbar('$\Delta$t [ns]')
plot.set_axis_equal()
plot.set_mlimits(max=16.)
plot.set_slimits(min=10., max=100.)
plot.set_xlabel('x [m]')
plot.set_ylabel('y [m]')
plot.save('event_display')
# Add event by Station 508
# Detector positions in ENU relative to the station GPS
x508 = [6.12, 0.00, -3.54, 3.54]
y508 = [-6.12, -13.23, -3.54, 3.54]
# Event GPS timestamp: 1371498167.016412100
# MIPS
n508 = [5.6, 16.7, 36.6, 9.0]
# Arrival Times
t508 = [15., 22.5, 22.5, 30.]
dt508 = [ti - min(t508) for ti in t508]
plot = MultiPlot(1, 2, width=r'.33\linewidth')
plot.set_xlimits_for_all(min=-10, max=15)
plot.set_ylimits_for_all(min=-15, max=10)
plot.set_mlimits_for_all(min=0., max=16.)
plot.set_colorbar('$\Delta$t [ns]', False)
plot.set_colormap('blackwhite')
plot.set_scalebar_for_all(location="upper right")
p = plot.get_subplot_at(0, 0)
p.scatter([0], [0], mark='triangle')
p.add_pin_at_xy(0, 0, 'Station 503', use_arrow=False, location='below')
p.scatter_table(x, y, dt, n)
p.set_axis_equal()
p = plot.get_subplot_at(0, 1)
p.scatter([0], [0], mark='triangle')
p.add_pin_at_xy(0, 0, 'Station 508', use_arrow=False, location='below')
p.scatter_table(x508, y508, dt508, n508)
p.set_axis_equal()
plot.show_yticklabels_for_all([(0, 0)])
plot.show_xticklabels_for_all([(0, 0), (0, 1)])
plot.set_xlabel('x [m]')
plot.set_ylabel('y [m]')
plot.save('multi_event_display') |
def check_ace(path, objectType, user, permission=None, acetype=None, propagation=None, exactPermissionMatch=False):
'''
Checks a path to verify the ACE (access control entry) specified exists
Args:
path: path to the file/reg key
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: user that the ACL is for
permission: permission to test for (READ, FULLCONTROL, etc)
acetype: the type of ACE (ALLOW or DENY)
propagation: the propagation type of the ACE (FILES, FOLDERS, KEY, KEY&SUBKEYS, SUBKEYS, etc)
exactPermissionMatch: the ACL must match exactly, IE if READ is specified, the user must have READ exactly and not FULLCONTROL (which also has the READ permission obviously)
Returns (dict): 'Exists' true if the ACE exists, false if it does not
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.check_ace c:\temp directory <username> fullcontrol
'''
ret = {'result': False,
'Exists': False,
'comment': ''}
dc = daclConstants()
objectTypeBit = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectTypeBit)
permission = permission.upper() if permission else None
    acetype = acetype.upper() if acetype else None
propagation = propagation.upper() if propagation else None
permissionbit = dc.getPermissionBit(objectTypeBit, permission) if permission else None
acetypebit = dc.getAceTypeBit(acetype) if acetype else None
propagationbit = dc.getPropagationBit(objectTypeBit, propagation) if propagation else None
sidRet = _getUserSid(user)
if not sidRet['result']:
return sidRet
dacls = _get_dacl(path, objectTypeBit)
ret['result'] = True
if dacls:
for counter in range(0, dacls.GetAceCount()):
ace = dacls.GetAce(counter)
if ace[2] == sidRet['sid']:
if not acetypebit or ace[0][0] == acetypebit:
if not propagationbit or (ace[0][1] & propagationbit) == propagationbit:
if not permissionbit:
ret['Exists'] = True
return ret
if exactPermissionMatch:
if ace[1] == permissionbit:
ret['Exists'] = True
return ret
else:
if (ace[1] & permissionbit) == permissionbit:
ret['Exists'] = True
return ret
else:
ret['comment'] = 'No DACL found for object.'
return ret | Checks a path to verify the ACE (access control entry) specified exists
Args:
path: path to the file/reg key
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: user that the ACL is for
permission: permission to test for (READ, FULLCONTROL, etc)
acetype: the type of ACE (ALLOW or DENY)
propagation: the propagation type of the ACE (FILES, FOLDERS, KEY, KEY&SUBKEYS, SUBKEYS, etc)
exactPermissionMatch: the ACL must match exactly, IE if READ is specified, the user must have READ exactly and not FULLCONTROL (which also has the READ permission obviously)
Returns (dict): 'Exists' true if the ACE exists, false if it does not
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.check_ace c:\temp directory <username> fullcontrol | Below is the the instruction that describes the task:
### Input:
Checks a path to verify the ACE (access control entry) specified exists
Args:
path: path to the file/reg key
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: user that the ACL is for
permission: permission to test for (READ, FULLCONTROL, etc)
acetype: the type of ACE (ALLOW or DENY)
propagation: the propagation type of the ACE (FILES, FOLDERS, KEY, KEY&SUBKEYS, SUBKEYS, etc)
exactPermissionMatch: the ACL must match exactly, IE if READ is specified, the user must have READ exactly and not FULLCONTROL (which also has the READ permission obviously)
Returns (dict): 'Exists' true if the ACE exists, false if it does not
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.check_ace c:\temp directory <username> fullcontrol
### Response:
def check_ace(path, objectType, user, permission=None, acetype=None, propagation=None, exactPermissionMatch=False):
'''
Checks a path to verify the ACE (access control entry) specified exists
Args:
path: path to the file/reg key
objectType: The type of object (FILE, DIRECTORY, REGISTRY)
user: user that the ACL is for
permission: permission to test for (READ, FULLCONTROL, etc)
acetype: the type of ACE (ALLOW or DENY)
propagation: the propagation type of the ACE (FILES, FOLDERS, KEY, KEY&SUBKEYS, SUBKEYS, etc)
exactPermissionMatch: the ACL must match exactly, IE if READ is specified, the user must have READ exactly and not FULLCONTROL (which also has the READ permission obviously)
Returns (dict): 'Exists' true if the ACE exists, false if it does not
CLI Example:
.. code-block:: bash
salt 'minion-id' win_dacl.check_ace c:\temp directory <username> fullcontrol
'''
ret = {'result': False,
'Exists': False,
'comment': ''}
dc = daclConstants()
objectTypeBit = dc.getObjectTypeBit(objectType)
path = dc.processPath(path, objectTypeBit)
permission = permission.upper() if permission else None
    acetype = acetype.upper() if acetype else None
propagation = propagation.upper() if propagation else None
permissionbit = dc.getPermissionBit(objectTypeBit, permission) if permission else None
acetypebit = dc.getAceTypeBit(acetype) if acetype else None
propagationbit = dc.getPropagationBit(objectTypeBit, propagation) if propagation else None
sidRet = _getUserSid(user)
if not sidRet['result']:
return sidRet
dacls = _get_dacl(path, objectTypeBit)
ret['result'] = True
if dacls:
for counter in range(0, dacls.GetAceCount()):
ace = dacls.GetAce(counter)
if ace[2] == sidRet['sid']:
if not acetypebit or ace[0][0] == acetypebit:
if not propagationbit or (ace[0][1] & propagationbit) == propagationbit:
if not permissionbit:
ret['Exists'] = True
return ret
if exactPermissionMatch:
if ace[1] == permissionbit:
ret['Exists'] = True
return ret
else:
if (ace[1] & permissionbit) == permissionbit:
ret['Exists'] = True
return ret
else:
ret['comment'] = 'No DACL found for object.'
return ret |
def _isCompatible(self, other, reporter):
"""
This is the environment implementation of
:meth:`BaseFont.isCompatible`.
Subclasses may override this method.
"""
font1 = self
font2 = other
# incompatible guidelines
guidelines1 = set(font1.guidelines)
guidelines2 = set(font2.guidelines)
if len(guidelines1) != len(guidelines2):
reporter.warning = True
reporter.guidelineCountDifference = True
if len(guidelines1.difference(guidelines2)) != 0:
reporter.warning = True
reporter.guidelinesMissingFromFont2 = list(
guidelines1.difference(guidelines2))
if len(guidelines2.difference(guidelines1)) != 0:
reporter.warning = True
reporter.guidelinesMissingInFont1 = list(
guidelines2.difference(guidelines1))
# incompatible layers
layers1 = set(font1.layerOrder)
layers2 = set(font2.layerOrder)
if len(layers1) != len(layers2):
reporter.warning = True
reporter.layerCountDifference = True
if len(layers1.difference(layers2)) != 0:
reporter.warning = True
reporter.layersMissingFromFont2 = list(layers1.difference(layers2))
if len(layers2.difference(layers1)) != 0:
reporter.warning = True
reporter.layersMissingInFont1 = list(layers2.difference(layers1))
# test layers
for layerName in sorted(layers1.intersection(layers2)):
layer1 = font1.getLayer(layerName)
layer2 = font2.getLayer(layerName)
layerCompatibility = layer1.isCompatible(layer2)[1]
if layerCompatibility.fatal or layerCompatibility.warning:
if layerCompatibility.fatal:
reporter.fatal = True
if layerCompatibility.warning:
reporter.warning = True
reporter.layers.append(layerCompatibility) | This is the environment implementation of
:meth:`BaseFont.isCompatible`.
Subclasses may override this method. | Below is the the instruction that describes the task:
### Input:
This is the environment implementation of
:meth:`BaseFont.isCompatible`.
Subclasses may override this method.
### Response:
def _isCompatible(self, other, reporter):
"""
This is the environment implementation of
:meth:`BaseFont.isCompatible`.
Subclasses may override this method.
"""
font1 = self
font2 = other
# incompatible guidelines
guidelines1 = set(font1.guidelines)
guidelines2 = set(font2.guidelines)
if len(guidelines1) != len(guidelines2):
reporter.warning = True
reporter.guidelineCountDifference = True
if len(guidelines1.difference(guidelines2)) != 0:
reporter.warning = True
reporter.guidelinesMissingFromFont2 = list(
guidelines1.difference(guidelines2))
if len(guidelines2.difference(guidelines1)) != 0:
reporter.warning = True
reporter.guidelinesMissingInFont1 = list(
guidelines2.difference(guidelines1))
# incompatible layers
layers1 = set(font1.layerOrder)
layers2 = set(font2.layerOrder)
if len(layers1) != len(layers2):
reporter.warning = True
reporter.layerCountDifference = True
if len(layers1.difference(layers2)) != 0:
reporter.warning = True
reporter.layersMissingFromFont2 = list(layers1.difference(layers2))
if len(layers2.difference(layers1)) != 0:
reporter.warning = True
reporter.layersMissingInFont1 = list(layers2.difference(layers1))
# test layers
for layerName in sorted(layers1.intersection(layers2)):
layer1 = font1.getLayer(layerName)
layer2 = font2.getLayer(layerName)
layerCompatibility = layer1.isCompatible(layer2)[1]
if layerCompatibility.fatal or layerCompatibility.warning:
if layerCompatibility.fatal:
reporter.fatal = True
if layerCompatibility.warning:
reporter.warning = True
reporter.layers.append(layerCompatibility) |
def remove_duplicates(seq):
"""
Return unique elements from list while preserving order.
From https://stackoverflow.com/a/480227/2589328
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))] | Return unique elements from list while preserving order.
From https://stackoverflow.com/a/480227/2589328 | Below is the the instruction that describes the task:
### Input:
Return unique elements from list while preserving order.
From https://stackoverflow.com/a/480227/2589328
### Response:
def remove_duplicates(seq):
"""
Return unique elements from list while preserving order.
From https://stackoverflow.com/a/480227/2589328
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))] |
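For illustration, the function keeps the first occurrence of each element and accepts any iterable of hashable items:

print(remove_duplicates([3, 1, 3, 2, 1]))    # [3, 1, 2]
print(remove_duplicates("abracadabra"))      # ['a', 'b', 'r', 'c', 'd']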
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style. Also look for
# non-single-argument constructors which are also technically valid, but
# strongly suggest something is wrong.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 0,
'Constructors that require multiple arguments '
'should not be marked explicit.') | r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message | Below is the the instruction that describes the task:
### Input:
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
### Response:
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style. Also look for
# non-single-argument constructors which are also technically valid, but
# strongly suggest something is wrong.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 0,
'Constructors that require multiple arguments '
'should not be marked explicit.') |
def _initialize(self):
""" Open from caffe weights """
self._graph = tf.Graph()
with self._graph.as_default():
self._input_node = tf.placeholder(tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels))
weights = self.build_alexnet_weights()
self._output_tensor = self.build_alexnet(weights)
self._feature_tensor = self.build_alexnet(weights, output_layer=self._feature_layer)
self._initialized = True | Open from caffe weights | Below is the the instruction that describes the task:
### Input:
Open from caffe weights
### Response:
def _initialize(self):
""" Open from caffe weights """
self._graph = tf.Graph()
with self._graph.as_default():
self._input_node = tf.placeholder(tf.float32, (self._batch_size, self._im_height, self._im_width, self._num_channels))
weights = self.build_alexnet_weights()
self._output_tensor = self.build_alexnet(weights)
self._feature_tensor = self.build_alexnet(weights, output_layer=self._feature_layer)
self._initialized = True |
def ReadFileObject(self, definitions_registry, file_object):
"""Reads data type definitions from a file-like object into the registry.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
file_object (file): file-like object to read from.
Raises:
FormatError: if the definitions values are missing or if the format is
incorrect.
"""
last_definition_object = None
error_location = None
error_message = None
try:
yaml_generator = yaml.safe_load_all(file_object)
for yaml_definition in yaml_generator:
definition_object = self._ReadDefinition(
definitions_registry, yaml_definition)
if not definition_object:
error_location = self._GetFormatErrorLocation(
yaml_definition, last_definition_object)
error_message = '{0:s} Missing definition object.'.format(
error_location)
raise errors.FormatError(error_message)
definitions_registry.RegisterDefinition(definition_object)
last_definition_object = definition_object
except errors.DefinitionReaderError as exception:
error_message = 'in: {0:s} {1:s}'.format(
exception.name or '<NAMELESS>', exception.message)
raise errors.FormatError(error_message)
except (yaml.reader.ReaderError, yaml.scanner.ScannerError) as exception:
error_location = self._GetFormatErrorLocation({}, last_definition_object)
error_message = '{0:s} {1!s}'.format(error_location, exception)
raise errors.FormatError(error_message) | Reads data type definitions from a file-like object into the registry.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
file_object (file): file-like object to read from.
Raises:
FormatError: if the definitions values are missing or if the format is
incorrect. | Below is the the instruction that describes the task:
### Input:
Reads data type definitions from a file-like object into the registry.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
file_object (file): file-like object to read from.
Raises:
FormatError: if the definitions values are missing or if the format is
incorrect.
### Response:
def ReadFileObject(self, definitions_registry, file_object):
"""Reads data type definitions from a file-like object into the registry.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
file_object (file): file-like object to read from.
Raises:
FormatError: if the definitions values are missing or if the format is
incorrect.
"""
last_definition_object = None
error_location = None
error_message = None
try:
yaml_generator = yaml.safe_load_all(file_object)
for yaml_definition in yaml_generator:
definition_object = self._ReadDefinition(
definitions_registry, yaml_definition)
if not definition_object:
error_location = self._GetFormatErrorLocation(
yaml_definition, last_definition_object)
error_message = '{0:s} Missing definition object.'.format(
error_location)
raise errors.FormatError(error_message)
definitions_registry.RegisterDefinition(definition_object)
last_definition_object = definition_object
except errors.DefinitionReaderError as exception:
error_message = 'in: {0:s} {1:s}'.format(
exception.name or '<NAMELESS>', exception.message)
raise errors.FormatError(error_message)
except (yaml.reader.ReaderError, yaml.scanner.ScannerError) as exception:
error_location = self._GetFormatErrorLocation({}, last_definition_object)
error_message = '{0:s} {1!s}'.format(error_location, exception)
raise errors.FormatError(error_message) |
def entries(self, query=None):
"""Fetches all Entries from the Space (up to the set limit, can be modified in `query`).
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = client.entries()
[<Entry[cat] id='happycat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>,
<Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>,
<Entry[cat] id='garfield'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>,
<Entry[cat] id='nyancat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>,
<Entry[human] id='finn'>,
<Entry[dog] id='jake'>]
"""
if query is None:
query = {}
self._normalize_select(query)
return self._get(
self.environment_url('/entries'),
query
) | Fetches all Entries from the Space (up to the set limit, can be modified in `query`).
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = client.entries()
[<Entry[cat] id='happycat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>,
<Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>,
<Entry[cat] id='garfield'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>,
<Entry[cat] id='nyancat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>,
<Entry[human] id='finn'>,
<Entry[dog] id='jake'>] | Below is the the instruction that describes the task:
### Input:
Fetches all Entries from the Space (up to the set limit, can be modified in `query`).
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = client.entries()
[<Entry[cat] id='happycat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>,
<Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>,
<Entry[cat] id='garfield'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>,
<Entry[cat] id='nyancat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>,
<Entry[human] id='finn'>,
<Entry[dog] id='jake'>]
### Response:
def entries(self, query=None):
"""Fetches all Entries from the Space (up to the set limit, can be modified in `query`).
API Reference: https://www.contentful.com/developers/docs/references/content-delivery-api/#/reference/entries/entries-collection/get-all-entries-of-a-space
:param query: (optional) Dict with API options.
:return: List of :class:`Entry <contentful.entry.Entry>` objects.
:rtype: List of contentful.entry.Entry
Usage:
>>> entries = client.entries()
[<Entry[cat] id='happycat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='5ETMRzkl9KM4omyMwKAOki'>,
<Entry[dog] id='6KntaYXaHSyIw8M6eo26OK'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='7qVBlCjpWE86Oseo40gAEY'>,
<Entry[cat] id='garfield'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='4MU1s3potiUEM2G4okYOqw'>,
<Entry[cat] id='nyancat'>,
<Entry[1t9IbcfdCk6m04uISSsaIK] id='ge1xHyH3QOWucKWCCAgIG'>,
<Entry[human] id='finn'>,
<Entry[dog] id='jake'>]
"""
if query is None:
query = {}
self._normalize_select(query)
return self._get(
self.environment_url('/entries'),
query
) |
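The docstring shows an unfiltered call; query keys are passed straight through to the Content Delivery API, so a filtered request is just a dict. The keys below are standard Delivery API parameters, used here purely for illustration.

# Only entries of the 'cat' content type, newest first, two at a time.
cats = client.entries({
    'content_type': 'cat',
    'order': '-sys.createdAt',
    'limit': 2,
})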
def _detect(self):
""" Detect shadowing local variables
Recursively visit the calls
Returns:
list: {'vuln', 'filename,'contract','func', 'shadow'}
"""
results = []
for contract in self.contracts:
shadows = self.detect_shadowing_definitions(contract)
if shadows:
for shadow in shadows:
local_parent_name = shadow[1]
local_variable = shadow[2]
overshadowed = shadow[3]
info = '{}.{}.{} (local variable @ {}) shadows:\n'.format(contract.name,
local_parent_name,
local_variable.name,
local_variable.source_mapping_str)
for overshadowed_entry in overshadowed:
info += "\t- {}.{} ({} @ {})\n".format(overshadowed_entry[1],
overshadowed_entry[2],
overshadowed_entry[0],
overshadowed_entry[2].source_mapping_str)
# Generate relevant JSON data for this shadowing definition.
json = self.generate_json_result(info)
self.add_variable_to_json(local_variable, json)
for overshadowed_entry in overshadowed:
if overshadowed_entry[0] in [self.OVERSHADOWED_FUNCTION, self.OVERSHADOWED_MODIFIER,
self.OVERSHADOWED_EVENT]:
self.add_function_to_json(overshadowed_entry[2], json)
elif overshadowed_entry[0] == self.OVERSHADOWED_STATE_VARIABLE:
self.add_variable_to_json(overshadowed_entry[2], json)
results.append(json)
return results | Detect shadowing local variables
Recursively visit the calls
Returns:
list: {'vuln', 'filename,'contract','func', 'shadow'} | Below is the the instruction that describes the task:
### Input:
Detect shadowing local variables
Recursively visit the calls
Returns:
list: {'vuln', 'filename,'contract','func', 'shadow'}
### Response:
def _detect(self):
""" Detect shadowing local variables
Recursively visit the calls
Returns:
list: {'vuln', 'filename,'contract','func', 'shadow'}
"""
results = []
for contract in self.contracts:
shadows = self.detect_shadowing_definitions(contract)
if shadows:
for shadow in shadows:
local_parent_name = shadow[1]
local_variable = shadow[2]
overshadowed = shadow[3]
info = '{}.{}.{} (local variable @ {}) shadows:\n'.format(contract.name,
local_parent_name,
local_variable.name,
local_variable.source_mapping_str)
for overshadowed_entry in overshadowed:
info += "\t- {}.{} ({} @ {})\n".format(overshadowed_entry[1],
overshadowed_entry[2],
overshadowed_entry[0],
overshadowed_entry[2].source_mapping_str)
# Generate relevant JSON data for this shadowing definition.
json = self.generate_json_result(info)
self.add_variable_to_json(local_variable, json)
for overshadowed_entry in overshadowed:
if overshadowed_entry[0] in [self.OVERSHADOWED_FUNCTION, self.OVERSHADOWED_MODIFIER,
self.OVERSHADOWED_EVENT]:
self.add_function_to_json(overshadowed_entry[2], json)
elif overshadowed_entry[0] == self.OVERSHADOWED_STATE_VARIABLE:
self.add_variable_to_json(overshadowed_entry[2], json)
results.append(json)
return results |
def find(self):
"""
        Fetch all objects that satisfy the query conditions.
:rtype: list
"""
content = self._do_request(self.dump())
objs = []
for result in content['results']:
obj = self._new_object()
obj._update_data(self._process_result(result))
objs.append(obj)
        return objs | Fetch all objects that satisfy the query conditions.
:rtype: list | Below is the the instruction that describes the task:
### Input:
根据查询条件,获取包含所有满足条件的对象。
:rtype: list
### Response:
def find(self):
"""
根据查询条件,获取包含所有满足条件的对象。
:rtype: list
"""
content = self._do_request(self.dump())
objs = []
for result in content['results']:
obj = self._new_object()
obj._update_data(self._process_result(result))
objs.append(obj)
return objs |
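The body follows a common fetch-and-hydrate pattern: run the query, then turn each raw result into an object. A generic standalone sketch of that pattern (every name here is illustrative, not the SDK's API):

def hydrate(raw_results, make_object):
    # Build one object per raw result and copy the processed fields onto it.
    objs = []
    for result in raw_results:
        obj = make_object()
        obj.update(result)
        objs.append(obj)
    return objs

print(hydrate([{'title': 'buy milk'}, {'title': 'write docs'}], dict))
# [{'title': 'buy milk'}, {'title': 'write docs'}]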
def cli(ctx, board, scons, project_dir, sayyes):
"""Manage apio projects."""
if scons:
Project().create_sconstruct(project_dir, sayyes)
elif board:
Project().create_ini(board, project_dir, sayyes)
else:
click.secho(ctx.get_help()) | Manage apio projects. | Below is the the instruction that describes the task:
### Input:
Manage apio projects.
### Response:
def cli(ctx, board, scons, project_dir, sayyes):
"""Manage apio projects."""
if scons:
Project().create_sconstruct(project_dir, sayyes)
elif board:
Project().create_ini(board, project_dir, sayyes)
else:
click.secho(ctx.get_help()) |
def load_gpi(self, gpi_path):
"""
Loads a GPI as a file from the `config.gpi_authority_path`
"""
if self.config.gpi_authority_path is not None:
gpis = dict()
parser = entityparser.GpiParser()
with open(self.config.gpi_authority_path) as gpi_f:
entities = parser.parse(file=gpi_f)
for entity in entities:
gpis[entity["id"]] = {
"symbol": entity["label"],
"name": entity["full_name"],
"synonyms": entitywriter.stringify(entity["synonyms"]),
"type": entity["type"]
}
return gpis
# If there is no config file path, return None
return None | Loads a GPI as a file from the `config.gpi_authority_path` | Below is the the instruction that describes the task:
### Input:
Loads a GPI as a file from the `config.gpi_authority_path`
### Response:
def load_gpi(self, gpi_path):
"""
Loads a GPI as a file from the `config.gpi_authority_path`
"""
if self.config.gpi_authority_path is not None:
gpis = dict()
parser = entityparser.GpiParser()
with open(self.config.gpi_authority_path) as gpi_f:
entities = parser.parse(file=gpi_f)
for entity in entities:
gpis[entity["id"]] = {
"symbol": entity["label"],
"name": entity["full_name"],
"synonyms": entitywriter.stringify(entity["synonyms"]),
"type": entity["type"]
}
return gpis
# If there is no config file path, return None
return None |
def get_instance(self, payload):
"""
Build an instance of EnvironmentInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
:rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
"""
return EnvironmentInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | Build an instance of EnvironmentInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
:rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance | Below is the the instruction that describes the task:
### Input:
Build an instance of EnvironmentInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
:rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of EnvironmentInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
:rtype: twilio.rest.serverless.v1.service.environment.EnvironmentInstance
"""
return EnvironmentInstance(self._version, payload, service_sid=self._solution['service_sid'], ) |
def download(self,
tool: Tool,
force=False) -> bool:
"""
Attempts to download the Docker image for a given tool from
`DockerHub <https://hub.docker.com>`_. If the force parameter is set to
True, any existing image will be overwritten.
Returns:
`True` if successfully downloaded, else `False`.
"""
return self.__installation.build.download(tool.image,
force=force) | Attempts to download the Docker image for a given tool from
`DockerHub <https://hub.docker.com>`_. If the force parameter is set to
True, any existing image will be overwritten.
Returns:
`True` if successfully downloaded, else `False`. | Below is the the instruction that describes the task:
### Input:
Attempts to download the Docker image for a given tool from
`DockerHub <https://hub.docker.com>`_. If the force parameter is set to
True, any existing image will be overwritten.
Returns:
`True` if successfully downloaded, else `False`.
### Response:
def download(self,
tool: Tool,
force=False) -> bool:
"""
Attempts to download the Docker image for a given tool from
`DockerHub <https://hub.docker.com>`_. If the force parameter is set to
True, any existing image will be overwritten.
Returns:
`True` if successfully downloaded, else `False`.
"""
return self.__installation.build.download(tool.image,
force=force) |
def daterange_(self, datecol, date_start, op, **args):
"""
Returns a DataSwim instance with rows in a date range
"""
df = self._daterange(datecol, date_start, op, **args)
if df is None:
self.err("Can not select date range data")
return self._duplicate_(df) | Returns a DataSwim instance with rows in a date range | Below is the the instruction that describes the task:
### Input:
Returns a DataSwim instance with rows in a date range
### Response:
def daterange_(self, datecol, date_start, op, **args):
"""
Returns a DataSwim instance with rows in a date range
"""
df = self._daterange(datecol, date_start, op, **args)
if df is None:
self.err("Can not select date range data")
return self._duplicate_(df) |
def init_states(batch_size, num_lstm_layer, num_hidden):
"""
Returns name and shape of init states of LSTM network
Parameters
----------
batch_size: list of tuple of str and tuple of int and int
num_lstm_layer: int
num_hidden: int
Returns
-------
list of tuple of str and tuple of int and int
"""
init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
return init_c + init_h | Returns name and shape of init states of LSTM network
Parameters
----------
batch_size: list of tuple of str and tuple of int and int
num_lstm_layer: int
num_hidden: int
Returns
-------
list of tuple of str and tuple of int and int | Below is the the instruction that describes the task:
### Input:
Returns name and shape of init states of LSTM network
Parameters
----------
batch_size: list of tuple of str and tuple of int and int
num_lstm_layer: int
num_hidden: int
Returns
-------
list of tuple of str and tuple of int and int
### Response:
def init_states(batch_size, num_lstm_layer, num_hidden):
"""
Returns name and shape of init states of LSTM network
Parameters
----------
batch_size: list of tuple of str and tuple of int and int
num_lstm_layer: int
num_hidden: int
Returns
-------
list of tuple of str and tuple of int and int
"""
init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
return init_c + init_h |
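A quick usage sketch for init_states, assuming the function above is in scope; the batch size, layer count and hidden size are made-up values chosen only to show the naming and shape pattern.

# Cell states for every layer are listed first, then the hidden states.
for name, shape in init_states(batch_size=32, num_lstm_layer=2, num_hidden=256):
    print(name, shape)
# l0_init_c (32, 256)
# l1_init_c (32, 256)
# l0_init_h (32, 256)
# l1_init_h (32, 256)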
def update_html_symlink(html_dir):
""""Maintail symlink: "today.html", "yesterday.html" """
today = datetime.date.today()
yesterday = datetime.date.today() - datetime.timedelta(days=1)
for from_date, alias_name in (
(today, 'today.html'), (yesterday, 'yesterday.html')):
from_date_file_path = os.path.join(html_dir, 'day_%s.html' % from_date)
symlink_path = os.path.join(html_dir, alias_name)
try:
os.unlink(symlink_path)
except OSError:
pass
        os.symlink(from_date_file_path, symlink_path) | Maintain symlink: "today.html", "yesterday.html" | Below is the the instruction that describes the task:
### Input:
Maintain symlink: "today.html", "yesterday.html"
### Response:
def update_html_symlink(html_dir):
""""Maintail symlink: "today.html", "yesterday.html" """
today = datetime.date.today()
yesterday = datetime.date.today() - datetime.timedelta(days=1)
for from_date, alias_name in (
(today, 'today.html'), (yesterday, 'yesterday.html')):
from_date_file_path = os.path.join(html_dir, 'day_%s.html' % from_date)
symlink_path = os.path.join(html_dir, alias_name)
try:
os.unlink(symlink_path)
except OSError:
pass
os.symlink(from_date_file_path, symlink_path) |
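The unlink-then-relink idiom above is how the aliases get repointed without caring whether they already exist; below is a stand-alone, POSIX-only sketch of the same idiom against throwaway temporary paths (no HTML directory is assumed).

import os
import tempfile

workdir = tempfile.mkdtemp()
target = os.path.join(workdir, 'day_2024-01-01.html')
open(target, 'w').close()            # stand-in for a generated report page
alias = os.path.join(workdir, 'today.html')
try:
    os.unlink(alias)                 # drop a stale link if one exists
except OSError:
    pass
os.symlink(target, alias)
print(os.readlink(alias))            # .../day_2024-01-01.html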
def get_commit_message(self, commit_sha):
"""
Return the commit message for the current commit hash,
replace #<PRID> with GH-<PRID>
"""
cmd = ["git", "show", "-s", "--format=%B", commit_sha]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
message = output.strip().decode("utf-8")
if self.config["fix_commit_msg"]:
return message.replace("#", "GH-")
else:
return message | Return the commit message for the current commit hash,
replace #<PRID> with GH-<PRID> | Below is the the instruction that describes the task:
### Input:
Return the commit message for the current commit hash,
replace #<PRID> with GH-<PRID>
### Response:
def get_commit_message(self, commit_sha):
"""
Return the commit message for the current commit hash,
replace #<PRID> with GH-<PRID>
"""
cmd = ["git", "show", "-s", "--format=%B", commit_sha]
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
message = output.strip().decode("utf-8")
if self.config["fix_commit_msg"]:
return message.replace("#", "GH-")
else:
return message |
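The only non-obvious step in the record above is the issue-reference rewrite; here is a minimal string-only check with an invented commit message (no git call involved).

message = "Fix tokenizer crash (#1234)\n\nCloses #1234"
print(message.replace("#", "GH-"))
# Fix tokenizer crash (GH-1234)
#
# Closes GH-1234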
def securityEventDF(symbol=None, token='', version=''):
'''The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
x = securityEvent(symbol, token, version)
data = []
for key in x:
d = x[key]
d['symbol'] = key
data.append(d)
df = pd.DataFrame(data)
_toDatetime(df)
return df | The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result | Below is the the instruction that describes the task:
### Input:
The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
### Response:
def securityEventDF(symbol=None, token='', version=''):
'''The Security event message is used to indicate events that apply to a security. A Security event message will be sent whenever such event occurs
https://iexcloud.io/docs/api/#deep-security-event
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
x = securityEvent(symbol, token, version)
data = []
for key in x:
d = x[key]
d['symbol'] = key
data.append(d)
df = pd.DataFrame(data)
_toDatetime(df)
return df |
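The reshaping in securityEventDF is a generic dict-of-dicts to DataFrame pattern; the sketch below substitutes a fabricated payload for a live IEX Cloud response, so only pandas is required.

import pandas as pd

payload = {
    'AAPL': {'securityEvent': 'TradingHalt', 'timestamp': 1588594800000},
    'MSFT': {'securityEvent': 'TradeResumption', 'timestamp': 1588595400000},
}
rows = []
for symbol, event in payload.items():
    rows.append(dict(event, symbol=symbol))   # fold the dict key back in as a column
print(pd.DataFrame(rows))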
def addDeprecatedIndividual(self, old_id, new_ids=None):
"""
Will mark the oldid as a deprecated individual.
if one newid is supplied, it will mark it as replaced by.
if >1 newid is supplied, it will mark it with consider properties
:param g:
:param oldid: the individual id to deprecate
:param newids: the individual idlist that is the replacement(s) of
the old individual. Not required.
:return:
"""
self.graph.addTriple(
old_id, self.globaltt['type'], self.globaltt['named_individual'])
self._addReplacementIds(old_id, new_ids) | Will mark the oldid as a deprecated individual.
if one newid is supplied, it will mark it as replaced by.
if >1 newid is supplied, it will mark it with consider properties
:param g:
:param oldid: the individual id to deprecate
:param newids: the individual idlist that is the replacement(s) of
the old individual. Not required.
:return: | Below is the the instruction that describes the task:
### Input:
Will mark the oldid as a deprecated individual.
if one newid is supplied, it will mark it as replaced by.
if >1 newid is supplied, it will mark it with consider properties
:param g:
:param oldid: the individual id to deprecate
:param newids: the individual idlist that is the replacement(s) of
the old individual. Not required.
:return:
### Response:
def addDeprecatedIndividual(self, old_id, new_ids=None):
"""
Will mark the oldid as a deprecated individual.
if one newid is supplied, it will mark it as replaced by.
if >1 newid is supplied, it will mark it with consider properties
:param g:
:param oldid: the individual id to deprecate
:param newids: the individual idlist that is the replacement(s) of
the old individual. Not required.
:return:
"""
self.graph.addTriple(
old_id, self.globaltt['type'], self.globaltt['named_individual'])
self._addReplacementIds(old_id, new_ids) |
def attach_tracker(self, stanza, tracker=None, token=None):
"""
Configure tracking for a stanza without sending it.
:param stanza: Message stanza to send.
:type stanza: :class:`aioxmpp.Message`
:param tracker: Message tracker to use.
:type tracker: :class:`~.MessageTracker` or :data:`None`
:param token: Optional stanza token for more fine-grained tracking.
:type token: :class:`~.StanzaToken`
:rtype: :class:`~.MessageTracker`
:return: The message tracker.
If `tracker` is :data:`None`, a new :class:`~.MessageTracker` is
created.
If `token` is not :data:`None`, updates to the stanza `token` are
reflected in the `tracker`.
If an error reply is received, the tracker will enter
:class:`~.MessageState.ERROR` and the error will be set as
:attr:`~.MessageTracker.response`.
You should use :meth:`send_tracked` if possible. This method however is
very useful if you need to track carbon copies of sent messages, as a
stanza token is not available here and re-sending the message to obtain
one is generally not desirable ☺.
"""
if tracker is None:
tracker = MessageTracker()
stanza.autoset_id()
key = stanza.to.bare(), stanza.id_
self._trackers[key] = tracker
tracker.on_closed.connect(
functools.partial(self._tracker_closed, key)
)
if token is not None:
token.future.add_done_callback(
functools.partial(
self._stanza_sent,
tracker,
token,
)
)
return tracker | Configure tracking for a stanza without sending it.
:param stanza: Message stanza to send.
:type stanza: :class:`aioxmpp.Message`
:param tracker: Message tracker to use.
:type tracker: :class:`~.MessageTracker` or :data:`None`
:param token: Optional stanza token for more fine-grained tracking.
:type token: :class:`~.StanzaToken`
:rtype: :class:`~.MessageTracker`
:return: The message tracker.
If `tracker` is :data:`None`, a new :class:`~.MessageTracker` is
created.
If `token` is not :data:`None`, updates to the stanza `token` are
reflected in the `tracker`.
If an error reply is received, the tracker will enter
:class:`~.MessageState.ERROR` and the error will be set as
:attr:`~.MessageTracker.response`.
You should use :meth:`send_tracked` if possible. This method however is
very useful if you need to track carbon copies of sent messages, as a
stanza token is not available here and re-sending the message to obtain
one is generally not desirable ☺. | Below is the the instruction that describes the task:
### Input:
Configure tracking for a stanza without sending it.
:param stanza: Message stanza to send.
:type stanza: :class:`aioxmpp.Message`
:param tracker: Message tracker to use.
:type tracker: :class:`~.MessageTracker` or :data:`None`
:param token: Optional stanza token for more fine-grained tracking.
:type token: :class:`~.StanzaToken`
:rtype: :class:`~.MessageTracker`
:return: The message tracker.
If `tracker` is :data:`None`, a new :class:`~.MessageTracker` is
created.
If `token` is not :data:`None`, updates to the stanza `token` are
reflected in the `tracker`.
If an error reply is received, the tracker will enter
:class:`~.MessageState.ERROR` and the error will be set as
:attr:`~.MessageTracker.response`.
You should use :meth:`send_tracked` if possible. This method however is
very useful if you need to track carbon copies of sent messages, as a
stanza token is not available here and re-sending the message to obtain
one is generally not desirable ☺.
### Response:
def attach_tracker(self, stanza, tracker=None, token=None):
"""
Configure tracking for a stanza without sending it.
:param stanza: Message stanza to send.
:type stanza: :class:`aioxmpp.Message`
:param tracker: Message tracker to use.
:type tracker: :class:`~.MessageTracker` or :data:`None`
:param token: Optional stanza token for more fine-grained tracking.
:type token: :class:`~.StanzaToken`
:rtype: :class:`~.MessageTracker`
:return: The message tracker.
If `tracker` is :data:`None`, a new :class:`~.MessageTracker` is
created.
If `token` is not :data:`None`, updates to the stanza `token` are
reflected in the `tracker`.
If an error reply is received, the tracker will enter
:class:`~.MessageState.ERROR` and the error will be set as
:attr:`~.MessageTracker.response`.
You should use :meth:`send_tracked` if possible. This method however is
very useful if you need to track carbon copies of sent messages, as a
stanza token is not available here and re-sending the message to obtain
one is generally not desirable ☺.
"""
if tracker is None:
tracker = MessageTracker()
stanza.autoset_id()
key = stanza.to.bare(), stanza.id_
self._trackers[key] = tracker
tracker.on_closed.connect(
functools.partial(self._tracker_closed, key)
)
if token is not None:
token.future.add_done_callback(
functools.partial(
self._stanza_sent,
tracker,
token,
)
)
return tracker |
def alterar(self, id_script, id_script_type, script, description, model=None):
"""Change Script from by the identifier.
:param id_script: Identifier of the Script. Integer value and greater than zero.
:param id_script_type: Identifier of the Script Type. Integer value and greater than zero.
:param script: Script name. String with a minimum 3 and maximum of 40 characters
:param description: Script description. String with a minimum 3 and maximum of 100 characters
:return: None
:raise InvalidParameterError: The identifier of Script, script Type, script or description is null and invalid.
:raise RoteiroNaoExisteError: Script not registered.
:raise TipoRoteiroNaoExisteError: Script Type not registered.
:raise NomeRoteiroDuplicadoError: Script already registered with informed.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_script):
raise InvalidParameterError(u'The identifier of Script is invalid or was not informed.')
script_map = dict()
script_map['id_script_type'] = id_script_type
script_map['script'] = script
script_map['model'] = model
script_map['description'] = description
url = 'script/edit/' + str(id_script) + '/'
code, xml = self.submit({'script': script_map}, 'PUT', url)
        return self.response(code, xml) | Change Script by the identifier.
:param id_script: Identifier of the Script. Integer value and greater than zero.
:param id_script_type: Identifier of the Script Type. Integer value and greater than zero.
:param script: Script name. String with a minimum 3 and maximum of 40 characters
:param description: Script description. String with a minimum 3 and maximum of 100 characters
:return: None
:raise InvalidParameterError: The identifier of Script, script Type, script or description is null and invalid.
:raise RoteiroNaoExisteError: Script not registered.
:raise TipoRoteiroNaoExisteError: Script Type not registered.
:raise NomeRoteiroDuplicadoError: Script already registered with informed.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. | Below is the the instruction that describes the task:
### Input:
Change Script by the identifier.
:param id_script: Identifier of the Script. Integer value and greater than zero.
:param id_script_type: Identifier of the Script Type. Integer value and greater than zero.
:param script: Script name. String with a minimum 3 and maximum of 40 characters
:param description: Script description. String with a minimum 3 and maximum of 100 characters
:return: None
:raise InvalidParameterError: The identifier of Script, script Type, script or description is null and invalid.
:raise RoteiroNaoExisteError: Script not registered.
:raise TipoRoteiroNaoExisteError: Script Type not registered.
:raise NomeRoteiroDuplicadoError: Script already registered with informed.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
### Response:
def alterar(self, id_script, id_script_type, script, description, model=None):
"""Change Script from by the identifier.
:param id_script: Identifier of the Script. Integer value and greater than zero.
:param id_script_type: Identifier of the Script Type. Integer value and greater than zero.
:param script: Script name. String with a minimum 3 and maximum of 40 characters
:param description: Script description. String with a minimum 3 and maximum of 100 characters
:return: None
:raise InvalidParameterError: The identifier of Script, script Type, script or description is null and invalid.
:raise RoteiroNaoExisteError: Script not registered.
:raise TipoRoteiroNaoExisteError: Script Type not registered.
:raise NomeRoteiroDuplicadoError: Script already registered with informed.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_script):
raise InvalidParameterError(u'The identifier of Script is invalid or was not informed.')
script_map = dict()
script_map['id_script_type'] = id_script_type
script_map['script'] = script
script_map['model'] = model
script_map['description'] = description
url = 'script/edit/' + str(id_script) + '/'
code, xml = self.submit({'script': script_map}, 'PUT', url)
return self.response(code, xml) |
def start_notify(self, on_change):
"""Enable notification of changes for this characteristic on the
specified on_change callback. on_change should be a function that takes
one parameter which is the value (as a string of bytes) of the changed
characteristic value.
"""
# Tell the device what callback to use for changes to this characteristic.
self._device._notify_characteristic(self._characteristic, on_change)
# Turn on notifications of characteristic changes.
self._device._peripheral.setNotifyValue_forCharacteristic_(True,
self._characteristic) | Enable notification of changes for this characteristic on the
specified on_change callback. on_change should be a function that takes
one parameter which is the value (as a string of bytes) of the changed
characteristic value. | Below is the the instruction that describes the task:
### Input:
Enable notification of changes for this characteristic on the
specified on_change callback. on_change should be a function that takes
one parameter which is the value (as a string of bytes) of the changed
characteristic value.
### Response:
def start_notify(self, on_change):
"""Enable notification of changes for this characteristic on the
specified on_change callback. on_change should be a function that takes
one parameter which is the value (as a string of bytes) of the changed
characteristic value.
"""
# Tell the device what callback to use for changes to this characteristic.
self._device._notify_characteristic(self._characteristic, on_change)
# Turn on notifications of characteristic changes.
self._device._peripheral.setNotifyValue_forCharacteristic_(True,
self._characteristic) |
def stream(self, status=values.unset, iccid=values.unset,
rate_plan=values.unset, e_id=values.unset,
sim_registration_code=values.unset, limit=None, page_size=None):
"""
Streams SimInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.wireless.sim.SimInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
status=status,
iccid=iccid,
rate_plan=rate_plan,
e_id=e_id,
sim_registration_code=sim_registration_code,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit']) | Streams SimInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.wireless.sim.SimInstance] | Below is the the instruction that describes the task:
### Input:
Streams SimInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.wireless.sim.SimInstance]
### Response:
def stream(self, status=values.unset, iccid=values.unset,
rate_plan=values.unset, e_id=values.unset,
sim_registration_code=values.unset, limit=None, page_size=None):
"""
Streams SimInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode status: The status
:param unicode iccid: The iccid
:param unicode rate_plan: The rate_plan
:param unicode e_id: The e_id
:param unicode sim_registration_code: The sim_registration_code
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.wireless.sim.SimInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
status=status,
iccid=iccid,
rate_plan=rate_plan,
e_id=e_id,
sim_registration_code=sim_registration_code,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit']) |
def add_linear_obj(model):
"""Add a linear version of a minimal medium to the model solver.
Changes the optimization objective to finding the growth medium requiring
the smallest total import flux::
minimize sum |r_i| for r_i in import_reactions
Arguments
---------
model : cobra.Model
The model to modify.
"""
coefs = {}
for rxn in find_boundary_types(model, "exchange"):
export = len(rxn.reactants) == 1
if export:
coefs[rxn.reverse_variable] = 1
else:
coefs[rxn.forward_variable] = 1
model.objective.set_linear_coefficients(coefs)
model.objective.direction = "min" | Add a linear version of a minimal medium to the model solver.
Changes the optimization objective to finding the growth medium requiring
the smallest total import flux::
minimize sum |r_i| for r_i in import_reactions
Arguments
---------
model : cobra.Model
The model to modify. | Below is the the instruction that describes the task:
### Input:
Add a linear version of a minimal medium to the model solver.
Changes the optimization objective to finding the growth medium requiring
the smallest total import flux::
minimize sum |r_i| for r_i in import_reactions
Arguments
---------
model : cobra.Model
The model to modify.
### Response:
def add_linear_obj(model):
"""Add a linear version of a minimal medium to the model solver.
Changes the optimization objective to finding the growth medium requiring
the smallest total import flux::
minimize sum |r_i| for r_i in import_reactions
Arguments
---------
model : cobra.Model
The model to modify.
"""
coefs = {}
for rxn in find_boundary_types(model, "exchange"):
export = len(rxn.reactants) == 1
if export:
coefs[rxn.reverse_variable] = 1
else:
coefs[rxn.forward_variable] = 1
model.objective.set_linear_coefficients(coefs)
model.objective.direction = "min" |
def S(self):
"""Allow for the projection (and update) of nested values contained within the single match of an array.
Projection operator: https://docs.mongodb.com/manual/reference/operator/projection/positional/#proj._S_
Array update operator: https://docs.mongodb.com/manual/reference/operator/update/positional/
"""
if self._combining:
raise TypeError("Unable to dereference after combining fields.")
instance = self.__class__(self._document, self._field)
instance._name = self._name + '.' + '$' # pylint:disable=protected-access
return instance | Allow for the projection (and update) of nested values contained within the single match of an array.
Projection operator: https://docs.mongodb.com/manual/reference/operator/projection/positional/#proj._S_
Array update operator: https://docs.mongodb.com/manual/reference/operator/update/positional/ | Below is the the instruction that describes the task:
### Input:
Allow for the projection (and update) of nested values contained within the single match of an array.
Projection operator: https://docs.mongodb.com/manual/reference/operator/projection/positional/#proj._S_
Array update operator: https://docs.mongodb.com/manual/reference/operator/update/positional/
### Response:
def S(self):
"""Allow for the projection (and update) of nested values contained within the single match of an array.
Projection operator: https://docs.mongodb.com/manual/reference/operator/projection/positional/#proj._S_
Array update operator: https://docs.mongodb.com/manual/reference/operator/update/positional/
"""
if self._combining:
raise TypeError("Unable to dereference after combining fields.")
instance = self.__class__(self._document, self._field)
instance._name = self._name + '.' + '$' # pylint:disable=protected-access
return instance |
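For reference, these are the raw MongoDB shapes the `$` placeholder above expands to, following the two operator pages linked in the docstring; the field names and values are invented for illustration.

# Projection form: return only the first array element that matched the query.
query      = {"grades": {"$gte": 85}}
projection = {"grades.$": 1}

# Update form: modify that same matched element in place.
update = {"$inc": {"grades.$": 5}}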
def register(id, url=None):
"""Register a UUID key in the global S3 bucket."""
bucket = registration_s3_bucket()
key = registration_key(id)
obj = bucket.Object(key)
obj.put(Body=url or "missing")
return _generate_s3_url(bucket, key) | Register a UUID key in the global S3 bucket. | Below is the the instruction that describes the task:
### Input:
Register a UUID key in the global S3 bucket.
### Response:
def register(id, url=None):
"""Register a UUID key in the global S3 bucket."""
bucket = registration_s3_bucket()
key = registration_key(id)
obj = bucket.Object(key)
obj.put(Body=url or "missing")
return _generate_s3_url(bucket, key) |
def render_head(self, ctx, data):
"""
This renderer calculates content for the <head> tag by concatenating the
values from L{getHeadContent} and the overridden L{head} method.
"""
req = inevow.IRequest(ctx)
more = getattr(self.fragment, 'head', None)
if more is not None:
fragmentHead = more()
else:
fragmentHead = None
return ctx.tag[filter(None, list(self.getHeadContent(req)) +
[fragmentHead])] | This renderer calculates content for the <head> tag by concatenating the
values from L{getHeadContent} and the overridden L{head} method. | Below is the the instruction that describes the task:
### Input:
This renderer calculates content for the <head> tag by concatenating the
values from L{getHeadContent} and the overridden L{head} method.
### Response:
def render_head(self, ctx, data):
"""
This renderer calculates content for the <head> tag by concatenating the
values from L{getHeadContent} and the overridden L{head} method.
"""
req = inevow.IRequest(ctx)
more = getattr(self.fragment, 'head', None)
if more is not None:
fragmentHead = more()
else:
fragmentHead = None
return ctx.tag[filter(None, list(self.getHeadContent(req)) +
[fragmentHead])] |
def generate_message(contract: Contract, condition_kwargs: Mapping[str, Any]) -> str:
"""Generate the message upon contract violation."""
# pylint: disable=protected-access
parts = [] # type: List[str]
if contract.location is not None:
parts.append("{}:\n".format(contract.location))
if contract.description is not None:
parts.append("{}: ".format(contract.description))
lambda_inspection = None # type: Optional[ConditionLambdaInspection]
if not _is_lambda(a_function=contract.condition):
condition_text = contract.condition.__name__
else:
# We need to extract the source code corresponding to the decorator since inspect.getsource() is broken with
# lambdas.
# Find the line corresponding to the condition lambda
lines, condition_lineno = inspect.findsource(contract.condition)
filename = inspect.getsourcefile(contract.condition)
decorator_inspection = inspect_decorator(lines=lines, lineno=condition_lineno, filename=filename)
lambda_inspection = find_lambda_condition(decorator_inspection=decorator_inspection)
assert lambda_inspection is not None, \
"Expected lambda_inspection to be non-None if _is_lambda is True on: {}".format(contract.condition)
condition_text = lambda_inspection.text
parts.append(condition_text)
repr_vals = repr_values(
condition=contract.condition,
lambda_inspection=lambda_inspection,
condition_kwargs=condition_kwargs,
a_repr=contract._a_repr)
if len(repr_vals) == 0:
# Do not append anything since no value could be represented as a string.
# This could appear in case we have, for example, a generator expression as the return value of a lambda.
pass
elif len(repr_vals) == 1:
parts.append(': ')
parts.append(repr_vals[0])
else:
parts.append(':\n')
parts.append('\n'.join(repr_vals))
msg = "".join(parts)
return msg | Generate the message upon contract violation. | Below is the the instruction that describes the task:
### Input:
Generate the message upon contract violation.
### Response:
def generate_message(contract: Contract, condition_kwargs: Mapping[str, Any]) -> str:
"""Generate the message upon contract violation."""
# pylint: disable=protected-access
parts = [] # type: List[str]
if contract.location is not None:
parts.append("{}:\n".format(contract.location))
if contract.description is not None:
parts.append("{}: ".format(contract.description))
lambda_inspection = None # type: Optional[ConditionLambdaInspection]
if not _is_lambda(a_function=contract.condition):
condition_text = contract.condition.__name__
else:
# We need to extract the source code corresponding to the decorator since inspect.getsource() is broken with
# lambdas.
# Find the line corresponding to the condition lambda
lines, condition_lineno = inspect.findsource(contract.condition)
filename = inspect.getsourcefile(contract.condition)
decorator_inspection = inspect_decorator(lines=lines, lineno=condition_lineno, filename=filename)
lambda_inspection = find_lambda_condition(decorator_inspection=decorator_inspection)
assert lambda_inspection is not None, \
"Expected lambda_inspection to be non-None if _is_lambda is True on: {}".format(contract.condition)
condition_text = lambda_inspection.text
parts.append(condition_text)
repr_vals = repr_values(
condition=contract.condition,
lambda_inspection=lambda_inspection,
condition_kwargs=condition_kwargs,
a_repr=contract._a_repr)
if len(repr_vals) == 0:
# Do not append anything since no value could be represented as a string.
# This could appear in case we have, for example, a generator expression as the return value of a lambda.
pass
elif len(repr_vals) == 1:
parts.append(': ')
parts.append(repr_vals[0])
else:
parts.append(':\n')
parts.append('\n'.join(repr_vals))
msg = "".join(parts)
return msg |
def iter_cast(inputs, dst_type, return_type=None):
"""Cast elements of an iterable object into some type.
Args:
inputs (Iterable): The input object.
dst_type (type): Destination type.
return_type (type, optional): If specified, the output object will be
converted to this type, otherwise an iterator.
Returns:
iterator or specified type: The converted object.
"""
if not isinstance(inputs, collections_abc.Iterable):
raise TypeError('inputs must be an iterable object')
if not isinstance(dst_type, type):
raise TypeError('"dst_type" must be a valid type')
out_iterable = six.moves.map(dst_type, inputs)
if return_type is None:
return out_iterable
else:
return return_type(out_iterable) | Cast elements of an iterable object into some type.
Args:
inputs (Iterable): The input object.
dst_type (type): Destination type.
return_type (type, optional): If specified, the output object will be
converted to this type, otherwise an iterator.
Returns:
iterator or specified type: The converted object. | Below is the the instruction that describes the task:
### Input:
Cast elements of an iterable object into some type.
Args:
inputs (Iterable): The input object.
dst_type (type): Destination type.
return_type (type, optional): If specified, the output object will be
converted to this type, otherwise an iterator.
Returns:
iterator or specified type: The converted object.
### Response:
def iter_cast(inputs, dst_type, return_type=None):
"""Cast elements of an iterable object into some type.
Args:
inputs (Iterable): The input object.
dst_type (type): Destination type.
return_type (type, optional): If specified, the output object will be
converted to this type, otherwise an iterator.
Returns:
iterator or specified type: The converted object.
"""
if not isinstance(inputs, collections_abc.Iterable):
raise TypeError('inputs must be an iterable object')
if not isinstance(dst_type, type):
raise TypeError('"dst_type" must be a valid type')
out_iterable = six.moves.map(dst_type, inputs)
if return_type is None:
return out_iterable
else:
return return_type(out_iterable) |
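Usage sketch for iter_cast, assuming the function above is in scope; it exercises the two return modes described in the docstring.

items = ['1', '2', '3']
lazy = iter_cast(items, int)                       # an iterator by default
assert list(lazy) == [1, 2, 3]
eager = iter_cast(items, int, return_type=tuple)   # or any container type
assert eager == (1, 2, 3)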
def build_gui(self, container):
"""Build GUI such that image list area is maximized."""
vbox, sw, orientation = Widgets.get_oriented_box(container)
captions = (('Channel:', 'label', 'Channel Name', 'combobox',
'Modified only', 'checkbutton'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.channel_name.set_tooltip('Channel for locating images to save')
b.channel_name.add_callback('activated', self.select_channel_cb)
mod_only = self.settings.get('modified_only', True)
b.modified_only.set_state(mod_only)
b.modified_only.add_callback('activated', lambda *args: self.redo())
b.modified_only.set_tooltip("Show only locally modified images")
container.add_widget(w, stretch=0)
captions = (('Path:', 'llabel', 'OutDir', 'entry', 'Browse', 'button'),
('Suffix:', 'llabel', 'Suffix', 'entry'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.outdir.set_text(self.outdir)
b.outdir.set_tooltip('Output directory')
b.outdir.add_callback('activated', lambda w: self.set_outdir())
b.browse.set_tooltip('Browse for output directory')
b.browse.add_callback('activated', lambda w: self.browse_outdir())
b.suffix.set_text(self.suffix)
b.suffix.set_tooltip('Suffix to append to filename')
b.suffix.add_callback('activated', lambda w: self.set_suffix())
container.add_widget(w, stretch=0)
self.treeview = Widgets.TreeView(auto_expand=True,
sortable=True,
selection='multiple',
use_alt_row_color=True)
self.treeview.setup_table(self.columns, 1, 'IMAGE')
self.treeview.add_callback('selected', self.toggle_save_cb)
container.add_widget(self.treeview, stretch=1)
captions = (('Status', 'llabel'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.status.set_text('')
b.status.set_tooltip('Status message')
container.add_widget(w, stretch=0)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(3)
btn = Widgets.Button('Save')
btn.set_tooltip('Save selected image(s)')
btn.add_callback('activated', lambda w: self.save_images())
btn.set_enabled(False)
btns.add_widget(btn, stretch=0)
self.w.save = btn
btn = Widgets.Button('Close')
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
container.add_widget(btns, stretch=0)
self.gui_up = True
# Initialize directory selection dialog
self.dirsel = DirectorySelection(self.fv.w.root.get_widget())
# Generate initial listing
self.update_channels() | Build GUI such that image list area is maximized. | Below is the the instruction that describes the task:
### Input:
Build GUI such that image list area is maximized.
### Response:
def build_gui(self, container):
"""Build GUI such that image list area is maximized."""
vbox, sw, orientation = Widgets.get_oriented_box(container)
captions = (('Channel:', 'label', 'Channel Name', 'combobox',
'Modified only', 'checkbutton'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.channel_name.set_tooltip('Channel for locating images to save')
b.channel_name.add_callback('activated', self.select_channel_cb)
mod_only = self.settings.get('modified_only', True)
b.modified_only.set_state(mod_only)
b.modified_only.add_callback('activated', lambda *args: self.redo())
b.modified_only.set_tooltip("Show only locally modified images")
container.add_widget(w, stretch=0)
captions = (('Path:', 'llabel', 'OutDir', 'entry', 'Browse', 'button'),
('Suffix:', 'llabel', 'Suffix', 'entry'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.outdir.set_text(self.outdir)
b.outdir.set_tooltip('Output directory')
b.outdir.add_callback('activated', lambda w: self.set_outdir())
b.browse.set_tooltip('Browse for output directory')
b.browse.add_callback('activated', lambda w: self.browse_outdir())
b.suffix.set_text(self.suffix)
b.suffix.set_tooltip('Suffix to append to filename')
b.suffix.add_callback('activated', lambda w: self.set_suffix())
container.add_widget(w, stretch=0)
self.treeview = Widgets.TreeView(auto_expand=True,
sortable=True,
selection='multiple',
use_alt_row_color=True)
self.treeview.setup_table(self.columns, 1, 'IMAGE')
self.treeview.add_callback('selected', self.toggle_save_cb)
container.add_widget(self.treeview, stretch=1)
captions = (('Status', 'llabel'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.status.set_text('')
b.status.set_tooltip('Status message')
container.add_widget(w, stretch=0)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(3)
btn = Widgets.Button('Save')
btn.set_tooltip('Save selected image(s)')
btn.add_callback('activated', lambda w: self.save_images())
btn.set_enabled(False)
btns.add_widget(btn, stretch=0)
self.w.save = btn
btn = Widgets.Button('Close')
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
container.add_widget(btns, stretch=0)
self.gui_up = True
# Initialize directory selection dialog
self.dirsel = DirectorySelection(self.fv.w.root.get_widget())
# Generate initial listing
self.update_channels() |
def add_spot(self, component=None, feature=None, **kwargs):
"""
Shortcut to :meth:`add_feature` but with kind='spot'
"""
if component is None:
if len(self.hierarchy.get_stars())==1:
component = self.hierarchy.get_stars()[0]
else:
raise ValueError("must provide component for spot")
kwargs.setdefault('component', component)
kwargs.setdefault('feature', feature)
return self.add_feature('spot', **kwargs) | Shortcut to :meth:`add_feature` but with kind='spot' | Below is the the instruction that describes the task:
### Input:
Shortcut to :meth:`add_feature` but with kind='spot'
### Response:
def add_spot(self, component=None, feature=None, **kwargs):
"""
Shortcut to :meth:`add_feature` but with kind='spot'
"""
if component is None:
if len(self.hierarchy.get_stars())==1:
component = self.hierarchy.get_stars()[0]
else:
raise ValueError("must provide component for spot")
kwargs.setdefault('component', component)
kwargs.setdefault('feature', feature)
return self.add_feature('spot', **kwargs) |
def parse_params(self, params):
"""
Parsing params, params is a dict
and the dict value can be a string
or an iterable, namely a list, we
need to process those iterables
"""
for (key, value) in params.items():
if not isinstance(value, str):
string_params = self.to_string(value)
params[key] = string_params
return params | Parsing params, params is a dict
and the dict value can be a string
or an iterable, namely a list, we
need to process those iterables | Below is the the instruction that describes the task:
### Input:
Parsing params, params is a dict
and the dict value can be a string
or an iterable, namely a list, we
need to process those iterables
### Response:
def parse_params(self, params):
"""
Parsing params, params is a dict
and the dict value can be a string
or an iterable, namely a list, we
need to process those iterables
"""
for (key, value) in params.items():
if not isinstance(value, str):
string_params = self.to_string(value)
params[key] = string_params
return params |
def find_obfuscatables(tokens, obfunc, ignore_length=False):
"""
Iterates over *tokens*, which must be an equivalent output to what
tokenize.generate_tokens() produces, calling *obfunc* on each with the
following parameters:
- **tokens:** The current list of tokens.
- **index:** The current position in the list.
*obfunc* is expected to return the token string if that token can be safely
obfuscated **or** one of the following optional values which will instruct
find_obfuscatables() how to proceed:
- **'__skipline__'** Keep skipping tokens until a newline is reached.
- **'__skipnext__'** Skip the next token in the sequence.
If *ignore_length* is ``True`` then single-character obfuscatables will
be obfuscated anyway (even though it wouldn't save any space).
"""
global keyword_args
keyword_args = analyze.enumerate_keyword_args(tokens)
global imported_modules
imported_modules = analyze.enumerate_imports(tokens)
#print("imported_modules: %s" % imported_modules)
skip_line = False
skip_next = False
obfuscatables = []
for index, tok in enumerate(tokens):
token_type = tok[0]
if token_type == tokenize.NEWLINE:
skip_line = False
if skip_line:
continue
result = obfunc(tokens, index, ignore_length=ignore_length)
if result:
if skip_next:
skip_next = False
elif result == '__skipline__':
skip_line = True
elif result == '__skipnext__':
skip_next = True
elif result in obfuscatables:
pass
else:
obfuscatables.append(result)
else: # If result is empty we need to reset skip_next so we don't
skip_next = False # accidentally skip the next identifier
return obfuscatables | Iterates over *tokens*, which must be an equivalent output to what
tokenize.generate_tokens() produces, calling *obfunc* on each with the
following parameters:
- **tokens:** The current list of tokens.
- **index:** The current position in the list.
*obfunc* is expected to return the token string if that token can be safely
obfuscated **or** one of the following optional values which will instruct
find_obfuscatables() how to proceed:
- **'__skipline__'** Keep skipping tokens until a newline is reached.
- **'__skipnext__'** Skip the next token in the sequence.
If *ignore_length* is ``True`` then single-character obfuscatables will
be obfuscated anyway (even though it wouldn't save any space). | Below is the the instruction that describes the task:
### Input:
Iterates over *tokens*, which must be an equivalent output to what
tokenize.generate_tokens() produces, calling *obfunc* on each with the
following parameters:
- **tokens:** The current list of tokens.
- **index:** The current position in the list.
*obfunc* is expected to return the token string if that token can be safely
obfuscated **or** one of the following optional values which will instruct
find_obfuscatables() how to proceed:
- **'__skipline__'** Keep skipping tokens until a newline is reached.
- **'__skipnext__'** Skip the next token in the sequence.
If *ignore_length* is ``True`` then single-character obfuscatables will
be obfuscated anyway (even though it wouldn't save any space).
### Response:
def find_obfuscatables(tokens, obfunc, ignore_length=False):
"""
Iterates over *tokens*, which must be an equivalent output to what
tokenize.generate_tokens() produces, calling *obfunc* on each with the
following parameters:
- **tokens:** The current list of tokens.
- **index:** The current position in the list.
*obfunc* is expected to return the token string if that token can be safely
obfuscated **or** one of the following optional values which will instruct
find_obfuscatables() how to proceed:
- **'__skipline__'** Keep skipping tokens until a newline is reached.
- **'__skipnext__'** Skip the next token in the sequence.
If *ignore_length* is ``True`` then single-character obfuscatables will
be obfuscated anyway (even though it wouldn't save any space).
"""
global keyword_args
keyword_args = analyze.enumerate_keyword_args(tokens)
global imported_modules
imported_modules = analyze.enumerate_imports(tokens)
#print("imported_modules: %s" % imported_modules)
skip_line = False
skip_next = False
obfuscatables = []
for index, tok in enumerate(tokens):
token_type = tok[0]
if token_type == tokenize.NEWLINE:
skip_line = False
if skip_line:
continue
result = obfunc(tokens, index, ignore_length=ignore_length)
if result:
if skip_next:
skip_next = False
elif result == '__skipline__':
skip_line = True
elif result == '__skipnext__':
skip_next = True
elif result in obfuscatables:
pass
else:
obfuscatables.append(result)
else: # If result is empty we need to reset skip_next so we don't
skip_next = False # accidentally skip the next identifier
return obfuscatables |
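A toy obfunc illustrating the callback contract spelled out in the docstring: return the token string to collect it, '__skipline__' or '__skipnext__' to steer the scan, or a falsy value to ignore the token. It only demonstrates the interface; find_obfuscatables itself still needs the surrounding pyminifier analyze module it imports.

import keyword
import tokenize

def collect_long_names(tokens, index, ignore_length=False):
    tok_type, tok_string = tokens[index][0], tokens[index][1]
    if tok_type != tokenize.NAME:
        return None
    if tok_string in ('import', 'from'):
        return '__skipline__'          # leave import lines untouched
    if keyword.iskeyword(tok_string):
        return None
    if ignore_length or len(tok_string) > 1:
        return tok_string              # candidate identifier to collect
    return None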
def _ring_2d(m, n):
"""Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs
"""
if m == 1:
return [(0, i) for i in range(n)]
if n == 1:
return [(i, 0) for i in range(m)]
if m % 2 != 0:
tf.logging.warning("Odd dimension")
return [(i % m, i // m) for i in range(n * m)]
ret = [(0, 0)]
for i in range(m // 2):
for j in range(1, n):
ret.append((2 * i, j))
for j in range(n-1, 0, -1):
ret.append((2 * i + 1, j))
for i in range(m-1, 0, -1):
ret.append((i, 0))
return ret | Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs | Below is the the instruction that describes the task:
### Input:
Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs
### Response:
def _ring_2d(m, n):
"""Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs
"""
if m == 1:
return [(0, i) for i in range(n)]
if n == 1:
return [(i, 0) for i in range(m)]
if m % 2 != 0:
tf.logging.warning("Odd dimension")
return [(i % m, i // m) for i in range(n * m)]
ret = [(0, 0)]
for i in range(m // 2):
for j in range(1, n):
ret.append((2 * i, j))
for j in range(n-1, 0, -1):
ret.append((2 * i + 1, j))
for i in range(m-1, 0, -1):
ret.append((i, 0))
return ret |
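Worked example, traced by hand through the loops above, for a 2 x 3 mesh: sweep the top row rightwards, come back along the second row, then walk the first column home.

print(_ring_2d(2, 3))
# [(0, 0), (0, 1), (0, 2), (1, 2), (1, 1), (1, 0)]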
def add_transition(self, from_state_id, from_outcome, to_state_id, to_outcome, transition_id=None):
"""Adds a transition to the container state
Note: Either the toState or the toOutcome needs to be "None"
:param from_state_id: The source state of the transition
:param from_outcome: The outcome id of the source state to connect the transition to
:param to_state_id: The target state of the transition
:param to_outcome: The target outcome id of a container state
:param transition_id: An optional transition id for the new transition
"""
transition_id = self.check_transition_id(transition_id)
# Set from_state_id to None for start transitions, as from_state_id and from_outcome should both be None for
# these transitions
if from_state_id == self.state_id and from_outcome is None:
from_state_id = None
new_transition = Transition(from_state_id, from_outcome, to_state_id, to_outcome, transition_id, self)
self.transitions[transition_id] = new_transition
# notify all states waiting for transition to be connected
self._transitions_cv.acquire()
self._transitions_cv.notify_all()
self._transitions_cv.release()
# self.create_transition(from_state_id, from_outcome, to_state_id, to_outcome, transition_id)
return transition_id | Adds a transition to the container state
Note: Either the toState or the toOutcome needs to be "None"
:param from_state_id: The source state of the transition
:param from_outcome: The outcome id of the source state to connect the transition to
:param to_state_id: The target state of the transition
:param to_outcome: The target outcome id of a container state
:param transition_id: An optional transition id for the new transition | Below is the the instruction that describes the task:
### Input:
Adds a transition to the container state
Note: Either the toState or the toOutcome needs to be "None"
:param from_state_id: The source state of the transition
:param from_outcome: The outcome id of the source state to connect the transition to
:param to_state_id: The target state of the transition
:param to_outcome: The target outcome id of a container state
:param transition_id: An optional transition id for the new transition
### Response:
def add_transition(self, from_state_id, from_outcome, to_state_id, to_outcome, transition_id=None):
"""Adds a transition to the container state
Note: Either the toState or the toOutcome needs to be "None"
:param from_state_id: The source state of the transition
:param from_outcome: The outcome id of the source state to connect the transition to
:param to_state_id: The target state of the transition
:param to_outcome: The target outcome id of a container state
:param transition_id: An optional transition id for the new transition
"""
transition_id = self.check_transition_id(transition_id)
# Set from_state_id to None for start transitions, as from_state_id and from_outcome should both be None for
# these transitions
if from_state_id == self.state_id and from_outcome is None:
from_state_id = None
new_transition = Transition(from_state_id, from_outcome, to_state_id, to_outcome, transition_id, self)
self.transitions[transition_id] = new_transition
# notify all states waiting for transition to be connected
self._transitions_cv.acquire()
self._transitions_cv.notify_all()
self._transitions_cv.release()
# self.create_transition(from_state_id, from_outcome, to_state_id, to_outcome, transition_id)
return transition_id |
def _default_http_header(self, empty_session_only: bool = False) -> Dict[str, str]:
"""Returns default HTTP header we use for requests."""
header = {'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive',
'Content-Length': '0',
'Host': 'www.instagram.com',
'Origin': 'https://www.instagram.com',
'Referer': 'https://www.instagram.com/',
'User-Agent': self.user_agent,
'X-Instagram-AJAX': '1',
'X-Requested-With': 'XMLHttpRequest'}
if empty_session_only:
del header['Host']
del header['Origin']
del header['Referer']
del header['X-Instagram-AJAX']
del header['X-Requested-With']
return header | Returns default HTTP header we use for requests. | Below is the the instruction that describes the task:
### Input:
Returns default HTTP header we use for requests.
### Response:
def _default_http_header(self, empty_session_only: bool = False) -> Dict[str, str]:
"""Returns default HTTP header we use for requests."""
header = {'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive',
'Content-Length': '0',
'Host': 'www.instagram.com',
'Origin': 'https://www.instagram.com',
'Referer': 'https://www.instagram.com/',
'User-Agent': self.user_agent,
'X-Instagram-AJAX': '1',
'X-Requested-With': 'XMLHttpRequest'}
if empty_session_only:
del header['Host']
del header['Origin']
del header['Referer']
del header['X-Instagram-AJAX']
del header['X-Requested-With']
return header |
def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
lower_bound = self.lower_bound.value
upper_bound = self.upper_bound.value
low = lower_bound
spread = float(upper_bound - lower_bound)
par = x * spread + low
return par | Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return: | Below is the the instruction that describes the task:
### Input:
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
### Response:
def from_unit_cube(self, x):
"""
Used by multinest
:param x: 0 < x < 1
:param lower_bound:
:param upper_bound:
:return:
"""
lower_bound = self.lower_bound.value
upper_bound = self.upper_bound.value
low = lower_bound
spread = float(upper_bound - lower_bound)
par = x * spread + low
return par |
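Arithmetic check of the mapping above with invented bounds; the unit-cube coordinate is simply scaled onto [lower_bound, upper_bound].

lower_bound, upper_bound = -5.0, 5.0
x = 0.25                     # MultiNest supplies x uniformly in (0, 1)
par = x * (upper_bound - lower_bound) + lower_bound
print(par)                   # -2.5, a quarter of the way across the interval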
def load(self, name):
"""Loads and returns foreign library."""
name = ctypes.util.find_library(name)
return ctypes.cdll.LoadLibrary(name) | Loads and returns foreign library. | Below is the the instruction that describes the task:
### Input:
Loads and returns foreign library.
### Response:
def load(self, name):
"""Loads and returns foreign library."""
name = ctypes.util.find_library(name)
return ctypes.cdll.LoadLibrary(name) |
def _get_page_data(self, pno, zoom=0):
"""
Return a PNG image for a document page number. If zoom is other than 0, one of
the 4 page quadrants are zoomed-in instead and the corresponding clip returned.
"""
dlist = self.dlist_tab[pno] # get display list
if not dlist: # create if not yet there
self.dlist_tab[pno] = self.doc[pno].getDisplayList()
dlist = self.dlist_tab[pno]
r = dlist.rect # page rectangle
mp = r.tl + (r.br - r.tl) * 0.5 # rect middle point
mt = r.tl + (r.tr - r.tl) * 0.5 # middle of top edge
ml = r.tl + (r.bl - r.tl) * 0.5 # middle of left edge
        mr = r.tr + (r.br - r.tr) * 0.5  # middle of right edge
mb = r.bl + (r.br - r.bl) * 0.5 # middle of bottom edge
mat = fitz.Matrix(2, 2) # zoom matrix
if zoom == 1: # top-left quadrant
clip = fitz.Rect(r.tl, mp)
elif zoom == 4: # bot-right quadrant
clip = fitz.Rect(mp, r.br)
elif zoom == 2: # top-right
clip = fitz.Rect(mt, mr)
elif zoom == 3: # bot-left
clip = fitz.Rect(ml, mb)
if zoom == 0: # total page
pix = dlist.getPixmap(alpha=False)
else:
pix = dlist.getPixmap(alpha=False, matrix=mat, clip=clip)
return pix.getPNGData() | Return a PNG image for a document page number. If zoom is other than 0, one of
the 4 page quadrants are zoomed-in instead and the corresponding clip returned. | Below is the the instruction that describes the task:
### Input:
Return a PNG image for a document page number. If zoom is other than 0, one of
the 4 page quadrants are zoomed-in instead and the corresponding clip returned.
### Response:
def _get_page_data(self, pno, zoom=0):
"""
Return a PNG image for a document page number. If zoom is other than 0, one of
the 4 page quadrants are zoomed-in instead and the corresponding clip returned.
"""
dlist = self.dlist_tab[pno] # get display list
if not dlist: # create if not yet there
self.dlist_tab[pno] = self.doc[pno].getDisplayList()
dlist = self.dlist_tab[pno]
r = dlist.rect # page rectangle
mp = r.tl + (r.br - r.tl) * 0.5 # rect middle point
mt = r.tl + (r.tr - r.tl) * 0.5 # middle of top edge
ml = r.tl + (r.bl - r.tl) * 0.5 # middle of left edge
        mr = r.tr + (r.br - r.tr) * 0.5  # middle of right edge
mb = r.bl + (r.br - r.bl) * 0.5 # middle of bottom edge
mat = fitz.Matrix(2, 2) # zoom matrix
if zoom == 1: # top-left quadrant
clip = fitz.Rect(r.tl, mp)
elif zoom == 4: # bot-right quadrant
clip = fitz.Rect(mp, r.br)
elif zoom == 2: # top-right
clip = fitz.Rect(mt, mr)
elif zoom == 3: # bot-left
clip = fitz.Rect(ml, mb)
if zoom == 0: # total page
pix = dlist.getPixmap(alpha=False)
else:
pix = dlist.getPixmap(alpha=False, matrix=mat, clip=clip)
return pix.getPNGData() |
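A usage sketch relying only on the legacy PyMuPDF calls already used above (getDisplayList, getPixmap, getPNGData); newer PyMuPDF releases renamed these methods, and 'sample.pdf' is a placeholder path:

import fitz  # PyMuPDF

doc = fitz.open("sample.pdf")                         # placeholder document
dlist = doc[0].getDisplayList()                       # display list for page 0, as above
r = dlist.rect
clip = fitz.Rect(r.tl, r.tl + (r.br - r.tl) * 0.5)    # top-left quadrant (zoom == 1)
pix = dlist.getPixmap(alpha=False, matrix=fitz.Matrix(2, 2), clip=clip)
with open("page0_topleft.png", "wb") as fh:
    fh.write(pix.getPNGData())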
def get_lock(self, lockname, locktime=60, auto_renewal=False):
        ''' Gets a lock and returns it if it can be established. Returns false otherwise '''
pid = os.getpid()
caller = inspect.stack()[0][3]
try:
# rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal)
except:
if self.logger:
self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc()))
return False
try:
lock = rl.acquire(blocking=False)
except RedisError:
return False
if not lock:
return False
else:
            return rl | Gets a lock and returns it if it can be established. Returns false otherwise | Below is the the instruction that describes the task:
### Input:
Gets a lock and returns it if it can be established. Returns false otherwise
### Response:
def get_lock(self, lockname, locktime=60, auto_renewal=False):
        ''' Gets a lock and returns it if it can be established. Returns false otherwise '''
pid = os.getpid()
caller = inspect.stack()[0][3]
try:
# rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal)
except:
if self.logger:
self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc()))
return False
try:
lock = rl.acquire(blocking=False)
except RedisError:
return False
if not lock:
return False
else:
return rl |
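A usage sketch with the same python-redis-lock API the method relies on, assuming a Redis server on localhost (connection details and lock name are placeholders):

import redis
import redis_lock

conn = redis.StrictRedis(host="localhost", port=6379, db=0)
lock = redis_lock.Lock(conn, "nightly-report", expire=60, auto_renewal=False)

if lock.acquire(blocking=False):
    try:
        pass  # work that must not run concurrently
    finally:
        lock.release()
else:
    print("another worker holds the lock; skipping")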
def team_districts(self, team):
"""
Get districts a team has competed in.
:param team: Team to get data on.
:return: List of District objects.
"""
return [District(raw) for raw in self._get('team/%s/districts' % self.team_key(team))] | Get districts a team has competed in.
:param team: Team to get data on.
:return: List of District objects. | Below is the the instruction that describes the task:
### Input:
Get districts a team has competed in.
:param team: Team to get data on.
:return: List of District objects.
### Response:
def team_districts(self, team):
"""
Get districts a team has competed in.
:param team: Team to get data on.
:return: List of District objects.
"""
return [District(raw) for raw in self._get('team/%s/districts' % self.team_key(team))] |
def count(self):
"""
Returns the number of rows matched by this query.
*Note: This function executes a SELECT COUNT() and has a performance cost on large datasets*
"""
if self._batch:
raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode")
if self._count is None:
query = self._select_query()
query.count = True
result = self._execute(query)
count_row = result.one().popitem()
self._count = count_row[1]
return self._count | Returns the number of rows matched by this query.
*Note: This function executes a SELECT COUNT() and has a performance cost on large datasets* | Below is the the instruction that describes the task:
### Input:
Returns the number of rows matched by this query.
*Note: This function executes a SELECT COUNT() and has a performance cost on large datasets*
### Response:
def count(self):
"""
Returns the number of rows matched by this query.
*Note: This function executes a SELECT COUNT() and has a performance cost on large datasets*
"""
if self._batch:
raise CQLEngineException("Only inserts, updates, and deletes are available in batch mode")
if self._count is None:
query = self._select_query()
query.count = True
result = self._execute(query)
count_row = result.one().popitem()
self._count = count_row[1]
return self._count |
async def get_poll(poll_id):
""" Get a strawpoll.
Example:
poll = strawpy.get_poll('11682852')
:param poll_id:
:return: strawpy.Strawpoll object
"""
async with aiohttp.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=poll_id)) as r:
return await StrawPoll(r) | Get a strawpoll.
Example:
poll = strawpy.get_poll('11682852')
:param poll_id:
:return: strawpy.Strawpoll object | Below is the the instruction that describes the task:
### Input:
Get a strawpoll.
Example:
poll = strawpy.get_poll('11682852')
:param poll_id:
:return: strawpy.Strawpoll object
### Response:
async def get_poll(poll_id):
""" Get a strawpoll.
Example:
poll = strawpy.get_poll('11682852')
:param poll_id:
:return: strawpy.Strawpoll object
"""
async with aiohttp.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=poll_id)) as r:
return await StrawPoll(r) |
def pertibate(self, pertibate_columns=None, filter_func=None,
max_size=1000):
"""
        :param pertibate_columns: list of str of columns to pertibate; see DOE
        :param filter_func: func that takes a SeabornRow and returns
            True if this row should exist
:param max_size: int of the max number of rows to try
but some may be filtered out
:return: None
"""
pertibate_columns = pertibate_columns or self.columns
for c in pertibate_columns:
assert c in self.columns, 'Column %s was not part of this self' % c
# noinspection PyTypeChecker
column_size = [c in pertibate_columns and len(self._parameters[c]) or 1
for c in self.columns]
max_size = min(max_size, reduce(lambda x, y: x * y, column_size))
for indexes in self._index_iterator(column_size, max_size):
row = SeabornRow(self._column_index,
[self._pertibate_value(indexes.pop(0), c) for
c in self.columns])
kwargs = row.obj_to_dict()
if filter_func is None or filter_func(_row_index=len(self.table),
**kwargs):
self.table.append(row)
for c in self.columns: # if the parameter is a dynamic function
if hasattr(self._parameters.get(c, ''), '__call__'):
# noinspection PyTypeChecker
                self.set_column(c, self._parameters[c]) | :param pertibate_columns: list of str of columns to pertibate; see DOE
    :param filter_func: func that takes a SeabornRow and returns
        True if this row should exist
:param max_size: int of the max number of rows to try
but some may be filtered out
:return: None | Below is the the instruction that describes the task:
### Input:
:param pertibate_columns: list of str of columns to pertibate; see DOE
    :param filter_func: func that takes a SeabornRow and returns
        True if this row should exist
:param max_size: int of the max number of rows to try
but some may be filtered out
:return: None
### Response:
def pertibate(self, pertibate_columns=None, filter_func=None,
max_size=1000):
"""
        :param pertibate_columns: list of str of columns to pertibate; see DOE
        :param filter_func: func that takes a SeabornRow and returns
            True if this row should exist
:param max_size: int of the max number of rows to try
but some may be filtered out
:return: None
"""
pertibate_columns = pertibate_columns or self.columns
for c in pertibate_columns:
assert c in self.columns, 'Column %s was not part of this self' % c
# noinspection PyTypeChecker
column_size = [c in pertibate_columns and len(self._parameters[c]) or 1
for c in self.columns]
max_size = min(max_size, reduce(lambda x, y: x * y, column_size))
for indexes in self._index_iterator(column_size, max_size):
row = SeabornRow(self._column_index,
[self._pertibate_value(indexes.pop(0), c) for
c in self.columns])
kwargs = row.obj_to_dict()
if filter_func is None or filter_func(_row_index=len(self.table),
**kwargs):
self.table.append(row)
for c in self.columns: # if the parameter is a dynamic function
if hasattr(self._parameters.get(c, ''), '__call__'):
# noinspection PyTypeChecker
self.set_column(c, self._parameters[c]) |
def add_projection(self, A, alpha = 1.0, beta = 1.0, reordered=False):
"""
Add projection of a dense matrix :math:`A` to :py:class:`cspmatrix`.
X := alpha*proj(A) + beta*X
"""
assert self.is_factor is False, "cannot project matrix onto a cspmatrix factor"
assert isinstance(A, matrix), "argument A must be a dense matrix"
symb = self.symb
blkval = self.blkval
n = symb.n
snptr = symb.snptr
snode = symb.snode
relptr = symb.relptr
snrowidx = symb.snrowidx
sncolptr = symb.sncolptr
blkptr = symb.blkptr
if self.symb.p is not None and reordered is False:
A = tril(A)
A = A+A.T
A[::A.size[0]+1] *= 0.5
A = A[self.symb.p,self.symb.p]
# for each block ...
for k in range(self.symb.Nsn):
nn = snptr[k+1]-snptr[k]
na = relptr[k+1]-relptr[k]
nj = nn + na
blkval[blkptr[k]:blkptr[k+1]] = beta*blkval[blkptr[k]:blkptr[k+1]] + alpha*(A[snrowidx[sncolptr[k]:sncolptr[k+1]],snode[snptr[k]:snptr[k+1]]][:])
return | Add projection of a dense matrix :math:`A` to :py:class:`cspmatrix`.
X := alpha*proj(A) + beta*X | Below is the the instruction that describes the task:
### Input:
Add projection of a dense matrix :math:`A` to :py:class:`cspmatrix`.
X := alpha*proj(A) + beta*X
### Response:
def add_projection(self, A, alpha = 1.0, beta = 1.0, reordered=False):
"""
Add projection of a dense matrix :math:`A` to :py:class:`cspmatrix`.
X := alpha*proj(A) + beta*X
"""
assert self.is_factor is False, "cannot project matrix onto a cspmatrix factor"
assert isinstance(A, matrix), "argument A must be a dense matrix"
symb = self.symb
blkval = self.blkval
n = symb.n
snptr = symb.snptr
snode = symb.snode
relptr = symb.relptr
snrowidx = symb.snrowidx
sncolptr = symb.sncolptr
blkptr = symb.blkptr
if self.symb.p is not None and reordered is False:
A = tril(A)
A = A+A.T
A[::A.size[0]+1] *= 0.5
A = A[self.symb.p,self.symb.p]
# for each block ...
for k in range(self.symb.Nsn):
nn = snptr[k+1]-snptr[k]
na = relptr[k+1]-relptr[k]
nj = nn + na
blkval[blkptr[k]:blkptr[k+1]] = beta*blkval[blkptr[k]:blkptr[k+1]] + alpha*(A[snrowidx[sncolptr[k]:sncolptr[k+1]],snode[snptr[k]:snptr[k+1]]][:])
return |
def move_to(self, target_datetime):
"""Moves frozen date to the given ``target_datetime``"""
target_datetime = _parse_time_to_freeze(target_datetime)
delta = target_datetime - self.time_to_freeze
self.tick(delta=delta) | Moves frozen date to the given ``target_datetime`` | Below is the the instruction that describes the task:
### Input:
Moves frozen date to the given ``target_datetime``
### Response:
def move_to(self, target_datetime):
"""Moves frozen date to the given ``target_datetime``"""
target_datetime = _parse_time_to_freeze(target_datetime)
delta = target_datetime - self.time_to_freeze
self.tick(delta=delta) |
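A sketch of the same behaviour through freezegun's public API, assuming a freezegun release that exposes move_to on the freeze_time factory:

import datetime
from freezegun import freeze_time

with freeze_time("2021-01-01") as frozen:
    assert datetime.date.today() == datetime.date(2021, 1, 1)
    frozen.move_to("2021-06-15")                      # jump the frozen clock forward
    assert datetime.date.today() == datetime.date(2021, 6, 15)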
def users_setPhoto(self, *, image: Union[str, IOBase], **kwargs) -> SlackResponse:
"""Set the user profile photo
Args:
image (str): Supply the path of the image you'd like to upload.
e.g. 'myimage.png'
"""
self._validate_xoxp_token()
return self.api_call("users.setPhoto", files={"image": image}, data=kwargs) | Set the user profile photo
Args:
image (str): Supply the path of the image you'd like to upload.
e.g. 'myimage.png' | Below is the the instruction that describes the task:
### Input:
Set the user profile photo
Args:
image (str): Supply the path of the image you'd like to upload.
e.g. 'myimage.png'
### Response:
def users_setPhoto(self, *, image: Union[str, IOBase], **kwargs) -> SlackResponse:
"""Set the user profile photo
Args:
image (str): Supply the path of the image you'd like to upload.
e.g. 'myimage.png'
"""
self._validate_xoxp_token()
return self.api_call("users.setPhoto", files={"image": image}, data=kwargs) |
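A usage sketch assuming the slackclient 2.x WebClient that defines this method; the token and image path are placeholders, and the token must be a user (xoxp) token as the validation above requires:

from slack import WebClient

client = WebClient(token="xoxp-placeholder-user-token")
response = client.users_setPhoto(image="myimage.png")   # path to a local image file
print(response["ok"])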
def focus_next(self, count=1):
" Focus the next pane. "
panes = self.panes
if panes:
self.active_pane = panes[(panes.index(self.active_pane) + count) % len(panes)]
else:
self.active_pane = None | Focus the next pane. | Below is the the instruction that describes the task:
### Input:
Focus the next pane.
### Response:
def focus_next(self, count=1):
" Focus the next pane. "
panes = self.panes
if panes:
self.active_pane = panes[(panes.index(self.active_pane) + count) % len(panes)]
else:
self.active_pane = None |
def selectImports(pth, xtrapath=None):
"""
Return the dependencies of a binary that should be included.
Return a list of pairs (name, fullpath)
"""
rv = []
if xtrapath is None:
xtrapath = [os.path.dirname(pth)]
else:
assert isinstance(xtrapath, list)
xtrapath = [os.path.dirname(pth)] + xtrapath # make a copy
dlls = getImports(pth)
for lib in dlls:
if seen.get(lib.upper(), 0):
continue
if not is_win and not is_cygwin:
# all other platforms
npth = lib
dir, lib = os.path.split(lib)
else:
# plain win case
npth = getfullnameof(lib, xtrapath)
# now npth is a candidate lib if found
# check again for excludes but with regex FIXME: split the list
if npth:
candidatelib = npth
else:
candidatelib = lib
if not dylib.include_library(candidatelib):
if (candidatelib.find('libpython') < 0 and
candidatelib.find('Python.framework') < 0):
# skip libs not containing (libpython or Python.framework)
if not seen.get(npth.upper(), 0):
logger.debug("Skipping %s dependency of %s",
lib, os.path.basename(pth))
continue
else:
pass
if npth:
if not seen.get(npth.upper(), 0):
logger.debug("Adding %s dependency of %s",
lib, os.path.basename(pth))
rv.append((lib, npth))
else:
logger.error("lib not found: %s dependency of %s", lib, pth)
return rv | Return the dependencies of a binary that should be included.
Return a list of pairs (name, fullpath) | Below is the the instruction that describes the task:
### Input:
Return the dependencies of a binary that should be included.
Return a list of pairs (name, fullpath)
### Response:
def selectImports(pth, xtrapath=None):
"""
Return the dependencies of a binary that should be included.
Return a list of pairs (name, fullpath)
"""
rv = []
if xtrapath is None:
xtrapath = [os.path.dirname(pth)]
else:
assert isinstance(xtrapath, list)
xtrapath = [os.path.dirname(pth)] + xtrapath # make a copy
dlls = getImports(pth)
for lib in dlls:
if seen.get(lib.upper(), 0):
continue
if not is_win and not is_cygwin:
# all other platforms
npth = lib
dir, lib = os.path.split(lib)
else:
# plain win case
npth = getfullnameof(lib, xtrapath)
# now npth is a candidate lib if found
# check again for excludes but with regex FIXME: split the list
if npth:
candidatelib = npth
else:
candidatelib = lib
if not dylib.include_library(candidatelib):
if (candidatelib.find('libpython') < 0 and
candidatelib.find('Python.framework') < 0):
# skip libs not containing (libpython or Python.framework)
if not seen.get(npth.upper(), 0):
logger.debug("Skipping %s dependency of %s",
lib, os.path.basename(pth))
continue
else:
pass
if npth:
if not seen.get(npth.upper(), 0):
logger.debug("Adding %s dependency of %s",
lib, os.path.basename(pth))
rv.append((lib, npth))
else:
logger.error("lib not found: %s dependency of %s", lib, pth)
return rv |
def schedule_window(report_type, start_at, stop_at, resolution):
"""
Find all active schedules and schedule celery tasks for
each of them with a specific ETA (determined by parsing
the cron schedule for the schedule)
"""
model_cls = get_scheduler_model(report_type)
dbsession = db.create_scoped_session()
schedules = dbsession.query(model_cls).filter(model_cls.active.is_(True))
for schedule in schedules:
args = (
report_type,
schedule.id,
)
# Schedule the job for the specified time window
for eta in next_schedules(schedule.crontab,
start_at,
stop_at,
resolution=resolution):
schedule_email_report.apply_async(args, eta=eta) | Find all active schedules and schedule celery tasks for
each of them with a specific ETA (determined by parsing
the cron schedule for the schedule) | Below is the the instruction that describes the task:
### Input:
Find all active schedules and schedule celery tasks for
each of them with a specific ETA (determined by parsing
the cron schedule for the schedule)
### Response:
def schedule_window(report_type, start_at, stop_at, resolution):
"""
Find all active schedules and schedule celery tasks for
each of them with a specific ETA (determined by parsing
the cron schedule for the schedule)
"""
model_cls = get_scheduler_model(report_type)
dbsession = db.create_scoped_session()
schedules = dbsession.query(model_cls).filter(model_cls.active.is_(True))
for schedule in schedules:
args = (
report_type,
schedule.id,
)
# Schedule the job for the specified time window
for eta in next_schedules(schedule.crontab,
start_at,
stop_at,
resolution=resolution):
schedule_email_report.apply_async(args, eta=eta) |
def _get_ordering_field_lookup(self, field_name):
"""
get real model field to order by
"""
field = field_name
get_field = getattr(self, "get_%s_ordering_field" % field_name, None)
if get_field:
field = get_field()
return field | get real model field to order by | Below is the the instruction that describes the task:
### Input:
get real model field to order by
### Response:
def _get_ordering_field_lookup(self, field_name):
"""
get real model field to order by
"""
field = field_name
get_field = getattr(self, "get_%s_ordering_field" % field_name, None)
if get_field:
field = get_field()
return field |
def get_random_integer(N, randfunc=None):
"""getRandomInteger(N:int, randfunc:callable):long
Return a random number with at most N bits.
If randfunc is omitted, then Random.new().read is used.
This function is for internal use only and may be renamed or removed in
the future.
"""
if randfunc is None:
randfunc = Random.new().read
S = randfunc(N>>3)
odd_bits = N % 8
if odd_bits != 0:
char = ord(randfunc(1)) >> (8-odd_bits)
S = bchr(char) + S
value = bytes_to_long(S)
return value | getRandomInteger(N:int, randfunc:callable):long
Return a random number with at most N bits.
If randfunc is omitted, then Random.new().read is used.
This function is for internal use only and may be renamed or removed in
the future. | Below is the the instruction that describes the task:
### Input:
getRandomInteger(N:int, randfunc:callable):long
Return a random number with at most N bits.
If randfunc is omitted, then Random.new().read is used.
This function is for internal use only and may be renamed or removed in
the future.
### Response:
def get_random_integer(N, randfunc=None):
"""getRandomInteger(N:int, randfunc:callable):long
Return a random number with at most N bits.
If randfunc is omitted, then Random.new().read is used.
This function is for internal use only and may be renamed or removed in
the future.
"""
if randfunc is None:
randfunc = Random.new().read
S = randfunc(N>>3)
odd_bits = N % 8
if odd_bits != 0:
char = ord(randfunc(1)) >> (8-odd_bits)
S = bchr(char) + S
value = bytes_to_long(S)
return value |
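The same bit-limiting idea sketched with the standard library only (os.urandom instead of Crypto's Random), so the masking step is easy to verify:

import os

def random_integer(n_bits):
    # Draw ceil(n_bits / 8) random bytes, then shift off the surplus bits of
    # the leading byte so the result never exceeds n_bits bits.
    n_bytes = (n_bits + 7) // 8
    value = int.from_bytes(os.urandom(n_bytes), "big")
    return value >> (8 * n_bytes - n_bits)

x = random_integer(20)
assert 0 <= x < 2 ** 20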
def to_numpy(nd4j_array):
""" Convert an ND4J array to a numpy array
:param nd4j_array:
:return:
"""
buff = nd4j_array.data()
address = buff.pointer().address()
type_name = java_classes.DataTypeUtil.getDtypeFromContext()
data_type = java_classes.DataTypeUtil.getDTypeForName(type_name)
mapping = {
'double': ctypes.c_double,
'float': ctypes.c_float
}
Pointer = ctypes.POINTER(mapping[data_type])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array | Convert an ND4J array to a numpy array
:param nd4j_array:
:return: | Below is the the instruction that describes the task:
### Input:
Convert an ND4J array to a numpy array
:param nd4j_array:
:return:
### Response:
def to_numpy(nd4j_array):
""" Convert an ND4J array to a numpy array
:param nd4j_array:
:return:
"""
buff = nd4j_array.data()
address = buff.pointer().address()
type_name = java_classes.DataTypeUtil.getDtypeFromContext()
data_type = java_classes.DataTypeUtil.getDTypeForName(type_name)
mapping = {
'double': ctypes.c_double,
'float': ctypes.c_float
}
Pointer = ctypes.POINTER(mapping[data_type])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array |
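The zero-copy trick at the heart of to_numpy, sketched with a plain ctypes buffer standing in for the ND4J data buffer:

import ctypes
import numpy as np

buf = (ctypes.c_double * 6)(0, 1, 2, 3, 4, 5)          # stand-in for an off-heap buffer
address = ctypes.addressof(buf)
pointer = ctypes.cast(address, ctypes.POINTER(ctypes.c_double))

view = np.ctypeslib.as_array(pointer, shape=(2, 3))    # no copy: a numpy view over the same memory
view[0, 0] = 42.0
assert buf[0] == 42.0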
def unsign(self, value, max_age=None):
"""
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
"""
result = super(TimestampSigner, self).unsign(value)
value, timestamp = result.rsplit(self.sep, 1)
timestamp = baseconv.base62.decode(timestamp)
if max_age is not None:
if isinstance(max_age, datetime.timedelta):
max_age = max_age.total_seconds()
# Check timestamp is not older than max_age
age = time.time() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age))
return value | Retrieve original value and check it wasn't signed more
than max_age seconds ago. | Below is the the instruction that describes the task:
### Input:
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
### Response:
def unsign(self, value, max_age=None):
"""
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
"""
result = super(TimestampSigner, self).unsign(value)
value, timestamp = result.rsplit(self.sep, 1)
timestamp = baseconv.base62.decode(timestamp)
if max_age is not None:
if isinstance(max_age, datetime.timedelta):
max_age = max_age.total_seconds()
# Check timestamp is not older than max_age
age = time.time() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age))
return value |
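A round-trip sketch with Django's public signing API; it assumes a standalone script, so settings are configured inline with a placeholder secret:

from django.conf import settings
from django.core import signing

settings.configure(SECRET_KEY="placeholder-secret")      # minimal standalone setup

signer = signing.TimestampSigner()
token = signer.sign("hello")
print(signer.unsign(token, max_age=60))                  # -> 'hello'
# With a smaller max_age an aged token raises signing.SignatureExpired instead.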
def updateMigrationBlockStatus(self, migration_status=0, migration_block=None, migration_request=None):
"""
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are only allowed changes for working through migration.
3 -> 1 allowed for retrying.
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
if migration_block:
upst = dict(migration_status=migration_status,
migration_block_id=migration_block, last_modification_date=dbsUtils().getTime())
elif migration_request:
upst = dict(migration_status=migration_status, migration_request_id=migration_request,
last_modification_date=dbsUtils().getTime())
self.mgrup.execute(conn, upst)
except:
if tran:tran.rollback()
raise
else:
if tran:tran.commit()
finally:
if conn:conn.close() | migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are only allowed changes for working through migration.
3 -> 1 allowed for retrying. | Below is the the instruction that describes the task:
### Input:
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are only allowed changes for working through migration.
3 -> 1 allowed for retrying.
### Response:
def updateMigrationBlockStatus(self, migration_status=0, migration_block=None, migration_request=None):
"""
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are only allowed changes for working through migration.
3 -> 1 allowed for retrying.
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
if migration_block:
upst = dict(migration_status=migration_status,
migration_block_id=migration_block, last_modification_date=dbsUtils().getTime())
elif migration_request:
upst = dict(migration_status=migration_status, migration_request_id=migration_request,
last_modification_date=dbsUtils().getTime())
self.mgrup.execute(conn, upst)
except:
if tran:tran.rollback()
raise
else:
if tran:tran.commit()
finally:
if conn:conn.close() |
def srp1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs): # noqa: E501
"""Flood and receive packets at layer 2 and return only the first answer
prn: function applied to packets received
verbose: set verbosity level
nofilter: put 1 to avoid use of BPF filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
s = conf.L2socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface) # noqa: E501
ans, _ = sndrcvflood(s, x, *args, **kargs)
s.close()
if len(ans) > 0:
return ans[0][1]
else:
return None | Flood and receive packets at layer 2 and return only the first answer
prn: function applied to packets received
verbose: set verbosity level
nofilter: put 1 to avoid use of BPF filters
filter: provide a BPF filter
iface: listen answers only on the given interface | Below is the the instruction that describes the task:
### Input:
Flood and receive packets at layer 2 and return only the first answer
prn: function applied to packets received
verbose: set verbosity level
nofilter: put 1 to avoid use of BPF filters
filter: provide a BPF filter
iface: listen answers only on the given interface
### Response:
def srp1flood(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs): # noqa: E501
"""Flood and receive packets at layer 2 and return only the first answer
prn: function applied to packets received
verbose: set verbosity level
nofilter: put 1 to avoid use of BPF filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
s = conf.L2socket(promisc=promisc, filter=filter, nofilter=nofilter, iface=iface) # noqa: E501
ans, _ = sndrcvflood(s, x, *args, **kargs)
s.close()
if len(ans) > 0:
return ans[0][1]
else:
return None |
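A hedged Scapy sketch; it needs root privileges, the interface name and target address are placeholders, and the flood keeps sending until a reply arrives:

from scapy.all import ARP, Ether, srp1flood

pkt = Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst="192.168.1.1")   # placeholder target
reply = srp1flood(pkt, iface="eth0")                              # first answer only
if reply is not None:
    print(reply[ARP].hwsrc)                                       # MAC of the responder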
def _loadUi(uifile, baseinstance=None):
"""Dynamically load a user interface from the given `uifile`
This function calls `uic.loadUi` if using PyQt bindings,
else it implements a comparable binding for PySide.
Documentation:
http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi
Arguments:
uifile (str): Absolute path to Qt Designer file.
baseinstance (QWidget): Instantiated QWidget or subclass thereof
Return:
baseinstance if `baseinstance` is not `None`. Otherwise
return the newly created instance of the user interface.
"""
if hasattr(Qt, "_uic"):
return Qt._uic.loadUi(uifile, baseinstance)
elif hasattr(Qt, "_QtUiTools"):
# Implement `PyQt5.uic.loadUi` for PySide(2)
class _UiLoader(Qt._QtUiTools.QUiLoader):
"""Create the user interface in a base instance.
Unlike `Qt._QtUiTools.QUiLoader` itself this class does not
create a new instance of the top-level widget, but creates the user
interface in an existing instance of the top-level class if needed.
This mimics the behaviour of `PyQt5.uic.loadUi`.
"""
def __init__(self, baseinstance):
super(_UiLoader, self).__init__(baseinstance)
self.baseinstance = baseinstance
def load(self, uifile, *args, **kwargs):
from xml.etree.ElementTree import ElementTree
# For whatever reason, if this doesn't happen then
# reading an invalid or non-existing .ui file throws
# a RuntimeError.
etree = ElementTree()
etree.parse(uifile)
widget = Qt._QtUiTools.QUiLoader.load(
self, uifile, *args, **kwargs)
# Workaround for PySide 1.0.9, see issue #208
widget.parentWidget()
return widget
def createWidget(self, class_name, parent=None, name=""):
"""Called for each widget defined in ui file
Overridden here to populate `baseinstance` instead.
"""
if parent is None and self.baseinstance:
# Supposed to create the top-level widget,
# return the base instance instead
return self.baseinstance
# For some reason, Line is not in the list of available
# widgets, but works fine, so we have to special case it here.
if class_name in self.availableWidgets() + ["Line"]:
# Create a new widget for child widgets
widget = Qt._QtUiTools.QUiLoader.createWidget(self,
class_name,
parent,
name)
else:
raise Exception("Custom widget '%s' not supported"
% class_name)
if self.baseinstance:
# Set an attribute for the new child widget on the base
# instance, just like PyQt5.uic.loadUi does.
setattr(self.baseinstance, name, widget)
return widget
widget = _UiLoader(baseinstance).load(uifile)
Qt.QtCore.QMetaObject.connectSlotsByName(widget)
return widget
else:
raise NotImplementedError("No implementation available for loadUi") | Dynamically load a user interface from the given `uifile`
This function calls `uic.loadUi` if using PyQt bindings,
else it implements a comparable binding for PySide.
Documentation:
http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi
Arguments:
uifile (str): Absolute path to Qt Designer file.
baseinstance (QWidget): Instantiated QWidget or subclass thereof
Return:
baseinstance if `baseinstance` is not `None`. Otherwise
return the newly created instance of the user interface. | Below is the the instruction that describes the task:
### Input:
Dynamically load a user interface from the given `uifile`
This function calls `uic.loadUi` if using PyQt bindings,
else it implements a comparable binding for PySide.
Documentation:
http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi
Arguments:
uifile (str): Absolute path to Qt Designer file.
baseinstance (QWidget): Instantiated QWidget or subclass thereof
Return:
baseinstance if `baseinstance` is not `None`. Otherwise
return the newly created instance of the user interface.
### Response:
def _loadUi(uifile, baseinstance=None):
"""Dynamically load a user interface from the given `uifile`
This function calls `uic.loadUi` if using PyQt bindings,
else it implements a comparable binding for PySide.
Documentation:
http://pyqt.sourceforge.net/Docs/PyQt5/designer.html#PyQt5.uic.loadUi
Arguments:
uifile (str): Absolute path to Qt Designer file.
baseinstance (QWidget): Instantiated QWidget or subclass thereof
Return:
baseinstance if `baseinstance` is not `None`. Otherwise
return the newly created instance of the user interface.
"""
if hasattr(Qt, "_uic"):
return Qt._uic.loadUi(uifile, baseinstance)
elif hasattr(Qt, "_QtUiTools"):
# Implement `PyQt5.uic.loadUi` for PySide(2)
class _UiLoader(Qt._QtUiTools.QUiLoader):
"""Create the user interface in a base instance.
Unlike `Qt._QtUiTools.QUiLoader` itself this class does not
create a new instance of the top-level widget, but creates the user
interface in an existing instance of the top-level class if needed.
This mimics the behaviour of `PyQt5.uic.loadUi`.
"""
def __init__(self, baseinstance):
super(_UiLoader, self).__init__(baseinstance)
self.baseinstance = baseinstance
def load(self, uifile, *args, **kwargs):
from xml.etree.ElementTree import ElementTree
# For whatever reason, if this doesn't happen then
# reading an invalid or non-existing .ui file throws
# a RuntimeError.
etree = ElementTree()
etree.parse(uifile)
widget = Qt._QtUiTools.QUiLoader.load(
self, uifile, *args, **kwargs)
# Workaround for PySide 1.0.9, see issue #208
widget.parentWidget()
return widget
def createWidget(self, class_name, parent=None, name=""):
"""Called for each widget defined in ui file
Overridden here to populate `baseinstance` instead.
"""
if parent is None and self.baseinstance:
# Supposed to create the top-level widget,
# return the base instance instead
return self.baseinstance
# For some reason, Line is not in the list of available
# widgets, but works fine, so we have to special case it here.
if class_name in self.availableWidgets() + ["Line"]:
# Create a new widget for child widgets
widget = Qt._QtUiTools.QUiLoader.createWidget(self,
class_name,
parent,
name)
else:
raise Exception("Custom widget '%s' not supported"
% class_name)
if self.baseinstance:
# Set an attribute for the new child widget on the base
# instance, just like PyQt5.uic.loadUi does.
setattr(self.baseinstance, name, widget)
return widget
widget = _UiLoader(baseinstance).load(uifile)
Qt.QtCore.QMetaObject.connectSlotsByName(widget)
return widget
else:
raise NotImplementedError("No implementation available for loadUi") |
def with_write_hdf5(func):
"""Decorate an HDF5-writing function to open a filepath if needed
``func`` should be written to take the object to be written as the
first argument, and then presume an `h5py.Group` as the second.
This method uses keywords ``append`` and ``overwrite`` as follows if
the output file already exists:
- ``append=False, overwrite=False``: raise `~exceptions.IOError`
- ``append=True``: open in mode ``a``
- ``append=False, overwrite=True``: open in mode ``w``
"""
@wraps(func)
def decorated_func(obj, fobj, *args, **kwargs):
# pylint: disable=missing-docstring
if not isinstance(fobj, h5py.HLObject):
append = kwargs.get('append', False)
overwrite = kwargs.get('overwrite', False)
if os.path.exists(fobj) and not (overwrite or append):
raise IOError("File exists: %s" % fobj)
with h5py.File(fobj, 'a' if append else 'w') as h5f:
return func(obj, h5f, *args, **kwargs)
return func(obj, fobj, *args, **kwargs)
return decorated_func | Decorate an HDF5-writing function to open a filepath if needed
``func`` should be written to take the object to be written as the
first argument, and then presume an `h5py.Group` as the second.
This method uses keywords ``append`` and ``overwrite`` as follows if
the output file already exists:
- ``append=False, overwrite=False``: raise `~exceptions.IOError`
- ``append=True``: open in mode ``a``
- ``append=False, overwrite=True``: open in mode ``w`` | Below is the the instruction that describes the task:
### Input:
Decorate an HDF5-writing function to open a filepath if needed
``func`` should be written to take the object to be written as the
first argument, and then presume an `h5py.Group` as the second.
This method uses keywords ``append`` and ``overwrite`` as follows if
the output file already exists:
- ``append=False, overwrite=False``: raise `~exceptions.IOError`
- ``append=True``: open in mode ``a``
- ``append=False, overwrite=True``: open in mode ``w``
### Response:
def with_write_hdf5(func):
"""Decorate an HDF5-writing function to open a filepath if needed
``func`` should be written to take the object to be written as the
first argument, and then presume an `h5py.Group` as the second.
This method uses keywords ``append`` and ``overwrite`` as follows if
the output file already exists:
- ``append=False, overwrite=False``: raise `~exceptions.IOError`
- ``append=True``: open in mode ``a``
- ``append=False, overwrite=True``: open in mode ``w``
"""
@wraps(func)
def decorated_func(obj, fobj, *args, **kwargs):
# pylint: disable=missing-docstring
if not isinstance(fobj, h5py.HLObject):
append = kwargs.get('append', False)
overwrite = kwargs.get('overwrite', False)
if os.path.exists(fobj) and not (overwrite or append):
raise IOError("File exists: %s" % fobj)
with h5py.File(fobj, 'a' if append else 'w') as h5f:
return func(obj, h5f, *args, **kwargs)
return func(obj, fobj, *args, **kwargs)
return decorated_func |
def is_cptp(self, atol=None, rtol=None):
"""Return True if completely-positive trace-preserving."""
if self._data[1] is not None:
return False
if atol is None:
atol = self._atol
if rtol is None:
rtol = self._rtol
accum = 0j
for op in self._data[0]:
accum += np.dot(np.transpose(np.conj(op)), op)
return is_identity_matrix(accum, rtol=rtol, atol=atol) | Return True if completely-positive trace-preserving. | Below is the the instruction that describes the task:
### Input:
Return True if completely-positive trace-preserving.
### Response:
def is_cptp(self, atol=None, rtol=None):
"""Return True if completely-positive trace-preserving."""
if self._data[1] is not None:
return False
if atol is None:
atol = self._atol
if rtol is None:
rtol = self._rtol
accum = 0j
for op in self._data[0]:
accum += np.dot(np.transpose(np.conj(op)), op)
return is_identity_matrix(accum, rtol=rtol, atol=atol) |
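A worked check of the same trace-preserving condition with plain numpy, using the amplitude-damping Kraus pair as a concrete example:

import numpy as np

gamma = 0.3                                             # damping strength
k0 = np.array([[1, 0], [0, np.sqrt(1 - gamma)]])
k1 = np.array([[0, np.sqrt(gamma)], [0, 0]])

# Trace preservation: sum_k K_k^dagger K_k == identity, which is exactly the
# accumulation loop inside is_cptp above.
accum = sum(op.conj().T @ op for op in (k0, k1))
assert np.allclose(accum, np.eye(2))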
def dict_to_etree(source, root_tag=None):
""" Recursively load dict/list representation of an XML tree into an etree representation.
Args:
source -- A dictionary representing an XML document where identical children tags are
        contained in a list.
Keyword args:
root_tag -- A parent tag in which to wrap the xml tree. If None, and the source dict
contains multiple root items, a list of etree's Elements will be returned.
Returns:
An ET.Element which is the root of an XML tree or a list of these.
>>> dict_to_etree({'foo': 'lorem'}) #doctest: +ELLIPSIS
<Element foo at 0x...>
>>> dict_to_etree({'foo': 'lorem', 'bar': 'ipsum'}) #doctest: +ELLIPSIS
[<Element foo at 0x...>, <Element bar at 0x...>]
>>> ET.tostring(dict_to_etree({'document': {'item1': 'foo', 'item2': 'bar'}}))
'<document><item2>bar</item2><item1>foo</item1></document>'
>>> ET.tostring(dict_to_etree({'foo': 'baz'}, root_tag='document'))
'<document><foo>baz</foo></document>'
>>> ET.tostring(dict_to_etree({'title': 'foo', 'list': [{'li':1}, {'li':2}]}, root_tag='document'))
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
"""
def dict_to_etree_recursive(source, parent):
if hasattr(source, 'keys'):
for key, value in source.iteritems():
sub = ET.SubElement(parent, key)
dict_to_etree_recursive(value, sub)
elif isinstance(source, list):
for element in source:
dict_to_etree_recursive(element, parent)
else: # TODO: Add feature to include xml literals as special objects or a etree subtree
parent.text = source
if root_tag is None:
if len(source) == 1:
root_tag = source.keys()[0]
source = source[root_tag]
else:
roots = []
for tag, content in source.iteritems():
root = ET.Element(tag)
dict_to_etree_recursive(content, root)
roots.append(root)
return roots
root = ET.Element(root_tag)
dict_to_etree_recursive(source, root)
return root | Recursively load dict/list representation of an XML tree into an etree representation.
Args:
source -- A dictionary representing an XML document where identical children tags are
        contained in a list.
Keyword args:
root_tag -- A parent tag in which to wrap the xml tree. If None, and the source dict
contains multiple root items, a list of etree's Elements will be returned.
Returns:
An ET.Element which is the root of an XML tree or a list of these.
>>> dict_to_etree({'foo': 'lorem'}) #doctest: +ELLIPSIS
<Element foo at 0x...>
>>> dict_to_etree({'foo': 'lorem', 'bar': 'ipsum'}) #doctest: +ELLIPSIS
[<Element foo at 0x...>, <Element bar at 0x...>]
>>> ET.tostring(dict_to_etree({'document': {'item1': 'foo', 'item2': 'bar'}}))
'<document><item2>bar</item2><item1>foo</item1></document>'
>>> ET.tostring(dict_to_etree({'foo': 'baz'}, root_tag='document'))
'<document><foo>baz</foo></document>'
>>> ET.tostring(dict_to_etree({'title': 'foo', 'list': [{'li':1}, {'li':2}]}, root_tag='document'))
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>' | Below is the the instruction that describes the task:
### Input:
Recursively load dict/list representation of an XML tree into an etree representation.
Args:
source -- A dictionary representing an XML document where identical children tags are
        contained in a list.
Keyword args:
root_tag -- A parent tag in which to wrap the xml tree. If None, and the source dict
contains multiple root items, a list of etree's Elements will be returned.
Returns:
An ET.Element which is the root of an XML tree or a list of these.
>>> dict_to_etree({'foo': 'lorem'}) #doctest: +ELLIPSIS
<Element foo at 0x...>
>>> dict_to_etree({'foo': 'lorem', 'bar': 'ipsum'}) #doctest: +ELLIPSIS
[<Element foo at 0x...>, <Element bar at 0x...>]
>>> ET.tostring(dict_to_etree({'document': {'item1': 'foo', 'item2': 'bar'}}))
'<document><item2>bar</item2><item1>foo</item1></document>'
>>> ET.tostring(dict_to_etree({'foo': 'baz'}, root_tag='document'))
'<document><foo>baz</foo></document>'
>>> ET.tostring(dict_to_etree({'title': 'foo', 'list': [{'li':1}, {'li':2}]}, root_tag='document'))
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
### Response:
def dict_to_etree(source, root_tag=None):
""" Recursively load dict/list representation of an XML tree into an etree representation.
Args:
source -- A dictionary representing an XML document where identical children tags are
        contained in a list.
Keyword args:
root_tag -- A parent tag in which to wrap the xml tree. If None, and the source dict
contains multiple root items, a list of etree's Elements will be returned.
Returns:
An ET.Element which is the root of an XML tree or a list of these.
>>> dict_to_etree({'foo': 'lorem'}) #doctest: +ELLIPSIS
<Element foo at 0x...>
>>> dict_to_etree({'foo': 'lorem', 'bar': 'ipsum'}) #doctest: +ELLIPSIS
[<Element foo at 0x...>, <Element bar at 0x...>]
>>> ET.tostring(dict_to_etree({'document': {'item1': 'foo', 'item2': 'bar'}}))
'<document><item2>bar</item2><item1>foo</item1></document>'
>>> ET.tostring(dict_to_etree({'foo': 'baz'}, root_tag='document'))
'<document><foo>baz</foo></document>'
>>> ET.tostring(dict_to_etree({'title': 'foo', 'list': [{'li':1}, {'li':2}]}, root_tag='document'))
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'
"""
def dict_to_etree_recursive(source, parent):
if hasattr(source, 'keys'):
for key, value in source.iteritems():
sub = ET.SubElement(parent, key)
dict_to_etree_recursive(value, sub)
elif isinstance(source, list):
for element in source:
dict_to_etree_recursive(element, parent)
else: # TODO: Add feature to include xml literals as special objects or a etree subtree
parent.text = source
if root_tag is None:
if len(source) == 1:
root_tag = source.keys()[0]
source = source[root_tag]
else:
roots = []
for tag, content in source.iteritems():
root = ET.Element(tag)
dict_to_etree_recursive(content, root)
roots.append(root)
return roots
root = ET.Element(root_tag)
dict_to_etree_recursive(source, root)
return root |
def cat_file(path):
"""Cat file and return content"""
cmd = ["cat", path]
status, stdout, _ = cij.ssh.command(cmd, shell=True, echo=True)
if status:
raise RuntimeError("cij.nvme.env: cat %s failed" % path)
return stdout.strip() | Cat file and return content | Below is the the instruction that describes the task:
### Input:
Cat file and return content
### Response:
def cat_file(path):
"""Cat file and return content"""
cmd = ["cat", path]
status, stdout, _ = cij.ssh.command(cmd, shell=True, echo=True)
if status:
raise RuntimeError("cij.nvme.env: cat %s failed" % path)
return stdout.strip() |
def _get_query_params(self):
"""Getter for query parameters for the next request.
Returns:
dict: A dictionary of query parameters.
"""
result = {}
if self.next_page_token is not None:
result[self._PAGE_TOKEN] = self.next_page_token
if self.max_results is not None:
result[self._MAX_RESULTS] = self.max_results - self.num_results
result.update(self.extra_params)
return result | Getter for query parameters for the next request.
Returns:
dict: A dictionary of query parameters. | Below is the the instruction that describes the task:
### Input:
Getter for query parameters for the next request.
Returns:
dict: A dictionary of query parameters.
### Response:
def _get_query_params(self):
"""Getter for query parameters for the next request.
Returns:
dict: A dictionary of query parameters.
"""
result = {}
if self.next_page_token is not None:
result[self._PAGE_TOKEN] = self.next_page_token
if self.max_results is not None:
result[self._MAX_RESULTS] = self.max_results - self.num_results
result.update(self.extra_params)
return result |
def _encode_observations(self, observations):
"""Encodes observations as PNG."""
return [
Observation(
self._session.obj.run(
self._encoded_image_t.obj,
feed_dict={self._decoded_image_p.obj: observation}
),
self._decode_png
)
for observation in observations
] | Encodes observations as PNG. | Below is the the instruction that describes the task:
### Input:
Encodes observations as PNG.
### Response:
def _encode_observations(self, observations):
"""Encodes observations as PNG."""
return [
Observation(
self._session.obj.run(
self._encoded_image_t.obj,
feed_dict={self._decoded_image_p.obj: observation}
),
self._decode_png
)
for observation in observations
] |
def get_name_value_hash_txid( self, name, value_hash ):
"""
Given a name and a value hash (i.e. the zone file hash), return the txid for the value hash.
Return None if the name doesn't exist, or is revoked, or did not
receive a NAME_UPDATE since it was last preordered.
"""
rec = self.get_name( name )
if rec is None:
return None
if rec['revoked']:
return None
# find the txid of the given value hash
if rec['value_hash'] == value_hash:
return rec['txid']
else:
# search backwards for it
hist = rec['history']
flat_hist = namedb_flatten_history( hist )
for i in xrange(len(flat_hist)-1, 0, -1):
delta = flat_hist[i]
if delta['op'] == NAME_PREORDER:
# this name was re-registered. skip
return None
if delta['value_hash'] == value_hash:
# this is the txid that affected it
return delta['txid']
# not found
return None | Given a name and a value hash (i.e. the zone file hash), return the txid for the value hash.
Return None if the name doesn't exist, or is revoked, or did not
receive a NAME_UPDATE since it was last preordered. | Below is the the instruction that describes the task:
### Input:
Given a name and a value hash (i.e. the zone file hash), return the txid for the value hash.
Return None if the name doesn't exist, or is revoked, or did not
receive a NAME_UPDATE since it was last preordered.
### Response:
def get_name_value_hash_txid( self, name, value_hash ):
"""
Given a name and a value hash (i.e. the zone file hash), return the txid for the value hash.
Return None if the name doesn't exist, or is revoked, or did not
receive a NAME_UPDATE since it was last preordered.
"""
rec = self.get_name( name )
if rec is None:
return None
if rec['revoked']:
return None
# find the txid of the given value hash
if rec['value_hash'] == value_hash:
return rec['txid']
else:
# search backwards for it
hist = rec['history']
flat_hist = namedb_flatten_history( hist )
for i in xrange(len(flat_hist)-1, 0, -1):
delta = flat_hist[i]
if delta['op'] == NAME_PREORDER:
# this name was re-registered. skip
return None
if delta['value_hash'] == value_hash:
# this is the txid that affected it
return delta['txid']
# not found
return None |