def literal_to_dict(value):
""" Transform an object value into a dict readable value
:param value: Object of a triple which is not a BNode
:type value: Literal or URIRef
:return: dict or str or list
"""
if isinstance(value, Literal):
if value.language is not None:
return {"@value": str(value), "@language": value.language}
return value.toPython()
elif isinstance(value, URIRef):
return {"@id": str(value)}
elif value is None:
return None
    return str(value)

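A minimal usage sketch, assuming the rdflib Literal and URIRef types that the isinstance checks imply (the sample values are made up):

from rdflib import Literal, URIRef

print(literal_to_dict(Literal("chat", lang="fr")))      # {'@value': 'chat', '@language': 'fr'}
print(literal_to_dict(Literal(42)))                     # 42 (native Python value via toPython())
print(literal_to_dict(URIRef("http://example.org/x")))  # {'@id': 'http://example.org/x'}
print(literal_to_dict(None))                            # None
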
def commit_transaction(self):
"""Commit a multi-statement transaction.
.. versionadded:: 3.7
"""
self._check_ended()
retry = False
state = self._transaction.state
if state is _TxnState.NONE:
raise InvalidOperation("No transaction started")
elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
# Server transaction was never started, no need to send a command.
self._transaction.state = _TxnState.COMMITTED_EMPTY
return
elif state is _TxnState.ABORTED:
raise InvalidOperation(
"Cannot call commitTransaction after calling abortTransaction")
elif state is _TxnState.COMMITTED:
# We're explicitly retrying the commit, move the state back to
# "in progress" so that _in_transaction returns true.
self._transaction.state = _TxnState.IN_PROGRESS
retry = True
try:
self._finish_transaction_with_retry("commitTransaction", retry)
except ConnectionFailure as exc:
# We do not know if the commit was successfully applied on the
# server or if it satisfied the provided write concern, set the
# unknown commit error label.
exc._remove_error_label("TransientTransactionError")
_reraise_with_unknown_commit(exc)
except WTimeoutError as exc:
# We do not know if the commit has satisfied the provided write
# concern, add the unknown commit error label.
_reraise_with_unknown_commit(exc)
except OperationFailure as exc:
if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES:
            # The server reports errorLabels in this case.
raise
# We do not know if the commit was successfully applied on the
# server or if it satisfied the provided write concern, set the
# unknown commit error label.
_reraise_with_unknown_commit(exc)
finally:
        self._transaction.state = _TxnState.COMMITTED

def subjects(self):
"""
A generator which yields :py:class:`.Subject` objects which are in this
subject set.
Examples::
for subject in subject_set.subjects:
print(subject.id)
"""
for sms in SetMemberSubject.where(subject_set_id=self.id):
        yield sms.links.subject

def gps_velocity_body(GPS_RAW_INT, ATTITUDE):
'''return GPS velocity vector in body frame'''
r = rotation(ATTITUDE)
return r.transposed() * Vector3(GPS_RAW_INT.vel*0.01*cos(radians(GPS_RAW_INT.cog*0.01)),
GPS_RAW_INT.vel*0.01*sin(radians(GPS_RAW_INT.cog*0.01)),
                                    -tan(ATTITUDE.pitch)*GPS_RAW_INT.vel*0.01)

async def handle_job_closing(self, container_id, retval):
"""
    Handle a closing student container. Do some cleanup, verify memory limits and timeouts, and return data to the backend
"""
try:
self._logger.debug("Closing %s", container_id)
try:
message, container_path, future_results = self._containers_running[container_id]
del self._containers_running[container_id]
except asyncio.CancelledError:
raise
except:
self._logger.warning("Container %s that has finished(p1) was not launched by this agent", str(container_id), exc_info=True)
return
# Close sub containers
for student_container_id_loop in self._student_containers_for_job[message.job_id]:
# little hack to ensure the value of student_container_id_loop is copied into the closure
async def close_and_delete(student_container_id=student_container_id_loop):
try:
await self._docker.kill_container(student_container_id)
await self._docker.remove_container(student_container_id)
except asyncio.CancelledError:
raise
except:
pass # ignore
self._create_safe_task(close_and_delete(student_container_id_loop))
del self._student_containers_for_job[message.job_id]
        # Allow other containers to reuse the external ports this container was using
if container_id in self._assigned_external_ports:
for p in self._assigned_external_ports[container_id]:
self._external_ports.add(p)
del self._assigned_external_ports[container_id]
# Verify if the container was killed, either by the client, by an OOM or by a timeout
killed = await self._timeout_watcher.was_killed(container_id)
if container_id in self._containers_killed:
killed = self._containers_killed[container_id]
del self._containers_killed[container_id]
stdout = ""
stderr = ""
result = "crash" if retval == -1 else None
error_msg = None
grade = None
problems = {}
custom = {}
tests = {}
archive = None
state = ""
if killed is not None:
result = killed
        # If everything went well, go on and retrieve the status from the container
if result is None:
# Get logs back
try:
return_value = await future_results
# Accepted types for return dict
accepted_types = {"stdout": str, "stderr": str, "result": str, "text": str, "grade": float,
"problems": dict, "custom": dict, "tests": dict, "state": str, "archive": str}
keys_fct = {"problems": id_checker, "custom": id_checker, "tests": id_checker_tests}
# Check dict content
for key, item in return_value.items():
if not isinstance(item, accepted_types[key]):
raise Exception("Feedback file is badly formatted.")
elif accepted_types[key] == dict and key != "custom": #custom can contain anything:
for sub_key, sub_item in item.items():
if not keys_fct[key](sub_key) or isinstance(sub_item, dict):
raise Exception("Feedback file is badly formatted.")
# Set output fields
stdout = return_value.get("stdout", "")
stderr = return_value.get("stderr", "")
result = return_value.get("result", "error")
error_msg = return_value.get("text", "")
grade = return_value.get("grade", None)
problems = return_value.get("problems", {})
custom = return_value.get("custom", {})
tests = return_value.get("tests", {})
state = return_value.get("state", "")
archive = return_value.get("archive", None)
if archive is not None:
archive = base64.b64decode(archive)
except Exception as e:
self._logger.exception("Cannot get back output of container %s! (%s)", container_id, str(e))
result = "crash"
error_msg = 'The grader did not return a readable output : {}'.format(str(e))
# Default values
if error_msg is None:
error_msg = ""
if grade is None:
if result == "success":
grade = 100.0
else:
grade = 0.0
# Remove container
try:
await self._docker.remove_container(container_id)
except asyncio.CancelledError:
raise
except:
pass
# Delete folders
try:
await self._ashutil.rmtree(container_path)
except PermissionError:
self._logger.debug("Cannot remove old container path!")
pass # todo: run a docker container to force removal
# Return!
await self.send_job_result(message.job_id, result, error_msg, grade, problems, tests, custom, state, archive, stdout, stderr)
# Do not forget to remove data from internal state
del self._container_for_job[message.job_id]
except asyncio.CancelledError:
raise
except:
self._logger.exception("Exception in handle_job_closing") | Handle a closing student container. Do some cleaning, verify memory limits, timeouts, ... and returns data to the backend | Below is the the instruction that describes the task:
### Input:
Handle a closing student container. Do some cleaning, verify memory limits, timeouts, ... and returns data to the backend
### Response:
async def handle_job_closing(self, container_id, retval):
"""
Handle a closing student container. Do some cleaning, verify memory limits, timeouts, ... and returns data to the backend
"""
try:
self._logger.debug("Closing %s", container_id)
try:
message, container_path, future_results = self._containers_running[container_id]
del self._containers_running[container_id]
except asyncio.CancelledError:
raise
except:
self._logger.warning("Container %s that has finished(p1) was not launched by this agent", str(container_id), exc_info=True)
return
# Close sub containers
for student_container_id_loop in self._student_containers_for_job[message.job_id]:
# little hack to ensure the value of student_container_id_loop is copied into the closure
async def close_and_delete(student_container_id=student_container_id_loop):
try:
await self._docker.kill_container(student_container_id)
await self._docker.remove_container(student_container_id)
except asyncio.CancelledError:
raise
except:
pass # ignore
self._create_safe_task(close_and_delete(student_container_id_loop))
del self._student_containers_for_job[message.job_id]
# Allow other container to reuse the external ports this container has finished to use
if container_id in self._assigned_external_ports:
for p in self._assigned_external_ports[container_id]:
self._external_ports.add(p)
del self._assigned_external_ports[container_id]
# Verify if the container was killed, either by the client, by an OOM or by a timeout
killed = await self._timeout_watcher.was_killed(container_id)
if container_id in self._containers_killed:
killed = self._containers_killed[container_id]
del self._containers_killed[container_id]
stdout = ""
stderr = ""
result = "crash" if retval == -1 else None
error_msg = None
grade = None
problems = {}
custom = {}
tests = {}
archive = None
state = ""
if killed is not None:
result = killed
# If everything did well, continue to retrieve the status from the container
if result is None:
# Get logs back
try:
return_value = await future_results
# Accepted types for return dict
accepted_types = {"stdout": str, "stderr": str, "result": str, "text": str, "grade": float,
"problems": dict, "custom": dict, "tests": dict, "state": str, "archive": str}
keys_fct = {"problems": id_checker, "custom": id_checker, "tests": id_checker_tests}
# Check dict content
for key, item in return_value.items():
if not isinstance(item, accepted_types[key]):
raise Exception("Feedback file is badly formatted.")
elif accepted_types[key] == dict and key != "custom": #custom can contain anything:
for sub_key, sub_item in item.items():
if not keys_fct[key](sub_key) or isinstance(sub_item, dict):
raise Exception("Feedback file is badly formatted.")
# Set output fields
stdout = return_value.get("stdout", "")
stderr = return_value.get("stderr", "")
result = return_value.get("result", "error")
error_msg = return_value.get("text", "")
grade = return_value.get("grade", None)
problems = return_value.get("problems", {})
custom = return_value.get("custom", {})
tests = return_value.get("tests", {})
state = return_value.get("state", "")
archive = return_value.get("archive", None)
if archive is not None:
archive = base64.b64decode(archive)
except Exception as e:
self._logger.exception("Cannot get back output of container %s! (%s)", container_id, str(e))
result = "crash"
error_msg = 'The grader did not return a readable output : {}'.format(str(e))
# Default values
if error_msg is None:
error_msg = ""
if grade is None:
if result == "success":
grade = 100.0
else:
grade = 0.0
# Remove container
try:
await self._docker.remove_container(container_id)
except asyncio.CancelledError:
raise
except:
pass
# Delete folders
try:
await self._ashutil.rmtree(container_path)
except PermissionError:
self._logger.debug("Cannot remove old container path!")
pass # todo: run a docker container to force removal
# Return!
await self.send_job_result(message.job_id, result, error_msg, grade, problems, tests, custom, state, archive, stdout, stderr)
# Do not forget to remove data from internal state
del self._container_for_job[message.job_id]
except asyncio.CancelledError:
raise
except:
self._logger.exception("Exception in handle_job_closing") |
def resize(image, width=None, height=None, crop=False):
"""
Resize an image and return the resized file.
"""
# First normalize params to determine which file to get
width, height, crop = _normalize_params(image, width, height, crop)
try:
# Check the image file state for clean close
is_closed = image.closed
if is_closed:
image.open()
# Create the resized file
# Do resize and crop
resized_image = _resize(image, width, height, crop)
finally:
# Re-close if received a closed file
if is_closed:
image.close()
    return ImageFile(resized_image)

def set(self, client_id, code, request, *args, **kwargs):
"""Creates Grant object with the given params
:param client_id: ID of the client
:param code:
:param request: OAuthlib request object
"""
expires = datetime.utcnow() + timedelta(seconds=100)
grant = self.model(
client_id=request.client.client_id,
code=code['code'],
redirect_uri=request.redirect_uri,
scope=' '.join(request.scopes),
user=self.current_user(),
expires=expires
)
self.session.add(grant)
        self.session.commit()

def find_asm_blocks(asm_lines):
"""Find blocks probably corresponding to loops in assembly."""
blocks = []
last_labels = OrderedDict()
packed_ctr = 0
avx_ctr = 0
xmm_references = []
ymm_references = []
zmm_references = []
gp_references = []
mem_references = []
increments = {}
for i, line in enumerate(asm_lines):
# Register access counts
zmm_references += re.findall('%zmm[0-9]+', line)
ymm_references += re.findall('%ymm[0-9]+', line)
xmm_references += re.findall('%xmm[0-9]+', line)
gp_references += re.findall('%r[a-z0-9]+', line)
if re.search(r'\d*\(%\w+(,%\w+)?(,\d)?\)', line):
m = re.search(r'(?P<off>[-]?\d*)\(%(?P<basep>\w+)(,%(?P<idx>\w+))?(?:,(?P<scale>\d))?\)'
r'(?P<eol>$)?',
line)
mem_references.append((
int(m.group('off')) if m.group('off') else 0,
m.group('basep'),
m.group('idx'),
int(m.group('scale')) if m.group('scale') else 1,
'load' if m.group('eol') is None else 'store'))
if re.match(r"^[v]?(mul|add|sub|div|fmadd(132|213|231)?)[h]?p[ds]", line):
if line.startswith('v'):
avx_ctr += 1
packed_ctr += 1
elif re.match(r'^\S+:', line):
# last_labels[label_name] = line_number
            last_labels[line[0:line.find(':')]] = i
# Reset counters
packed_ctr = 0
avx_ctr = 0
xmm_references = []
ymm_references = []
zmm_references = []
gp_references = []
mem_references = []
increments = {}
elif re.match(r'^inc[bwlq]?\s+%[a-z0-9]+', line):
reg_start = line.find('%') + 1
increments[line[reg_start:]] = 1
elif re.match(r'^add[bwlq]?\s+\$[0-9]+,\s*%[a-z0-9]+', line):
const_start = line.find('$') + 1
const_end = line[const_start + 1:].find(',') + const_start + 1
reg_start = line.find('%') + 1
increments[line[reg_start:]] = int(line[const_start:const_end])
elif re.match(r'^dec[bwlq]?', line):
reg_start = line.find('%') + 1
increments[line[reg_start:]] = -1
elif re.match(r'^sub[bwlq]?\s+\$[0-9]+,', line):
const_start = line.find('$') + 1
const_end = line[const_start + 1:].find(',') + const_start + 1
reg_start = line.find('%') + 1
increments[line[reg_start:]] = -int(line[const_start:const_end])
elif last_labels and re.match(r'^j[a-z]+\s+\S+\s*', line):
# End of block(s) due to jump
            # Check if jump target matches any previously recorded label
last_label = None
last_label_line = -1
for label_name, label_line in last_labels.items():
if re.match(r'^j[a-z]+\s+' + re.escape(label_name) + r'\s*', line):
# matched
last_label = label_name
last_label_line = label_line
labels = list(last_labels.keys())
if last_label:
# deduce loop increment from memory index register
pointer_increment = None # default -> can not decide, let user choose
possible_idx_regs = None
if mem_references:
# we found memory references to work with
# If store accesses exist, consider only those
store_references = [mref for mref in mem_references
if mref[4] == 'store']
refs = store_references or mem_references
possible_idx_regs = list(set(increments.keys()).intersection(
set([r[1] for r in refs if r[1] is not None] +
[r[2] for r in refs if r[2] is not None])))
for mref in refs:
for reg in list(possible_idx_regs):
# Only consider references with two registers, where one could be an index
if None not in mref[1:3]:
                                # One needs to match, other registers will be excluded
if not (reg == mref[1] or reg == mref[2]):
# reg can not be it
possible_idx_regs.remove(reg)
idx_reg = None
if len(possible_idx_regs) == 1:
# good, exactly one register was found
idx_reg = possible_idx_regs[0]
elif possible_idx_regs and itemsEqual([increments[pidxreg]
for pidxreg in possible_idx_regs]):
                        # multiple options were found, but all have the same increment
# use first match:
idx_reg = possible_idx_regs[0]
if idx_reg:
mem_scales = [mref[3] for mref in refs
if idx_reg == mref[2] or idx_reg == mref[1]]
if itemsEqual(mem_scales):
# good, all scales are equal
try:
pointer_increment = mem_scales[0] * increments[idx_reg]
except:
print("labels", pformat(labels[labels.index(last_label):]))
print("lines", pformat(asm_lines[last_label_line:i + 1]))
print("increments", increments)
print("mem_references", pformat(mem_references))
print("idx_reg", idx_reg)
print("mem_scales", mem_scales)
raise
blocks.append({'first_line': last_label_line,
'last_line': i,
'ops': i - last_label_line,
'labels': labels[labels.index(last_label):],
'packed_instr': packed_ctr,
'avx_instr': avx_ctr,
'XMM': (len(xmm_references), len(set(xmm_references))),
'YMM': (len(ymm_references), len(set(ymm_references))),
'ZMM': (len(zmm_references), len(set(zmm_references))),
'GP': (len(gp_references), len(set(gp_references))),
'regs': (len(xmm_references) + len(ymm_references) +
len(zmm_references) + len(gp_references),
len(set(xmm_references)) + len(set(ymm_references)) +
len(set(zmm_references)) +
len(set(gp_references))),
'pointer_increment': pointer_increment,
'lines': asm_lines[last_label_line:i + 1],
'possible_idx_regs': possible_idx_regs,
'mem_references': mem_references,
'increments': increments, })
# Reset counters
packed_ctr = 0
avx_ctr = 0
xmm_references = []
ymm_references = []
zmm_references = []
gp_references = []
mem_references = []
increments = {}
last_labels = OrderedDict()
    return list(enumerate(blocks))

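A small illustration on a hand-written AT&T-syntax loop; the assembly below is invented for demonstration and is not real compiler output:

asm = [
    ".L3:",
    "vmovupd (%rdi,%rax,8), %ymm0",
    "vaddpd (%rsi,%rax,8), %ymm0, %ymm1",
    "vmovupd %ymm1, (%rdx,%rax,8)",
    "addq $4, %rax",
    "cmpq %rcx, %rax",
    "jb .L3",
]
# One block should be detected; with an 8-byte store stride and an index
# increment of 4, the deduced pointer_increment should come out as 32.
for idx, block in find_asm_blocks(asm):
    print(idx, block['labels'], block['packed_instr'], block['pointer_increment'])
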
def get_multiple_data():
"""Get data from all the platforms listed in makerlabs."""
# Get data from all the mapped platforms
all_labs = {}
all_labs["diybio_org"] = diybio_org.get_labs(format="dict")
all_labs["fablabs_io"] = fablabs_io.get_labs(format="dict")
all_labs["makeinitaly_foundation"] = makeinitaly_foundation.get_labs(
format="dict")
all_labs["hackaday_io"] = hackaday_io.get_labs(format="dict")
all_labs["hackerspaces_org"] = hackerspaces_org.get_labs(format="dict")
all_labs["makery_info"] = makery_info.get_labs(format="dict")
all_labs["nesta"] = nesta.get_labs(format="dict")
# all_labs["techshop_ws"] = techshop_ws.get_labs(format="dict")
    return all_labs

def check_physical(self, line):
"""Run all physical checks on a raw input line."""
self.physical_line = line
for name, check, argument_names in self._physical_checks:
self.init_checker_state(name, argument_names)
result = self.run_check(check, argument_names)
if result is not None:
(offset, text) = result
self.report_error(self.line_number, offset, text, check)
if text[:4] == 'E101':
                self.indent_char = line[0]

def sign_key(self, keyid, default_key=None, passphrase=None):
""" sign (an imported) public key - keyid, with default secret key
>>> import gnupg
>>> gpg = gnupg.GPG(homedir="doctests")
>>> key_input = gpg.gen_key_input()
>>> key = gpg.gen_key(key_input)
>>> gpg.sign_key(key['fingerprint'])
>>> gpg.list_sigs(key['fingerprint'])
:param str keyid: key shortID, longID, fingerprint or email_address
:param str passphrase: passphrase used when creating the key, leave None otherwise
:returns: The result giving status of the key signing...
success can be verified by gpg.list_sigs(keyid)
"""
args = []
input_command = ""
if passphrase:
passphrase_arg = "--passphrase-fd 0"
input_command = "%s\n" % passphrase
args.append(passphrase_arg)
if default_key:
args.append(str("--default-key %s" % default_key))
args.extend(["--command-fd 0", "--sign-key %s" % keyid])
p = self._open_subprocess(args)
result = self._result_map['signing'](self)
confirm_command = "%sy\n" % input_command
p.stdin.write(b(confirm_command))
self._collect_output(p, result, stdin=p.stdin)
    return result

def steal_docstring_from(obj):
"""Decorator that lets you steal a docstring from another object
Example
-------
::
@steal_docstring_from(superclass.meth)
def meth(self, arg):
"Extra subclass documentation"
pass
In this case the docstring of the new 'meth' will be copied from superclass.meth, and
    if an additional docstring was defined for meth it will be appended to the superclass
    docstring with two newlines in between.
"""
def deco(fn):
docs = [obj.__doc__]
if fn.__doc__:
docs.append(fn.__doc__)
fn.__doc__ = '\n\n'.join(docs)
return fn
    return deco

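For illustration, a quick demonstration of the combined docstring (the class and method names here are hypothetical):

class Base:
    def meth(self, arg):
        "Base documentation."

class Sub(Base):
    @steal_docstring_from(Base.meth)
    def meth(self, arg):
        "Extra subclass documentation"

print(Sub.meth.__doc__)
# Base documentation.
#
# Extra subclass documentation
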
def add_manager(model):
"""
Monkey patches the original model to use MultilingualManager instead of
default managers (not only ``objects``, but also every manager defined and inherited).
Custom managers are merged with MultilingualManager.
"""
if model._meta.abstract:
return
# Make all managers local for this model to fix patching parent model managers
model._meta.local_managers = model._meta.managers
for current_manager in model._meta.local_managers:
prev_class = current_manager.__class__
patch_manager_class(current_manager)
if model._default_manager.__class__ is prev_class:
# Normally model._default_manager is a reference to one of model's managers
# (and would be patched by the way).
# However, in some rare situations (mostly proxy models)
            # model._default_manager is not the same instance as one of the managers, but it
            # shares the same class.
model._default_manager.__class__ = current_manager.__class__
patch_manager_class(model._base_manager)
model._meta.base_manager_name = 'objects'
    model._meta._expire_cache()

def count_relations(graph) -> Counter:
"""Return a histogram over all relationships in a graph.
:param pybel.BELGraph graph: A BEL graph
:return: A Counter from {relation type: frequency}
"""
return Counter(
data[RELATION]
for _, _, data in graph.edges(data=True)
    )

def _get_query_argument(args, cell, env):
""" Get a query argument to a cell magic.
The query is specified with args['query']. We look that up and if it is a BQ query
object, just return it. If it is a string, build a query object out of it and return
that
Args:
args: the dictionary of magic arguments.
cell: the cell contents which can be variable value overrides (if args has a 'query'
value) or inline SQL otherwise.
env: a dictionary that is used for looking up variable values.
Returns:
A Query object.
"""
sql_arg = args.get('query', None)
if sql_arg is None:
# Assume we have inline SQL in the cell
if not isinstance(cell, basestring):
raise Exception('Expected a --query argument or inline SQL')
return bigquery.Query(cell, env=env)
item = google.datalab.utils.commands.get_notebook_item(sql_arg)
if isinstance(item, bigquery.Query):
return item
else:
        raise Exception('Expected a query object, got %s.' % type(item))

def freqz_cas(sos,w):
"""
Cascade frequency response
Mark Wickert October 2016
"""
Ns,Mcol = sos.shape
w,Hcas = signal.freqz(sos[0,:3],sos[0,3:],w)
for k in range(1,Ns):
w,Htemp = signal.freqz(sos[k,:3],sos[k,3:],w)
Hcas *= Htemp
    return w, Hcas

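A usage sketch, assuming a SciPy-style second-order-section array; the Butterworth design is an arbitrary choice for the example:

import numpy as np
from scipy import signal

sos = signal.butter(4, 0.3, output='sos')   # 4th-order lowpass as biquad sections
w = np.linspace(0, np.pi, 512)              # frequency grid in rad/sample
w, H_cascade = freqz_cas(sos, w)
print(H_cascade.shape)                      # (512,)
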
def select_labels(self, labels=None):
""" Prepare binar segmentation based on input segmentation and labels.
:param labels:
:return:
"""
self._resize_if_required()
segmentation = self._select_labels(self.resized_segmentation, labels)
# logger.debug("select labels in show_segmentation {} sum {}".format(labels, np.sum(segmentation)))
    self.resized_binar_segmentation = segmentation

def family_name(self):
"""
The name of the typeface family for this font, e.g. 'Arial'.
"""
def find_first(dict_, keys, default=None):
for key in keys:
value = dict_.get(key)
if value is not None:
return value
return default
# keys for Unicode, Mac, and Windows family name, respectively
    return find_first(self._names, ((0, 1), (1, 1), (3, 1)))

async def play(self, ctx, *, query):
"""Plays a file from the local filesystem"""
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))
ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)
    await ctx.send('Now playing: {}'.format(query))

def _find_reader_dataset(self, dataset_key, **dfilter):
"""Attempt to find a `DatasetID` in the available readers.
Args:
dataset_key (str, float, DatasetID):
Dataset name, wavelength, or a combination of `DatasetID`
parameters to use in searching for the dataset from the
available readers.
**dfilter (list or str): `DatasetID` parameters besides `name`
and `wavelength` to use to filter the
available datasets. Passed directly to
`get_dataset_key` of the readers, see
that method for more information.
"""
too_many = False
for reader_name, reader_instance in self.readers.items():
try:
ds_id = reader_instance.get_dataset_key(dataset_key, **dfilter)
except TooManyResults:
LOG.trace("Too many datasets matching key {} in reader {}".format(dataset_key, reader_name))
too_many = True
continue
except KeyError:
LOG.trace("Can't find dataset %s in reader %s", str(dataset_key), reader_name)
continue
LOG.trace("Found {} in reader {} when asking for {}".format(str(ds_id), reader_name, repr(dataset_key)))
try:
# now that we know we have the exact DatasetID see if we have already created a Node for it
return self.getitem(ds_id)
except KeyError:
# we haven't created a node yet, create it now
return Node(ds_id, {'reader_name': reader_name})
if too_many:
raise TooManyResults("Too many keys matching: {}".format(dataset_key)) | Attempt to find a `DatasetID` in the available readers.
Args:
dataset_key (str, float, DatasetID):
Dataset name, wavelength, or a combination of `DatasetID`
parameters to use in searching for the dataset from the
available readers.
**dfilter (list or str): `DatasetID` parameters besides `name`
and `wavelength` to use to filter the
available datasets. Passed directly to
`get_dataset_key` of the readers, see
that method for more information. | Below is the the instruction that describes the task:
### Input:
Attempt to find a `DatasetID` in the available readers.
Args:
dataset_key (str, float, DatasetID):
Dataset name, wavelength, or a combination of `DatasetID`
parameters to use in searching for the dataset from the
available readers.
**dfilter (list or str): `DatasetID` parameters besides `name`
and `wavelength` to use to filter the
available datasets. Passed directly to
`get_dataset_key` of the readers, see
that method for more information.
### Response:
def _find_reader_dataset(self, dataset_key, **dfilter):
"""Attempt to find a `DatasetID` in the available readers.
Args:
dataset_key (str, float, DatasetID):
Dataset name, wavelength, or a combination of `DatasetID`
parameters to use in searching for the dataset from the
available readers.
**dfilter (list or str): `DatasetID` parameters besides `name`
and `wavelength` to use to filter the
available datasets. Passed directly to
`get_dataset_key` of the readers, see
that method for more information.
"""
too_many = False
for reader_name, reader_instance in self.readers.items():
try:
ds_id = reader_instance.get_dataset_key(dataset_key, **dfilter)
except TooManyResults:
LOG.trace("Too many datasets matching key {} in reader {}".format(dataset_key, reader_name))
too_many = True
continue
except KeyError:
LOG.trace("Can't find dataset %s in reader %s", str(dataset_key), reader_name)
continue
LOG.trace("Found {} in reader {} when asking for {}".format(str(ds_id), reader_name, repr(dataset_key)))
try:
# now that we know we have the exact DatasetID see if we have already created a Node for it
return self.getitem(ds_id)
except KeyError:
# we haven't created a node yet, create it now
return Node(ds_id, {'reader_name': reader_name})
if too_many:
raise TooManyResults("Too many keys matching: {}".format(dataset_key)) |
def fetch_points_of_sales(self, ticket=None):
"""
Fetch all point of sales objects.
Fetch all point of sales from the WS and store (or update) them
locally.
Returns a list of tuples with the format (pos, created,).
"""
ticket = ticket or self.get_or_create_ticket('wsfe')
client = clients.get_client('wsfe', self.is_sandboxed)
response = client.service.FEParamGetPtosVenta(
serializers.serialize_ticket(ticket),
)
check_response(response)
results = []
for pos_data in response.ResultGet.PtoVenta:
results.append(PointOfSales.objects.update_or_create(
number=pos_data.Nro,
issuance_type=pos_data.EmisionTipo,
owner=self,
defaults={
'blocked': pos_data.Bloqueado == 'N',
'drop_date': parsers.parse_date(pos_data.FchBaja),
}
))
    return results

def check_basic_auth(self, username, password):
"""
This function is called to check if a username /
password combination is valid via the htpasswd file.
"""
valid = self.users.check_password(
username, password
)
if not valid:
log.warning('Invalid login from %s', username)
valid = False
return (
valid,
username
    )

def clear_all(self):
"Remove all items and column headings"
self.clear()
for ch in reversed(self.columns):
        del self[ch.name]

def ensure_int64_or_float64(arr, copy=False):
"""
    Ensure that an array of some integer dtype
    has an int64 dtype if possible.
If it's not possible, potentially because of overflow,
convert the array to float64 instead.
Parameters
----------
arr : array-like
The array whose data type we want to enforce.
copy: boolean
Whether to copy the original array or reuse
it in place, if possible.
Returns
-------
out_arr : The input array cast as int64 if
possible without overflow.
Otherwise the input array cast to float64.
"""
try:
return arr.astype('int64', copy=copy, casting='safe')
except TypeError:
        return arr.astype('float64', copy=copy)

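A brief sketch of the intended behaviour (values chosen for illustration):

import numpy as np

print(ensure_int64_or_float64(np.array([1, 2, 3], dtype=np.int32)).dtype)   # int64 (safe cast)
print(ensure_int64_or_float64(np.array([2**63], dtype=np.uint64)).dtype)    # float64 (uint64 -> int64 would overflow)
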
def _create_with_scope(body, kwargs):
'''
Helper function to wrap a block in a scope stack:
with ContextScope(context, **kwargs) as context:
... body ...
'''
return ast.With(
items=[
ast.withitem(
context_expr=_a.Call(
_a.Name('ContextScope'),
[_a.Name('context')],
keywords=kwargs,
),
optional_vars=_a.Name('context', ctx=ast.Store())
),
],
body=body,
    )

def create_reader(name, *args, format=None, registry=default_registry, **kwargs):
"""
    Create a reader instance, guessing its factory using the filename (and, optionally, the format).
:param name:
:param args:
:param format:
:param registry:
:param kwargs:
:return: mixed
"""
    return registry.get_reader_factory_for(name, format=format)(name, *args, **kwargs)

def init(FILE):
"""
Read config file
:param FILE: Absolute path to config file (incl. filename)
:type FILE: str
"""
try:
cfg.read(FILE)
global _loaded
_loaded = True
except:
file_not_found_message(FILE) | Read config file
:param FILE: Absolute path to config file (incl. filename)
:type FILE: str | Below is the the instruction that describes the task:
### Input:
Read config file
:param FILE: Absolute path to config file (incl. filename)
:type FILE: str
### Response:
def init(FILE):
"""
Read config file
:param FILE: Absolute path to config file (incl. filename)
:type FILE: str
"""
try:
cfg.read(FILE)
global _loaded
_loaded = True
except:
file_not_found_message(FILE) |
def after_third_friday(day=None):
""" check if day is after month's 3rd friday """
day = day if day is not None else datetime.datetime.now()
now = day.replace(day=1, hour=16, minute=0, second=0, microsecond=0)
now += relativedelta.relativedelta(weeks=2, weekday=relativedelta.FR)
return day > now | check if day is after month's 3rd friday | Below is the the instruction that describes the task:
### Input:
check if day is after month's 3rd friday
### Response:
def after_third_friday(day=None):
""" check if day is after month's 3rd friday """
day = day if day is not None else datetime.datetime.now()
now = day.replace(day=1, hour=16, minute=0, second=0, microsecond=0)
now += relativedelta.relativedelta(weeks=2, weekday=relativedelta.FR)
return day > now |
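A small usage sketch (illustrative only; the helper expects datetime and dateutil's relativedelta to be imported at module level):

import datetime
from dateutil import relativedelta   # used inside after_third_friday

# June 21, 2024 was that month's third Friday
print(after_third_friday(datetime.datetime(2024, 6, 20)))   # False -- before the third Friday, 16:00
print(after_third_friday(datetime.datetime(2024, 6, 22)))   # True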
def get_weights(self):
"""Returns a dictionary containing the weights of the network.
Returns:
Dictionary mapping variable names to their weights.
"""
self._check_sess()
return {
k: v.eval(session=self.sess)
for k, v in self.variables.items()
} | Returns a dictionary containing the weights of the network.
Returns:
Dictionary mapping variable names to their weights. | Below is the the instruction that describes the task:
### Input:
Returns a dictionary containing the weights of the network.
Returns:
Dictionary mapping variable names to their weights.
### Response:
def get_weights(self):
"""Returns a dictionary containing the weights of the network.
Returns:
Dictionary mapping variable names to their weights.
"""
self._check_sess()
return {
k: v.eval(session=self.sess)
for k, v in self.variables.items()
} |
def join_keys(x, y, by=None):
"""
Join keys.
Given two data frames, create a unique key for each row.
Parameters
-----------
x : dataframe
y : dataframe
by : list-like
Column names to join by
Returns
-------
out : dict
Dictionary with keys x and y. The values of both keys
are arrays with integer elements. Identical rows in
x and y dataframes would have the same key in the
output. The key elements start at 1.
"""
if by is None:
by = slice(None, None, None)
if isinstance(by, tuple):
by = list(by)
joint = x[by].append(y[by], ignore_index=True)
keys = ninteraction(joint, drop=True)
keys = np.asarray(keys)
nx, ny = len(x), len(y)
return {'x': keys[np.arange(nx)],
'y': keys[nx + np.arange(ny)]} | Join keys.
Given two data frames, create a unique key for each row.
Parameters
-----------
x : dataframe
y : dataframe
by : list-like
Column names to join by
Returns
-------
out : dict
Dictionary with keys x and y. The values of both keys
are arrays with integer elements. Identical rows in
x and y dataframes would have the same key in the
output. The key elements start at 1. | Below is the the instruction that describes the task:
### Input:
Join keys.
Given two data frames, create a unique key for each row.
Parameters
-----------
x : dataframe
y : dataframe
by : list-like
Column names to join by
Returns
-------
out : dict
Dictionary with keys x and y. The values of both keys
are arrays with integer elements. Identical rows in
x and y dataframes would have the same key in the
output. The key elements start at 1.
### Response:
def join_keys(x, y, by=None):
"""
Join keys.
Given two data frames, create a unique key for each row.
Parameters
-----------
x : dataframe
y : dataframe
by : list-like
Column names to join by
Returns
-------
out : dict
Dictionary with keys x and y. The values of both keys
are arrays with integer elements. Identical rows in
x and y dataframes would have the same key in the
output. The key elements start at 1.
"""
if by is None:
by = slice(None, None, None)
if isinstance(by, tuple):
by = list(by)
joint = x[by].append(y[by], ignore_index=True)
keys = ninteraction(joint, drop=True)
keys = np.asarray(keys)
nx, ny = len(x), len(y)
return {'x': keys[np.arange(nx)],
'y': keys[nx + np.arange(ny)]} |
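A hedged usage sketch: it assumes ninteraction is importable from the same module and a pandas version that still provides DataFrame.append (removed in pandas 2.0):

import pandas as pd

x = pd.DataFrame({'a': [1, 2, 2], 'b': ['p', 'q', 'q']})
y = pd.DataFrame({'a': [2, 3], 'b': ['q', 'r']})
keys = join_keys(x, y, by=['a', 'b'])
# Rows with identical ('a', 'b') values get the same integer key, so here
# keys['x'][1] == keys['x'][2] == keys['y'][0] (all correspond to the row (2, 'q')).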
def datagram_received(self, data, addr):
"""Method run when data is received from the devices
This method will unpack the data according to the LIFX protocol.
    If a new device is found, the Light device will be created and started as
a DatagramProtocol and will be registered with the parent.
:param data: raw data
:type data: bytestring
:param addr: sender IP address 2-tuple for IPv4, 4-tuple for IPv6
:type addr: tuple
"""
response = unpack_lifx_message(data)
response.ip_addr = addr[0]
mac_addr = response.target_addr
if mac_addr == BROADCAST_MAC:
return
if type(response) == StateService and response.service == 1: # only look for UDP services
# discovered
remote_port = response.port
elif type(response) == LightState:
            # looks like the lights are volunteering LightState after booting
remote_port = UDP_BROADCAST_PORT
else:
return
if self.ipv6prefix:
family = socket.AF_INET6
remote_ip = mac_to_ipv6_linklocal(mac_addr, self.ipv6prefix)
else:
family = socket.AF_INET
remote_ip = response.ip_addr
if mac_addr in self.lights:
# rediscovered
light = self.lights[mac_addr]
# nothing to do
if light.registered:
return
light.cleanup()
light.ip_addr = remote_ip
light.port = remote_port
else:
# newly discovered
light = Light(self.loop, mac_addr, remote_ip, remote_port, parent=self)
self.lights[mac_addr] = light
coro = self.loop.create_datagram_endpoint(
lambda: light, family=family, remote_addr=(remote_ip, remote_port))
light.task = self.loop.create_task(coro) | Method run when data is received from the devices
This method will unpack the data according to the LIFX protocol.
If a new device is found, the Light device will be created and started as
a DatagramProtocol and will be registered with the parent.
:param data: raw data
:type data: bytestring
:param addr: sender IP address 2-tuple for IPv4, 4-tuple for IPv6
:type addr: tuple | Below is the the instruction that describes the task:
### Input:
Method run when data is received from the devices
This method will unpack the data according to the LIFX protocol.
If a new device is found, the Light device will be created and started as
a DatagramProtocol and will be registered with the parent.
:param data: raw data
:type data: bytestring
:param addr: sender IP address 2-tuple for IPv4, 4-tuple for IPv6
:type addr: tuple
### Response:
def datagram_received(self, data, addr):
"""Method run when data is received from the devices
This method will unpack the data according to the LIFX protocol.
    If a new device is found, the Light device will be created and started as
a DatagramProtocol and will be registered with the parent.
:param data: raw data
:type data: bytestring
:param addr: sender IP address 2-tuple for IPv4, 4-tuple for IPv6
:type addr: tuple
"""
response = unpack_lifx_message(data)
response.ip_addr = addr[0]
mac_addr = response.target_addr
if mac_addr == BROADCAST_MAC:
return
if type(response) == StateService and response.service == 1: # only look for UDP services
# discovered
remote_port = response.port
elif type(response) == LightState:
            # looks like the lights are volunteering LightState after booting
remote_port = UDP_BROADCAST_PORT
else:
return
if self.ipv6prefix:
family = socket.AF_INET6
remote_ip = mac_to_ipv6_linklocal(mac_addr, self.ipv6prefix)
else:
family = socket.AF_INET
remote_ip = response.ip_addr
if mac_addr in self.lights:
# rediscovered
light = self.lights[mac_addr]
# nothing to do
if light.registered:
return
light.cleanup()
light.ip_addr = remote_ip
light.port = remote_port
else:
# newly discovered
light = Light(self.loop, mac_addr, remote_ip, remote_port, parent=self)
self.lights[mac_addr] = light
coro = self.loop.create_datagram_endpoint(
lambda: light, family=family, remote_addr=(remote_ip, remote_port))
light.task = self.loop.create_task(coro) |
def _get_canonical_map(self):
"""Return a mapping of available command names and aliases to
their canonical command name.
"""
cacheattr = "_token2canonical"
if not hasattr(self, cacheattr):
# Get the list of commands and their aliases, if any.
token2canonical = {}
cmd2funcname = {} # use a dict to strip duplicates
for attr in self.get_names():
if attr.startswith("do_"): cmdname = attr[3:]
elif attr.startswith("_do_"): cmdname = attr[4:]
else:
continue
cmd2funcname[cmdname] = attr
token2canonical[cmdname] = cmdname
for cmdname, funcname in cmd2funcname.items(): # add aliases
func = getattr(self, funcname)
aliases = getattr(func, "aliases", [])
for alias in aliases:
if alias in cmd2funcname:
import warnings
warnings.warn("'%s' alias for '%s' command conflicts "
"with '%s' handler" %
(alias, cmdname, cmd2funcname[alias]))
continue
token2canonical[alias] = cmdname
setattr(self, cacheattr, token2canonical)
return getattr(self, cacheattr) | Return a mapping of available command names and aliases to
their canonical command name. | Below is the the instruction that describes the task:
### Input:
Return a mapping of available command names and aliases to
their canonical command name.
### Response:
def _get_canonical_map(self):
"""Return a mapping of available command names and aliases to
their canonical command name.
"""
cacheattr = "_token2canonical"
if not hasattr(self, cacheattr):
# Get the list of commands and their aliases, if any.
token2canonical = {}
cmd2funcname = {} # use a dict to strip duplicates
for attr in self.get_names():
if attr.startswith("do_"): cmdname = attr[3:]
elif attr.startswith("_do_"): cmdname = attr[4:]
else:
continue
cmd2funcname[cmdname] = attr
token2canonical[cmdname] = cmdname
for cmdname, funcname in cmd2funcname.items(): # add aliases
func = getattr(self, funcname)
aliases = getattr(func, "aliases", [])
for alias in aliases:
if alias in cmd2funcname:
import warnings
warnings.warn("'%s' alias for '%s' command conflicts "
"with '%s' handler" %
(alias, cmdname, cmd2funcname[alias]))
continue
token2canonical[alias] = cmdname
setattr(self, cacheattr, token2canonical)
return getattr(self, cacheattr) |
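A minimal sketch of how this helper behaves on a cmd.Cmd subclass (illustrative only; MiniShell and its quit command are invented for the example):

import cmd

class MiniShell(cmd.Cmd):
    _get_canonical_map = _get_canonical_map   # reuse the helper defined above
    def do_quit(self, arg):
        return True
    do_quit.aliases = ['exit', 'q']           # picked up via the function's "aliases" attribute

print(MiniShell()._get_canonical_map())
# e.g. {'quit': 'quit', 'help': 'help', 'exit': 'quit', 'q': 'quit'} (do_help comes from cmd.Cmd)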
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, six.string_types):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name | Yield names and strings used by `code` and its nested code objects | Below is the the instruction that describes the task:
### Input:
Yield names and strings used by `code` and its nested code objects
### Response:
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, six.string_types):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name |
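A quick demonstration (assumes six is installed; the helper also needs CodeType from the types module):

import six                     # iter_symbols uses six.string_types
from types import CodeType     # and CodeType for nested code objects

code = compile("greeting = 'hello'\nprint(len(greeting))", '<demo>', 'exec')
print(sorted(set(iter_symbols(code))))
# ['greeting', 'hello', 'len', 'print'] -- co_names plus string constants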
def get_port_chann_detail_request(last_aggregator_id):
""" Creates a new Netconf request based on the last received
aggregator id when the hasMore flag is true
"""
port_channel_ns = 'urn:brocade.com:mgmt:brocade-lag'
request_port_channel = ET.Element('get-port-channel-detail',
xmlns=port_channel_ns)
if last_aggregator_id != '':
last_received_port_chann_el = ET.SubElement(request_port_channel,
"last-aggregator-id")
last_received_port_chann_el.text = last_aggregator_id
return request_port_channel | Creates a new Netconf request based on the last received
aggregator id when the hasMore flag is true | Below is the the instruction that describes the task:
### Input:
Creates a new Netconf request based on the last received
aggregator id when the hasMore flag is true
### Response:
def get_port_chann_detail_request(last_aggregator_id):
""" Creates a new Netconf request based on the last received
aggregator id when the hasMore flag is true
"""
port_channel_ns = 'urn:brocade.com:mgmt:brocade-lag'
request_port_channel = ET.Element('get-port-channel-detail',
xmlns=port_channel_ns)
if last_aggregator_id != '':
last_received_port_chann_el = ET.SubElement(request_port_channel,
"last-aggregator-id")
last_received_port_chann_el.text = last_aggregator_id
return request_port_channel |
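An illustrative call (the aggregator id is a made-up value; ET is assumed to be xml.etree.ElementTree, as the snippet implies):

import xml.etree.ElementTree as ET

req = get_port_chann_detail_request('aggregator-7')
print(ET.tostring(req).decode())
# <get-port-channel-detail xmlns="urn:brocade.com:mgmt:brocade-lag"><last-aggregator-id>aggregator-7</last-aggregator-id></get-port-channel-detail>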
def thermal_conductivity(self, temperature, volume):
"""
Eq(17) in 10.1103/PhysRevB.90.174107
Args:
temperature (float): temperature in K
volume (float): in Ang^3
Returns:
float: thermal conductivity in W/K/m
"""
gamma = self.gruneisen_parameter(temperature, volume)
theta_d = self.debye_temperature(volume) # K
theta_a = theta_d * self.natoms**(-1./3.) # K
prefactor = (0.849 * 3 * 4**(1./3.)) / (20. * np.pi**3)
# kg/K^3/s^3
prefactor = prefactor * (self.kb/self.hbar)**3 * self.avg_mass
kappa = prefactor / (gamma**2 - 0.514 * gamma + 0.228)
# kg/K/s^3 * Ang = (kg m/s^2)/(Ks)*1e-10
# = N/(Ks)*1e-10 = Nm/(Kms)*1e-10 = W/K/m*1e-10
kappa = kappa * theta_a**2 * volume**(1./3.) * 1e-10
return kappa | Eq(17) in 10.1103/PhysRevB.90.174107
Args:
temperature (float): temperature in K
volume (float): in Ang^3
Returns:
float: thermal conductivity in W/K/m | Below is the the instruction that describes the task:
### Input:
Eq(17) in 10.1103/PhysRevB.90.174107
Args:
temperature (float): temperature in K
volume (float): in Ang^3
Returns:
float: thermal conductivity in W/K/m
### Response:
def thermal_conductivity(self, temperature, volume):
"""
Eq(17) in 10.1103/PhysRevB.90.174107
Args:
temperature (float): temperature in K
volume (float): in Ang^3
Returns:
float: thermal conductivity in W/K/m
"""
gamma = self.gruneisen_parameter(temperature, volume)
theta_d = self.debye_temperature(volume) # K
theta_a = theta_d * self.natoms**(-1./3.) # K
prefactor = (0.849 * 3 * 4**(1./3.)) / (20. * np.pi**3)
# kg/K^3/s^3
prefactor = prefactor * (self.kb/self.hbar)**3 * self.avg_mass
kappa = prefactor / (gamma**2 - 0.514 * gamma + 0.228)
# kg/K/s^3 * Ang = (kg m/s^2)/(Ks)*1e-10
# = N/(Ks)*1e-10 = Nm/(Kms)*1e-10 = W/K/m*1e-10
kappa = kappa * theta_a**2 * volume**(1./3.) * 1e-10
return kappa |
def _UnpackGdbVal(self, gdb_value):
"""Unpacks gdb.Value objects and returns the best-matched python object."""
val_type = gdb_value.type.code
if val_type == gdb.TYPE_CODE_INT or val_type == gdb.TYPE_CODE_ENUM:
return int(gdb_value)
if val_type == gdb.TYPE_CODE_VOID:
return None
if val_type == gdb.TYPE_CODE_PTR:
return long(gdb_value)
if val_type == gdb.TYPE_CODE_ARRAY:
# This is probably a string
return str(gdb_value)
# I'm out of ideas, let's return it as a string
return str(gdb_value) | Unpacks gdb.Value objects and returns the best-matched python object. | Below is the the instruction that describes the task:
### Input:
Unpacks gdb.Value objects and returns the best-matched python object.
### Response:
def _UnpackGdbVal(self, gdb_value):
"""Unpacks gdb.Value objects and returns the best-matched python object."""
val_type = gdb_value.type.code
if val_type == gdb.TYPE_CODE_INT or val_type == gdb.TYPE_CODE_ENUM:
return int(gdb_value)
if val_type == gdb.TYPE_CODE_VOID:
return None
if val_type == gdb.TYPE_CODE_PTR:
return long(gdb_value)
if val_type == gdb.TYPE_CODE_ARRAY:
# This is probably a string
return str(gdb_value)
# I'm out of ideas, let's return it as a string
return str(gdb_value) |
def pygal_parser(preprocessor, tag, markup):
""" Simple pygal parser """
# Find JSON payload
data = loads(markup)
if tag == 'pygal' and data is not None:
# Run generation of chart
output = run_pygal(data)
# Return embedded SVG image
return '<div class="pygal" style="text-align: center;"><embed type="image/svg+xml" src=%s style="max-width:1000px"/></div>' % output
else:
raise ValueError('Error processing input. \nExpected syntax: {0}'.format(SYNTAX)) | Simple pygal parser | Below is the the instruction that describes the task:
### Input:
Simple pygal parser
### Response:
def pygal_parser(preprocessor, tag, markup):
""" Simple pygal parser """
# Find JSON payload
data = loads(markup)
if tag == 'pygal' and data is not None:
# Run generation of chart
output = run_pygal(data)
# Return embedded SVG image
return '<div class="pygal" style="text-align: center;"><embed type="image/svg+xml" src=%s style="max-width:1000px"/></div>' % output
else:
raise ValueError('Error processing input. \nExpected syntax: {0}'.format(SYNTAX)) |
def remove(self, node, dirty=True):
"""Remove the given child node.
Args:
node (gkeepapi.Node): Node to remove.
dirty (bool): Whether this node should be marked dirty.
"""
if node.id in self._children:
self._children[node.id].parent = None
del self._children[node.id]
if dirty:
self.touch() | Remove the given child node.
Args:
node (gkeepapi.Node): Node to remove.
dirty (bool): Whether this node should be marked dirty. | Below is the the instruction that describes the task:
### Input:
Remove the given child node.
Args:
node (gkeepapi.Node): Node to remove.
dirty (bool): Whether this node should be marked dirty.
### Response:
def remove(self, node, dirty=True):
"""Remove the given child node.
Args:
node (gkeepapi.Node): Node to remove.
dirty (bool): Whether this node should be marked dirty.
"""
if node.id in self._children:
self._children[node.id].parent = None
del self._children[node.id]
if dirty:
self.touch() |
def compile_template_str(template, renderers, default, blacklist, whitelist):
'''
Take template as a string and return the high data structure
derived from the template.
'''
fn_ = salt.utils.files.mkstemp()
with salt.utils.files.fopen(fn_, 'wb') as ofile:
ofile.write(SLS_ENCODER(template)[0])
return compile_template(fn_, renderers, default, blacklist, whitelist) | Take template as a string and return the high data structure
derived from the template. | Below is the the instruction that describes the task:
### Input:
Take template as a string and return the high data structure
derived from the template.
### Response:
def compile_template_str(template, renderers, default, blacklist, whitelist):
'''
Take template as a string and return the high data structure
derived from the template.
'''
fn_ = salt.utils.files.mkstemp()
with salt.utils.files.fopen(fn_, 'wb') as ofile:
ofile.write(SLS_ENCODER(template)[0])
return compile_template(fn_, renderers, default, blacklist, whitelist) |
def _auto_commit(self):
"""
Check if we have to commit based on number of messages and commit
"""
# Check if we are supposed to do an auto-commit
if not self.auto_commit or self.auto_commit_every_n is None:
return
if self.count_since_commit >= self.auto_commit_every_n:
self.commit() | Check if we have to commit based on number of messages and commit | Below is the the instruction that describes the task:
### Input:
Check if we have to commit based on number of messages and commit
### Response:
def _auto_commit(self):
"""
Check if we have to commit based on number of messages and commit
"""
# Check if we are supposed to do an auto-commit
if not self.auto_commit or self.auto_commit_every_n is None:
return
if self.count_since_commit >= self.auto_commit_every_n:
self.commit() |
def delete_process_behavior(self, process_id, behavior_ref_name):
"""DeleteProcessBehavior.
[Preview API] Removes a behavior in the process.
:param str process_id: The ID of the process
:param str behavior_ref_name: The reference name of the behavior
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if behavior_ref_name is not None:
route_values['behaviorRefName'] = self._serialize.url('behavior_ref_name', behavior_ref_name, 'str')
self._send(http_method='DELETE',
location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e',
version='5.0-preview.2',
route_values=route_values) | DeleteProcessBehavior.
[Preview API] Removes a behavior in the process.
:param str process_id: The ID of the process
:param str behavior_ref_name: The reference name of the behavior | Below is the the instruction that describes the task:
### Input:
DeleteProcessBehavior.
[Preview API] Removes a behavior in the process.
:param str process_id: The ID of the process
:param str behavior_ref_name: The reference name of the behavior
### Response:
def delete_process_behavior(self, process_id, behavior_ref_name):
"""DeleteProcessBehavior.
[Preview API] Removes a behavior in the process.
:param str process_id: The ID of the process
:param str behavior_ref_name: The reference name of the behavior
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if behavior_ref_name is not None:
route_values['behaviorRefName'] = self._serialize.url('behavior_ref_name', behavior_ref_name, 'str')
self._send(http_method='DELETE',
location_id='d1800200-f184-4e75-a5f2-ad0b04b4373e',
version='5.0-preview.2',
route_values=route_values) |
def match(line, keyword):
""" If the first part of line (modulo blanks) matches keyword,
returns the end of that line. Otherwise checks if keyword is
anywhere in the line and returns that section, else returns None"""
line = line.lstrip()
length = len(keyword)
if line[:length] == keyword:
return line[length:]
else:
if keyword in line:
return line[line.index(keyword):]
else:
return None | If the first part of line (modulo blanks) matches keyword,
returns the end of that line. Otherwise checks if keyword is
anywhere in the line and returns that section, else returns None | Below is the the instruction that describes the task:
### Input:
If the first part of line (modulo blanks) matches keyword,
returns the end of that line. Otherwise checks if keyword is
anywhere in the line and returns that section, else returns None
### Response:
def match(line, keyword):
""" If the first part of line (modulo blanks) matches keyword,
returns the end of that line. Otherwise checks if keyword is
anywhere in the line and returns that section, else returns None"""
line = line.lstrip()
length = len(keyword)
if line[:length] == keyword:
return line[length:]
else:
if keyword in line:
return line[line.index(keyword):]
else:
return None |
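A few illustrative calls (plain Python, no dependencies):

print(match("  import os", "import"))           # returns ' os'          (keyword is the prefix)
print(match("from os import path", "import"))   # returns 'import path'  (keyword found mid-line)
print(match("x = 1", "import"))                 # returns None           (keyword absent)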
def auth_access(self, auth_code):
"""
        verify the first authorization response url code
response data
        Response fields (field name, field type, description):
        access_token string The unique ticket of the user's authorization, used to call Weibo's open APIs; it is also the only
        credential a third-party application may use to verify that a Weibo user is logged in. The application should map this
        ticket to its own user accounts to track login state, and must not use the UID field of this response for that purpose.
        expires_in string Lifetime of the access_token, in seconds.
        remind_in string Lifetime of the access_token (this parameter will soon be deprecated; developers should use expires_in).
        uid string UID of the authorized user, returned only as a convenience to save one call to the user/show API; third-party
        applications must not use it to identify login state, since only the access_token is the unique authorization ticket.
:param auth_code: authorize_url response code
:return:
normal:
{
"access_token": "ACCESS_TOKEN",
"expires_in": 1234,
"remind_in":"798114",
"uid":"12341234"
}
mobile:
{
"access_token": "SlAV32hkKG",
"remind_in": 3600,
"expires_in": 3600
"refresh_token": "QXBK19xm62"
}
"""
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'code': auth_code,
'redirect_uri': self.redirect_url
}
        return self.request("post", "access_token", data=data) | verify the first authorization response url code
response data
    Response fields (field name, field type, description):
    access_token string The unique ticket of the user's authorization, used to call Weibo's open APIs; it is also the only
    credential a third-party application may use to verify that a Weibo user is logged in. The application should map this
    ticket to its own user accounts to track login state, and must not use the UID field of this response for that purpose.
    expires_in string Lifetime of the access_token, in seconds.
    remind_in string Lifetime of the access_token (this parameter will soon be deprecated; developers should use expires_in).
    uid string UID of the authorized user, returned only as a convenience to save one call to the user/show API; third-party
    applications must not use it to identify login state, since only the access_token is the unique authorization ticket.
:param auth_code: authorize_url response code
:return:
normal:
{
"access_token": "ACCESS_TOKEN",
"expires_in": 1234,
"remind_in":"798114",
"uid":"12341234"
}
mobile:
{
"access_token": "SlAV32hkKG",
"remind_in": 3600,
"expires_in": 3600
"refresh_token": "QXBK19xm62"
} | Below is the the instruction that describes the task:
### Input:
verify the first authorization response url code
response data
Response fields (field name, field type, description):
access_token string The unique ticket of the user's authorization, used to call Weibo's open APIs; it is also the only
credential a third-party application may use to verify that a Weibo user is logged in. The application should map this
ticket to its own user accounts to track login state, and must not use the UID field of this response for that purpose.
expires_in string Lifetime of the access_token, in seconds.
remind_in string Lifetime of the access_token (this parameter will soon be deprecated; developers should use expires_in).
uid string UID of the authorized user, returned only as a convenience to save one call to the user/show API; third-party
applications must not use it to identify login state, since only the access_token is the unique authorization ticket.
:param auth_code: authorize_url response code
:return:
normal:
{
"access_token": "ACCESS_TOKEN",
"expires_in": 1234,
"remind_in":"798114",
"uid":"12341234"
}
mobile:
{
"access_token": "SlAV32hkKG",
"remind_in": 3600,
"expires_in": 3600
"refresh_token": "QXBK19xm62"
}
### Response:
def auth_access(self, auth_code):
"""
        verify the first authorization response url code
response data
        Response fields (field name, field type, description):
        access_token string The unique ticket of the user's authorization, used to call Weibo's open APIs; it is also the only
        credential a third-party application may use to verify that a Weibo user is logged in. The application should map this
        ticket to its own user accounts to track login state, and must not use the UID field of this response for that purpose.
        expires_in string Lifetime of the access_token, in seconds.
        remind_in string Lifetime of the access_token (this parameter will soon be deprecated; developers should use expires_in).
        uid string UID of the authorized user, returned only as a convenience to save one call to the user/show API; third-party
        applications must not use it to identify login state, since only the access_token is the unique authorization ticket.
:param auth_code: authorize_url response code
:return:
normal:
{
"access_token": "ACCESS_TOKEN",
"expires_in": 1234,
"remind_in":"798114",
"uid":"12341234"
}
mobile:
{
"access_token": "SlAV32hkKG",
"remind_in": 3600,
"expires_in": 3600
"refresh_token": "QXBK19xm62"
}
"""
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'code': auth_code,
'redirect_uri': self.redirect_url
}
return self.request("post", "access_token", data=data) |
def f_get_parent(self):
"""Returns the parent of the node.
Raises a TypeError if current node is root.
"""
if self.v_is_root:
raise TypeError('Root does not have a parent')
elif self.v_location == '':
return self.v_root
else:
return self.v_root.f_get(self.v_location, fast_access=False, shortcuts=False) | Returns the parent of the node.
Raises a TypeError if current node is root. | Below is the the instruction that describes the task:
### Input:
Returns the parent of the node.
Raises a TypeError if current node is root.
### Response:
def f_get_parent(self):
"""Returns the parent of the node.
Raises a TypeError if current node is root.
"""
if self.v_is_root:
raise TypeError('Root does not have a parent')
elif self.v_location == '':
return self.v_root
else:
return self.v_root.f_get(self.v_location, fast_access=False, shortcuts=False) |
def merge_ticket(self, ticket_id, into_id):
""" Merge ticket into another (undocumented API feature).
:param ticket_id: ID of ticket to be merged
:param into: ID of destination ticket
:returns: ``True``
Operation was successful
``False``
Either origin or destination ticket does not
exist or user does not have ModifyTicket permission.
"""
msg = self.__request('ticket/{}/merge/{}'.format(str(ticket_id),
str(into_id)))
state = msg.split('\n')[2]
return self.RE_PATTERNS['merge_successful_pattern'].match(state) is not None | Merge ticket into another (undocumented API feature).
:param ticket_id: ID of ticket to be merged
:param into: ID of destination ticket
:returns: ``True``
Operation was successful
``False``
Either origin or destination ticket does not
exist or user does not have ModifyTicket permission. | Below is the the instruction that describes the task:
### Input:
Merge ticket into another (undocumented API feature).
:param ticket_id: ID of ticket to be merged
:param into: ID of destination ticket
:returns: ``True``
Operation was successful
``False``
Either origin or destination ticket does not
exist or user does not have ModifyTicket permission.
### Response:
def merge_ticket(self, ticket_id, into_id):
""" Merge ticket into another (undocumented API feature).
:param ticket_id: ID of ticket to be merged
:param into: ID of destination ticket
:returns: ``True``
Operation was successful
``False``
Either origin or destination ticket does not
exist or user does not have ModifyTicket permission.
"""
msg = self.__request('ticket/{}/merge/{}'.format(str(ticket_id),
str(into_id)))
state = msg.split('\n')[2]
return self.RE_PATTERNS['merge_successful_pattern'].match(state) is not None |
def clear_created_date(self):
"""Removes the created date.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.clear_start_time_template
if (self.get_created_date_metadata().is_read_only() or
self.get_created_date_metadata().is_required()):
raise errors.NoAccess()
self._my_map['createdDate'] = self._created_date_default | Removes the created date.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Removes the created date.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
### Response:
def clear_created_date(self):
"""Removes the created date.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.assessment.AssessmentOfferedForm.clear_start_time_template
if (self.get_created_date_metadata().is_read_only() or
self.get_created_date_metadata().is_required()):
raise errors.NoAccess()
self._my_map['createdDate'] = self._created_date_default |
def fake_chars_or_choice(self, field_name):
"""
Return fake chars or choice it if the `field_name` has choices.
Then, returning random value from it.
This specially for `CharField`.
Usage:
faker.fake_chars_or_choice('field_name')
Example for field:
TYPE_CHOICES = (
('project', 'I wanna to talk about project'),
('feedback', 'I want to report a bugs or give feedback'),
('hello', 'I just want to say hello')
)
type = models.CharField(max_length=200, choices=TYPE_CHOICES)
"""
return self.djipsum_fields().randomCharField(
self.model_class(),
field_name=field_name
) | Return fake chars or choice it if the `field_name` has choices.
Then, returning random value from it.
This specially for `CharField`.
Usage:
faker.fake_chars_or_choice('field_name')
Example for field:
TYPE_CHOICES = (
('project', 'I wanna to talk about project'),
('feedback', 'I want to report a bugs or give feedback'),
('hello', 'I just want to say hello')
)
type = models.CharField(max_length=200, choices=TYPE_CHOICES) | Below is the the instruction that describes the task:
### Input:
Return fake chars or choice it if the `field_name` has choices.
Then, returning random value from it.
This specially for `CharField`.
Usage:
faker.fake_chars_or_choice('field_name')
Example for field:
TYPE_CHOICES = (
('project', 'I wanna to talk about project'),
('feedback', 'I want to report a bugs or give feedback'),
('hello', 'I just want to say hello')
)
type = models.CharField(max_length=200, choices=TYPE_CHOICES)
### Response:
def fake_chars_or_choice(self, field_name):
"""
Return fake chars or choice it if the `field_name` has choices.
Then, returning random value from it.
This specially for `CharField`.
Usage:
faker.fake_chars_or_choice('field_name')
Example for field:
TYPE_CHOICES = (
('project', 'I wanna to talk about project'),
('feedback', 'I want to report a bugs or give feedback'),
('hello', 'I just want to say hello')
)
type = models.CharField(max_length=200, choices=TYPE_CHOICES)
"""
return self.djipsum_fields().randomCharField(
self.model_class(),
field_name=field_name
) |
def owned_pre_save(sender, document, **kwargs):
'''
Owned mongoengine.pre_save signal handler
    Need to fetch the original owner before the new one erases it.
'''
if not isinstance(document, Owned):
return
changed_fields = getattr(document, '_changed_fields', [])
if 'organization' in changed_fields:
if document.owner:
# Change from owner to organization
document._previous_owner = document.owner
document.owner = None
else:
# Change from org to another
# Need to fetch previous value in base
original = sender.objects.only('organization').get(pk=document.pk)
document._previous_owner = original.organization
elif 'owner' in changed_fields:
if document.organization:
# Change from organization to owner
document._previous_owner = document.organization
document.organization = None
else:
# Change from owner to another
# Need to fetch previous value in base
original = sender.objects.only('owner').get(pk=document.pk)
document._previous_owner = original.owner | Owned mongoengine.pre_save signal handler
    Need to fetch the original owner before the new one erases it.
### Input:
Owned mongoengine.pre_save signal handler
    Need to fetch the original owner before the new one erases it.
### Response:
def owned_pre_save(sender, document, **kwargs):
'''
Owned mongoengine.pre_save signal handler
    Need to fetch the original owner before the new one erases it.
'''
if not isinstance(document, Owned):
return
changed_fields = getattr(document, '_changed_fields', [])
if 'organization' in changed_fields:
if document.owner:
# Change from owner to organization
document._previous_owner = document.owner
document.owner = None
else:
# Change from org to another
# Need to fetch previous value in base
original = sender.objects.only('organization').get(pk=document.pk)
document._previous_owner = original.organization
elif 'owner' in changed_fields:
if document.organization:
# Change from organization to owner
document._previous_owner = document.organization
document.organization = None
else:
# Change from owner to another
# Need to fetch previous value in base
original = sender.objects.only('owner').get(pk=document.pk)
document._previous_owner = original.owner |
def list_app(self):
'''
List the apps.
'''
kwd = {
'pager': '',
'title': ''
}
self.render('user/info_list/list_app.html', kwd=kwd,
userinfo=self.userinfo) | List the apps. | Below is the the instruction that describes the task:
### Input:
List the apps.
### Response:
def list_app(self):
'''
List the apps.
'''
kwd = {
'pager': '',
'title': ''
}
self.render('user/info_list/list_app.html', kwd=kwd,
userinfo=self.userinfo) |
def scan_index(index, model):
"""
Yield all documents of model type in an index.
This function calls the elasticsearch.helpers.scan function,
and yields all the documents in the index that match the doc_type
produced by a specific Django model.
Args:
index: string, the name of the index to scan, must be a configured
index as returned from settings.get_index_names.
        model: a Django model type, used to filter the documents that
are scanned.
Yields each document of type model in index, one at a time.
"""
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html
query = {"query": {"type": {"value": model._meta.model_name}}}
client = get_client()
for hit in helpers.scan(client, index=index, query=query):
yield hit | Yield all documents of model type in an index.
This function calls the elasticsearch.helpers.scan function,
and yields all the documents in the index that match the doc_type
produced by a specific Django model.
Args:
index: string, the name of the index to scan, must be a configured
index as returned from settings.get_index_names.
    model: a Django model type, used to filter the documents that
are scanned.
Yields each document of type model in index, one at a time. | Below is the the instruction that describes the task:
### Input:
Yield all documents of model type in an index.
This function calls the elasticsearch.helpers.scan function,
and yields all the documents in the index that match the doc_type
produced by a specific Django model.
Args:
index: string, the name of the index to scan, must be a configured
index as returned from settings.get_index_names.
    model: a Django model type, used to filter the documents that
are scanned.
Yields each document of type model in index, one at a time.
### Response:
def scan_index(index, model):
"""
Yield all documents of model type in an index.
This function calls the elasticsearch.helpers.scan function,
and yields all the documents in the index that match the doc_type
produced by a specific Django model.
Args:
index: string, the name of the index to scan, must be a configured
index as returned from settings.get_index_names.
        model: a Django model type, used to filter the documents that
are scanned.
Yields each document of type model in index, one at a time.
"""
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html
query = {"query": {"type": {"value": model._meta.model_name}}}
client = get_client()
for hit in helpers.scan(client, index=index, query=query):
yield hit |
def shared_atts(self):
"""Gets atts shared among all nonzero length component Chunk"""
#TODO cache this, could get ugly for large FmtStrs
atts = {}
first = self.chunks[0]
for att in sorted(first.atts):
#TODO how to write this without the '???'?
if all(fs.atts.get(att, '???') == first.atts[att] for fs in self.chunks if len(fs) > 0):
atts[att] = first.atts[att]
return atts | Gets atts shared among all nonzero length component Chunk | Below is the the instruction that describes the task:
### Input:
Gets atts shared among all nonzero length component Chunk
### Response:
def shared_atts(self):
"""Gets atts shared among all nonzero length component Chunk"""
#TODO cache this, could get ugly for large FmtStrs
atts = {}
first = self.chunks[0]
for att in sorted(first.atts):
#TODO how to write this without the '???'?
if all(fs.atts.get(att, '???') == first.atts[att] for fs in self.chunks if len(fs) > 0):
atts[att] = first.atts[att]
return atts |
def build_srcdict(gta, prop):
"""Build a dictionary that maps from source name to the value of a source property
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
prop : str
The name of the property we are mapping
Returns
-------
odict : dict
Dictionary that maps from source name to the value of the specified property
"""
o = {}
for s in gta.roi.sources:
o[s.name] = s[prop]
return o | Build a dictionary that maps from source name to the value of a source property
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
prop : str
The name of the property we are mapping
Returns
-------
odict : dict
Dictionary that maps from source name to the value of the specified property | Below is the the instruction that describes the task:
### Input:
Build a dictionary that maps from source name to the value of a source property
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
prop : str
The name of the property we are mapping
Returns
-------
odict : dict
Dictionary that maps from source name to the value of the specified property
### Response:
def build_srcdict(gta, prop):
"""Build a dictionary that maps from source name to the value of a source property
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
prop : str
The name of the property we are mapping
Returns
-------
odict : dict
Dictionary that maps from source name to the value of the specified property
"""
o = {}
for s in gta.roi.sources:
o[s.name] = s[prop]
return o |
def parse_san(self, board: chess.Board, san: str) -> chess.Move:
"""
When the visitor is used by a parser, this is called to parse a move
in standard algebraic notation.
You can override the default implementation to work around specific
quirks of your input format.
"""
# Replace zeros with correct castling notation.
if san == "0-0":
san = "O-O"
elif san == "0-0-0":
san = "O-O-O"
return board.parse_san(san) | When the visitor is used by a parser, this is called to parse a move
in standard algebraic notation.
You can override the default implementation to work around specific
quirks of your input format. | Below is the the instruction that describes the task:
### Input:
When the visitor is used by a parser, this is called to parse a move
in standard algebraic notation.
You can override the default implementation to work around specific
quirks of your input format.
### Response:
def parse_san(self, board: chess.Board, san: str) -> chess.Move:
"""
When the visitor is used by a parser, this is called to parse a move
in standard algebraic notation.
You can override the default implementation to work around specific
quirks of your input format.
"""
# Replace zeros with correct castling notation.
if san == "0-0":
san = "O-O"
elif san == "0-0-0":
san = "O-O-O"
return board.parse_san(san) |
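A short, hedged demonstration with python-chess (the FEN is an arbitrary castling position; since self is unused in this implementation, the method is exercised as a plain function here):

import chess

board = chess.Board("r3k2r/8/8/8/8/8/8/R3K2R w KQkq - 0 1")
# "0-0" is normalized to "O-O" before being handed to board.parse_san
print(parse_san(None, board, "0-0"))   # e1g1 -- kingside castling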
def visit_ifexp(self, node):
"""return an astroid.IfExp node as string"""
return "%s if %s else %s" % (
self._precedence_parens(node, node.body, is_left=True),
self._precedence_parens(node, node.test, is_left=True),
self._precedence_parens(node, node.orelse, is_left=False),
) | return an astroid.IfExp node as string | Below is the the instruction that describes the task:
### Input:
return an astroid.IfExp node as string
### Response:
def visit_ifexp(self, node):
"""return an astroid.IfExp node as string"""
return "%s if %s else %s" % (
self._precedence_parens(node, node.body, is_left=True),
self._precedence_parens(node, node.test, is_left=True),
self._precedence_parens(node, node.orelse, is_left=False),
) |
def p_if_then_part(p):
""" if_then_part : IF expr then """
if is_number(p[2]):
api.errmsg.warning_condition_is_always(p.lineno(1), bool(p[2].value))
p[0] = p[2] | if_then_part : IF expr then | Below is the the instruction that describes the task:
### Input:
if_then_part : IF expr then
### Response:
def p_if_then_part(p):
""" if_then_part : IF expr then """
if is_number(p[2]):
api.errmsg.warning_condition_is_always(p.lineno(1), bool(p[2].value))
p[0] = p[2] |
def validate_model_parameters(self, algo, training_frame, parameters, timeoutSecs=60, **kwargs):
'''
Check a dictionary of model builder parameters on the h2o cluster
using the given algorithm and model parameters.
'''
assert algo is not None, '"algo" parameter is null'
# Allow this now: assert training_frame is not None, '"training_frame" parameter is null'
assert parameters is not None, '"parameters" parameter is null'
model_builders = self.model_builders(timeoutSecs=timeoutSecs)
assert model_builders is not None, "/ModelBuilders REST call failed"
assert algo in model_builders['model_builders']
builder = model_builders['model_builders'][algo]
# TODO: test this assert, I don't think this is working. . .
if training_frame is not None:
frames = self.frames(key=training_frame)
assert frames is not None, "/Frames/{0} REST call failed".format(training_frame)
key_name = frames['frames'][0]['key']['name']
assert key_name==training_frame, \
"/Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, key_name, training_frame)
parameters['training_frame'] = training_frame
# TODO: add parameter existence checks
# TODO: add parameter value validation
# FIX! why ignoreH2oError here?
result = self.do_json_request('/3/ModelBuilders.json/' + algo + "/parameters", cmd='post',
timeout=timeoutSecs, postData=parameters, ignoreH2oError=True, noExtraErrorCheck=True)
verboseprint("model parameters validation: " + repr(result))
return result | Check a dictionary of model builder parameters on the h2o cluster
using the given algorithm and model parameters. | Below is the the instruction that describes the task:
### Input:
Check a dictionary of model builder parameters on the h2o cluster
using the given algorithm and model parameters.
### Response:
def validate_model_parameters(self, algo, training_frame, parameters, timeoutSecs=60, **kwargs):
'''
Check a dictionary of model builder parameters on the h2o cluster
using the given algorithm and model parameters.
'''
assert algo is not None, '"algo" parameter is null'
# Allow this now: assert training_frame is not None, '"training_frame" parameter is null'
assert parameters is not None, '"parameters" parameter is null'
model_builders = self.model_builders(timeoutSecs=timeoutSecs)
assert model_builders is not None, "/ModelBuilders REST call failed"
assert algo in model_builders['model_builders']
builder = model_builders['model_builders'][algo]
# TODO: test this assert, I don't think this is working. . .
if training_frame is not None:
frames = self.frames(key=training_frame)
assert frames is not None, "/Frames/{0} REST call failed".format(training_frame)
key_name = frames['frames'][0]['key']['name']
assert key_name==training_frame, \
"/Frames/{0} returned Frame {1} rather than Frame {2}".format(training_frame, key_name, training_frame)
parameters['training_frame'] = training_frame
# TODO: add parameter existence checks
# TODO: add parameter value validation
# FIX! why ignoreH2oError here?
result = self.do_json_request('/3/ModelBuilders.json/' + algo + "/parameters", cmd='post',
timeout=timeoutSecs, postData=parameters, ignoreH2oError=True, noExtraErrorCheck=True)
verboseprint("model parameters validation: " + repr(result))
return result |
def is_present(conf, atom):
'''
Tell if a given package or DEPEND atom is present in the configuration
files tree.
Warning: This only works if the configuration files tree is in the correct
format (the one enforced by enforce_nice_config)
CLI Example:
.. code-block:: bash
salt '*' portage_config.is_present unmask salt
'''
if conf in SUPPORTED_CONFS:
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom, allow_wildcard=True)
has_wildcard = '*' in atom
package_file = _get_config_file(conf, six.text_type(atom))
# wildcards are valid in confs
if has_wildcard:
match_list = set(atom)
else:
match_list = set(_porttree().dbapi.xmatch("match-all", atom))
try:
with salt.utils.files.fopen(package_file) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line).strip()
line_package = line.split()[0]
if has_wildcard:
if line_package == six.text_type(atom):
return True
else:
line_list = _porttree().dbapi.xmatch("match-all", line_package)
if match_list.issubset(line_list):
return True
except IOError:
pass
return False | Tell if a given package or DEPEND atom is present in the configuration
files tree.
Warning: This only works if the configuration files tree is in the correct
format (the one enforced by enforce_nice_config)
CLI Example:
.. code-block:: bash
salt '*' portage_config.is_present unmask salt | Below is the the instruction that describes the task:
### Input:
Tell if a given package or DEPEND atom is present in the configuration
files tree.
Warning: This only works if the configuration files tree is in the correct
format (the one enforced by enforce_nice_config)
CLI Example:
.. code-block:: bash
salt '*' portage_config.is_present unmask salt
### Response:
def is_present(conf, atom):
'''
Tell if a given package or DEPEND atom is present in the configuration
files tree.
Warning: This only works if the configuration files tree is in the correct
format (the one enforced by enforce_nice_config)
CLI Example:
.. code-block:: bash
salt '*' portage_config.is_present unmask salt
'''
if conf in SUPPORTED_CONFS:
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom, allow_wildcard=True)
has_wildcard = '*' in atom
package_file = _get_config_file(conf, six.text_type(atom))
# wildcards are valid in confs
if has_wildcard:
match_list = set(atom)
else:
match_list = set(_porttree().dbapi.xmatch("match-all", atom))
try:
with salt.utils.files.fopen(package_file) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line).strip()
line_package = line.split()[0]
if has_wildcard:
if line_package == six.text_type(atom):
return True
else:
line_list = _porttree().dbapi.xmatch("match-all", line_package)
if match_list.issubset(line_list):
return True
except IOError:
pass
return False |
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of
comment line numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())] | Return the multiline comment at lineno split into a list of
comment line numbers and the accompanying comment line | Below is the the instruction that describes the task:
### Input:
Return the multiline comment at lineno split into a list of
comment line numbers and the accompanying comment line
### Response:
def _split_comment(lineno, comment):
"""Return the multiline comment at lineno split into a list of
comment line numbers and the accompanying comment line"""
return [(lineno + index, line) for index, line in
enumerate(comment.splitlines())] |
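For example (plain Python):

comment = "first line\nsecond line"
print(_split_comment(10, comment))
# [(10, 'first line'), (11, 'second line')]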
def get_colorscheme(self, scheme_file):
"""Return a string object with the colorscheme that is to be
inserted."""
scheme = get_yaml_dict(scheme_file)
scheme_slug = builder.slugify(scheme_file)
builder.format_scheme(scheme, scheme_slug)
try:
temp_base, temp_sub = self.temp.split('##')
except ValueError:
temp_base, temp_sub = (self.temp.strip('##'), 'default')
temp_path = rel_to_cwd('templates', temp_base)
temp_group = builder.TemplateGroup(temp_path)
try:
single_temp = temp_group.templates[temp_sub]
except KeyError:
raise FileNotFoundError(None,
None,
self.path + ' (sub-template)')
colorscheme = pystache.render(single_temp['parsed'], scheme)
return colorscheme | Return a string object with the colorscheme that is to be
inserted. | Below is the the instruction that describes the task:
### Input:
Return a string object with the colorscheme that is to be
inserted.
### Response:
def get_colorscheme(self, scheme_file):
"""Return a string object with the colorscheme that is to be
inserted."""
scheme = get_yaml_dict(scheme_file)
scheme_slug = builder.slugify(scheme_file)
builder.format_scheme(scheme, scheme_slug)
try:
temp_base, temp_sub = self.temp.split('##')
except ValueError:
temp_base, temp_sub = (self.temp.strip('##'), 'default')
temp_path = rel_to_cwd('templates', temp_base)
temp_group = builder.TemplateGroup(temp_path)
try:
single_temp = temp_group.templates[temp_sub]
except KeyError:
raise FileNotFoundError(None,
None,
self.path + ' (sub-template)')
colorscheme = pystache.render(single_temp['parsed'], scheme)
return colorscheme |
def in_network(scope, prefixes, destination, default_pfxlen=[24]):
"""
Returns True if the given destination is in the network range that is
defined by the given prefix (e.g. 10.0.0.1/22). If the given prefix
does not have a prefix length specified, the given default prefix length
is applied. If no such prefix length is given, the default length is
/24.
If a list of prefixes is passed, this function returns True only if
the given destination is in ANY of the given prefixes.
:type prefixes: string
:param prefixes: A prefix, or a list of IP prefixes.
:type destination: string
:param destination: An IP address.
:type default_pfxlen: int
:param default_pfxlen: The default prefix length.
:rtype: True
:return: Whether the given destination is in the given network.
"""
needle = ipv4.ip2int(destination[0])
for prefix in prefixes:
network, pfxlen = ipv4.parse_prefix(prefix, default_pfxlen[0])
mask = ipv4.pfxlen2mask_int(pfxlen)
if needle & mask == ipv4.ip2int(network) & mask:
return [True]
return [False] | Returns True if the given destination is in the network range that is
defined by the given prefix (e.g. 10.0.0.1/22). If the given prefix
does not have a prefix length specified, the given default prefix length
is applied. If no such prefix length is given, the default length is
/24.
If a list of prefixes is passed, this function returns True only if
the given destination is in ANY of the given prefixes.
:type prefixes: string
:param prefixes: A prefix, or a list of IP prefixes.
:type destination: string
:param destination: An IP address.
:type default_pfxlen: int
:param default_pfxlen: The default prefix length.
:rtype: True
:return: Whether the given destination is in the given network. | Below is the the instruction that describes the task:
### Input:
Returns True if the given destination is in the network range that is
defined by the given prefix (e.g. 10.0.0.1/22). If the given prefix
does not have a prefix length specified, the given default prefix length
is applied. If no such prefix length is given, the default length is
/24.
If a list of prefixes is passed, this function returns True only if
the given destination is in ANY of the given prefixes.
:type prefixes: string
:param prefixes: A prefix, or a list of IP prefixes.
:type destination: string
:param destination: An IP address.
:type default_pfxlen: int
:param default_pfxlen: The default prefix length.
:rtype: True
:return: Whether the given destination is in the given network.
### Response:
def in_network(scope, prefixes, destination, default_pfxlen=[24]):
"""
Returns True if the given destination is in the network range that is
defined by the given prefix (e.g. 10.0.0.1/22). If the given prefix
does not have a prefix length specified, the given default prefix length
is applied. If no such prefix length is given, the default length is
/24.
If a list of prefixes is passed, this function returns True only if
the given destination is in ANY of the given prefixes.
:type prefixes: string
:param prefixes: A prefix, or a list of IP prefixes.
:type destination: string
:param destination: An IP address.
:type default_pfxlen: int
:param default_pfxlen: The default prefix length.
:rtype: True
:return: Whether the given destination is in the given network.
"""
needle = ipv4.ip2int(destination[0])
for prefix in prefixes:
network, pfxlen = ipv4.parse_prefix(prefix, default_pfxlen[0])
mask = ipv4.pfxlen2mask_int(pfxlen)
if needle & mask == ipv4.ip2int(network) & mask:
return [True]
return [False] |
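A usage sketch (assumes Exscript's ipv4 helpers are importable under the name ipv4, which the function body expects; arguments and the result are wrapped in lists, following Exscript's template-function convention):

from Exscript.util import ipv4   # provides ip2int, parse_prefix, pfxlen2mask_int

print(in_network(None, ["10.0.0.0/22"], ["10.0.1.17"]))   # [True]  -- inside 10.0.0.0 - 10.0.3.255
print(in_network(None, ["10.0.0.0/22"], ["10.0.4.1"]))    # [False]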
def post_dissection(self, m):
"""
First we update the client DHParams. Then, we try to update the server
DHParams generated during Server*DHParams building, with the shared
secret. Finally, we derive the session keys and update the context.
"""
s = self.tls_session
# if there are kx params and keys, we assume the crypto library is ok
if s.client_kx_ffdh_params:
y = pkcs_os2ip(self.dh_Yc)
param_numbers = s.client_kx_ffdh_params.parameter_numbers()
public_numbers = dh.DHPublicNumbers(y, param_numbers)
s.client_kx_pubkey = public_numbers.public_key(default_backend())
if s.server_kx_privkey and s.client_kx_pubkey:
ZZ = s.server_kx_privkey.exchange(s.client_kx_pubkey)
s.pre_master_secret = ZZ
s.compute_ms_and_derive_keys() | First we update the client DHParams. Then, we try to update the server
DHParams generated during Server*DHParams building, with the shared
secret. Finally, we derive the session keys and update the context. | Below is the the instruction that describes the task:
### Input:
First we update the client DHParams. Then, we try to update the server
DHParams generated during Server*DHParams building, with the shared
secret. Finally, we derive the session keys and update the context.
### Response:
def post_dissection(self, m):
"""
First we update the client DHParams. Then, we try to update the server
DHParams generated during Server*DHParams building, with the shared
secret. Finally, we derive the session keys and update the context.
"""
s = self.tls_session
# if there are kx params and keys, we assume the crypto library is ok
if s.client_kx_ffdh_params:
y = pkcs_os2ip(self.dh_Yc)
param_numbers = s.client_kx_ffdh_params.parameter_numbers()
public_numbers = dh.DHPublicNumbers(y, param_numbers)
s.client_kx_pubkey = public_numbers.public_key(default_backend())
if s.server_kx_privkey and s.client_kx_pubkey:
ZZ = s.server_kx_privkey.exchange(s.client_kx_pubkey)
s.pre_master_secret = ZZ
s.compute_ms_and_derive_keys() |
def I(self):
r"""Returns the set of intermediate states
"""
return list(set(range(self.nstates)) - set(self._A) - set(self._B)) | r"""Returns the set of intermediate states | Below is the the instruction that describes the task:
### Input:
r"""Returns the set of intermediate states
### Response:
def I(self):
r"""Returns the set of intermediate states
"""
return list(set(range(self.nstates)) - set(self._A) - set(self._B)) |
def time_to_hhmmssmmm(time_value, decimal_separator="."):
"""
Format the given time value into a ``HH:MM:SS.mmm`` string.
Examples: ::
12 => 00:00:12.000
12.345 => 00:00:12.345
12.345432 => 00:00:12.345
12.345678 => 00:00:12.346
83 => 00:01:23.000
83.456 => 00:01:23.456
83.456789 => 00:01:23.456
3600 => 01:00:00.000
3612.345 => 01:00:12.345
:param float time_value: a time value, in seconds
:param string decimal_separator: the decimal separator, default ``.``
:rtype: string
"""
if time_value is None:
time_value = 0
tmp = time_value
hours = int(math.floor(tmp / 3600))
tmp -= (hours * 3600)
minutes = int(math.floor(tmp / 60))
tmp -= minutes * 60
seconds = int(math.floor(tmp))
tmp -= seconds
milliseconds = int(math.floor(tmp * 1000))
return "%02d:%02d:%02d%s%03d" % (
hours,
minutes,
seconds,
decimal_separator,
milliseconds
) | Format the given time value into a ``HH:MM:SS.mmm`` string.
Examples: ::
12 => 00:00:12.000
12.345 => 00:00:12.345
12.345432 => 00:00:12.345
12.345678 => 00:00:12.346
83 => 00:01:23.000
83.456 => 00:01:23.456
83.456789 => 00:01:23.456
3600 => 01:00:00.000
3612.345 => 01:00:12.345
:param float time_value: a time value, in seconds
:param string decimal_separator: the decimal separator, default ``.``
:rtype: string | Below is the the instruction that describes the task:
### Input:
Format the given time value into a ``HH:MM:SS.mmm`` string.
Examples: ::
12 => 00:00:12.000
12.345 => 00:00:12.345
12.345432 => 00:00:12.345
12.345678 => 00:00:12.346
83 => 00:01:23.000
83.456 => 00:01:23.456
83.456789 => 00:01:23.456
3600 => 01:00:00.000
3612.345 => 01:00:12.345
:param float time_value: a time value, in seconds
:param string decimal_separator: the decimal separator, default ``.``
:rtype: string
### Response:
def time_to_hhmmssmmm(time_value, decimal_separator="."):
"""
Format the given time value into a ``HH:MM:SS.mmm`` string.
Examples: ::
12 => 00:00:12.000
12.345 => 00:00:12.345
12.345432 => 00:00:12.345
12.345678 => 00:00:12.346
83 => 00:01:23.000
83.456 => 00:01:23.456
83.456789 => 00:01:23.456
3600 => 01:00:00.000
3612.345 => 01:00:12.345
:param float time_value: a time value, in seconds
:param string decimal_separator: the decimal separator, default ``.``
:rtype: string
"""
if time_value is None:
time_value = 0
tmp = time_value
hours = int(math.floor(tmp / 3600))
tmp -= (hours * 3600)
minutes = int(math.floor(tmp / 60))
tmp -= minutes * 60
seconds = int(math.floor(tmp))
tmp -= seconds
milliseconds = int(math.floor(tmp * 1000))
return "%02d:%02d:%02d%s%03d" % (
hours,
minutes,
seconds,
decimal_separator,
milliseconds
) |
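A quick usage sketch (not part of the source above): the calls assume time_to_hhmmssmmm() is importable from its module, and the sample values are exactly representable floats so the printed strings match the comments.

print(time_to_hhmmssmmm(90.25))        # 00:01:30.250
print(time_to_hhmmssmmm(3661.0, ","))  # 01:01:01,000 (SRT-style decimal separator)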
def main(demo=False, aschild=False, targets=[]):
"""Start the Qt-runtime and show the window
Arguments:
aschild (bool, optional): Run as child of parent process
"""
if aschild:
print("Starting pyblish-qml")
compat.main()
app = Application(APP_PATH, targets)
app.listen()
print("Done, don't forget to call `show()`")
return app.exec_()
else:
print("Starting pyblish-qml server..")
service = ipc.service.MockService() if demo else ipc.service.Service()
server = ipc.server.Server(service, targets=targets)
proxy = ipc.server.Proxy(server)
proxy.show(settings.to_dict())
server.listen()
server.wait() | Start the Qt-runtime and show the window
Arguments:
aschild (bool, optional): Run as child of parent process | Below is the the instruction that describes the task:
### Input:
Start the Qt-runtime and show the window
Arguments:
aschild (bool, optional): Run as child of parent process
### Response:
def main(demo=False, aschild=False, targets=[]):
"""Start the Qt-runtime and show the window
Arguments:
aschild (bool, optional): Run as child of parent process
"""
if aschild:
print("Starting pyblish-qml")
compat.main()
app = Application(APP_PATH, targets)
app.listen()
print("Done, don't forget to call `show()`")
return app.exec_()
else:
print("Starting pyblish-qml server..")
service = ipc.service.MockService() if demo else ipc.service.Service()
server = ipc.server.Server(service, targets=targets)
proxy = ipc.server.Proxy(server)
proxy.show(settings.to_dict())
server.listen()
server.wait() |
def list_servers(self, datacenter_id, depth=1):
"""
Retrieves a list of all servers bound to the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request(
'/datacenters/%s/servers?depth=%s' % (datacenter_id, str(depth)))
return response | Retrieves a list of all servers bound to the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int`` | Below is the the instruction that describes the task:
### Input:
Retrieves a list of all servers bound to the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
### Response:
def list_servers(self, datacenter_id, depth=1):
"""
Retrieves a list of all servers bound to the specified data center.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param depth: The depth of the response data.
:type depth: ``int``
"""
response = self._perform_request(
'/datacenters/%s/servers?depth=%s' % (datacenter_id, str(depth)))
return response |
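A hypothetical usage sketch for the call above; the client class, import path, credentials, IDs and response keys are assumptions rather than values taken from this snippet.

from profitbricks.client import ProfitBricksService  # assumed import path

client = ProfitBricksService(username='user@example.com', password='secret')
servers = client.list_servers(datacenter_id='00000000-0000-0000-0000-000000000000', depth=2)
for item in servers.get('items', []):
    print(item['id'])  # assumed REST response layout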
def conv_ast_to_sym(self, math_ast):
"""
Convert mathematical expressions to a sympy representation.
May only contain parentheses, addition, subtraction and multiplication from AST.
"""
if type(math_ast) is c_ast.ID:
return symbol_pos_int(math_ast.name)
elif type(math_ast) is c_ast.Constant:
return sympy.Integer(math_ast.value)
else: # elif type(dim) is c_ast.BinaryOp:
op = {
'*': operator.mul,
'+': operator.add,
'-': operator.sub
}
return op[math_ast.op](
self.conv_ast_to_sym(math_ast.left),
self.conv_ast_to_sym(math_ast.right)) | Convert mathematical expressions to a sympy representation.
May only contain parentheses, addition, subtraction and multiplication from AST. | Below is the the instruction that describes the task:
### Input:
Convert mathematical expressions to a sympy representation.
May only contain parentheses, addition, subtraction and multiplication from AST.
### Response:
def conv_ast_to_sym(self, math_ast):
"""
Convert mathematical expressions to a sympy representation.
May only contain parentheses, addition, subtraction and multiplication from AST.
"""
if type(math_ast) is c_ast.ID:
return symbol_pos_int(math_ast.name)
elif type(math_ast) is c_ast.Constant:
return sympy.Integer(math_ast.value)
else: # elif type(dim) is c_ast.BinaryOp:
op = {
'*': operator.mul,
'+': operator.add,
'-': operator.sub
}
return op[math_ast.op](
self.conv_ast_to_sym(math_ast.left),
self.conv_ast_to_sym(math_ast.right)) |
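For illustration only: the snippet below hand-builds a tiny pycparser AST for the C expression N * 8 + 4, the kind of input conv_ast_to_sym() accepts; actually running the conversion still requires an instance of the class the method belongs to, which is assumed here.

from pycparser import c_ast

# AST for the C expression "N * 8 + 4"
expr = c_ast.BinaryOp(
    '+',
    c_ast.BinaryOp('*', c_ast.ID('N'), c_ast.Constant('int', '8')),
    c_ast.Constant('int', '4'),
)
# obj.conv_ast_to_sym(expr) would return the sympy expression N*8 + 4,
# with N treated as a positive integer symbol.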
def createSpatialAnchorFromDescriptor(self, pchDescriptor):
"""
Returns a handle for a spatial anchor described by "descriptor". On success, pHandle
will contain a handle valid for this session. Caller can wait for an event or occasionally
poll GetSpatialAnchorPose() to find the virtual coordinate associated with this anchor.
"""
fn = self.function_table.createSpatialAnchorFromDescriptor
pHandleOut = SpatialAnchorHandle_t()
result = fn(pchDescriptor, byref(pHandleOut))
return result, pHandleOut | Returns a handle for a spatial anchor described by "descriptor". On success, pHandle
will contain a handle valid for this session. Caller can wait for an event or occasionally
poll GetSpatialAnchorPose() to find the virtual coordinate associated with this anchor. | Below is the the instruction that describes the task:
### Input:
Returns a handle for a spatial anchor described by "descriptor". On success, pHandle
will contain a handle valid for this session. Caller can wait for an event or occasionally
poll GetSpatialAnchorPose() to find the virtual coordinate associated with this anchor.
### Response:
def createSpatialAnchorFromDescriptor(self, pchDescriptor):
"""
Returns a handle for a spatial anchor described by "descriptor". On success, pHandle
will contain a handle valid for this session. Caller can wait for an event or occasionally
poll GetSpatialAnchorPose() to find the virtual coordinate associated with this anchor.
"""
fn = self.function_table.createSpatialAnchorFromDescriptor
pHandleOut = SpatialAnchorHandle_t()
result = fn(pchDescriptor, byref(pHandleOut))
return result, pHandleOut |
def setStyles(self, styleUpdatesDict):
'''
setStyles - Sets one or more style params.
This all happens in one shot, so it is much much faster than calling setStyle for every value.
To remove a style, set its value to empty string.
When all styles are removed, the "style" attribute will be nullified.
@param styleUpdatesDict - Dictionary of attribute : value styles.
@return - String of current value of "style" after change is made.
'''
setStyleMethod = self.setStyle
for newName, newValue in styleUpdatesDict.items():
setStyleMethod(newName, newValue)
return self.style | setStyles - Sets one or more style params.
This all happens in one shot, so it is much much faster than calling setStyle for every value.
To remove a style, set its value to empty string.
When all styles are removed, the "style" attribute will be nullified.
@param styleUpdatesDict - Dictionary of attribute : value styles.
@return - String of current value of "style" after change is made. | Below is the the instruction that describes the task:
### Input:
setStyles - Sets one or more style params.
This all happens in one shot, so it is much much faster than calling setStyle for every value.
To remove a style, set its value to empty string.
When all styles are removed, the "style" attribute will be nullified.
@param styleUpdatesDict - Dictionary of attribute : value styles.
@return - String of current value of "style" after change is made.
### Response:
def setStyles(self, styleUpdatesDict):
'''
setStyles - Sets one or more style params.
This all happens in one shot, so it is much much faster than calling setStyle for every value.
To remove a style, set its value to empty string.
When all styles are removed, the "style" attribute will be nullified.
@param styleUpdatesDict - Dictionary of attribute : value styles.
@return - String of current value of "style" after change is made.
'''
setStyleMethod = self.setStyle
for newName, newValue in styleUpdatesDict.items():
setStyleMethod(newName, newValue)
return self.style |
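A brief usage sketch; "tag" below stands for any element object exposing the setStyles() method shown above and is not defined in this snippet.

tag.setStyles({'color': 'red', 'font-weight': 'bold'})  # set two styles in one call
tag.setStyles({'font-weight': ''})                      # empty value removes a style again
print(tag.style)                                        # e.g. "color: red"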
def HardwareInput(uMsg: int, param: int = 0) -> INPUT:
"""Create Win32 struct `HARDWAREINPUT` for `SendInput`."""
return _CreateInput(HARDWAREINPUT(uMsg, param & 0xFFFF, param >> 16 & 0xFFFF)) | Create Win32 struct `HARDWAREINPUT` for `SendInput`. | Below is the the instruction that describes the task:
### Input:
Create Win32 struct `HARDWAREINPUT` for `SendInput`.
### Response:
def HardwareInput(uMsg: int, param: int = 0) -> INPUT:
"""Create Win32 struct `HARDWAREINPUT` for `SendInput`."""
return _CreateInput(HARDWAREINPUT(uMsg, param & 0xFFFF, param >> 16 & 0xFFFF)) |
def dlafns(handle, descr):
"""
Find the segment following a specified segment in a DLA file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlafns_c.html
:param handle: Handle of open DLA file.
:type handle: c_int
:param descr: Descriptor of a DLA segment.
:type descr: spiceypy.utils.support_types.SpiceDLADescr
:return: Descriptor of next segment in DLA file
:rtype: spiceypy.utils.support_types.SpiceDLADescr
"""
assert isinstance(descr, stypes.SpiceDLADescr)
handle = ctypes.c_int(handle)
nxtdsc = stypes.SpiceDLADescr()
found = ctypes.c_int()
libspice.dlafns_c(handle, ctypes.byref(descr), ctypes.byref(nxtdsc), ctypes.byref(found))
return nxtdsc, bool(found.value) | Find the segment following a specified segment in a DLA file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlafns_c.html
:param handle: Handle of open DLA file.
:type handle: c_int
:param descr: Descriptor of a DLA segment.
:type descr: spiceypy.utils.support_types.SpiceDLADescr
:return: Descriptor of next segment in DLA file
:rtype: spiceypy.utils.support_types.SpiceDLADescr | Below is the the instruction that describes the task:
### Input:
Find the segment following a specified segment in a DLA file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlafns_c.html
:param handle: Handle of open DLA file.
:type handle: c_int
:param descr: Descriptor of a DLA segment.
:type descr: spiceypy.utils.support_types.SpiceDLADescr
:return: Descriptor of next segment in DLA file
:rtype: spiceypy.utils.support_types.SpiceDLADescr
### Response:
def dlafns(handle, descr):
"""
Find the segment following a specified segment in a DLA file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlafns_c.html
:param handle: Handle of open DLA file.
:type handle: c_int
:param descr: Descriptor of a DLA segment.
:type descr: spiceypy.utils.support_types.SpiceDLADescr
:return: Descriptor of next segment in DLA file
:rtype: spiceypy.utils.support_types.SpiceDLADescr
"""
assert isinstance(descr, stypes.SpiceDLADescr)
handle = ctypes.c_int(handle)
nxtdsc = stypes.SpiceDLADescr()
found = ctypes.c_int()
libspice.dlafns_c(handle, ctypes.byref(descr), ctypes.byref(nxtdsc), ctypes.byref(found))
return nxtdsc, bool(found.value) |
def _get_modules_map(self, path=None):
'''
Get installed Ansible modules
:return:
'''
paths = {}
root = ansible.modules.__path__[0]
if not path:
path = root
for p_el in os.listdir(path):
p_el_path = os.path.join(path, p_el)
if os.path.islink(p_el_path):
continue
if os.path.isdir(p_el_path):
paths.update(self._get_modules_map(p_el_path))
else:
if (any(p_el.startswith(elm) for elm in ['__', '.']) or
not p_el.endswith('.py') or
p_el in ansible.constants.IGNORE_FILES):
continue
p_el_path = p_el_path.replace(root, '').split('.')[0]
als_name = p_el_path.replace('.', '').replace('/', '', 1).replace('/', '.')
paths[als_name] = p_el_path
return paths | Get installed Ansible modules
:return: | Below is the the instruction that describes the task:
### Input:
Get installed Ansible modules
:return:
### Response:
def _get_modules_map(self, path=None):
'''
Get installed Ansible modules
:return:
'''
paths = {}
root = ansible.modules.__path__[0]
if not path:
path = root
for p_el in os.listdir(path):
p_el_path = os.path.join(path, p_el)
if os.path.islink(p_el_path):
continue
if os.path.isdir(p_el_path):
paths.update(self._get_modules_map(p_el_path))
else:
if (any(p_el.startswith(elm) for elm in ['__', '.']) or
not p_el.endswith('.py') or
p_el in ansible.constants.IGNORE_FILES):
continue
p_el_path = p_el_path.replace(root, '').split('.')[0]
als_name = p_el_path.replace('.', '').replace('/', '', 1).replace('/', '.')
paths[als_name] = p_el_path
return paths |
def authorized_connect_apps(self):
"""
Access the authorized_connect_apps
:returns: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList
:rtype: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList
"""
if self._authorized_connect_apps is None:
self._authorized_connect_apps = AuthorizedConnectAppList(
self._version,
account_sid=self._solution['sid'],
)
return self._authorized_connect_apps | Access the authorized_connect_apps
:returns: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList
:rtype: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList | Below is the the instruction that describes the task:
### Input:
Access the authorized_connect_apps
:returns: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList
:rtype: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList
### Response:
def authorized_connect_apps(self):
"""
Access the authorized_connect_apps
:returns: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList
:rtype: twilio.rest.api.v2010.account.authorized_connect_app.AuthorizedConnectAppList
"""
if self._authorized_connect_apps is None:
self._authorized_connect_apps = AuthorizedConnectAppList(
self._version,
account_sid=self._solution['sid'],
)
return self._authorized_connect_apps |
def get_time_slice(time, z, zdot=None, timeStart=None, timeEnd=None):
"""
Get slice of time, z and (if provided) zdot from timeStart to timeEnd.
Parameters
----------
time : ndarray
array of time values
z : ndarray
array of z values
zdot : ndarray, optional
array of zdot (velocity) values.
timeStart : float, optional
time at which to start the slice.
Defaults to beginning of time trace
timeEnd : float, optional
time at which to end the slice.
Defaults to end of time trace
Returns
-------
time_sliced : ndarray
array of time values from timeStart to timeEnd
z_sliced : ndarray
array of z values from timeStart to timeEnd
zdot_sliced : ndarray
array of zdot values from timeStart to timeEnd.
None if zdot not provided
"""
if timeStart is None:
timeStart = time[0]
if timeEnd is None:
timeEnd = time[-1]
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
time_sliced = time[StartIndex:EndIndex]
z_sliced = z[StartIndex:EndIndex]
if zdot is not None:
zdot_sliced = zdot[StartIndex:EndIndex]
else:
zdot_sliced = None
return time_sliced, z_sliced, zdot_sliced | Get slice of time, z and (if provided) zdot from timeStart to timeEnd.
Parameters
----------
time : ndarray
array of time values
z : ndarray
array of z values
zdot : ndarray, optional
array of zdot (velocity) values.
timeStart : float, optional
time at which to start the slice.
Defaults to beginning of time trace
timeEnd : float, optional
time at which to end the slice.
Defaults to end of time trace
Returns
-------
time_sliced : ndarray
array of time values from timeStart to timeEnd
z_sliced : ndarray
array of z values from timeStart to timeEnd
zdot_sliced : ndarray
array of zdot values from timeStart to timeEnd.
None if zdot not provided | Below is the the instruction that describes the task:
### Input:
Get slice of time, z and (if provided) zdot from timeStart to timeEnd.
Parameters
----------
time : ndarray
array of time values
z : ndarray
array of z values
zdot : ndarray, optional
array of zdot (velocity) values.
timeStart : float, optional
time at which to start the slice.
Defaults to beginning of time trace
timeEnd : float, optional
time at which to end the slice.
Defaults to end of time trace
Returns
-------
time_sliced : ndarray
array of time values from timeStart to timeEnd
z_sliced : ndarray
array of z values from timeStart to timeEnd
zdot_sliced : ndarray
array of zdot values from timeStart to timeEnd.
None if zdot not provided
### Response:
def get_time_slice(time, z, zdot=None, timeStart=None, timeEnd=None):
"""
Get slice of time, z and (if provided) zdot from timeStart to timeEnd.
Parameters
----------
time : ndarray
array of time values
z : ndarray
array of z values
zdot : ndarray, optional
array of zdot (velocity) values.
timeStart : float, optional
time at which to start the slice.
Defaults to beginning of time trace
timeEnd : float, optional
time at which to end the slice.
Defaults to end of time trace
Returns
-------
time_sliced : ndarray
array of time values from timeStart to timeEnd
z_sliced : ndarray
array of z values from timeStart to timeEnd
zdot_sliced : ndarray
array of zdot values from timeStart to timeEnd.
None if zdot not provided
"""
if timeStart is None:
timeStart = time[0]
if timeEnd is None:
timeEnd = time[-1]
StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]
time_sliced = time[StartIndex:EndIndex]
z_sliced = z[StartIndex:EndIndex]
if zdot is not None:
zdot_sliced = zdot[StartIndex:EndIndex]
else:
zdot_sliced = None
return time_sliced, z_sliced, zdot_sliced |
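A small usage sketch, assuming numpy is installed and get_time_slice() (together with the module's take_closest() helper it relies on) is importable.

import numpy as np

t = np.linspace(0, 10, 1001)           # 10 s of evenly spaced samples
z = np.sin(2 * np.pi * 0.5 * t)
t_cut, z_cut, _ = get_time_slice(t, z, timeStart=2.0, timeEnd=5.0)
print(t_cut[0], t_cut[-1])             # approximately 2.0 and 5.0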
def parse_seq(tokens, options):
"""seq ::= ( atom [ '...' ] )* ;"""
result = []
while tokens.current() not in [None, ']', ')', '|']:
atom = parse_atom(tokens, options)
if tokens.current() == '...':
atom = [OneOrMore(*atom)]
tokens.move()
result += atom
return result | seq ::= ( atom [ '...' ] )* ; | Below is the the instruction that describes the task:
### Input:
seq ::= ( atom [ '...' ] )* ;
### Response:
def parse_seq(tokens, options):
"""seq ::= ( atom [ '...' ] )* ;"""
result = []
while tokens.current() not in [None, ']', ')', '|']:
atom = parse_atom(tokens, options)
if tokens.current() == '...':
atom = [OneOrMore(*atom)]
tokens.move()
result += atom
return result |
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_session,
end_session, asset.offset)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(
roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1]))
for column in columns:
if column != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results | Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range. | Below is the the instruction that describes the task:
### Input:
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
### Response:
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_session,
end_session, asset.offset)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(
roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1]))
for column in columns:
if column != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results |
def process_metric(self, message, **kwargs):
"""
Handle a prometheus metric message according to the following flow:
- search self.metrics_mapper for a prometheus.metric <--> datadog.metric mapping
- call check method with the same name as the metric
- log some info if none of the above worked
`send_histograms_buckets` is used to specify whether you want to send
the buckets as tagged values when dealing with histograms.
"""
# If targeted metric, store labels
self.store_labels(message)
if message.name in self.ignore_metrics:
return # Ignore the metric
# Filter metric to see if we can enrich with joined labels
self.join_labels(message)
send_histograms_buckets = kwargs.get('send_histograms_buckets', True)
send_monotonic_counter = kwargs.get('send_monotonic_counter', False)
custom_tags = kwargs.get('custom_tags')
ignore_unmapped = kwargs.get('ignore_unmapped', False)
try:
if not self._dry_run:
try:
self._submit(
self.metrics_mapper[message.name],
message,
send_histograms_buckets,
send_monotonic_counter,
custom_tags,
)
except KeyError:
if not ignore_unmapped:
# call magic method (non-generic check)
handler = getattr(self, message.name) # Lookup will throw AttributeError if not found
try:
handler(message, **kwargs)
except Exception as err:
self.log.warning("Error handling metric: {} - error: {}".format(message.name, err))
else:
# build the wildcard list if first pass
if self._metrics_wildcards is None:
self._metrics_wildcards = [x for x in self.metrics_mapper.keys() if '*' in x]
# try matching wildcard (generic check)
for wildcard in self._metrics_wildcards:
if fnmatchcase(message.name, wildcard):
self._submit(
message.name, message, send_histograms_buckets, send_monotonic_counter, custom_tags
)
except AttributeError as err:
self.log.debug("Unable to handle metric: {} - error: {}".format(message.name, err)) | Handle a prometheus metric message according to the following flow:
- search self.metrics_mapper for a prometheus.metric <--> datadog.metric mapping
- call check method with the same name as the metric
- log some info if none of the above worked
`send_histograms_buckets` is used to specify whether you want to send
the buckets as tagged values when dealing with histograms. | Below is the the instruction that describes the task:
### Input:
Handle a prometheus metric message according to the following flow:
- search self.metrics_mapper for a prometheus.metric <--> datadog.metric mapping
- call check method with the same name as the metric
- log some info if none of the above worked
`send_histograms_buckets` is used to specify whether you want to send
the buckets as tagged values when dealing with histograms.
### Response:
def process_metric(self, message, **kwargs):
"""
Handle a prometheus metric message according to the following flow:
- search self.metrics_mapper for a prometheus.metric <--> datadog.metric mapping
- call check method with the same name as the metric
- log some info if none of the above worked
`send_histograms_buckets` is used to specify whether you want to send
the buckets as tagged values when dealing with histograms.
"""
# If targeted metric, store labels
self.store_labels(message)
if message.name in self.ignore_metrics:
return # Ignore the metric
# Filter metric to see if we can enrich with joined labels
self.join_labels(message)
send_histograms_buckets = kwargs.get('send_histograms_buckets', True)
send_monotonic_counter = kwargs.get('send_monotonic_counter', False)
custom_tags = kwargs.get('custom_tags')
ignore_unmapped = kwargs.get('ignore_unmapped', False)
try:
if not self._dry_run:
try:
self._submit(
self.metrics_mapper[message.name],
message,
send_histograms_buckets,
send_monotonic_counter,
custom_tags,
)
except KeyError:
if not ignore_unmapped:
# call magic method (non-generic check)
handler = getattr(self, message.name) # Lookup will throw AttributeError if not found
try:
handler(message, **kwargs)
except Exception as err:
self.log.warning("Error handling metric: {} - error: {}".format(message.name, err))
else:
# build the wildcard list if first pass
if self._metrics_wildcards is None:
self._metrics_wildcards = [x for x in self.metrics_mapper.keys() if '*' in x]
# try matching wildcard (generic check)
for wildcard in self._metrics_wildcards:
if fnmatchcase(message.name, wildcard):
self._submit(
message.name, message, send_histograms_buckets, send_monotonic_counter, custom_tags
)
except AttributeError as err:
self.log.debug("Unable to handle metric: {} - error: {}".format(message.name, err)) |
def toggle(self, *args):
"""
If no arguments are specified, toggle the state of all LEDs. If
arguments are specified, they must be the indexes of the LEDs you wish
to toggle. For example::
from gpiozero import LEDBoard
leds = LEDBoard(2, 3, 4, 5)
leds.toggle(0) # turn on the first LED (pin 2)
leds.toggle(-1) # turn on the last LED (pin 5)
leds.toggle() # turn the first and last LED off, and the
# middle pair on
If :meth:`blink` is currently active, it will be stopped first.
:param int args:
The index(es) of the LED(s) to toggle. If no indexes are specified
toggle the state of all LEDs.
"""
self._stop_blink()
if args:
for index in args:
self[index].toggle()
else:
super(LEDBoard, self).toggle() | If no arguments are specified, toggle the state of all LEDs. If
arguments are specified, they must be the indexes of the LEDs you wish
to toggle. For example::
from gpiozero import LEDBoard
leds = LEDBoard(2, 3, 4, 5)
leds.toggle(0) # turn on the first LED (pin 2)
leds.toggle(-1) # turn on the last LED (pin 5)
leds.toggle() # turn the first and last LED off, and the
# middle pair on
If :meth:`blink` is currently active, it will be stopped first.
:param int args:
The index(es) of the LED(s) to toggle. If no indexes are specified
toggle the state of all LEDs. | Below is the the instruction that describes the task:
### Input:
If no arguments are specified, toggle the state of all LEDs. If
arguments are specified, they must be the indexes of the LEDs you wish
to toggle. For example::
from gpiozero import LEDBoard
leds = LEDBoard(2, 3, 4, 5)
leds.toggle(0) # turn on the first LED (pin 2)
leds.toggle(-1) # turn on the last LED (pin 5)
leds.toggle() # turn the first and last LED off, and the
# middle pair on
If :meth:`blink` is currently active, it will be stopped first.
:param int args:
The index(es) of the LED(s) to toggle. If no indexes are specified
toggle the state of all LEDs.
### Response:
def toggle(self, *args):
"""
If no arguments are specified, toggle the state of all LEDs. If
arguments are specified, they must be the indexes of the LEDs you wish
to toggle. For example::
from gpiozero import LEDBoard
leds = LEDBoard(2, 3, 4, 5)
leds.toggle(0) # turn on the first LED (pin 2)
leds.toggle(-1) # turn on the last LED (pin 5)
leds.toggle() # turn the first and last LED off, and the
# middle pair on
If :meth:`blink` is currently active, it will be stopped first.
:param int args:
The index(es) of the LED(s) to toggle. If no indexes are specified
toggle the state of all LEDs.
"""
self._stop_blink()
if args:
for index in args:
self[index].toggle()
else:
super(LEDBoard, self).toggle() |
def get_volume_by_name(self, name):
"""
Get ScaleIO Volume object by its Name
:param name: Name of volume
:return: ScaleIO Volume object
:raise KeyError: No Volume with specified name found
:rtype: ScaleIO Volume object
"""
for vol in self.conn.volumes:
if vol.name == name:
return vol
raise KeyError("Volume with NAME " + name + " not found") | Get ScaleIO Volume object by its Name
:param name: Name of volume
:return: ScaleIO Volume object
:raise KeyError: No Volume with specified name found
:rtype: ScaleIO Volume object | Below is the the instruction that describes the task:
### Input:
Get ScaleIO Volume object by its Name
:param name: Name of volume
:return: ScaleIO Volume object
:raise KeyError: No Volume with specified name found
:rtype: ScaleIO Volume object
### Response:
def get_volume_by_name(self, name):
"""
Get ScaleIO Volume object by its Name
:param name: Name of volume
:return: ScaleIO Volume object
:raise KeyError: No Volume with specified name found
:rtype: ScaleIO Volume object
"""
for vol in self.conn.volumes:
if vol.name == name:
return vol
raise KeyError("Volume with NAME " + name + " not found") |
def get_item(identifier,
config=None,
config_file=None,
archive_session=None,
debug=None,
http_adapter_kwargs=None,
request_kwargs=None):
"""Get an :class:`Item` object.
:type identifier: str
:param identifier: The globally unique Archive.org item identifier.
:type config: dict
:param config: (optional) A dictionary used to configure your session.
:type config_file: str
:param config_file: (optional) A path to a config file used to configure your session.
:type archive_session: :class:`ArchiveSession`
:param archive_session: (optional) An :class:`ArchiveSession` object can be provided
via the ``archive_session`` parameter.
:type http_adapter_kwargs: dict
:param http_adapter_kwargs: (optional) Keyword arguments that
:py:class:`requests.adapters.HTTPAdapter` takes.
:type request_kwargs: dict
:param request_kwargs: (optional) Keyword arguments that
:py:class:`requests.Request` takes.
Usage:
>>> from internetarchive import get_item
>>> item = get_item('nasa')
>>> item.item_size
121084
"""
if not archive_session:
archive_session = get_session(config, config_file, debug, http_adapter_kwargs)
return archive_session.get_item(identifier, request_kwargs=request_kwargs) | Get an :class:`Item` object.
:type identifier: str
:param identifier: The globally unique Archive.org item identifier.
:type config: dict
:param config: (optional) A dictionary used to configure your session.
:type config_file: str
:param config_file: (optional) A path to a config file used to configure your session.
:type archive_session: :class:`ArchiveSession`
:param archive_session: (optional) An :class:`ArchiveSession` object can be provided
via the ``archive_session`` parameter.
:type http_adapter_kwargs: dict
:param http_adapter_kwargs: (optional) Keyword arguments that
:py:class:`requests.adapters.HTTPAdapter` takes.
:type request_kwargs: dict
:param request_kwargs: (optional) Keyword arguments that
:py:class:`requests.Request` takes.
Usage:
>>> from internetarchive import get_item
>>> item = get_item('nasa')
>>> item.item_size
121084 | Below is the the instruction that describes the task:
### Input:
Get an :class:`Item` object.
:type identifier: str
:param identifier: The globally unique Archive.org item identifier.
:type config: dict
:param config: (optional) A dictionary used to configure your session.
:type config_file: str
:param config_file: (optional) A path to a config file used to configure your session.
:type archive_session: :class:`ArchiveSession`
:param archive_session: (optional) An :class:`ArchiveSession` object can be provided
via the ``archive_session`` parameter.
:type http_adapter_kwargs: dict
:param http_adapter_kwargs: (optional) Keyword arguments that
:py:class:`requests.adapters.HTTPAdapter` takes.
:type request_kwargs: dict
:param request_kwargs: (optional) Keyword arguments that
:py:class:`requests.Request` takes.
Usage:
>>> from internetarchive import get_item
>>> item = get_item('nasa')
>>> item.item_size
121084
### Response:
def get_item(identifier,
config=None,
config_file=None,
archive_session=None,
debug=None,
http_adapter_kwargs=None,
request_kwargs=None):
"""Get an :class:`Item` object.
:type identifier: str
:param identifier: The globally unique Archive.org item identifier.
:type config: dict
:param config: (optional) A dictionary used to configure your session.
:type config_file: str
:param config_file: (optional) A path to a config file used to configure your session.
:type archive_session: :class:`ArchiveSession`
:param archive_session: (optional) An :class:`ArchiveSession` object can be provided
via the ``archive_session`` parameter.
:type http_adapter_kwargs: dict
:param http_adapter_kwargs: (optional) Keyword arguments that
:py:class:`requests.adapters.HTTPAdapter` takes.
:type request_kwargs: dict
:param request_kwargs: (optional) Keyword arguments that
:py:class:`requests.Request` takes.
Usage:
>>> from internetarchive import get_item
>>> item = get_item('nasa')
>>> item.item_size
121084
"""
if not archive_session:
archive_session = get_session(config, config_file, debug, http_adapter_kwargs)
return archive_session.get_item(identifier, request_kwargs=request_kwargs) |
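A short variant of the docstring's usage showing the optional config dict; the credential keys follow the library's s3 section layout as understood here, and the values are placeholders.

item = get_item('nasa', config={'s3': {'access': 'YOUR_ACCESS_KEY',
                                       'secret': 'YOUR_SECRET_KEY'}})
print(item.exists)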
def _parse(self, str):
""" Parses the text data from an XML element defined by tag.
"""
str = replace_entities(str)
str = strip_tags(str)
str = collapse_spaces(str)
return str | Parses the text data from an XML element defined by tag. | Below is the the instruction that describes the task:
### Input:
Parses the text data from an XML element defined by tag.
### Response:
def _parse(self, str):
""" Parses the text data from an XML element defined by tag.
"""
str = replace_entities(str)
str = strip_tags(str)
str = collapse_spaces(str)
return str |
def build_instruction_coverage_plugin() -> LaserPlugin:
""" Creates an instance of the instruction coverage plugin"""
from mythril.laser.ethereum.plugins.implementations.coverage import (
InstructionCoveragePlugin,
)
return InstructionCoveragePlugin() | Creates an instance of the instruction coverage plugin | Below is the the instruction that describes the task:
### Input:
Creates an instance of the instruction coverage plugin
### Response:
def build_instruction_coverage_plugin() -> LaserPlugin:
""" Creates an instance of the instruction coverage plugin"""
from mythril.laser.ethereum.plugins.implementations.coverage import (
InstructionCoveragePlugin,
)
return InstructionCoveragePlugin() |
def import_entries(self, items):
"""
Loops over items and finds entries to import;
an entry needs to have 'post_type' set to 'post' and
have content.
"""
self.write_out(self.style.STEP('- Importing entries\n'))
for item_node in items:
title = (item_node.find('title').text or '')[:255]
post_type = item_node.find('{%s}post_type' % WP_NS).text
content = item_node.find(
'{http://purl.org/rss/1.0/modules/content/}encoded').text
if post_type == 'post' and content and title:
self.write_out('> %s... ' % title)
entry, created = self.import_entry(title, content, item_node)
if created:
self.write_out(self.style.ITEM('OK\n'))
image_id = self.find_image_id(
item_node.findall('{%s}postmeta' % WP_NS))
if image_id:
self.import_image(entry, items, image_id)
self.import_comments(entry, item_node.findall(
'{%s}comment' % WP_NS))
else:
self.write_out(self.style.NOTICE(
'SKIPPED (already imported)\n'))
else:
self.write_out('> %s... ' % title, 2)
self.write_out(self.style.NOTICE('SKIPPED (not a post)\n'), 2) | Loops over items and finds entries to import;
an entry needs to have 'post_type' set to 'post' and
have content. | Below is the the instruction that describes the task:
### Input:
Loops over items and finds entries to import;
an entry needs to have 'post_type' set to 'post' and
have content.
### Response:
def import_entries(self, items):
"""
Loops over items and finds entries to import;
an entry needs to have 'post_type' set to 'post' and
have content.
"""
self.write_out(self.style.STEP('- Importing entries\n'))
for item_node in items:
title = (item_node.find('title').text or '')[:255]
post_type = item_node.find('{%s}post_type' % WP_NS).text
content = item_node.find(
'{http://purl.org/rss/1.0/modules/content/}encoded').text
if post_type == 'post' and content and title:
self.write_out('> %s... ' % title)
entry, created = self.import_entry(title, content, item_node)
if created:
self.write_out(self.style.ITEM('OK\n'))
image_id = self.find_image_id(
item_node.findall('{%s}postmeta' % WP_NS))
if image_id:
self.import_image(entry, items, image_id)
self.import_comments(entry, item_node.findall(
'{%s}comment' % WP_NS))
else:
self.write_out(self.style.NOTICE(
'SKIPPED (already imported)\n'))
else:
self.write_out('> %s... ' % title, 2)
self.write_out(self.style.NOTICE('SKIPPED (not a post)\n'), 2) |
def mobile_sign(self, id_code, country, phone_nr, language=None, signing_profile='LT_TM'):
""" This can be used to add a signature to existing data files
WARNING: Must have at least one datafile in the session
"""
if not (self.container and isinstance(self.container, PreviouslyCreatedContainer)):
assert self.data_files, 'To use MobileSign endpoint the application must ' \
'add at least one data file to users session'
response = self.__invoke('MobileSign', {
'SignerIDCode': id_code,
'SignersCountry': country,
'SignerPhoneNo': phone_nr,
'Language': self.parse_language(language),
'Role': SkipValue,
'City': SkipValue,
'StateOrProvince': SkipValue,
'PostalCode': SkipValue,
'CountryName': SkipValue,
'ServiceName': self.service_name,
'AdditionalDataToBeDisplayed': self.mobile_message,
# Either LT or LT_TM, see: http://sk-eid.github.io/dds-documentation/api/api_docs/#mobilesign
'SigningProfile': signing_profile,
'MessagingMode': 'asynchClientServer',
'AsyncConfiguration': SkipValue,
'ReturnDocInfo': SkipValue,
'ReturnDocData': SkipValue,
})
return response | This can be used to add a signature to existing data files
WARNING: Must have at least one datafile in the session | Below is the the instruction that describes the task:
### Input:
This can be used to add a signature to existing data files
WARNING: Must have at least one datafile in the session
### Response:
def mobile_sign(self, id_code, country, phone_nr, language=None, signing_profile='LT_TM'):
""" This can be used to add a signature to existing data files
WARNING: Must have at least one datafile in the session
"""
if not (self.container and isinstance(self.container, PreviouslyCreatedContainer)):
assert self.data_files, 'To use MobileSign endpoint the application must ' \
'add at least one data file to users session'
response = self.__invoke('MobileSign', {
'SignerIDCode': id_code,
'SignersCountry': country,
'SignerPhoneNo': phone_nr,
'Language': self.parse_language(language),
'Role': SkipValue,
'City': SkipValue,
'StateOrProvince': SkipValue,
'PostalCode': SkipValue,
'CountryName': SkipValue,
'ServiceName': self.service_name,
'AdditionalDataToBeDisplayed': self.mobile_message,
# Either LT or LT_TM, see: http://sk-eid.github.io/dds-documentation/api/api_docs/#mobilesign
'SigningProfile': signing_profile,
'MessagingMode': 'asynchClientServer',
'AsyncConfiguration': SkipValue,
'ReturnDocInfo': SkipValue,
'ReturnDocData': SkipValue,
})
return response |
def __display_header(self, stat_display):
"""Display the firsts lines (header) in the Curses interface.
system + ip + uptime
(cloud)
"""
# First line
self.new_line()
self.space_between_column = 0
l_uptime = (self.get_stats_display_width(stat_display["system"]) +
self.get_stats_display_width(stat_display["ip"]) +
self.get_stats_display_width(stat_display["uptime"]) + 1)
self.display_plugin(
stat_display["system"],
display_optional=(self.screen.getmaxyx()[1] >= l_uptime))
self.space_between_column = 3
self.new_column()
self.display_plugin(stat_display["ip"])
self.new_column()
self.display_plugin(
stat_display["uptime"],
add_space=-(self.get_stats_display_width(stat_display["cloud"]) != 0))
# Second line (optional)
self.init_column()
self.new_line()
self.display_plugin(stat_display["cloud"]) | Display the firsts lines (header) in the Curses interface.
system + ip + uptime
(cloud) | Below is the the instruction that describes the task:
### Input:
Display the first lines (header) in the Curses interface.
system + ip + uptime
(cloud)
### Response:
def __display_header(self, stat_display):
"""Display the firsts lines (header) in the Curses interface.
system + ip + uptime
(cloud)
"""
# First line
self.new_line()
self.space_between_column = 0
l_uptime = (self.get_stats_display_width(stat_display["system"]) +
self.get_stats_display_width(stat_display["ip"]) +
self.get_stats_display_width(stat_display["uptime"]) + 1)
self.display_plugin(
stat_display["system"],
display_optional=(self.screen.getmaxyx()[1] >= l_uptime))
self.space_between_column = 3
self.new_column()
self.display_plugin(stat_display["ip"])
self.new_column()
self.display_plugin(
stat_display["uptime"],
add_space=-(self.get_stats_display_width(stat_display["cloud"]) != 0))
# Second line (optional)
self.init_column()
self.new_line()
self.display_plugin(stat_display["cloud"]) |
def _step4func(self, samples, force, ipyclient):
""" hidden wrapped function to start step 4 """
if self._headers:
print("\n Step 4: Joint estimation of error rate and heterozygosity")
## Get sample objects from list of strings
samples = _get_samples(self, samples)
## Check if all/none in the right state
if not self._samples_precheck(samples, 4, force):
raise IPyradError(FIRST_RUN_3)
elif not force:
## skip if all are finished
if all([i.stats.state >= 4 for i in samples]):
print(JOINTS_EXIST.format(len(samples)))
return
## send to function
assemble.jointestimate.run(self, samples, force, ipyclient) | hidden wrapped function to start step 4 | Below is the the instruction that describes the task:
### Input:
hidden wrapped function to start step 4
### Response:
def _step4func(self, samples, force, ipyclient):
""" hidden wrapped function to start step 4 """
if self._headers:
print("\n Step 4: Joint estimation of error rate and heterozygosity")
## Get sample objects from list of strings
samples = _get_samples(self, samples)
## Check if all/none in the right state
if not self._samples_precheck(samples, 4, force):
raise IPyradError(FIRST_RUN_3)
elif not force:
## skip if all are finished
if all([i.stats.state >= 4 for i in samples]):
print(JOINTS_EXIST.format(len(samples)))
return
## send to function
assemble.jointestimate.run(self, samples, force, ipyclient) |
def _ppf(self, q, left, right, cache):
"""
Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.]
"""
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return left**right
else:
out = evaluation.evaluate_inverse(right, q, cache=cache)
out = numpy.where(left < 0, 1-out, out)
out = left**out
return out
right = right + numpy.zeros(q.shape)
q = numpy.where(right < 0, 1-q, q)
out = evaluation.evaluate_inverse(left, q, cache=cache)**right
return out | Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.] | Below is the the instruction that describes the task:
### Input:
Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.]
### Response:
def _ppf(self, q, left, right, cache):
"""
Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Pow(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[0.01 0.04 0.81]
>>> print(chaospy.Pow(chaospy.Uniform(1, 2), -1).inv([0.1, 0.2, 0.9]))
[0.52631579 0.55555556 0.90909091]
>>> print(chaospy.Pow(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[1.07177346 1.14869835 1.86606598]
>>> print(chaospy.Pow(2, chaospy.Uniform(-1, 0)).inv([0.1, 0.2, 0.9]))
[0.53588673 0.57434918 0.93303299]
>>> print(chaospy.Pow(2, 3).inv([0.1, 0.2, 0.9]))
[8. 8. 8.]
"""
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise StochasticallyDependentError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return left**right
else:
out = evaluation.evaluate_inverse(right, q, cache=cache)
out = numpy.where(left < 0, 1-out, out)
out = left**out
return out
right = right + numpy.zeros(q.shape)
q = numpy.where(right < 0, 1-q, q)
out = evaluation.evaluate_inverse(left, q, cache=cache)**right
return out |
def decode(self, data):
"""
Decode an armoured string into a chunk. The decoded output is
null-terminated, so it may be treated as a string, if that's what
it was prior to encoding.
"""
return Zchunk(lib.zarmour_decode(self._as_parameter_, data), True) | Decode an armoured string into a chunk. The decoded output is
null-terminated, so it may be treated as a string, if that's what
it was prior to encoding. | Below is the the instruction that describes the task:
### Input:
Decode an armoured string into a chunk. The decoded output is
null-terminated, so it may be treated as a string, if that's what
it was prior to encoding.
### Response:
def decode(self, data):
"""
Decode an armoured string into a chunk. The decoded output is
null-terminated, so it may be treated as a string, if that's what
it was prior to encoding.
"""
return Zchunk(lib.zarmour_decode(self._as_parameter_, data), True) |
def inject_basic_program(self, ascii_listing):
"""
save the given ASCII BASIC program listing into the emulator RAM.
"""
program_start = self.cpu.memory.read_word(
self.machine_api.PROGRAM_START_ADDR
)
tokens = self.machine_api.ascii_listing2program_dump(ascii_listing)
self.cpu.memory.load(program_start, tokens)
log.critical("BASIC program injected into Memory.")
# Update the BASIC addresses:
program_end = program_start + len(tokens)
self.cpu.memory.write_word(self.machine_api.VARIABLES_START_ADDR, program_end)
self.cpu.memory.write_word(self.machine_api.ARRAY_START_ADDR, program_end)
self.cpu.memory.write_word(self.machine_api.FREE_SPACE_START_ADDR, program_end)
log.critical("BASIC addresses updated.") | save the given ASCII BASIC program listing into the emulator RAM. | Below is the the instruction that describes the task:
### Input:
save the given ASCII BASIC program listing into the emulator RAM.
### Response:
def inject_basic_program(self, ascii_listing):
"""
save the given ASCII BASIC program listing into the emulator RAM.
"""
program_start = self.cpu.memory.read_word(
self.machine_api.PROGRAM_START_ADDR
)
tokens = self.machine_api.ascii_listing2program_dump(ascii_listing)
self.cpu.memory.load(program_start, tokens)
log.critical("BASIC program injected into Memory.")
# Update the BASIC addresses:
program_end = program_start + len(tokens)
self.cpu.memory.write_word(self.machine_api.VARIABLES_START_ADDR, program_end)
self.cpu.memory.write_word(self.machine_api.ARRAY_START_ADDR, program_end)
self.cpu.memory.write_word(self.machine_api.FREE_SPACE_START_ADDR, program_end)
log.critical("BASIC addresses updated.") |
def createmergerequest(self, project_id, sourcebranch, targetbranch,
title, target_project_id=None, assignee_id=None):
"""
Create a new merge request.
:param project_id: ID of the project originating the merge request
:param sourcebranch: name of the branch to merge from
:param targetbranch: name of the branch to merge to
:param title: Title of the merge request
:param assignee_id: Assignee user ID
:return: dict of the new merge request
"""
data = {
'source_branch': sourcebranch,
'target_branch': targetbranch,
'title': title,
'assignee_id': assignee_id,
'target_project_id': target_project_id
}
request = requests.post(
'{0}/{1}/merge_requests'.format(self.projects_url, project_id),
data=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 201:
return request.json()
else:
return False | Create a new merge request.
:param project_id: ID of the project originating the merge request
:param sourcebranch: name of the branch to merge from
:param targetbranch: name of the branch to merge to
:param title: Title of the merge request
:param assignee_id: Assignee user ID
:return: dict of the new merge request | Below is the the instruction that describes the task:
### Input:
Create a new merge request.
:param project_id: ID of the project originating the merge request
:param sourcebranch: name of the branch to merge from
:param targetbranch: name of the branch to merge to
:param title: Title of the merge request
:param assignee_id: Assignee user ID
:return: dict of the new merge request
### Response:
def createmergerequest(self, project_id, sourcebranch, targetbranch,
title, target_project_id=None, assignee_id=None):
"""
Create a new merge request.
:param project_id: ID of the project originating the merge request
:param sourcebranch: name of the branch to merge from
:param targetbranch: name of the branch to merge to
:param title: Title of the merge request
:param assignee_id: Assignee user ID
:return: dict of the new merge request
"""
data = {
'source_branch': sourcebranch,
'target_branch': targetbranch,
'title': title,
'assignee_id': assignee_id,
'target_project_id': target_project_id
}
request = requests.post(
'{0}/{1}/merge_requests'.format(self.projects_url, project_id),
data=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 201:
return request.json()
else:
return False |
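An illustrative call; "gl" is assumed to be an authenticated client exposing the method above, and the project ID, branch names, assignee and response keys are made up.

mr = gl.createmergerequest(42, 'feature/login-form', 'master',
                           'Add login form', assignee_id=7)
if mr:
    print(mr['id'], mr['state'])   # assumed keys in the GitLab response
else:
    print('merge request was not created')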
def pre_save(cls, sender, instance, *args, **kwargs):
"""Pull constant_contact_id out of data.
"""
instance.constant_contact_id = str(instance.data['id']) | Pull constant_contact_id out of data. | Below is the the instruction that describes the task:
### Input:
Pull constant_contact_id out of data.
### Response:
def pre_save(cls, sender, instance, *args, **kwargs):
"""Pull constant_contact_id out of data.
"""
instance.constant_contact_id = str(instance.data['id']) |
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm | This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index)
:return frame: this will return the frame of the calling function | Below is the the instruction that describes the task:
### Input:
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function
### Response:
def func_frame(function_index, function_name=None):
"""
This will return the class_name and function_name of the
function traced back two functions.
:param function_index: int of how many frames back the program
should look (2 will give the parent of the caller)
:param function_name: str of what function to look for (should
not be used with function_index
:return frame: this will return the frame of the calling function """
frm = inspect.currentframe()
if function_name is not None:
function_name = function_name.split('*')[0] # todo replace this
# todo with regex
for i in range(1000):
if frm.f_code.co_name.startswith(function_name):
break
frm = frm.f_back
else:
for i in range(function_index):
frm = frm.f_back
try: # this is pycharm debugger inserting middleware
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
except:
pass
return frm |
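The frame walking above can be reproduced in a small standalone sketch (not taken from the library) that mimics func_frame(2) using only the standard inspect module:
import inspect

def caller_frame():
    # Equivalent of func_frame(2): skip this frame and the direct caller.
    frm = inspect.currentframe()
    for _ in range(2):
        frm = frm.f_back
    return frm

def helper():
    return caller_frame().f_code.co_name  # name of whoever called helper()

def main():
    return helper()

print(main())  # prints 'main'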
def _add_indent(self, val, indent_count):
'''
add whitespace to the beginning of each line of val
link -- http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/
val -- string
indent -- integer -- how much whitespace we want in front of each line of val
return -- string -- val with more whitespace
'''
if isinstance(val, Value):
val = val.string_value()
return String(val).indent(indent_count) | add whitespace to the beginning of each line of val
link -- http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/
val -- string
indent -- integer -- how much whitespace we want in front of each line of val
return -- string -- val with more whitespace | Below is the the instruction that describes the task:
### Input:
add whitespace to the beginning of each line of val
link -- http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/
val -- string
indent -- integer -- how much whitespace we want in front of each line of val
return -- string -- val with more whitespace
### Response:
def _add_indent(self, val, indent_count):
'''
add whitespace to the beginning of each line of val
link -- http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/
val -- string
indent -- integer -- how much whitespace we want in front of each line of val
return -- string -- val with more whitespace
'''
if isinstance(val, Value):
val = val.string_value()
return String(val).indent(indent_count) |
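For comparison, the standard library gives the same effect without the Value/String helpers used above; a minimal standalone sketch:
import textwrap

text = 'first line\nsecond line'
print(textwrap.indent(text, ' ' * 4))  # prepends four spaces to every line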
async def _notify_event_internal(self, conn_string, name, event):
"""Notify that an event has occured.
This method will send a notification and ensure that all callbacks
registered for it have completed by the time it returns. In
particular, if the callbacks are awaitable, this method will await
them before returning. The order in which the callbacks are called
is undefined.
This is a low level method that is not intended to be called directly.
You should use the high level public notify_* methods for each of the
types of events to ensure consistency in how the event objects are
created.
Args:
conn_string (str): The connection string for the device that the
event is associated with.
name (str): The name of the event. Must be in SUPPORTED_EVENTS.
event (object): The event object. The type of this object will
depend on what is being notified.
"""
try:
self._currently_notifying = True
conn_id = self._get_conn_id(conn_string)
event_maps = self._monitors.get(conn_string, {})
wildcard_maps = self._monitors.get(None, {})
wildcard_handlers = wildcard_maps.get(name, {})
event_handlers = event_maps.get(name, {})
for handler, func in itertools.chain(event_handlers.items(), wildcard_handlers.items()):
try:
result = func(conn_string, conn_id, name, event)
if inspect.isawaitable(result):
await result
except: #pylint:disable=bare-except;This is a background function and we are logging exceptions
self._logger.warning("Error calling notification callback id=%s, func=%s", handler, func, exc_info=True)
finally:
for action in self._deferred_adjustments:
self._adjust_monitor_internal(*action)
self._deferred_adjustments = []
            self._currently_notifying = False | Notify that an event has occurred.
This method will send a notification and ensure that all callbacks
registered for it have completed by the time it returns. In
particular, if the callbacks are awaitable, this method will await
them before returning. The order in which the callbacks are called
is undefined.
This is a low level method that is not intended to be called directly.
You should use the high level public notify_* methods for each of the
types of events to ensure consistency in how the event objects are
created.
Args:
conn_string (str): The connection string for the device that the
event is associated with.
name (str): The name of the event. Must be in SUPPORTED_EVENTS.
event (object): The event object. The type of this object will
depend on what is being notified. | Below is the the instruction that describes the task:
### Input:
Notify that an event has occurred.
This method will send a notification and ensure that all callbacks
registered for it have completed by the time it returns. In
particular, if the callbacks are awaitable, this method will await
them before returning. The order in which the callbacks are called
is undefined.
This is a low level method that is not intended to be called directly.
You should use the high level public notify_* methods for each of the
types of events to ensure consistency in how the event objects are
created.
Args:
conn_string (str): The connection string for the device that the
event is associated with.
name (str): The name of the event. Must be in SUPPORTED_EVENTS.
event (object): The event object. The type of this object will
depend on what is being notified.
### Response:
async def _notify_event_internal(self, conn_string, name, event):
"""Notify that an event has occured.
This method will send a notification and ensure that all callbacks
registered for it have completed by the time it returns. In
particular, if the callbacks are awaitable, this method will await
them before returning. The order in which the callbacks are called
is undefined.
This is a low level method that is not intended to be called directly.
You should use the high level public notify_* methods for each of the
types of events to ensure consistency in how the event objects are
created.
Args:
conn_string (str): The connection string for the device that the
event is associated with.
name (str): The name of the event. Must be in SUPPORTED_EVENTS.
event (object): The event object. The type of this object will
depend on what is being notified.
"""
try:
self._currently_notifying = True
conn_id = self._get_conn_id(conn_string)
event_maps = self._monitors.get(conn_string, {})
wildcard_maps = self._monitors.get(None, {})
wildcard_handlers = wildcard_maps.get(name, {})
event_handlers = event_maps.get(name, {})
for handler, func in itertools.chain(event_handlers.items(), wildcard_handlers.items()):
try:
result = func(conn_string, conn_id, name, event)
if inspect.isawaitable(result):
await result
except: #pylint:disable=bare-except;This is a background function and we are logging exceptions
self._logger.warning("Error calling notification callback id=%s, func=%s", handler, func, exc_info=True)
finally:
for action in self._deferred_adjustments:
self._adjust_monitor_internal(*action)
self._deferred_adjustments = []
self._currently_notifying = False |
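The dispatch pattern above, calling each handler and awaiting only those that return an awaitable, can be isolated in a short self-contained asyncio sketch; the handler names and arguments are illustrative:
import asyncio
import inspect

async def dispatch(callbacks, conn_string, name, event):
    # Call every handler; await the ones that hand back coroutines or awaitables.
    for func in callbacks:
        result = func(conn_string, name, event)
        if inspect.isawaitable(result):
            await result

def sync_handler(conn_string, name, event):
    print('sync handler:', conn_string, name, event)

async def async_handler(conn_string, name, event):
    await asyncio.sleep(0)
    print('async handler:', conn_string, name, event)

asyncio.run(dispatch([sync_handler, async_handler], 'device-1', 'connection', {'ok': True}))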
def parse_pdb(self):
"""Extracts additional information from PDB files.
I. When reading in a PDB file, OpenBabel numbers ATOMS and HETATOMS continously.
In PDB files, TER records are also counted, leading to a different numbering system.
This functions reads in a PDB file and provides a mapping as a dictionary.
II. Additionally, it returns a list of modified residues.
III. Furthermore, covalent linkages between ligands and protein residues/other ligands are identified
IV. Alternative conformations
"""
if self.as_string:
fil = self.pdbpath.rstrip('\n').split('\n') # Removing trailing newline character
else:
f = read(self.pdbpath)
fil = f.readlines()
f.close()
corrected_lines = []
i, j = 0, 0 # idx and PDB numbering
d = {}
modres = set()
covalent = []
alt = []
previous_ter = False
# Standard without fixing
if not config.NOFIX:
if not config.PLUGIN_MODE:
lastnum = 0 # Atom numbering (has to be consecutive)
other_models = False
for line in fil:
                    if not other_models:  # Only consider the first model in an NMR structure
corrected_line, newnum = self.fix_pdbline(line, lastnum)
if corrected_line is not None:
if corrected_line.startswith('MODEL'):
try: # Get number of MODEL (1,2,3)
model_num = int(corrected_line[10:14])
if model_num > 1: # MODEL 2,3,4 etc.
other_models = True
except ValueError:
write_message("Ignoring invalid MODEL entry: %s\n" % corrected_line, mtype='debug')
corrected_lines.append(corrected_line)
lastnum = newnum
corrected_pdb = ''.join(corrected_lines)
else:
corrected_pdb = self.pdbpath
corrected_lines = fil
else:
corrected_pdb = self.pdbpath
corrected_lines = fil
for line in corrected_lines:
if line.startswith(("ATOM", "HETATM")):
# Retrieve alternate conformations
atomid, location = int(line[6:11]), line[16]
location = 'A' if location == ' ' else location
if location != 'A':
alt.append(atomid)
if not previous_ter:
i += 1
j += 1
else:
i += 1
j += 2
d[i] = j
previous_ter = False
# Numbering Changes at TER records
if line.startswith("TER"):
previous_ter = True
# Get modified residues
if line.startswith("MODRES"):
modres.add(line[12:15].strip())
# Get covalent linkages between ligands
if line.startswith("LINK"):
covalent.append(self.get_linkage(line))
return d, modres, covalent, alt, corrected_pdb | Extracts additional information from PDB files.
I. When reading in a PDB file, OpenBabel numbers ATOMS and HETATOMS continously.
In PDB files, TER records are also counted, leading to a different numbering system.
This functions reads in a PDB file and provides a mapping as a dictionary.
II. Additionally, it returns a list of modified residues.
III. Furthermore, covalent linkages between ligands and protein residues/other ligands are identified
IV. Alternative conformations | Below is the the instruction that describes the task:
### Input:
Extracts additional information from PDB files.
I. When reading in a PDB file, OpenBabel numbers ATOMS and HETATOMS continously.
In PDB files, TER records are also counted, leading to a different numbering system.
This functions reads in a PDB file and provides a mapping as a dictionary.
II. Additionally, it returns a list of modified residues.
III. Furthermore, covalent linkages between ligands and protein residues/other ligands are identified
IV. Alternative conformations
### Response:
def parse_pdb(self):
"""Extracts additional information from PDB files.
I. When reading in a PDB file, OpenBabel numbers ATOMS and HETATOMS continously.
In PDB files, TER records are also counted, leading to a different numbering system.
This functions reads in a PDB file and provides a mapping as a dictionary.
II. Additionally, it returns a list of modified residues.
III. Furthermore, covalent linkages between ligands and protein residues/other ligands are identified
IV. Alternative conformations
"""
if self.as_string:
fil = self.pdbpath.rstrip('\n').split('\n') # Removing trailing newline character
else:
f = read(self.pdbpath)
fil = f.readlines()
f.close()
corrected_lines = []
i, j = 0, 0 # idx and PDB numbering
d = {}
modres = set()
covalent = []
alt = []
previous_ter = False
# Standard without fixing
if not config.NOFIX:
if not config.PLUGIN_MODE:
lastnum = 0 # Atom numbering (has to be consecutive)
other_models = False
for line in fil:
                    if not other_models:  # Only consider the first model in an NMR structure
corrected_line, newnum = self.fix_pdbline(line, lastnum)
if corrected_line is not None:
if corrected_line.startswith('MODEL'):
try: # Get number of MODEL (1,2,3)
model_num = int(corrected_line[10:14])
if model_num > 1: # MODEL 2,3,4 etc.
other_models = True
except ValueError:
write_message("Ignoring invalid MODEL entry: %s\n" % corrected_line, mtype='debug')
corrected_lines.append(corrected_line)
lastnum = newnum
corrected_pdb = ''.join(corrected_lines)
else:
corrected_pdb = self.pdbpath
corrected_lines = fil
else:
corrected_pdb = self.pdbpath
corrected_lines = fil
for line in corrected_lines:
if line.startswith(("ATOM", "HETATM")):
# Retrieve alternate conformations
atomid, location = int(line[6:11]), line[16]
location = 'A' if location == ' ' else location
if location != 'A':
alt.append(atomid)
if not previous_ter:
i += 1
j += 1
else:
i += 1
j += 2
d[i] = j
previous_ter = False
# Numbering Changes at TER records
if line.startswith("TER"):
previous_ter = True
# Get modified residues
if line.startswith("MODRES"):
modres.add(line[12:15].strip())
# Get covalent linkages between ligands
if line.startswith("LINK"):
covalent.append(self.get_linkage(line))
return d, modres, covalent, alt, corrected_pdb |
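The idx-to-PDB-serial mapping built above (a TER record consumes one serial number, so records after it are shifted by an extra unit) can be illustrated on a toy record list; this sketch covers only that bookkeeping, not actual PDB parsing:
# Toy illustration of the numbering offset introduced by TER records.
records = ['ATOM', 'ATOM', 'TER', 'HETATM', 'HETATM']
mapping = {}
i = j = 0              # i: continuous index, j: PDB serial numbering
previous_ter = False
for rec in records:
    if rec in ('ATOM', 'HETATM'):
        i += 1
        j += 2 if previous_ter else 1  # the TER record used up one serial
        mapping[i] = j
        previous_ter = False
    elif rec == 'TER':
        previous_ter = True
print(mapping)  # {1: 1, 2: 2, 3: 4, 4: 5}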
def set_data(self, data_np, metadata=None, order=None, astype=None):
"""Use this method to SHARE (not copy) the incoming array.
"""
if astype:
data = data_np.astype(astype, copy=False)
else:
data = data_np
self._data = data
self._calc_order(order)
if metadata:
self.update_metadata(metadata)
self._set_minmax()
self.make_callback('modified') | Use this method to SHARE (not copy) the incoming array. | Below is the the instruction that describes the task:
### Input:
Use this method to SHARE (not copy) the incoming array.
### Response:
def set_data(self, data_np, metadata=None, order=None, astype=None):
"""Use this method to SHARE (not copy) the incoming array.
"""
if astype:
data = data_np.astype(astype, copy=False)
else:
data = data_np
self._data = data
self._calc_order(order)
if metadata:
self.update_metadata(metadata)
self._set_minmax()
self.make_callback('modified') |
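The share-not-copy behaviour rests on NumPy's astype(..., copy=False) returning the original buffer when no conversion is required; a minimal sketch, independent of the class above:
import numpy as np

data = np.arange(6, dtype=np.float64).reshape(2, 3)
shared = data.astype(np.float64, copy=False)     # same dtype: no copy is made
converted = data.astype(np.float32, copy=False)  # dtype change forces a new array
print(shared is data)                     # True: the array is shared
print(np.shares_memory(converted, data))  # False: conversion produced a copy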
def sfs(dac, n=None):
"""Compute the site frequency spectrum given derived allele counts at
a set of biallelic variants.
Parameters
----------
dac : array_like, int, shape (n_variants,)
Array of derived allele counts.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs : ndarray, int, shape (n_chromosomes,)
Array where the kth element is the number of variant sites with k
derived alleles.
"""
# check input
dac, n = _check_dac_n(dac, n)
# need platform integer for bincount
dac = dac.astype(int, copy=False)
# compute site frequency spectrum
x = n + 1
s = np.bincount(dac, minlength=x)
return s | Compute the site frequency spectrum given derived allele counts at
a set of biallelic variants.
Parameters
----------
dac : array_like, int, shape (n_variants,)
Array of derived allele counts.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs : ndarray, int, shape (n_chromosomes,)
Array where the kth element is the number of variant sites with k
derived alleles. | Below is the the instruction that describes the task:
### Input:
Compute the site frequency spectrum given derived allele counts at
a set of biallelic variants.
Parameters
----------
dac : array_like, int, shape (n_variants,)
Array of derived allele counts.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs : ndarray, int, shape (n_chromosomes,)
Array where the kth element is the number of variant sites with k
derived alleles.
### Response:
def sfs(dac, n=None):
"""Compute the site frequency spectrum given derived allele counts at
a set of biallelic variants.
Parameters
----------
dac : array_like, int, shape (n_variants,)
Array of derived allele counts.
n : int, optional
The total number of chromosomes called.
Returns
-------
sfs : ndarray, int, shape (n_chromosomes,)
Array where the kth element is the number of variant sites with k
derived alleles.
"""
# check input
dac, n = _check_dac_n(dac, n)
# need platform integer for bincount
dac = dac.astype(int, copy=False)
# compute site frequency spectrum
x = n + 1
s = np.bincount(dac, minlength=x)
return s |
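A short worked example of the spectrum itself: with n = 4 chromosomes, bin k counts the variants carrying k derived alleles. The sketch below uses plain NumPy and mirrors the bincount call above:
import numpy as np

dac = np.array([1, 1, 2, 3, 1, 0, 4])  # derived allele counts at 7 variants
n = 4                                  # total number of chromosomes called
s = np.bincount(dac.astype(int), minlength=n + 1)
print(s)  # [1 3 1 1 1]: one site with no derived allele, three singletons, one doubleton, one tripleton, one fixed site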
def load_metadata_from_desc_file(self, desc_file, partition='train',
max_duration=16.0,):
""" Read metadata from the description file
(possibly takes long, depending on the filesize)
Params:
desc_file (str): Path to a JSON-line file that contains labels and
paths to the audio files
partition (str): One of 'train', 'validation' or 'test'
max_duration (float): In seconds, the maximum duration of
utterances to train or test on
"""
logger = logUtil.getlogger()
logger.info('Reading description file: {} for partition: {}'
.format(desc_file, partition))
audio_paths, durations, texts = [], [], []
with open(desc_file) as json_line_file:
for line_num, json_line in enumerate(json_line_file):
try:
spec = json.loads(json_line)
if float(spec['duration']) > max_duration:
continue
audio_paths.append(spec['key'])
durations.append(float(spec['duration']))
texts.append(spec['text'])
except Exception as e:
# Change to (KeyError, ValueError) or
# (KeyError,json.decoder.JSONDecodeError), depending on
# json module version
logger.warn('Error reading line #{}: {}'
.format(line_num, json_line))
logger.warn(str(e))
if partition == 'train':
self.count = len(audio_paths)
self.train_audio_paths = audio_paths
self.train_durations = durations
self.train_texts = texts
elif partition == 'validation':
self.val_audio_paths = audio_paths
self.val_durations = durations
self.val_texts = texts
self.val_count = len(audio_paths)
elif partition == 'test':
self.test_audio_paths = audio_paths
self.test_durations = durations
self.test_texts = texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test") | Read metadata from the description file
(possibly takes long, depending on the filesize)
Params:
desc_file (str): Path to a JSON-line file that contains labels and
paths to the audio files
partition (str): One of 'train', 'validation' or 'test'
max_duration (float): In seconds, the maximum duration of
utterances to train or test on | Below is the the instruction that describes the task:
### Input:
Read metadata from the description file
(possibly takes long, depending on the filesize)
Params:
desc_file (str): Path to a JSON-line file that contains labels and
paths to the audio files
partition (str): One of 'train', 'validation' or 'test'
max_duration (float): In seconds, the maximum duration of
utterances to train or test on
### Response:
def load_metadata_from_desc_file(self, desc_file, partition='train',
max_duration=16.0,):
""" Read metadata from the description file
(possibly takes long, depending on the filesize)
Params:
desc_file (str): Path to a JSON-line file that contains labels and
paths to the audio files
partition (str): One of 'train', 'validation' or 'test'
max_duration (float): In seconds, the maximum duration of
utterances to train or test on
"""
logger = logUtil.getlogger()
logger.info('Reading description file: {} for partition: {}'
.format(desc_file, partition))
audio_paths, durations, texts = [], [], []
with open(desc_file) as json_line_file:
for line_num, json_line in enumerate(json_line_file):
try:
spec = json.loads(json_line)
if float(spec['duration']) > max_duration:
continue
audio_paths.append(spec['key'])
durations.append(float(spec['duration']))
texts.append(spec['text'])
except Exception as e:
# Change to (KeyError, ValueError) or
# (KeyError,json.decoder.JSONDecodeError), depending on
# json module version
logger.warn('Error reading line #{}: {}'
.format(line_num, json_line))
logger.warn(str(e))
if partition == 'train':
self.count = len(audio_paths)
self.train_audio_paths = audio_paths
self.train_durations = durations
self.train_texts = texts
elif partition == 'validation':
self.val_audio_paths = audio_paths
self.val_durations = durations
self.val_texts = texts
self.val_count = len(audio_paths)
elif partition == 'test':
self.test_audio_paths = audio_paths
self.test_durations = durations
self.test_texts = texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test") |
def translate(self, body, params=None):
"""
`<Translate SQL into Elasticsearch queries>`_
:arg body: Specify the query in the `query` element.
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"POST", "/_sql/translate", params=params, body=body
) | `<Translate SQL into Elasticsearch queries>`_
:arg body: Specify the query in the `query` element. | Below is the the instruction that describes the task:
### Input:
`<Translate SQL into Elasticsearch queries>`_
:arg body: Specify the query in the `query` element.
### Response:
def translate(self, body, params=None):
"""
`<Translate SQL into Elasticsearch queries>`_
:arg body: Specify the query in the `query` element.
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"POST", "/_sql/translate", params=params, body=body
) |
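A usage sketch follows; the client construction and the sql attribute path are assumptions that differ between elasticsearch-py versions, so treat it as illustrative only:
from elasticsearch import Elasticsearch  # assumed import; exact client API varies by version

client = Elasticsearch('http://localhost:9200')  # placeholder host
dsl = client.sql.translate(body={'query': 'SELECT name FROM accounts LIMIT 5'})  # assumed attribute path
print(dsl)  # the equivalent Elasticsearch query DSL as a dict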
def copy(self):
""" Prepare and paste self templates. """
templates = self.prepare_templates()
if self.params.interactive:
keys = list(self.parser.default)
for key in keys:
if key.startswith('_'):
continue
prompt = "{0} (default is \"{1}\")? ".format(
key, self.parser.default[key])
if _compat.PY2:
value = raw_input(prompt.encode('utf-8')).decode('utf-8')
else:
value = input(prompt.encode('utf-8'))
value = value.strip()
if value:
self.parser.default[key] = value
self.parser.default['templates'] = tt = ','.join(
t.name for t in templates)
logging.warning("Paste templates: {0}".format(tt))
self.make_directory(self.params.TARGET)
logging.debug("\nDefault context:\n----------------")
logging.debug(
''.join('{0:<15} {1}\n'.format(*v)
for v in self.parser.default.items())
)
return [t.paste(
**dict(self.parser.default.items())) for t in templates] | Prepare and paste self templates. | Below is the the instruction that describes the task:
### Input:
Prepare and paste self templates.
### Response:
def copy(self):
""" Prepare and paste self templates. """
templates = self.prepare_templates()
if self.params.interactive:
keys = list(self.parser.default)
for key in keys:
if key.startswith('_'):
continue
prompt = "{0} (default is \"{1}\")? ".format(
key, self.parser.default[key])
if _compat.PY2:
value = raw_input(prompt.encode('utf-8')).decode('utf-8')
else:
value = input(prompt.encode('utf-8'))
value = value.strip()
if value:
self.parser.default[key] = value
self.parser.default['templates'] = tt = ','.join(
t.name for t in templates)
logging.warning("Paste templates: {0}".format(tt))
self.make_directory(self.params.TARGET)
logging.debug("\nDefault context:\n----------------")
logging.debug(
''.join('{0:<15} {1}\n'.format(*v)
for v in self.parser.default.items())
)
return [t.paste(
**dict(self.parser.default.items())) for t in templates] |
def server_call(method, server, timeout=DEFAULT_TIMEOUT, verify_ssl=True, **parameters):
"""Makes a call to an un-authenticated method on a server
:param method: The method name.
:type method: str
:param server: The MyGeotab server.
:type server: str
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:type timeout: float
:param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
:type verify_ssl: bool
:param parameters: Additional parameters to send (for example, search=dict(id='b123') ).
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
:return: The result from the server.
"""
if method is None:
raise Exception("A method name must be specified")
if server is None:
raise Exception("A server (eg. my3.geotab.com) must be specified")
parameters = process_parameters(parameters)
return _query(server, method, parameters, timeout=timeout, verify_ssl=verify_ssl) | Makes a call to an un-authenticated method on a server
:param method: The method name.
:type method: str
:param server: The MyGeotab server.
:type server: str
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:type timeout: float
:param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
:type verify_ssl: bool
:param parameters: Additional parameters to send (for example, search=dict(id='b123') ).
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
:return: The result from the server. | Below is the the instruction that describes the task:
### Input:
Makes a call to an un-authenticated method on a server
:param method: The method name.
:type method: str
:param server: The MyGeotab server.
:type server: str
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:type timeout: float
:param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
:type verify_ssl: bool
:param parameters: Additional parameters to send (for example, search=dict(id='b123') ).
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
:return: The result from the server.
### Response:
def server_call(method, server, timeout=DEFAULT_TIMEOUT, verify_ssl=True, **parameters):
"""Makes a call to an un-authenticated method on a server
:param method: The method name.
:type method: str
:param server: The MyGeotab server.
:type server: str
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:type timeout: float
:param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
:type verify_ssl: bool
:param parameters: Additional parameters to send (for example, search=dict(id='b123') ).
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
:return: The result from the server.
"""
if method is None:
raise Exception("A method name must be specified")
if server is None:
raise Exception("A server (eg. my3.geotab.com) must be specified")
parameters = process_parameters(parameters)
return _query(server, method, parameters, timeout=timeout, verify_ssl=verify_ssl) |
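A usage sketch, assuming the function is importable as mygeotab.server_call and that the named method is accepted unauthenticated; both are illustrative assumptions:
import mygeotab  # assumed import path

info = mygeotab.server_call('GetVersion', server='my3.geotab.com', timeout=60)  # placeholder method/server
print(info)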