code (string, length 75–104k) | docstring (string, length 1–46.9k) | text (string, length 164–112k) |
---|---|---|
def from_axis_angle(self, axis, angle):
'''create a rotation matrix from axis and angle'''
ux = axis.x
uy = axis.y
uz = axis.z
ct = cos(angle)
st = sin(angle)
self.a.x = ct + (1-ct) * ux**2
self.a.y = ux*uy*(1-ct) - uz*st
self.a.z = ux*uz*(1-ct) + uy*st
self.b.x = uy*ux*(1-ct) + uz*st
self.b.y = ct + (1-ct) * uy**2
self.b.z = uy*uz*(1-ct) - ux*st
self.c.x = uz*ux*(1-ct) - uy*st
self.c.y = uz*uy*(1-ct) + ux*st
self.c.z = ct + (1-ct) * uz**2 | create a rotation matrix from axis and angle | Below is the instruction that describes the task:
### Input:
create a rotation matrix from axis and angle
### Response:
def from_axis_angle(self, axis, angle):
'''create a rotation matrix from axis and angle'''
ux = axis.x
uy = axis.y
uz = axis.z
ct = cos(angle)
st = sin(angle)
self.a.x = ct + (1-ct) * ux**2
self.a.y = ux*uy*(1-ct) - uz*st
self.a.z = ux*uz*(1-ct) + uy*st
self.b.x = uy*ux*(1-ct) + uz*st
self.b.y = ct + (1-ct) * uy**2
self.b.z = uy*uz*(1-ct) - ux*st
self.c.x = uz*ux*(1-ct) - uy*st
self.c.y = uz*uy*(1-ct) + ux*st
self.c.z = ct + (1-ct) * uz**2 |
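For reference, the nine assignments above implement the standard axis-angle (Rodrigues) rotation matrix, which assumes `axis` is a unit vector. With $c = \cos\theta$, $s = \sin\theta$ and $u = (u_x, u_y, u_z)$:

$$
R = c\,I + s\,[u]_\times + (1-c)\,u u^{\mathsf T}
= \begin{pmatrix}
c + u_x^2(1-c) & u_x u_y(1-c) - u_z s & u_x u_z(1-c) + u_y s \\
u_y u_x(1-c) + u_z s & c + u_y^2(1-c) & u_y u_z(1-c) - u_x s \\
u_z u_x(1-c) - u_y s & u_z u_y(1-c) + u_x s & c + u_z^2(1-c)
\end{pmatrix}
$$

where $[u]_\times$ is the skew-symmetric cross-product matrix of $u$; the code's rows `a`, `b`, `c` correspond to the three rows of $R$.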
def base_path(main_path, fmt):
"""Given a path and options for a format (ext, suffix, prefix), return the corresponding base path"""
if not fmt:
return os.path.splitext(main_path)[0]
fmt = long_form_one_format(fmt)
fmt_ext = fmt['extension']
suffix = fmt.get('suffix')
prefix = fmt.get('prefix')
base, ext = os.path.splitext(main_path)
if ext != fmt_ext:
raise InconsistentPath(u"Notebook path '{}' was expected to have extension '{}'".format(main_path, fmt_ext))
if suffix:
if not base.endswith(suffix):
raise InconsistentPath(u"Notebook name '{}' was expected to end with suffix '{}'".format(base, suffix))
base = base[:-len(suffix)]
if not prefix:
return base
prefix_dir, prefix_file_name = os.path.split(prefix)
notebook_dir, notebook_file_name = os.path.split(base)
sep = base[len(notebook_dir):-len(notebook_file_name)]
if prefix_file_name:
if not notebook_file_name.startswith(prefix_file_name):
raise InconsistentPath(u"Notebook name '{}' was expected to start with prefix '{}'"
.format(notebook_file_name, prefix_file_name))
notebook_file_name = notebook_file_name[len(prefix_file_name):]
if prefix_dir:
if not notebook_dir.endswith(prefix_dir):
raise InconsistentPath(u"Notebook directory '{}' was expected to end with directory prefix '{}'"
.format(notebook_dir, prefix_dir))
notebook_dir = notebook_dir[:-len(prefix_dir)]
if not notebook_dir:
return notebook_file_name
# Does notebook_dir end with a path separator?
if notebook_dir[-1:] == sep:
return notebook_dir + notebook_file_name
return notebook_dir + sep + notebook_file_name | Given a path and options for a format (ext, suffix, prefix), return the corresponding base path | Below is the instruction that describes the task:
### Input:
Given a path and options for a format (ext, suffix, prefix), return the corresponding base path
### Response:
def base_path(main_path, fmt):
"""Given a path and options for a format (ext, suffix, prefix), return the corresponding base path"""
if not fmt:
return os.path.splitext(main_path)[0]
fmt = long_form_one_format(fmt)
fmt_ext = fmt['extension']
suffix = fmt.get('suffix')
prefix = fmt.get('prefix')
base, ext = os.path.splitext(main_path)
if ext != fmt_ext:
raise InconsistentPath(u"Notebook path '{}' was expected to have extension '{}'".format(main_path, fmt_ext))
if suffix:
if not base.endswith(suffix):
raise InconsistentPath(u"Notebook name '{}' was expected to end with suffix '{}'".format(base, suffix))
base = base[:-len(suffix)]
if not prefix:
return base
prefix_dir, prefix_file_name = os.path.split(prefix)
notebook_dir, notebook_file_name = os.path.split(base)
sep = base[len(notebook_dir):-len(notebook_file_name)]
if prefix_file_name:
if not notebook_file_name.startswith(prefix_file_name):
raise InconsistentPath(u"Notebook name '{}' was expected to start with prefix '{}'"
.format(notebook_file_name, prefix_file_name))
notebook_file_name = notebook_file_name[len(prefix_file_name):]
if prefix_dir:
if not notebook_dir.endswith(prefix_dir):
raise InconsistentPath(u"Notebook directory '{}' was expected to end with directory prefix '{}'"
.format(notebook_dir, prefix_dir))
notebook_dir = notebook_dir[:-len(prefix_dir)]
if not notebook_dir:
return notebook_file_name
# Does notebook_dir end with a path separator?
if notebook_dir[-1:] == sep:
return notebook_dir + notebook_file_name
return notebook_dir + sep + notebook_file_name |
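To make the suffix/prefix handling concrete, here is a small sketch of the expected round-trips, with hypothetical file names and jupytext-style format dicts (a sketch of expected behavior, not verified against jupytext's own tests):

```python
# Hypothetical inputs/outputs for base_path (names are examples only):

# Extension only: the extension is checked and stripped.
#   base_path('notebooks/analysis.py', {'extension': '.py'})
#   -> 'notebooks/analysis'

# Suffix: '.nb' is additionally stripped from the stem.
#   base_path('notebooks/analysis.nb.py', {'extension': '.py', 'suffix': '.nb'})
#   -> 'notebooks/analysis'

# Directory prefix: 'scripts' is stripped from the directory part.
#   base_path('scripts/analysis.py', {'extension': '.py', 'prefix': 'scripts/'})
#   -> 'analysis'

# Mismatches raise InconsistentPath, e.g. a '.md' path checked against '.py'.
```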
def fetch_items(self, category, **kwargs):
"""Fetch Google hit items
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
logger.info("Fetching data for '%s'", self.keywords)
hits_raw = self.client.hits(self.keywords)
hits = self.__parse_hits(hits_raw)
yield hits
logger.info("Fetch process completed") | Fetch Google hit items
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items | Below is the instruction that describes the task:
### Input:
Fetch Google hit items
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
### Response:
def fetch_items(self, category, **kwargs):
"""Fetch Google hit items
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
logger.info("Fetching data for '%s'", self.keywords)
hits_raw = self.client.hits(self.keywords)
hits = self.__parse_hits(hits_raw)
yield hits
logger.info("Fetch process completed") |
def _get_room_ids_for_address(
self,
address: Address,
filter_private: bool = None,
) -> List[_RoomID]:
""" Uses GMatrixClient.get_account_data to get updated mapping of address->rooms
It'll filter only existing rooms.
If filter_private=True, also filter out public rooms.
If filter_private=None, filter according to self._private_rooms
"""
address_hex: AddressHex = to_checksum_address(address)
with self._account_data_lock:
room_ids = self._client.account_data.get(
'network.raiden.rooms',
{},
).get(address_hex)
self.log.debug('matrix get account data', room_ids=room_ids, for_address=address_hex)
if not room_ids: # None or empty
room_ids = list()
if not isinstance(room_ids, list): # old version, single room
room_ids = [room_ids]
if filter_private is None:
filter_private = self._private_rooms
if not filter_private:
# existing rooms
room_ids = [
room_id
for room_id in room_ids
if room_id in self._client.rooms
]
else:
# existing and private rooms
room_ids = [
room_id
for room_id in room_ids
if room_id in self._client.rooms and self._client.rooms[room_id].invite_only
]
return room_ids | Uses GMatrixClient.get_account_data to get updated mapping of address->rooms
It'll filter only existing rooms.
If filter_private=True, also filter out public rooms.
If filter_private=None, filter according to self._private_rooms | Below is the instruction that describes the task:
### Input:
Uses GMatrixClient.get_account_data to get updated mapping of address->rooms
It'll filter only existing rooms.
If filter_private=True, also filter out public rooms.
If filter_private=None, filter according to self._private_rooms
### Response:
def _get_room_ids_for_address(
self,
address: Address,
filter_private: bool = None,
) -> List[_RoomID]:
""" Uses GMatrixClient.get_account_data to get updated mapping of address->rooms
It'll filter only existing rooms.
If filter_private=True, also filter out public rooms.
If filter_private=None, filter according to self._private_rooms
"""
address_hex: AddressHex = to_checksum_address(address)
with self._account_data_lock:
room_ids = self._client.account_data.get(
'network.raiden.rooms',
{},
).get(address_hex)
self.log.debug('matrix get account data', room_ids=room_ids, for_address=address_hex)
if not room_ids: # None or empty
room_ids = list()
if not isinstance(room_ids, list): # old version, single room
room_ids = [room_ids]
if filter_private is None:
filter_private = self._private_rooms
if not filter_private:
# existing rooms
room_ids = [
room_id
for room_id in room_ids
if room_id in self._client.rooms
]
else:
# existing and private rooms
room_ids = [
room_id
for room_id in room_ids
if room_id in self._client.rooms and self._client.rooms[room_id].invite_only
]
return room_ids |
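The account-data mapping the method reads looks roughly like the sketch below (addresses and room ids are hypothetical); note how the legacy single-room form is normalized to a list before filtering:

```python
# Hypothetical shape of self._client.account_data (values are examples only):
account_data = {
    'network.raiden.rooms': {
        '0x1111...': ['!abc:server', '!def:server'],  # current format: list of room ids
        '0x2222...': '!ghi:server',                   # old format: a single room id
    }
}
# For '0x2222...' the method wraps the string into ['!ghi:server'], then keeps
# only ids present in self._client.rooms (and, if filtering, invite-only ones).
```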
def show_linkinfo_output_show_link_info_linkinfo_isl_linkinfo_isl_linknumber(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_linkinfo = ET.Element("show_linkinfo")
config = show_linkinfo
output = ET.SubElement(show_linkinfo, "output")
show_link_info = ET.SubElement(output, "show-link-info")
linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid")
linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid')
linkinfo_isl = ET.SubElement(show_link_info, "linkinfo-isl")
linkinfo_isl_linknumber = ET.SubElement(linkinfo_isl, "linkinfo-isl-linknumber")
linkinfo_isl_linknumber.text = kwargs.pop('linkinfo_isl_linknumber')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_linkinfo_output_show_link_info_linkinfo_isl_linkinfo_isl_linknumber(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_linkinfo = ET.Element("show_linkinfo")
config = show_linkinfo
output = ET.SubElement(show_linkinfo, "output")
show_link_info = ET.SubElement(output, "show-link-info")
linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid")
linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid')
linkinfo_isl = ET.SubElement(show_link_info, "linkinfo-isl")
linkinfo_isl_linknumber = ET.SubElement(linkinfo_isl, "linkinfo-isl-linknumber")
linkinfo_isl_linknumber.text = kwargs.pop('linkinfo_isl_linknumber')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
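Since the builder is auto-generated, it may help to see the tree it assembles. A runnable sketch of the equivalent construction, using placeholder values '1' and '2' for the two popped kwargs:

```python
import xml.etree.ElementTree as ET

# Mirror of the SubElement calls above, with placeholder values.
root = ET.Element("show_linkinfo")
info = ET.SubElement(ET.SubElement(root, "output"), "show-link-info")
ET.SubElement(info, "linkinfo-rbridgeid").text = "1"
isl = ET.SubElement(info, "linkinfo-isl")
ET.SubElement(isl, "linkinfo-isl-linknumber").text = "2"

print(ET.tostring(root).decode())
# <show_linkinfo><output><show-link-info><linkinfo-rbridgeid>1</linkinfo-rbridgeid>
# <linkinfo-isl><linkinfo-isl-linknumber>2</linkinfo-isl-linknumber></linkinfo-isl>
# </show-link-info></output></show_linkinfo>   (line breaks added for readability)
```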
def warn_if_outdated(package,
version,
raise_exceptions=False,
background=True,
):
"""
Higher level convenience function using check_outdated.
The package and version arguments are the same.
If the package is outdated, a warning (OutdatedPackageWarning) will
be emitted.
Any exception in check_outdated will be converted to a warning (OutdatedCheckFailedWarning)
unless raise_exceptions is True.
If background is True (the default), the check will run in
a background thread so this function will return immediately.
In this case if an exception is raised and raise_exceptions is True
the traceback will be printed to stderr but the program will not be
interrupted.
This function doesn't return anything.
"""
def check():
# noinspection PyUnusedLocal
is_outdated = False
with utils.exception_to_warning('check for latest version of package',
OutdatedCheckFailedWarning,
always_raise=raise_exceptions):
is_outdated, latest = check_outdated(package, version)
if is_outdated:
warn_with_ignore(
'The package %s is out of date. Your version is %s, the latest is %s.'
% (package, version, latest),
OutdatedPackageWarning,
)
if background:
thread = Thread(target=check)
thread.start()
else:
check() | Higher level convenience function using check_outdated.
The package and version arguments are the same.
If the package is outdated, a warning (OutdatedPackageWarning) will
be emitted.
Any exception in check_outdated will be converted to a warning (OutdatedCheckFailedWarning)
unless raise_exceptions is True.
If background is True (the default), the check will run in
a background thread so this function will return immediately.
In this case if an exception is raised and raise_exceptions is True
the traceback will be printed to stderr but the program will not be
interrupted.
This function doesn't return anything. | Below is the instruction that describes the task:
### Input:
Higher level convenience function using check_outdated.
The package and version arguments are the same.
If the package is outdated, a warning (OutdatedPackageWarning) will
be emitted.
Any exception in check_outdated will be converted to a warning (OutdatedCheckFailedWarning)
unless raise_exceptions is True.
If background is True (the default), the check will run in
a background thread so this function will return immediately.
In this case if an exception is raised and raise_exceptions is True
the traceback will be printed to stderr but the program will not be
interrupted.
This function doesn't return anything.
### Response:
def warn_if_outdated(package,
version,
raise_exceptions=False,
background=True,
):
"""
Higher level convenience function using check_outdated.
The package and version arguments are the same.
If the package is outdated, a warning (OutdatedPackageWarning) will
be emitted.
Any exception in check_outdated will be converted to a warning (OutdatedCheckFailedWarning)
unless raise_exceptions is True.
If background is True (the default), the check will run in
a background thread so this function will return immediately.
In this case if an exception is raised and raise_exceptions is True
the traceback will be printed to stderr but the program will not be
interrupted.
This function doesn't return anything.
"""
def check():
# noinspection PyUnusedLocal
is_outdated = False
with utils.exception_to_warning('check for latest version of package',
OutdatedCheckFailedWarning,
always_raise=raise_exceptions):
is_outdated, latest = check_outdated(package, version)
if is_outdated:
warn_with_ignore(
'The package %s is out of date. Your version is %s, the latest is %s.'
% (package, version, latest),
OutdatedPackageWarning,
)
if background:
thread = Thread(target=check)
thread.start()
else:
check() |
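Typical usage is a single call at import time of the consuming package. A sketch, where the package name and version string are placeholders and the `outdated` import path is assumed from this function's package:

```python
# Sketch: warn (in a background thread) if a newer release is on PyPI.
# 'my-package' and '1.2.3' are placeholders for your distribution name/version.
from outdated import warn_if_outdated

warn_if_outdated('my-package', '1.2.3')  # returns immediately; background=True by default
```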
def stream(self, device_sid=values.unset, limit=None, page_size=None):
"""
Streams KeyInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode device_sid: Find all Keys authenticating specified Device.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.deployed_devices.fleet.key.KeyInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(device_sid=device_sid, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit']) | Streams KeyInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode device_sid: Find all Keys authenticating specified Device.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.deployed_devices.fleet.key.KeyInstance] | Below is the instruction that describes the task:
### Input:
Streams KeyInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode device_sid: Find all Keys authenticating specified Device.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.deployed_devices.fleet.key.KeyInstance]
### Response:
def stream(self, device_sid=values.unset, limit=None, page_size=None):
"""
Streams KeyInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode device_sid: Find all Keys authenticating specified Device.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.deployed_devices.fleet.key.KeyInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(device_sid=device_sid, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit']) |
def parse_content(self, content):
"""
Parse the output of the ``alternatives`` command.
"""
self.program = None
self.status = None
self.link = None
self.best = None
self.paths = []
current_path = None
# Set up instance variables
for line in content:
words = line.split(None)
if ' - status is' in line:
# alternatives only displays one program, so finding
# this line again is an error.
if self.program:
raise ParseException(
"Program line for {newprog} found in output for {oldprog}".format(
newprog=words[0], oldprog=self.program
)
)
# Set up new program data
self.program = words[0]
self.status = words[4][:-1] # remove trailing .
self.alternatives = []
current_path = {}
elif not self.program:
# Lines before 'status is' line are ignored
continue
elif line.startswith(' link currently points to ') and len(words) == 5:
# line: ' link currently points to /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64/jre/bin/java'
self.link = words[4]
elif ' - priority ' in line and len(words) == 4 and words[3].isdigit():
# line: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java - priority 16091
# New path - save current path if set
self.paths.append({
'path': words[0],
'priority': int(words[3]),
'slave': {},
})
current_path = self.paths[-1]
elif line.startswith(' slave ') and len(words) == 3 and current_path:
# line: ' slave ControlPanel: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/ControlPanel'
current_path['slave'][words[1][:-1]] = words[2] # remove final : from program
elif line.startswith("Current `best' version is ") and len(words) == 5:
# line: 'Current `best' version is /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java.'
self.best = words[4][:-1] | Parse the output of the ``alternatives`` command. | Below is the instruction that describes the task:
### Input:
Parse the output of the ``alternatives`` command.
### Response:
def parse_content(self, content):
"""
Parse the output of the ``alternatives`` command.
"""
self.program = None
self.status = None
self.link = None
self.best = None
self.paths = []
current_path = None
# Set up instance variables
for line in content:
words = line.split(None)
if ' - status is' in line:
# alternatives only displays one program, so finding
# this line again is an error.
if self.program:
raise ParseException(
"Program line for {newprog} found in output for {oldprog}".format(
newprog=words[0], oldprog=self.program
)
)
# Set up new program data
self.program = words[0]
self.status = words[4][:-1] # remove trailing .
self.alternatives = []
current_path = {}
elif not self.program:
# Lines before 'status is' line are ignored
continue
elif line.startswith(' link currently points to ') and len(words) == 5:
# line: ' link currently points to /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64/jre/bin/java'
self.link = words[4]
elif ' - priority ' in line and len(words) == 4 and words[3].isdigit():
# line: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java - priority 16091
# New path - save current path if set
self.paths.append({
'path': words[0],
'priority': int(words[3]),
'slave': {},
})
current_path = self.paths[-1]
elif line.startswith(' slave ') and len(words) == 3 and current_path:
# line: ' slave ControlPanel: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/ControlPanel'
current_path['slave'][words[1][:-1]] = words[2] # remove final : from program
elif line.startswith("Current `best' version is ") and len(words) == 5:
# line: 'Current `best' version is /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java.'
self.best = words[4][:-1] |
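Piecing together the inline `# line:` comments, the parser accepts `alternatives --display`-style output. A hypothetical, abridged sample and the attributes it would yield:

```python
# Abridged input reconstructed from the inline comments above (hypothetical):
content = [
    "java - status is auto.",
    " link currently points to /usr/lib/jvm/jre-1.8.0-openjdk.x86_64/bin/java",
    "/usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java - priority 16091",
    " slave ControlPanel: /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/ControlPanel",
    "Current `best' version is /usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java.",
]
# After parse_content(content):
#   program == 'java', status == 'auto'
#   link    == '/usr/lib/jvm/jre-1.8.0-openjdk.x86_64/bin/java'
#   paths   == [{'path': '/usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java',
#                'priority': 16091,
#                'slave': {'ControlPanel': '/usr/.../bin/ControlPanel'}}]
#   best    == '/usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java'
```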
def pots(self, refresh=False):
"""
Returns a list of pots owned by the currently authorised user.
Official docs:
https://monzo.com/docs/#pots
:param refresh: decides if the pots information should be refreshed.
:type refresh: bool
:returns: list of Monzo pots
:rtype: list of MonzoPot
"""
if not refresh and self._cached_pots:
return self._cached_pots
endpoint = '/pots/listV1'
response = self._get_response(
method='get', endpoint=endpoint,
)
pots_json = response.json()['pots']
pots = [MonzoPot(data=pot) for pot in pots_json]
self._cached_pots = pots
return pots | Returns a list of pots owned by the currently authorised user.
Official docs:
https://monzo.com/docs/#pots
:param refresh: decides if the pots information should be refreshed.
:type refresh: bool
:returns: list of Monzo pots
:rtype: list of MonzoPot | Below is the instruction that describes the task:
### Input:
Returns a list of pots owned by the currently authorised user.
Official docs:
https://monzo.com/docs/#pots
:param refresh: decides if the pots information should be refreshed.
:type refresh: bool
:returns: list of Monzo pots
:rtype: list of MonzoPot
### Response:
def pots(self, refresh=False):
"""
Returns a list of pots owned by the currently authorised user.
Official docs:
https://monzo.com/docs/#pots
:param refresh: decides if the pots information should be refreshed.
:type refresh: bool
:returns: list of Monzo pots
:rtype: list of MonzoPot
"""
if not refresh and self._cached_pots:
return self._cached_pots
endpoint = '/pots/listV1'
response = self._get_response(
method='get', endpoint=endpoint,
)
pots_json = response.json()['pots']
pots = [MonzoPot(data=pot) for pot in pots_json]
self._cached_pots = pots
return pots |
def parse_source(self, filename):
"""
Extract the statements from the given file, look for function calls
`sass_processor(scss_file)` and compile the filename into CSS.
"""
callvisitor = FuncCallVisitor('sass_processor')
tree = ast.parse(open(filename, 'rb').read())
callvisitor.visit(tree)
for sass_fileurl in callvisitor.sass_files:
sass_filename = find_file(sass_fileurl)
if not sass_filename or sass_filename in self.processed_files:
continue
if self.delete_files:
self.delete_file(sass_filename, sass_fileurl)
else:
self.compile_sass(sass_filename, sass_fileurl) | Extract the statements from the given file, look for function calls
`sass_processor(scss_file)` and compile the filename into CSS. | Below is the instruction that describes the task:
### Input:
Extract the statements from the given file, look for function calls
`sass_processor(scss_file)` and compile the filename into CSS.
### Response:
def parse_source(self, filename):
"""
Extract the statements from the given file, look for function calls
`sass_processor(scss_file)` and compile the filename into CSS.
"""
callvisitor = FuncCallVisitor('sass_processor')
tree = ast.parse(open(filename, 'rb').read())
callvisitor.visit(tree)
for sass_fileurl in callvisitor.sass_files:
sass_filename = find_file(sass_fileurl)
if not sass_filename or sass_filename in self.processed_files:
continue
if self.delete_files:
self.delete_file(sass_filename, sass_fileurl)
else:
self.compile_sass(sass_filename, sass_fileurl) |
def list_path(root_dir):
"""List directory if exists.
:param dir: str
:return: list
"""
res = []
if os.path.isdir(root_dir):
for name in os.listdir(root_dir):
res.append(name)
return res | List directory if it exists.
:param root_dir: str
:return: list | Below is the instruction that describes the task:
### Input:
List directory if it exists.
:param root_dir: str
:return: list
### Response:
def list_path(root_dir):
"""List directory if exists.
:param dir: str
:return: list
"""
res = []
if os.path.isdir(root_dir):
for name in os.listdir(root_dir):
res.append(name)
return res |
def _score_for_model(meta):
""" Returns mean score between tasks in pipeline that can be used for early stopping. """
mean_acc = list()
pipes = meta["pipeline"]
acc = meta["accuracy"]
if "tagger" in pipes:
mean_acc.append(acc["tags_acc"])
if "parser" in pipes:
mean_acc.append((acc["uas"] + acc["las"]) / 2)
if "ner" in pipes:
mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3)
return sum(mean_acc) / len(mean_acc) | Returns mean score between tasks in pipeline that can be used for early stopping. | Below is the instruction that describes the task:
### Input:
Returns mean score between tasks in pipeline that can be used for early stopping.
### Response:
def _score_for_model(meta):
""" Returns mean score between tasks in pipeline that can be used for early stopping. """
mean_acc = list()
pipes = meta["pipeline"]
acc = meta["accuracy"]
if "tagger" in pipes:
mean_acc.append(acc["tags_acc"])
if "parser" in pipes:
mean_acc.append((acc["uas"] + acc["las"]) / 2)
if "ner" in pipes:
mean_acc.append((acc["ents_p"] + acc["ents_r"] + acc["ents_f"]) / 3)
return sum(mean_acc) / len(mean_acc) |
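A quick worked example of the averaging: each pipeline component contributes one number, and the result is their mean (values below are hypothetical):

```python
# Hypothetical meta for a tagger + parser pipeline:
meta = {
    "pipeline": ["tagger", "parser"],
    "accuracy": {"tags_acc": 97.0, "uas": 90.0, "las": 88.0},
}
# tagger -> 97.0; parser -> (90.0 + 88.0) / 2 == 89.0
# _score_for_model(meta) == (97.0 + 89.0) / 2 == 93.0
```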
def highlight_current_line(editor):
"""
Highlights given editor current line.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
"""
format = editor.language.theme.get("accelerator.line")
if not format:
return False
extra_selections = editor.extraSelections() or []
if not editor.isReadOnly():
selection = QTextEdit.ExtraSelection()
selection.format.setBackground(format.background())
selection.format.setProperty(QTextFormat.FullWidthSelection, True)
selection.cursor = editor.textCursor()
selection.cursor.clearSelection()
extra_selections.append(selection)
editor.setExtraSelections(extra_selections)
return True | Highlights given editor current line.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Highlights given editor current line.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
### Response:
def highlight_current_line(editor):
"""
Highlights given editor current line.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
"""
format = editor.language.theme.get("accelerator.line")
if not format:
return False
extra_selections = editor.extraSelections() or []
if not editor.isReadOnly():
selection = QTextEdit.ExtraSelection()
selection.format.setBackground(format.background())
selection.format.setProperty(QTextFormat.FullWidthSelection, True)
selection.cursor = editor.textCursor()
selection.cursor.clearSelection()
extra_selections.append(selection)
editor.setExtraSelections(extra_selections)
return True |
def process(self):
"""
populate the report from the xml
:return:
"""
suites = None
if isinstance(self.tree, ET.Element):
root = self.tree
else:
root = self.tree.getroot()
if root.tag == "testrun":
root = root[0]
if root.tag == "testsuite":
suites = [root]
if root.tag == "testsuites":
suites = [x for x in root]
assert suites, "could not find test suites in results xml"
for suite in suites:
cursuite = Suite()
self.suites.append(cursuite)
cursuite.name = suite.attrib["name"]
if "package" in suite.attrib:
cursuite.package = suite.attrib["package"]
cursuite.duration = float(suite.attrib.get("time", '0').replace(',',''))
for element in suite:
if element.tag == "error":
# top level error?
errtag = {
"message": element.attrib.get("message", ""),
"type": element.attrib.get("type", ""),
"text": element.text
}
cursuite.errors.append(errtag)
if element.tag == "system-out":
cursuite.stdout = element.text
if element.tag == "system-err":
cursuite.stderr = element.text
if element.tag == "properties":
for prop in element:
if prop.tag == "property":
cursuite.properties[prop.attrib["name"]] = prop.attrib["value"]
if element.tag == "testcase":
testcase = element
if not testcase.attrib.get("classname", None):
testcase.attrib["classname"] = NO_CLASSNAME
if testcase.attrib["classname"] not in cursuite:
testclass = Class()
testclass.name = testcase.attrib["classname"]
cursuite[testclass.name] = testclass
testclass = cursuite[testcase.attrib["classname"]]
newcase = Case()
newcase.name = testcase.attrib["name"]
newcase.testclass = testclass
newcase.duration = float(testcase.attrib.get("time", '0').replace(',',''))
testclass.cases.append(newcase)
# does this test case have any children?
for child in testcase:
if child.tag == "skipped":
newcase.skipped = child.text
if "message" in child.attrib:
newcase.skipped_msg = child.attrib["message"]
elif child.tag == "system-out":
newcase.stdout = child.text
elif child.tag == "system-err":
newcase.stderr = child.text
elif child.tag == "failure":
newcase.failure = child.text
if "message" in child.attrib:
newcase.failure_msg = child.attrib["message"]
elif child.tag == "error":
newcase.failure = child.text
if "message" in child.attrib:
newcase.failure_msg = child.attrib["message"]
elif child.tag == "properties":
for property in child:
newproperty = Property()
newproperty.name = property.attrib["name"]
newproperty.value = property.attrib["value"]
newcase.properties.append(newproperty) | populate the report from the xml
:return: | Below is the instruction that describes the task:
### Input:
populate the report from the xml
:return:
### Response:
def process(self):
"""
populate the report from the xml
:return:
"""
suites = None
if isinstance(self.tree, ET.Element):
root = self.tree
else:
root = self.tree.getroot()
if root.tag == "testrun":
root = root[0]
if root.tag == "testsuite":
suites = [root]
if root.tag == "testsuites":
suites = [x for x in root]
assert suites, "could not find test suites in results xml"
for suite in suites:
cursuite = Suite()
self.suites.append(cursuite)
cursuite.name = suite.attrib["name"]
if "package" in suite.attrib:
cursuite.package = suite.attrib["package"]
cursuite.duration = float(suite.attrib.get("time", '0').replace(',',''))
for element in suite:
if element.tag == "error":
# top level error?
errtag = {
"message": element.attrib.get("message", ""),
"type": element.attrib.get("type", ""),
"text": element.text
}
cursuite.errors.append(errtag)
if element.tag == "system-out":
cursuite.stdout = element.text
if element.tag == "system-err":
cursuite.stderr = element.text
if element.tag == "properties":
for prop in element:
if prop.tag == "property":
cursuite.properties[prop.attrib["name"]] = prop.attrib["value"]
if element.tag == "testcase":
testcase = element
if not testcase.attrib.get("classname", None):
testcase.attrib["classname"] = NO_CLASSNAME
if testcase.attrib["classname"] not in cursuite:
testclass = Class()
testclass.name = testcase.attrib["classname"]
cursuite[testclass.name] = testclass
testclass = cursuite[testcase.attrib["classname"]]
newcase = Case()
newcase.name = testcase.attrib["name"]
newcase.testclass = testclass
newcase.duration = float(testcase.attrib.get("time", '0').replace(',',''))
testclass.cases.append(newcase)
# does this test case have any children?
for child in testcase:
if child.tag == "skipped":
newcase.skipped = child.text
if "message" in child.attrib:
newcase.skipped_msg = child.attrib["message"]
elif child.tag == "system-out":
newcase.stdout = child.text
elif child.tag == "system-err":
newcase.stderr = child.text
elif child.tag == "failure":
newcase.failure = child.text
if "message" in child.attrib:
newcase.failure_msg = child.attrib["message"]
elif child.tag == "error":
newcase.failure = child.text
if "message" in child.attrib:
newcase.failure_msg = child.attrib["message"]
elif child.tag == "properties":
for property in child:
newproperty = Property()
newproperty.name = property.attrib["name"]
newproperty.value = property.attrib["value"]
newcase.properties.append(newproperty) |
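A minimal JUnit-style document the parser accepts, with hypothetical values; wiring it in through `self.tree` is sketched for illustration only:

```python
import xml.etree.ElementTree as ET

# Hypothetical JUnit-style results the process() method above can walk:
xml = """
<testsuites>
  <testsuite name="examples" time="0.12">
    <testcase classname="examples.TestMath" name="test_add" time="0.01"/>
    <testcase classname="examples.TestMath" name="test_div" time="0.02">
      <failure message="division by zero">ZeroDivisionError</failure>
    </testcase>
  </testsuite>
</testsuites>
"""
tree = ET.ElementTree(ET.fromstring(xml))
# With self.tree = tree, process() creates one Suite ('examples') containing one
# Class ('examples.TestMath') with two Cases; the second Case carries
# failure == 'ZeroDivisionError' and failure_msg == 'division by zero'.
```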
def save_graph_only(sess, output_file_path, output_node_names, as_text=False):
"""Save a small version of the graph based on a session and the output node names."""
for node in sess.graph_def.node:
node.device = ''
graph_def = graph_util.extract_sub_graph(sess.graph_def, output_node_names)
output_dir, output_filename = os.path.split(output_file_path)
graph_io.write_graph(graph_def, output_dir, output_filename, as_text=as_text) | Save a small version of the graph based on a session and the output node names. | Below is the instruction that describes the task:
### Input:
Save a small version of the graph based on a session and the output node names.
### Response:
def save_graph_only(sess, output_file_path, output_node_names, as_text=False):
"""Save a small version of the graph based on a session and the output node names."""
for node in sess.graph_def.node:
node.device = ''
graph_def = graph_util.extract_sub_graph(sess.graph_def, output_node_names)
output_dir, output_filename = os.path.split(output_file_path)
graph_io.write_graph(graph_def, output_dir, output_filename, as_text=as_text) |
def has_no_error(
state, incorrect_msg="Your code generated an error. Fix it and try again!"
):
"""Check whether the submission did not generate a runtime error.
Simply use ``Ex().has_no_error()`` in your SCT whenever you want to check for errors.
By default, after the entire SCT finished executing, ``sqlwhat`` will check
for errors before marking the exercise as correct. You can disable this behavior
by using ``Ex().allow_error()``.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the student's query did not return a result.
"""
if state.reporter.get_errors():
state.do_test(incorrect_msg)
return state | Check whether the submission did not generate a runtime error.
Simply use ``Ex().has_no_error()`` in your SCT whenever you want to check for errors.
By default, after the entire SCT finished executing, ``sqlwhat`` will check
for errors before marking the exercise as correct. You can disable this behavior
by using ``Ex().allow_error()``.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the student's query did not return a result. | Below is the instruction that describes the task:
### Input:
Check whether the submission did not generate a runtime error.
Simply use ``Ex().has_no_error()`` in your SCT whenever you want to check for errors.
By default, after the entire SCT finished executing, ``sqlwhat`` will check
for errors before marking the exercise as correct. You can disable this behavior
by using ``Ex().allow_error()``.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the student's query did not return a result.
### Response:
def has_no_error(
state, incorrect_msg="Your code generated an error. Fix it and try again!"
):
"""Check whether the submission did not generate a runtime error.
Simply use ``Ex().has_no_error()`` in your SCT whenever you want to check for errors.
By default, after the entire SCT finished executing, ``sqlwhat`` will check
for errors before marking the exercise as correct. You can disable this behavior
by using ``Ex().allow_error()``.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the student's query did not return a result.
"""
if state.reporter.get_errors():
state.do_test(incorrect_msg)
return state |
def make_batched_timer(self, bucket_seconds, chunk_size=100):
"""
Creates and returns an object implementing
:class:`txaio.IBatchedTimer`.
:param bucket_seconds: the number of seconds in each bucket. That
is, a value of 5 means that any timeout within a 5 second
window will be in the same bucket, and get notified at the
same time. This is only accurate to "milliseconds".
:param chunk_size: when "doing" the callbacks in a particular
bucket, this controls how many we do at once before yielding to
the reactor.
"""
def get_seconds():
return self._get_loop().seconds()
def create_delayed_call(delay, fun, *args, **kwargs):
return self._get_loop().callLater(delay, fun, *args, **kwargs)
return _BatchedTimer(
bucket_seconds * 1000.0, chunk_size,
seconds_provider=get_seconds,
delayed_call_creator=create_delayed_call,
) | Creates and returns an object implementing
:class:`txaio.IBatchedTimer`.
:param bucket_seconds: the number of seconds in each bucket. That
is, a value of 5 means that any timeout within a 5 second
window will be in the same bucket, and get notified at the
same time. This is only accurate to "milliseconds".
:param chunk_size: when "doing" the callbacks in a particular
bucket, this controls how many we do at once before yielding to
the reactor. | Below is the instruction that describes the task:
### Input:
Creates and returns an object implementing
:class:`txaio.IBatchedTimer`.
:param bucket_seconds: the number of seconds in each bucket. That
is, a value of 5 means that any timeout within a 5 second
window will be in the same bucket, and get notified at the
same time. This is only accurate to "milliseconds".
:param chunk_size: when "doing" the callbacks in a particular
bucket, this controls how many we do at once before yielding to
the reactor.
### Response:
def make_batched_timer(self, bucket_seconds, chunk_size=100):
"""
Creates and returns an object implementing
:class:`txaio.IBatchedTimer`.
:param bucket_seconds: the number of seconds in each bucket. That
is, a value of 5 means that any timeout within a 5 second
window will be in the same bucket, and get notified at the
same time. This is only accurate to "milliseconds".
:param chunk_size: when "doing" the callbacks in a particular
bucket, this controls how many we do at once before yielding to
the reactor.
"""
def get_seconds():
return self._get_loop().seconds()
def create_delayed_call(delay, fun, *args, **kwargs):
return self._get_loop().callLater(delay, fun, *args, **kwargs)
return _BatchedTimer(
bucket_seconds * 1000.0, chunk_size,
seconds_provider=get_seconds,
delayed_call_creator=create_delayed_call,
) |
def db_exists(name, user=None, password=None, host=None, port=None):
'''
Checks if a database exists in Influxdb
name
Database name to check
user
The user to connect as
password
The password of the user
host
The host to connect to
port
The port to connect to
CLI Example:
.. code-block:: bash
salt '*' influxdb08.db_exists <name>
salt '*' influxdb08.db_exists <name> <user> <password> <host> <port>
'''
dbs = db_list(user, password, host, port)
if not isinstance(dbs, list):
return False
return name in [db['name'] for db in dbs] | Checks if a database exists in Influxdb
name
Database name to check
user
The user to connect as
password
The password of the user
host
The host to connect to
port
The port to connect to
CLI Example:
.. code-block:: bash
salt '*' influxdb08.db_exists <name>
salt '*' influxdb08.db_exists <name> <user> <password> <host> <port> | Below is the instruction that describes the task:
### Input:
Checks if a database exists in Influxdb
name
Database name to check
user
The user to connect as
password
The password of the user
host
The host to connect to
port
The port to connect to
CLI Example:
.. code-block:: bash
salt '*' influxdb08.db_exists <name>
salt '*' influxdb08.db_exists <name> <user> <password> <host> <port>
### Response:
def db_exists(name, user=None, password=None, host=None, port=None):
'''
Checks if a database exists in Influxdb
name
Database name to check
user
The user to connect as
password
The password of the user
host
The host to connect to
port
The port to connect to
CLI Example:
.. code-block:: bash
salt '*' influxdb08.db_exists <name>
salt '*' influxdb08.db_exists <name> <user> <password> <host> <port>
'''
dbs = db_list(user, password, host, port)
if not isinstance(dbs, list):
return False
return name in [db['name'] for db in dbs] |
def get_dummy_run(nthread, nsamples, **kwargs):
"""Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthread: int
Number of threads in the run.
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
threads = []
# set seed before generating any threads and do not reset for each thread
if seed is not False:
np.random.seed(seed)
threads = []
for _ in range(nthread):
threads.append(get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=logl_start,
logl_range=logl_range))
# Sort threads in order of starting logl so labels match labels that would
# have been given processing a dead points array. N.B. this only works when
# all threads have same start_logl
threads = sorted(threads, key=lambda th: th['logl'][0])
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
# Use combine_ns_runs rather than combine threads as this relabels the
# threads according to their order
return nestcheck.ns_run_utils.combine_threads(threads) | Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthread: int
Number of threads in the run.
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. | Below is the instruction that describes the task:
### Input:
Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthread: int
Number of threads in the run.
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
### Response:
def get_dummy_run(nthread, nsamples, **kwargs):
"""Generate dummy data for a nested sampling run.
Log-likelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nthread: int
Number of threads in the run.
nsamples: int
Number of samples in thread.
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values.
"""
seed = kwargs.pop('seed', False)
ndim = kwargs.pop('ndim', 2)
logl_start = kwargs.pop('logl_start', -np.inf)
logl_range = kwargs.pop('logl_range', 1)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
threads = []
# set seed before generating any threads and do not reset for each thread
if seed is not False:
np.random.seed(seed)
threads = []
for _ in range(nthread):
threads.append(get_dummy_thread(
nsamples, ndim=ndim, seed=False, logl_start=logl_start,
logl_range=logl_range))
# Sort threads in order of starting logl so labels match labels that would
# have been given processing a dead points array. N.B. this only works when
# all threads have same start_logl
threads = sorted(threads, key=lambda th: th['logl'][0])
for i, _ in enumerate(threads):
threads[i]['thread_labels'] = np.full(nsamples, i)
# Use combine_ns_runs rather than combine threads as this relabels the
# threads according to their order
return nestcheck.ns_run_utils.combine_threads(threads) |
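Example invocation, with arbitrary parameter values: four threads of 100 samples each in three dimensions, seeded for reproducibility.

```python
# Hypothetical call: 4 threads x 100 samples, 3 dimensions, fixed seed.
run = get_dummy_run(4, 100, ndim=3, seed=0)
# 'run' is a combined nested-sampling run dict with 400 samples; thread_labels
# 0..3 are assigned in order of each thread's starting logl.
```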
async def _build_state(self,
request: Request,
message: BaseMessage,
responder: Responder) \
-> Tuple[
Optional[BaseState],
Optional[BaseTrigger],
Optional[bool],
]:
"""
Build the state for this request.
"""
trigger, state_class, dnr = await self._find_trigger(request)
if trigger is None:
if not message.should_confuse():
return None, None, None
state_class = self._confused_state(request)
logger.debug('Next state: %s (confused)', state_class.name())
else:
logger.debug('Next state: %s', state_class.name())
state = state_class(request, responder, trigger, trigger)
return state, trigger, dnr | Build the state for this request. | Below is the the instruction that describes the task:
### Input:
Build the state for this request.
### Response:
async def _build_state(self,
request: Request,
message: BaseMessage,
responder: Responder) \
-> Tuple[
Optional[BaseState],
Optional[BaseTrigger],
Optional[bool],
]:
"""
Build the state for this request.
"""
trigger, state_class, dnr = await self._find_trigger(request)
if trigger is None:
if not message.should_confuse():
return None, None, None
state_class = self._confused_state(request)
logger.debug('Next state: %s (confused)', state_class.name())
else:
logger.debug('Next state: %s', state_class.name())
state = state_class(request, responder, trigger, trigger)
return state, trigger, dnr |
def await_results(url, pings=45, sleep=2):
"""
Ping {url} until it returns a results payload, timing out after
{pings} pings and waiting {sleep} seconds between pings.
"""
print("Checking...", end="", flush=True)
for _ in range(pings):
# Query for check results.
res = requests.post(url)
if res.status_code != 200:
continue
payload = res.json()
if payload["complete"]:
break
print(".", end="", flush=True)
time.sleep(sleep)
else:
# Terminate if no response
print()
raise Error(
_("check50 is taking longer than normal!\nSee https://cs50.me/checks/{} for more detail.").format(commit_hash))
print()
# TODO: Should probably check payload["checks"]["version"] here to make sure major version is same as __version__
# (otherwise we may not be able to parse results)
return (CheckResult(**result) for result in payload["checks"]["results"]) | Ping {url} until it returns a results payload, timing out after
{pings} pings and waiting {sleep} seconds between pings. | Below is the instruction that describes the task:
### Input:
Ping {url} until it returns a results payload, timing out after
{pings} pings and waiting {sleep} seconds between pings.
### Response:
def await_results(url, pings=45, sleep=2):
"""
Ping {url} until it returns a results payload, timing out after
{pings} pings and waiting {sleep} seconds between pings.
"""
print("Checking...", end="", flush=True)
for _ in range(pings):
# Query for check results.
res = requests.post(url)
if res.status_code != 200:
continue
payload = res.json()
if payload["complete"]:
break
print(".", end="", flush=True)
time.sleep(sleep)
else:
# Terminate if no response
print()
raise Error(
_("check50 is taking longer than normal!\nSee https://cs50.me/checks/{} for more detail.").format(commit_hash))
print()
# TODO: Should probably check payload["checks"]["version"] here to make sure major version is same as __version__
# (otherwise we may not be able to parse results)
return (CheckResult(**result) for result in payload["checks"]["results"]) |
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None | Split a smartos docker uuid into repo and tag | Below is the instruction that describes the task:
### Input:
Split a smartos docker uuid into repo and tag
### Response:
def _split_docker_uuid(uuid):
'''
Split a smartos docker uuid into repo and tag
'''
if uuid:
uuid = uuid.split(':')
if len(uuid) == 2:
tag = uuid[1]
repo = uuid[0]
return repo, tag
return None, None |
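Worked examples of the split, using hypothetical image names:

```python
# _split_docker_uuid('busybox:latest') -> ('busybox', 'latest')
# _split_docker_uuid('busybox')        -> (None, None)  # split yields 1 part, not 2
# _split_docker_uuid('')               -> (None, None)  # falsy input short-circuits
```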
def load_code_info(self):
"""Load coded info for all contained phases."""
return PhaseGroup(
setup=load_code_info(self.setup),
main=load_code_info(self.main),
teardown=load_code_info(self.teardown),
name=self.name) | Load coded info for all contained phases. | Below is the instruction that describes the task:
### Input:
Load coded info for all contained phases.
### Response:
def load_code_info(self):
"""Load coded info for all contained phases."""
return PhaseGroup(
setup=load_code_info(self.setup),
main=load_code_info(self.main),
teardown=load_code_info(self.teardown),
name=self.name) |
def _read_mat_mnu0(filename):
"""Import a .mat file with single potentials (a b m) into a pandas
DataFrame
Also export some variables of the MD struct into a separate structure
"""
print('read_mag_single_file: {0}'.format(filename))
mat = sio.loadmat(filename, squeeze_me=True)
# check the version
version = mat['MP']['Version'].item()
if version != 'FZJ-EZ-2017':
raise Exception(
'This data format is not supported (expected: FZJ-EZ-2017)' +
' got: {}'.format(version)
)
df_emd = _extract_emd(mat, filename=filename)
df_md = _extract_md(mat)
return df_emd, df_md | Import a .mat file with single potentials (a b m) into a pandas
DataFrame
Also export some variables of the MD struct into a separate structure | Below is the instruction that describes the task:
### Input:
Import a .mat file with single potentials (a b m) into a pandas
DataFrame
Also export some variables of the MD struct into a separate structure
### Response:
def _read_mat_mnu0(filename):
"""Import a .mat file with single potentials (a b m) into a pandas
DataFrame
Also export some variables of the MD struct into a separate structure
"""
print('read_mag_single_file: {0}'.format(filename))
mat = sio.loadmat(filename, squeeze_me=True)
# check the version
version = mat['MP']['Version'].item()
if version != 'FZJ-EZ-2017':
raise Exception(
'This data format is not supported (expected: FZJ-EZ-2017)' +
' got: {}'.format(version)
)
df_emd = _extract_emd(mat, filename=filename)
df_md = _extract_md(mat)
return df_emd, df_md |
def copy_plus(orig, new):
"""Copy a fils, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
shutil.copyfile(orig + ext, new + ext) | Copy a file, including biological index files. | Below is the instruction that describes the task:
### Input:
Copy a file, including biological index files.
### Response:
def copy_plus(orig, new):
"""Copy a fils, including biological index files.
"""
for ext in ["", ".idx", ".gbi", ".tbi", ".bai"]:
if os.path.exists(orig + ext) and (not os.path.lexists(new + ext) or not os.path.exists(new + ext)):
shutil.copyfile(orig + ext, new + ext) |
def encode(self, s):
"""
Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str
"""
if isinstance(s, basestring) and self.needsEncoding(s):
for x in self.encodings:
s = s.replace(x[0], x[1])
return s | Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str | Below is the instruction that describes the task:
### Input:
Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str
### Response:
def encode(self, s):
"""
Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str
"""
if isinstance(s, basestring) and self.needsEncoding(s):
for x in self.encodings:
s = s.replace(x[0], x[1])
return s |
def check_int(integer):
"""
Check if a string represents an integer.
:param integer: Number as str
:return: Boolean
"""
if not isinstance(integer, str):
return False
if integer[0] in ('-', '+'):
return integer[1:].isdigit()
return integer.isdigit() | Check if a string represents an integer.
:param integer: Number as str
:return: Boolean | Below is the instruction that describes the task:
### Input:
Check if a string represents an integer.
:param integer: Number as str
:return: Boolean
### Response:
def check_int(integer):
"""
Check if a string represents an integer.
:param integer: Number as str
:return: Boolean
"""
if not isinstance(integer, str):
return False
if integer[0] in ('-', '+'):
return integer[1:].isdigit()
return integer.isdigit() |
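A few worked examples covering the edge cases:

```python
# check_int('42')    -> True
# check_int('-42')   -> True    # sign stripped, remainder is all digits
# check_int('+4.2')  -> False   # '4.2' is not all digits
# check_int(42)      -> False   # not a str at all
```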
def process_tick(self, tup):
"""Called every window_duration
"""
curtime = int(time.time())
window_info = WindowContext(curtime - self.window_duration, curtime)
self.processWindow(window_info, list(self.current_tuples))
for tup in self.current_tuples:
self.ack(tup)
self.current_tuples.clear() | Called every window_duration | Below is the instruction that describes the task:
### Input:
Called every window_duration
### Response:
def process_tick(self, tup):
"""Called every window_duration
"""
curtime = int(time.time())
window_info = WindowContext(curtime - self.window_duration, curtime)
self.processWindow(window_info, list(self.current_tuples))
for tup in self.current_tuples:
self.ack(tup)
self.current_tuples.clear() |
def get_item(self, tablename, key, attributes=None, consistent=False,
return_capacity=None):
"""
Fetch a single item from a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.get_item2`.
Parameters
----------
tablename : str
Name of the table to fetch from
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
attributes : list, optional
If present, only fetch these attributes from the item
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
"""
kwargs = {
'TableName': tablename,
'Key': self.dynamizer.encode_keys(key),
'ConsistentRead': consistent,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
if attributes is not None:
kwargs['AttributesToGet'] = attributes
data = self.call('get_item', **kwargs)
return Result(self.dynamizer, data, 'Item') | Fetch a single item from a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.get_item2`.
Parameters
----------
tablename : str
Name of the table to fetch from
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
attributes : list, optional
If present, only fetch these attributes from the item
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE) | Below is the instruction that describes the task:
### Input:
Fetch a single item from a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.get_item2`.
Parameters
----------
tablename : str
Name of the table to fetch from
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
attributes : list, optional
If present, only fetch these attributes from the item
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
### Response:
def get_item(self, tablename, key, attributes=None, consistent=False,
return_capacity=None):
"""
Fetch a single item from a table
This uses the older version of the DynamoDB API.
See also: :meth:`~.get_item2`.
Parameters
----------
tablename : str
Name of the table to fetch from
key : dict
Primary key dict specifying the hash key and, if applicable, the
range key of the item.
attributes : list, optional
If present, only fetch these attributes from the item
consistent : bool, optional
Perform a strongly consistent read of the data (default False)
return_capacity : {NONE, INDEXES, TOTAL}, optional
INDEXES will return the consumed capacity for indexes, TOTAL will
return the consumed capacity for the table and the indexes.
(default NONE)
"""
kwargs = {
'TableName': tablename,
'Key': self.dynamizer.encode_keys(key),
'ConsistentRead': consistent,
'ReturnConsumedCapacity': self._default_capacity(return_capacity),
}
if attributes is not None:
kwargs['AttributesToGet'] = attributes
data = self.call('get_item', **kwargs)
return Result(self.dynamizer, data, 'Item') |
def handle(self, *args, **options):
"""
Processes the converted data into the yacms database correctly.
Attributes:
yacms_user: the user to import this data against
date_format: the format the dates are in for posts and comments
"""
yacms_user = options.get("yacms_user")
site = Site.objects.get_current()
verbosity = int(options.get("verbosity", 1))
prompt = options.get("interactive")
# Validate the yacms user.
if yacms_user is None:
raise CommandError("No yacms user has been specified")
try:
yacms_user = User.objects.get(username=yacms_user)
except User.DoesNotExist:
raise CommandError("Invalid yacms user: %s" % yacms_user)
# Run the subclassed ``handle_import`` and save posts, tags,
# categories, and comments to the DB.
self.handle_import(options)
for post_data in self.posts:
categories = post_data.pop("categories")
tags = post_data.pop("tags")
comments = post_data.pop("comments")
old_url = post_data.pop("old_url")
post_data = self.trunc(BlogPost, prompt, **post_data)
initial = {
"title": post_data.pop("title"),
"user": yacms_user,
}
if post_data["publish_date"] is None:
post_data["status"] = CONTENT_STATUS_DRAFT
post, created = BlogPost.objects.get_or_create(**initial)
for k, v in post_data.items():
setattr(post, k, v)
post.save()
if created and verbosity >= 1:
print("Imported post: %s" % post)
for name in categories:
cat = self.trunc(BlogCategory, prompt, title=name)
if not cat["title"]:
continue
cat, created = BlogCategory.objects.get_or_create(**cat)
if created and verbosity >= 1:
print("Imported category: %s" % cat)
post.categories.add(cat)
for comment in comments:
comment = self.trunc(ThreadedComment, prompt, **comment)
comment["site"] = site
post.comments.create(**comment)
if verbosity >= 1:
print("Imported comment by: %s" % comment["user_name"])
self.add_meta(post, tags, prompt, verbosity, old_url)
# Create any pages imported (Wordpress can include pages)
in_menus = []
footer = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES
if menu[-1] == "pages/menus/footer.html"]
if options["in_navigation"]:
in_menus = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES]
if footer and not options["in_footer"]:
in_menus.remove(footer[0])
elif footer and options["in_footer"]:
in_menus = footer
parents = []
for page in self.pages:
tags = page.pop("tags")
old_url = page.pop("old_url")
old_id = page.pop("old_id")
old_parent_id = page.pop("old_parent_id")
page = self.trunc(RichTextPage, prompt, **page)
page["status"] = CONTENT_STATUS_PUBLISHED
page["in_menus"] = in_menus
page, created = RichTextPage.objects.get_or_create(**page)
if created and verbosity >= 1:
print("Imported page: %s" % page)
self.add_meta(page, tags, prompt, verbosity, old_url)
parents.append({
'old_id': old_id,
'old_parent_id': old_parent_id,
'page': page,
})
for obj in parents:
if obj['old_parent_id']:
for parent in parents:
if parent['old_id'] == obj['old_parent_id']:
obj['page'].parent = parent['page']
obj['page'].save()
break | Processes the converted data into the yacms database correctly.
Attributes:
yacms_user: the user to import this data against
date_format: the format the dates are in for posts and comments | Below is the instruction that describes the task:
### Input:
Processes the converted data into the yacms database correctly.
Attributes:
yacms_user: the user to import this data against
date_format: the format the dates are in for posts and comments
### Response:
def handle(self, *args, **options):
"""
Processes the converted data into the yacms database correctly.
Attributes:
yacms_user: the user to import this data against
date_format: the format the dates are in for posts and comments
"""
yacms_user = options.get("yacms_user")
site = Site.objects.get_current()
verbosity = int(options.get("verbosity", 1))
prompt = options.get("interactive")
# Validate the yacms user.
if yacms_user is None:
raise CommandError("No yacms user has been specified")
try:
yacms_user = User.objects.get(username=yacms_user)
except User.DoesNotExist:
raise CommandError("Invalid yacms user: %s" % yacms_user)
# Run the subclassed ``handle_import`` and save posts, tags,
# categories, and comments to the DB.
self.handle_import(options)
for post_data in self.posts:
categories = post_data.pop("categories")
tags = post_data.pop("tags")
comments = post_data.pop("comments")
old_url = post_data.pop("old_url")
post_data = self.trunc(BlogPost, prompt, **post_data)
initial = {
"title": post_data.pop("title"),
"user": yacms_user,
}
if post_data["publish_date"] is None:
post_data["status"] = CONTENT_STATUS_DRAFT
post, created = BlogPost.objects.get_or_create(**initial)
for k, v in post_data.items():
setattr(post, k, v)
post.save()
if created and verbosity >= 1:
print("Imported post: %s" % post)
for name in categories:
cat = self.trunc(BlogCategory, prompt, title=name)
if not cat["title"]:
continue
cat, created = BlogCategory.objects.get_or_create(**cat)
if created and verbosity >= 1:
print("Imported category: %s" % cat)
post.categories.add(cat)
for comment in comments:
comment = self.trunc(ThreadedComment, prompt, **comment)
comment["site"] = site
post.comments.create(**comment)
if verbosity >= 1:
print("Imported comment by: %s" % comment["user_name"])
self.add_meta(post, tags, prompt, verbosity, old_url)
# Create any pages imported (Wordpress can include pages)
in_menus = []
footer = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES
if menu[-1] == "pages/menus/footer.html"]
if options["in_navigation"]:
in_menus = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES]
if footer and not options["in_footer"]:
in_menus.remove(footer[0])
elif footer and options["in_footer"]:
in_menus = footer
parents = []
for page in self.pages:
tags = page.pop("tags")
old_url = page.pop("old_url")
old_id = page.pop("old_id")
old_parent_id = page.pop("old_parent_id")
page = self.trunc(RichTextPage, prompt, **page)
page["status"] = CONTENT_STATUS_PUBLISHED
page["in_menus"] = in_menus
page, created = RichTextPage.objects.get_or_create(**page)
if created and verbosity >= 1:
print("Imported page: %s" % page)
self.add_meta(page, tags, prompt, verbosity, old_url)
parents.append({
'old_id': old_id,
'old_parent_id': old_parent_id,
'page': page,
})
for obj in parents:
if obj['old_parent_id']:
for parent in parents:
if parent['old_id'] == obj['old_parent_id']:
obj['page'].parent = parent['page']
obj['page'].save()
break |
def has_code(state, text, incorrect_msg="The checker expected to find `{{text}}` in your command.", fixed=False):
"""Check whether the student code contains text.
This function is a simpler override of the `has_code` function in protowhat,
because ``ast_node._get_text()`` is not implemented in the OSH parser
Using ``has_code()`` should be a last resort. It is always better to look at the result of code
or the side effects they had on the state of your program.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: if specified, this overrides the automatically generated feedback message
in case ``text`` is not found in the student code.
fixed: whether to match ``text`` exactly, rather than using regular expressions.
:Example:
Suppose the solution requires you to do: ::
git push origin master
The following SCT can be written: ::
Ex().has_code(r'git\\s+push\\s+origin\\s+master')
Submissions that would pass: ::
git push origin master
git push origin master
Submissions that would fail: ::
git push --force origin master
"""
stu_code = state.student_code
# either simple text matching or regex test
res = text in stu_code if fixed else re.search(text, stu_code)
if not res:
_msg = state.build_message(incorrect_msg, fmt_kwargs={ 'text': text })
state.do_test(_msg)
return state | Check whether the student code contains text.
This function is a simpler override of the `has_code` function in protowhat,
because ``ast_node._get_text()`` is not implemented in the OSH parser
Using ``has_code()`` should be a last resort. It is always better to look at the result of code
or the side effects they had on the state of your program.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: if specified, this overrides the automatically generated feedback message
in case ``text`` is not found in the student code.
fixed: whether to match ``text`` exactly, rather than using regular expressions.
:Example:
Suppose the solution requires you to do: ::
git push origin master
The following SCT can be written: ::
Ex().has_code(r'git\\s+push\\s+origin\\s+master')
Submissions that would pass: ::
git push origin master
git push origin master
Submissions that would fail: ::
git push --force origin master | Below is the instruction that describes the task:
### Input:
Check whether the student code contains text.
This function is a simpler override of the `has_code` function in protowhat,
because ``ast_node._get_text()`` is not implemented in the OSH parser
Using ``has_code()`` should be a last resort. It is always better to look at the result of code
or the side effects they had on the state of your program.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: if specified, this overrides the automatically generated feedback message
in case ``text`` is not found in the student code.
fixed: whether to match ``text`` exactly, rather than using regular expressions.
:Example:
Suppose the solution requires you to do: ::
git push origin master
The following SCT can be written: ::
Ex().has_code(r'git\\s+push\\s+origin\\s+master')
Submissions that would pass: ::
git push origin master
git push origin master
Submissions that would fail: ::
git push --force origin master
### Response:
def has_code(state, text, incorrect_msg="The checker expected to find `{{text}}` in your command.", fixed=False):
"""Check whether the student code contains text.
This function is a simpler override of the `has_code` function in protowhat,
because ``ast_node._get_text()`` is not implemented in the OSH parser
Using ``has_code()`` should be a last resort. It is always better to look at the result of code
or the side effects they had on the state of your program.
Args:
state: State instance describing student and solution code. Can be omitted if used with Ex().
text : text that student code must contain. Can be a regex pattern or a simple string.
incorrect_msg: if specified, this overrides the automatically generated feedback message
in case ``text`` is not found in the student code.
fixed: whether to match ``text`` exactly, rather than using regular expressions.
:Example:
Suppose the solution requires you to do: ::
git push origin master
The following SCT can be written: ::
Ex().has_code(r'git\\s+push\\s+origin\\s+master')
Submissions that would pass: ::
git push origin master
git push origin master
Submissions that would fail: ::
git push --force origin master
"""
stu_code = state.student_code
# either simple text matching or regex test
res = text in stu_code if fixed else re.search(text, stu_code)
if not res:
_msg = state.build_message(incorrect_msg, fmt_kwargs={ 'text': text })
state.do_test(_msg)
return state |
def transform(self, work, xml, objectId, subreference=None):
""" Transform input according to potentially registered XSLT
.. note:: Since 1.0.0, transform takes an objectId parameter which represents the passage being called
.. note:: Due to XSLT not being able to be used twice, we rexsltise the xml at every call of xslt
.. warning:: Until a C libxslt error is fixed ( https://bugzilla.gnome.org/show_bug.cgi?id=620102 ), \
it is not possible to use strip tags in the xslt given to this application
:param work: Work object containing metadata about the xml
:type work: MyCapytains.resources.inventory.Text
:param xml: XML to transform
:type xml: etree._Element
:param objectId: Object Identifier
:type objectId: str
:param subreference: Subreference
:type subreference: str
:return: String representation of transformed resource
:rtype: str
"""
# We check first that we don't have
if str(objectId) in self._transform:
func = self._transform[str(objectId)]
else:
func = self._transform["default"]
# If we have a string, it means we get a XSL filepath
if isinstance(func, str):
with open(func) as f:
xslt = etree.XSLT(etree.parse(f))
return etree.tostring(
xslt(xml),
encoding=str, method="html",
xml_declaration=None, pretty_print=False, with_tail=True, standalone=None
)
# If we have a function, it means we return the result of the function
elif isinstance(func, Callable):
return func(work, xml, objectId, subreference)
# If we have None, it means we just give back the xml
elif func is None:
return etree.tostring(xml, encoding=str) | Transform input according to potentially registered XSLT
.. note:: Since 1.0.0, transform takes an objectId parameter which represents the passage being called
.. note:: Due to XSLT not being able to be used twice, we rexsltise the xml at every call of xslt
.. warning:: Until a C libxslt error is fixed ( https://bugzilla.gnome.org/show_bug.cgi?id=620102 ), \
it is not possible to use strip tags in the xslt given to this application
:param work: Work object containing metadata about the xml
:type work: MyCapytains.resources.inventory.Text
:param xml: XML to transform
:type xml: etree._Element
:param objectId: Object Identifier
:type objectId: str
:param subreference: Subreference
:type subreference: str
:return: String representation of transformed resource
:rtype: str | Below is the instruction that describes the task:
### Input:
Transform input according to potentially registered XSLT
.. note:: Since 1.0.0, transform takes an objectId parameter which represents the passage being called
.. note:: Due to XSLT not being able to be used twice, we rexsltise the xml at every call of xslt
.. warning:: Until a C libxslt error is fixed ( https://bugzilla.gnome.org/show_bug.cgi?id=620102 ), \
it is not possible to use strip tags in the xslt given to this application
:param work: Work object containing metadata about the xml
:type work: MyCapytains.resources.inventory.Text
:param xml: XML to transform
:type xml: etree._Element
:param objectId: Object Identifier
:type objectId: str
:param subreference: Subreference
:type subreference: str
:return: String representation of transformed resource
:rtype: str
### Response:
def transform(self, work, xml, objectId, subreference=None):
""" Transform input according to potentially registered XSLT
.. note:: Since 1.0.0, transform takes an objectId parameter which represents the passage being called
.. note:: Due to XSLT not being able to be used twice, we rexsltise the xml at every call of xslt
.. warning:: Until a C libxslt error is fixed ( https://bugzilla.gnome.org/show_bug.cgi?id=620102 ), \
it is not possible to use strip tags in the xslt given to this application
:param work: Work object containing metadata about the xml
:type work: MyCapytains.resources.inventory.Text
:param xml: XML to transform
:type xml: etree._Element
:param objectId: Object Identifier
:type objectId: str
:param subreference: Subreference
:type subreference: str
:return: String representation of transformed resource
:rtype: str
"""
# We check first that we don't have
if str(objectId) in self._transform:
func = self._transform[str(objectId)]
else:
func = self._transform["default"]
# If we have a string, it means we get a XSL filepath
if isinstance(func, str):
with open(func) as f:
xslt = etree.XSLT(etree.parse(f))
return etree.tostring(
xslt(xml),
encoding=str, method="html",
xml_declaration=None, pretty_print=False, with_tail=True, standalone=None
)
# If we have a function, it means we return the result of the function
elif isinstance(func, Callable):
return func(work, xml, objectId, subreference)
# If we have None, it means we just give back the xml
elif func is None:
return etree.tostring(xml, encoding=str) |
async def attach_tip(data):
"""
Attach a tip to the current pipette
:param data: Information obtained from a POST request.
The content type is application/json.
The correct packet form should be as follows:
{
'token': UUID token from current session start
'command': 'attach tip'
'tipLength': a float representing how much the length of a pipette
increases when a tip is added
}
"""
global session
tip_length = data.get('tipLength')
if not tip_length:
message = 'Error: "tipLength" must be specified in request'
status = 400
else:
if not feature_flags.use_protocol_api_v2():
pipette = session.pipettes[session.current_mount]
if pipette.tip_attached:
log.warning('attach tip called while tip already attached')
pipette._remove_tip(pipette._tip_length)
pipette._add_tip(tip_length)
else:
session.adapter.add_tip(session.current_mount, tip_length)
if session.cp:
session.cp = CriticalPoint.FRONT_NOZZLE
session.tip_length = tip_length
message = "Tip length set: {}".format(tip_length)
status = 200
return web.json_response({'message': message}, status=status) | Attach a tip to the current pipette
:param data: Information obtained from a POST request.
The content type is application/json.
The correct packet form should be as follows:
{
'token': UUID token from current session start
'command': 'attach tip'
'tipLength': a float representing how much the length of a pipette
increases when a tip is added
} | Below is the instruction that describes the task:
### Input:
Attach a tip to the current pipette
:param data: Information obtained from a POST request.
The content type is application/json.
The correct packet form should be as follows:
{
'token': UUID token from current session start
'command': 'attach tip'
'tipLength': a float representing how much the length of a pipette
increases when a tip is added
}
### Response:
async def attach_tip(data):
"""
Attach a tip to the current pipette
:param data: Information obtained from a POST request.
The content type is application/json.
The correct packet form should be as follows:
{
'token': UUID token from current session start
'command': 'attach tip'
'tipLength': a float representing how much the length of a pipette
increases when a tip is added
}
"""
global session
tip_length = data.get('tipLength')
if not tip_length:
message = 'Error: "tipLength" must be specified in request'
status = 400
else:
if not feature_flags.use_protocol_api_v2():
pipette = session.pipettes[session.current_mount]
if pipette.tip_attached:
log.warning('attach tip called while tip already attached')
pipette._remove_tip(pipette._tip_length)
pipette._add_tip(tip_length)
else:
session.adapter.add_tip(session.current_mount, tip_length)
if session.cp:
session.cp = CriticalPoint.FRONT_NOZZLE
session.tip_length = tip_length
message = "Tip length set: {}".format(tip_length)
status = 200
return web.json_response({'message': message}, status=status) |
def asserts(input_value, rule, message=''):
""" this function allows you to write asserts in generators since there are
moments where you actually want the program to halt when certain values
are seen.
"""
assert callable(rule) or type(rule)==bool, 'asserts needs rule to be a callable function or a test boolean'
assert isinstance(message, str), 'asserts needs message to be a string'
# if the message is empty and rule is callable, fill message with rule's source code
if len(message)==0 and callable(rule):
try:
s = getsource(rule).splitlines()[0].strip()
except:
s = repr(rule).strip()
message = 'illegal input of {} breaks - {}'.format(input_value, s)
if callable(rule):
# if rule is a function, run the function and assign it to rule
rule = rule(input_value)
# now, assert the rule and return the input value
assert rule, message
return input_value | this function allows you to write asserts in generators since there are
moments where you actually want the program to halt when certain values
are seen. | Below is the instruction that describes the task:
### Input:
this function allows you to write asserts in generators since there are
moments where you actually want the program to halt when certain values
are seen.
### Response:
def asserts(input_value, rule, message=''):
""" this function allows you to write asserts in generators since there are
moments where you actually want the program to halt when certain values
are seen.
"""
assert callable(rule) or type(rule)==bool, 'asserts needs rule to be a callable function or a test boolean'
assert isinstance(message, str), 'asserts needs message to be a string'
# if the message is empty and rule is callable, fill message with rule's source code
if len(message)==0 and callable(rule):
try:
s = getsource(rule).splitlines()[0].strip()
except:
s = repr(rule).strip()
message = 'illegal input of {} breaks - {}'.format(input_value, s)
if callable(rule):
# if rule is a function, run the function and assign it to rule
rule = rule(input_value)
# now, assert the rule and return the input value
assert rule, message
return input_value |
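A hedged usage sketch for asserts inside a generator pipeline, the situation its docstring describes; the pipeline itself is invented for illustration:
# Validate values as they flow through a generator expression.
checked = (asserts(x, lambda v: v >= 0, 'negative value') for x in [3, 1, 4])
print(list(checked))  # [3, 1, 4]; a negative input would raise AssertionError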
def accept_override(self):
"""Unbind all conflicted shortcuts, and accept the new one"""
conflicts = self.check_conflicts()
if conflicts:
for shortcut in conflicts:
shortcut.key = ''
self.accept() | Unbind all conflicted shortcuts, and accept the new one | Below is the instruction that describes the task:
### Input:
Unbind all conflicted shortcuts, and accept the new one
### Response:
def accept_override(self):
"""Unbind all conflicted shortcuts, and accept the new one"""
conflicts = self.check_conflicts()
if conflicts:
for shortcut in conflicts:
shortcut.key = ''
self.accept() |
def parse(lines, root=None):
"""
Parses a list of lines from ls into dictionaries representing their
components.
Args:
lines (list): A list of lines generated by ls.
root (str): The directory name to be used for ls output stanzas that
don't have a name.
Returns:
A dictionary representing the ls output. It's keyed by the path
containing each ls stanza.
"""
doc = {}
entries = []
name = None
total = None
for line in lines:
line = line.strip()
if not line:
continue
if line and line[0] == "/" and line[-1] == ":":
if name is None:
name = line[:-1]
if entries:
d = Directory(name, total or len(entries), entries)
doc[root] = d
total = None
entries = []
else:
d = Directory(name, total or len(entries), entries)
doc[name or root] = d
total = None
entries = []
name = line[:-1]
continue
if line.startswith("total"):
total = int(line.split(None, 1)[1])
continue
entries.append(line)
name = name or root
doc[name] = Directory(name, total or len(entries), entries)
return doc | Parses a list of lines from ls into dictionaries representing their
components.
Args:
lines (list): A list of lines generated by ls.
root (str): The directory name to be used for ls output stanzas that
don't have a name.
Returns:
A dictionary representing the ls output. It's keyed by the path
containing each ls stanza. | Below is the instruction that describes the task:
### Input:
Parses a list of lines from ls into dictionaries representing their
components.
Args:
lines (list): A list of lines generated by ls.
root (str): The directory name to be used for ls output stanzas that
don't have a name.
Returns:
A dictionary representing the ls output. It's keyed by the path
containing each ls stanza.
### Response:
def parse(lines, root=None):
"""
Parses a list of lines from ls into dictionaries representing their
components.
Args:
lines (list): A list of lines generated by ls.
root (str): The directory name to be used for ls output stanzas that
don't have a name.
Returns:
A dictionary representing the ls output. It's keyed by the path
containing each ls stanza.
"""
doc = {}
entries = []
name = None
total = None
for line in lines:
line = line.strip()
if not line:
continue
if line and line[0] == "/" and line[-1] == ":":
if name is None:
name = line[:-1]
if entries:
d = Directory(name, total or len(entries), entries)
doc[root] = d
total = None
entries = []
else:
d = Directory(name, total or len(entries), entries)
doc[name or root] = d
total = None
entries = []
name = line[:-1]
continue
if line.startswith("total"):
total = int(line.split(None, 1)[1])
continue
entries.append(line)
name = name or root
doc[name] = Directory(name, total or len(entries), entries)
return doc |
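A hedged usage sketch for parse; Directory is assumed to be a namedtuple-like container with name/total/entries fields defined alongside the function in its source module, and the ls lines are invented:
lines = [
    "/etc:",
    "total 2",
    "-rw-r--r-- 1 0 0 813 Mar  4 16:19 hosts",
    "-rw-r--r-- 1 0 0 1.2K Mar  4 16:19 passwd",
]
result = parse(lines)
print(result["/etc"].total)    # 2
print(result["/etc"].entries)  # the two raw entry lines, unparsed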
def parse_radl(data):
"""
Parse a RADL document.
Args:
- data(str): filepath to a RADL document, or a string with its content.
Return: RADL object.
"""
if data is None:
return None
elif os.path.isfile(data):
f = open(data)
data = "".join(f.readlines())
f.close()
elif data.strip() == "":
return RADL()
data = data + "\n"
parser = RADLParser(lextab='radl')
return parser.parse(data) | Parse a RADL document.
Args:
- data(str): filepath to a RADL document, or a string with its content.
Return: RADL object. | Below is the instruction that describes the task:
### Input:
Parse a RADL document.
Args:
- data(str): filepath to a RADL document, or a string with its content.
Return: RADL object.
### Response:
def parse_radl(data):
"""
Parse a RADL document.
Args:
- data(str): filepath to a RADL document, or a string with its content.
Return: RADL object.
"""
if data is None:
return None
elif os.path.isfile(data):
f = open(data)
data = "".join(f.readlines())
f.close()
elif data.strip() == "":
return RADL()
data = data + "\n"
parser = RADLParser(lextab='radl')
return parser.parse(data) |
def drop_bad_characters(text):
"""Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
"""
# Strip all non-ascii and non-printable characters
text = ''.join([c for c in text if c in ALLOWED_CHARS])
return text | Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped | Below is the instruction that describes the task:
### Input:
Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
### Response:
def drop_bad_characters(text):
"""Takes a text and drops all non-printable and non-ascii characters and
also any whitespace characters that aren't space.
:arg str text: the text to fix
:returns: text with all bad characters dropped
"""
# Strip all non-ascii and non-printable characters
text = ''.join([c for c in text if c in ALLOWED_CHARS])
return text |
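drop_bad_characters depends on an ALLOWED_CHARS constant that the record does not show; the definition below is a plausible stand-in (printable ASCII with space as the only whitespace), used only to make the sketch self-contained:
import string
# Assumed definition of ALLOWED_CHARS; the real constant may differ.
ALLOWED_CHARS = (set(string.printable) - set(string.whitespace)) | {' '}
print(drop_bad_characters('caf\u00e9\tok'))  # 'cafok' under this assumption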
def save(self, *args, **kwargs):
"""
Takes an optional last_save keyword argument
otherwise last_save will be set to timezone.now()
Calls super to actually save the object.
"""
self.last_save = kwargs.pop('last_save', timezone.now())
super(Cloneable, self).save(*args, **kwargs) | Takes an optional last_save keyword argument
otherwise last_save will be set to timezone.now()
Calls super to actually save the object. | Below is the instruction that describes the task:
### Input:
Takes an optional last_save keyword argument
otherwise last_save will be set to timezone.now()
Calls super to actually save the object.
### Response:
def save(self, *args, **kwargs):
"""
Takes an optional last_save keyword argument
otherwise last_save will be set to timezone.now()
Calls super to actually save the object.
"""
self.last_save = kwargs.pop('last_save', timezone.now())
super(Cloneable, self).save(*args, **kwargs) |
def _get_ex_data(self):
"""Return hierarchical function name."""
func_id, func_name = self._get_callable_path()
if self._full_cname:
func_name = self.encode_call(func_name)
return func_id, func_name | Return hierarchical function name. | Below is the the instruction that describes the task:
### Input:
Return hierarchical function name.
### Response:
def _get_ex_data(self):
"""Return hierarchical function name."""
func_id, func_name = self._get_callable_path()
if self._full_cname:
func_name = self.encode_call(func_name)
return func_id, func_name |
def GetSysFeeAmountByHeight(self, height):
"""
Get the system fee for the specified block.
Args:
height (int): block height.
Returns:
int:
"""
hash = self.GetBlockHash(height)
return self.GetSysFeeAmount(hash) | Get the system fee for the specified block.
Args:
height (int): block height.
Returns:
int: | Below is the instruction that describes the task:
### Input:
Get the system fee for the specified block.
Args:
height (int): block height.
Returns:
int:
### Response:
def GetSysFeeAmountByHeight(self, height):
"""
Get the system fee for the specified block.
Args:
height (int): block height.
Returns:
int:
"""
hash = self.GetBlockHash(height)
return self.GetSysFeeAmount(hash) |
def special_links_replace(text, urls):
'''
Replace simplified Regulations and Guidelines links into actual links.
'urls' dictionary is expected to provide actual links to the targeted
Regulations and Guidelines, as well as to the PDF file.
'''
match_number = r'([A-Za-z0-9]+)' + r'(\+*)'
reference_list = [(r'regulations:article:' + match_number, urls['regulations']),
(r'regulations:regulation:' + match_number, urls['regulations']),
(r'guidelines:article:' + match_number, urls['guidelines']),
(r'guidelines:guideline:' + match_number, urls['guidelines']),
]
anchor_list = [(r'regulations:contents', urls['regulations'] + r'#contents'),
(r'guidelines:contents', urls['guidelines'] + r'#contents'),
(r'regulations:top', urls['regulations'] + r'#'),
(r'guidelines:top', urls['guidelines'] + r'#'),
(r'link:pdf', urls['pdf'] + '.pdf'),
]
retval = text
for match, repl in reference_list:
retval = re.sub(match, repl + r'#\1\2', retval)
for match, repl in anchor_list:
retval = re.sub(match, repl, retval)
return retval | Replace simplified Regulations and Guidelines links into actual links.
'urls' dictionary is expected to provide actual links to the targeted
Regulations and Guidelines, as well as to the PDF file. | Below is the instruction that describes the task:
### Input:
Replace simplified Regulations and Guidelines links into actual links.
'urls' dictionary is expected to provide actual links to the targeted
Regulations and Guidelines, as well as to the PDF file.
### Response:
def special_links_replace(text, urls):
'''
Replace simplified Regulations and Guidelines links into actual links.
'urls' dictionary is expected to provide actual links to the targeted
Regulations and Guidelines, as well as to the PDF file.
'''
match_number = r'([A-Za-z0-9]+)' + r'(\+*)'
reference_list = [(r'regulations:article:' + match_number, urls['regulations']),
(r'regulations:regulation:' + match_number, urls['regulations']),
(r'guidelines:article:' + match_number, urls['guidelines']),
(r'guidelines:guideline:' + match_number, urls['guidelines']),
]
anchor_list = [(r'regulations:contents', urls['regulations'] + r'#contents'),
(r'guidelines:contents', urls['guidelines'] + r'#contents'),
(r'regulations:top', urls['regulations'] + r'#'),
(r'guidelines:top', urls['guidelines'] + r'#'),
(r'link:pdf', urls['pdf'] + '.pdf'),
]
retval = text
for match, repl in reference_list:
retval = re.sub(match, repl + r'#\1\2', retval)
for match, repl in anchor_list:
retval = re.sub(match, repl, retval)
return retval |
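A hedged usage sketch for special_links_replace; the urls dict keys ('regulations', 'guidelines', 'pdf') follow the docstring, while the URL values themselves are invented:
urls = {
    'regulations': '/regulations/',
    'guidelines': '/guidelines/',
    'pdf': '/wca-regulations',
}
text = 'See regulations:article:A1 and link:pdf.'
print(special_links_replace(text, urls))
# -> 'See /regulations/#A1 and /wca-regulations.pdf.'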
def apply_effect_expression_filters(
effects,
gene_expression_dict,
gene_expression_threshold,
transcript_expression_dict,
transcript_expression_threshold):
"""
Filter collection of varcode effects by given gene
and transcript expression thresholds.
Parameters
----------
effects : varcode.EffectCollection
gene_expression_dict : dict
gene_expression_threshold : float
transcript_expression_dict : dict
transcript_expression_threshold : float
"""
if gene_expression_dict:
effects = apply_filter(
lambda effect: (
gene_expression_dict.get(effect.gene_id, 0.0) >=
gene_expression_threshold),
effects,
result_fn=effects.clone_with_new_elements,
filter_name="Effect gene expression (min = %0.4f)" % gene_expression_threshold)
if transcript_expression_dict:
effects = apply_filter(
lambda effect: (
transcript_expression_dict.get(effect.transcript_id, 0.0) >=
transcript_expression_threshold
),
effects,
result_fn=effects.clone_with_new_elements,
filter_name=(
"Effect transcript expression (min=%0.4f)" % (
transcript_expression_threshold,)))
return effects | Filter collection of varcode effects by given gene
and transcript expression thresholds.
Parameters
----------
effects : varcode.EffectCollection
gene_expression_dict : dict
gene_expression_threshold : float
transcript_expression_dict : dict
transcript_expression_threshold : float | Below is the instruction that describes the task:
### Input:
Filter collection of varcode effects by given gene
and transcript expression thresholds.
Parameters
----------
effects : varcode.EffectCollection
gene_expression_dict : dict
gene_expression_threshold : float
transcript_expression_dict : dict
transcript_expression_threshold : float
### Response:
def apply_effect_expression_filters(
effects,
gene_expression_dict,
gene_expression_threshold,
transcript_expression_dict,
transcript_expression_threshold):
"""
Filter collection of varcode effects by given gene
and transcript expression thresholds.
Parameters
----------
effects : varcode.EffectCollection
gene_expression_dict : dict
gene_expression_threshold : float
transcript_expression_dict : dict
transcript_expression_threshold : float
"""
if gene_expression_dict:
effects = apply_filter(
lambda effect: (
gene_expression_dict.get(effect.gene_id, 0.0) >=
gene_expression_threshold),
effects,
result_fn=effects.clone_with_new_elements,
filter_name="Effect gene expression (min = %0.4f)" % gene_expression_threshold)
if transcript_expression_dict:
effects = apply_filter(
lambda effect: (
transcript_expression_dict.get(effect.transcript_id, 0.0) >=
transcript_expression_threshold
),
effects,
result_fn=effects.clone_with_new_elements,
filter_name=(
"Effect transcript expression (min=%0.4f)" % (
transcript_expression_threshold,)))
return effects |
def flatten_reshape(variable, name='flatten'):
"""Reshapes a high-dimension vector input.
[batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]
Parameters
----------
variable : TensorFlow variable or tensor
The variable or tensor to be flatten.
name : str
A unique layer name.
Returns
-------
Tensor
Flatten Tensor
Examples
--------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, [None, 128, 128, 3])
>>> # Convolution Layer with 32 filters and a kernel size of 5
>>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
>>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
>>> network = tf.layers.max_pooling2d(network, 2, 2)
>>> print(network.get_shape()[:].as_list())
>>> [None, 62, 62, 32]
>>> network = tl.layers.flatten_reshape(network)
>>> print(network.get_shape()[:].as_list())
>>> [None, 123008]
"""
dim = 1
for d in variable.get_shape()[1:].as_list():
dim *= d
return tf.reshape(variable, shape=[-1, dim], name=name) | Reshapes a high-dimension vector input.
[batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]
Parameters
----------
variable : TensorFlow variable or tensor
The variable or tensor to be flatten.
name : str
A unique layer name.
Returns
-------
Tensor
Flatten Tensor
Examples
--------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, [None, 128, 128, 3])
>>> # Convolution Layer with 32 filters and a kernel size of 5
>>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
>>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
>>> network = tf.layers.max_pooling2d(network, 2, 2)
>>> print(network.get_shape()[:].as_list())
>>> [None, 62, 62, 32]
>>> network = tl.layers.flatten_reshape(network)
>>> print(network.get_shape()[:].as_list())
>>> [None, 123008] | Below is the instruction that describes the task:
### Input:
Reshapes a high-dimension vector input.
[batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]
Parameters
----------
variable : TensorFlow variable or tensor
The variable or tensor to be flatten.
name : str
A unique layer name.
Returns
-------
Tensor
Flatten Tensor
Examples
--------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, [None, 128, 128, 3])
>>> # Convolution Layer with 32 filters and a kernel size of 5
>>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
>>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
>>> network = tf.layers.max_pooling2d(network, 2, 2)
>>> print(network.get_shape()[:].as_list())
>>> [None, 62, 62, 32]
>>> network = tl.layers.flatten_reshape(network)
>>> print(network.get_shape()[:].as_list())
>>> [None, 123008]
### Response:
def flatten_reshape(variable, name='flatten'):
"""Reshapes a high-dimension vector input.
[batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]
Parameters
----------
variable : TensorFlow variable or tensor
The variable or tensor to be flatten.
name : str
A unique layer name.
Returns
-------
Tensor
Flatten Tensor
Examples
--------
>>> import tensorflow as tf
>>> import tensorlayer as tl
>>> x = tf.placeholder(tf.float32, [None, 128, 128, 3])
>>> # Convolution Layer with 32 filters and a kernel size of 5
>>> network = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
>>> # Max Pooling (down-sampling) with strides of 2 and kernel size of 2
>>> network = tf.layers.max_pooling2d(network, 2, 2)
>>> print(network.get_shape()[:].as_list())
>>> [None, 62, 62, 32]
>>> network = tl.layers.flatten_reshape(network)
>>> print(network.get_shape()[:].as_list())
>>> [None, 123008]
"""
dim = 1
for d in variable.get_shape()[1:].as_list():
dim *= d
return tf.reshape(variable, shape=[-1, dim], name=name) |
def do_login(self, line):
"login aws-acces-key aws-secret"
if line:
args = self.getargs(line)
self.conn = boto.connect_dynamodb(
aws_access_key_id=args[0],
aws_secret_access_key=args[1])
else:
self.conn = boto.connect_dynamodb()
self.do_tables('') | login aws-access-key aws-secret | Below is the instruction that describes the task:
### Input:
login aws-access-key aws-secret
### Response:
def do_login(self, line):
"login aws-acces-key aws-secret"
if line:
args = self.getargs(line)
self.conn = boto.connect_dynamodb(
aws_access_key_id=args[0],
aws_secret_access_key=args[1])
else:
self.conn = boto.connect_dynamodb()
self.do_tables('') |
def _locked(func):
"""! Decorator to automatically lock an AccessPort method."""
def _locking(self, *args, **kwargs):
try:
self.lock()
return func(self, *args, **kwargs)
finally:
self.unlock()
return _locking | ! Decorator to automatically lock an AccessPort method. | Below is the instruction that describes the task:
### Input:
! Decorator to automatically lock an AccessPort method.
### Response:
def _locked(func):
"""! Decorator to automatically lock an AccessPort method."""
def _locking(self, *args, **kwargs):
try:
self.lock()
return func(self, *args, **kwargs)
finally:
self.unlock()
return _locking |
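A hedged usage sketch for the _locked decorator; DummyAP is an invented stand-in for an AccessPort exposing lock()/unlock():
import threading

class DummyAP(object):
    def __init__(self):
        self._mutex = threading.RLock()
    def lock(self):
        self._mutex.acquire()
    def unlock(self):
        self._mutex.release()
    @_locked
    def read_reg(self, addr):
        return addr + 1  # placeholder for a real register read

print(DummyAP().read_reg(0x10))  # 17, computed with the lock held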
def _init_backends(self):
""" Initialize auth backends.
"""
# fetch auth backends from config file
self._backends = {}
for section in self._config.sections():
# does the section define an auth backend?
section_components = section.rsplit('.', 1)
if section_components[0] == 'auth.backends':
auth_backend = section_components[1]
self._backends[auth_backend] = eval(self._config.get(section, 'type'))
self._logger.debug("Registered auth backends %s" % str(self._backends)) | Initialize auth backends. | Below is the instruction that describes the task:
### Input:
Initialize auth backends.
### Response:
def _init_backends(self):
""" Initialize auth backends.
"""
# fetch auth backends from config file
self._backends = {}
for section in self._config.sections():
# does the section define an auth backend?
section_components = section.rsplit('.', 1)
if section_components[0] == 'auth.backends':
auth_backend = section_components[1]
self._backends[auth_backend] = eval(self._config.get(section, 'type'))
self._logger.debug("Registered auth backends %s" % str(self._backends)) |
def _write_color (self, text, color=None):
"""Print text with given color. If color is None, print text as-is."""
if color is None:
self.fp.write(text)
else:
write_color(self.fp, text, color) | Print text with given color. If color is None, print text as-is. | Below is the instruction that describes the task:
### Input:
Print text with given color. If color is None, print text as-is.
### Response:
def _write_color (self, text, color=None):
"""Print text with given color. If color is None, print text as-is."""
if color is None:
self.fp.write(text)
else:
write_color(self.fp, text, color) |
def tickets_update_many(self, data, ids=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/tickets#update-many-tickets"
api_path = "/api/v2/tickets/update_many.json"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if ids:
api_query.update({
"ids": ids,
})
return self.call(api_path, query=api_query, method="PUT", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/tickets#update-many-tickets | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/tickets#update-many-tickets
### Response:
def tickets_update_many(self, data, ids=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/tickets#update-many-tickets"
api_path = "/api/v2/tickets/update_many.json"
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if ids:
api_query.update({
"ids": ids,
})
return self.call(api_path, query=api_query, method="PUT", data=data, **kwargs) |
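A hedged usage sketch for tickets_update_many; zd stands in for an instance of the client class (invented here), and the payload/ids shapes follow the Zendesk endpoint linked in the docstring:
data = {'ticket': {'status': 'solved'}}
# Issues PUT /api/v2/tickets/update_many.json?ids=1,2,3
zd.tickets_update_many(data, ids='1,2,3')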
def wait_processed(self, timeout):
"""Wait until time outs, or this event is processed. Event must be waitable for this operation to have
described semantics, for non-waitable returns true immediately.
in timeout of type int
Maximum time to wait for event processing, in ms;
0 = no wait, -1 = indefinite wait.
return result of type bool
If this event was processed before timeout.
"""
if not isinstance(timeout, baseinteger):
raise TypeError("timeout can only be an instance of type baseinteger")
result = self._call("waitProcessed",
in_p=[timeout])
return result | Wait until the timeout expires, or this event is processed. Event must be waitable for this operation to have
described semantics; for non-waitable events it returns true immediately.
in timeout of type int
Maximum time to wait for event processing, in ms;
0 = no wait, -1 = indefinite wait.
return result of type bool
If this event was processed before timeout. | Below is the instruction that describes the task:
### Input:
Wait until the timeout expires, or this event is processed. Event must be waitable for this operation to have
described semantics; for non-waitable events it returns true immediately.
in timeout of type int
Maximum time to wait for event processing, in ms;
0 = no wait, -1 = indefinite wait.
return result of type bool
If this event was processed before timeout.
### Response:
def wait_processed(self, timeout):
"""Wait until time outs, or this event is processed. Event must be waitable for this operation to have
described semantics, for non-waitable returns true immediately.
in timeout of type int
Maximum time to wait for event processing, in ms;
0 = no wait, -1 = indefinite wait.
return result of type bool
If this event was processed before timeout.
"""
if not isinstance(timeout, baseinteger):
raise TypeError("timeout can only be an instance of type baseinteger")
result = self._call("waitProcessed",
in_p=[timeout])
return result |
def _domain_event_job_completed_cb(conn, domain, params, opaque):
'''
Domain job completion events handler
'''
_salt_send_domain_event(opaque, conn, domain, opaque['event'], {
'params': params
}) | Domain job completion events handler | Below is the instruction that describes the task:
### Input:
Domain job completion events handler
### Response:
def _domain_event_job_completed_cb(conn, domain, params, opaque):
'''
Domain job completion events handler
'''
_salt_send_domain_event(opaque, conn, domain, opaque['event'], {
'params': params
}) |
async def send_request(self, method, args=()):
'''Send an RPC request over the network.'''
message, event = self.connection.send_request(Request(method, args))
return await self._send_concurrent(message, event, 1) | Send an RPC request over the network. | Below is the instruction that describes the task:
### Input:
Send an RPC request over the network.
### Response:
async def send_request(self, method, args=()):
'''Send an RPC request over the network.'''
message, event = self.connection.send_request(Request(method, args))
return await self._send_concurrent(message, event, 1) |
def hr_diagram(cluster_name, output=None):
"""Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R
diagram using the cluster_name; then show it.
Re
"""
cluster = get_hr_data(cluster_name)
pf = hr_diagram_figure(cluster)
show_with_bokeh_server(pf) | Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R
diagram using the cluster_name; then show it.
Re | Below is the instruction that describes the task:
### Input:
Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R
diagram using the cluster_name; then show it.
Re
### Response:
def hr_diagram(cluster_name, output=None):
"""Create a :class:`~bokeh.plotting.figure.Figure` to create an H-R
diagram using the cluster_name; then show it.
Re
"""
cluster = get_hr_data(cluster_name)
pf = hr_diagram_figure(cluster)
show_with_bokeh_server(pf) |
def get_function_policy(self, function_name):
# type: (str) -> Dict[str, Any]
"""Return the function policy for a lambda function.
This function will extract the policy string as a json document
and return the json.loads(...) version of the policy.
"""
client = self._client('lambda')
try:
policy = client.get_policy(FunctionName=function_name)
return json.loads(policy['Policy'])
except client.exceptions.ResourceNotFoundException:
return {'Statement': []} | Return the function policy for a lambda function.
This function will extract the policy string as a json document
and return the json.loads(...) version of the policy. | Below is the instruction that describes the task:
### Input:
Return the function policy for a lambda function.
This function will extract the policy string as a json document
and return the json.loads(...) version of the policy.
### Response:
def get_function_policy(self, function_name):
# type: (str) -> Dict[str, Any]
"""Return the function policy for a lambda function.
This function will extract the policy string as a json document
and return the json.loads(...) version of the policy.
"""
client = self._client('lambda')
try:
policy = client.get_policy(FunctionName=function_name)
return json.loads(policy['Policy'])
except client.exceptions.ResourceNotFoundException:
return {'Statement': []} |
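A hedged usage sketch for get_function_policy; client stands in for an instance of the class that holds this method, and the function name is invented. The 'Statement' key is the standard top-level list in an IAM policy document, which is why the fallback return value uses it:
policy = client.get_function_policy('my-function')  # hypothetical instance/name
for stmt in policy['Statement']:
    print(stmt.get('Sid'), stmt.get('Action'))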
def __create_index(self, keys, index_options, session, **kwargs):
"""Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
"""
index_doc = helpers._index_document(keys)
index = {"key": index_doc}
collation = validate_collation_or_none(
index_options.pop('collation', None))
index.update(index_options)
with self._socket_for_writes(session) as sock_info:
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
else:
index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session) | Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`. | Below is the instruction that describes the task:
### Input:
Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
### Response:
def __create_index(self, keys, index_options, session, **kwargs):
"""Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
"""
index_doc = helpers._index_document(keys)
index = {"key": index_doc}
collation = validate_collation_or_none(
index_options.pop('collation', None))
index.update(index_options)
with self._socket_for_writes(session) as sock_info:
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
else:
index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session) |
def ReqOrderAction(self, OrderID: str):
"""撤单
:param OrderID:
"""
of = self.orders[OrderID]
if not of:
return -1
else:
pOrderId = of.OrderID
return self.t.ReqOrderAction(
self.broker,
self.investor,
OrderRef=pOrderId.split('|')[2],
FrontID=int(pOrderId.split('|')[1]),
SessionID=int(pOrderId.split('|')[0]),
InstrumentID=of.InstrumentID,
ActionFlag=TThostFtdcActionFlagType.THOST_FTDC_AF_Delete) | Cancel an order
:param OrderID: | Below is the instruction that describes the task:
### Input:
Cancel an order
:param OrderID:
### Response:
def ReqOrderAction(self, OrderID: str):
"""撤单
:param OrderID:
"""
of = self.orders[OrderID]
if not of:
return -1
else:
pOrderId = of.OrderID
return self.t.ReqOrderAction(
self.broker,
self.investor,
OrderRef=pOrderId.split('|')[2],
FrontID=int(pOrderId.split('|')[1]),
SessionID=int(pOrderId.split('|')[0]),
InstrumentID=of.InstrumentID,
ActionFlag=TThostFtdcActionFlagType.THOST_FTDC_AF_Delete) |
def filter_human_only(stmts_in, **kwargs):
"""Filter out statements that are grounded, but not to a human gene.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes all bound conditions that are grounded but not to human
genes. If false (default), filters out statements with boundary
conditions that are grounded to non-human genes.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
from indra.databases import uniprot_client
if 'remove_bound' in kwargs and kwargs['remove_bound']:
remove_bound = True
else:
remove_bound = False
dump_pkl = kwargs.get('save')
logger.info('Filtering %d statements for human genes only...' %
len(stmts_in))
stmts_out = []
def criterion(agent):
upid = agent.db_refs.get('UP')
if upid and not uniprot_client.is_human(upid):
return False
else:
return True
for st in stmts_in:
human_genes = True
for agent in st.agent_list():
if agent is not None:
if not criterion(agent):
human_genes = False
break
if remove_bound:
_remove_bound_conditions(agent, criterion)
elif _any_bound_condition_fails_criterion(agent, criterion):
human_genes = False
break
if human_genes:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | Filter out statements that are grounded, but not to a human gene.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes all bound conditions that are grounded but not to human
genes. If false (default), filters out statements with boundary
conditions that are grounded to non-human genes.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements. | Below is the instruction that describes the task:
### Input:
Filter out statements that are grounded, but not to a human gene.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes all bound conditions that are grounded but not to human
genes. If false (default), filters out statements with boundary
conditions that are grounded to non-human genes.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
### Response:
def filter_human_only(stmts_in, **kwargs):
"""Filter out statements that are grounded, but not to a human gene.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
remove_bound: Optional[bool]
If true, removes all bound conditions that are grounded but not to human
genes. If false (default), filters out statements with boundary
conditions that are grounded to non-human genes.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
from indra.databases import uniprot_client
if 'remove_bound' in kwargs and kwargs['remove_bound']:
remove_bound = True
else:
remove_bound = False
dump_pkl = kwargs.get('save')
logger.info('Filtering %d statements for human genes only...' %
len(stmts_in))
stmts_out = []
def criterion(agent):
upid = agent.db_refs.get('UP')
if upid and not uniprot_client.is_human(upid):
return False
else:
return True
for st in stmts_in:
human_genes = True
for agent in st.agent_list():
if agent is not None:
if not criterion(agent):
human_genes = False
break
if remove_bound:
_remove_bound_conditions(agent, criterion)
elif _any_bound_condition_fails_criterion(agent, criterion):
human_genes = False
break
if human_genes:
stmts_out.append(st)
logger.info('%d statements after filter...' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out |
def escape(s, quote=False):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
:param quote: set to true to also escape double quotes.
"""
if s is None:
return ''
if hasattr(s, '__html__'):
return s.__html__()
if not isinstance(s, (text_type, binary_type)):
s = text_type(s)
if isinstance(s, binary_type):
try:
s.decode('ascii')
except:
s = s.decode('utf-8', 'replace')
s = s.replace('&', '&').replace('<', '<').replace('>', '>')
if quote:
s = s.replace('"', """)
return s | Replace special characters "&", "<" and ">" with HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
:param quote: set to true to also escape double quotes. | Below is the instruction that describes the task:
### Input:
Replace special characters "&", "<" and ">" to HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
:param quote: set to true to also escape double quotes.
### Response:
def escape(s, quote=False):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
:param quote: set to true to also escape double quotes.
"""
if s is None:
return ''
if hasattr(s, '__html__'):
return s.__html__()
if not isinstance(s, (text_type, binary_type)):
s = text_type(s)
if isinstance(s, binary_type):
try:
s.decode('ascii')
except UnicodeDecodeError:
s = s.decode('utf-8', 'replace')
s = s.replace('&', '&').replace('<', '<').replace('>', '>')
if quote:
s = s.replace('"', """)
return s |
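A minimal usage sketch for the escape helper above (assuming it and its text_type/binary_type aliases are in scope); the expected outputs follow the replacement rules in its docstring:
print(escape('<a href="x">&</a>'))         # <a href="x">&</a>
print(escape('<a href="x">', quote=True))  # <a href="x">
print(escape(None))                        # '' (None escapes to an empty string)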
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
t = [prob for (val, prob) in self.d.iteritems() if val > x]
return sum(t) | Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability | Below is the instruction that describes the task:
### Input:
Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
### Response:
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
t = [prob for (val, prob) in self.d.iteritems() if val > x]
return sum(t) |
def logout(session, cookies, csrf_token):
'''
Closes the session with the device.
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "logout",
"params": []
}
session.post(DETAILS['url'],
data=json.dumps(payload),
cookies=cookies,
headers={'X-CSRF-Token': csrf_token}) | Closes the session with the device. | Below is the instruction that describes the task:
### Input:
Closes the session with the device.
### Response:
def logout(session, cookies, csrf_token):
'''
Closes the session with the device.
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "logout",
"params": []
}
session.post(DETAILS['url'],
data=json.dumps(payload),
cookies=cookies,
headers={'X-CSRF-Token': csrf_token}) |
def delete_orderrun(self, orderrun_id):
"""
:param self: self
:param orderrun_id: string ; 'good' return a good value ; 'bad' return a bad value
:rtype: DKReturnCode
"""
rc = DKReturnCode()
if orderrun_id == 'good':
rc.set(rc.DK_SUCCESS, None, None)
else:
rc.set(rc.DK_FAIL, 'ServingDeleteV2: unable to delete OrderRun')
return rc | :param self: self
:param orderrun_id: string ; 'good' return a good value ; 'bad' return a bad value
:rtype: DKReturnCode | Below is the instruction that describes the task:
### Input:
:param self: self
:param orderrun_id: string ; 'good' return a good value ; 'bad' return a bad value
:rtype: DKReturnCode
### Response:
def delete_orderrun(self, orderrun_id):
"""
:param self: self
:param orderrun_id: string ; 'good' return a good value ; 'bad' return a bad value
:rtype: DKReturnCode
"""
rc = DKReturnCode()
if orderrun_id == 'good':
rc.set(rc.DK_SUCCESS, None, None)
else:
rc.set(rc.DK_FAIL, 'ServingDeleteV2: unable to delete OrderRun')
return rc |
def parse_resource(library, session, resource_name):
"""Parse a resource string to get the interface information.
Corresponds to viParseRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information with interface type and board number, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
"""
interface_type = ViUInt16()
interface_board_number = ViUInt16()
# [ViSession, ViRsrc, ViPUInt16, ViPUInt16]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viParseRsrc(session, resource_name, byref(interface_type),
byref(interface_board_number))
return ResourceInfo(constants.InterfaceType(interface_type.value),
interface_board_number.value,
None, None, None), ret | Parse a resource string to get the interface information.
Corresponds to viParseRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information with interface type and board number, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode` | Below is the instruction that describes the task:
### Input:
Parse a resource string to get the interface information.
Corresponds to viParseRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information with interface type and board number, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
### Response:
def parse_resource(library, session, resource_name):
"""Parse a resource string to get the interface information.
Corresponds to viParseRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information with interface type and board number, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
"""
interface_type = ViUInt16()
interface_board_number = ViUInt16()
# [ViSession, ViRsrc, ViPUInt16, ViPUInt16]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viParseRsrc(session, resource_name, byref(interface_type),
byref(interface_board_number))
return ResourceInfo(constants.InterfaceType(interface_type.value),
interface_board_number.value,
None, None, None), ret |
def get_terms(self, kwargs):
"""Checks URL parameters for slug and/or version to pull the right TermsAndConditions object"""
slug = kwargs.get("slug")
version = kwargs.get("version")
if slug and version:
terms = [TermsAndConditions.objects.filter(slug=slug, version_number=version).latest('date_active')]
elif slug:
terms = [TermsAndConditions.get_active(slug)]
else:
# Return a list of terms the current user has not agreed to, for the list view
terms = TermsAndConditions.get_active_terms_not_agreed_to(self.request.user)
return terms | Checks URL parameters for slug and/or version to pull the right TermsAndConditions object | Below is the instruction that describes the task:
### Input:
Checks URL parameters for slug and/or version to pull the right TermsAndConditions object
### Response:
def get_terms(self, kwargs):
"""Checks URL parameters for slug and/or version to pull the right TermsAndConditions object"""
slug = kwargs.get("slug")
version = kwargs.get("version")
if slug and version:
terms = [TermsAndConditions.objects.filter(slug=slug, version_number=version).latest('date_active')]
elif slug:
terms = [TermsAndConditions.get_active(slug)]
else:
# Return a list of terms the current user has not agreed to, for the list view
terms = TermsAndConditions.get_active_terms_not_agreed_to(self.request.user)
return terms |
def iter_events(self, number=-1):
"""Iterate over events associated with this issue only.
:param int number: (optional), number of events to return. Default: -1
returns all events available.
:returns: generator of
:class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
"""
url = self._build_url('events', base_url=self._api)
return self._iter(int(number), url, IssueEvent) | Iterate over events associated with this issue only.
:param int number: (optional), number of events to return. Default: -1
returns all events available.
:returns: generator of
:class:`IssueEvent <github3.issues.event.IssueEvent>`\ s | Below is the instruction that describes the task:
### Input:
Iterate over events associated with this issue only.
:param int number: (optional), number of events to return. Default: -1
returns all events available.
:returns: generator of
:class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
### Response:
def iter_events(self, number=-1):
"""Iterate over events associated with this issue only.
:param int number: (optional), number of events to return. Default: -1
returns all events available.
:returns: generator of
:class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
"""
url = self._build_url('events', base_url=self._api)
return self._iter(int(number), url, IssueEvent) |
def _set_values_on_model(self, model, values, fields=None):
"""
Updates the values with the specified values.
:param Model model: The sqlalchemy model instance
:param dict values: The dictionary of attributes and
the values to set.
:param list fields: A list of strings indicating
the valid fields. Defaults to self.fields.
:return: The model with the updated values.
:rtype: Model
"""
fields = fields or self.fields
for name, val in six.iteritems(values):
if name not in fields:
continue
setattr(model, name, val)
return model | Updates the values with the specified values.
:param Model model: The sqlalchemy model instance
:param dict values: The dictionary of attributes and
the values to set.
:param list fields: A list of strings indicating
the valid fields. Defaults to self.fields.
:return: The model with the updated values.
:rtype: Model | Below is the instruction that describes the task:
### Input:
Updates the values with the specified values.
:param Model model: The sqlalchemy model instance
:param dict values: The dictionary of attributes and
the values to set.
:param list fields: A list of strings indicating
the valid fields. Defaults to self.fields.
:return: The model with the updated values.
:rtype: Model
### Response:
def _set_values_on_model(self, model, values, fields=None):
"""
Updates the values with the specified values.
:param Model model: The sqlalchemy model instance
:param dict values: The dictionary of attributes and
the values to set.
:param list fields: A list of strings indicating
the valid fields. Defaults to self.fields.
:return: The model with the updated values.
:rtype: Model
"""
fields = fields or self.fields
for name, val in six.iteritems(values):
if name not in fields:
continue
setattr(model, name, val)
return model |
def detectRamPorts(stm: IfContainer, current_en: RtlSignalBase):
"""
Detect RAM ports in If statement
:param stm: statement to detect the ram ports in
:param current_en: current en/clk signal
"""
if stm.ifFalse or stm.elIfs:
return
for _stm in stm.ifTrue:
if isinstance(_stm, IfContainer):
yield from detectRamPorts(_stm, _stm.cond & current_en)
elif isinstance(_stm, Assignment):
if isinstance(_stm.dst._dtype, HArray):
assert len(_stm.indexes) == 1, "one address per RAM port"
w_addr = _stm.indexes[0]
mem = _stm.dst
yield (RAM_WRITE, mem, w_addr, current_en, _stm.src)
elif _stm.src.hidden and len(_stm.src.drivers) == 1:
op = _stm.src.drivers[0]
mem = op.operands[0]
if isinstance(mem._dtype, HArray) and op.operator == AllOps.INDEX:
r_addr = op.operands[1]
if _stm.indexes:
raise NotImplementedError()
yield (RAM_READ, mem, r_addr, current_en, _stm.dst) | Detect RAM ports in If statement
:param stm: statement to detect the ram ports in
:param current_en: current en/clk signal | Below is the instruction that describes the task:
### Input:
Detect RAM ports in If statement
:param stm: statement to detect the ram ports in
:param current_en: current en/clk signal
### Response:
def detectRamPorts(stm: IfContainer, current_en: RtlSignalBase):
"""
Detect RAM ports in If statement
:param stm: statement to detect the ram ports in
:param current_en: current en/clk signal
"""
if stm.ifFalse or stm.elIfs:
return
for _stm in stm.ifTrue:
if isinstance(_stm, IfContainer):
yield from detectRamPorts(_stm, _stm.cond & current_en)
elif isinstance(_stm, Assignment):
if isinstance(_stm.dst._dtype, HArray):
assert len(_stm.indexes) == 1, "one address per RAM port"
w_addr = _stm.indexes[0]
mem = _stm.dst
yield (RAM_WRITE, mem, w_addr, current_en, _stm.src)
elif _stm.src.hidden and len(_stm.src.drivers) == 1:
op = _stm.src.drivers[0]
mem = op.operands[0]
if isinstance(mem._dtype, HArray) and op.operator == AllOps.INDEX:
r_addr = op.operands[1]
if _stm.indexes:
raise NotImplementedError()
yield (RAM_READ, mem, r_addr, current_en, _stm.dst) |
def _parse_qualimap_coverage(table):
"""Parse summary qualimap coverage metrics.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col == "Mean":
out["Coverage (Mean)"] = val
return out | Parse summary qualimap coverage metrics. | Below is the instruction that describes the task:
### Input:
Parse summary qualimap coverage metrics.
### Response:
def _parse_qualimap_coverage(table):
"""Parse summary qualimap coverage metrics.
"""
out = {}
for row in table.find_all("tr"):
col, val = [x.text for x in row.find_all("td")]
if col == "Mean":
out["Coverage (Mean)"] = val
return out |
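A usage sketch, assuming BeautifulSoup is installed and the function above is in scope; the HTML fragment is made up for illustration:
from bs4 import BeautifulSoup
html = "<table><tr><td>Mean</td><td>30.5X</td></tr><tr><td>Std</td><td>4.2X</td></tr></table>"
table = BeautifulSoup(html, "html.parser").find("table")
print(_parse_qualimap_coverage(table))  # {'Coverage (Mean)': '30.5X'}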
def addModel(self, moduleName, modelName, model):
"""
Add a model instance to the application model pool.
:param moduleName: <str> module name in which the model is located
:param modelName: <str> model name
:param model: <object> model instance
:return: <void>
"""
modelIdentifier = "{}.{}".format(moduleName, modelName)
if modelIdentifier not in self._models:
self._models[modelIdentifier] = model
else:
message = "Application - addModel() - " \
"A model with the identifier {} already exists." \
.format(modelIdentifier)
raise Exception(message) | Add a model instance to the application model pool.
:param moduleName: <str> module name in which the model is located
:param modelName: <str> model name
:param model: <object> model instance
:return: <void> | Below is the instruction that describes the task:
### Input:
Add a model instance to the application model pool.
:param moduleName: <str> module name in which the model is located
:param modelName: <str> model name
:param model: <object> model instance
:return: <void>
### Response:
def addModel(self, moduleName, modelName, model):
"""
Add a model instance to the application model pool.
:param moduleName: <str> module name in which the model is located
:param modelName: <str> model name
:param model: <object> model instance
:return: <void>
"""
modelIdentifier = "{}.{}".format(moduleName, modelName)
if modelIdentifier not in self._models:
self._models[modelIdentifier] = model
else:
message = "Application - addModel() - " \
"A model with the identifier {} already exists." \
.format(modelIdentifier)
raise Exception(message) |
def run(self):
"""Configures and enables a CloudTrail trail and logging on a single AWS Account.
Has the capability to create both single region and multi-region trails.
Will automatically create SNS topics, subscribe to SQS queues and turn on logging for the account in question,
as well as reverting any manual changes to the trails if applicable.
Returns:
None
"""
for aws_region in AWS_REGIONS:
self.log.debug('Checking trails for {}/{}'.format(
self.account.account_name,
aws_region
))
ct = self.session.client('cloudtrail', region_name=aws_region)
trails = ct.describe_trails()
if len(trails['trailList']) == 0:
if aws_region == self.global_ct_region:
self.create_cloudtrail(aws_region)
else:
for trail in trails['trailList']:
if trail['Name'] in ('Default', self.trail_name):
if not trail['IsMultiRegionTrail']:
if trail['Name'] == self.trail_name and self.global_ct_region == aws_region:
ct.update_trail(
Name=trail['Name'],
IncludeGlobalServiceEvents=True,
IsMultiRegionTrail=True
)
auditlog(
event='cloudtrail.update_trail',
actor=self.ns,
data={
'trailName': trail['Name'],
'account': self.account.account_name,
'region': aws_region,
'changes': [
{
'setting': 'IsMultiRegionTrail',
'oldValue': False,
'newValue': True
}
]
}
)
else:
ct.delete_trail(Name=trail['Name'])
auditlog(
event='cloudtrail.delete_trail',
actor=self.ns,
data={
'trailName': trail['Name'],
'account': self.account.account_name,
'region': aws_region,
'reason': 'Incorrect region, name or not multi-regional'
}
)
else:
if trail['HomeRegion'] == aws_region:
if self.global_ct_region != aws_region or trail['Name'] == 'Default':
ct.delete_trail(Name=trail['Name'])
auditlog(
event='cloudtrail.delete_trail',
actor=self.ns,
data={
'trailName': trail['Name'],
'account': self.account.account_name,
'region': aws_region,
'reason': 'Incorrect name or region for multi-region trail'
}
)
trails = ct.describe_trails()
for trail in trails['trailList']:
if trail['Name'] == self.trail_name and trail['HomeRegion'] == aws_region:
self.validate_trail_settings(ct, aws_region, trail) | Configures and enables a CloudTrail trail and logging on a single AWS Account.
Has the capability to create both single region and multi-region trails.
Will automatically create SNS topics, subscribe to SQS queues and turn on logging for the account in question,
as well as reverting any manual changes to the trails if applicable.
Returns:
None | Below is the instruction that describes the task:
### Input:
Configures and enables a CloudTrail trail and logging on a single AWS Account.
Has the capability to create both single region and multi-region trails.
Will automatically create SNS topics, subscribe to SQS queues and turn on logging for the account in question,
as well as reverting any manual changes to the trails if applicable.
Returns:
None
### Response:
def run(self):
"""Configures and enables a CloudTrail trail and logging on a single AWS Account.
Has the capability to create both single region and multi-region trails.
Will automatically create SNS topics, subscribe to SQS queues and turn on logging for the account in question,
as well as reverting any manual changes to the trails if applicable.
Returns:
None
"""
for aws_region in AWS_REGIONS:
self.log.debug('Checking trails for {}/{}'.format(
self.account.account_name,
aws_region
))
ct = self.session.client('cloudtrail', region_name=aws_region)
trails = ct.describe_trails()
if len(trails['trailList']) == 0:
if aws_region == self.global_ct_region:
self.create_cloudtrail(aws_region)
else:
for trail in trails['trailList']:
if trail['Name'] in ('Default', self.trail_name):
if not trail['IsMultiRegionTrail']:
if trail['Name'] == self.trail_name and self.global_ct_region == aws_region:
ct.update_trail(
Name=trail['Name'],
IncludeGlobalServiceEvents=True,
IsMultiRegionTrail=True
)
auditlog(
event='cloudtrail.update_trail',
actor=self.ns,
data={
'trailName': trail['Name'],
'account': self.account.account_name,
'region': aws_region,
'changes': [
{
'setting': 'IsMultiRegionTrail',
'oldValue': False,
'newValue': True
}
]
}
)
else:
ct.delete_trail(Name=trail['Name'])
auditlog(
event='cloudtrail.delete_trail',
actor=self.ns,
data={
'trailName': trail['Name'],
'account': self.account.account_name,
'region': aws_region,
'reason': 'Incorrect region, name or not multi-regional'
}
)
else:
if trail['HomeRegion'] == aws_region:
if self.global_ct_region != aws_region or trail['Name'] == 'Default':
ct.delete_trail(Name=trail['Name'])
auditlog(
event='cloudtrail.delete_trail',
actor=self.ns,
data={
'trailName': trail['Name'],
'account': self.account.account_name,
'region': aws_region,
'reason': 'Incorrect name or region for multi-region trail'
}
)
trails = ct.describe_trails()
for trail in trails['trailList']:
if trail['Name'] == self.trail_name and trail['HomeRegion'] == aws_region:
self.validate_trail_settings(ct, aws_region, trail) |
def write_translations(self, catalogue, format, options={}):
"""
Writes translation from the catalogue according to the selected format.
@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump
@type format: string
@param format: The format to use to dump the messages
@type options: array
@param options: Options that are passed to the dumper
@raises: ValueError
"""
if format not in self.dumpers:
raise ValueError(
'There is no dumper associated with format "{0}"'.format(format))
dumper = self.dumpers[format]
if "path" in options and not os.path.isdir(options['path']):
os.mkdir(options['path'])
dumper.dump(catalogue, options) | Writes translation from the catalogue according to the selected format.
@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump
@type format: string
@param format: The format to use to dump the messages
@type options: array
@param options: Options that are passed to the dumper
@raises: ValueError | Below is the instruction that describes the task:
### Input:
Writes translation from the catalogue according to the selected format.
@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump
@type format: string
@param format: The format to use to dump the messages
@type options: array
@param options: Options that are passed to the dumper
@raises: ValueError
### Response:
def write_translations(self, catalogue, format, options={}):
"""
Writes translation from the catalogue according to the selected format.
@type catalogue: MessageCatalogue
@param catalogue: The message catalogue to dump
@type format: string
@param format: The format to use to dump the messages
@type options: array
@param options: Options that are passed to the dumper
@raises: ValueError
"""
if format not in self.dumpers:
raise ValueError(
'There is no dumper associated with format "{0}"'.format(format))
dumper = self.dumpers[format]
if "path" in options and not os.path.isdir(options['path']):
os.mkdir(options['path'])
dumper.dump(catalogue, options) |
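A sketch of registering and invoking a dumper; the JsonDumper class, the writer instance and the catalogue object here are hypothetical stand-ins, not part of the original API:
class JsonDumper(object):
    # hypothetical dumper exposing the dump(catalogue, options) interface expected above
    def dump(self, catalogue, options):
        print("would dump catalogue to %s" % options.get("path", "."))

writer.dumpers["json"] = JsonDumper()  # register under the format name "json"
writer.write_translations(catalogue, "json", {"path": "/tmp/translations"})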
def MakeID3v1(id3):
"""Return an ID3v1.1 tag string from a dict of ID3v2.4 frames."""
v1 = {}
for v2id, name in {"TIT2": "title", "TPE1": "artist",
"TALB": "album"}.items():
if v2id in id3:
text = id3[v2id].text[0].encode('latin1', 'replace')[:30]
else:
text = b""
v1[name] = text + (b"\x00" * (30 - len(text)))
if "COMM" in id3:
cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28]
else:
cmnt = b""
v1["comment"] = cmnt + (b"\x00" * (29 - len(cmnt)))
if "TRCK" in id3:
try:
v1["track"] = chr_(+id3["TRCK"])
except ValueError:
v1["track"] = b"\x00"
else:
v1["track"] = b"\x00"
if "TCON" in id3:
try:
genre = id3["TCON"].genres[0]
except IndexError:
pass
else:
if genre in TCON.GENRES:
v1["genre"] = chr_(TCON.GENRES.index(genre))
if "genre" not in v1:
v1["genre"] = b"\xff"
if "TDRC" in id3:
year = text_type(id3["TDRC"]).encode('ascii')
elif "TYER" in id3:
year = text_type(id3["TYER"]).encode('ascii')
else:
year = b""
v1["year"] = (year + b"\x00\x00\x00\x00")[:4]
return (
b"TAG" +
v1["title"] +
v1["artist"] +
v1["album"] +
v1["year"] +
v1["comment"] +
v1["track"] +
v1["genre"]
) | Return an ID3v1.1 tag string from a dict of ID3v2.4 frames. | Below is the instruction that describes the task:
### Input:
Return an ID3v1.1 tag string from a dict of ID3v2.4 frames.
### Response:
def MakeID3v1(id3):
"""Return an ID3v1.1 tag string from a dict of ID3v2.4 frames."""
v1 = {}
for v2id, name in {"TIT2": "title", "TPE1": "artist",
"TALB": "album"}.items():
if v2id in id3:
text = id3[v2id].text[0].encode('latin1', 'replace')[:30]
else:
text = b""
v1[name] = text + (b"\x00" * (30 - len(text)))
if "COMM" in id3:
cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28]
else:
cmnt = b""
v1["comment"] = cmnt + (b"\x00" * (29 - len(cmnt)))
if "TRCK" in id3:
try:
v1["track"] = chr_(+id3["TRCK"])
except ValueError:
v1["track"] = b"\x00"
else:
v1["track"] = b"\x00"
if "TCON" in id3:
try:
genre = id3["TCON"].genres[0]
except IndexError:
pass
else:
if genre in TCON.GENRES:
v1["genre"] = chr_(TCON.GENRES.index(genre))
if "genre" not in v1:
v1["genre"] = b"\xff"
if "TDRC" in id3:
year = text_type(id3["TDRC"]).encode('ascii')
elif "TYER" in id3:
year = text_type(id3["TYER"]).encode('ascii')
else:
year = b""
v1["year"] = (year + b"\x00\x00\x00\x00")[:4]
return (
b"TAG" +
v1["title"] +
v1["artist"] +
v1["album"] +
v1["year"] +
v1["comment"] +
v1["track"] +
v1["genre"]
) |
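Because every field is padded to the fixed ID3v1.1 layout (3 + 30 + 30 + 30 + 4 + 29 + 1 + 1 bytes), the result is always exactly 128 bytes; a quick check, assuming an empty frame dict is acceptable input and the function's module-level helpers are importable:
tag = MakeID3v1({})
assert len(tag) == 128
assert tag.startswith(b"TAG") and tag.endswith(b"\xff")  # default genre byte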
def change_user_password(self, ID, data):
"""Change password of a User."""
# http://teampasswordmanager.com/docs/api-users/#change_password
log.info('Change user %s password' % ID)
self.put('users/%s/change_password.json' % ID, data) | Change password of a User. | Below is the instruction that describes the task:
### Input:
Change password of a User.
### Response:
def change_user_password(self, ID, data):
"""Change password of a User."""
# http://teampasswordmanager.com/docs/api-users/#change_password
log.info('Change user %s password' % ID)
self.put('users/%s/change_password.json' % ID, data) |
def get_step_f(step_f, lR2, lS2):
"""Update the stepsize of given the primal and dual errors.
See Boyd (2011), section 3.4.1
"""
mu, tau = 10, 2
if lR2 > mu*lS2:
return step_f * tau
elif lS2 > mu*lR2:
return step_f / tau
return step_f | Update the stepsize given the primal and dual errors.
See Boyd (2011), section 3.4.1 | Below is the instruction that describes the task:
### Input:
Update the stepsize given the primal and dual errors.
See Boyd (2011), section 3.4.1
### Response:
def get_step_f(step_f, lR2, lS2):
"""Update the stepsize of given the primal and dual errors.
See Boyd (2011), section 3.4.1
"""
mu, tau = 10, 2
if lR2 > mu*lS2:
return step_f * tau
elif lS2 > mu*lR2:
return step_f / tau
return step_f |
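A short demonstration of the adaptive rule (mu = 10, tau = 2): the step grows when the primal residual dominates and shrinks when the dual residual dominates:
step = 1.0
step = get_step_f(step, lR2=50.0, lS2=1.0)  # primal error dominates -> step * 2
print(step)                                  # 2.0
step = get_step_f(step, lR2=1.0, lS2=50.0)  # dual error dominates -> step / 2
print(step)                                  # 1.0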
def setEmergencyDecel(self, vehID, decel):
"""setEmergencyDecel(string, double) -> None
Sets the maximal physically possible deceleration in m/s^2 for this vehicle.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_EMERGENCY_DECEL, vehID, decel) | setEmergencyDecel(string, double) -> None
Sets the maximal physically possible deceleration in m/s^2 for this vehicle. | Below is the instruction that describes the task:
### Input:
setEmergencyDecel(string, double) -> None
Sets the maximal physically possible deceleration in m/s^2 for this vehicle.
### Response:
def setEmergencyDecel(self, vehID, decel):
"""setEmergencyDecel(string, double) -> None
Sets the maximal physically possible deceleration in m/s^2 for this vehicle.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_EMERGENCY_DECEL, vehID, decel) |
def get_belapi_handle(client, username=None, password=None):
"""Get BEL API arango db handle"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
# Create a new database named "belapi"
try:
if username and password:
belapi_db = sys_db.create_database(
name=belapi_db_name,
users=[{"username": username, "password": password, "active": True}],
)
else:
belapi_db = sys_db.create_database(name=belapi_db_name)
except arango.exceptions.DatabaseCreateError:
if username and password:
belapi_db = client.db(belapi_db_name, username=username, password=password)
else:
belapi_db = client.db(belapi_db_name)
try:
belapi_db.create_collection(belapi_settings_name)
except Exception:
pass
try:
belapi_db.create_collection(belapi_statemgmt_name)
except Exception:
pass
return belapi_db | Get BEL API arango db handle | Below is the instruction that describes the task:
### Input:
Get BEL API arango db handle
### Response:
def get_belapi_handle(client, username=None, password=None):
"""Get BEL API arango db handle"""
(username, password) = get_user_creds(username, password)
sys_db = client.db("_system", username=username, password=password)
# Create a new database named "belapi"
try:
if username and password:
belapi_db = sys_db.create_database(
name=belapi_db_name,
users=[{"username": username, "password": password, "active": True}],
)
else:
belapi_db = sys_db.create_database(name=belapi_db_name)
except arango.exceptions.DatabaseCreateError:
if username and password:
belapi_db = client.db(belapi_db_name, username=username, password=password)
else:
belapi_db = client.db(belapi_db_name)
try:
belapi_db.create_collection(belapi_settings_name)
except Exception:
pass
try:
belapi_db.create_collection(belapi_statemgmt_name)
except Exception:
pass
return belapi_db |
def push(self):
"""
Adding the no_thin argument to the GIT push because we had some issues pushing previously.
According to http://stackoverflow.com/questions/16586642/git-unpack-error-on-push-to-gerrit#comment42953435_23610917,
"a new optimization which causes git to send as little data as possible over the network caused this bug to manifest,
so my guess is --no-thin just turns these optimizations off. From git push --help: "A thin transfer significantly
reduces the amount of sent data when the sender and receiver share many of the same objects in common." (--thin is the default)."
"""
if not self.canRunRemoteCmd():
return None
try:
fetchInfo = self.repo.remotes.origin.push(no_thin=True)[0]
except exc.GitCommandError as e:
print(dir(e))
print(e)
raise
if fetchInfo.flags & fetchInfo.ERROR:
try:
raise IOError("An error occured while trying to push the GIT repository from the server. Error flag: '" +
str(fetchInfo.flags) + "', message: '" + str(fetchInfo.note) + "'.")
except:
IOError("An error occured while trying to push the GIT repository from the server.")
return fetchInfo | Adding the no_thin argument to the GIT push because we had some issues pushing previously.
According to http://stackoverflow.com/questions/16586642/git-unpack-error-on-push-to-gerrit#comment42953435_23610917,
"a new optimization which causes git to send as little data as possible over the network caused this bug to manifest,
so my guess is --no-thin just turns these optimizations off. From git push --help: "A thin transfer significantly
reduces the amount of sent data when the sender and receiver share many of the same objects in common." (--thin is the default)." | Below is the instruction that describes the task:
### Input:
Adding the no_thin argument to the GIT push because we had some issues pushing previously.
According to http://stackoverflow.com/questions/16586642/git-unpack-error-on-push-to-gerrit#comment42953435_23610917,
"a new optimization which causes git to send as little data as possible over the network caused this bug to manifest,
so my guess is --no-thin just turns these optimizations off. From git push --help: "A thin transfer significantly
reduces the amount of sent data when the sender and receiver share many of the same objects in common." (--thin is the default)."
### Response:
def push(self):
"""
Adding the no_thin argument to the GIT push because we had some issues pushing previously.
According to http://stackoverflow.com/questions/16586642/git-unpack-error-on-push-to-gerrit#comment42953435_23610917,
"a new optimization which causes git to send as little data as possible over the network caused this bug to manifest,
so my guess is --no-thin just turns these optimizations off. From git push --help: "A thin transfer significantly
reduces the amount of sent data when the sender and receiver share many of the same objects in common." (--thin is the default)."
"""
if not self.canRunRemoteCmd():
return None
try:
fetchInfo = self.repo.remotes.origin.push(no_thin=True)[0]
except exc.GitCommandError as e:
print(dir(e))
print(e)
raise
if fetchInfo.flags & fetchInfo.ERROR:
try:
raise IOError("An error occured while trying to push the GIT repository from the server. Error flag: '" +
str(fetchInfo.flags) + "', message: '" + str(fetchInfo.note) + "'.")
except:
IOError("An error occured while trying to push the GIT repository from the server.")
return fetchInfo |
def _update_cov_model(self, strata_to_update='all'):
"""
strata_to_update : array-like or 'all'
array containing stratum indices to update
"""
if strata_to_update == 'all':
strata_to_update = self.strata.indices_
#: Otherwise assume strata_to_update is valid (no duplicates etc.)
#: Update covariance matrices
#: We usually update only one stratum at a time, so for loop is ok
n_sampled = np.clip(self.strata._n_sampled, 2, np.inf) #: clipping to at least 2 avoids an undefined covariance
factor = n_sampled/(n_sampled - 1)
for k in strata_to_update:
TP = self._BB_TP.theta_[k]
PP = self._BB_PP.theta_[k]
P = self._BB_P.theta_[k]
self.cov_model_[k,0,0] = factor[k] * TP * (1 - TP)
self.cov_model_[k,0,1] = factor[k] * TP * (1 - PP)
self.cov_model_[k,0,2] = factor[k] * TP * (1 - P)
self.cov_model_[k,1,1] = factor[k] * PP * (1 - PP)
self.cov_model_[k,1,2] = factor[k] * (TP - PP * P)
self.cov_model_[k,2,2] = factor[k] * P * (1 - P)
self.cov_model_[k,1,0] = self.cov_model_[k,0,1]
self.cov_model_[k,2,0] = self.cov_model_[k,0,2]
self.cov_model_[k,2,1] = self.cov_model_[k,1,2] | strata_to_update : array-like or 'all'
array containing stratum indices to update | Below is the instruction that describes the task:
### Input:
strata_to_update : array-like or 'all'
array containing stratum indices to update
### Response:
def _update_cov_model(self, strata_to_update='all'):
"""
strata_to_update : array-like or 'all'
array containing stratum indices to update
"""
if strata_to_update == 'all':
strata_to_update = self.strata.indices_
#: Otherwise assume strata_to_update is valid (no duplicates etc.)
#: Update covariance matrices
#: We usually update only one stratum at a time, so for loop is ok
n_sampled = np.clip(self.strata._n_sampled, 2, np.inf) #: clipping to at least 2 avoids an undefined covariance
factor = n_sampled/(n_sampled - 1)
for k in strata_to_update:
TP = self._BB_TP.theta_[k]
PP = self._BB_PP.theta_[k]
P = self._BB_P.theta_[k]
self.cov_model_[k,0,0] = factor[k] * TP * (1 - TP)
self.cov_model_[k,0,1] = factor[k] * TP * (1 - PP)
self.cov_model_[k,0,2] = factor[k] * TP * (1 - P)
self.cov_model_[k,1,1] = factor[k] * PP * (1 - PP)
self.cov_model_[k,1,2] = factor[k] * (TP - PP * P)
self.cov_model_[k,2,2] = factor[k] * P * (1 - P)
self.cov_model_[k,1,0] = self.cov_model_[k,0,1]
self.cov_model_[k,2,0] = self.cov_model_[k,0,2]
self.cov_model_[k,2,1] = self.cov_model_[k,1,2] |
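Each 3x3 block holds the covariance of the (TP, PP, P) rate estimates for one stratum, scaled by the n/(n-1) sample-size factor; an illustrative variance entry with made-up numbers:
n, TP = 10.0, 0.4
var_TP = (n / (n - 1)) * TP * (1 - TP)  # matches cov_model_[k, 0, 0]
print(round(var_TP, 4))                 # 0.2667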
def run_command(self, run_with=None, join_args=False):
"""
Run the task command.
:param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
:type run_with: list
:param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs
``['airflow run']``
:param join_args: bool
:return: the process that was run
:rtype: subprocess.Popen
"""
run_with = run_with or []
cmd = [" ".join(self._command)] if join_args else self._command
full_cmd = run_with + cmd
self.log.info('Running: %s', full_cmd)
proc = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
close_fds=True,
env=os.environ.copy(),
preexec_fn=os.setsid
)
# Start daemon thread to read subprocess logging output
log_reader = threading.Thread(
target=self._read_task_logs,
args=(proc.stdout,),
)
log_reader.daemon = True
log_reader.start()
return proc | Run the task command.
:param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
:type run_with: list
:param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs
``['airflow run']``
:param join_args: bool
:return: the process that was run
:rtype: subprocess.Popen | Below is the instruction that describes the task:
### Input:
Run the task command.
:param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
:type run_with: list
:param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs
``['airflow run']``
:param join_args: bool
:return: the process that was run
:rtype: subprocess.Popen
### Response:
def run_command(self, run_with=None, join_args=False):
"""
Run the task command.
:param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
:type run_with: list
:param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs
``['airflow run']``
:param join_args: bool
:return: the process that was run
:rtype: subprocess.Popen
"""
run_with = run_with or []
cmd = [" ".join(self._command)] if join_args else self._command
full_cmd = run_with + cmd
self.log.info('Running: %s', full_cmd)
proc = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
close_fds=True,
env=os.environ.copy(),
preexec_fn=os.setsid
)
# Start daemon thread to read subprocess logging output
log_reader = threading.Thread(
target=self._read_task_logs,
args=(proc.stdout,),
)
log_reader.daemon = True
log_reader.start()
return proc |
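A self-contained sketch of the same spawn-and-stream pattern outside the class; the echo command is an arbitrary example, and the POSIX-only preexec_fn=os.setsid from the original is omitted here:
import os
import subprocess
import threading

def _read_logs(stream):
    # stream is in text mode (universal_newlines=True), so '' marks EOF
    for line in iter(stream.readline, ''):
        print('[task] %s' % line.rstrip())

proc = subprocess.Popen(
    ['echo', 'hello'],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
    universal_newlines=True, env=os.environ.copy(),
)
reader = threading.Thread(target=_read_logs, args=(proc.stdout,))
reader.daemon = True  # do not block interpreter exit
reader.start()
proc.wait()
reader.join(timeout=1)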
def find_description(self, name):
"Find a description for the given appliance name."
for desc in self.virtual_system_descriptions:
values = desc.get_values_by_type(DescType.name,
DescValueType.original)
if name in values:
break
else:
raise Exception("Failed to find description for %s" % name)
return desc | Find a description for the given appliance name. | Below is the instruction that describes the task:
### Input:
Find a description for the given appliance name.
### Response:
def find_description(self, name):
"Find a description for the given appliance name."
for desc in self.virtual_system_descriptions:
values = desc.get_values_by_type(DescType.name,
DescValueType.original)
if name in values:
break
else:
raise Exception("Failed to find description for %s" % name)
return desc |
def del_all_host_downtimes(self, host):
"""Delete all host downtimes
Format of the line that triggers function call::
DEL_ALL_HOST_DOWNTIMES;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
for downtime in host.downtimes:
self.del_host_downtime(downtime)
self.send_an_element(host.get_update_status_brok()) | Delete all host downtimes
Format of the line that triggers function call::
DEL_ALL_HOST_DOWNTIMES;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None | Below is the instruction that describes the task:
### Input:
Delete all host downtimes
Format of the line that triggers function call::
DEL_ALL_HOST_DOWNTIMES;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
### Response:
def del_all_host_downtimes(self, host):
"""Delete all host downtimes
Format of the line that triggers function call::
DEL_ALL_HOST_DOWNTIMES;<host_name>
:param host: host to edit
:type host: alignak.objects.host.Host
:return: None
"""
for downtime in host.downtimes:
self.del_host_downtime(downtime)
self.send_an_element(host.get_update_status_brok()) |
def _body(self):
""" Generate the information required to create an ISBN-10 or
ISBN-13.
"""
ean = self.random_element(RULES.keys())
reg_group = self.random_element(RULES[ean].keys())
# Given the chosen ean/group, decide how long the
# registrant/publication string may be.
# We must allocate for the calculated check digit, so
# subtract 1
reg_pub_len = ISBN.MAX_LENGTH - len(ean) - len(reg_group) - 1
# Generate a registrant/publication combination
reg_pub = self.numerify('#' * reg_pub_len)
# Use rules to separate the registrant from the publication
rules = RULES[ean][reg_group]
registrant, publication = self._registrant_publication(reg_pub, rules)
return [ean, reg_group, registrant, publication] | Generate the information required to create an ISBN-10 or
ISBN-13. | Below is the instruction that describes the task:
### Input:
Generate the information required to create an ISBN-10 or
ISBN-13.
### Response:
def _body(self):
""" Generate the information required to create an ISBN-10 or
ISBN-13.
"""
ean = self.random_element(RULES.keys())
reg_group = self.random_element(RULES[ean].keys())
# Given the chosen ean/group, decide how long the
# registrant/publication string may be.
# We must allocate for the calculated check digit, so
# subtract 1
reg_pub_len = ISBN.MAX_LENGTH - len(ean) - len(reg_group) - 1
# Generate a registrant/publication combination
reg_pub = self.numerify('#' * reg_pub_len)
# Use rules to separate the registrant from the publication
rules = RULES[ean][reg_group]
registrant, publication = self._registrant_publication(reg_pub, rules)
return [ean, reg_group, registrant, publication] |
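The length arithmetic in _body: with ISBN.MAX_LENGTH assumed to be 13, an EAN of '978' and registration group '1' leave 13 - 3 - 1 - 1 = 8 digits for the registrant/publication body, the final slot being reserved for the check digit:
MAX_LENGTH = 13  # assumed value of ISBN.MAX_LENGTH for an ISBN-13
ean, reg_group = '978', '1'
reg_pub_len = MAX_LENGTH - len(ean) - len(reg_group) - 1
print(reg_pub_len)  # 8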
def get_remote_settings(self, name):
import posixpath
"""
Args:
name (str): The name of the remote that we want to retrieve
Returns:
dict: The content beneath the given remote name.
Example:
>>> config = {'remote "server"': {'url': 'ssh://localhost/'}}
>>> get_remote_settings("server")
{'url': 'ssh://localhost/'}
"""
import posixpath
settings = self.config[self.SECTION_REMOTE_FMT.format(name)]
parsed = urlparse(settings["url"])
# Support for cross referenced remotes.
# This will merge the settings, giving priority to the outer reference.
# For example, having:
#
# dvc remote add server ssh://localhost
# dvc remote modify server user root
# dvc remote modify server ask_password true
#
# dvc remote add images remote://server/tmp/pictures
# dvc remote modify images user alice
# dvc remote modify images ask_password false
# dvc remote modify images password asdf1234
#
# Results on a config dictionary like:
#
# {
# "url": "ssh://localhost/tmp/pictures",
# "user": "alice",
# "password": "asdf1234",
# "ask_password": False,
# }
#
if parsed.scheme == "remote":
reference = self.get_remote_settings(parsed.netloc)
url = posixpath.join(reference["url"], parsed.path.lstrip("/"))
merged = reference.copy()
merged.update(settings)
merged["url"] = url
return merged
return settings | Args:
name (str): The name of the remote that we want to retrieve
Returns:
dict: The content beneath the given remote name.
Example:
>>> config = {'remote "server"': {'url': 'ssh://localhost/'}}
>>> get_remote_settings("server")
{'url': 'ssh://localhost/'} | Below is the instruction that describes the task:
### Input:
Args:
name (str): The name of the remote that we want to retrieve
Returns:
dict: The content beneath the given remote name.
Example:
>>> config = {'remote "server"': {'url': 'ssh://localhost/'}}
>>> get_remote_settings("server")
{'url': 'ssh://localhost/'}
### Response:
def get_remote_settings(self, name):
"""
Args:
name (str): The name of the remote that we want to retrieve
Returns:
dict: The content beneath the given remote name.
Example:
>>> config = {'remote "server"': {'url': 'ssh://localhost/'}}
>>> get_remote_settings("server")
{'url': 'ssh://localhost/'}
"""
import posixpath
settings = self.config[self.SECTION_REMOTE_FMT.format(name)]
parsed = urlparse(settings["url"])
# Support for cross referenced remotes.
# This will merge the settings, giving priority to the outer reference.
# For example, having:
#
# dvc remote add server ssh://localhost
# dvc remote modify server user root
# dvc remote modify server ask_password true
#
# dvc remote add images remote://server/tmp/pictures
# dvc remote modify images user alice
# dvc remote modify images ask_password false
# dvc remote modify images password asdf1234
#
# Results on a config dictionary like:
#
# {
# "url": "ssh://localhost/tmp/pictures",
# "user": "alice",
# "password": "asdf1234",
# "ask_password": False,
# }
#
if parsed.scheme == "remote":
reference = self.get_remote_settings(parsed.netloc)
url = posixpath.join(reference["url"], parsed.path.lstrip("/"))
merged = reference.copy()
merged.update(settings)
merged["url"] = url
return merged
return settings |
def _map_center(self, coord, val):
''' Identify the center of the image corresponding to one coordinate. '''
if self.ppd in [4, 16, 64, 128]:
res = {'lat': 0, 'long': 360}
return res[coord] / 2.0
elif self.ppd in [256]:
res = {'lat': 90, 'long': 180}
c = (val // res[coord] + 1) * res[coord]
return c - res[coord], c
elif self.ppd in [512]:
res = {'lat': 45, 'long': 90}
c = (val // res[coord] + 1) * res[coord]
return c - res[coord], c
elif self.ppd in [1024]:
res = {'lat': 15, 'long': 30}
c = (val // res[coord] + 1) * res[coord]
return c - res[coord], c | Identify the center of the image corresponding to one coordinate. | Below is the instruction that describes the task:
### Input:
Identify the center of the image corresponding to one coordinate.
### Response:
def _map_center(self, coord, val):
''' Identify the center of the image corresponding to one coordinate. '''
if self.ppd in [4, 16, 64, 128]:
res = {'lat': 0, 'long': 360}
return res[coord] / 2.0
elif self.ppd in [256]:
res = {'lat': 90, 'long': 180}
c = (val // res[coord] + 1) * res[coord]
return c - res[coord], c
elif self.ppd in [512]:
res = {'lat': 45, 'long': 90}
c = (val // res[coord] + 1) * res[coord]
return c - res[coord], c
elif self.ppd in [1024]:
res = {'lat': 15, 'long': 30}
c = (val // res[coord] + 1) * res[coord]
return c - res[coord], c |
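The tile bounds come from integer division by the per-resolution tile size; a sketch of the ppd = 512 longitude case, where a longitude of 100 degrees falls in the 90-180 degree tile:
res = 90             # longitude tile size at ppd = 512
val = 100
c = (val // res + 1) * res
print((c - res, c))  # (90, 180)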
def abort(self, jobs=None, targets=None, block=None):
"""Abort specific jobs from the execution queues of target(s).
This is a mechanism to prevent jobs that have already been submitted
from executing.
Parameters
----------
jobs : msg_id, list of msg_ids, or AsyncResult
The jobs to be aborted
If unspecified/None: abort all outstanding jobs.
"""
block = self.block if block is None else block
jobs = jobs if jobs is not None else list(self.outstanding)
targets = self._build_targets(targets)[0]
msg_ids = []
if isinstance(jobs, (basestring,AsyncResult)):
jobs = [jobs]
bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
content = dict(msg_ids=msg_ids)
for t in targets:
self.session.send(self._control_socket, 'abort_request',
content=content, ident=t)
error = False
if block:
self._flush_ignored_control()
for i in range(len(targets)):
idents,msg = self.session.recv(self._control_socket,0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
else:
self._ignored_control_replies += len(targets)
if error:
raise error | Abort specific jobs from the execution queues of target(s).
This is a mechanism to prevent jobs that have already been submitted
from executing.
Parameters
----------
jobs : msg_id, list of msg_ids, or AsyncResult
The jobs to be aborted
If unspecified/None: abort all outstanding jobs. | Below is the instruction that describes the task:
### Input:
Abort specific jobs from the execution queues of target(s).
This is a mechanism to prevent jobs that have already been submitted
from executing.
Parameters
----------
jobs : msg_id, list of msg_ids, or AsyncResult
The jobs to be aborted
If unspecified/None: abort all outstanding jobs.
### Response:
def abort(self, jobs=None, targets=None, block=None):
"""Abort specific jobs from the execution queues of target(s).
This is a mechanism to prevent jobs that have already been submitted
from executing.
Parameters
----------
jobs : msg_id, list of msg_ids, or AsyncResult
The jobs to be aborted
If unspecified/None: abort all outstanding jobs.
"""
block = self.block if block is None else block
jobs = jobs if jobs is not None else list(self.outstanding)
targets = self._build_targets(targets)[0]
msg_ids = []
if isinstance(jobs, (basestring,AsyncResult)):
jobs = [jobs]
bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
content = dict(msg_ids=msg_ids)
for t in targets:
self.session.send(self._control_socket, 'abort_request',
content=content, ident=t)
error = False
if block:
self._flush_ignored_control()
for i in range(len(targets)):
idents,msg = self.session.recv(self._control_socket,0)
if self.debug:
pprint(msg)
if msg['content']['status'] != 'ok':
error = self._unwrap_exception(msg['content'])
else:
self._ignored_control_replies += len(targets)
if error:
raise error |
def _get_raw_key(self, key_id):
"""Retrieves a static, randomly generated, RSA key for the specified key id.
:param str key_id: User-defined ID for the static key
:returns: Wrapping key that contains the specified static key
:rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey`
"""
try:
static_key = self._static_keys[key_id]
except KeyError:
private_key = rsa.generate_private_key(public_exponent=65537, key_size=4096, backend=default_backend())
static_key = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
self._static_keys[key_id] = static_key
return WrappingKey(
wrapping_algorithm=WrappingAlgorithm.RSA_OAEP_SHA1_MGF1,
wrapping_key=static_key,
wrapping_key_type=EncryptionKeyType.PRIVATE,
) | Retrieves a static, randomly generated, RSA key for the specified key id.
:param str key_id: User-defined ID for the static key
:returns: Wrapping key that contains the specified static key
:rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey` | Below is the instruction that describes the task:
### Input:
Retrieves a static, randomly generated, RSA key for the specified key id.
:param str key_id: User-defined ID for the static key
:returns: Wrapping key that contains the specified static key
:rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey`
### Response:
def _get_raw_key(self, key_id):
"""Retrieves a static, randomly generated, RSA key for the specified key id.
:param str key_id: User-defined ID for the static key
:returns: Wrapping key that contains the specified static key
:rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey`
"""
try:
static_key = self._static_keys[key_id]
except KeyError:
private_key = rsa.generate_private_key(public_exponent=65537, key_size=4096, backend=default_backend())
static_key = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
self._static_keys[key_id] = static_key
return WrappingKey(
wrapping_algorithm=WrappingAlgorithm.RSA_OAEP_SHA1_MGF1,
wrapping_key=static_key,
wrapping_key_type=EncryptionKeyType.PRIVATE,
) |
def get_matches(pattern, language, max_count=8):
"""
take a word pattern or a Python regexp and a language name, and return a
list of at most max_count matching words.
"""
if str(pattern) == pattern:
pattern = compile_pattern(pattern)
results = []
if not dicts.exists(language):
print("The language '%s' is not available locally." % language)
return []
with open(dicts.filepath(language), 'r') as f:
for word in f:
if max_count <= 0:
break
w = word.strip()
if pattern.match(w) and w not in results:
results.append(w)
max_count -= 1
return results | take a word pattern or a Python regexp and a language name, and return a
list of at most max_count matching words. | Below is the instruction that describes the task:
### Input:
take a word pattern or a Python regexp and a language name, and return a
list of at most max_count matching words.
### Response:
def get_matches(pattern, language, max_count=8):
"""
take a word pattern or a Python regexp and a language name, and return a
list of at most max_count matching words.
"""
if str(pattern) == pattern:
pattern = compile_pattern(pattern)
results = []
if not dicts.exists(language):
print("The language '%s' is not available locally." % language)
return []
with open(dicts.filepath(language), 'r') as f:
for word in f:
if max_count <= 0:
break
w = word.strip()
if pattern.match(w) and w not in results:
results.append(w)
max_count -= 1
return results |
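A hypothetical call (the 'english' word list must exist in the local dicts store, and compile_pattern must be in scope alongside the function):
words = get_matches(r'^pyth.n$', 'english', max_count=3)
print(words)  # e.g. ['python'] if the word list contains it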
def quantitate(data):
"""CWL target for quantitation.
XXX Needs to be split and parallelized by expression caller, with merging
of multiple calls.
"""
data = to_single_data(to_single_data(data))
data = generate_transcript_counts(data)[0][0]
data["quant"] = {}
if "sailfish" in dd.get_expression_caller(data):
data = to_single_data(sailfish.run_sailfish(data)[0])
data["quant"]["tsv"] = data["sailfish"]
data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["sailfish"]), "abundance.h5")
if ("kallisto" in dd.get_expression_caller(data) or "pizzly" in dd.get_fusion_caller(data, [])):
data = to_single_data(kallisto.run_kallisto_rnaseq(data)[0])
data["quant"]["tsv"] = os.path.join(data["kallisto_quant"], "abundance.tsv")
data["quant"]["hdf5"] = os.path.join(data["kallisto_quant"], "abundance.h5")
if (os.path.exists(os.path.join(data["kallisto_quant"], "fusion.txt"))):
data["quant"]["fusion"] = os.path.join(data["kallisto_quant"], "fusion.txt")
else:
data["quant"]["fusion"] = None
if "salmon" in dd.get_expression_caller(data):
data = to_single_data(salmon.run_salmon_reads(data)[0])
data["quant"]["tsv"] = data["salmon"]
data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["salmon"]), "abundance.h5")
return [[data]] | CWL target for quantitation.
XXX Needs to be split and parallelized by expression caller, with merging
of multiple calls. | Below is the instruction that describes the task:
### Input:
CWL target for quantitation.
XXX Needs to be split and parallelized by expression caller, with merging
of multiple calls.
### Response:
def quantitate(data):
"""CWL target for quantitation.
XXX Needs to be split and parallelized by expression caller, with merging
of multiple calls.
"""
data = to_single_data(to_single_data(data))
data = generate_transcript_counts(data)[0][0]
data["quant"] = {}
if "sailfish" in dd.get_expression_caller(data):
data = to_single_data(sailfish.run_sailfish(data)[0])
data["quant"]["tsv"] = data["sailfish"]
data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["sailfish"]), "abundance.h5")
if ("kallisto" in dd.get_expression_caller(data) or "pizzly" in dd.get_fusion_caller(data, [])):
data = to_single_data(kallisto.run_kallisto_rnaseq(data)[0])
data["quant"]["tsv"] = os.path.join(data["kallisto_quant"], "abundance.tsv")
data["quant"]["hdf5"] = os.path.join(data["kallisto_quant"], "abundance.h5")
if (os.path.exists(os.path.join(data["kallisto_quant"], "fusion.txt"))):
data["quant"]["fusion"] = os.path.join(data["kallisto_quant"], "fusion.txt")
else:
data["quant"]["fusion"] = None
if "salmon" in dd.get_expression_caller(data):
data = to_single_data(salmon.run_salmon_reads(data)[0])
data["quant"]["tsv"] = data["salmon"]
data["quant"]["hdf5"] = os.path.join(os.path.dirname(data["salmon"]), "abundance.h5")
return [[data]] |
def unmarshal(self, value, custom_formatters=None, strict=True):
"""Unmarshal parameter from the value."""
if self.deprecated:
warnings.warn("The schema is deprecated", DeprecationWarning)
casted = self.cast(value, custom_formatters=custom_formatters, strict=strict)
if casted is None and not self.required:
return None
if self.enum and casted not in self.enum:
raise InvalidSchemaValue(
"Value {value} not in enum choices: {type}", value, self.enum)
return casted | Unmarshal parameter from the value. | Below is the instruction that describes the task:
### Input:
Unmarshal parameter from the value.
### Response:
def unmarshal(self, value, custom_formatters=None, strict=True):
"""Unmarshal parameter from the value."""
if self.deprecated:
warnings.warn("The schema is deprecated", DeprecationWarning)
casted = self.cast(value, custom_formatters=custom_formatters, strict=strict)
if casted is None and not self.required:
return None
if self.enum and casted not in self.enum:
raise InvalidSchemaValue(
"Value {value} not in enum choices: {type}", value, self.enum)
return casted |
def rewind(self):
'''rewind to start'''
self._index = 0
self.percent = 0
self.messages = {}
self._flightmode_index = 0
self._timestamp = None
self.flightmode = None
self.params = {} | rewind to start | Below is the instruction that describes the task:
### Input:
rewind to start
### Response:
def rewind(self):
'''rewind to start'''
self._index = 0
self.percent = 0
self.messages = {}
self._flightmode_index = 0
self._timestamp = None
self.flightmode = None
self.params = {} |
def create_event_subscription(self, url):
"""Register a callback URL as an event subscriber.
:param str url: callback URL
:returns: the created event subscription
:rtype: dict
"""
params = {'callbackUrl': url}
response = self._do_request('POST', '/v2/eventSubscriptions', params)
return response.json() | Register a callback URL as an event subscriber.
:param str url: callback URL
:returns: the created event subscription
:rtype: dict | Below is the instruction that describes the task:
### Input:
Register a callback URL as an event subscriber.
:param str url: callback URL
:returns: the created event subscription
:rtype: dict
### Response:
def create_event_subscription(self, url):
"""Register a callback URL as an event subscriber.
:param str url: callback URL
:returns: the created event subscription
:rtype: dict
"""
params = {'callbackUrl': url}
response = self._do_request('POST', '/v2/eventSubscriptions', params)
return response.json() |
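For context, the same call expressed as a standalone HTTP request with requests. The base URL, bearer-token header, and JSON body encoding are assumptions for illustration; the record's _do_request may encode the params differently:

import requests

def create_event_subscription(base_url, token, callback_url):
    # POST the callback URL to the subscriptions endpoint and return the
    # decoded JSON body describing the created subscription.
    response = requests.post(
        base_url + "/v2/eventSubscriptions",
        json={"callbackUrl": callback_url},
        headers={"Authorization": "Bearer " + token},
        timeout=10,
    )
    response.raise_for_status()
    return response.json()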
def _new_from_xml(cls, xmlnode):
"""Create a new `Item` object from an XML element.
:Parameters:
- `xmlnode`: the XML element.
:Types:
- `xmlnode`: `libxml2.xmlNode`
:return: the object created.
:returntype: `Item`
"""
child = xmlnode.children
fields = []
while child:
if child.type != "element" or child.ns().content != DATAFORM_NS:
pass
elif child.name == "field":
fields.append(Field._new_from_xml(child))
child = child.next
return cls(fields) | Create a new `Item` object from an XML element.
:Parameters:
- `xmlnode`: the XML element.
:Types:
- `xmlnode`: `libxml2.xmlNode`
:return: the object created.
:returntype: `Item` | Below is the instruction that describes the task:
### Input:
Create a new `Item` object from an XML element.
:Parameters:
- `xmlnode`: the XML element.
:Types:
- `xmlnode`: `libxml2.xmlNode`
:return: the object created.
:returntype: `Item`
### Response:
def _new_from_xml(cls, xmlnode):
"""Create a new `Item` object from an XML element.
:Parameters:
- `xmlnode`: the XML element.
:Types:
- `xmlnode`: `libxml2.xmlNode`
:return: the object created.
:returntype: `Item`
"""
child = xmlnode.children
fields = []
while child:
if child.type != "element" or child.ns().content != DATAFORM_NS:
pass
elif child.name == "field":
fields.append(Field._new_from_xml(child))
child = child.next
return cls(fields) |
def present(name, auth=None, **kwargs):
'''
Ensure image exists and is up-to-date
name
Name of the image
enabled
Boolean to control if image is enabled
description
An arbitrary description of the image
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
kwargs = __utils__['args.clean_kwargs'](**kwargs)
__salt__['glanceng.setup_clouds'](auth)
image = __salt__['glanceng.image_get'](name=name)
if not image:
if __opts__['test']:
ret['result'] = None
ret['changes'] = kwargs
ret['comment'] = 'Image {} will be created.'.format(name)
return ret
kwargs['name'] = name
image = __salt__['glanceng.image_create'](**kwargs)
ret['changes'] = image
ret['comment'] = 'Created image'
return ret
# TODO(SamYaple): Compare and update image properties here
return ret | Ensure image exists and is up-to-date
name
Name of the image
enabled
Boolean to control if image is enabled
description
An arbitrary description of the image | Below is the instruction that describes the task:
### Input:
Ensure image exists and is up-to-date
name
Name of the image
enabled
Boolean to control if image is enabled
description
An arbitrary description of the image
### Response:
def present(name, auth=None, **kwargs):
'''
Ensure image exists and is up-to-date
name
Name of the image
enabled
Boolean to control if image is enabled
description
An arbitrary description of the image
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
kwargs = __utils__['args.clean_kwargs'](**kwargs)
__salt__['glanceng.setup_clouds'](auth)
image = __salt__['glanceng.image_get'](name=name)
if not image:
if __opts__['test']:
ret['result'] = None
ret['changes'] = kwargs
ret['comment'] = 'Image {} will be created.'.format(name)
return ret
kwargs['name'] = name
image = __salt__['glanceng.image_create'](**kwargs)
ret['changes'] = image
ret['comment'] = 'Created image'
return ret
# TODO(SamYaple): Compare and update image properties here
return ret |
def actionAngleTorus_xvFreqs_c(pot,jr,jphi,jz,
angler,anglephi,anglez,
tol=0.003):
"""
NAME:
actionAngleTorus_xvFreqs_c
PURPOSE:
compute configuration (x,v) and frequencies of a set of angles on a single torus
INPUT:
pot - Potential object or list thereof
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
angler - radial angle (array [N])
anglephi - azimuthal angle (array [N])
anglez - vertical angle (array [N])
tol= (0.003) goal for |dJ|/|J| along the torus
OUTPUT:
(R,vR,vT,z,vz,phi,Omegar,Omegaphi,Omegaz,flag)
HISTORY:
2015-08-05/07 - Written - Bovy (UofT)
"""
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potfortorus=True)
#Set up result arrays
R= numpy.empty(len(angler))
vR= numpy.empty(len(angler))
vT= numpy.empty(len(angler))
z= numpy.empty(len(angler))
vz= numpy.empty(len(angler))
phi= numpy.empty(len(angler))
Omegar= numpy.empty(1)
Omegaphi= numpy.empty(1)
Omegaz= numpy.empty(1)
flag= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleTorus_xvFreqsFunc= _lib.actionAngleTorus_xvFreqs
actionAngleTorus_xvFreqsFunc.argtypes=\
[ctypes.c_double,
ctypes.c_double,
ctypes.c_double,
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_double,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [angler.flags['F_CONTIGUOUS'],
anglephi.flags['F_CONTIGUOUS'],
anglez.flags['F_CONTIGUOUS']]
angler= numpy.require(angler,dtype=numpy.float64,requirements=['C','W'])
anglephi= numpy.require(anglephi,dtype=numpy.float64,requirements=['C','W'])
anglez= numpy.require(anglez,dtype=numpy.float64,requirements=['C','W'])
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
phi= numpy.require(phi,dtype=numpy.float64,requirements=['C','W'])
Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W'])
Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,requirements=['C','W'])
Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleTorus_xvFreqsFunc(ctypes.c_double(jr),
ctypes.c_double(jphi),
ctypes.c_double(jz),
ctypes.c_int(len(angler)),
angler,
anglephi,
anglez,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(tol),
R,vR,vT,z,vz,phi,
Omegar,Omegaphi,Omegaz,
ctypes.byref(flag))
#Reset input arrays
if f_cont[0]: angler= numpy.asfortranarray(angler)
if f_cont[1]: anglephi= numpy.asfortranarray(anglephi)
if f_cont[2]: anglez= numpy.asfortranarray(anglez)
return (R,vR,vT,z,vz,phi,Omegar[0],Omegaphi[0],Omegaz[0],flag.value) | NAME:
actionAngleTorus_xvFreqs_c
PURPOSE:
compute configuration (x,v) and frequencies of a set of angles on a single torus
INPUT:
pot - Potential object or list thereof
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
angler - radial angle (array [N])
anglephi - azimuthal angle (array [N])
anglez - vertical angle (array [N])
tol= (0.003) goal for |dJ|/|J| along the torus
OUTPUT:
(R,vR,vT,z,vz,phi,Omegar,Omegaphi,Omegaz,flag)
HISTORY:
2015-08-05/07 - Written - Bovy (UofT) | Below is the instruction that describes the task:
### Input:
NAME:
actionAngleTorus_xvFreqs_c
PURPOSE:
compute configuration (x,v) and frequencies of a set of angles on a single torus
INPUT:
pot - Potential object or list thereof
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
angler - radial angle (array [N])
anglephi - azimuthal angle (array [N])
anglez - vertical angle (array [N])
tol= (0.003) goal for |dJ|/|J| along the torus
OUTPUT:
(R,vR,vT,z,vz,phi,Omegar,Omegaphi,Omegaz,flag)
HISTORY:
2015-08-05/07 - Written - Bovy (UofT)
### Response:
def actionAngleTorus_xvFreqs_c(pot,jr,jphi,jz,
angler,anglephi,anglez,
tol=0.003):
"""
NAME:
actionAngleTorus_xvFreqs_c
PURPOSE:
compute configuration (x,v) and frequencies of a set of angles on a single torus
INPUT:
pot - Potential object or list thereof
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
angler - radial angle (array [N])
anglephi - azimuthal angle (array [N])
anglez - vertical angle (array [N])
tol= (0.003) goal for |dJ|/|J| along the torus
OUTPUT:
(R,vR,vT,z,vz,phi,Omegar,Omegaphi,Omegaz,flag)
HISTORY:
2015-08-05/07 - Written - Bovy (UofT)
"""
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potfortorus=True)
#Set up result arrays
R= numpy.empty(len(angler))
vR= numpy.empty(len(angler))
vT= numpy.empty(len(angler))
z= numpy.empty(len(angler))
vz= numpy.empty(len(angler))
phi= numpy.empty(len(angler))
Omegar= numpy.empty(1)
Omegaphi= numpy.empty(1)
Omegaz= numpy.empty(1)
flag= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleTorus_xvFreqsFunc= _lib.actionAngleTorus_xvFreqs
actionAngleTorus_xvFreqsFunc.argtypes=\
[ctypes.c_double,
ctypes.c_double,
ctypes.c_double,
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_double,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [angler.flags['F_CONTIGUOUS'],
anglephi.flags['F_CONTIGUOUS'],
anglez.flags['F_CONTIGUOUS']]
angler= numpy.require(angler,dtype=numpy.float64,requirements=['C','W'])
anglephi= numpy.require(anglephi,dtype=numpy.float64,requirements=['C','W'])
anglez= numpy.require(anglez,dtype=numpy.float64,requirements=['C','W'])
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
phi= numpy.require(phi,dtype=numpy.float64,requirements=['C','W'])
Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W'])
Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,requirements=['C','W'])
Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleTorus_xvFreqsFunc(ctypes.c_double(jr),
ctypes.c_double(jphi),
ctypes.c_double(jz),
ctypes.c_int(len(angler)),
angler,
anglephi,
anglez,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(tol),
R,vR,vT,z,vz,phi,
Omegar,Omegaphi,Omegaz,
ctypes.byref(flag))
#Reset input arrays
if f_cont[0]: angler= numpy.asfortranarray(angler)
if f_cont[1]: anglephi= numpy.asfortranarray(anglephi)
if f_cont[2]: anglez= numpy.asfortranarray(anglez)
return (R,vR,vT,z,vz,phi,Omegar[0],Omegaphi[0],Omegaz[0],flag.value) |
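A hedged usage sketch for the wrapper above. It assumes a galpy build that includes the compiled Torus extension (so _lib exposes actionAngleTorus_xvFreqs); the potential and action values are arbitrary choices for illustration:

import numpy
from galpy.potential import MWPotential2014

angler = numpy.linspace(0., 2. * numpy.pi, 101)
anglephi = numpy.zeros_like(angler)
anglez = numpy.zeros_like(angler)
out = actionAngleTorus_xvFreqs_c(MWPotential2014, 0.1, 1.1, 0.05,
                                 angler, anglephi, anglez, tol=0.003)
R, vR, vT, z, vz, phi = out[:6]
Omegar, Omegaphi, Omegaz, flag = out[6:]
print(Omegar, flag)  # frequencies plus a status flag (0 is expected on success)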
def _create_p(s, h):
"""Parabolic derivative"""
p = np.zeros_like(s)
p[1:] = (s[:-1]*h[1:] + s[1:] * h[:-1]) / (h[1:] + h[:-1])
return p | Parabolic derivative | Below is the instruction that describes the task:
### Input:
Parabolic derivative
### Response:
def _create_p(s, h):
"""Parabolic derivative"""
p = np.zeros_like(s)
p[1:] = (s[:-1]*h[1:] + s[1:] * h[:-1]) / (h[1:] + h[:-1])
return p |
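A quick numeric check of the weighted-average formula p[i] = (s[i-1]*h[i] + s[i]*h[i-1]) / (h[i] + h[i-1]), assuming _create_p from the record above is in scope:

import numpy as np

s = np.array([1.0, 3.0, 2.0])  # secant slopes
h = np.array([1.0, 1.0, 2.0])  # interval widths
print(_create_p(s, h))  # [0.         2.         2.66666667]; p[0] stays 0 by construction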
def clean_file_name(filename, unique=True, replace="_", force_nt=False):
"""
Return a version of the filename that contains no forbidden characters.
On Windows these are for example <, /, ?, ...
The intention of this function is to allow distribution of files to different OSes.
:param filename: string to clean
:param unique: check if the filename is already taken and append an integer to be unique (default: True)
:param replace: replacement character. (default: '_')
:param force_nt: Force shortening of paths like on NT systems (default: False)
:return: clean string
"""
if re.match(r'[<>:"/\\|?* .\x00-\x1f]', replace):
raise ValueError("replacement character is not allowed!")
path, fname = os.path.split(filename)
# For Windows see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
# Other operating systems seem to be more tolerant...
# Disallowed filenames: attach the replace character if necessary
if re.match(r'(CON|PRN|AUX|NUL|COM[1-9]|LPT[1-9])', fname):
fname += replace
# reserved characters
fname = re.sub(r'[<>:"/\\|?*\x00-\x1f]', replace, fname)
# Do not end with dot or space
fname = re.sub(r'[ .]$', replace, fname)
if force_nt or os.name == 'nt':
PATH_MAX_LENGTH = 230 # give extra space for other stuff...
# Check filename length limit, usually a problem on older Windows versions
if len(fname) > PATH_MAX_LENGTH:
if "." in fname:
f, ext = fname.rsplit(".", 1)
fname = "{}.{}".format(f[:PATH_MAX_LENGTH-(len(ext)+1)], ext)
else:
fname = fname[:PATH_MAX_LENGTH]
# Special behaviour... On Windows, there is also a problem with the maximum path length in explorer.exe
# maximum length is limited to 260 chars, so use 250 to have room for other stuff
if len(os.path.abspath(os.path.join(path, fname))) > 250:
fname = fname[:250 - (len(os.path.abspath(path)) + 1)]
if unique:
counter = 0
origname = fname
while os.path.isfile(os.path.join(path, fname)):
if "." in fname:
# assume extension
f, ext = origname.rsplit(".", 1)
fname = "{}_{}.{}".format(f, counter, ext)
else:
fname = "{}_{}".format(origname, counter)
counter += 1
return os.path.join(path, fname) | Return a version of the filename that contains no forbidden characters.
On Windows these are for example <, /, ?, ...
The intention of this function is to allow distribution of files to different OSes.
:param filename: string to clean
:param unique: check if the filename is already taken and append an integer to be unique (default: True)
:param replace: replacement character. (default: '_')
:param force_nt: Force shortening of paths like on NT systems (default: False)
:return: clean string | Below is the instruction that describes the task:
### Input:
Return a version of the filename that contains no forbidden characters.
On Windows these are for example <, /, ?, ...
The intention of this function is to allow distribution of files to different OSes.
:param filename: string to clean
:param unique: check if the filename is already taken and append an integer to be unique (default: True)
:param replace: replacement character. (default: '_')
:param force_nt: Force shortening of paths like on NT systems (default: False)
:return: clean string
### Response:
def clean_file_name(filename, unique=True, replace="_", force_nt=False):
"""
Return a version of the filename that contains no forbidden characters.
On Windows these are for example <, /, ?, ...
The intention of this function is to allow distribution of files to different OSes.
:param filename: string to clean
:param unique: check if the filename is already taken and append an integer to be unique (default: True)
:param replace: replacement character. (default: '_')
:param force_nt: Force shortening of paths like on NT systems (default: False)
:return: clean string
"""
if re.match(r'[<>:"/\\|?* .\x00-\x1f]', replace):
raise ValueError("replacement character is not allowed!")
path, fname = os.path.split(filename)
# For Windows see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
# Other operating systems seem to be more tolerant...
# Disallowed filenames: attach the replace character if necessary
if re.match(r'(CON|PRN|AUX|NUL|COM[1-9]|LPT[1-9])', fname):
fname += replace
# reserved characters
fname = re.sub(r'[<>:"/\\|?*\x00-\x1f]', replace, fname)
# Do not end with dot or space
fname = re.sub(r'[ .]$', replace, fname)
if force_nt or os.name == 'nt':
PATH_MAX_LENGTH = 230 # give extra space for other stuff...
# Check filename length limit, usually a problem on older Windows versions
if len(fname) > PATH_MAX_LENGTH:
if "." in fname:
f, ext = fname.rsplit(".", 1)
fname = "{}.{}".format(f[:PATH_MAX_LENGTH-(len(ext)+1)], ext)
else:
fname = fname[:PATH_MAX_LENGTH]
# Special behaviour... On Windows, there is also a problem with the maximum path length in explorer.exe
# maximum length is limited to 260 chars, so use 250 to have room for other stuff
if len(os.path.abspath(os.path.join(path, fname))) > 250:
fname = fname[:250 - (len(os.path.abspath(path)) + 1)]
if unique:
counter = 0
origname = fname
while os.path.isfile(os.path.join(path, fname)):
if "." in fname:
# assume extension
f, ext = origname.rsplit(".", 1)
fname = "{}_{}.{}".format(f, counter, ext)
else:
fname = "{}_{}".format(origname, counter)
counter += 1
return os.path.join(path, fname) |
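Illustrative calls, assuming clean_file_name above is in scope (unique=False keeps the result independent of files already present in the working directory):

print(clean_file_name("report:v1?.txt", unique=False))      # report_v1_.txt
print(clean_file_name("CON", unique=False, force_nt=True))  # CON_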
def setfocus(self, focus):
"""
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None
"""
self.focus = self.data[focus]
self.focus_stage = focus
self.__dict__.update(self.focus) | Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None
### Response:
def setfocus(self, focus):
"""
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
focus : str
The name of the analysis stage desired.
Returns
-------
None
"""
self.focus = self.data[focus]
self.focus_stage = focus
self.__dict__.update(self.focus) |
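The __dict__.update(self.focus) line is what makes the keys of the focused stage readable as plain attributes. A toy illustration of that pattern — Toy is an assumption for demonstration, not the analysis class itself:

class Toy:
    def __init__(self):
        self.data = {"rawdata": {"x": 1}, "despiked": {"x": 2}}
        self.setfocus("rawdata")

    def setfocus(self, focus):
        # Same three steps as the record above, on plain dicts.
        self.focus = self.data[focus]
        self.focus_stage = focus
        self.__dict__.update(self.focus)

t = Toy()
print(t.x)  # 1
t.setfocus("despiked")
print(t.x)  # 2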
def yAxisIsMinor(self):
'''
Returns True if the minor axis is parallel to the Y axis, boolean.
'''
return min(self.radius.x, self.radius.y) == self.radius.y | Returns True if the minor axis is parallel to the Y axis, boolean. | Below is the instruction that describes the task:
### Input:
Returns True if the minor axis is parallel to the Y axis, boolean.
### Response:
def yAxisIsMinor(self):
'''
Returns True if the minor axis is parallel to the Y axis, boolean.
'''
return min(self.radius.x, self.radius.y) == self.radius.y |
def pad_position_w(self, i):
"""
Determines the position of the ith pad in the width direction.
Assumes equally spaced pads.
:param i: index of the pad in the width direction (0-indexed)
:return:
"""
if i >= self.n_pads_w:
raise ModelError("pad index out-of-bounds")
return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2 | Determines the position of the ith pad in the width direction.
Assumes equally spaced pads.
:param i: index of the pad in the width direction (0-indexed)
:return: | Below is the instruction that describes the task:
### Input:
Determines the position of the ith pad in the width direction.
Assumes equally spaced pads.
:param i: index of the pad in the width direction (0-indexed)
:return:
### Response:
def pad_position_w(self, i):
"""
Determines the position of the ith pad in the width direction.
Assumes equally spaced pads.
:param i: index of the pad in the width direction (0-indexed)
:return:
"""
if i >= self.n_pads_w:
raise ModelError("pad index out-of-bounds")
return (self.width - self.pad_width) / (self.n_pads_w - 1) * i + self.pad_width / 2 |
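Plugging numbers into the spacing formula above: with width=10, pad_width=2 and n_pads_w=3, the pad centres land at 1.0, 5.0 and 9.0, so the outer pads sit flush with the foundation edges:

width, pad_width, n_pads_w = 10.0, 2.0, 3
for i in range(n_pads_w):
    print((width - pad_width) / (n_pads_w - 1) * i + pad_width / 2)
# 1.0
# 5.0
# 9.0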
def expand(self, m):
"""Using the template, expand the string."""
if m is None:
raise ValueError("Match is None!")
sep = m.string[:0]
if isinstance(sep, bytes) != self._bytes:
raise TypeError('Match string type does not match expander string type!')
text = []
# Expand string
for x in range(0, len(self.literals)):
index = x
l = self.literals[x]
if l is None:
g_index = self._get_group_index(index)
span_case, single_case, capture = self._get_group_attributes(index)
if not self.use_format:
# Non format replace
try:
l = m.group(g_index)
except IndexError: # pragma: no cover
raise IndexError("'%d' is out of range!" % capture)
else:
# String format replace
try:
obj = m.captures(g_index)
except IndexError: # pragma: no cover
raise IndexError("'%d' is out of range!" % g_index)
l = _util.format_string(m, obj, capture, self._bytes)
if span_case is not None:
if span_case == _LOWER:
l = l.lower()
else:
l = l.upper()
if single_case is not None:
if single_case == _LOWER:
l = l[0:1].lower() + l[1:]
else:
l = l[0:1].upper() + l[1:]
text.append(l)
return sep.join(text) | Using the template, expand the string. | Below is the instruction that describes the task:
### Input:
Using the template, expand the string.
### Response:
def expand(self, m):
"""Using the template, expand the string."""
if m is None:
raise ValueError("Match is None!")
sep = m.string[:0]
if isinstance(sep, bytes) != self._bytes:
raise TypeError('Match string type does not match expander string type!')
text = []
# Expand string
for x in range(0, len(self.literals)):
index = x
l = self.literals[x]
if l is None:
g_index = self._get_group_index(index)
span_case, single_case, capture = self._get_group_attributes(index)
if not self.use_format:
# Non format replace
try:
l = m.group(g_index)
except IndexError: # pragma: no cover
raise IndexError("'%d' is out of range!" % capture)
else:
# String format replace
try:
obj = m.captures(g_index)
except IndexError: # pragma: no cover
raise IndexError("'%d' is out of range!" % g_index)
l = _util.format_string(m, obj, capture, self._bytes)
if span_case is not None:
if span_case == _LOWER:
l = l.lower()
else:
l = l.upper()
if single_case is not None:
if single_case == _LOWER:
l = l[0:1].lower() + l[1:]
else:
l = l[0:1].upper() + l[1:]
text.append(l)
return sep.join(text) |
def _findSubnetMask(self, ip):
"""
Retrieve the subnet mask of the network interface bound to the given IP
address, using ipconfig (Windows) or ifconfig (Linux or macOS).
:param ip: (str) IP address whose subnet mask should be looked up
:returns: subnet mask as a string
"""
ip = ip
if "win32" in sys.platform:
try:
proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if ip.encode() in line:
break
mask = (
proc.stdout.readline()
.rstrip()
.split(b":")[-1]
.replace(b" ", b"")
.decode()
)
except Exception:
raise NetworkInterfaceException("Cannot read IP parameters from OS")
else:
"""
This procedure could use a more direct way of obtaining the broadcast IP
as it is really simple in Unix
ifconfig gives Bcast directly for example
or use something like :
iface = "eth0"
socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM), 35099, struct.pack('256s', iface))[20:24])
"""
pattern = re.compile(r"(255\.\d{1,3}\.\d{1,3}\.\d{1,3})")
try:
proc = subprocess.Popen("ifconfig", stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if ip.encode() in line:
break
mask = re.findall(pattern, line.decode())[0]
except Exception:
mask = "255.255.255.255"
# self._log.debug('Mask found : %s' % mask)
return mask | Retrieve the subnet mask of the network interface bound to the given IP
address, using ipconfig (Windows) or ifconfig (Linux or macOS).
:param ip: (str) IP address whose subnet mask should be looked up
:returns: subnet mask as a string | Below is the instruction that describes the task:
### Input:
Retrieve the subnet mask of the network interface bound to the given IP
address, using ipconfig (Windows) or ifconfig (Linux or macOS).
:param ip: (str) IP address whose subnet mask should be looked up
:returns: subnet mask as a string
### Response:
def _findSubnetMask(self, ip):
"""
Retrieve the subnet mask of the network interface bound to the given IP
address, using ipconfig (Windows) or ifconfig (Linux or macOS).
:param ip: (str) IP address whose subnet mask should be looked up
:returns: subnet mask as a string
"""
ip = ip
if "win32" in sys.platform:
try:
proc = subprocess.Popen("ipconfig", stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if ip.encode() in line:
break
mask = (
proc.stdout.readline()
.rstrip()
.split(b":")[-1]
.replace(b" ", b"")
.decode()
)
except Exception:
raise NetworkInterfaceException("Cannot read IP parameters from OS")
else:
"""
This procedure could use a more direct way of obtaining the broadcast IP
as it is really simple in Unix
ifconfig gives Bcast directly for example
or use something like :
iface = "eth0"
socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM), 35099, struct.pack('256s', iface))[20:24])
"""
pattern = re.compile(r"(255\.\d{1,3}\.\d{1,3}\.\d{1,3})")
try:
proc = subprocess.Popen("ifconfig", stdout=subprocess.PIPE)
while True:
line = proc.stdout.readline()
if ip.encode() in line:
break
mask = re.findall(pattern, line.decode())[0]
except Exception:
mask = "255.255.255.255"
# self._log.debug('Mask found : %s' % mask)
return mask |
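The mask-extraction regex in isolation, applied to a sample ifconfig line (the sample line is an assumption; real ifconfig output varies by platform and version):

import re

pattern = re.compile(r"(255\.\d{1,3}\.\d{1,3}\.\d{1,3})")
line = "inet 192.168.1.42  netmask 255.255.255.0  broadcast 192.168.1.255"
print(re.findall(pattern, line)[0])  # 255.255.255.0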