code | docstring | text
---|---|---|
def _load_rsp(rsp):
"""
Converts raw Flickr string response to Python dict
"""
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last]) | Converts raw Flickr string response to Python dict | Below is the instruction that describes the task:
### Input:
Converts raw Flickr string response to Python dict
### Response:
def _load_rsp(rsp):
"""
Converts raw Flickr string response to Python dict
"""
first = rsp.find('(') + 1
last = rsp.rfind(')')
return json.loads(rsp[first:last]) |
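A quick standalone check of the unwrapping logic above, assuming a JSONP-style wrapper like the one Flickr's API emits (the wrapper name "jsonFlickrApi" is illustrative, not taken from the source):

import json

# A payload of the shape the function expects: JSON wrapped in a function call.
raw = 'jsonFlickrApi({"stat": "ok", "photos": {"page": 1}})'
first = raw.find('(') + 1   # index just past the opening parenthesis
last = raw.rfind(')')       # index of the closing parenthesis
print(json.loads(raw[first:last])["stat"])   # -> ok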
def read_xml(filename):
"""
Use et to read in an XML file, or string, into an Element object.
:param filename: File to parse.
:return: lxml._elementTree object or None
"""
parser = et.XMLParser(remove_blank_text=True)
isfile=False
try:
isfile = os.path.exists(filename)
except ValueError as e:
if 'path too long for Windows' in str(e):
pass
else:
raise
try:
if isfile:
return et.parse(filename, parser)
else:
r = et.fromstring(filename, parser)
return r.getroottree()
except IOError:
log.exception('unable to open file [{}]'.format(filename))
except et.XMLSyntaxError:
log.exception('unable to parse XML [{}]'.format(filename))
return None
return None | Use et to read in an XML file, or string, into an Element object.
:param filename: File to parse.
:return: lxml._elementTree object or None | Below is the instruction that describes the task:
### Input:
Use et to read in an XML file, or string, into an Element object.
:param filename: File to parse.
:return: lxml._elementTree object or None
### Response:
def read_xml(filename):
"""
Use et to read in an XML file, or string, into an Element object.
:param filename: File to parse.
:return: lxml._elementTree object or None
"""
parser = et.XMLParser(remove_blank_text=True)
isfile=False
try:
isfile = os.path.exists(filename)
except ValueError as e:
if 'path too long for Windows' in str(e):
pass
else:
raise
try:
if isfile:
return et.parse(filename, parser)
else:
r = et.fromstring(filename, parser)
return r.getroottree()
except IOError:
log.exception('unable to open file [{}]'.format(filename))
except et.XMLSyntaxError:
log.exception('unable to parse XML [{}]'.format(filename))
return None
return None |
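A minimal usage sketch of the string branch of read_xml, assuming `et` is lxml.etree (as the :return: type suggests) and run outside the original module:

from lxml import etree as et

parser = et.XMLParser(remove_blank_text=True)
# Parse from a string and promote the element to an ElementTree,
# mirroring the else-branch above.
tree = et.fromstring('<root><child/></root>', parser).getroottree()
print(tree.getroot().tag)   # -> root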
def process(self, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `self`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: p.process(self)
return self | Apply `processor` or `self.processor` to `self`. | Below is the instruction that describes the task:
### Input:
Apply `processor` or `self.processor` to `self`.
### Response:
def process(self, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `self`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: p.process(self)
return self |
def selected_fields(self):
"""Obtain the fields selected by user.
:returns: Keyword of the selected field.
:rtype: list, str
"""
items = self.lstFields.selectedItems()
if items and self.mode == MULTI_MODE:
return [item.text() for item in items]
elif items and self.mode == SINGLE_MODE:
return items[0].text()
else:
return [] | Obtain the fields selected by user.
:returns: Keyword of the selected field.
:rtype: list, str | Below is the instruction that describes the task:
### Input:
Obtain the fields selected by user.
:returns: Keyword of the selected field.
:rtype: list, str
### Response:
def selected_fields(self):
"""Obtain the fields selected by user.
:returns: Keyword of the selected field.
:rtype: list, str
"""
items = self.lstFields.selectedItems()
if items and self.mode == MULTI_MODE:
return [item.text() for item in items]
elif items and self.mode == SINGLE_MODE:
return items[0].text()
else:
return [] |
def serialize(self, buf, offset):
"""
Outputs the expression of the wire protocol of the flow stats into
the buf.
Returns the output length.
"""
fields = [ofproto.oxs_from_user(k, uv) for (k, uv)
in self.fields]
hdr_pack_str = '!HH'
field_offset = offset + struct.calcsize(hdr_pack_str)
for (n, value, _) in fields:
# No mask
field_offset += ofproto.oxs_serialize(n, value, None, buf,
field_offset)
reserved = 0
length = field_offset - offset
msg_pack_into(hdr_pack_str, buf, offset, reserved, length)
self.length = length
pad_len = utils.round_up(length, 8) - length
msg_pack_into("%dx" % pad_len, buf, field_offset)
return length + pad_len | Outputs the expression of the wire protocol of the flow stats into
the buf.
Returns the output length. | Below is the instruction that describes the task:
### Input:
Outputs the expression of the wire protocol of the flow stats into
the buf.
Returns the output length.
### Response:
def serialize(self, buf, offset):
"""
Outputs the expression of the wire protocol of the flow stats into
the buf.
Returns the output length.
"""
fields = [ofproto.oxs_from_user(k, uv) for (k, uv)
in self.fields]
hdr_pack_str = '!HH'
field_offset = offset + struct.calcsize(hdr_pack_str)
for (n, value, _) in fields:
# No mask
field_offset += ofproto.oxs_serialize(n, value, None, buf,
field_offset)
reserved = 0
length = field_offset - offset
msg_pack_into(hdr_pack_str, buf, offset, reserved, length)
self.length = length
pad_len = utils.round_up(length, 8) - length
msg_pack_into("%dx" % pad_len, buf, field_offset)
return length + pad_len |
def extract_commands(imported_vars):
"""
从传入的变量列表中提取命令( ``click.core.Command`` )对象
:param dict_items imported_vars: 字典的键值条目列表
:return: 判定为终端命令的对象字典
:rtype: dict(str, object)
"""
commands = dict()
for tup in imported_vars:
name, obj = tup
if is_command_object(obj):
commands.setdefault(name, obj)
return commands | 从传入的变量列表中提取命令( ``click.core.Command`` )对象
:param dict_items imported_vars: 字典的键值条目列表
:return: 判定为终端命令的对象字典
:rtype: dict(str, object) | Below is the instruction that describes the task:
### Input:
从传入的变量列表中提取命令( ``click.core.Command`` )对象
:param dict_items imported_vars: 字典的键值条目列表
:return: 判定为终端命令的对象字典
:rtype: dict(str, object)
### Response:
def extract_commands(imported_vars):
"""
从传入的变量列表中提取命令( ``click.core.Command`` )对象
:param dict_items imported_vars: 字典的键值条目列表
:return: 判定为终端命令的对象字典
:rtype: dict(str, object)
"""
commands = dict()
for tup in imported_vars:
name, obj = tup
if is_command_object(obj):
commands.setdefault(name, obj)
return commands |
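The helper is_command_object is not shown in the row above; a plausible implementation is a plain isinstance check against click.core.Command, which is an assumption. A hedged sketch, with extract_commands from the row above in scope:

import click

def is_command_object(obj):
    # Assumed implementation of the missing helper.
    return isinstance(obj, click.core.Command)

@click.command()
def hello():
    """Say hello."""
    click.echo("hello")

# vars().items() at module level yields (name, object) pairs, the expected input.
print(extract_commands(vars().items()))   # -> {'hello': <Command hello>}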
def common_items_metrics(all_items, common_items):
"""
Calculates the percentage of common items for each project
in each segment and calculates the mean and std of this percentage
for each segment.
"""
segments = common_items.index.unique()
metrics = {}
for seg in segments:
seg_common_items = segment_common_items(seg)
projects = get_segment_projects(seg)
metric_values = []
for proj in projects:
pronac = proj[0]
percentage = common_items_percentage(pronac, seg_common_items)
metric_values.append(percentage)
metrics[seg] = {
'mean': np.mean(metric_values),
'std': np.std(metric_values)
}
return pd.DataFrame.from_dict(metrics, orient='index') | Calculates the percentage of common items for each project
in each segment and calculates the mean and std of this percentage
for each segment. | Below is the instruction that describes the task:
### Input:
Calculates the percentage of common items for each project
in each segment and calculates the mean and std of this percentage
for each segment.
### Response:
def common_items_metrics(all_items, common_items):
"""
Calculates the percentage of common items for each project
in each segment and calculates the mean and std of this percentage
for each segment.
"""
segments = common_items.index.unique()
metrics = {}
for seg in segments:
seg_common_items = segment_common_items(seg)
projects = get_segment_projects(seg)
metric_values = []
for proj in projects:
pronac = proj[0]
percentage = common_items_percentage(pronac, seg_common_items)
metric_values.append(percentage)
metrics[seg] = {
'mean': np.mean(metric_values),
'std': np.std(metric_values)
}
return pd.DataFrame.from_dict(metrics, orient='index') |
def fromtsv(source=None, encoding=None, errors='strict', header=None,
**csvargs):
"""
Convenience function, as :func:`petl.io.csv.fromcsv` but with different
default dialect (tab delimited).
"""
csvargs.setdefault('dialect', 'excel-tab')
return fromcsv(source, encoding=encoding, errors=errors, **csvargs) | Convenience function, as :func:`petl.io.csv.fromcsv` but with different
default dialect (tab delimited). | Below is the instruction that describes the task:
### Input:
Convenience function, as :func:`petl.io.csv.fromcsv` but with different
default dialect (tab delimited).
### Response:
def fromtsv(source=None, encoding=None, errors='strict', header=None,
**csvargs):
"""
Convenience function, as :func:`petl.io.csv.fromcsv` but with different
default dialect (tab delimited).
"""
csvargs.setdefault('dialect', 'excel-tab')
return fromcsv(source, encoding=encoding, errors=errors, **csvargs) |
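For context, a hedged usage sketch with petl (the file name is a placeholder; the file must be tab-delimited for the excel-tab dialect to parse it correctly):

import petl as etl

table = etl.fromtsv('measurements.tsv')   # same as fromcsv(..., dialect='excel-tab')
print(etl.header(table))
print(etl.nrows(table))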
def _build_indices(self):
"""Build indices for the different field types"""
result = {key: OrderedDict() for key in LINES_WITH_ID}
for line in self.lines:
if line.key in LINES_WITH_ID:
result.setdefault(line.key, OrderedDict())
if line.mapping["ID"] in result[line.key]:
warnings.warn(
("Seen {} header more than once: {}, using first" "occurence").format(
line.key, line.mapping["ID"]
),
DuplicateHeaderLineWarning,
)
else:
result[line.key][line.mapping["ID"]] = line
else:
result.setdefault(line.key, [])
result[line.key].append(line)
return result | Build indices for the different field types | Below is the instruction that describes the task:
### Input:
Build indices for the different field types
### Response:
def _build_indices(self):
"""Build indices for the different field types"""
result = {key: OrderedDict() for key in LINES_WITH_ID}
for line in self.lines:
if line.key in LINES_WITH_ID:
result.setdefault(line.key, OrderedDict())
if line.mapping["ID"] in result[line.key]:
warnings.warn(
("Seen {} header more than once: {}, using first" "occurence").format(
line.key, line.mapping["ID"]
),
DuplicateHeaderLineWarning,
)
else:
result[line.key][line.mapping["ID"]] = line
else:
result.setdefault(line.key, [])
result[line.key].append(line)
return result |
def nb_persons(self, role = None):
"""
Returns the number of persons contained in the entity.
If ``role`` is provided, only the entity members with the given role are taken into account.
"""
if role:
if role.subroles:
role_condition = np.logical_or.reduce([self.members_role == subrole for subrole in role.subroles])
else:
role_condition = self.members_role == role
return self.sum(role_condition)
else:
return np.bincount(self.members_entity_id) | Returns the number of persons contained in the entity.
If ``role`` is provided, only the entity members with the given role are taken into account. | Below is the instruction that describes the task:
### Input:
Returns the number of persons contained in the entity.
If ``role`` is provided, only the entity members with the given role are taken into account.
### Response:
def nb_persons(self, role = None):
"""
Returns the number of persons contained in the entity.
If ``role`` is provided, only the entity members with the given role are taken into account.
"""
if role:
if role.subroles:
role_condition = np.logical_or.reduce([self.members_role == subrole for subrole in role.subroles])
else:
role_condition = self.members_role == role
return self.sum(role_condition)
else:
return np.bincount(self.members_entity_id) |
def get_all(self, seq_set: SequenceSet) \
-> Sequence[Tuple[int, CachedMessage]]:
"""Return the cached messages, and their sequence numbers, for the
given sequence set.
Args:
seq_set: The message sequence set.
"""
if seq_set.uid:
all_uids = seq_set.flatten(self.max_uid) & self._uids
return [(seq, self._cache[uid])
for seq, uid in enumerate(self._sorted, 1)
if uid in all_uids]
else:
all_seqs = seq_set.flatten(self.exists)
return [(seq, self._cache[uid])
for seq, uid in enumerate(self._sorted, 1)
if seq in all_seqs] | Return the cached messages, and their sequence numbers, for the
given sequence set.
Args:
seq_set: The message sequence set. | Below is the instruction that describes the task:
### Input:
Return the cached messages, and their sequence numbers, for the
given sequence set.
Args:
seq_set: The message sequence set.
### Response:
def get_all(self, seq_set: SequenceSet) \
-> Sequence[Tuple[int, CachedMessage]]:
"""Return the cached messages, and their sequence numbers, for the
given sequence set.
Args:
seq_set: The message sequence set.
"""
if seq_set.uid:
all_uids = seq_set.flatten(self.max_uid) & self._uids
return [(seq, self._cache[uid])
for seq, uid in enumerate(self._sorted, 1)
if uid in all_uids]
else:
all_seqs = seq_set.flatten(self.exists)
return [(seq, self._cache[uid])
for seq, uid in enumerate(self._sorted, 1)
if seq in all_seqs] |
def detect_language(self, text: str, hint: str = None):
"""
Detects the language of a text
:param text:
Text to analyze
:param hint:
A list which are hints for the API
in which language the text is written in
example:
"de, en"
:return:
detected language code. example: "en"
"""
encodedtext = urllib.parse.quote(text)
args = "&text=" + encodedtext
if hint is not None:
args += "&hint=" + hint
r = self.yandex_translate_request("detect", args)
self.handle_errors(r)
return r.json()["lang"] | Detects the language of a text
:param text:
Text to analyze
:param hint:
A list which are hints for the API
in which language the text is written in
example:
"de, en"
:return:
detected language code. example: "en" | Below is the instruction that describes the task:
### Input:
Detects the language of a text
:param text:
Text to analyze
:param hint:
A list which are hints for the API
in which language the text is written in
example:
"de, en"
:return:
detected language code. example: "en"
### Response:
def detect_language(self, text: str, hint: str = None):
"""
Detects the language of a text
:param text:
Text to analyze
:param hint:
A list which are hints for the API
in which language the text is written in
example:
"de, en"
:return:
detected language code. example: "en"
"""
encodedtext = urllib.parse.quote(text)
args = "&text=" + encodedtext
if hint is not None:
args += "&hint=" + hint
r = self.yandex_translate_request("detect", args)
self.handle_errors(r)
return r.json()["lang"] |
def add_to_cache(cls, remote_info, container): # pylint: disable=g-bad-name
"""Adds a ResourceContainer to a cache tying it to a protorpc method.
Args:
remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
to a method.
container: An instance of ResourceContainer.
Raises:
TypeError: if the container is not an instance of cls.
KeyError: if the remote method has been referenced by a container before.
This created remote method should never occur because a remote method
is created once.
"""
if not isinstance(container, cls):
raise TypeError('%r not an instance of %r, could not be added to cache.' %
(container, cls))
if remote_info in cls.__remote_info_cache:
raise KeyError('Cache has collision but should not.')
cls.__remote_info_cache[remote_info] = container | Adds a ResourceContainer to a cache tying it to a protorpc method.
Args:
remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
to a method.
container: An instance of ResourceContainer.
Raises:
TypeError: if the container is not an instance of cls.
KeyError: if the remote method has been referenced by a container before.
This created remote method should never occur because a remote method
is created once. | Below is the instruction that describes the task:
### Input:
Adds a ResourceContainer to a cache tying it to a protorpc method.
Args:
remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
to a method.
container: An instance of ResourceContainer.
Raises:
TypeError: if the container is not an instance of cls.
KeyError: if the remote method has been referenced by a container before.
This created remote method should never occur because a remote method
is created once.
### Response:
def add_to_cache(cls, remote_info, container): # pylint: disable=g-bad-name
"""Adds a ResourceContainer to a cache tying it to a protorpc method.
Args:
remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
to a method.
container: An instance of ResourceContainer.
Raises:
TypeError: if the container is not an instance of cls.
KeyError: if the remote method has been referenced by a container before.
This created remote method should never occur because a remote method
is created once.
"""
if not isinstance(container, cls):
raise TypeError('%r not an instance of %r, could not be added to cache.' %
(container, cls))
if remote_info in cls.__remote_info_cache:
raise KeyError('Cache has collision but should not.')
cls.__remote_info_cache[remote_info] = container |
def __read(self):
"""
Reads packets from the socket
"""
# Set the socket as non-blocking
self._socket.setblocking(0)
while not self._stop_event.is_set():
# Watch for content
ready = select.select([self._socket], [], [], 1)
if ready[0]:
# Socket is ready
data, sender = self._socket.recvfrom(1024)
try:
self._handle_heartbeat(sender, data)
except Exception as ex:
_logger.exception("Error handling the heart beat: %s", ex) | Reads packets from the socket | Below is the the instruction that describes the task:
### Input:
Reads packets from the socket
### Response:
def __read(self):
"""
Reads packets from the socket
"""
# Set the socket as non-blocking
self._socket.setblocking(0)
while not self._stop_event.is_set():
# Watch for content
ready = select.select([self._socket], [], [], 1)
if ready[0]:
# Socket is ready
data, sender = self._socket.recvfrom(1024)
try:
self._handle_heartbeat(sender, data)
except Exception as ex:
_logger.exception("Error handling the heart beat: %s", ex) |
def __check_mecab_dict_path(self):
"""check path to dict of Mecab in system environment
"""
mecab_dic_cmd = "echo `{} --dicdir`".format(os.path.join(self._path_mecab_config, 'mecab-config'))
try:
if six.PY2:
path_mecab_dict = subprocess.check_output( mecab_dic_cmd, shell=True ).strip('\n')
else:
path_mecab_dict = subprocess.check_output(mecab_dic_cmd, shell=True).decode(self.string_encoding).strip('\n')
except subprocess.CalledProcessError:
logger.error("{}".format(mecab_dic_cmd))
raise subprocess.CalledProcessError(returncode=-1, cmd="Failed to execute mecab-config command")
if path_mecab_dict == '':
raise SystemError("""mecab dictionary path is not found with following command: {}
You are not able to use additional dictionary.
Still you are able to call mecab default dictionary""".format(mecab_dic_cmd))
return path_mecab_dict | check path to dict of Mecab in system environment | Below is the instruction that describes the task:
### Input:
check path to dict of Mecab in system environment
### Response:
def __check_mecab_dict_path(self):
"""check path to dict of Mecab in system environment
"""
mecab_dic_cmd = "echo `{} --dicdir`".format(os.path.join(self._path_mecab_config, 'mecab-config'))
try:
if six.PY2:
path_mecab_dict = subprocess.check_output( mecab_dic_cmd, shell=True ).strip('\n')
else:
path_mecab_dict = subprocess.check_output(mecab_dic_cmd, shell=True).decode(self.string_encoding).strip('\n')
except subprocess.CalledProcessError:
logger.error("{}".format(mecab_dic_cmd))
raise subprocess.CalledProcessError(returncode=-1, cmd="Failed to execute mecab-config command")
if path_mecab_dict == '':
raise SystemError("""mecab dictionary path is not found with following command: {}
You are not able to use additional dictionary.
Still you are able to call mecab default dictionary""".format(mecab_dic_cmd))
return path_mecab_dict |
def get_request_headers(self):
"""
Determine the headers to send along with the request. These are
pretty much the same for every request, with Route53.
"""
date_header = time.asctime(time.gmtime())
# We sign the time string above with the user's AWS secret access key
# in order to authenticate our request.
signing_key = self._hmac_sign_string(date_header)
# Amazon's super fun auth token.
auth_header = "AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=HmacSHA256,Signature=%s" % (
self.connection._aws_access_key_id,
signing_key,
)
return {
'X-Amzn-Authorization': auth_header,
'x-amz-date': date_header,
'Host': 'route53.amazonaws.com',
} | Determine the headers to send along with the request. These are
pretty much the same for every request, with Route53. | Below is the instruction that describes the task:
### Input:
Determine the headers to send along with the request. These are
pretty much the same for every request, with Route53.
### Response:
def get_request_headers(self):
"""
Determine the headers to send along with the request. These are
pretty much the same for every request, with Route53.
"""
date_header = time.asctime(time.gmtime())
# We sign the time string above with the user's AWS secret access key
# in order to authenticate our request.
signing_key = self._hmac_sign_string(date_header)
# Amazon's super fun auth token.
auth_header = "AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=HmacSHA256,Signature=%s" % (
self.connection._aws_access_key_id,
signing_key,
)
return {
'X-Amzn-Authorization': auth_header,
'x-amz-date': date_header,
'Host': 'route53.amazonaws.com',
} |
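The helper _hmac_sign_string is not shown; for the legacy AWS3-HTTPS scheme it is typically a base64-encoded HMAC-SHA256 of the date header keyed by the secret access key. A hedged sketch of that signing step (the key is a placeholder):

import base64
import hashlib
import hmac
import time

secret_key = b"EXAMPLE-SECRET-ACCESS-KEY"   # placeholder; never hard-code real keys
date_header = time.asctime(time.gmtime())
digest = hmac.new(secret_key, date_header.encode("utf-8"), hashlib.sha256).digest()
signature = base64.b64encode(digest).decode("ascii")
print(signature)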
def make_2D_samples_gauss(n, m, sigma, random_state=None):
"""return n samples drawn from 2D gaussian N(m,sigma)
Parameters
----------
n : int
number of samples to make
m : np.array (2,)
mean value of the gaussian distribution
sigma : np.array (2,2)
covariance matrix of the gaussian distribution
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : np.array (n,2)
n samples drawn from N(m,sigma)
"""
generator = check_random_state(random_state)
if np.isscalar(sigma):
sigma = np.array([sigma, ])
if len(sigma) > 1:
P = sp.linalg.sqrtm(sigma)
res = generator.randn(n, 2).dot(P) + m
else:
res = generator.randn(n, 2) * np.sqrt(sigma) + m
return res | return n samples drawn from 2D gaussian N(m,sigma)
Parameters
----------
n : int
number of samples to make
m : np.array (2,)
mean value of the gaussian distribution
sigma : np.array (2,2)
covariance matrix of the gaussian distribution
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : np.array (n,2)
n samples drawn from N(m,sigma) | Below is the instruction that describes the task:
### Input:
return n samples drawn from 2D gaussian N(m,sigma)
Parameters
----------
n : int
number of samples to make
m : np.array (2,)
mean value of the gaussian distribution
sigma : np.array (2,2)
covariance matrix of the gaussian distribution
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : np.array (n,2)
n samples drawn from N(m,sigma)
### Response:
def make_2D_samples_gauss(n, m, sigma, random_state=None):
"""return n samples drawn from 2D gaussian N(m,sigma)
Parameters
----------
n : int
number of samples to make
m : np.array (2,)
mean value of the gaussian distribution
sigma : np.array (2,2)
covariance matrix of the gaussian distribution
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : np.array (n,2)
n samples drawn from N(m,sigma)
"""
generator = check_random_state(random_state)
if np.isscalar(sigma):
sigma = np.array([sigma, ])
if len(sigma) > 1:
P = sp.linalg.sqrtm(sigma)
res = generator.randn(n, 2).dot(P) + m
else:
res = generator.randn(n, 2) * np.sqrt(sigma) + m
return res |
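A NumPy-only illustration of the same sampling for the full-covariance case; the original additionally accepts a scalar sigma and routes seeding through check_random_state, which is replaced here by NumPy's Generator API:

import numpy as np

rng = np.random.default_rng(0)
m = np.array([1.0, -2.0])
sigma = np.array([[1.0, 0.3],
                  [0.3, 0.5]])
X = rng.multivariate_normal(mean=m, cov=sigma, size=5)
print(X.shape)   # -> (5, 2)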
def define_function(self, function, name=None):
"""Define the Python function within the CLIPS environment.
If a name is given, it will be the function name within CLIPS.
Otherwise, the name of the Python function will be used.
The Python function will be accessible within CLIPS via its name
as if it was defined via the `deffunction` construct.
"""
name = name if name is not None else function.__name__
ENVIRONMENT_DATA[self._env].user_functions[name] = function
self.build(DEFFUNCTION.format(name)) | Define the Python function within the CLIPS environment.
If a name is given, it will be the function name within CLIPS.
Otherwise, the name of the Python function will be used.
The Python function will be accessible within CLIPS via its name
as if it was defined via the `deffunction` construct. | Below is the instruction that describes the task:
### Input:
Define the Python function within the CLIPS environment.
If a name is given, it will be the function name within CLIPS.
Otherwise, the name of the Python function will be used.
The Python function will be accessible within CLIPS via its name
as if it was defined via the `deffunction` construct.
### Response:
def define_function(self, function, name=None):
"""Define the Python function within the CLIPS environment.
If a name is given, it will be the function name within CLIPS.
Otherwise, the name of the Python function will be used.
The Python function will be accessible within CLIPS via its name
as if it was defined via the `deffunction` construct.
"""
name = name if name is not None else function.__name__
ENVIRONMENT_DATA[self._env].user_functions[name] = function
self.build(DEFFUNCTION.format(name)) |
def maximum_active_partitions(self):
"""
Integer: The maximum number of active logical partitions or partitions
of this CPC.
The following table shows the maximum number of active logical
partitions or partitions by machine generations supported at the HMC
API:
========================= ==================
Machine generation Maximum partitions
========================= ==================
z196 60
z114 30
zEC12 60
zBC12 30
z13 / Emperor 85
z13s / Rockhopper 40
z14 / Emperor II 85
z14-ZR1 / Rockhopper II 40
========================= ==================
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`ValueError`: Unknown machine type
"""
machine_type = self.get_property('machine-type')
try:
max_parts = self._MAX_PARTITIONS_BY_MACHINE_TYPE[machine_type]
except KeyError:
raise ValueError("Unknown machine type: {!r}".format(machine_type))
return max_parts | Integer: The maximum number of active logical partitions or partitions
of this CPC.
The following table shows the maximum number of active logical
partitions or partitions by machine generations supported at the HMC
API:
========================= ==================
Machine generation Maximum partitions
========================= ==================
z196 60
z114 30
zEC12 60
zBC12 30
z13 / Emperor 85
z13s / Rockhopper 40
z14 / Emperor II 85
z14-ZR1 / Rockhopper II 40
========================= ==================
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`ValueError`: Unknown machine type | Below is the instruction that describes the task:
### Input:
Integer: The maximum number of active logical partitions or partitions
of this CPC.
The following table shows the maximum number of active logical
partitions or partitions by machine generations supported at the HMC
API:
========================= ==================
Machine generation Maximum partitions
========================= ==================
z196 60
z114 30
zEC12 60
zBC12 30
z13 / Emperor 85
z13s / Rockhopper 40
z14 / Emperor II 85
z14-ZR1 / Rockhopper II 40
========================= ==================
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`ValueError`: Unknown machine type
### Response:
def maximum_active_partitions(self):
"""
Integer: The maximum number of active logical partitions or partitions
of this CPC.
The following table shows the maximum number of active logical
partitions or partitions by machine generations supported at the HMC
API:
========================= ==================
Machine generation Maximum partitions
========================= ==================
z196 60
z114 30
zEC12 60
zBC12 30
z13 / Emperor 85
z13s / Rockhopper 40
z14 / Emperor II 85
z14-ZR1 / Rockhopper II 40
========================= ==================
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`ValueError`: Unknown machine type
"""
machine_type = self.get_property('machine-type')
try:
max_parts = self._MAX_PARTITIONS_BY_MACHINE_TYPE[machine_type]
except KeyError:
raise ValueError("Unknown machine type: {!r}".format(machine_type))
return max_parts |
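The lookup table _MAX_PARTITIONS_BY_MACHINE_TYPE is not shown; its values follow directly from the docstring table, but the keys below use generation names for readability, whereas the real code presumably keys on HMC machine-type codes (e.g. "2964" for z13), so treat the keys as illustrative:

_MAX_PARTITIONS_BY_MACHINE_TYPE = {
    "z196": 60,
    "z114": 30,
    "zEC12": 60,
    "zBC12": 30,
    "z13": 85,       # also Emperor
    "z13s": 40,      # also Rockhopper
    "z14": 85,       # also Emperor II
    "z14-ZR1": 40,   # also Rockhopper II
}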
def get_scm_status(config, read_modules=False, repo_url=None, mvn_repo_local=None, additional_params=None):
"""
Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is read by
default, although it can be requested to read the whole available module structure.
:param config: artifact config (ArtifactConfig instance)
:param read_modules: if True all modules are read, otherwise only top-level artifact
:param repo_url: the URL of the repository to use
:param mvn_repo_local: local repository path
:param additional_params: additional params to add on command-line when running maven
"""
global scm_status_cache
if config.artifact in scm_status_cache.keys():
result = scm_status_cache[config.artifact]
elif not read_modules and (("%s|False" % config.artifact) in scm_status_cache.keys()):
result = scm_status_cache["%s|False" % config.artifact]
else:
result = _get_scm_status(config, read_modules, repo_url, mvn_repo_local, additional_params)
if read_modules:
scm_status_cache[config.artifact] = result
if ("%s|False" % config.artifact) in scm_status_cache.keys():
del(scm_status_cache["%s|False" % config.artifact])
else:
scm_status_cache["%s|False" % config.artifact] = result
return result | Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is read by
default, although it can be requested to read the whole available module structure.
:param config: artifact config (ArtifactConfig instance)
:param read_modules: if True all modules are read, otherwise only top-level artifact
:param repo_url: the URL of the repository to use
:param mvn_repo_local: local repository path
:param additional_params: additional params to add on command-line when running maven | Below is the instruction that describes the task:
### Input:
Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is read by
default, although it can be requested to read the whole available module structure.
:param config: artifact config (ArtifactConfig instance)
:param read_modules: if True all modules are read, otherwise only top-level artifact
:param repo_url: the URL of the repository to use
:param mvn_repo_local: local repository path
:param additional_params: additional params to add on command-line when running maven
### Response:
def get_scm_status(config, read_modules=False, repo_url=None, mvn_repo_local=None, additional_params=None):
"""
Gets the artifact status (MavenArtifact instance) from SCM defined by config. Only the top-level artifact is read by
default, although it can be requested to read the whole available module structure.
:param config: artifact config (ArtifactConfig instance)
:param read_modules: if True all modules are read, otherwise only top-level artifact
:param repo_url: the URL of the repository to use
:param mvn_repo_local: local repository path
:param additional_params: additional params to add on command-line when running maven
"""
global scm_status_cache
if config.artifact in scm_status_cache.keys():
result = scm_status_cache[config.artifact]
elif not read_modules and (("%s|False" % config.artifact) in scm_status_cache.keys()):
result = scm_status_cache["%s|False" % config.artifact]
else:
result = _get_scm_status(config, read_modules, repo_url, mvn_repo_local, additional_params)
if read_modules:
scm_status_cache[config.artifact] = result
if ("%s|False" % config.artifact) in scm_status_cache.keys():
del(scm_status_cache["%s|False" % config.artifact])
else:
scm_status_cache["%s|False" % config.artifact] = result
return result |
def get_keeper_token(host, username, password):
"""Get a temporary auth token from LTD Keeper.
Parameters
----------
host : `str`
Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``).
username : `str`
Username.
password : `str`
Password.
Returns
-------
token : `str`
LTD Keeper API token.
Raises
------
KeeperError
Raised if the LTD Keeper API cannot return a token.
"""
token_endpoint = urljoin(host, '/token')
r = requests.get(token_endpoint, auth=(username, password))
if r.status_code != 200:
raise KeeperError('Could not authenticate to {0}: error {1:d}\n{2}'.
format(host, r.status_code, r.json()))
return r.json()['token'] | Get a temporary auth token from LTD Keeper.
Parameters
----------
host : `str`
Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``).
username : `str`
Username.
password : `str`
Password.
Returns
-------
token : `str`
LTD Keeper API token.
Raises
------
KeeperError
Raised if the LTD Keeper API cannot return a token. | Below is the instruction that describes the task:
### Input:
Get a temporary auth token from LTD Keeper.
Parameters
----------
host : `str`
Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``).
username : `str`
Username.
password : `str`
Password.
Returns
-------
token : `str`
LTD Keeper API token.
Raises
------
KeeperError
Raised if the LTD Keeper API cannot return a token.
### Response:
def get_keeper_token(host, username, password):
"""Get a temporary auth token from LTD Keeper.
Parameters
----------
host : `str`
Hostname of the LTD Keeper API (e.g., ``'https://keeper.lsst.codes'``).
username : `str`
Username.
password : `str`
Password.
Returns
-------
token : `str`
LTD Keeper API token.
Raises
------
KeeperError
Raised if the LTD Keeper API cannot return a token.
"""
token_endpoint = urljoin(host, '/token')
r = requests.get(token_endpoint, auth=(username, password))
if r.status_code != 200:
raise KeeperError('Could not authenticate to {0}: error {1:d}\n{2}'.
format(host, r.status_code, r.json()))
return r.json()['token'] |
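Usage sketch; the host and credentials are placeholders and the call performs a live HTTP request, so it is illustrative only:

token = get_keeper_token(
    host="https://keeper.example.org",
    username="uploader",
    password="s3cret",
)
print(token)   # temporary API token string on success; raises KeeperError on failure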
def format_message(self, msg):
"""format message."""
return {'timestamp': int(msg.created * 1000),
'message': self.format(msg),
'stream': self.log_stream or msg.name,
'group': self.log_group} | format message. | Below is the instruction that describes the task:
### Input:
format message.
### Response:
def format_message(self, msg):
"""format message."""
return {'timestamp': int(msg.created * 1000),
'message': self.format(msg),
'stream': self.log_stream or msg.name,
'group': self.log_group} |
def natural_neighbor(xp, yp, variable, grid_x, grid_y):
"""Wrap natural_neighbor_to_grid for deprecated natural_neighbor function."""
return natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y) | Wrap natural_neighbor_to_grid for deprecated natural_neighbor function. | Below is the instruction that describes the task:
### Input:
Wrap natural_neighbor_to_grid for deprecated natural_neighbor function.
### Response:
def natural_neighbor(xp, yp, variable, grid_x, grid_y):
"""Wrap natural_neighbor_to_grid for deprecated natural_neighbor function."""
return natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y) |
def find_folder_recipes(base_folder,
pattern="Singularity",
manifest=None,
base=None):
'''find folder recipes will find recipes based on a particular pattern.
Parameters
==========
base_folder: the base folder to recursively walk
pattern: a default pattern to search for
manifest: an already started manifest
base: if defined, consider folders under this level recursively.
'''
# The user is not appending to an existing manifest
if manifest is None:
manifest = dict()
for root, dirnames, filenames in os.walk(base_folder):
for filename in fnmatch.filter(filenames, pattern):
container_path = os.path.join(root, filename)
if base is not None:
container_base = container_path.replace(base,'').strip('/')
collection = container_base.split('/')[0]
recipe = os.path.basename(container_base)
container_uri = "%s/%s" %(collection,recipe)
else:
container_uri = '/'.join(container_path.strip('/').split('/')[-2:])
add_container = True
# Add the most recently updated container
if container_uri in manifest:
if manifest[container_uri]['modified'] > os.path.getmtime(container_path):
add_container = False
if add_container:
manifest[container_uri] = {'path': os.path.abspath(container_path),
'modified':os.path.getmtime(container_path)}
return manifest | find folder recipes will find recipes based on a particular pattern.
Parameters
==========
base_folder: the base folder to recursively walk
pattern: a default pattern to search for
manifest: an already started manifest
base: if defined, consider folders under this level recursively. | Below is the instruction that describes the task:
### Input:
find folder recipes will find recipes based on a particular pattern.
Parameters
==========
base_folder: the base folder to recursively walk
pattern: a default pattern to search for
manifest: an already started manifest
base: if defined, consider folders under this level recursively.
### Response:
def find_folder_recipes(base_folder,
pattern="Singularity",
manifest=None,
base=None):
'''find folder recipes will find recipes based on a particular pattern.
Parameters
==========
base_folder: the base folder to recursively walk
pattern: a default pattern to search for
manifest: an already started manifest
base: if defined, consider folders under this level recursively.
'''
# The user is not appending to an existing manifest
if manifest is None:
manifest = dict()
for root, dirnames, filenames in os.walk(base_folder):
for filename in fnmatch.filter(filenames, pattern):
container_path = os.path.join(root, filename)
if base is not None:
container_base = container_path.replace(base,'').strip('/')
collection = container_base.split('/')[0]
recipe = os.path.basename(container_base)
container_uri = "%s/%s" %(collection,recipe)
else:
container_uri = '/'.join(container_path.strip('/').split('/')[-2:])
add_container = True
# Add the most recently updated container
if container_uri in manifest:
if manifest[container_uri]['modified'] > os.path.getmtime(container_path):
add_container = False
if add_container:
manifest[container_uri] = {'path': os.path.abspath(container_path),
'modified':os.path.getmtime(container_path)}
return manifest |
def provider_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data | Return a mapping of all image data for available providers | Below is the instruction that describes the task:
### Input:
Return a mapping of all image data for available providers
### Response:
def provider_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data |
def list_files(self, project):
"""
List files in the project on computes
"""
path = "/projects/{}/files".format(project.id)
res = yield from self.http_query("GET", path, timeout=120)
return res.json | List files in the project on computes | Below is the instruction that describes the task:
### Input:
List files in the project on computes
### Response:
def list_files(self, project):
"""
List files in the project on computes
"""
path = "/projects/{}/files".format(project.id)
res = yield from self.http_query("GET", path, timeout=120)
return res.json |
def insert_before(self, text):
"""
Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync.
"""
selection_state = self.selection
if selection_state:
selection_state = SelectionState(
original_cursor_position=selection_state.original_cursor_position + len(text),
type=selection_state.type)
return Document(
text=text + self.text,
cursor_position=self.cursor_position + len(text),
selection=selection_state) | Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync. | Below is the instruction that describes the task:
### Input:
Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync.
### Response:
def insert_before(self, text):
"""
Create a new document, with this text inserted before the buffer.
It keeps selection ranges and cursor position in sync.
"""
selection_state = self.selection
if selection_state:
selection_state = SelectionState(
original_cursor_position=selection_state.original_cursor_position + len(text),
type=selection_state.type)
return Document(
text=text + self.text,
cursor_position=self.cursor_position + len(text),
selection=selection_state) |
def auto_up(self, count=1, go_to_start_of_line_if_history_changes=False):
"""
If we're not on the first line (of a multiline input) go a line up,
otherwise go back in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_previous(count=count)
elif self.document.cursor_position_row > 0:
self.cursor_up(count=count)
elif not self.selection_state:
self.history_backward(count=count)
# Go to the start of the line?
if go_to_start_of_line_if_history_changes:
self.cursor_position += self.document.get_start_of_line_position() | If we're not on the first line (of a multiline input) go a line up,
otherwise go back in history. (If nothing is selected.) | Below is the instruction that describes the task:
### Input:
If we're not on the first line (of a multiline input) go a line up,
otherwise go back in history. (If nothing is selected.)
### Response:
def auto_up(self, count=1, go_to_start_of_line_if_history_changes=False):
"""
If we're not on the first line (of a multiline input) go a line up,
otherwise go back in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_previous(count=count)
elif self.document.cursor_position_row > 0:
self.cursor_up(count=count)
elif not self.selection_state:
self.history_backward(count=count)
# Go to the start of the line?
if go_to_start_of_line_if_history_changes:
self.cursor_position += self.document.get_start_of_line_position() |
def run(
self,
num_episodes=-1,
max_episode_timesteps=-1,
episode_finished=None,
summary_report=None,
summary_interval=0,
num_timesteps=None,
deterministic=False,
episodes=None,
max_timesteps=None,
testing=False,
sleep=None
):
"""
Executes this runner by starting all Agents in parallel (each one in one thread).
Args:
episodes (int): Deprecated; see num_episodes.
max_timesteps (int): Deprecated; see max_episode_timesteps.
"""
# Renamed episodes into num_episodes to match BaseRunner's signature (fully backw. compatible).
if episodes is not None:
num_episodes = episodes
warnings.warn("WARNING: `episodes` parameter is deprecated, use `num_episodes` instead.",
category=DeprecationWarning)
assert isinstance(num_episodes, int)
# Renamed max_timesteps into max_episode_timesteps to match single Runner's signature (fully backw. compatible).
if max_timesteps is not None:
max_episode_timesteps = max_timesteps
warnings.warn("WARNING: `max_timesteps` parameter is deprecated, use `max_episode_timesteps` instead.",
category=DeprecationWarning)
assert isinstance(max_episode_timesteps, int)
if summary_report is not None:
warnings.warn("WARNING: `summary_report` parameter is deprecated, use `episode_finished` callback "
"instead to generate summaries every n episodes.",
category=DeprecationWarning)
self.reset()
# Reset counts/stop-condition for this run.
self.global_episode = 0
self.global_timestep = 0
self.should_stop = False
# Create threads.
threads = [threading.Thread(target=self._run_single, args=(t, self.agent[t], self.environment[t],),
kwargs={"deterministic": deterministic,
"max_episode_timesteps": max_episode_timesteps,
"episode_finished": episode_finished,
"testing": testing,
"sleep": sleep})
for t in range(len(self.agent))]
# Start threads.
self.start_time = time.time()
[t.start() for t in threads]
# Stay idle until killed by SIGINT or a global stop condition is met.
try:
next_summary = 0
next_save = 0 if self.save_frequency_unit != "s" else time.time()
while any([t.is_alive() for t in threads]) and self.global_episode < num_episodes or num_episodes == -1:
self.time = time.time()
# This is deprecated (but still supported) and should be covered by the `episode_finished` callable.
if summary_report is not None and self.global_episode > next_summary:
summary_report(self)
next_summary += summary_interval
if self.save_path and self.save_frequency is not None:
do_save = True
current = None
if self.save_frequency_unit == "e" and self.global_episode > next_save:
current = self.global_episode
elif self.save_frequency_unit == "s" and self.time > next_save:
current = self.time
elif self.save_frequency_unit == "t" and self.global_timestep > next_save:
current = self.global_timestep
else:
do_save = False
if do_save:
self.agent[0].save_model(self.save_path)
# Make sure next save is later than right now.
while next_save < current:
next_save += self.save_frequency
time.sleep(1)
except KeyboardInterrupt:
print('Keyboard interrupt, sending stop command to threads')
self.should_stop = True
# Join threads.
[t.join() for t in threads]
print('All threads stopped') | Executes this runner by starting all Agents in parallel (each one in one thread).
Args:
episodes (int): Deprecated; see num_episodes.
max_timesteps (int): Deprecated; see max_episode_timesteps. | Below is the instruction that describes the task:
### Input:
Executes this runner by starting all Agents in parallel (each one in one thread).
Args:
episodes (int): Deprecated; see num_episodes.
max_timesteps (int): Deprecated; see max_episode_timesteps.
### Response:
def run(
self,
num_episodes=-1,
max_episode_timesteps=-1,
episode_finished=None,
summary_report=None,
summary_interval=0,
num_timesteps=None,
deterministic=False,
episodes=None,
max_timesteps=None,
testing=False,
sleep=None
):
"""
Executes this runner by starting all Agents in parallel (each one in one thread).
Args:
episodes (int): Deprecated; see num_episodes.
max_timesteps (int): Deprecated; see max_episode_timesteps.
"""
# Renamed episodes into num_episodes to match BaseRunner's signature (fully backw. compatible).
if episodes is not None:
num_episodes = episodes
warnings.warn("WARNING: `episodes` parameter is deprecated, use `num_episodes` instead.",
category=DeprecationWarning)
assert isinstance(num_episodes, int)
# Renamed max_timesteps into max_episode_timesteps to match single Runner's signature (fully backw. compatible).
if max_timesteps is not None:
max_episode_timesteps = max_timesteps
warnings.warn("WARNING: `max_timesteps` parameter is deprecated, use `max_episode_timesteps` instead.",
category=DeprecationWarning)
assert isinstance(max_episode_timesteps, int)
if summary_report is not None:
warnings.warn("WARNING: `summary_report` parameter is deprecated, use `episode_finished` callback "
"instead to generate summaries every n episodes.",
category=DeprecationWarning)
self.reset()
# Reset counts/stop-condition for this run.
self.global_episode = 0
self.global_timestep = 0
self.should_stop = False
# Create threads.
threads = [threading.Thread(target=self._run_single, args=(t, self.agent[t], self.environment[t],),
kwargs={"deterministic": deterministic,
"max_episode_timesteps": max_episode_timesteps,
"episode_finished": episode_finished,
"testing": testing,
"sleep": sleep})
for t in range(len(self.agent))]
# Start threads.
self.start_time = time.time()
[t.start() for t in threads]
# Stay idle until killed by SIGINT or a global stop condition is met.
try:
next_summary = 0
next_save = 0 if self.save_frequency_unit != "s" else time.time()
while any([t.is_alive() for t in threads]) and self.global_episode < num_episodes or num_episodes == -1:
self.time = time.time()
# This is deprecated (but still supported) and should be covered by the `episode_finished` callable.
if summary_report is not None and self.global_episode > next_summary:
summary_report(self)
next_summary += summary_interval
if self.save_path and self.save_frequency is not None:
do_save = True
current = None
if self.save_frequency_unit == "e" and self.global_episode > next_save:
current = self.global_episode
elif self.save_frequency_unit == "s" and self.time > next_save:
current = self.time
elif self.save_frequency_unit == "t" and self.global_timestep > next_save:
current = self.global_timestep
else:
do_save = False
if do_save:
self.agent[0].save_model(self.save_path)
# Make sure next save is later than right now.
while next_save < current:
next_save += self.save_frequency
time.sleep(1)
except KeyboardInterrupt:
print('Keyboard interrupt, sending stop command to threads')
self.should_stop = True
# Join threads.
[t.join() for t in threads]
print('All threads stopped') |
def flush_tx_buffer(self):
"""
Flushes the transmit buffer.
:raises can.CanError:
If flushing of the transmit buffer failed.
"""
log.info('Flushing transmit buffer')
self._ucan.reset_can(self.channel, ResetFlags.RESET_ONLY_TX_BUFF) | Flushes the transmit buffer.
:raises can.CanError:
If flushing of the transmit buffer failed. | Below is the instruction that describes the task:
### Input:
Flushes the transmit buffer.
:raises can.CanError:
If flushing of the transmit buffer failed.
### Response:
def flush_tx_buffer(self):
"""
Flushes the transmit buffer.
:raises can.CanError:
If flushing of the transmit buffer failed.
"""
log.info('Flushing transmit buffer')
self._ucan.reset_can(self.channel, ResetFlags.RESET_ONLY_TX_BUFF) |
def from_agent_proto(agent_info_list, brain_params):
"""
Converts list of agent infos to BrainInfo.
"""
vis_obs = []
for i in range(brain_params.number_visual_observations):
obs = [BrainInfo.process_pixels(x.visual_observations[i],
brain_params.camera_resolutions[i]['blackAndWhite'])
for x in agent_info_list]
vis_obs += [obs]
if len(agent_info_list) == 0:
memory_size = 0
else:
memory_size = max([len(x.memories) for x in agent_info_list])
if memory_size == 0:
memory = np.zeros((0, 0))
else:
[x.memories.extend([0] * (memory_size - len(x.memories))) for x in agent_info_list]
memory = np.array([list(x.memories) for x in agent_info_list])
total_num_actions = sum(brain_params.vector_action_space_size)
mask_actions = np.ones((len(agent_info_list), total_num_actions))
for agent_index, agent_info in enumerate(agent_info_list):
if agent_info.action_mask is not None:
if len(agent_info.action_mask) == total_num_actions:
mask_actions[agent_index, :] = [
0 if agent_info.action_mask[k] else 1 for k in range(total_num_actions)]
if any([np.isnan(x.reward) for x in agent_info_list]):
logger.warning("An agent had a NaN reward for brain " + brain_params.brain_name)
if any([np.isnan(x.stacked_vector_observation).any() for x in agent_info_list]):
logger.warning("An agent had a NaN observation for brain " + brain_params.brain_name)
if len(agent_info_list) == 0:
vector_obs = np.zeros(
(0, brain_params.vector_observation_space_size * brain_params.num_stacked_vector_observations)
)
else:
vector_obs = np.nan_to_num(
np.array([x.stacked_vector_observation for x in agent_info_list])
)
brain_info = BrainInfo(
visual_observation=vis_obs,
vector_observation=vector_obs,
text_observations=[x.text_observation for x in agent_info_list],
memory=memory,
reward=[x.reward if not np.isnan(x.reward) else 0 for x in agent_info_list],
agents=[x.id for x in agent_info_list],
local_done=[x.done for x in agent_info_list],
vector_action=np.array([x.stored_vector_actions for x in agent_info_list]),
text_action=[list(x.stored_text_actions) for x in agent_info_list],
max_reached=[x.max_step_reached for x in agent_info_list],
custom_observations=[x.custom_observation for x in agent_info_list],
action_mask=mask_actions
)
return brain_info | Converts list of agent infos to BrainInfo. | Below is the instruction that describes the task:
### Input:
Converts list of agent infos to BrainInfo.
### Response:
def from_agent_proto(agent_info_list, brain_params):
"""
Converts list of agent infos to BrainInfo.
"""
vis_obs = []
for i in range(brain_params.number_visual_observations):
obs = [BrainInfo.process_pixels(x.visual_observations[i],
brain_params.camera_resolutions[i]['blackAndWhite'])
for x in agent_info_list]
vis_obs += [obs]
if len(agent_info_list) == 0:
memory_size = 0
else:
memory_size = max([len(x.memories) for x in agent_info_list])
if memory_size == 0:
memory = np.zeros((0, 0))
else:
[x.memories.extend([0] * (memory_size - len(x.memories))) for x in agent_info_list]
memory = np.array([list(x.memories) for x in agent_info_list])
total_num_actions = sum(brain_params.vector_action_space_size)
mask_actions = np.ones((len(agent_info_list), total_num_actions))
for agent_index, agent_info in enumerate(agent_info_list):
if agent_info.action_mask is not None:
if len(agent_info.action_mask) == total_num_actions:
mask_actions[agent_index, :] = [
0 if agent_info.action_mask[k] else 1 for k in range(total_num_actions)]
if any([np.isnan(x.reward) for x in agent_info_list]):
logger.warning("An agent had a NaN reward for brain " + brain_params.brain_name)
if any([np.isnan(x.stacked_vector_observation).any() for x in agent_info_list]):
logger.warning("An agent had a NaN observation for brain " + brain_params.brain_name)
if len(agent_info_list) == 0:
vector_obs = np.zeros(
(0, brain_params.vector_observation_space_size * brain_params.num_stacked_vector_observations)
)
else:
vector_obs = np.nan_to_num(
np.array([x.stacked_vector_observation for x in agent_info_list])
)
brain_info = BrainInfo(
visual_observation=vis_obs,
vector_observation=vector_obs,
text_observations=[x.text_observation for x in agent_info_list],
memory=memory,
reward=[x.reward if not np.isnan(x.reward) else 0 for x in agent_info_list],
agents=[x.id for x in agent_info_list],
local_done=[x.done for x in agent_info_list],
vector_action=np.array([x.stored_vector_actions for x in agent_info_list]),
text_action=[list(x.stored_text_actions) for x in agent_info_list],
max_reached=[x.max_step_reached for x in agent_info_list],
custom_observations=[x.custom_observation for x in agent_info_list],
action_mask=mask_actions
)
return brain_info |
def check_geophysical_vars_fill_value(self, ds):
'''
Check that geophysical variables contain fill values.
:param netCDF4.Dataset ds: An open netCDF dataset
'''
results = []
for geo_var in get_geophysical_variables(ds):
results.append(
self._has_var_attr(ds, geo_var, '_FillValue', '_FillValue', BaseCheck.MEDIUM),
)
return results | Check that geophysical variables contain fill values.
:param netCDF4.Dataset ds: An open netCDF dataset | Below is the instruction that describes the task:
### Input:
Check that geophysical variables contain fill values.
:param netCDF4.Dataset ds: An open netCDF dataset
### Response:
def check_geophysical_vars_fill_value(self, ds):
'''
Check that geophysical variables contain fill values.
:param netCDF4.Dataset ds: An open netCDF dataset
'''
results = []
for geo_var in get_geophysical_variables(ds):
results.append(
self._has_var_attr(ds, geo_var, '_FillValue', '_FillValue', BaseCheck.MEDIUM),
)
return results |
def draw_summary(self, history, title=""):
"""Inserts a text summary at the top that lists the number of steps and total
training time."""
# Generate summary string
time_str = str(history.get_total_time()).split(".")[0] # remove microseconds
summary = "Step: {} Time: {}".format(history.step, time_str)
if title:
summary = title + "\n\n" + summary
self.figure.suptitle(summary) | Inserts a text summary at the top that lists the number of steps and total
training time. | Below is the the instruction that describes the task:
### Input:
Inserts a text summary at the top that lists the number of steps and total
training time.
### Response:
def draw_summary(self, history, title=""):
"""Inserts a text summary at the top that lists the number of steps and total
training time."""
# Generate summary string
time_str = str(history.get_total_time()).split(".")[0] # remove microseconds
summary = "Step: {} Time: {}".format(history.step, time_str)
if title:
summary = title + "\n\n" + summary
self.figure.suptitle(summary) |
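A minimal illustration of calling draw_summary, assuming matplotlib is available; _FakeHistory and _FakePlotter are hypothetical stand-ins that only provide the attributes the method reads (history.step, history.get_total_time() and self.figure).

import datetime
import matplotlib.pyplot as plt

class _FakeHistory:
    step = 1200
    def get_total_time(self):
        # returns a timedelta; draw_summary strips the microseconds
        return datetime.timedelta(minutes=42, seconds=3, microseconds=512)

class _FakePlotter:
    def __init__(self):
        self.figure = plt.figure()

_FakePlotter.draw_summary = draw_summary   # reuse the function above as a method
_FakePlotter().draw_summary(_FakeHistory(), title="training run")
# the figure title now reads "training run\n\nStep: 1200 Time: 0:42:03"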
def create_set_cmap(values, cmap_name, alpha=255):
"""
return a dict of colors corresponding to the unique values
:param values: values to be mapped
:param cmap_name: colormap name
:param alpha: color alpha
:return: dict of colors corresponding to the unique values
"""
unique_values = list(set(values))
shuffle(unique_values)
from pylab import get_cmap
cmap = get_cmap(cmap_name)
d = {}
for i in range(len(unique_values)):
d[unique_values[i]] = _convert_color_format(cmap(1.*i/len(unique_values)), alpha)
return d | return a dict of colors corresponding to the unique values
:param values: values to be mapped
:param cmap_name: colormap name
:param alpha: color alpha
:return: dict of colors corresponding to the unique values | Below is the the instruction that describes the task:
### Input:
return a dict of colors corresponding to the unique values
:param values: values to be mapped
:param cmap_name: colormap name
:param alpha: color alpha
:return: dict of colors corresponding to the unique values
### Response:
def create_set_cmap(values, cmap_name, alpha=255):
"""
return a dict of colors corresponding to the unique values
:param values: values to be mapped
:param cmap_name: colormap name
:param alpha: color alpha
:return: dict of colors corresponding to the unique values
"""
unique_values = list(set(values))
shuffle(unique_values)
from pylab import get_cmap
cmap = get_cmap(cmap_name)
d = {}
for i in range(len(unique_values)):
d[unique_values[i]] = _convert_color_format(cmap(1.*i/len(unique_values)), alpha)
return d |
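For illustration, a self-contained sketch of the same idea: the category_colors name is hypothetical, the RGBA scaling that create_set_cmap delegates to _convert_color_format is inlined, and matplotlib/pylab is assumed to be installed.

from random import shuffle
from pylab import get_cmap

def category_colors(values, cmap_name="viridis", alpha=255):
    # map each unique category to an (r, g, b, a) tuple scaled to 0-255
    unique_values = list(set(values))
    shuffle(unique_values)
    cmap = get_cmap(cmap_name)
    colors = {}
    for i, value in enumerate(unique_values):
        r, g, b, _ = cmap(1. * i / len(unique_values))
        colors[value] = (int(r * 255), int(g * 255), int(b * 255), alpha)
    return colors

print(category_colors(["road", "rail", "road", "ferry"]))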
def handle_error(err, halt=True):
"""Print errors message and optionally exit.
Args:
err (str): The error message to print.
halt (bool, optional): Defaults to True. If True the script will exit.
"""
print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))
if halt:
        sys.exit(1) | Print an error message and optionally exit.
Args:
err (str): The error message to print.
halt (bool, optional): Defaults to True. If True the script will exit. | Below is the the instruction that describes the task:
### Input:
Print an error message and optionally exit.
Args:
err (str): The error message to print.
halt (bool, optional): Defaults to True. If True the script will exit.
### Response:
def handle_error(err, halt=True):
"""Print errors message and optionally exit.
Args:
err (str): The error message to print.
halt (bool, optional): Defaults to True. If True the script will exit.
"""
print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))
if halt:
sys.exit(1) |
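A short usage sketch; it assumes the surrounding module imports sys and colorama as c, which is what the Style/Fore references imply.

import sys
import colorama as c
c.init()

handle_error("Config file not found.", halt=False)  # printed in bright red, execution continues
handle_error("Fatal: no API token provided.")       # printed in bright red, then exits with status 1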
def get_hwclock():
'''
Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in ret['stdout'].splitlines()):
if 'rtc in local tz' in line.lower():
try:
if line.split(':')[-1].strip().lower() == 'yes':
return 'localtime'
else:
return 'UTC'
except IndexError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse', 'NILinuxRT'):
if family in os_family:
return _get_adjtime_timezone()
if 'Debian' in __grains__['os_family']:
# Original way to look up hwclock on Debian-based systems
try:
with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'UTC=' in line:
is_utc = line.rstrip('\n').split('=')[-1].lower()
if is_utc == 'yes':
return 'UTC'
else:
return 'localtime'
except IOError as exc:
pass
# Since Wheezy
return _get_adjtime_timezone()
if 'Gentoo' in __grains__['os_family']:
if not os.path.exists('/etc/adjtime'):
offset_file = '/etc/conf.d/hwclock'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('clock='):
line = line.rstrip('\n')
line = line.split('=')[-1].strip('\'"')
if line == 'UTC':
return line
if line == 'local':
return 'LOCAL'
raise CommandExecutionError(
'Correct offset value not found in {0}'
.format(offset_file)
)
except IOError as exc:
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
return _get_adjtime_timezone()
if 'Solaris' in __grains__['os_family']:
offset_file = '/etc/rtc_config'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('zone_info=GMT'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
if 'AIX' in __grains__['os_family']:
offset_file = '/etc/environment'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('TZ=UTC'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
) | Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock | Below is the the instruction that describes the task:
### Input:
Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
### Response:
def get_hwclock():
'''
Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in ret['stdout'].splitlines()):
if 'rtc in local tz' in line.lower():
try:
if line.split(':')[-1].strip().lower() == 'yes':
return 'localtime'
else:
return 'UTC'
except IndexError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse', 'NILinuxRT'):
if family in os_family:
return _get_adjtime_timezone()
if 'Debian' in __grains__['os_family']:
# Original way to look up hwclock on Debian-based systems
try:
with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'UTC=' in line:
is_utc = line.rstrip('\n').split('=')[-1].lower()
if is_utc == 'yes':
return 'UTC'
else:
return 'localtime'
except IOError as exc:
pass
# Since Wheezy
return _get_adjtime_timezone()
if 'Gentoo' in __grains__['os_family']:
if not os.path.exists('/etc/adjtime'):
offset_file = '/etc/conf.d/hwclock'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('clock='):
line = line.rstrip('\n')
line = line.split('=')[-1].strip('\'"')
if line == 'UTC':
return line
if line == 'local':
return 'LOCAL'
raise CommandExecutionError(
'Correct offset value not found in {0}'
.format(offset_file)
)
except IOError as exc:
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
return _get_adjtime_timezone()
if 'Solaris' in __grains__['os_family']:
offset_file = '/etc/rtc_config'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('zone_info=GMT'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
if 'AIX' in __grains__['os_family']:
offset_file = '/etc/environment'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('TZ=UTC'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
) |
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_flogi(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_rx_flogi = ET.SubElement(fcoe_intf_list, "fcoe-intf-rx-flogi")
fcoe_intf_rx_flogi.text = kwargs.pop('fcoe_intf_rx_flogi')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_flogi(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_rx_flogi = ET.SubElement(fcoe_intf_list, "fcoe-intf-rx-flogi")
fcoe_intf_rx_flogi.text = kwargs.pop('fcoe_intf_rx_flogi')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _build_cached_instances(self):
"""
Build lookup table of VM instances known to the cloud provider.
The returned dictionary links VM id with the actual VM object.
"""
connection = self._connect()
reservations = connection.get_all_reservations()
cached_instances = {}
for rs in reservations:
for vm in rs.instances:
cached_instances[vm.id] = vm
return cached_instances | Build lookup table of VM instances known to the cloud provider.
The returned dictionary links VM id with the actual VM object. | Below is the the instruction that describes the task:
### Input:
Build lookup table of VM instances known to the cloud provider.
The returned dictionary links VM id with the actual VM object.
### Response:
def _build_cached_instances(self):
"""
Build lookup table of VM instances known to the cloud provider.
The returned dictionary links VM id with the actual VM object.
"""
connection = self._connect()
reservations = connection.get_all_reservations()
cached_instances = {}
for rs in reservations:
for vm in rs.instances:
cached_instances[vm.id] = vm
return cached_instances |
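The get_all_reservations call implies a legacy boto EC2 connection; below is a hedged, standalone sketch of the same lookup without the class wrapper, assuming boto is installed and credentials are configured.

import boto.ec2

def build_cached_instances(region="us-east-1"):
    # one API round trip, then index every instance by its id
    connection = boto.ec2.connect_to_region(region)
    cached_instances = {}
    for reservation in connection.get_all_reservations():
        for vm in reservation.instances:
            cached_instances[vm.id] = vm
    return cached_instances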
def add_file(self, *args):
"""
Add single file or list of files to bundle
        :type file_path: str|unicode
"""
for file_path in args:
self.files.append(FilePath(file_path, self)) | Add single file or list of files to bundle
:type file_path: str|unicode | Below is the the instruction that describes the task:
### Input:
Add single file or list of files to bundle
:type file_path: str|unicode
### Response:
def add_file(self, *args):
"""
Add single file or list of files to bundle
        :type file_path: str|unicode
"""
for file_path in args:
self.files.append(FilePath(file_path, self)) |
def reset_passwd(self, data):
""" Reset the user password """
error = False
msg = ""
# Check input format
if len(data["passwd"]) < 6:
error = True
msg = _("Password too short.")
elif data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
if not error:
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
user = self.database.users.find_one_and_update({"reset": data["reset_hash"]},
{"$set": {"password": passwd_hash},
"$unset": {"reset": True, "activate": True}})
if user is None:
error = True
msg = _("Invalid reset hash.")
else:
msg = _("Your password has been successfully changed.")
return msg, error | Reset the user password | Below is the the instruction that describes the task:
### Input:
Reset the user password
### Response:
def reset_passwd(self, data):
""" Reset the user password """
error = False
msg = ""
# Check input format
if len(data["passwd"]) < 6:
error = True
msg = _("Password too short.")
elif data["passwd"] != data["passwd2"]:
error = True
msg = _("Passwords don't match !")
if not error:
passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
user = self.database.users.find_one_and_update({"reset": data["reset_hash"]},
{"$set": {"password": passwd_hash},
"$unset": {"reset": True, "activate": True}})
if user is None:
error = True
msg = _("Invalid reset hash.")
else:
msg = _("Your password has been successfully changed.")
return msg, error |
def setup_dirs(data):
""" sets up directories for step3 data """
## make output folder for clusters
pdir = os.path.realpath(data.paramsdict["project_dir"])
data.dirs.clusts = os.path.join(pdir, "{}_clust_{}"\
.format(data.name, data.paramsdict["clust_threshold"]))
if not os.path.exists(data.dirs.clusts):
os.mkdir(data.dirs.clusts)
## make a tmpdir for align files
data.tmpdir = os.path.abspath(os.path.expanduser(
os.path.join(pdir, data.name+'-tmpalign')))
if not os.path.exists(data.tmpdir):
os.mkdir(data.tmpdir)
## If ref mapping, init samples and make the refmapping output directory.
if not data.paramsdict["assembly_method"] == "denovo":
## make output directory for read mapping process
data.dirs.refmapping = os.path.join(pdir, "{}_refmapping".format(data.name))
if not os.path.exists(data.dirs.refmapping):
os.mkdir(data.dirs.refmapping) | sets up directories for step3 data | Below is the the instruction that describes the task:
### Input:
sets up directories for step3 data
### Response:
def setup_dirs(data):
""" sets up directories for step3 data """
## make output folder for clusters
pdir = os.path.realpath(data.paramsdict["project_dir"])
data.dirs.clusts = os.path.join(pdir, "{}_clust_{}"\
.format(data.name, data.paramsdict["clust_threshold"]))
if not os.path.exists(data.dirs.clusts):
os.mkdir(data.dirs.clusts)
## make a tmpdir for align files
data.tmpdir = os.path.abspath(os.path.expanduser(
os.path.join(pdir, data.name+'-tmpalign')))
if not os.path.exists(data.tmpdir):
os.mkdir(data.tmpdir)
## If ref mapping, init samples and make the refmapping output directory.
if not data.paramsdict["assembly_method"] == "denovo":
## make output directory for read mapping process
data.dirs.refmapping = os.path.join(pdir, "{}_refmapping".format(data.name))
if not os.path.exists(data.dirs.refmapping):
os.mkdir(data.dirs.refmapping) |
def get(self, name, default, allow_default=True):
""" Return a setting value.
:param str name: Setting key name.
:param default: Default value of setting if it's not explicitly
set.
:param bool allow_default: If true, use the parameter default as
default if the key is not set, else raise
:exc:`LookupError`
:raises: :exc:`LookupError` if allow_default is false and the setting is
not set.
"""
if not self.settings.get('pyconfig.case_sensitive', False):
name = name.lower()
if name not in self.settings:
if not allow_default:
raise LookupError('No setting "{name}"'.format(name=name))
self.settings[name] = default
return self.settings[name] | Return a setting value.
:param str name: Setting key name.
:param default: Default value of setting if it's not explicitly
set.
:param bool allow_default: If true, use the parameter default as
default if the key is not set, else raise
:exc:`LookupError`
:raises: :exc:`LookupError` if allow_default is false and the setting is
not set. | Below is the the instruction that describes the task:
### Input:
Return a setting value.
:param str name: Setting key name.
:param default: Default value of setting if it's not explicitly
set.
:param bool allow_default: If true, use the parameter default as
default if the key is not set, else raise
:exc:`LookupError`
:raises: :exc:`LookupError` if allow_default is false and the setting is
not set.
### Response:
def get(self, name, default, allow_default=True):
""" Return a setting value.
:param str name: Setting key name.
:param default: Default value of setting if it's not explicitly
set.
:param bool allow_default: If true, use the parameter default as
default if the key is not set, else raise
:exc:`LookupError`
:raises: :exc:`LookupError` if allow_default is false and the setting is
not set.
"""
if not self.settings.get('pyconfig.case_sensitive', False):
name = name.lower()
if name not in self.settings:
if not allow_default:
raise LookupError('No setting "{name}"'.format(name=name))
self.settings[name] = default
return self.settings[name] |
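Because the method only relies on a settings dict on self, its behaviour can be exercised with a minimal stand-in object (the _FakeConfig class below is hypothetical, not part of the original library):

class _FakeConfig:
    def __init__(self):
        self.settings = {}

_FakeConfig.get = get  # attach the function above as a method

cfg = _FakeConfig()
print(cfg.get("Database.Host", "localhost"))  # 'localhost' (key stored lowercased)
print(cfg.get("database.host", "ignored"))    # 'localhost' (already set, default ignored)
try:
    cfg.get("missing.key", None, allow_default=False)
except LookupError as err:
    print(err)                                # No setting "missing.key"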
def weighted_median(data, weights=None):
"""Calculate the weighted median of a list."""
if weights is None:
return median(data)
midpoint = 0.5 * sum(weights)
if any([j > midpoint for j in weights]):
return data[weights.index(max(weights))]
if any([j > 0 for j in weights]):
sorted_data, sorted_weights = zip(*sorted(zip(data, weights)))
cumulative_weight = 0
below_midpoint_index = 0
while cumulative_weight <= midpoint:
below_midpoint_index += 1
cumulative_weight += sorted_weights[below_midpoint_index-1]
cumulative_weight -= sorted_weights[below_midpoint_index-1]
if cumulative_weight - midpoint < sys.float_info.epsilon:
bounds = sorted_data[below_midpoint_index-2:below_midpoint_index]
return sum(bounds) / float(len(bounds))
return sorted_data[below_midpoint_index-1] | Calculate the weighted median of a list. | Below is the the instruction that describes the task:
### Input:
Calculate the weighted median of a list.
### Response:
def weighted_median(data, weights=None):
"""Calculate the weighted median of a list."""
if weights is None:
return median(data)
midpoint = 0.5 * sum(weights)
if any([j > midpoint for j in weights]):
return data[weights.index(max(weights))]
if any([j > 0 for j in weights]):
sorted_data, sorted_weights = zip(*sorted(zip(data, weights)))
cumulative_weight = 0
below_midpoint_index = 0
while cumulative_weight <= midpoint:
below_midpoint_index += 1
cumulative_weight += sorted_weights[below_midpoint_index-1]
cumulative_weight -= sorted_weights[below_midpoint_index-1]
if cumulative_weight - midpoint < sys.float_info.epsilon:
bounds = sorted_data[below_midpoint_index-2:below_midpoint_index]
return sum(bounds) / float(len(bounds))
return sorted_data[below_midpoint_index-1] |
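A quick check of the shortcut branch, assuming the function above is defined at module level (the weights-is-None path additionally needs the median helper and the sys import used in the body):

data = [1, 2, 3, 4]
weights = [1, 1, 1, 10]
# one weight (10) exceeds half the total weight (6.5), so that element wins outright
print(weighted_median(data, weights))  # 4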
def import_emails(self, archives_path, all, exclude_lists=None):
"""Get emails from the filesystem from the `archives_path`
and store them into the database. If `all` is set to True all
the filesystem storage will be imported otherwise the
importation will resume from the last message previously
imported. The lists set in `exclude_lists` won't be imported.
"""
count = 0
email_generator = self.get_emails(archives_path, all, exclude_lists)
for mailinglist_name, msg, index in email_generator:
try:
self.save_email(mailinglist_name, msg, index)
except:
                # This anti-pattern is needed to keep the transactions from
                # getting stuck in case of errors.
transaction.rollback()
raise
count += 1
if count % 1000 == 0:
transaction.commit()
transaction.commit() | Get emails from the filesystem from the `archives_path`
and store them into the database. If `all` is set to True all
the filesystem storage will be imported otherwise the
importation will resume from the last message previously
imported. The lists set in `exclude_lists` won't be imported. | Below is the the instruction that describes the task:
### Input:
Get emails from the filesystem from the `archives_path`
and store them into the database. If `all` is set to True all
the filesystem storage will be imported otherwise the
importation will resume from the last message previously
imported. The lists set in `exclude_lists` won't be imported.
### Response:
def import_emails(self, archives_path, all, exclude_lists=None):
"""Get emails from the filesystem from the `archives_path`
and store them into the database. If `all` is set to True all
the filesystem storage will be imported otherwise the
importation will resume from the last message previously
imported. The lists set in `exclude_lists` won't be imported.
"""
count = 0
email_generator = self.get_emails(archives_path, all, exclude_lists)
for mailinglist_name, msg, index in email_generator:
try:
self.save_email(mailinglist_name, msg, index)
except:
                # This anti-pattern is needed to keep the transactions from
                # getting stuck in case of errors.
transaction.rollback()
raise
count += 1
if count % 1000 == 0:
transaction.commit()
transaction.commit() |
def _iter_from_process_or_stream(cls, repo, proc_or_stream):
"""Parse out commit information into a list of Commit objects
        We expect one line per commit, and parse the actual commit information directly
        from our lightning fast object database
:param proc: git-rev-list process instance - one sha per line
:return: iterator returning Commit objects"""
stream = proc_or_stream
if not hasattr(stream, 'readline'):
stream = proc_or_stream.stdout
readline = stream.readline
while True:
line = readline()
if not line:
break
hexsha = line.strip()
if len(hexsha) > 40:
# split additional information, as returned by bisect for instance
hexsha, _ = line.split(None, 1)
# END handle extra info
assert len(hexsha) == 40, "Invalid line: %s" % hexsha
yield Commit(repo, hex_to_bin(hexsha))
# END for each line in stream
# TODO: Review this - it seems process handling got a bit out of control
# due to many developers trying to fix the open file handles issue
if hasattr(proc_or_stream, 'wait'):
finalize_process(proc_or_stream) | Parse out commit information into a list of Commit objects
        We expect one line per commit, and parse the actual commit information directly
        from our lightning fast object database
:param proc: git-rev-list process instance - one sha per line
:return: iterator returning Commit objects | Below is the the instruction that describes the task:
### Input:
Parse out commit information into a list of Commit objects
        We expect one line per commit, and parse the actual commit information directly
        from our lightning fast object database
:param proc: git-rev-list process instance - one sha per line
:return: iterator returning Commit objects
### Response:
def _iter_from_process_or_stream(cls, repo, proc_or_stream):
"""Parse out commit information into a list of Commit objects
        We expect one line per commit, and parse the actual commit information directly
        from our lightning fast object database
:param proc: git-rev-list process instance - one sha per line
:return: iterator returning Commit objects"""
stream = proc_or_stream
if not hasattr(stream, 'readline'):
stream = proc_or_stream.stdout
readline = stream.readline
while True:
line = readline()
if not line:
break
hexsha = line.strip()
if len(hexsha) > 40:
# split additional information, as returned by bisect for instance
hexsha, _ = line.split(None, 1)
# END handle extra info
assert len(hexsha) == 40, "Invalid line: %s" % hexsha
yield Commit(repo, hex_to_bin(hexsha))
# END for each line in stream
# TODO: Review this - it seems process handling got a bit out of control
# due to many developers trying to fix the open file handles issue
if hasattr(proc_or_stream, 'wait'):
finalize_process(proc_or_stream) |
def _update(self, **kwargs):
"""wrapped with update, override that in a subclass to customize"""
requests_params, update_uri, session, read_only = \
self._prepare_put_or_patch(kwargs)
read_only_mutations = []
for attr in read_only:
if attr in kwargs:
read_only_mutations.append(attr)
if read_only_mutations:
msg = 'Attempted to mutate read-only attribute(s): %s' \
% read_only_mutations
raise AttemptedMutationOfReadOnly(msg)
# Get the current state of the object on BIG-IP® and check the
# generation Use pop here because we don't want force in the data_dict
force = self._check_force_arg(kwargs.pop('force', True))
if not force:
# generation has a known server-side error
self._check_generation()
kwargs = self._check_for_boolean_pair_reduction(kwargs)
# Save the meta data so we can add it back into self after we
# load the new object.
temp_meta = self.__dict__.pop('_meta_data')
# Need to remove any of the Collection objects from self.__dict__
# because these are subCollections and _meta_data and
# other non-BIG-IP® attrs are not removed from the subCollections
# See issue #146 for details
tmp = dict()
for key, value in iteritems(self.__dict__):
# In Python2 versions we were changing a dictionary in place,
# but this cannot be done with an iterator as an error is raised.
# So instead we create a temporary holder for the modified dict
# and then re-assign it afterwards.
if isinstance(value, Collection):
pass
else:
tmp[key] = value
self.__dict__ = tmp
data_dict = self.to_dict()
# Remove any read-only attributes from our data_dict before we update
# the data dict with the attributes. If they pass in read-only attrs
# in the method call we are going to let BIG-IP® let them know about it
# when it fails
for attr in read_only:
data_dict.pop(attr, '')
data_dict.update(kwargs)
data_dict = self._prepare_request_json(data_dict)
# Handles ConnectionAborted errors
#
# @see https://github.com/F5Networks/f5-ansible/issues/317
# @see https://github.com/requests/requests/issues/2364
for _ in range(0, 30):
try:
response = session.put(update_uri, json=data_dict, **requests_params)
self._meta_data = temp_meta
self._local_update(response.json())
break
except iControlUnexpectedHTTPError:
response = session.get(update_uri, **requests_params)
self._meta_data = temp_meta
self._local_update(response.json())
raise
except ConnectionError as ex:
if 'Connection aborted' in str(ex):
time.sleep(1)
continue
else:
raise | wrapped with update, override that in a subclass to customize | Below is the the instruction that describes the task:
### Input:
wrapped with update, override that in a subclass to customize
### Response:
def _update(self, **kwargs):
"""wrapped with update, override that in a subclass to customize"""
requests_params, update_uri, session, read_only = \
self._prepare_put_or_patch(kwargs)
read_only_mutations = []
for attr in read_only:
if attr in kwargs:
read_only_mutations.append(attr)
if read_only_mutations:
msg = 'Attempted to mutate read-only attribute(s): %s' \
% read_only_mutations
raise AttemptedMutationOfReadOnly(msg)
# Get the current state of the object on BIG-IP® and check the
# generation Use pop here because we don't want force in the data_dict
force = self._check_force_arg(kwargs.pop('force', True))
if not force:
# generation has a known server-side error
self._check_generation()
kwargs = self._check_for_boolean_pair_reduction(kwargs)
# Save the meta data so we can add it back into self after we
# load the new object.
temp_meta = self.__dict__.pop('_meta_data')
# Need to remove any of the Collection objects from self.__dict__
# because these are subCollections and _meta_data and
# other non-BIG-IP® attrs are not removed from the subCollections
# See issue #146 for details
tmp = dict()
for key, value in iteritems(self.__dict__):
# In Python2 versions we were changing a dictionary in place,
# but this cannot be done with an iterator as an error is raised.
# So instead we create a temporary holder for the modified dict
# and then re-assign it afterwards.
if isinstance(value, Collection):
pass
else:
tmp[key] = value
self.__dict__ = tmp
data_dict = self.to_dict()
# Remove any read-only attributes from our data_dict before we update
# the data dict with the attributes. If they pass in read-only attrs
# in the method call we are going to let BIG-IP® let them know about it
# when it fails
for attr in read_only:
data_dict.pop(attr, '')
data_dict.update(kwargs)
data_dict = self._prepare_request_json(data_dict)
# Handles ConnectionAborted errors
#
# @see https://github.com/F5Networks/f5-ansible/issues/317
# @see https://github.com/requests/requests/issues/2364
for _ in range(0, 30):
try:
response = session.put(update_uri, json=data_dict, **requests_params)
self._meta_data = temp_meta
self._local_update(response.json())
break
except iControlUnexpectedHTTPError:
response = session.get(update_uri, **requests_params)
self._meta_data = temp_meta
self._local_update(response.json())
raise
except ConnectionError as ex:
if 'Connection aborted' in str(ex):
time.sleep(1)
continue
else:
raise |
def get_or_create_time_series(self, label_values):
"""Get a mutable measurement for the given set of label values.
:type label_values: list(:class:`LabelValue`)
:param label_values: The measurement's label values.
:rtype: :class:`GaugePointLong`, :class:`GaugePointDouble`
:class:`opencensus.metrics.export.cumulative.CumulativePointLong`,
or
:class:`opencensus.metrics.export.cumulative.CumulativePointDouble`
:return: A mutable point that represents the last value of the
measurement.
"""
if label_values is None:
raise ValueError
if any(lv is None for lv in label_values):
raise ValueError
if len(label_values) != self._len_label_keys:
raise ValueError
return self._get_or_create_time_series(label_values) | Get a mutable measurement for the given set of label values.
:type label_values: list(:class:`LabelValue`)
:param label_values: The measurement's label values.
:rtype: :class:`GaugePointLong`, :class:`GaugePointDouble`
:class:`opencensus.metrics.export.cumulative.CumulativePointLong`,
or
:class:`opencensus.metrics.export.cumulative.CumulativePointDouble`
:return: A mutable point that represents the last value of the
measurement. | Below is the the instruction that describes the task:
### Input:
Get a mutable measurement for the given set of label values.
:type label_values: list(:class:`LabelValue`)
:param label_values: The measurement's label values.
:rtype: :class:`GaugePointLong`, :class:`GaugePointDouble`
:class:`opencensus.metrics.export.cumulative.CumulativePointLong`,
or
:class:`opencensus.metrics.export.cumulative.CumulativePointDouble`
:return: A mutable point that represents the last value of the
measurement.
### Response:
def get_or_create_time_series(self, label_values):
"""Get a mutable measurement for the given set of label values.
:type label_values: list(:class:`LabelValue`)
:param label_values: The measurement's label values.
:rtype: :class:`GaugePointLong`, :class:`GaugePointDouble`
:class:`opencensus.metrics.export.cumulative.CumulativePointLong`,
or
:class:`opencensus.metrics.export.cumulative.CumulativePointDouble`
:return: A mutable point that represents the last value of the
measurement.
"""
if label_values is None:
raise ValueError
if any(lv is None for lv in label_values):
raise ValueError
if len(label_values) != self._len_label_keys:
raise ValueError
return self._get_or_create_time_series(label_values) |
def get_resources(self, ids, cache=True):
"""Support server side filtering on arns or names
"""
if ids[0].startswith('arn:'):
params = {'LoadBalancerArns': ids}
else:
params = {'Names': ids}
return self.query.filter(self.manager, **params) | Support server side filtering on arns or names | Below is the the instruction that describes the task:
### Input:
Support server side filtering on arns or names
### Response:
def get_resources(self, ids, cache=True):
"""Support server side filtering on arns or names
"""
if ids[0].startswith('arn:'):
params = {'LoadBalancerArns': ids}
else:
params = {'Names': ids}
return self.query.filter(self.manager, **params) |
def addresses_for_key(gpg, key):
"""
Takes a key and extracts the email addresses for it.
"""
return [address.split("<")[-1].strip(">")
for address in gpg.list_keys().key_map[key['fingerprint']]["uids"]
if address] | Takes a key and extracts the email addresses for it. | Below is the the instruction that describes the task:
### Input:
Takes a key and extracts the email addresses for it.
### Response:
def addresses_for_key(gpg, key):
"""
Takes a key and extracts the email addresses for it.
"""
return [address.split("<")[-1].strip(">")
for address in gpg.list_keys().key_map[key['fingerprint']]["uids"]
if address] |
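A hedged usage sketch assuming the python-gnupg package, whose list_keys() result exposes the key_map attribute this helper indexes into:

import os
import gnupg

gpg = gnupg.GPG(gnupghome=os.path.expanduser("~/.gnupg"))
for key in gpg.list_keys():
    print(key["keyid"], addresses_for_key(gpg, key))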
def get_descriptor_defaults(self, api_info, hostname=None):
"""Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration.
"""
hostname = (hostname or endpoints_util.get_app_hostname() or
api_info.hostname)
protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
endpoints_util.is_running_on_devserver()) else 'https'
base_path = api_info.base_path.strip('/')
defaults = {
'extends': 'thirdParty.api',
'root': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
'name': api_info.name,
'version': api_info.api_version,
'api_version': api_info.api_version,
'path_version': api_info.path_version,
'defaultVersion': True,
'abstract': False,
'adapter': {
'bns': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
'type': 'lily',
'deadline': 10.0
}
}
if api_info.canonical_name:
defaults['canonicalName'] = api_info.canonical_name
if api_info.owner_domain:
defaults['ownerDomain'] = api_info.owner_domain
if api_info.owner_name:
defaults['ownerName'] = api_info.owner_name
if api_info.package_path:
defaults['packagePath'] = api_info.package_path
if api_info.title:
defaults['title'] = api_info.title
if api_info.documentation:
defaults['documentation'] = api_info.documentation
return defaults | Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration. | Below is the the instruction that describes the task:
### Input:
Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration.
### Response:
def get_descriptor_defaults(self, api_info, hostname=None):
"""Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration.
"""
hostname = (hostname or endpoints_util.get_app_hostname() or
api_info.hostname)
protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
endpoints_util.is_running_on_devserver()) else 'https'
base_path = api_info.base_path.strip('/')
defaults = {
'extends': 'thirdParty.api',
'root': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
'name': api_info.name,
'version': api_info.api_version,
'api_version': api_info.api_version,
'path_version': api_info.path_version,
'defaultVersion': True,
'abstract': False,
'adapter': {
'bns': '{0}://{1}/{2}'.format(protocol, hostname, base_path),
'type': 'lily',
'deadline': 10.0
}
}
if api_info.canonical_name:
defaults['canonicalName'] = api_info.canonical_name
if api_info.owner_domain:
defaults['ownerDomain'] = api_info.owner_domain
if api_info.owner_name:
defaults['ownerName'] = api_info.owner_name
if api_info.package_path:
defaults['packagePath'] = api_info.package_path
if api_info.title:
defaults['title'] = api_info.title
if api_info.documentation:
defaults['documentation'] = api_info.documentation
return defaults |
def convert_convolution(builder, layer, input_names, output_names, keras_layer):
"""
Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
has_bias = keras_layer.use_bias
is_deconv = isinstance(keras_layer,
_keras.layers.convolutional.Conv2DTranspose)
# Get the weights from _keras.
weightList = keras_layer.get_weights()
# Dimensions and weights
if is_deconv:
height, width, n_filters, channels = weightList[0].shape
W = weightList[0].transpose([0,1,3,2])
try:
output_blob_shape = list(filter(None, keras_layer.output_shape))
output_shape = output_blob_shape[:-1]
except:
output_shape = None
else:
height, width, channels, n_filters = weightList[0].shape
W = weightList[0]
output_shape = None
b = weightList[1] if has_bias else None
output_channels = n_filters
stride_height, stride_width = keras_layer.strides
# Dilations
dilations = [1,1]
if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple):
dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate]
if is_deconv and not dilations == [1,1]:
raise ValueError("Unsupported non-unity dilation for Deconvolution layer")
groups = 1
kernel_channels = channels
# depth-wise convolution
if isinstance(keras_layer, DepthwiseConv2D):
groups = channels
kernel_channels = 1
depth_multiplier = keras_layer.depth_multiplier
W = _np.reshape(W,(height, width,1,channels * depth_multiplier))
output_channels = channels * depth_multiplier
builder.add_convolution(name = layer,
kernel_channels = kernel_channels,
output_channels = output_channels,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
border_mode = keras_layer.padding,
groups = groups,
W = W,
b = b,
has_bias = has_bias,
is_deconv = is_deconv,
output_shape = output_shape,
input_name = input_name,
output_name = output_name,
dilation_factors = dilations) | Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. | Below is the the instruction that describes the task:
### Input:
Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
### Response:
def convert_convolution(builder, layer, input_names, output_names, keras_layer):
"""
Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
_check_data_format(keras_layer)
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
has_bias = keras_layer.use_bias
is_deconv = isinstance(keras_layer,
_keras.layers.convolutional.Conv2DTranspose)
# Get the weights from _keras.
weightList = keras_layer.get_weights()
# Dimensions and weights
if is_deconv:
height, width, n_filters, channels = weightList[0].shape
W = weightList[0].transpose([0,1,3,2])
try:
output_blob_shape = list(filter(None, keras_layer.output_shape))
output_shape = output_blob_shape[:-1]
except:
output_shape = None
else:
height, width, channels, n_filters = weightList[0].shape
W = weightList[0]
output_shape = None
b = weightList[1] if has_bias else None
output_channels = n_filters
stride_height, stride_width = keras_layer.strides
# Dilations
dilations = [1,1]
if (type(keras_layer.dilation_rate) is list) or (type(keras_layer.dilation_rate) is tuple):
dilations = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilations = [keras_layer.dilation_rate, keras_layer.dilation_rate]
if is_deconv and not dilations == [1,1]:
raise ValueError("Unsupported non-unity dilation for Deconvolution layer")
groups = 1
kernel_channels = channels
# depth-wise convolution
if isinstance(keras_layer, DepthwiseConv2D):
groups = channels
kernel_channels = 1
depth_multiplier = keras_layer.depth_multiplier
W = _np.reshape(W,(height, width,1,channels * depth_multiplier))
output_channels = channels * depth_multiplier
builder.add_convolution(name = layer,
kernel_channels = kernel_channels,
output_channels = output_channels,
height = height,
width = width,
stride_height = stride_height,
stride_width = stride_width,
border_mode = keras_layer.padding,
groups = groups,
W = W,
b = b,
has_bias = has_bias,
is_deconv = is_deconv,
output_shape = output_shape,
input_name = input_name,
output_name = output_name,
dilation_factors = dilations) |
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes | helper, returns a list of all superclasses of a given class | Below is the the instruction that describes the task:
### Input:
helper, returns a list of all superclasses of a given class
### Response:
def _superclasses_for_subject(self, graph, typeof):
"""helper, returns a list of all superclasses of a given class"""
# TODO - this might be replacing a fairly simple graph API query where
# it doesn't need to
classes = []
superclass = typeof
while True:
found = False
for p, o in self.schema_def.ontology[superclass]:
if self.schema_def.lexicon['subclass'] == str(p):
found = True
classes.append(o)
superclass = o
if not found:
break
return classes |
def all_examples(self, pred_name=None):
'''
Emits all examples in prolog form for RSD.
:param pred_name: override for the emitted predicate name
'''
target = self.db.target_table
pred_name = pred_name if pred_name else target
examples = self.db.rows(target, [self.db.target_att, self.db.pkeys[target]])
return '\n'.join(["%s(%s, %s)." % (pred_name, ILPConverter.fmt_col(cls), pk) for cls, pk in examples]) | Emits all examples in prolog form for RSD.
:param pred_name: override for the emitted predicate name | Below is the the instruction that describes the task:
### Input:
Emits all examples in prolog form for RSD.
:param pred_name: override for the emitted predicate name
### Response:
def all_examples(self, pred_name=None):
'''
Emits all examples in prolog form for RSD.
:param pred_name: override for the emitted predicate name
'''
target = self.db.target_table
pred_name = pred_name if pred_name else target
examples = self.db.rows(target, [self.db.target_att, self.db.pkeys[target]])
return '\n'.join(["%s(%s, %s)." % (pred_name, ILPConverter.fmt_col(cls), pk) for cls, pk in examples]) |
def render(self, template: str, **vars) -> str:
"""
Render the named template.
The current context will be available to the template as the ``ctx`` variable.
:param template: name of the template file
:param vars: extra template variables
:return: the rendered results
"""
vars.setdefault('ctx', self._ctx)
return self._renderer.render(template, **vars) | Render the named template.
The current context will be available to the template as the ``ctx`` variable.
:param template: name of the template file
:param vars: extra template variables
:return: the rendered results | Below is the the instruction that describes the task:
### Input:
Render the named template.
The current context will be available to the template as the ``ctx`` variable.
:param template: name of the template file
:param vars: extra template variables
:return: the rendered results
### Response:
def render(self, template: str, **vars) -> str:
"""
Render the named template.
The current context will be available to the template as the ``ctx`` variable.
:param template: name of the template file
:param vars: extra template variables
:return: the rendered results
"""
vars.setdefault('ctx', self._ctx)
return self._renderer.render(template, **vars) |
def diff_dumps(ih1, ih2, tofile=None, name1="a", name2="b", n_context=3):
"""Diff 2 IntelHex objects and produce unified diff output for their
hex dumps.
@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
@param name2 name of the first hex file to show in the diff header
@param n_context number of context lines in the unidiff output
"""
def prepare_lines(ih):
sio = StringIO()
ih.dump(sio)
dump = sio.getvalue()
lines = dump.splitlines()
return lines
a = prepare_lines(ih1)
b = prepare_lines(ih2)
import difflib
result = list(difflib.unified_diff(a, b, fromfile=name1, tofile=name2, n=n_context, lineterm=''))
if tofile is None:
tofile = sys.stdout
output = '\n'.join(result)+'\n'
tofile.write(output) | Diff 2 IntelHex objects and produce unified diff output for their
hex dumps.
@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
    @param name2 name of the second hex file to show in the diff header
@param n_context number of context lines in the unidiff output | Below is the the instruction that describes the task:
### Input:
Diff 2 IntelHex objects and produce unified diff output for their
hex dumps.
@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
    @param name2 name of the second hex file to show in the diff header
@param n_context number of context lines in the unidiff output
### Response:
def diff_dumps(ih1, ih2, tofile=None, name1="a", name2="b", n_context=3):
"""Diff 2 IntelHex objects and produce unified diff output for their
hex dumps.
@param ih1 first IntelHex object to compare
@param ih2 second IntelHex object to compare
@param tofile file-like object to write output
@param name1 name of the first hex file to show in the diff header
    @param name2 name of the second hex file to show in the diff header
@param n_context number of context lines in the unidiff output
"""
def prepare_lines(ih):
sio = StringIO()
ih.dump(sio)
dump = sio.getvalue()
lines = dump.splitlines()
return lines
a = prepare_lines(ih1)
b = prepare_lines(ih2)
import difflib
result = list(difflib.unified_diff(a, b, fromfile=name1, tofile=name2, n=n_context, lineterm=''))
if tofile is None:
tofile = sys.stdout
output = '\n'.join(result)+'\n'
tofile.write(output) |
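A hedged usage sketch assuming the intelhex package, which provides IntelHex and ships this helper (the function body also relies on StringIO and sys being imported in its module):

from intelhex import IntelHex

a = IntelHex()
a.puts(0x0000, b"\x01\x02\x03\x04")
b = IntelHex()
b.puts(0x0000, b"\x01\x02\xff\x04")

diff_dumps(a, b, name1="before.hex", name2="after.hex")  # unified diff of the two hex dumps on stdout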
def setup_graph(self):
""" Will setup the assign operator for that variable. """
all_vars = tfv1.global_variables() + tfv1.local_variables()
for v in all_vars:
if v.name == self.var_name:
self.var = v
break
else:
raise ValueError("{} is not a variable in the graph!".format(self.var_name)) | Will setup the assign operator for that variable. | Below is the the instruction that describes the task:
### Input:
Will set up the assign operator for that variable.
### Response:
def setup_graph(self):
""" Will setup the assign operator for that variable. """
all_vars = tfv1.global_variables() + tfv1.local_variables()
for v in all_vars:
if v.name == self.var_name:
self.var = v
break
else:
raise ValueError("{} is not a variable in the graph!".format(self.var_name)) |
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, sort_keys=False, **kw):
"""Serialize ``obj`` as a DSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is true (the default), all non-ASCII characters in the
output are escaped with ``\\uXXXX`` sequences, and the result is a ``str``
instance consisting of ASCII characters only. If ``ensure_ascii`` is
``False``, some chunks written to ``fp`` may be ``unicode`` instances.
This usually happens because the input contains unicode strings or the
``encoding`` parameter is used. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter``) this is likely to
cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the DSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then DSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation. Since the default item separator is ``', '``, the
output might include trailing whitespace when ``indent`` is specified.
You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``('and ', 'is ')`` separators.
``('and', 'is')`` is the most compact DSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``DSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``DSONEncoder`` is used.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not sort_keys and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = DSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, sort_keys=sort_keys, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk) | Serialize ``obj`` as a DSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is true (the default), all non-ASCII characters in the
output are escaped with ``\\uXXXX`` sequences, and the result is a ``str``
instance consisting of ASCII characters only. If ``ensure_ascii`` is
``False``, some chunks written to ``fp`` may be ``unicode`` instances.
This usually happens because the input contains unicode strings or the
``encoding`` parameter is used. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter``) this is likely to
cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the DSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then DSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation. Since the default item separator is ``', '``, the
output might include trailing whitespace when ``indent`` is specified.
You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``('and ', 'is ')`` separators.
``('and', 'is')`` is the most compact DSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``DSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``DSONEncoder`` is used. | Below is the the instruction that describes the task:
### Input:
Serialize ``obj`` as a DSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is true (the default), all non-ASCII characters in the
output are escaped with ``\\uXXXX`` sequences, and the result is a ``str``
instance consisting of ASCII characters only. If ``ensure_ascii`` is
``False``, some chunks written to ``fp`` may be ``unicode`` instances.
This usually happens because the input contains unicode strings or the
``encoding`` parameter is used. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter``) this is likely to
cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the DSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then DSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation. Since the default item separator is ``', '``, the
output might include trailing whitespace when ``indent`` is specified.
You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``('and ', 'is ')`` separators.
``('and', 'is')`` is the most compact DSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``DSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``DSONEncoder`` is used.
### Response:
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, sort_keys=False, **kw):
"""Serialize ``obj`` as a DSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is true (the default), all non-ASCII characters in the
output are escaped with ``\\uXXXX`` sequences, and the result is a ``str``
instance consisting of ASCII characters only. If ``ensure_ascii`` is
``False``, some chunks written to ``fp`` may be ``unicode`` instances.
This usually happens because the input contains unicode strings or the
``encoding`` parameter is used. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter``) this is likely to
cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the DSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then DSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation. Since the default item separator is ``', '``, the
output might include trailing whitespace when ``indent`` is specified.
You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``('and ', 'is ')`` separators.
``('and', 'is')`` is the most compact DSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``DSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``DSONEncoder`` is used.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not sort_keys and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = DSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, sort_keys=sort_keys, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk) |
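A minimal usage sketch for the dump() defined above, writing to an in-memory buffer; the sample object and keyword choices are illustrative, and the exact serialized text depends on the DSON separators described in the docstring.

import io

buf = io.StringIO()
# Serialize a small dict with the dump() above; sort_keys makes the output deterministic.
dump({"such": "wow", "many": 3}, buf, sort_keys=True)
serialized = buf.getvalue()  # DSON text using the ('and ', 'is ') style separators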
def list_scores(self, update_keys, session=None, lightweight=None):
"""
Returns a list of current scores for the given events.
:param list update_keys: The filter to select desired markets. All markets that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Score]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listScores')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.Score, elapsed_time, lightweight) | Returns a list of current scores for the given events.
:param list update_keys: The filter to select desired markets. All markets that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Score] | Below is the instruction that describes the task:
### Input:
Returns a list of current scores for the given events.
:param list update_keys: The filter to select desired markets. All markets that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Score]
### Response:
def list_scores(self, update_keys, session=None, lightweight=None):
"""
Returns a list of current scores for the given events.
:param list update_keys: The filter to select desired markets. All markets that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Score]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listScores')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.Score, elapsed_time, lightweight) |
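A hedged call sketch for listScores; the client object and its attribute path are placeholders and only the method signature and the update_keys shape come from the record above.

# 'scores_api' stands in for whatever object exposes list_scores(); it is an assumption.
update_keys = [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
scores = scores_api.list_scores(update_keys)                       # list of resources.Score
raw_dicts = scores_api.list_scores(update_keys, lightweight=True)  # plain dicts instead of resources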
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious
"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node) | check whether or not the lambda is suspicious | Below is the instruction that describes the task:
### Input:
check whether or not the lambda is suspicious
### Response:
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious
"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node) |
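Illustrative inputs for the check above, showing which lambdas it would and would not report (my reading of the logic shown here, not taken from the pylint docs).

# Reported: the lambda only forwards its argument to a single call, so the callable
# could be passed directly (str instead of lambda x: str(x)).
flagged = map(lambda x: str(x), [1, 2, 3])

# Not reported: the argument is transformed first, so the Name-for-Name
# correspondence check near the end of visit_lambda fails.
kept = map(lambda x: str(x + 1), [1, 2, 3])

# Not reported: defaults trigger the early return at the top of the method.
also_kept = lambda x=0: str(x)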
def remove(self, option):
"""
Removes the first `option` from the ButtonGroup.
Returns `True` if an item was removed.
:param string option:
The value of the option to remove from the ButtonGroup.
"""
for existing_option in self._options:
if existing_option[1] == option:
self._options.remove(existing_option)
self._refresh_options()
return True
return False | Removes the first `option` from the ButtonGroup.
Returns `True` if an item was removed.
:param string option:
The value of the option to remove from the ButtonGroup. | Below is the instruction that describes the task:
### Input:
Removes the first `option` from the ButtonGroup.
Returns `True` if an item was removed.
:param string option:
The value of the option to remove from the ButtonGroup.
### Response:
def remove(self, option):
"""
Removes the first `option` from the ButtonGroup.
Returns `True` if an item was removed.
:param string option:
The value of the option to remove from the ButtonGroup.
"""
for existing_option in self._options:
if existing_option[1] == option:
self._options.remove(existing_option)
self._refresh_options()
return True
return False |
def getsecret(self, section, option, **kwargs):
"""Get a secret from Custodia
"""
# keyword-only arguments, vars and fallback are directly passed through
raw = kwargs.get('raw', False)
value = self.get(section, option, **kwargs)
if raw:
return value
return self.custodia_client.get_secret(value) | Get a secret from Custodia | Below is the instruction that describes the task:
### Input:
Get a secret from Custodia
### Response:
def getsecret(self, section, option, **kwargs):
"""Get a secret from Custodia
"""
# keyword-only arguments, vars and fallback are directly passed through
raw = kwargs.get('raw', False)
value = self.get(section, option, **kwargs)
if raw:
return value
return self.custodia_client.get_secret(value) |
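A hedged sketch of how this method is typically used: the parser subclass it belongs to and the Custodia client are not shown in the record, so the object below is a placeholder and the section/option names are made up.

# parser is assumed to be an instance of the ConfigParser subclass that defines
# getsecret(), already wired to a Custodia client.
secret = parser.getsecret('handler:kdc', 'auth_key')                # resolved via Custodia
reference = parser.getsecret('handler:kdc', 'auth_key', raw=True)   # raw config value, unresolved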
def safe_get(self, section, key):
"""
Attempt to get a configuration value from a certain section
in a ``cfg`` object but returning None if not found. Avoids the need
to be doing try/except {ConfigParser Exceptions} every time.
"""
try:
#Use full parent function so we can replace it in the class
# if desired
return configparser.RawConfigParser.get(self, section, key)
except (configparser.NoSectionError,
configparser.NoOptionError):
return None | Attempt to get a configuration value from a certain section
in a ``cfg`` object but returning None if not found. Avoids the need
to be doing try/except {ConfigParser Exceptions} every time. | Below is the instruction that describes the task:
### Input:
Attempt to get a configuration value from a certain section
in a ``cfg`` object but returning None if not found. Avoids the need
to be doing try/except {ConfigParser Exceptions} every time.
### Response:
def safe_get(self, section, key):
"""
Attempt to get a configuration value from a certain section
in a ``cfg`` object but returning None if not found. Avoids the need
to be doing try/except {ConfigParser Exceptions} every time.
"""
try:
#Use full parent function so we can replace it in the class
# if desired
return configparser.RawConfigParser.get(self, section, key)
except (configparser.NoSectionError,
configparser.NoOptionError):
return None |
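A self-contained sketch contrasting safe_get() with the stock get(); it simply reuses the method body above on a small in-memory config.

import configparser

class SafeParser(configparser.RawConfigParser):
    def safe_get(self, section, key):
        # Same body as above: swallow the two lookup errors and return None.
        try:
            return configparser.RawConfigParser.get(self, section, key)
        except (configparser.NoSectionError, configparser.NoOptionError):
            return None

cfg = SafeParser()
cfg.read_string("[main]\nname = demo\n")
assert cfg.safe_get("main", "name") == "demo"
assert cfg.safe_get("main", "missing") is None   # plain get() would raise NoOptionError
assert cfg.safe_get("absent", "name") is None    # plain get() would raise NoSectionError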
def create_meta_main(create_path, config, role, categories):
"""
Create a meta template.
"""
meta_file = c.DEFAULT_META_FILE.replace(
"%author_name", config["author_name"])
meta_file = meta_file.replace(
"%author_company", config["author_company"])
meta_file = meta_file.replace("%license_type", config["license_type"])
meta_file = meta_file.replace("%role_name", role)
# Normalize the category so %categories always gets replaced.
if not categories:
categories = ""
meta_file = meta_file.replace("%categories", categories)
string_to_file(create_path, meta_file) | Create a meta template. | Below is the instruction that describes the task:
### Input:
Create a meta template.
### Response:
def create_meta_main(create_path, config, role, categories):
"""
Create a meta template.
"""
meta_file = c.DEFAULT_META_FILE.replace(
"%author_name", config["author_name"])
meta_file = meta_file.replace(
"%author_company", config["author_company"])
meta_file = meta_file.replace("%license_type", config["license_type"])
meta_file = meta_file.replace("%role_name", role)
# Normalize the category so %categories always gets replaced.
if not categories:
categories = ""
meta_file = meta_file.replace("%categories", categories)
string_to_file(create_path, meta_file) |
def _pusher_connect_handler(self, data):
"""Event handler for the connection_established event. Binds the
shortlink_scanned event
"""
self.channel = self.pusher.subscribe(self.pos_callback_chan)
for listener in self.pusher_connected_listeners:
listener(data) | Event handler for the connection_established event. Binds the
shortlink_scanned event | Below is the instruction that describes the task:
### Input:
Event handler for the connection_established event. Binds the
shortlink_scanned event
### Response:
def _pusher_connect_handler(self, data):
"""Event handler for the connection_established event. Binds the
shortlink_scanned event
"""
self.channel = self.pusher.subscribe(self.pos_callback_chan)
for listener in self.pusher_connected_listeners:
listener(data) |
def t_NAME(self,t):
'[A-Za-z]\w*|\"char\"'
# warning: this allows stuff like SeLeCt with mixed case. who cares.
t.type = KEYWORDS[t.value.lower()] if t.value.lower() in KEYWORDS else 'BOOL' if t.value.lower() in ('is','not') else 'NAME'
return t | [A-Za-z]\w*|\"char\" | Below is the instruction that describes the task:
### Input:
[A-Za-z]\w*|\"char\"
### Response:
def t_NAME(self,t):
'[A-Za-z]\w*|\"char\"'
# warning: this allows stuff like SeLeCt with mixed case. who cares.
t.type = KEYWORDS[t.value.lower()] if t.value.lower() in KEYWORDS else 'BOOL' if t.value.lower() in ('is','not') else 'NAME'
return t |
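The classification in t_NAME, pulled out as a plain function for illustration; the KEYWORDS table is not part of the record, so the mapping below is an assumption.

# Stand-in keyword table; the real lexer defines its own.
KEYWORDS = {'select': 'SELECT', 'from': 'FROM', 'where': 'WHERE'}

def classify(value):
    # Mirrors the t_NAME body: keyword lookup first, then IS/NOT as BOOL, else NAME.
    lower = value.lower()
    if lower in KEYWORDS:
        return KEYWORDS[lower]
    if lower in ('is', 'not'):
        return 'BOOL'
    return 'NAME'

assert classify('SeLeCt') == 'SELECT'   # mixed case still matches, as the comment warns
assert classify('not') == 'BOOL'
assert classify('users') == 'NAME'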
def write(self, data, **keys):
"""
Write data into this HDU
parameters
----------
data: ndarray or list of ndarray
A numerical python array. Should be an ordinary array for image
HDUs, should have fields for tables. To write an ordinary array to
a column in a table HDU, use write_column. If data already exists
in this HDU, it will be overwritten. See the append() method to
append new rows to a table HDU.
firstrow: integer, optional
At which row you should begin writing to tables. Be sure you know
what you are doing! For appending see the append() method.
Default 0.
columns: list, optional
If data is a list of arrays, you must send columns as a list
of names or column numbers
You can also send names=
names: list, optional
same as columns=
"""
slow = keys.get('slow', False)
isrec = False
if isinstance(data, (list, dict)):
if isinstance(data, list):
data_list = data
columns_all = keys.get('columns', None)
if columns_all is None:
columns_all = keys.get('names', None)
if columns_all is None:
raise ValueError(
"you must send columns with a list of arrays")
else:
columns_all = list(data.keys())
data_list = [data[n] for n in columns_all]
colnums_all = [self._extract_colnum(c) for c in columns_all]
names = [self.get_colname(c) for c in colnums_all]
isobj = numpy.zeros(len(data_list), dtype=numpy.bool)
for i in xrange(len(data_list)):
isobj[i] = is_object(data_list[i])
else:
if data.dtype.fields is None:
raise ValueError("You are writing to a table, so I expected "
"an array with fields as input. If you want "
"to write a simple array, you should use "
"write_column to write to a single column, "
"or instead write to an image hdu")
if data.shape is ():
raise ValueError("cannot write data with shape ()")
isrec = True
names = data.dtype.names
# only write object types (variable-length columns) after
# writing the main table
isobj = fields_are_object(data)
data_list = []
colnums_all = []
for i, name in enumerate(names):
colnum = self._extract_colnum(name)
data_list.append(data[name])
colnums_all.append(colnum)
if slow:
for i, name in enumerate(names):
if not isobj[i]:
self.write_column(name, data_list[i], **keys)
else:
nonobj_colnums = []
nonobj_arrays = []
for i in xrange(len(data_list)):
if not isobj[i]:
nonobj_colnums.append(colnums_all[i])
if isrec:
# this still leaves possibility of f-order sub-arrays..
colref = array_to_native(data_list[i], inplace=False)
else:
colref = array_to_native_c(data_list[i], inplace=False)
if IS_PY3 and colref.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
colref = colref.astype('S', copy=False)
nonobj_arrays.append(colref)
for tcolnum, tdata in zip(nonobj_colnums, nonobj_arrays):
self._verify_column_data(tcolnum, tdata)
if len(nonobj_arrays) > 0:
firstrow = keys.get('firstrow', 0)
self._FITS.write_columns(
self._ext+1, nonobj_colnums, nonobj_arrays,
firstrow=firstrow+1, write_bitcols=self.write_bitcols)
# writing the object arrays always occurs the same way
# need to make sure this works for array fields
for i, name in enumerate(names):
if isobj[i]:
self.write_var_column(name, data_list[i], **keys)
self._update_info() | Write data into this HDU
parameters
----------
data: ndarray or list of ndarray
A numerical python array. Should be an ordinary array for image
HDUs, should have fields for tables. To write an ordinary array to
a column in a table HDU, use write_column. If data already exists
in this HDU, it will be overwritten. See the append() method to
append new rows to a table HDU.
firstrow: integer, optional
At which row you should begin writing to tables. Be sure you know
what you are doing! For appending see the append() method.
Default 0.
columns: list, optional
If data is a list of arrays, you must send columns as a list
of names or column numbers
You can also send names=
names: list, optional
same as columns= | Below is the instruction that describes the task:
### Input:
Write data into this HDU
parameters
----------
data: ndarray or list of ndarray
A numerical python array. Should be an ordinary array for image
HDUs, should have fields for tables. To write an ordinary array to
a column in a table HDU, use write_column. If data already exists
in this HDU, it will be overwritten. See the append() method to
append new rows to a table HDU.
firstrow: integer, optional
At which row you should begin writing to tables. Be sure you know
what you are doing! For appending see the append() method.
Default 0.
columns: list, optional
If data is a list of arrays, you must send columns as a list
of names or column numbers
You can also send names=
names: list, optional
same as columns=
### Response:
def write(self, data, **keys):
"""
Write data into this HDU
parameters
----------
data: ndarray or list of ndarray
A numerical python array. Should be an ordinary array for image
HDUs, should have fields for tables. To write an ordinary array to
a column in a table HDU, use write_column. If data already exists
in this HDU, it will be overwritten. See the append() method to
append new rows to a table HDU.
firstrow: integer, optional
At which row you should begin writing to tables. Be sure you know
what you are doing! For appending see the append() method.
Default 0.
columns: list, optional
If data is a list of arrays, you must send columns as a list
of names or column numbers
You can also send names=
names: list, optional
same as columns=
"""
slow = keys.get('slow', False)
isrec = False
if isinstance(data, (list, dict)):
if isinstance(data, list):
data_list = data
columns_all = keys.get('columns', None)
if columns_all is None:
columns_all = keys.get('names', None)
if columns_all is None:
raise ValueError(
"you must send columns with a list of arrays")
else:
columns_all = list(data.keys())
data_list = [data[n] for n in columns_all]
colnums_all = [self._extract_colnum(c) for c in columns_all]
names = [self.get_colname(c) for c in colnums_all]
isobj = numpy.zeros(len(data_list), dtype=numpy.bool)
for i in xrange(len(data_list)):
isobj[i] = is_object(data_list[i])
else:
if data.dtype.fields is None:
raise ValueError("You are writing to a table, so I expected "
"an array with fields as input. If you want "
"to write a simple array, you should use "
"write_column to write to a single column, "
"or instead write to an image hdu")
if data.shape is ():
raise ValueError("cannot write data with shape ()")
isrec = True
names = data.dtype.names
# only write object types (variable-length columns) after
# writing the main table
isobj = fields_are_object(data)
data_list = []
colnums_all = []
for i, name in enumerate(names):
colnum = self._extract_colnum(name)
data_list.append(data[name])
colnums_all.append(colnum)
if slow:
for i, name in enumerate(names):
if not isobj[i]:
self.write_column(name, data_list[i], **keys)
else:
nonobj_colnums = []
nonobj_arrays = []
for i in xrange(len(data_list)):
if not isobj[i]:
nonobj_colnums.append(colnums_all[i])
if isrec:
# this still leaves possibility of f-order sub-arrays..
colref = array_to_native(data_list[i], inplace=False)
else:
colref = array_to_native_c(data_list[i], inplace=False)
if IS_PY3 and colref.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
colref = colref.astype('S', copy=False)
nonobj_arrays.append(colref)
for tcolnum, tdata in zip(nonobj_colnums, nonobj_arrays):
self._verify_column_data(tcolnum, tdata)
if len(nonobj_arrays) > 0:
firstrow = keys.get('firstrow', 0)
self._FITS.write_columns(
self._ext+1, nonobj_colnums, nonobj_arrays,
firstrow=firstrow+1, write_bitcols=self.write_bitcols)
# writing the object arrays always occurs the same way
# need to make sure this works for array fields
for i, name in enumerate(names):
if isobj[i]:
self.write_var_column(name, data_list[i], **keys)
self._update_info() |
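A hedged usage sketch for the table write() above; it assumes the surrounding class is a fitsio table HDU reached through a fitsio.FITS object, and the file and column names are illustrative.

import numpy as np
import fitsio  # assumption: this HDU type comes from the fitsio package

rec = np.zeros(3, dtype=[('x', 'f8'), ('name', 'S5')])
rec['x'] = [1.0, 2.0, 3.0]
rec['name'] = ['a', 'b', 'c']

with fitsio.FITS('demo.fits', 'rw', clobber=True) as fits:
    fits.write(rec)       # create a binary table HDU from a structured array
    hdu = fits[1]         # the table lands in extension 1, after the empty primary HDU
    # Overwrite with a dict of arrays, as supported by the method above...
    hdu.write({'x': rec['x'] * 2, 'name': rec['name']})
    # ...or with a list of arrays plus names= (or columns=).
    hdu.write([rec['x'], rec['name']], names=['x', 'name'])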
def publish_wp(site_name, output_file, resources, args):
"""Publish a notebook to a wordpress post, using Gutenberg blocks.
Here is what the metadata looks like, in a section of the notebook tagged 'frontmatter'
show_input: hide
github: https://github.com/sandiegodata/notebooks/blob/master/tutorial/American%20Community%20Survey.ipynb
identifier: 5c987397-a954-46ca-8743-bdcd7a71579c
featured_image: 171
authors:
- email: [email protected]
name: Eric Busboom
organization: Civic Knowledge
type: wrangler
tags:
- Tag1
- Tag2
categories:
- Demographics
- Tutorial
'Featured_image' is an attachment id
"""
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.methods.media import UploadFile, GetMediaLibrary
from wordpress_xmlrpc.methods.posts import NewPost, EditPost, GetPost
# http://busboom.org/wptest/wp-content/uploads/sites/7/2017/11/output_16_0-300x200.png
url, user, password = get_site_config(site_name)
meta = {}
for r in resources:
if r.endswith('.json'):
with open(r) as f:
meta = json.load(f)
fm = meta.get('frontmatter',{})
if not 'identifier' in fm or not fm['identifier']:
err("Can't publish notebook without a unique identifier. Add this to the "
"Metatab document or frontmatter metadata:\n identifier: {}".format(str(uuid4())))
wp = Client(url, user, password)
post = find_post(wp, fm['identifier'])
if post:
prt("Updating old post")
else:
post = WordPressPost()
post.id = wp.call(NewPost(post))
prt("Creating new post")
post.title = fm.get('title','')
post.slug = fm.get('slug')
with open(output_file) as f:
content = f.read()
post.terms_names = {
'post_tag': fm.get('tags',[]),
'category': fm.get('categories',[])
}
if args.header:
print(yaml.dump(fm, default_flow_style=False))
set_custom_field(post, 'identifier', fm['identifier'])
post.excerpt = fm.get('excerpt', fm.get('brief', fm.get('description')))
def strip_image_name(n):
"""Strip off the version number from the media file"""
from os.path import splitext
import re
return re.sub(r'\-\d+$','',splitext(n)[0])
extant_files = list(wp.call(GetMediaLibrary(dict(parent_id=post.id))))
def find_extant_image(image_name):
for img in extant_files:
if strip_image_name(basename(img.metadata['file'])) == strip_image_name(image_name):
return img
return None
for r in resources:
image_data = prepare_image(fm['identifier'], r, post.id)
img_from = "/{}/{}".format(fm['slug'], basename(r))
extant_image = find_extant_image(image_data['name'])
if extant_image:
prt("Post already has image:", extant_image.id, extant_image.link)
img_to = extant_image.link
elif r.endswith('.png'): # Foolishly assuming all images are PNGs
response = wp.call(UploadFile(image_data, overwrite=True))
prt("Uploaded image {} to id={}, {}".format(basename(r), response['id'], response['link']))
img_to = response['link']
content = content.replace(img_from, img_to)
if fm.get('featured_image') and fm.get('featured_image').strip():
post.thumbnail = int(fm['featured_image'])
elif hasattr(post, 'thumbnail') and isinstance(post.thumbnail, dict):
# The thumbnail expects an attachment id on EditPost, but returns a dict on GetPost
post.thumbnail = post.thumbnail['attachment_id']
post.content = content
r = wp.call(EditPost(post.id, post))
return r, wp.call(GetPost(post.id)) | Publish a notebook to a wordpress post, using Gutenberg blocks.
Here is what the metadata looks like, in a section of the notebook tagged 'frontmatter'
show_input: hide
github: https://github.com/sandiegodata/notebooks/blob/master/tutorial/American%20Community%20Survey.ipynb
identifier: 5c987397-a954-46ca-8743-bdcd7a71579c
featured_image: 171
authors:
- email: [email protected]
name: Eric Busboom
organization: Civic Knowledge
type: wrangler
tags:
- Tag1
- Tag2
categories:
- Demographics
- Tutorial
'Featured_image' is an attachment id | Below is the instruction that describes the task:
### Input:
Publish a notebook to a wordpress post, using Gutenberg blocks.
Here is what the metadata looks like, in a section of the notebook tagged 'frontmatter'
show_input: hide
github: https://github.com/sandiegodata/notebooks/blob/master/tutorial/American%20Community%20Survey.ipynb
identifier: 5c987397-a954-46ca-8743-bdcd7a71579c
featured_image: 171
authors:
- email: [email protected]
name: Eric Busboom
organization: Civic Knowledge
type: wrangler
tags:
- Tag1
- Tag2
categories:
- Demographics
- Tutorial
'Featured_image' is an attachment id
### Response:
def publish_wp(site_name, output_file, resources, args):
"""Publish a notebook to a wordpress post, using Gutenberg blocks.
Here is what the metadata looks like, in a section of the notebook tagged 'frontmatter'
show_input: hide
github: https://github.com/sandiegodata/notebooks/blob/master/tutorial/American%20Community%20Survey.ipynb
identifier: 5c987397-a954-46ca-8743-bdcd7a71579c
featured_image: 171
authors:
- email: [email protected]
name: Eric Busboom
organization: Civic Knowledge
type: wrangler
tags:
- Tag1
- Tag2
categories:
- Demographics
- Tutorial
'Featured_image' is an attachment id
"""
from wordpress_xmlrpc import Client, WordPressPost
from wordpress_xmlrpc.methods.media import UploadFile, GetMediaLibrary
from wordpress_xmlrpc.methods.posts import NewPost, EditPost, GetPost
# http://busboom.org/wptest/wp-content/uploads/sites/7/2017/11/output_16_0-300x200.png
url, user, password = get_site_config(site_name)
meta = {}
for r in resources:
if r.endswith('.json'):
with open(r) as f:
meta = json.load(f)
fm = meta.get('frontmatter',{})
if not 'identifier' in fm or not fm['identifier']:
err("Can't publish notebook without a unique identifier. Add this to the "
"Metatab document or frontmatter metadata:\n identifier: {}".format(str(uuid4())))
wp = Client(url, user, password)
post = find_post(wp, fm['identifier'])
if post:
prt("Updating old post")
else:
post = WordPressPost()
post.id = wp.call(NewPost(post))
prt("Creating new post")
post.title = fm.get('title','')
post.slug = fm.get('slug')
with open(output_file) as f:
content = f.read()
post.terms_names = {
'post_tag': fm.get('tags',[]),
'category': fm.get('categories',[])
}
if args.header:
print(yaml.dump(fm, default_flow_style=False))
set_custom_field(post, 'identifier', fm['identifier'])
post.excerpt = fm.get('excerpt', fm.get('brief', fm.get('description')))
def strip_image_name(n):
"""Strip off the version number from the media file"""
from os.path import splitext
import re
return re.sub(r'\-\d+$','',splitext(n)[0])
extant_files = list(wp.call(GetMediaLibrary(dict(parent_id=post.id))))
def find_extant_image(image_name):
for img in extant_files:
if strip_image_name(basename(img.metadata['file'])) == strip_image_name(image_name):
return img
return None
for r in resources:
image_data = prepare_image(fm['identifier'], r, post.id)
img_from = "/{}/{}".format(fm['slug'], basename(r))
extant_image = find_extant_image(image_data['name'])
if extant_image:
prt("Post already has image:", extant_image.id, extant_image.link)
img_to = extant_image.link
elif r.endswith('.png'): # Foolishly assuming all images are PNGs
response = wp.call(UploadFile(image_data, overwrite=True))
prt("Uploaded image {} to id={}, {}".format(basename(r), response['id'], response['link']))
img_to = response['link']
content = content.replace(img_from, img_to)
if fm.get('featured_image') and fm.get('featured_image').strip():
post.thumbnail = int(fm['featured_image'])
elif hasattr(post, 'thumbnail') and isinstance(post.thumbnail, dict):
# The thumbnail expects an attachment id on EditPost, but returns a dict on GetPost
post.thumbnail = post.thumbnail['attachment_id']
post.content = content
r = wp.call(EditPost(post.id, post))
return r, wp.call(GetPost(post.id)) |
def blake2b(data, digest_size=BLAKE2B_BYTES, key=b'',
salt=b'', person=b'',
encoder=nacl.encoding.HexEncoder):
"""
Hashes ``data`` with blake2b.
:param data: the digest input byte sequence
:type data: bytes
:param digest_size: the requested digest size; must be at most
:const:`BLAKE2B_BYTES_MAX`;
the default digest size is
:const:`BLAKE2B_BYTES`
:type digest_size: int
:param key: the key to be set for keyed MAC/PRF usage; if set, the key
must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long
:type key: bytes
:param salt: an initialization salt at most
:const:`BLAKE2B_SALTBYTES` long;
it will be zero-padded if needed
:type salt: bytes
:param person: a personalization string at most
:const:`BLAKE2B_PERSONALBYTES` long;
it will be zero-padded if needed
:type person: bytes
:param encoder: the encoder to use on returned digest
:type encoder: class
:returns: The hashed message.
:rtype: bytes
"""
digest = _b2b_hash(data, digest_size=digest_size, key=key,
salt=salt, person=person)
return encoder.encode(digest) | Hashes ``data`` with blake2b.
:param data: the digest input byte sequence
:type data: bytes
:param digest_size: the requested digest size; must be at most
:const:`BLAKE2B_BYTES_MAX`;
the default digest size is
:const:`BLAKE2B_BYTES`
:type digest_size: int
:param key: the key to be set for keyed MAC/PRF usage; if set, the key
must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long
:type key: bytes
:param salt: an initialization salt at most
:const:`BLAKE2B_SALTBYTES` long;
it will be zero-padded if needed
:type salt: bytes
:param person: a personalization string at most
:const:`BLAKE2B_PERSONALBYTES` long;
it will be zero-padded if needed
:type person: bytes
:param encoder: the encoder to use on returned digest
:type encoder: class
:returns: The hashed message.
:rtype: bytes | Below is the instruction that describes the task:
### Input:
Hashes ``data`` with blake2b.
:param data: the digest input byte sequence
:type data: bytes
:param digest_size: the requested digest size; must be at most
:const:`BLAKE2B_BYTES_MAX`;
the default digest size is
:const:`BLAKE2B_BYTES`
:type digest_size: int
:param key: the key to be set for keyed MAC/PRF usage; if set, the key
must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long
:type key: bytes
:param salt: an initialization salt at most
:const:`BLAKE2B_SALTBYTES` long;
it will be zero-padded if needed
:type salt: bytes
:param person: a personalization string at most
:const:`BLAKE2B_PERSONALBYTES` long;
it will be zero-padded if needed
:type person: bytes
:param encoder: the encoder to use on returned digest
:type encoder: class
:returns: The hashed message.
:rtype: bytes
### Response:
def blake2b(data, digest_size=BLAKE2B_BYTES, key=b'',
salt=b'', person=b'',
encoder=nacl.encoding.HexEncoder):
"""
Hashes ``data`` with blake2b.
:param data: the digest input byte sequence
:type data: bytes
:param digest_size: the requested digest size; must be at most
:const:`BLAKE2B_BYTES_MAX`;
the default digest size is
:const:`BLAKE2B_BYTES`
:type digest_size: int
:param key: the key to be set for keyed MAC/PRF usage; if set, the key
must be at most :data:`~nacl.hash.BLAKE2B_KEYBYTES_MAX` long
:type key: bytes
:param salt: an initialization salt at most
:const:`BLAKE2B_SALTBYTES` long;
it will be zero-padded if needed
:type salt: bytes
:param person: a personalization string at most
:const:`BLAKE2B_PERSONALBYTES` long;
it will be zero-padded if needed
:type person: bytes
:param encoder: the encoder to use on returned digest
:type encoder: class
:returns: The hashed message.
:rtype: bytes
"""
digest = _b2b_hash(data, digest_size=digest_size, key=key,
salt=salt, person=person)
return encoder.encode(digest) |
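A short usage sketch, assuming this is PyNaCl's nacl.hash module (the import paths below are that assumption; the keyword arguments come from the signature above).

import nacl.encoding
import nacl.hash

msg = b'hello doge'
digest = nacl.hash.blake2b(msg)   # hex-encoded by default
mac1 = nacl.hash.blake2b(msg, key=b'key-one', encoder=nacl.encoding.HexEncoder)
mac2 = nacl.hash.blake2b(msg, key=b'key-two', encoder=nacl.encoding.HexEncoder)
assert mac1 != mac2   # keyed use behaves like a MAC: different keys, different digests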
def load_df_from_file(file_path, sep=",", header=0):
"""Wrapper around pandas' read_csv."""
with tf.gfile.Open(file_path) as infile:
df = pd.read_csv(infile, sep=sep, header=header)
return df | Wrapper around pandas' read_csv. | Below is the instruction that describes the task:
### Input:
Wrapper around pandas' read_csv.
### Response:
def load_df_from_file(file_path, sep=",", header=0):
"""Wrapper around pandas' read_csv."""
with tf.gfile.Open(file_path) as infile:
df = pd.read_csv(infile, sep=sep, header=header)
return df |
def set_representative_sequence(self, force_rerun=False):
"""Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative
sequence.
Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
except when KEGG mappings have PDBs associated with them and UniProt doesn't.
Args:
force_rerun (bool): Set to True to recheck stored sequences
Returns:
SeqProp: Which sequence was set as representative
"""
if len(self.sequences) == 0:
log.error('{}: no sequences mapped'.format(self.id))
return self.representative_sequence
kegg_mappings = self.filter_sequences(KEGGProp)
if len(kegg_mappings) > 0:
kegg_to_use = kegg_mappings[0]
if len(kegg_mappings) > 1:
log.warning('{}: multiple KEGG mappings found, using the first entry {}'.format(self.id, kegg_to_use.id))
uniprot_mappings = self.filter_sequences(UniProtProp)
# If a representative sequence has already been set, nothing needs to be done
if self.representative_sequence and not force_rerun:
log.debug('{}: representative sequence already set'.format(self.id))
# If there is a KEGG annotation and no UniProt annotations, set KEGG as representative
elif len(kegg_mappings) > 0 and len(uniprot_mappings) == 0:
self.representative_sequence = kegg_to_use
log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id))
# If there are UniProt annotations and no KEGG annotations, set UniProt as representative
elif len(kegg_mappings) == 0 and len(uniprot_mappings) > 0:
# If there are multiple uniprots rank them by the sum of reviewed (bool) + num_pdbs
# This way, UniProts with PDBs get ranked to the top, or if no PDBs, reviewed entries
u_ranker = []
for u in uniprot_mappings:
u_ranker.append((u.id, u.ranking_score()))
sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True)
best_u_id = sorted_by_second[0][0]
best_u = uniprot_mappings.get_by_id(best_u_id)
self.representative_sequence = best_u
log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id))
# If there are both UniProt and KEGG annotations...
elif len(kegg_mappings) > 0 and len(uniprot_mappings) > 0:
# Use KEGG if the mapped UniProt is unique, and it has PDBs
if kegg_to_use.num_pdbs > 0 and not uniprot_mappings.has_id(kegg_to_use.uniprot):
self.representative_sequence = kegg_to_use
log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id))
else:
# If there are multiple uniprots rank them by the sum of reviewed (bool) + num_pdbs
u_ranker = []
for u in uniprot_mappings:
u_ranker.append((u.id, u.ranking_score()))
sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True)
best_u_id = sorted_by_second[0][0]
best_u = uniprot_mappings.get_by_id(best_u_id)
self.representative_sequence = best_u
log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id))
return self.representative_sequence | Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative
sequence.
Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
except when KEGG mappings have PDBs associated with them and UniProt doesn't.
Args:
force_rerun (bool): Set to True to recheck stored sequences
Returns:
SeqProp: Which sequence was set as representative | Below is the instruction that describes the task:
### Input:
Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative
sequence.
Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
except when KEGG mappings have PDBs associated with them and UniProt doesn't.
Args:
force_rerun (bool): Set to True to recheck stored sequences
Returns:
SeqProp: Which sequence was set as representative
### Response:
def set_representative_sequence(self, force_rerun=False):
"""Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative
sequence.
Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
except when KEGG mappings have PDBs associated with them and UniProt doesn't.
Args:
force_rerun (bool): Set to True to recheck stored sequences
Returns:
SeqProp: Which sequence was set as representative
"""
if len(self.sequences) == 0:
log.error('{}: no sequences mapped'.format(self.id))
return self.representative_sequence
kegg_mappings = self.filter_sequences(KEGGProp)
if len(kegg_mappings) > 0:
kegg_to_use = kegg_mappings[0]
if len(kegg_mappings) > 1:
log.warning('{}: multiple KEGG mappings found, using the first entry {}'.format(self.id, kegg_to_use.id))
uniprot_mappings = self.filter_sequences(UniProtProp)
# If a representative sequence has already been set, nothing needs to be done
if self.representative_sequence and not force_rerun:
log.debug('{}: representative sequence already set'.format(self.id))
# If there is a KEGG annotation and no UniProt annotations, set KEGG as representative
elif len(kegg_mappings) > 0 and len(uniprot_mappings) == 0:
self.representative_sequence = kegg_to_use
log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id))
# If there are UniProt annotations and no KEGG annotations, set UniProt as representative
elif len(kegg_mappings) == 0 and len(uniprot_mappings) > 0:
# If there are multiple uniprots rank them by the sum of reviewed (bool) + num_pdbs
# This way, UniProts with PDBs get ranked to the top, or if no PDBs, reviewed entries
u_ranker = []
for u in uniprot_mappings:
u_ranker.append((u.id, u.ranking_score()))
sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True)
best_u_id = sorted_by_second[0][0]
best_u = uniprot_mappings.get_by_id(best_u_id)
self.representative_sequence = best_u
log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id))
# If there are both UniProt and KEGG annotations...
elif len(kegg_mappings) > 0 and len(uniprot_mappings) > 0:
# Use KEGG if the mapped UniProt is unique, and it has PDBs
if kegg_to_use.num_pdbs > 0 and not uniprot_mappings.has_id(kegg_to_use.uniprot):
self.representative_sequence = kegg_to_use
log.debug('{}: representative sequence set from KEGG ID {}'.format(self.id, kegg_to_use.id))
else:
# If there are multiple uniprots rank them by the sum of reviewed (bool) + num_pdbs
u_ranker = []
for u in uniprot_mappings:
u_ranker.append((u.id, u.ranking_score()))
sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True)
best_u_id = sorted_by_second[0][0]
best_u = uniprot_mappings.get_by_id(best_u_id)
self.representative_sequence = best_u
log.debug('{}: representative sequence set from UniProt ID {}'.format(self.id, best_u_id))
return self.representative_sequence |
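The UniProt tie-breaking step above, isolated as a tiny sketch; the IDs and scores are made up, and per the comments in the method the real ranking_score() is the sum of reviewed (bool) and num_pdbs.

# Hypothetical (id, ranking_score()) pairs.
u_ranker = [('P00001', 1), ('P00002', 4), ('P00003', 2)]
sorted_by_second = sorted(u_ranker, key=lambda tup: tup[1], reverse=True)
best_u_id = sorted_by_second[0][0]
assert best_u_id == 'P00002'   # the highest-scoring entry becomes the representative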
def udot(op1, op2):
"""Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
Examples
--------
>>> from unyt import km, s
>>> a = np.eye(2)*km
>>> b = (np.ones((2, 2)) * 2)*s
>>> print(udot(a, b))
[[2. 2.]
[2. 2.]] km*s
"""
dot = np.dot(op1.d, op2.d)
units = op1.units * op2.units
if dot.shape == ():
return unyt_quantity(dot, units)
return unyt_array(dot, units) | Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
Examples
--------
>>> from unyt import km, s
>>> a = np.eye(2)*km
>>> b = (np.ones((2, 2)) * 2)*s
>>> print(udot(a, b))
[[2. 2.]
[2. 2.]] km*s | Below is the instruction that describes the task:
### Input:
Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
Examples
--------
>>> from unyt import km, s
>>> a = np.eye(2)*km
>>> b = (np.ones((2, 2)) * 2)*s
>>> print(udot(a, b))
[[2. 2.]
[2. 2.]] km*s
### Response:
def udot(op1, op2):
"""Matrix or vector dot product that preserves units
This is a wrapper around np.dot that preserves units.
Examples
--------
>>> from unyt import km, s
>>> a = np.eye(2)*km
>>> b = (np.ones((2, 2)) * 2)*s
>>> print(udot(a, b))
[[2. 2.]
[2. 2.]] km*s
"""
dot = np.dot(op1.d, op2.d)
units = op1.units * op2.units
if dot.shape == ():
return unyt_quantity(dot, units)
return unyt_array(dot, units) |
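A vector-dot sketch to complement the matrix doctest above, assuming the unyt package as in the example.

import numpy as np
from unyt import km, s

v1 = np.array([1.0, 2.0, 3.0]) * km
v2 = np.array([4.0, 5.0, 6.0]) * s
q = udot(v1, v2)   # 1-D inputs hit the shape-() branch, so a unyt_quantity comes back
# q holds the value 1*4 + 2*5 + 3*6 = 32 with the combined units km*s.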
def replace(self, v):
"""Replace an individual selected by negative tournament selection with
individual v"""
if self.popsize < self._popsize:
return self.add(v)
k = self.tournament(negative=True)
self.clean(self.population[k])
self.population[k] = v
v.position = len(self._hist)
self._hist.append(v)
self.bsf = v
self.estopping = v
self._inds_replace += 1
self._density += self.get_density(v)
if self._inds_replace == self._popsize:
self._inds_replace = 0
self.generation += 1
gc.collect() | Replace an individual selected by negative tournament selection with
individual v | Below is the instruction that describes the task:
### Input:
Replace an individual selected by negative tournament selection with
individual v
### Response:
def replace(self, v):
"""Replace an individual selected by negative tournament selection with
individual v"""
if self.popsize < self._popsize:
return self.add(v)
k = self.tournament(negative=True)
self.clean(self.population[k])
self.population[k] = v
v.position = len(self._hist)
self._hist.append(v)
self.bsf = v
self.estopping = v
self._inds_replace += 1
self._density += self.get_density(v)
if self._inds_replace == self._popsize:
self._inds_replace = 0
self.generation += 1
gc.collect() |
def get_notebook_name() -> str:
"""Return the full path of the jupyter notebook.
References
----------
https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246
"""
kernel_id = re.search( # type: ignore
'kernel-(.*).json',
ipykernel.connect.get_connection_file()
).group(1)
servers = list_running_servers()
for server in servers:
response = requests.get(urljoin(server['url'], 'api/sessions'),
params={'token': server.get('token', '')})
for session in json.loads(response.text):
if session['kernel']['id'] == kernel_id:
relative_path = session['notebook']['path']
return pjoin(server['notebook_dir'], relative_path)
raise Exception('Notebook not found.') | Return the full path of the jupyter notebook.
References
----------
https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246 | Below is the instruction that describes the task:
### Input:
Return the full path of the jupyter notebook.
References
----------
https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246
### Response:
def get_notebook_name() -> str:
"""Return the full path of the jupyter notebook.
References
----------
https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246
"""
kernel_id = re.search( # type: ignore
'kernel-(.*).json',
ipykernel.connect.get_connection_file()
).group(1)
servers = list_running_servers()
for server in servers:
response = requests.get(urljoin(server['url'], 'api/sessions'),
params={'token': server.get('token', '')})
for session in json.loads(response.text):
if session['kernel']['id'] == kernel_id:
relative_path = session['notebook']['path']
return pjoin(server['notebook_dir'], relative_path)
raise Exception('Notebook not found.') |
def string(self, *pattern, **kwargs):
"""
Add string pattern
:param pattern:
:type pattern:
:return: self
:rtype: Rebulk
"""
self.pattern(self.build_string(*pattern, **kwargs))
return self | Add string pattern
:param pattern:
:type pattern:
:return: self
:rtype: Rebulk | Below is the instruction that describes the task:
### Input:
Add string pattern
:param pattern:
:type pattern:
:return: self
:rtype: Rebulk
### Response:
def string(self, *pattern, **kwargs):
"""
Add string pattern
:param pattern:
:type pattern:
:return: self
:rtype: Rebulk
"""
self.pattern(self.build_string(*pattern, **kwargs))
return self |
def draw_dot(self, pos, color):
"""
Draw one single dot with the given color on the screen.
:param pos: Position of the dot
:param color: Color for the dot
:type pos: tuple
:type color: tuple
"""
if 0 <= pos[0] < self.width and 0 <= pos[1] < self.height:
self.matrix[pos[0]][pos[1]] = color | Draw one single dot with the given color on the screen.
:param pos: Position of the dot
:param color: Color for the dot
:type pos: tuple
:type color: tuple | Below is the instruction that describes the task:
### Input:
Draw one single dot with the given color on the screen.
:param pos: Position of the dot
:param color: Color for the dot
:type pos: tuple
:type color: tuple
### Response:
def draw_dot(self, pos, color):
"""
Draw one single dot with the given color on the screen.
:param pos: Position of the dot
:param color: Color for the dot
:type pos: tuple
:type color: tuple
"""
if 0 <= pos[0] < self.width and 0 <= pos[1] < self.height:
self.matrix[pos[0]][pos[1]] = color |
def PrimaryDatacenter(self):
"""Returns the primary datacenter object associated with the account.
>>> clc.v2.Account(alias='BTDI').PrimaryDatacenter()
<clc.APIv2.datacenter.Datacenter instance at 0x10a45ce18>
>>> print _
WA1
"""
return(clc.v2.Datacenter(alias=self.alias,location=self.data['primaryDataCenter'], session=self.session)) | Returns the primary datacenter object associated with the account.
>>> clc.v2.Account(alias='BTDI').PrimaryDatacenter()
<clc.APIv2.datacenter.Datacenter instance at 0x10a45ce18>
>>> print _
WA1 | Below is the instruction that describes the task:
### Input:
Returns the primary datacenter object associated with the account.
>>> clc.v2.Account(alias='BTDI').PrimaryDatacenter()
<clc.APIv2.datacenter.Datacenter instance at 0x10a45ce18>
>>> print _
WA1
### Response:
def PrimaryDatacenter(self):
"""Returns the primary datacenter object associated with the account.
>>> clc.v2.Account(alias='BTDI').PrimaryDatacenter()
<clc.APIv2.datacenter.Datacenter instance at 0x10a45ce18>
>>> print _
WA1
"""
return(clc.v2.Datacenter(alias=self.alias,location=self.data['primaryDataCenter'], session=self.session)) |
def begin(self, sql=None):
"""Begin a transaction."""
self._transaction = True
try:
begin = self._con.begin
except AttributeError:
return self._con.query(sql or 'begin')
else:
# use existing method if available
if sql:
return begin(sql=sql)
else:
return begin() | Begin a transaction. | Below is the the instruction that describes the task:
### Input:
Begin a transaction.
### Response:
def begin(self, sql=None):
"""Begin a transaction."""
self._transaction = True
try:
begin = self._con.begin
except AttributeError:
return self._con.query(sql or 'begin')
else:
# use existing method if available
if sql:
return begin(sql=sql)
else:
return begin() |
def OnMeasureItem(self, item):
"""Returns the height of the items in the popup"""
item_name = self.GetItems()[item]
return icons[item_name].GetHeight() | Returns the height of the items in the popup | Below is the instruction that describes the task:
### Input:
Returns the height of the items in the popup
### Response:
def OnMeasureItem(self, item):
"""Returns the height of the items in the popup"""
item_name = self.GetItems()[item]
return icons[item_name].GetHeight() |
def _clean_data(cls, *args, **kwargs):
"""
Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays).
"""
datadict = cls.clean(*args, **kwargs)
if 'data' in datadict:
data = datadict['data']
data = cls._ensure_dict_or_list(data)
else:
data = {}
for key in datadict:
if key == 'images':
data[key] = datadict[key]
else:
d = cls._ensure_dict_or_list(datadict[key])
data[key] = cls._check_unkeyed_arrays(key, d)
return data | Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays). | Below is the instruction that describes the task:
### Input:
Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays).
### Response:
def _clean_data(cls, *args, **kwargs):
"""
Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays).
"""
datadict = cls.clean(*args, **kwargs)
if 'data' in datadict:
data = datadict['data']
data = cls._ensure_dict_or_list(data)
else:
data = {}
for key in datadict:
if key == 'images':
data[key] = datadict[key]
else:
d = cls._ensure_dict_or_list(datadict[key])
data[key] = cls._check_unkeyed_arrays(key, d)
return data |
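A minimal sketch of the contract this classmethod expects from a plot type: clean() returns either a ready-made {'data': ...} payload or keyed arrays to be labeled. The class names are illustrative and nothing here is taken from the library's public docs.

class ScatterSketch(object):
    @staticmethod
    def clean(x, y):
        # Keyed array data; _clean_data will normalize and label it via
        # _check_unkeyed_arrays('points', ...).
        return {'points': list(zip(x, y))}

class PassthroughSketch(object):
    @staticmethod
    def clean(payload):
        # Pre-built payload; handed through the 'data' branch above unchanged
        # (apart from dict/list coercion).
        return {'data': {'series': payload}}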
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists. | Below is the instruction that describes the task:
### Input:
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
### Response:
def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) |
def iter_intersecting(self, iterable, key=None, descending=False):
"""Like `iter_intersect_test`, but returns intersections only.
Returns:
An iterator that returns items from `iterable` that intersect.
"""
return _ContainsVersionIterator(self, iterable, key, descending,
mode=_ContainsVersionIterator.MODE_INTERSECTING) | Like `iter_intersect_test`, but returns intersections only.
Returns:
An iterator that returns items from `iterable` that intersect. | Below is the instruction that describes the task:
### Input:
Like `iter_intersect_test`, but returns intersections only.
Returns:
An iterator that returns items from `iterable` that intersect.
### Response:
def iter_intersecting(self, iterable, key=None, descending=False):
"""Like `iter_intersect_test`, but returns intersections only.
Returns:
An iterator that returns items from `iterable` that intersect.
"""
return _ContainsVersionIterator(self, iterable, key, descending,
mode=_ContainsVersionIterator.MODE_INTERSECTING) |
def cume_dist(expr, sort=None, ascending=True):
"""
Calculate cumulative ratio of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
"""
return _rank_op(expr, CumeDist, types.float64, sort=sort, ascending=ascending) | Calculate cumulative ratio of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column | Below is the instruction that describes the task:
### Input:
Calculate cumulative ratio of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
### Response:
def cume_dist(expr, sort=None, ascending=True):
"""
Calculate cumulative ratio of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
"""
return _rank_op(expr, CumeDist, types.float64, sort=sort, ascending=ascending) |
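A hedged call sketch; df is a placeholder for a DataFrame-style expression whose 'score' column is ranked, and only the function signature comes from the record above.

# Fraction of rows at or below each row when ordered by score, highest first.
ranked = cume_dist(df.score, sort='score', ascending=False)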
def contains_points(intersector,
points,
check_direction=None):
"""
Check if a mesh contains a set of points, using ray tests.
If the point is on the surface of the mesh, behavior is
undefined.
Parameters
---------
mesh: Trimesh object
points: (n,3) points in space
Returns
---------
contains : (n) bool
Whether point is inside mesh or not
"""
# convert points to float and make sure they are 3D
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)')
# placeholder result with no hits we'll fill in later
contains = np.zeros(len(points), dtype=np.bool)
# cull points outside of the axis aligned bounding box
# this avoids running ray tests unless points are close
inside_aabb = bounds.contains(intersector.mesh.bounds,
points)
# if everything is outside the AABB, exit early
if not inside_aabb.any():
return contains
# default ray direction is random, but we are not generating
# uniquely each time so the behavior of this function is easier to debug
default_direction = np.array([0.4395064455,
0.617598629942,
0.652231566745])
if check_direction is None:
# if no check direction is specified use the default
# stack it only for points inside the AABB
ray_directions = np.tile(default_direction,
(inside_aabb.sum(), 1))
else:
# if a direction is passed use it
ray_directions = np.tile(
np.array(check_direction).reshape(3),
(inside_aabb.sum(), 1))
# cast a ray both forwards and backwards
location, index_ray, c = intersector.intersects_location(
np.vstack(
(points[inside_aabb],
points[inside_aabb])),
np.vstack(
(ray_directions,
-ray_directions)))
# if we hit nothing in either direction just return with no hits
if len(index_ray) == 0:
return contains
# reshape so bi_hits[0] is the result in the forward direction and
# bi_hits[1] is the result in the backwards directions
bi_hits = np.bincount(
index_ray,
minlength=len(ray_directions) * 2).reshape((2, -1))
# a point is probably inside if it hits a surface an odd number of times
bi_contains = np.mod(bi_hits, 2) == 1
# if the mod of the hit count is the same in both
# directions, we can save that result and move on
agree = np.equal(*bi_contains)
# in order to do an assignment we can only have one
# level of boolean indexes, for example this doesn't work:
# contains[inside_aabb][agree] = bi_contains[0][agree]
# no error is thrown, but nothing gets assigned
# to get around that, we create a single mask for assignment
mask = inside_aabb.copy()
mask[mask] = agree
# set contains flags for things inside the AABB and who have
# ray tests that agree in both directions
contains[mask] = bi_contains[0][agree]
# if one of the rays in either direction hit nothing
# it is a very solid indicator we are in free space
# as the edge cases we are working around tend to
# add hits rather than miss hits
one_freespace = (bi_hits == 0).any(axis=0)
# rays where they don't agree and one isn't in free space
# are deemed to be broken
broken = np.logical_and(np.logical_not(agree),
np.logical_not(one_freespace))
# if all rays agree return
if not broken.any():
return contains
# try to run again with a new random vector
# only do it if check_direction isn't specified
# to avoid infinite recursion
if check_direction is None:
# we're going to run the check again in a random direction
new_direction = util.unitize(np.random.random(3) - .5)
# do the mask trick again to be able to assign results
mask = inside_aabb.copy()
mask[mask] = broken
contains[mask] = contains_points(
intersector,
points[inside_aabb][broken],
check_direction=new_direction)
constants.log.debug(
'detected %d broken contains test, attempted to fix',
broken.sum())
return contains | Check if a mesh contains a set of points, using ray tests.
If the point is on the surface of the mesh, behavior is
undefined.
Parameters
---------
mesh: Trimesh object
points: (n,3) points in space
Returns
---------
contains : (n) bool
Whether point is inside mesh or not | Below is the the instruction that describes the task:
### Input:
Check if a mesh contains a set of points, using ray tests.
If the point is on the surface of the mesh, behavior is
undefined.
Parameters
---------
mesh: Trimesh object
points: (n,3) points in space
Returns
---------
contains : (n) bool
Whether point is inside mesh or not
### Response:
def contains_points(intersector,
points,
check_direction=None):
"""
Check if a mesh contains a set of points, using ray tests.
If the point is on the surface of the mesh, behavior is
undefined.
Parameters
---------
mesh: Trimesh object
points: (n,3) points in space
Returns
---------
contains : (n) bool
Whether point is inside mesh or not
"""
# convert points to float and make sure they are 3D
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)')
# placeholder result with no hits we'll fill in later
contains = np.zeros(len(points), dtype=np.bool)
# cull points outside of the axis aligned bounding box
# this avoids running ray tests unless points are close
inside_aabb = bounds.contains(intersector.mesh.bounds,
points)
# if everything is outside the AABB, exit early
if not inside_aabb.any():
return contains
# default ray direction is random, but we are not generating
# uniquely each time so the behavior of this function is easier to debug
default_direction = np.array([0.4395064455,
0.617598629942,
0.652231566745])
if check_direction is None:
# if no check direction is specified use the default
# stack it only for points inside the AABB
ray_directions = np.tile(default_direction,
(inside_aabb.sum(), 1))
else:
# if a direction is passed use it
ray_directions = np.tile(
np.array(check_direction).reshape(3),
(inside_aabb.sum(), 1))
# cast a ray both forwards and backwards
location, index_ray, c = intersector.intersects_location(
np.vstack(
(points[inside_aabb],
points[inside_aabb])),
np.vstack(
(ray_directions,
-ray_directions)))
# if we hit nothing in either direction just return with no hits
if len(index_ray) == 0:
return contains
# reshape so bi_hits[0] is the result in the forward direction and
# bi_hits[1] is the result in the backwards directions
bi_hits = np.bincount(
index_ray,
minlength=len(ray_directions) * 2).reshape((2, -1))
# a point is probably inside if it hits a surface an odd number of times
bi_contains = np.mod(bi_hits, 2) == 1
# if the mod of the hit count is the same in both
# directions, we can save that result and move on
agree = np.equal(*bi_contains)
# in order to do an assignment we can only have one
# level of boolean indexes, for example this doesn't work:
# contains[inside_aabb][agree] = bi_contains[0][agree]
# no error is thrown, but nothing gets assigned
# to get around that, we create a single mask for assignment
mask = inside_aabb.copy()
mask[mask] = agree
# set contains flags for things inside the AABB and who have
# ray tests that agree in both directions
contains[mask] = bi_contains[0][agree]
# if one of the rays in either direction hit nothing
# it is a very solid indicator we are in free space
# as the edge cases we are working around tend to
# add hits rather than miss hits
one_freespace = (bi_hits == 0).any(axis=0)
# rays where they don't agree and one isn't in free space
# are deemed to be broken
broken = np.logical_and(np.logical_not(agree),
np.logical_not(one_freespace))
# if all rays agree return
if not broken.any():
return contains
# try to run again with a new random vector
# only do it if check_direction isn't specified
# to avoid infinite recursion
if check_direction is None:
# we're going to run the check again in a random direction
new_direction = util.unitize(np.random.random(3) - .5)
# do the mask trick again to be able to assign results
mask = inside_aabb.copy()
mask[mask] = broken
contains[mask] = contains_points(
intersector,
points[inside_aabb][broken],
check_direction=new_direction)
constants.log.debug(
'detected %d broken contains test, attempted to fix',
broken.sum())
return contains |
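A minimal, hedged sketch of the same containment test reached through trimesh's public surface, assuming trimesh and numpy are installed; the helper above is normally invoked for you by the mesh's ray intersector rather than called directly.

# hedged sketch: containment via trimesh's public API, which routes to a
# ray-intersector helper like contains_points above
import numpy as np
import trimesh

mesh = trimesh.creation.box(extents=(2.0, 2.0, 2.0))     # cube centred at the origin
points = np.array([[0.0, 0.0, 0.0],                      # inside
                   [5.0, 0.0, 0.0]])                     # outside the AABB, culled early
inside = mesh.contains(points)                           # -> array([ True, False])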
def _bool_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
assert not isinstance(y, (list, ABCSeries, ABCIndexClass))
if isinstance(y, np.ndarray):
# bool-bool dtype operations should be OK, should not get here
assert not (is_bool_dtype(x) and is_bool_dtype(y))
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x, y, op)
else:
# let null fall thru
assert lib.is_scalar(y)
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except (TypeError, ValueError, AttributeError,
OverflowError, NotImplementedError):
raise TypeError("cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{typ}]"
.format(dtype=x.dtype,
typ=type(y).__name__))
return result
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
self, other = _align_method_SERIES(self, other, align_asobject=True)
res_name = get_op_result_name(self, other)
if isinstance(other, ABCDataFrame):
# Defer to DataFrame implementation; fail early
return NotImplemented
elif isinstance(other, (ABCSeries, ABCIndexClass)):
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
ovalues = other.values
finalizer = lambda x: x
else:
# scalars, list, tuple, np.array
is_other_int_dtype = is_integer_dtype(np.asarray(other))
if is_list_like(other) and not isinstance(other, np.ndarray):
# TODO: Can we do this before the is_integer_dtype check?
# could the is_integer_dtype check be checking the wrong
# thing? e.g. other = [[0, 1], [2, 3], [4, 5]]?
other = construct_1d_object_array_from_listlike(other)
ovalues = other
finalizer = lambda x: x.__finalize__(self)
# For int vs int `^`, `|`, `&` are bitwise operators and return
# integer dtypes. Otherwise these are boolean ops
filler = (fill_int if is_self_int_dtype and is_other_int_dtype
else fill_bool)
res_values = na_op(self.values, ovalues)
unfilled = self._constructor(res_values,
index=self.index, name=res_name)
filled = filler(unfilled)
return finalizer(filled)
wrapper.__name__ = op_name
return wrapper | Wrapper function for Series arithmetic operations, to avoid
code duplication. | Below is the the instruction that describes the task:
### Input:
Wrapper function for Series arithmetic operations, to avoid
code duplication.
### Response:
def _bool_method_SERIES(cls, op, special):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
op_name = _get_op_name(op, special)
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
assert not isinstance(y, (list, ABCSeries, ABCIndexClass))
if isinstance(y, np.ndarray):
# bool-bool dtype operations should be OK, should not get here
assert not (is_bool_dtype(x) and is_bool_dtype(y))
x = ensure_object(x)
y = ensure_object(y)
result = libops.vec_binop(x, y, op)
else:
# let null fall thru
assert lib.is_scalar(y)
if not isna(y):
y = bool(y)
try:
result = libops.scalar_binop(x, y, op)
except (TypeError, ValueError, AttributeError,
OverflowError, NotImplementedError):
raise TypeError("cannot compare a dtyped [{dtype}] array "
"with a scalar of type [{typ}]"
.format(dtype=x.dtype,
typ=type(y).__name__))
return result
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
self, other = _align_method_SERIES(self, other, align_asobject=True)
res_name = get_op_result_name(self, other)
if isinstance(other, ABCDataFrame):
# Defer to DataFrame implementation; fail early
return NotImplemented
elif isinstance(other, (ABCSeries, ABCIndexClass)):
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
ovalues = other.values
finalizer = lambda x: x
else:
# scalars, list, tuple, np.array
is_other_int_dtype = is_integer_dtype(np.asarray(other))
if is_list_like(other) and not isinstance(other, np.ndarray):
# TODO: Can we do this before the is_integer_dtype check?
# could the is_integer_dtype check be checking the wrong
# thing? e.g. other = [[0, 1], [2, 3], [4, 5]]?
other = construct_1d_object_array_from_listlike(other)
ovalues = other
finalizer = lambda x: x.__finalize__(self)
# For int vs int `^`, `|`, `&` are bitwise operators and return
# integer dtypes. Otherwise these are boolean ops
filler = (fill_int if is_self_int_dtype and is_other_int_dtype
else fill_bool)
res_values = na_op(self.values, ovalues)
unfilled = self._constructor(res_values,
index=self.index, name=res_name)
filled = filler(unfilled)
return finalizer(filled)
wrapper.__name__ = op_name
return wrapper |
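The wrapper above is internal to pandas; what users see is its effect when Series are combined with &, |, or ^. A small hedged sketch through the public API:

# hedged sketch: the generated wrapper is what backs `&` / `|` / `^` on Series
import pandas as pd

s1 = pd.Series([True, False, True])
s2 = pd.Series([True, True, False])
s1 & s2                                  # -> [True, False, False], bool dtype
pd.Series([1, 0]) | pd.Series([0, 2])    # int vs int stays bitwise -> [1, 2]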
def get_parameter_p_value_too_high_warning(
model_type, model_params, parameter, p_value, maximum_p_value
):
""" Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
if p_value > maximum_p_value:
data = {
"{}_p_value".format(parameter): p_value,
"{}_maximum_p_value".format(parameter): maximum_p_value,
}
data.update(model_params)
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
model_type=model_type, parameter=parameter
)
),
description=(
"Model fit {parameter} p-value is too high. Candidate model rejected.".format(
parameter=parameter
)
),
data=data,
)
)
return warnings | Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning. | Below is the the instruction that describes the task:
### Input:
Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
### Response:
def get_parameter_p_value_too_high_warning(
model_type, model_params, parameter, p_value, maximum_p_value
):
""" Return an empty list or a single warning wrapped in a list indicating
whether model parameter p-value is too high.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
model_params : :any:`dict`
Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`.
parameter : :any:`str`
The name of the parameter, e.g., ``'intercept'``.
p_value : :any:`float`
The p-value of the parameter.
maximum_p_value : :any:`float`
The maximum allowable p-value of the parameter.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
if p_value > maximum_p_value:
data = {
"{}_p_value".format(parameter): p_value,
"{}_maximum_p_value".format(parameter): maximum_p_value,
}
data.update(model_params)
warnings.append(
EEMeterWarning(
qualified_name=(
"eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
model_type=model_type, parameter=parameter
)
),
description=(
"Model fit {parameter} p-value is too high. Candidate model rejected.".format(
parameter=parameter
)
),
data=data,
)
)
return warnings |
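A direct call with illustrative numbers shows the single-warning path; the model_params contents here are assumptions.

# hedged sketch: a p-value above the cap yields exactly one EEMeterWarning
warnings = get_parameter_p_value_too_high_warning(
    model_type="cdd_hdd",
    model_params={"intercept": 1.5},     # illustrative fitted parameters
    parameter="intercept",
    p_value=0.25,
    maximum_p_value=0.1,
)
assert len(warnings) == 1
# warnings[0].qualified_name ends with "cdd_hdd.intercept_p_value_too_high"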
def sweep(port, rate, ID, retry=3):
"""
Sends a ping packet to ID's from 0 to maximum and prints out any returned
messages.
Actually send a broadcast and will retry (resend) the ping 3 times ...
"""
if port == 'dummy':
s = ServoSerial(port, rate, fake=True)
else:
s = ServoSerial(port, rate)
if ID < 0:
ID = xl320.XL320_BROADCAST_ADDR
try:
s.open()
except SerialException as e:
# print('Error opening serial port:')
print('-'*40)
print(sys.argv[0], ':')
print(e)
exit(1)
pkt = makePingPacket(ID)
# print('ping', pkt)
s.write(pkt)
# as more servos add up, I might need to increase the cnt number???
for cnt in range(retry):
ans = s.read()
if ans:
for pkt in ans:
servo = packetToDict(pkt)
utils.prettyPrintPacket(servo)
print('raw pkt: {}'.format(pkt))
else:
print('Try {}: no servos found'.format(cnt))
time.sleep(0.1)
s.close() | Sends a ping packet to ID's from 0 to maximum and prints out any returned
messages.
Actually send a broadcast and will retry (resend) the ping 3 times ... | Below is the the instruction that describes the task:
### Input:
Sends a ping packet to ID's from 0 to maximum and prints out any returned
messages.
Actually send a broadcast and will retry (resend) the ping 3 times ...
### Response:
def sweep(port, rate, ID, retry=3):
"""
Sends a ping packet to ID's from 0 to maximum and prints out any returned
messages.
Actually send a broadcast and will retry (resend) the ping 3 times ...
"""
if port == 'dummy':
s = ServoSerial(port, rate, fake=True)
else:
s = ServoSerial(port, rate)
if ID < 0:
ID = xl320.XL320_BROADCAST_ADDR
try:
s.open()
except SerialException as e:
# print('Error opening serial port:')
print('-'*40)
print(sys.argv[0], ':')
print(e)
exit(1)
pkt = makePingPacket(ID)
# print('ping', pkt)
s.write(pkt)
# as more servos add up, I might need to increase the cnt number???
for cnt in range(retry):
ans = s.read()
if ans:
for pkt in ans:
servo = packetToDict(pkt)
utils.prettyPrintPacket(servo)
print('raw pkt: {}'.format(pkt))
else:
print('Try {}: no servos found'.format(cnt))
time.sleep(0.1)
s.close() |
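The helper can be exercised without hardware through the 'dummy' port branch shown above; the baud rate below is only a typical XL-320 value, not something the snippet requires.

# hedged sketch: broadcast ping on the fake serial backend (no servos attached)
sweep('dummy', 1000000, -1)        # ID < 0 falls back to the broadcast address
# against real hardware the port is device-specific, e.g. sweep('/dev/ttyUSB0', 1000000, -1)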
def cache_key(*args, **kwargs):
"""
Base method for computing the cache key with respect to the given
arguments.
"""
key = ""
for arg in args:
if callable(arg):
key += ":%s" % repr(arg)
else:
key += ":%s" % str(arg)
return key | Base method for computing the cache key with respect to the given
arguments. | Below is the the instruction that describes the task:
### Input:
Base method for computing the cache key with respect to the given
arguments.
### Response:
def cache_key(*args, **kwargs):
"""
Base method for computing the cache key with respect to the given
arguments.
"""
key = ""
for arg in args:
if callable(arg):
key += ":%s" % repr(arg)
else:
key += ":%s" % str(arg)
return key |
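A short sketch of the key layout; note that keyword arguments are accepted but not folded into the key by this snippet.

# hedged sketch: keys are ':'-joined strs/reprs of the positional arguments
def region():
    pass

cache_key("articles", 42, region)
# -> ":articles:42:<function region at 0x...>"  (callables use repr, everything else str)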
def add_column_xsd(self, tb, column, attrs):
""" Add the XSD for a column to tb (a TreeBuilder) """
if column.nullable:
attrs['minOccurs'] = str(0)
attrs['nillable'] = 'true'
for cls, xsd_type in six.iteritems(self.SIMPLE_XSD_TYPES):
if isinstance(column.type, cls):
attrs['type'] = xsd_type
with tag(tb, 'xsd:element', attrs) as tb:
self.element_callback(tb, column)
return tb
if isinstance(column.type, Geometry):
geometry_type = column.type.geometry_type
xsd_type = self.SIMPLE_GEOMETRY_XSD_TYPES[geometry_type]
attrs['type'] = xsd_type
with tag(tb, 'xsd:element', attrs) as tb:
self.element_callback(tb, column)
return tb
if isinstance(column.type, sqlalchemy.Enum):
with tag(tb, 'xsd:element', attrs) as tb:
with tag(tb, 'xsd:simpleType') as tb:
with tag(tb, 'xsd:restriction', {'base': 'xsd:string'}) \
as tb:
for enum in column.type.enums:
with tag(tb, 'xsd:enumeration', {'value': enum}):
pass
self.element_callback(tb, column)
return tb
if isinstance(column.type, sqlalchemy.Numeric):
if column.type.scale is None and column.type.precision is None:
attrs['type'] = 'xsd:decimal'
with tag(tb, 'xsd:element', attrs) as tb:
self.element_callback(tb, column)
return tb
else:
with tag(tb, 'xsd:element', attrs) as tb:
with tag(tb, 'xsd:simpleType') as tb:
with tag(tb, 'xsd:restriction',
{'base': 'xsd:decimal'}) as tb:
if column.type.scale is not None:
with tag(tb, 'xsd:fractionDigits',
{'value': str(column.type.scale)}) \
as tb:
pass
if column.type.precision is not None:
precision = column.type.precision
with tag(tb, 'xsd:totalDigits',
{'value': str(precision)}) \
as tb:
pass
self.element_callback(tb, column)
return tb
if isinstance(column.type, sqlalchemy.String) \
or isinstance(column.type, sqlalchemy.Text) \
or isinstance(column.type, sqlalchemy.Unicode) \
or isinstance(column.type, sqlalchemy.UnicodeText):
if column.type.length is None:
attrs['type'] = 'xsd:string'
with tag(tb, 'xsd:element', attrs) as tb:
self.element_callback(tb, column)
return tb
else:
with tag(tb, 'xsd:element', attrs) as tb:
with tag(tb, 'xsd:simpleType') as tb:
with tag(tb, 'xsd:restriction',
{'base': 'xsd:string'}) as tb:
with tag(tb, 'xsd:maxLength',
{'value': str(column.type.length)}):
pass
self.element_callback(tb, column)
return tb
raise UnsupportedColumnTypeError(column.type) | Add the XSD for a column to tb (a TreeBuilder) | Below is the the instruction that describes the task:
### Input:
Add the XSD for a column to tb (a TreeBuilder)
### Response:
def add_column_xsd(self, tb, column, attrs):
""" Add the XSD for a column to tb (a TreeBuilder) """
if column.nullable:
attrs['minOccurs'] = str(0)
attrs['nillable'] = 'true'
for cls, xsd_type in six.iteritems(self.SIMPLE_XSD_TYPES):
if isinstance(column.type, cls):
attrs['type'] = xsd_type
with tag(tb, 'xsd:element', attrs) as tb:
self.element_callback(tb, column)
return tb
if isinstance(column.type, Geometry):
geometry_type = column.type.geometry_type
xsd_type = self.SIMPLE_GEOMETRY_XSD_TYPES[geometry_type]
attrs['type'] = xsd_type
with tag(tb, 'xsd:element', attrs) as tb:
self.element_callback(tb, column)
return tb
if isinstance(column.type, sqlalchemy.Enum):
with tag(tb, 'xsd:element', attrs) as tb:
with tag(tb, 'xsd:simpleType') as tb:
with tag(tb, 'xsd:restriction', {'base': 'xsd:string'}) \
as tb:
for enum in column.type.enums:
with tag(tb, 'xsd:enumeration', {'value': enum}):
pass
self.element_callback(tb, column)
return tb
if isinstance(column.type, sqlalchemy.Numeric):
if column.type.scale is None and column.type.precision is None:
attrs['type'] = 'xsd:decimal'
with tag(tb, 'xsd:element', attrs) as tb:
self.element_callback(tb, column)
return tb
else:
with tag(tb, 'xsd:element', attrs) as tb:
with tag(tb, 'xsd:simpleType') as tb:
with tag(tb, 'xsd:restriction',
{'base': 'xsd:decimal'}) as tb:
if column.type.scale is not None:
with tag(tb, 'xsd:fractionDigits',
{'value': str(column.type.scale)}) \
as tb:
pass
if column.type.precision is not None:
precision = column.type.precision
with tag(tb, 'xsd:totalDigits',
{'value': str(precision)}) \
as tb:
pass
self.element_callback(tb, column)
return tb
if isinstance(column.type, sqlalchemy.String) \
or isinstance(column.type, sqlalchemy.Text) \
or isinstance(column.type, sqlalchemy.Unicode) \
or isinstance(column.type, sqlalchemy.UnicodeText):
if column.type.length is None:
attrs['type'] = 'xsd:string'
with tag(tb, 'xsd:element', attrs) as tb:
self.element_callback(tb, column)
return tb
else:
with tag(tb, 'xsd:element', attrs) as tb:
with tag(tb, 'xsd:simpleType') as tb:
with tag(tb, 'xsd:restriction',
{'base': 'xsd:string'}) as tb:
with tag(tb, 'xsd:maxLength',
{'value': str(column.type.length)}):
pass
self.element_callback(tb, column)
return tb
raise UnsupportedColumnTypeError(column.type) |
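A heavily hedged sketch of the string-column branch; the generator instance, the TreeBuilder `tb`, and the `tag` helper are assumed from the surrounding class, and the XML shown is only the approximate fragment that branch builds.

# hedged sketch: what the nullable VARCHAR(50) branch above roughly produces
from sqlalchemy import Column, String

col = Column("name", String(50), nullable=True)
# generator.add_column_xsd(tb, col, {"name": "name"}) builds approximately:
# <xsd:element name="name" minOccurs="0" nillable="true">
#   <xsd:simpleType>
#     <xsd:restriction base="xsd:string">
#       <xsd:maxLength value="50"/>
#     </xsd:restriction>
#   </xsd:simpleType>
# </xsd:element>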
def view_package_path(self, package: str) -> _PATH:
        '''Print the path to the APK of the given package.'''
if package not in self.view_packgets_list():
raise NoSuchPackageException(
f'There is no such package {package!r}.')
output, _ = self._execute(
'-s', self.device_sn, 'shell', 'pm', 'path', package)
        return output[8:-1] | Print the path to the APK of the given package.
### Input:
Print the path to the APK of the given.
### Response:
def view_package_path(self, package: str) -> _PATH:
'''Print the path to the APK of the given.'''
if package not in self.view_packgets_list():
raise NoSuchPackageException(
f'There is no such package {package!r}.')
output, _ = self._execute(
'-s', self.device_sn, 'shell', 'pm', 'path', package)
return output[8:-1] |
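A hedged call sketch; `device` stands in for an instance of the adb wrapper class this method belongs to.

# hedged sketch: query the APK path of an installed package (`device` is an assumption)
apk_path = device.view_package_path("com.android.chrome")
# raises NoSuchPackageException when the package is missing; otherwise returns the
# path portion of `pm path`, with the leading "package:" and trailing newline stripped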
def transform(self, image_feature, bigdl_type="float"):
"""
transform ImageFeature
"""
callBigDlFunc(bigdl_type, "transformImageFeature", self.value, image_feature)
return image_feature | transform ImageFeature | Below is the the instruction that describes the task:
### Input:
transform ImageFeature
### Response:
def transform(self, image_feature, bigdl_type="float"):
"""
transform ImageFeature
"""
callBigDlFunc(bigdl_type, "transformImageFeature", self.value, image_feature)
return image_feature |
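A one-line hedged sketch; `augmenter` and `feature` stand in for a BigDL transformer instance and an ImageFeature from the surrounding API.

# hedged sketch: apply the JVM-side transform and get the same ImageFeature back
feature = augmenter.transform(feature)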
def birch(args):
"""
%prog birch seqids layout
Plot birch macro-synteny, with an embedded phylogenetic tree to the right.
"""
p = OptionParser(birch.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x6")
if len(args) != 2:
sys.exit(not p.print_help())
seqids, layout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
K = Karyotype(fig, root, seqids, layout)
L = K.layout
xs = .79
dt = dict(rectangle=False, circle=False)
# Embed a phylogenetic tree to the right
coords = {}
coords["Amborella"] = (xs, L[0].y)
coords["Vitis"] = (xs, L[1].y)
coords["Prunus"] = (xs, L[2].y)
coords["Betula"] = (xs, L[3].y)
coords["Populus"] = (xs, L[4].y)
coords["Arabidopsis"] = (xs, L[5].y)
coords["fabids"] = join_nodes(root, coords, "Prunus", "Betula", xs, **dt)
coords["malvids"] = join_nodes(root, coords, \
"Populus", "Arabidopsis", xs, **dt)
coords["rosids"] = join_nodes(root, coords, "fabids", "malvids", xs, **dt)
coords["eudicots"] = join_nodes(root, coords, "rosids", "Vitis", xs, **dt)
coords["angiosperm"] = join_nodes(root, coords, \
"eudicots", "Amborella", xs, **dt)
# Show branch length
branch_length(root, coords["Amborella"], coords["angiosperm"], ">160.0")
branch_length(root, coords["eudicots"], coords["angiosperm"],
">78.2", va="top")
branch_length(root, coords["Vitis"], coords["eudicots"], "138.5")
branch_length(root, coords["rosids"], coords["eudicots"],
"19.8", va="top")
branch_length(root, coords["Prunus"], coords["fabids"],
"104.2", ha="right", va="top")
branch_length(root, coords["Arabidopsis"], coords["malvids"],
"110.2", va="top")
branch_length(root, coords["fabids"], coords["rosids"],
"19.8", ha="right", va="top")
branch_length(root, coords["malvids"], coords["rosids"],
"8.5", va="top")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "birch"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog birch seqids layout
Plot birch macro-synteny, with an embedded phylogenetic tree to the right. | Below is the the instruction that describes the task:
### Input:
%prog birch seqids layout
Plot birch macro-synteny, with an embedded phylogenetic tree to the right.
### Response:
def birch(args):
"""
%prog birch seqids layout
Plot birch macro-synteny, with an embedded phylogenetic tree to the right.
"""
p = OptionParser(birch.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x6")
if len(args) != 2:
sys.exit(not p.print_help())
seqids, layout = args
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
K = Karyotype(fig, root, seqids, layout)
L = K.layout
xs = .79
dt = dict(rectangle=False, circle=False)
# Embed a phylogenetic tree to the right
coords = {}
coords["Amborella"] = (xs, L[0].y)
coords["Vitis"] = (xs, L[1].y)
coords["Prunus"] = (xs, L[2].y)
coords["Betula"] = (xs, L[3].y)
coords["Populus"] = (xs, L[4].y)
coords["Arabidopsis"] = (xs, L[5].y)
coords["fabids"] = join_nodes(root, coords, "Prunus", "Betula", xs, **dt)
coords["malvids"] = join_nodes(root, coords, \
"Populus", "Arabidopsis", xs, **dt)
coords["rosids"] = join_nodes(root, coords, "fabids", "malvids", xs, **dt)
coords["eudicots"] = join_nodes(root, coords, "rosids", "Vitis", xs, **dt)
coords["angiosperm"] = join_nodes(root, coords, \
"eudicots", "Amborella", xs, **dt)
# Show branch length
branch_length(root, coords["Amborella"], coords["angiosperm"], ">160.0")
branch_length(root, coords["eudicots"], coords["angiosperm"],
">78.2", va="top")
branch_length(root, coords["Vitis"], coords["eudicots"], "138.5")
branch_length(root, coords["rosids"], coords["eudicots"],
"19.8", va="top")
branch_length(root, coords["Prunus"], coords["fabids"],
"104.2", ha="right", va="top")
branch_length(root, coords["Arabidopsis"], coords["malvids"],
"110.2", va="top")
branch_length(root, coords["fabids"], coords["rosids"],
"19.8", ha="right", va="top")
branch_length(root, coords["malvids"], coords["rosids"],
"8.5", va="top")
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
pf = "birch"
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) |
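A hedged invocation sketch: the function follows jcvi's %prog convention and takes the two documented inputs; the file names are assumptions.

# hedged sketch: render the macro-synteny figure from a seqids and a layout file
birch(["birch.seqids", "birch.layout"])
# writes "birch.<format>" (format/DPI come from the image options parsed above)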
def port_bindings(self, value):
"""
{
u'8080/tcp': [
{
u'host_port': u'8080',
u'host_ip': u''
}
]
}
"""
if isinstance(value, (list, dict)):
self._port_bindings = self._convert_port_bindings(value)
elif value is None:
self._port_bindings = None
else:
raise TypeError('port bindings must be a dict, list, or None. {0} was passed.'.format(type(value))) | {
u'8080/tcp': [
{
u'host_port': u'8080',
u'host_ip': u''
}
]
} | Below is the the instruction that describes the task:
### Input:
{
u'8080/tcp': [
{
u'host_port': u'8080',
u'host_ip': u''
}
]
}
### Response:
def port_bindings(self, value):
"""
{
u'8080/tcp': [
{
u'host_port': u'8080',
u'host_ip': u''
}
]
}
"""
if isinstance(value, (list, dict)):
self._port_bindings = self._convert_port_bindings(value)
elif value is None:
self._port_bindings = None
else:
raise TypeError('port bindings must be a dict, list, or None. {0} was passed.'.format(type(value))) |
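A hedged setter sketch; `host_config` stands in for an instance of the class that owns this property.

# hedged sketch: assigning bindings in the documented shape, or clearing them
host_config.port_bindings = {
    '8080/tcp': [{'host_port': '8080', 'host_ip': ''}],
}
host_config.port_bindings = None        # also accepted; clears the bindings
# any other type (e.g. a plain string) raises TypeError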
def remove(cls, repo, name):
"""Remove the remote with the given name
:return: the passed remote name to remove
"""
repo.git.remote("rm", name)
if isinstance(name, cls):
name._clear_cache()
return name | Remove the remote with the given name
:return: the passed remote name to remove | Below is the the instruction that describes the task:
### Input:
Remove the remote with the given name
:return: the passed remote name to remove
### Response:
def remove(cls, repo, name):
"""Remove the remote with the given name
:return: the passed remote name to remove
"""
repo.git.remote("rm", name)
if isinstance(name, cls):
name._clear_cache()
return name |
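A hedged GitPython-style sketch of the classmethod above; the repository path and remote name are assumptions.

# hedged sketch: drop a remote by name via the classmethod shown above
from git import Repo, Remote

repo = Repo(".")                     # repository path is an assumption
Remote.remove(repo, "upstream")      # returns the name that was passed in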
def richtext_filters(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
for filter_name in settings.RICHTEXT_FILTERS:
filter_func = import_dotted_path(filter_name)
content = filter_func(content)
return content | Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting. | Below is the the instruction that describes the task:
### Input:
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
### Response:
def richtext_filters(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
for filter_name in settings.RICHTEXT_FILTERS:
filter_func = import_dotted_path(filter_name)
content = filter_func(content)
return content |
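A hedged sketch assuming a configured Django/Mezzanine settings module; the filter path listed is only an example of a dotted path the setting might contain.

# hedged sketch: every dotted path in RICHTEXT_FILTERS is applied in order
# (assumes settings are configured, e.g. RICHTEXT_FILTERS = ["mezzanine.utils.html.thumbnails"])
html = richtext_filters("<p>Hello <img src='/static/img/logo.png'></p>")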
def hidelist(self, window_name, object_name):
"""
Hide combo box list / menu
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
object_handle = self._get_object_handle(window_name, object_name)
object_handle.activate()
object_handle.sendKey(AXKeyCodeConstants.ESCAPE)
return 1 | Hide combo box list / menu
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer | Below is the the instruction that describes the task:
### Input:
Hide combo box list / menu
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
### Response:
def hidelist(self, window_name, object_name):
"""
Hide combo box list / menu
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
object_handle = self._get_object_handle(window_name, object_name)
object_handle.activate()
object_handle.sendKey(AXKeyCodeConstants.ESCAPE)
return 1 |
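A hedged call sketch; `client` stands in for an instance of the class defining hidelist, and the window and object names are illustrative LDTP-style globs.

# hedged sketch: collapse an open combo-box list (names are illustrative globs)
client.hidelist('*TextEdit*', 'cboFontFamily')     # returns 1 on success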
def encode_pdf_date(d: datetime) -> str:
"""Encode Python datetime object as PDF date string
From Adobe pdfmark manual:
(D:YYYYMMDDHHmmSSOHH'mm')
D: is an optional prefix. YYYY is the year. All fields after the year are
optional. MM is the month (01-12), DD is the day (01-31), HH is the
hour (00-23), mm are the minutes (00-59), and SS are the seconds
(00-59). The remainder of the string defines the relation of local
time to GMT. O is either + for a positive difference (local time is
later than GMT) or - (minus) for a negative difference. HH' is the
absolute value of the offset from GMT in hours, and mm' is the
absolute value of the offset in minutes. If no GMT information is
specified, the relation between the specified time and GMT is
considered unknown. Regardless of whether or not GMT
information is specified, the remainder of the string should specify
the local time.
"""
# The formatting of %Y is not consistent as described in
# https://bugs.python.org/issue13305 and underspecification in libc.
# So explicitly format the year with leading zeros
s = "{:04d}".format(d.year)
s += d.strftime(r'%m%d%H%M%S')
tz = d.strftime('%z')
if tz:
sign, tz_hours, tz_mins = tz[0], tz[1:3], tz[3:5]
s += "{}{}'{}'".format(sign, tz_hours, tz_mins)
return s | Encode Python datetime object as PDF date string
From Adobe pdfmark manual:
(D:YYYYMMDDHHmmSSOHH'mm')
D: is an optional prefix. YYYY is the year. All fields after the year are
optional. MM is the month (01-12), DD is the day (01-31), HH is the
hour (00-23), mm are the minutes (00-59), and SS are the seconds
(00-59). The remainder of the string defines the relation of local
time to GMT. O is either + for a positive difference (local time is
later than GMT) or - (minus) for a negative difference. HH' is the
absolute value of the offset from GMT in hours, and mm' is the
absolute value of the offset in minutes. If no GMT information is
specified, the relation between the specified time and GMT is
considered unknown. Regardless of whether or not GMT
information is specified, the remainder of the string should specify
the local time. | Below is the the instruction that describes the task:
### Input:
Encode Python datetime object as PDF date string
From Adobe pdfmark manual:
(D:YYYYMMDDHHmmSSOHH'mm')
D: is an optional prefix. YYYY is the year. All fields after the year are
optional. MM is the month (01-12), DD is the day (01-31), HH is the
hour (00-23), mm are the minutes (00-59), and SS are the seconds
(00-59). The remainder of the string defines the relation of local
time to GMT. O is either + for a positive difference (local time is
later than GMT) or - (minus) for a negative difference. HH' is the
absolute value of the offset from GMT in hours, and mm' is the
absolute value of the offset in minutes. If no GMT information is
specified, the relation between the specified time and GMT is
considered unknown. Regardless of whether or not GMT
information is specified, the remainder of the string should specify
the local time.
### Response:
def encode_pdf_date(d: datetime) -> str:
"""Encode Python datetime object as PDF date string
From Adobe pdfmark manual:
(D:YYYYMMDDHHmmSSOHH'mm')
D: is an optional prefix. YYYY is the year. All fields after the year are
optional. MM is the month (01-12), DD is the day (01-31), HH is the
hour (00-23), mm are the minutes (00-59), and SS are the seconds
(00-59). The remainder of the string defines the relation of local
time to GMT. O is either + for a positive difference (local time is
later than GMT) or - (minus) for a negative difference. HH' is the
absolute value of the offset from GMT in hours, and mm' is the
absolute value of the offset in minutes. If no GMT information is
specified, the relation between the specified time and GMT is
considered unknown. Regardless of whether or not GMT
information is specified, the remainder of the string should specify
the local time.
"""
# The formatting of %Y is not consistent as described in
# https://bugs.python.org/issue13305 and underspecification in libc.
# So explicitly format the year with leading zeros
s = "{:04d}".format(d.year)
s += d.strftime(r'%m%d%H%M%S')
tz = d.strftime('%z')
if tz:
sign, tz_hours, tz_mins = tz[0], tz[1:3], tz[3:5]
s += "{}{}'{}'".format(sign, tz_hours, tz_mins)
return s |
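A direct call sketch; the datetime below is arbitrary but timezone-aware so the offset branch is exercised.

# hedged sketch: a timezone-aware datetime rendered in PDF date syntax
from datetime import datetime, timezone, timedelta

d = datetime(2023, 5, 17, 9, 30, 0, tzinfo=timezone(timedelta(hours=-4)))
encode_pdf_date(d)     # -> "20230517093000-04'00'"  (note: no "D:" prefix is added here)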
def fgrad_y(self, y, return_precalc=False):
"""
    gradient of f w.r.t. y ([N x 1])
:returns: Nx1 vector of derivatives, unless return_precalc is true,
then it also returns the precomputed stuff
"""
d = self.d
mpsi = self.psi
# vectorized version
S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T
R = np.tanh(S)
D = 1 - (R ** 2)
GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T
if return_precalc:
return GRAD, S, R, D
    return GRAD | gradient of f w.r.t. y ([N x 1])
:returns: Nx1 vector of derivatives, unless return_precalc is true,
then it also returns the precomputed stuff | Below is the the instruction that describes the task:
### Input:
gradient of f w.r.t. y ([N x 1])
:returns: Nx1 vector of derivatives, unless return_precalc is true,
then it also returns the precomputed stuff
### Response:
def fgrad_y(self, y, return_precalc=False):
"""
    gradient of f w.r.t. y ([N x 1])
:returns: Nx1 vector of derivatives, unless return_precalc is true,
then it also returns the precomputed stuff
"""
d = self.d
mpsi = self.psi
# vectorized version
S = (mpsi[:,1] * (y[:,:,None] + mpsi[:,2])).T
R = np.tanh(S)
D = 1 - (R ** 2)
GRAD = (d + (mpsi[:,0:1][:,:,None] * mpsi[:,1:2][:,:,None] * D).sum(axis=0)).T
if return_precalc:
return GRAD, S, R, D
return GRAD |
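A hedged numpy re-statement of the vectorised gradient outside the class, for a toy set of warping terms; the psi rows (a_i, b_i, c_i) and the slope d are assumptions standing in for self.psi and self.d.

# hedged sketch: same broadcasting as above, with explicit toy parameters
import numpy as np

psi = np.array([[1.0, 0.5, 0.0],          # rows are (a_i, b_i, c_i)
                [0.5, 2.0, 1.0]])
d = 1.0
y = np.linspace(-1.0, 1.0, 5)[:, None]    # (N x 1), as the docstring expects
S = (psi[:, 1] * (y[:, :, None] + psi[:, 2])).T
D = 1 - np.tanh(S) ** 2
grad = (d + (psi[:, 0:1][:, :, None] * psi[:, 1:2][:, :, None] * D).sum(axis=0)).T
# grad has shape (5, 1): d + sum_i a_i * b_i * (1 - tanh(b_i * (y + c_i))**2)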
def instance_of(klass, arg):
"""Require that a value has a particular Python type."""
if not isinstance(arg, klass):
raise com.IbisTypeError(
'Given argument with type {} is not an instance of {}'.format(
type(arg), klass
)
)
return arg | Require that a value has a particular Python type. | Below is the the instruction that describes the task:
### Input:
Require that a value has a particular Python type.
### Response:
def instance_of(klass, arg):
"""Require that a value has a particular Python type."""
if not isinstance(arg, klass):
raise com.IbisTypeError(
'Given argument with type {} is not an instance of {}'.format(
type(arg), klass
)
)
return arg |
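Two direct call sketches; the second line relies on isinstance accepting a tuple of types, which this validator inherits.

# hedged sketch: the validator returns the argument unchanged when the type matches
instance_of(int, 5)                    # -> 5
instance_of((list, tuple), (1, 2))     # tuples of types work, as with isinstance
# instance_of(str, 5) would raise com.IbisTypeError instead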
def get_tab_tip(self, filename, is_modified=None, is_readonly=None):
"""Return tab menu title"""
text = u"%s — %s"
text = self.__modified_readonly_title(text,
is_modified, is_readonly)
if self.tempfile_path is not None\
and filename == encoding.to_unicode_from_fs(self.tempfile_path):
temp_file_str = to_text_string(_("Temporary file"))
return text % (temp_file_str, self.tempfile_path)
else:
return text % (osp.basename(filename), osp.dirname(filename)) | Return tab menu title | Below is the the instruction that describes the task:
### Input:
Return tab menu title
### Response:
def get_tab_tip(self, filename, is_modified=None, is_readonly=None):
"""Return tab menu title"""
text = u"%s — %s"
text = self.__modified_readonly_title(text,
is_modified, is_readonly)
if self.tempfile_path is not None\
and filename == encoding.to_unicode_from_fs(self.tempfile_path):
temp_file_str = to_text_string(_("Temporary file"))
return text % (temp_file_str, self.tempfile_path)
else:
return text % (osp.basename(filename), osp.dirname(filename)) |
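A hedged call sketch; `editorstack` stands in for the editor-stack instance this method lives on, and the path is an assumption.

# hedged sketch: tooltip text for a saved file (temporary files get a special label)
tip = editorstack.get_tab_tip("/home/user/project/script.py")
# yields the basename and its directory joined by the template defined above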
def from_array(array):
"""
Deserialize a new ResponseParameters from a given dictionary.
:return: new ResponseParameters instance.
:rtype: ResponseParameters
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['migrate_to_chat_id'] = int(array.get('migrate_to_chat_id')) if array.get('migrate_to_chat_id') is not None else None
data['retry_after'] = int(array.get('retry_after')) if array.get('retry_after') is not None else None
data['_raw'] = array
return ResponseParameters(**data) | Deserialize a new ResponseParameters from a given dictionary.
:return: new ResponseParameters instance.
:rtype: ResponseParameters | Below is the the instruction that describes the task:
### Input:
Deserialize a new ResponseParameters from a given dictionary.
:return: new ResponseParameters instance.
:rtype: ResponseParameters
### Response:
def from_array(array):
"""
Deserialize a new ResponseParameters from a given dictionary.
:return: new ResponseParameters instance.
:rtype: ResponseParameters
"""
if array is None or not array:
return None
# end if
assert_type_or_raise(array, dict, parameter_name="array")
data = {}
data['migrate_to_chat_id'] = int(array.get('migrate_to_chat_id')) if array.get('migrate_to_chat_id') is not None else None
data['retry_after'] = int(array.get('retry_after')) if array.get('retry_after') is not None else None
data['_raw'] = array
return ResponseParameters(**data) |
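Two direct call sketches of the deserializer; the attribute names echoed in the comment are taken from the keys it builds.

# hedged sketch: building ResponseParameters from a Telegram-style payload
params = ResponseParameters.from_array({'retry_after': 30})
# expected: params.retry_after == 30 and params.migrate_to_chat_id is None
ResponseParameters.from_array(None)    # falsy input short-circuits to None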