def update(self, response, **kwargs):
'''
If a record matching the instance already exists in the database, update
it, else create a new record.
'''
response_cls = self._get_instance(**kwargs)
if response_cls:
setattr(response_cls, self.column, self.accessor(response))
_action_and_commit(response_cls, session.add)
else:
self.get_or_create_from_legacy_response(response, **kwargs)
def preferences_view(request):
"""View and process updates to the preferences page."""
user = request.user
if request.method == "POST":
logger.debug(dict(request.POST))
phone_formset, email_formset, website_formset, errors = save_personal_info(request, user)
if user.is_student:
preferred_pic_form = save_preferred_pic(request, user)
bus_route_form = save_bus_route(request, user)
else:
preferred_pic_form = None
bus_route_form = None
privacy_options_form = save_privacy_options(request, user)
notification_options_form = save_notification_options(request, user)
for error in errors:
messages.error(request, error)
try:
save_gcm_options(request, user)
except AttributeError:
pass
return redirect("preferences")
else:
phone_formset = PhoneFormset(instance=user, prefix='pf')
email_formset = EmailFormset(instance=user, prefix='ef')
website_formset = WebsiteFormset(instance=user, prefix='wf')
if user.is_student:
preferred_pic = get_preferred_pic(user)
bus_route = get_bus_route(user)
logger.debug(preferred_pic)
preferred_pic_form = PreferredPictureForm(user, initial=preferred_pic)
bus_route_form = BusRouteForm(user, initial=bus_route)
else:
bus_route_form = None
preferred_pic = None
preferred_pic_form = None
privacy_options = get_privacy_options(user)
logger.debug(privacy_options)
privacy_options_form = PrivacyOptionsForm(user, initial=privacy_options)
notification_options = get_notification_options(user)
logger.debug(notification_options)
notification_options_form = NotificationOptionsForm(user, initial=notification_options)
context = {
"phone_formset": phone_formset,
"email_formset": email_formset,
"website_formset": website_formset,
"preferred_pic_form": preferred_pic_form,
"privacy_options_form": privacy_options_form,
"notification_options_form": notification_options_form,
"bus_route_form": bus_route_form if settings.ENABLE_BUS_APP else None
}
return render(request, "preferences/preferences.html", context)
def split_gtf(gtf, sample_size=None, out_dir=None):
"""
split a GTF file into two equal parts, randomly selecting genes.
sample_size will select up to sample_size genes in total
"""
if out_dir:
part1_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part1.gtf"
part2_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part2.gtf"
part1 = os.path.join(out_dir, part1_fn)
part2 = os.path.join(out_dir, part2_fn)
if file_exists(part1) and file_exists(part2):
return part1, part2
else:
part1 = tempfile.NamedTemporaryFile(delete=False, suffix=".part1.gtf").name
part2 = tempfile.NamedTemporaryFile(delete=False, suffix=".part2.gtf").name
db = get_gtf_db(gtf)
gene_ids = set([x['gene_id'][0] for x in db.all_features()])
if not sample_size or (sample_size and sample_size > len(gene_ids)):
sample_size = len(gene_ids)
gene_ids = set(random.sample(gene_ids, sample_size))
part1_ids = set(random.sample(gene_ids, sample_size // 2))
part2_ids = gene_ids.difference(part1_ids)
with open(part1, "w") as part1_handle:
for gene in part1_ids:
for feature in db.children(gene):
part1_handle.write(str(feature) + "\n")
with open(part2, "w") as part2_handle:
for gene in part2_ids:
for feature in db.children(gene):
part2_handle.write(str(feature) + "\n")
return part1, part2
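For illustration, a hedged sketch of just the random halving step above, with made-up gene IDs and no gffutils dependency (sampling from a sorted list rather than a set, which newer Python versions require):

import random

gene_ids = {"geneA", "geneB", "geneC", "geneD", "geneE", "geneF"}
sample_size = len(gene_ids)
part1_ids = set(random.sample(sorted(gene_ids), sample_size // 2))
part2_ids = gene_ids.difference(part1_ids)
print(sorted(part1_ids), sorted(part2_ids))  # two disjoint halves covering all genes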
def discretize(value, factor=100):
"""Discretize the given value, pre-multiplying by the given factor"""
if not isinstance(value, Iterable):
return int(value * factor)
int_value = list(deepcopy(value))
for i in range(len(int_value)):
int_value[i] = int(int_value[i] * factor)
return int_value
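A quick usage sketch, assuming the discretize() above is in scope; scalars return an int, iterables return a list of ints:

print(discretize(1.234))            # -> 123
print(discretize([0.1, 0.255]))     # -> [10, 25]
print(discretize(0.5, factor=10))   # -> 5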
def build_ricecooker_json_tree(args, options, metadata_provider, json_tree_path):
"""
Download all categories, subpages, modules, and resources from open.edu.
"""
LOGGER.info('Starting to build the ricecooker_json_tree')
channeldir = args['channeldir']
if channeldir.endswith(os.path.sep):
channeldir = channeldir.rstrip(os.path.sep)
channelparentdir, channeldirname = os.path.split(channeldir)
# Ricecooker tree
channel_info = metadata_provider.get_channel_info()
thumbnail_chan_path = channel_info.get('thumbnail_chan_path', None)
if thumbnail_chan_path:
thumbnail_rel_path = rel_path_from_chan_path(thumbnail_chan_path, metadata_provider.channeldir)
else:
thumbnail_rel_path = None
ricecooker_json_tree = dict(
dirname=channeldirname,
title=channel_info['title'],
description=channel_info['description'],
source_domain=channel_info['source_domain'],
source_id=channel_info['source_id'],
language=channel_info['language'],
thumbnail=thumbnail_rel_path,
children=[],
)
channeldir = args['channeldir']
content_folders = sorted(os.walk(channeldir))
# MAIN PROCESSING OF os.walk OUTPUT
############################################################################
# TODO(ivan): figure out all the implications of the
# _ = content_folders.pop(0) # Skip over channel folder because handled above
for rel_path, _subfolders, filenames in content_folders:
LOGGER.info('processing folder ' + str(rel_path))
# IMPLEMENTATION DETAIL:
# - `filenames` contains real files in the `channeldir` folder
# - `exercises_filenames` contains virtual files whose sole purpose is to set the
# order of nodes within a given topic. Since alphabetical order is used to
# walk the files in the `channeldir`, we must "splice in" the exercises here
if metadata_provider.has_exercises():
dir_chan_path = chan_path_from_rel_path(rel_path, metadata_provider.channeldir)
dir_path_tuple = path_to_tuple(dir_chan_path)
exercises_filenames = metadata_provider.get_exercises_for_dir(dir_path_tuple)
filenames.extend(exercises_filenames)
sorted_filenames = sorted(filenames)
process_folder(ricecooker_json_tree, rel_path, sorted_filenames, metadata_provider)
# Write out ricecooker_json_tree.json
write_tree_to_json_tree(json_tree_path, ricecooker_json_tree)
LOGGER.info('Folder hierarchy walk result stored in ' + json_tree_path)
def sample(self):
"""
Draws a trajectory length, first coordinates, lengths, angles and
length-angle-difference pairs according to the empirical distribution.
Each call creates one complete trajectory.
"""
lengths = []
angles = []
coordinates = []
fix = []
sample_size = int(round(self.trajLen_borders[self.drawFrom('self.trajLen_cumsum', self.getrand('self.trajLen_cumsum'))]))
coordinates.append([0, 0])
fix.append(1)
while len(coordinates) < sample_size:
if len(lengths) == 0 and len(angles) == 0:
angle, length = self._draw(self)
else:
angle, length = self._draw(prev_angle = angles[-1],
prev_length = lengths[-1])
x, y = self._calc_xy(coordinates[-1], angle, length)
coordinates.append([x, y])
lengths.append(length)
angles.append(angle)
fix.append(fix[-1]+1)
return coordinates
def generate(self):
'''
Draws samples from the `true` distribution.
Returns:
`np.ndarray` of samples.
'''
observed_arr = None
for result_tuple in self.__feature_generator.generate():
observed_arr = result_tuple[0]
break
if self.noise_sampler is not None:
self.noise_sampler.output_shape = observed_arr.shape
observed_arr += self.noise_sampler.generate()
observed_arr = observed_arr.astype(float)
if self.__norm_mode == "z_score":
if observed_arr.std() != 0:
observed_arr = (observed_arr - observed_arr.mean()) / observed_arr.std()
elif self.__norm_mode == "min_max":
if (observed_arr.max() - observed_arr.min()) != 0:
observed_arr = (observed_arr - observed_arr.min()) / (observed_arr.max() - observed_arr.min())
elif self.__norm_mode == "tanh":
observed_arr = np.tanh(observed_arr)
return observed_arr
def add_member_roles(self, guild_id: int, member_id: int, roles: List[int]):
"""Add roles to a member
This method takes a list of **role ids** that you want to give to the user,
on top of whatever roles they may already have. This method will fetch
the user's current roles, and add to that list the roles passed in. The
user's resulting list of roles will not contain duplicates, so you don't have
to filter role ids to this method (as long as they're still roles for this guild).
This method differs from ``set_member_roles`` in that this method ADDS roles
to the user's current role list. ``set_member_roles`` is used by this method.
Args:
guild_id: snowflake id of the guild
member_id: snowflake id of the member
roles: list of snowflake ids of roles to add
"""
current_roles = [role for role in self.get_guild_member_by_id(guild_id, member_id)['roles']]
roles.extend(current_roles)
new_list = list(set(roles))
self.set_member_roles(guild_id, member_id, new_list)
def ConsultarCortes(self, sep="||"):
"Retorna listado de cortes -carnes- (código, descripción)"
ret = self.client.consultarCortes(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['respuesta']
self.__analizar_errores(ret)
array = ret.get('corte', []) + ret.get('cortePorcino', [])
if sep is None:
return dict([(it['codigo'], it['descripcion']) for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigo'], it['descripcion']) for it in array]
def _realsize(self):
'''
Get the struct size without padding (or the "real size")
:returns: the "real size" in bytes
'''
current = self
size = 0
while current is not None:
size += current._parser.sizeof(current)
last = current
current = getattr(current, '_sub', None)
size += len(getattr(last, '_extra', b''))
return size
def update_wallet(self, wallet_name, limit):
"""Update a wallet with a new limit.
@param the name of the wallet.
@param the new value of the limit.
@return a success string from the plans server.
@raise ServerError via make_request.
"""
request = {
'update': {
'limit': str(limit),
}
}
return make_request(
'{}wallet/{}'.format(self.url, wallet_name),
method='PATCH',
body=request,
timeout=self.timeout,
client=self._client)
def _build_archive(self, dir_path):
"""
Creates a zip archive from files in path.
"""
zip_path = os.path.join(dir_path, "import.zip")
archive = zipfile.ZipFile(zip_path, "w")
for filename in CSV_FILES:
filepath = os.path.join(dir_path, filename)
if os.path.exists(filepath):
archive.write(filepath, filename, zipfile.ZIP_DEFLATED)
archive.close()
with open(zip_path, "rb") as f:
body = f.read()
return body
def index_spacing(self, value):
"""Validate and set the index_spacing flag."""
if not isinstance(value, bool):
raise TypeError('index_spacing attribute must be a logical type.')
self._index_spacing = value
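A minimal, self-contained sketch of the same validate-and-set pattern as a property setter; the class name here is hypothetical:

class Options:
    def __init__(self):
        self._index_spacing = True

    @property
    def index_spacing(self):
        return self._index_spacing

    @index_spacing.setter
    def index_spacing(self, value):
        if not isinstance(value, bool):
            raise TypeError('index_spacing attribute must be a logical type.')
        self._index_spacing = value

opts = Options()
opts.index_spacing = False    # accepted
# opts.index_spacing = "yes"  # would raise TypeError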
def get_categories(self):
"""
Get all categories and post count of each category.
:return: list of (category_name, Pair(count_all, count_published)) items
"""
posts = self.get_posts(include_draft=True)
result = {}
for post in posts:
for category_name in set(post.categories):
result[category_name] = result.setdefault(
category_name,
Pair(0, 0)) + Pair(1, 0 if post.is_draft else 1)
return list(result.items())
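The counting relies on a Pair type that supports element-wise addition. A hedged, self-contained sketch of that idea (the Pair here is a stand-in, not necessarily the project's own definition):

from collections import namedtuple

class Pair(namedtuple("Pair", "first second")):
    def __add__(self, other):
        return Pair(self.first + other.first, self.second + other.second)

result = {}
posts = [("python", False), ("python", True), ("misc", False)]  # (category, is_draft)
for category, is_draft in posts:
    result[category] = result.setdefault(category, Pair(0, 0)) + Pair(1, 0 if is_draft else 1)
print(result)  # {'python': Pair(first=2, second=1), 'misc': Pair(first=1, second=1)}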
def get_parallel_value_for_key(self, key):
"""
Get the value for a key. If there is no value for the key then empty
string is returned.
"""
if self._remotelib:
return self._remotelib.run_keyword('get_parallel_value_for_key',
[key], {})
return _PabotLib.get_parallel_value_for_key(self, key)
def delete_namespaced_cron_job(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_cron_job # noqa: E501
delete a CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_cron_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs) # noqa: E501
return data
def write_contents_to_file(self, entities, path_patterns=None,
contents=None, link_to=None,
content_mode='text', conflicts='fail',
strict=False, domains=None, index=False,
index_domains=None):
"""
Write arbitrary data to a file defined by the passed entities and
path patterns.
Args:
entities (dict): A dictionary of entities, with Entity names in
keys and values for the desired file in values.
path_patterns (list): Optional path patterns to use when building
the filename. If None, the Layout-defined patterns will be
used.
contents (object): Contents to write to the generated file path.
Can be any object serializable as text or binary data (as
defined in the content_mode argument).
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (list): List of Domains to scan for path_patterns. Order
determines precedence (i.e., earlier Domains will be scanned
first). If None, all available domains are included.
index (bool): If True, adds the generated file to the current
index using the domains specified in index_domains.
index_domains (list): List of domain names to attach the generated
file to when indexing. Ignored if index == False. If None,
All available domains are used.
"""
path = self.build_path(entities, path_patterns, strict, domains)
if path is None:
raise ValueError("Cannot construct any valid filename for "
"the passed entities given available path "
"patterns.")
write_contents_to_file(path, contents=contents, link_to=link_to,
content_mode=content_mode, conflicts=conflicts,
root=self.root)
if index:
# TODO: Default to using only domains that have at least one
# tagged entity in the generated file.
if index_domains is None:
index_domains = list(self.domains.keys())
self._index_file(self.root, path, index_domains)
def _complete_batch_send(self, resp):
"""Complete the processing of our batch send operation
Clear the deferred tracking our current batch processing
and reset our retry count and retry interval
Return none to eat any errors coming from up the deferred chain
"""
self._batch_send_d = None
self._req_attempts = 0
self._retry_interval = self._init_retry_interval
if isinstance(resp, Failure) and not resp.check(tid_CancelledError,
CancelledError):
log.error("Failure detected in _complete_batch_send: %r\n%r",
resp, resp.getTraceback())
return
def dict_of_numpyarray_to_dict_of_list(d):
'''
Convert dictionary containing numpy arrays to dictionary containing lists
Parameters
----------
d : dict
sli parameter name and value as dictionary key and value pairs
Returns
-------
d : dict
modified dictionary
'''
for key,value in d.iteritems():
if isinstance(value,dict): # if value == dict
# recurse
d[key] = dict_of_numpyarray_to_dict_of_list(value)
elif isinstance(value,np.ndarray): # or isinstance(value,list) :
d[key] = value.tolist()
return d
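The function above uses the Python 2 iteritems(); a hedged Python 3 sketch of the same conversion:

import numpy as np

def to_lists(d):
    for key, value in d.items():
        if isinstance(value, dict):
            d[key] = to_lists(value)
        elif isinstance(value, np.ndarray):
            d[key] = value.tolist()
    return d

print(to_lists({"a": np.arange(3), "b": {"c": np.zeros(2)}}))
# -> {'a': [0, 1, 2], 'b': {'c': [0.0, 0.0]}}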
def evaluate_accuracy(data_iterator, net):
"""Function to evaluate accuracy of any data iterator passed to it as an argument"""
acc = mx.metric.Accuracy()
for data, label in data_iterator:
output = net(data)
predictions = nd.argmax(output, axis=1)
predictions = predictions.reshape((-1, 1))
acc.update(preds=predictions, labels=label)
return acc.get()[1]
def start(self) -> None:
"""Connect websocket to deCONZ."""
if self.config:
self.websocket = self.ws_client(
self.loop, self.session, self.host,
self.config.websocketport, self.async_session_handler)
self.websocket.start()
else:
_LOGGER.error('No deCONZ config available')
def PhyDMSComprehensiveParser():
"""Returns *argparse.ArgumentParser* for ``phdyms_comprehensive`` script."""
parser = ArgumentParserNoArgHelp(description=("Comprehensive phylogenetic "
"model comparison and detection of selection informed by deep "
"mutational scanning data. This program runs 'phydms' repeatedly "
"to compare substitution models and detect selection. "
"{0} Version {1}. Full documentation at {2}").format(
phydmslib.__acknowledgments__, phydmslib.__version__,
phydmslib.__url__),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('outprefix', help='Output file prefix.', type=str)
parser.add_argument('alignment', help='Existing FASTA file with aligned '
'codon sequences.', type=ExistingFile)
parser.add_argument('prefsfiles', help='Existing files with site-specific '
'amino-acid preferences.', type=ExistingFile, nargs='+')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--raxml', help="Path to RAxML (e.g., 'raxml')")
group.add_argument('--tree', type=ExistingFile,
help="Existing Newick file giving input tree.")
parser.add_argument('--ncpus', default=-1, help='Use this many CPUs; -1 '
'means all available.', type=int)
parser.add_argument('--brlen', choices=['scale', 'optimize'],
default='optimize', help=("How to handle branch lengths: "
"scale by single parameter or optimize each one"))
parser.set_defaults(omegabysite=False)
parser.add_argument('--omegabysite', dest='omegabysite',
action='store_true', help="Fit omega (dN/dS) for each site.")
parser.set_defaults(diffprefsbysite=False)
parser.add_argument('--diffprefsbysite', dest='diffprefsbysite',
action='store_true', help="Fit differential preferences for "
"each site.")
parser.set_defaults(gammaomega=False)
parser.add_argument('--gammaomega', dest='gammaomega', action=\
'store_true', help="Fit ExpCM with gamma distributed omega.")
parser.set_defaults(gammabeta=False)
parser.add_argument('--gammabeta', dest='gammabeta', action=\
'store_true', help="Fit ExpCM with gamma distributed beta.")
parser.set_defaults(noavgprefs=False)
parser.add_argument('--no-avgprefs', dest='noavgprefs', action='store_true',
help="No fitting of models with preferences averaged across sites "
"for ExpCM.")
parser.set_defaults(randprefs=False)
parser.add_argument('--randprefs', dest='randprefs', action='store_true',
help="Include ExpCM models with randomized preferences.")
parser.add_argument('-v', '--version', action='version', version=
'%(prog)s {version}'.format(version=phydmslib.__version__))
return parser
def next(self):
"""Move to the next valid locus.
Will only return valid loci or exit via StopIteration exception
"""
while True:
self.cur_idx += 1
if self.__datasource.populate_iteration(self):
return self
raise StopIteration
def _fix_typo(s):
"""M:.-O:.-'M:.-wa.e.-'t.x.-s.y.-', => M:.-O:.-'M:.-wa.e.-'t.-x.-s.y.-',"""
subst, attr, mode = s
return m(subst, attr, script("t.-x.-s.y.-'"))
def exit_and_fail(self, msg=None, out=None):
"""Exits the runtime with a nonzero exit code, indicating failure.
:param msg: A string message to print to stderr or another custom file descriptor before exiting.
(Optional)
:param out: The file descriptor to emit `msg` to. (Optional)
"""
self.exit(result=PANTS_FAILED_EXIT_CODE, msg=msg, out=out)
def host_members(self):
'''Return the members of the host committee.
'''
host = self.host()
if host is None:
return
for member, full_member in host.members_objects:
yield full_member
def xpathNextPreceding(self, cur):
"""Traversal function for the "preceding" direction the
preceding axis contains all nodes in the same document as
the context node that are before the context node in
document order, excluding any ancestors and excluding
attribute nodes and namespace nodes; the nodes are ordered
in reverse document order """
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextPreceding(self._o, cur__o)
if ret is None:raise xpathError('xmlXPathNextPreceding() failed')
__tmp = xmlNode(_obj=ret)
return __tmp
def get_resource_group(access_token, subscription_id, rgname):
'''Get details about the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', rgname,
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token)
def auth(self):
"""
Auth is used to call the AUTH API of CricketAPI.
Access token required for every request call to CricketAPI.
Auth functional will post user Cricket API app details to server
and return the access token.
Return:
Access token
"""
if not self.store_handler.has_value('access_token'):
params = {}
params["access_key"] = self.access_key
params["secret_key"] = self.secret_key
params["app_id"] = self.app_id
params["device_id"] = self.device_id
auth_url = self.api_path + "auth/"
response = self.get_response(auth_url, params, "post")
if 'auth' in response:
self.store_handler.set_value("access_token", response['auth']['access_token'])
self.store_handler.set_value("expires", response['auth']['expires'])
logger.info('Getting new access token')
else:
msg = "Error getting access_token, " + \
"please verify your access_key, secret_key and app_id"
logger.error(msg)
raise Exception("Auth Failed, please check your access details") | Auth is used to call the AUTH API of CricketAPI.
Access token required for every request call to CricketAPI.
Auth functional will post user Cricket API app details to server
and return the access token.
Return:
Access token |
def fcm_send_single_device_data_message(
registration_id,
condition=None,
collapse_key=None,
delay_while_idle=False,
time_to_live=None,
restricted_package_name=None,
low_priority=False,
dry_run=False,
data_message=None,
content_available=None,
api_key=None,
timeout=5,
json_encoder=None):
"""
Send push message to a single device
All arguments correspond to that defined in pyfcm/fcm.py.
Args:
registration_id (str): FCM device registration IDs.
data_message (dict): Data message payload to send alone or with the
notification message
Keyword Args:
collapse_key (str, optional): Identifier for a group of messages
that can be collapsed so that only the last message gets sent
when delivery can be resumed. Defaults to ``None``.
delay_while_idle (bool, optional): If ``True`` indicates that the
message should not be sent until the device becomes active.
time_to_live (int, optional): How long (in seconds) the message
should be kept in FCM storage if the device is offline. The
maximum time to live supported is 4 weeks. Defaults to ``None``
which uses the FCM default of 4 weeks.
low_priority (boolean, optional): Whether to send notification with
the low priority flag. Defaults to ``False``.
restricted_package_name (str, optional): Package name of the
application where the registration IDs must match in order to
receive the message. Defaults to ``None``.
dry_run (bool, optional): If ``True`` no message will be sent but
request will be tested.
timeout (int, optional): set time limit for the request
Returns:
:dict:`multicast_id(long), success(int), failure(int),
canonical_ids(int), results(list)`:
Response from FCM server.
Raises:
AuthenticationError: If :attr:`api_key` is not set or provided or there
is an error authenticating the sender.
FCMServerError: Internal server error or timeout error on Firebase cloud
messaging server
InvalidDataError: Invalid data provided
InternalPackageError: Mostly from changes in the response of FCM,
contact the project owner to resolve the issue
"""
push_service = FCMNotification(
api_key=SETTINGS.get("FCM_SERVER_KEY") if api_key is None else api_key,
json_encoder=json_encoder,
)
return push_service.single_device_data_message(
registration_id=registration_id,
condition=condition,
collapse_key=collapse_key,
delay_while_idle=delay_while_idle,
time_to_live=time_to_live,
restricted_package_name=restricted_package_name,
low_priority=low_priority,
dry_run=dry_run,
data_message=data_message,
content_available=content_available,
timeout=timeout
)
def nltk_tree_to_logical_form(tree: Tree) -> str:
"""
Given an ``nltk.Tree`` representing the syntax tree that generates a logical form, this method
produces the actual (lisp-like) logical form, with all of the non-terminal symbols converted
into the correct number of parentheses.
This is used in the logic that converts action sequences back into logical forms. It's very
unlikely that you will need this anywhere else.
"""
# nltk.Tree actually inherits from `list`, so you use `len()` to get the number of children.
# We're going to be explicit about checking length, instead of using `if tree:`, just to avoid
# any funny business nltk might have done (e.g., it's really odd if `if tree:` evaluates to
# `False` if there's a single leaf node with no children).
if len(tree) == 0: # pylint: disable=len-as-condition
return tree.label()
if len(tree) == 1:
return tree[0].label()
return '(' + ' '.join(nltk_tree_to_logical_form(child) for child in tree) + ')'
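A small usage sketch, assuming nltk is installed and the function above is in scope; the tree is a hypothetical action-sequence tree whose leaf symbols are zero-child Trees:

from nltk import Tree

tree = Tree('fn', [Tree('add', []), Tree('1', []), Tree('2', [])])
print(nltk_tree_to_logical_form(tree))  # -> (add 1 2)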
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
"""Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
"""
now = time.time()
r = []
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
return "\n".join(r+[""]) | Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save |
def cmd_tracker_mode(self, args):
'''set arbitrary mode'''
connection = self.find_connection()
if not connection:
print("No antenna tracker found")
return
mode_mapping = connection.mode_mapping()
if mode_mapping is None:
print('No mode mapping available')
return
if len(args) != 1:
print('Available modes: ', mode_mapping.keys())
return
mode = args[0].upper()
if mode not in mode_mapping:
print('Unknown mode %s: ' % mode)
return
connection.set_mode(mode_mapping[mode])
def setnx(self, key, value):
"""Set the value of a key, only if the key does not exist."""
fut = self.execute(b'SETNX', key, value)
return wait_convert(fut, bool)
def set_check(self, name, state):
'''set a status value'''
if self.child.is_alive():
self.parent_pipe.send(CheckItem(name, state))
def convert_to_unicode( tscii_input ):
""" convert a byte-ASCII encoded string into equivalent Unicode string
in the UTF-8 notation."""
output = list()
prev = None
prev2x = None
# need a look ahead of 2 tokens at least
for char in tscii_input:
## print "%2x"%ord(char) # debugging
if ord(char) < 128 :
# base-ASCII copy to output
output.append( char )
prev = None
prev2x = None
elif ord(char) in TSCII_DIRECT_LOOKUP:
if ( prev in TSCII_PRE_MODIFIER ):
curr_char = [TSCII[ord(char)],TSCII[prev]]
else:
# we are direct lookup char
curr_char = [TSCII[ord(char)]]
char = None
output.extend( curr_char )
elif ( (ord(char) in TSCII_POST_MODIFIER) ):
if ( (prev in TSCII_DIRECT_LOOKUP) and
(prev2x in TSCII_PRE_MODIFIER) ):
if len(output) >= 2:
del output[-1] #we are reducing this token to something new
del output[-2]
elif len(output)==1:
del output[-1]
else:
# nothing to delete here..
pass
output.extend( [TSCII[prev], TSCII[prev2x]] )
else:
print("Warning: malformed TSCII encoded file; skipping characters")
prev = None
char = None
else:
# pass - must be one of the pre/post modifiers
pass
prev2x = prev
if char:
prev = ord(char)
return u"".join(output) | convert a byte-ASCII encoded string into equivalent Unicode string
in the UTF-8 notation. |
def banner(*lines, **kwargs):
"""prints a banner
sep -- string -- the character that will be on the line on the top and bottom
and before any of the lines, defaults to *
width -- integer -- the line width, defaults to the module-level WIDTH
"""
sep = kwargs.get("sep", "*")
count = kwargs.get("width", globals()["WIDTH"])
out(sep * count)
if lines:
out(sep)
for line in lines:
out("{} {}".format(sep, line))
out(sep)
out(sep * count)
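A usage sketch, assuming banner() and its out()/WIDTH helpers exist in the module as shown above:

banner("starting job", "phase 1", sep="=", width=40)
# prints a 40-char '=' rule, then '=', '= starting job', '= phase 1', '=', and a closing rule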
def is_subdir(a, b):
"""
Return true if a is a subdirectory of b
"""
a, b = map(os.path.abspath, [a, b])
return os.path.commonpath([a, b]) == b
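A quick check of is_subdir() on a POSIX-style path layout, assuming the definition above is in scope:

print(is_subdir("/usr/local/bin", "/usr"))  # True: /usr/local/bin lives under /usr
print(is_subdir("/usr", "/usr/local/bin"))  # False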
def _sanitize_parameters(self):
"""
Perform a sanity check on parameters passed in to CFG.__init__().
An AngrCFGError is raised if any parameter fails the sanity check.
:return: None
"""
# Check additional_edges
if isinstance(self._additional_edges, (list, set, tuple)):
new_dict = defaultdict(list)
for s, d in self._additional_edges:
new_dict[s].append(d)
self._additional_edges = new_dict
elif isinstance(self._additional_edges, dict):
pass
else:
raise AngrCFGError('Additional edges can only be a list, set, tuple, or a dict.')
# Check _advanced_backward_slicing
if self._advanced_backward_slicing and self._enable_symbolic_back_traversal:
raise AngrCFGError('Advanced backward slicing and symbolic back traversal cannot both be enabled.')
if self._advanced_backward_slicing and not self._keep_state:
raise AngrCFGError('Keep state must be enabled if advanced backward slicing is enabled.')
# Sanitize avoid_runs
self._avoid_runs = [ ] if self._avoid_runs is None else self._avoid_runs
if not isinstance(self._avoid_runs, (list, set)):
raise AngrCFGError('"avoid_runs" must either be None, or a list or a set.')
self._sanitize_starts()
def mk_subsuper_association(m, r_subsup):
'''
Create pyxtuml associations from a sub/super association in BridgePoint.
'''
r_rel = one(r_subsup).R_REL[206]()
r_rto = one(r_subsup).R_SUPER[212].R_RTO[204]()
target_o_obj = one(r_rto).R_OIR[203].O_OBJ[201]()
for r_sub in many(r_subsup).R_SUB[213]():
r_rgo = one(r_sub).R_RGO[205]()
source_o_obj = one(r_rgo).R_OIR[203].O_OBJ[201]()
source_ids, target_ids = _get_related_attributes(r_rgo, r_rto)
m.define_association(rel_id=r_rel.Numb,
source_kind=source_o_obj.Key_Lett,
target_kind=target_o_obj.Key_Lett,
source_keys=source_ids,
target_keys=target_ids,
source_conditional=True,
target_conditional=False,
source_phrase='',
target_phrase='',
source_many=False,
target_many=False)
def get_template_data(template_file):
"""
Read the template file, parse it as JSON/YAML and return the template as a dictionary.
Parameters
----------
template_file : string
Path to the template to read
Returns
-------
Template data as a dictionary
"""
if not pathlib.Path(template_file).exists():
raise ValueError("Template file not found at {}".format(template_file))
with open(template_file, 'r') as fp:
try:
return yaml_parse(fp.read())
except (ValueError, yaml.YAMLError) as ex:
raise ValueError("Failed to parse template: {}".format(str(ex))) | Read the template file, parse it as JSON/YAML and return the template as a dictionary.
Parameters
----------
template_file : string
Path to the template to read
Returns
-------
Template data as a dictionary |
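yaml_parse above is the project's own wrapper; as a hedged illustration of the parsing step only, plain yaml.safe_load behaves similarly for simple templates:

import yaml

template_text = "Resources:\n  MyFunction:\n    Type: AWS::Serverless::Function"
print(yaml.safe_load(template_text))
# -> {'Resources': {'MyFunction': {'Type': 'AWS::Serverless::Function'}}}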
def _parse_request(self, schema, req, locations):
"""Return a parsed arguments dictionary for the current request."""
if schema.many:
assert (
"json" in locations
), "schema.many=True is only supported for JSON location"
# The ad hoc Nested field is more like a workaround or a helper,
# and it servers its purpose fine. However, if somebody has a desire
# to re-design the support of bulk-type arguments, go ahead.
parsed = self.parse_arg(
name="json",
field=ma.fields.Nested(schema, many=True),
req=req,
locations=locations,
)
if parsed is missing:
parsed = []
else:
argdict = schema.fields
parsed = {}
for argname, field_obj in iteritems(argdict):
if MARSHMALLOW_VERSION_INFO[0] < 3:
parsed_value = self.parse_arg(argname, field_obj, req, locations)
# If load_from is specified on the field, try to parse from that key
if parsed_value is missing and field_obj.load_from:
parsed_value = self.parse_arg(
field_obj.load_from, field_obj, req, locations
)
argname = field_obj.load_from
else:
argname = field_obj.data_key or argname
parsed_value = self.parse_arg(argname, field_obj, req, locations)
if parsed_value is not missing:
parsed[argname] = parsed_value
return parsed
def get_normal_draws(num_mixers,
num_draws,
num_vars,
seed=None):
"""
Parameters
----------
num_mixers : int.
Should be greater than zero. Denotes the number of observations for
which we are making draws from a normal distribution. I.e., the
number of observations with randomly distributed coefficients.
num_draws : int.
Should be greater than zero. Denotes the number of draws that are to be
made from each normal distribution.
num_vars : int.
Should be greater than zero. Denotes the number of variables for which
we need to take draws from the normal distribution.
seed : int or None, optional.
If an int is passed, it should be greater than zero. Denotes the value
to be used in seeding the random generator used to generate the draws
from the normal distribution. Default == None.
Returns
-------
all_draws : list of 2D ndarrays.
The list will have num_vars elements. Each element will be a num_obs by
num_draws numpy array of draws from a normal distribution with mean
zero and standard deviation of one.
"""
# Check the validity of the input arguments
assert all([isinstance(x, int) for x in [num_mixers, num_draws, num_vars]])
assert all([x > 0 for x in [num_mixers, num_draws, num_vars]])
if seed is not None:
assert isinstance(seed, int) and seed > 0
normal_dist = scipy.stats.norm(loc=0.0, scale=1.0)
all_draws = []
if seed:
np.random.seed(seed)
for i in xrange(num_vars):
all_draws.append(normal_dist.rvs(size=(num_mixers, num_draws)))
return all_draws
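A hedged usage sketch, assuming the function above is importable and that the Python 2 style xrange it uses is available (e.g., via the compatibility imports the original module relies on):

draws = get_normal_draws(num_mixers=10, num_draws=250, num_vars=3, seed=1)
print(len(draws))       # -> 3 (one array per variable)
print(draws[0].shape)   # -> (10, 250)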
def create_and_register_access97_db(filename: str,
dsn: str,
description: str) -> bool:
"""
(Windows only.)
Creates a Microsoft Access 97 database and registers it with ODBC.
Args:
filename: filename of the database to create
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created?
"""
fullfilename = os.path.abspath(filename)
create_string = fullfilename + " General"
# ... filename, space, sort order ("General" for English)
return (create_user_dsn(access_driver, CREATE_DB3=create_string) and
register_access_db(filename, dsn, description))
def domain_create(auth=None, **kwargs):
'''
Create a domain
CLI Example:
.. code-block:: bash
salt '*' keystoneng.domain_create name=domain1
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_domain(**kwargs)
def Softmax(x, params, axis=-1, **kwargs):
"""Apply softmax to x: exponentiate and normalize along the given axis."""
del params, kwargs
return np.exp(x - backend.logsumexp(x, axis, keepdims=True))
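backend.logsumexp is the library's own helper; a hedged NumPy/SciPy sketch of the same numerically stable softmax:

import numpy as np
from scipy.special import logsumexp

def softmax_np(x, axis=-1):
    # subtracting logsumexp keeps the exponentials from overflowing
    return np.exp(x - logsumexp(x, axis=axis, keepdims=True))

x = np.array([[1.0, 2.0, 3.0], [10.0, 10.0, 10.0]])
print(softmax_np(x).sum(axis=-1))  # each row sums to 1.0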
def ParseRow(self, parser_mediator, row_offset, row):
"""Parses a line of the log file and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row_offset (int): line number of the row.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
"""
timestamp = self._ParseTimestamp(parser_mediator, row)
if timestamp is None:
return
event_data = TrendMicroUrlEventData()
event_data.offset = row_offset
# Convert and store integer values.
for field in (
'credibility_rating', 'credibility_score', 'policy_identifier',
'threshold', 'block_mode'):
try:
value = int(row[field], 10)
except (ValueError, TypeError):
value = None
setattr(event_data, field, value)
# Store string values.
for field in ('url', 'group_name', 'group_code', 'application_name', 'ip'):
setattr(event_data, field, row[field])
event = time_events.DateTimeValuesEvent(
timestamp, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
def _create_cell(args, cell_body):
"""Implements the pipeline cell create magic used to create Pipeline objects.
The supported syntax is:
%%pipeline create <args>
[<inline YAML>]
Args:
args: the arguments following '%%pipeline create'.
cell_body: the contents of the cell
"""
name = args.get('name')
if name is None:
raise Exception("Pipeline name was not specified.")
pipeline_spec = google.datalab.utils.commands.parse_config(
cell_body, google.datalab.utils.commands.notebook_environment())
airflow_spec = google.datalab.contrib.pipeline._pipeline.PipelineGenerator.generate_airflow_spec(
name, pipeline_spec)
debug = args.get('debug')
if debug is True:
return airflow_spec
def remove_attr(self, attr):
"""Removes an attribute."""
self._stable = False
self.attrs.pop(attr, None)
return self
def init_widget(self):
""" Initialize the underlying widget.
"""
super(AndroidChronometer, self).init_widget()
w = self.widget
w.setOnChronometerTickListener(w.getId())
w.onChronometerTick.connect(self.on_chronometer_tick)
def getlist(self, section, option):
"""Read a list of strings.
The value of `section` and `option` is treated as a comma- and newline-
separated list of strings. Each value is stripped of whitespace.
Returns the list of strings.
"""
value_list = self.get(section, option)
values = []
for value_line in value_list.split('\n'):
for value in value_line.split(','):
value = value.strip()
if value:
values.append(value)
return values
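The splitting logic in isolation, as a hedged sketch with a made-up raw value (the real method reads it via self.get(section, option)):

raw = "alpha, beta\n gamma,\ndelta"
values = []
for value_line in raw.split('\n'):
    for value in value_line.split(','):
        value = value.strip()
        if value:
            values.append(value)
print(values)  # -> ['alpha', 'beta', 'gamma', 'delta']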
def yn_prompt(text):
'''
Takes the text prompt, and presents it, takes only "y" or "n" for
answers, and returns True or False. Repeats itself on bad input.
'''
text = "\n"+ text + "\n('y' or 'n'): "
while True:
answer = input(text).strip()
if answer != 'y' and answer != 'n':
continue
elif answer == 'y':
return True
elif answer == 'n':
return False
def find_declaration(self):
"""
Find the most likely function declaration from the embedded collection of prototypes, set it to self.prototype,
and update self.calling_convention with the declaration.
:return: None
"""
# determine the library name
if not self.is_plt:
binary_name = self.binary_name
if binary_name not in SIM_LIBRARIES:
return
else:
binary_name = None
# PLT entries must have the same declaration as their jump targets
# Try to determine which library this PLT entry will jump to
edges = self.transition_graph.edges()
node = next(iter(edges))[1]
if len(edges) == 1 and (type(node) is HookNode or type(node) is SyscallNode):
target = node.addr
if target in self._function_manager:
target_func = self._function_manager[target]
binary_name = target_func.binary_name
if binary_name is None:
return
library = SIM_LIBRARIES.get(binary_name, None)
if library is None:
return
if not library.has_prototype(self.name):
return
proto = library.prototypes[self.name]
self.prototype = proto
if self.calling_convention is not None:
self.calling_convention.args = None
self.calling_convention.func_ty = proto
def _reset_problem_type(self):
"""Reset problem type to whatever is appropriate."""
# Only need to reset the type after the first solve. This also works
# around a bug in Cplex where get_num_binary() is some rare cases
# causes a segfault.
if self._solve_count > 0:
integer_count = 0
for func in (self._cp.variables.get_num_binary,
self._cp.variables.get_num_integer,
self._cp.variables.get_num_semicontinuous,
self._cp.variables.get_num_semiinteger):
integer_count += func()
integer = integer_count > 0
quad_constr = self._cp.quadratic_constraints.get_num() > 0
quad_obj = self._cp.objective.get_num_quadratic_variables() > 0
if not integer:
if quad_constr:
new_type = self._cp.problem_type.QCP
elif quad_obj:
new_type = self._cp.problem_type.QP
else:
new_type = self._cp.problem_type.LP
else:
if quad_constr:
new_type = self._cp.problem_type.MIQCP
elif quad_obj:
new_type = self._cp.problem_type.MIQP
else:
new_type = self._cp.problem_type.MILP
logger.debug('Setting problem type to {}...'.format(
self._cp.problem_type[new_type]))
self._cp.set_problem_type(new_type)
else:
logger.debug('Problem type is {}'.format(
self._cp.problem_type[self._cp.get_problem_type()]))
# Force QP/MIQP solver to look for global optimum. We set it here only
# for QP/MIQP problems to avoid the warnings generated for other
# problem types when this parameter is set.
quad_obj = self._cp.objective.get_num_quadratic_variables() > 0
if hasattr(self._cp.parameters, 'optimalitytarget'):
target_param = self._cp.parameters.optimalitytarget
else:
target_param = self._cp.parameters.solutiontarget
if quad_obj:
target_param.set(target_param.values.optimal_global)
else:
target_param.set(target_param.values.auto)
def dict_contents(self, use_dict=None, as_class=None):
"""Return the contents of an object as a dict."""
if _debug: _log.debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
# exception to the rule of returning a dict
return str(self)
def rdf_source(self, aformat="turtle"):
"""
Serialize graph using the format required
"""
if aformat and aformat not in self.SUPPORTED_FORMATS:
return "Sorry. Allowed formats are %s" % str(self.SUPPORTED_FORMATS)
if aformat == "dot":
return self.__serializedDot()
else:
# use stardard rdf serializations
return self.rdflib_graph.serialize(format=aformat)
def find_xml_all(cls, url, markup, tag, pattern):
"""
find xml(list)
:param url: contents url
:param markup: markup provider
:param tag: find tag
:param pattern: xml file pattern
:return: BeautifulSoup object list
"""
body = cls.find_xml(url, markup)
return body.find_all(tag, href=re.compile(pattern))
def seq_minibatches(inputs, targets, batch_size, seq_length, stride=1):
"""Generate a generator that return a batch of sequence inputs and targets.
If `batch_size=100` and `seq_length=5`, one return will have 500 rows (examples).
Parameters
----------
inputs : numpy.array
The input features, every row is an example.
targets : numpy.array
The labels of inputs, every element is an example.
batch_size : int
The batch size.
seq_length : int
The sequence length.
stride : int
The stride step, default is 1.
Examples
--------
Synced sequence input and output.
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> y = np.asarray([0, 1, 2, 3, 4, 5])
>>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=y, batch_size=2, seq_length=2, stride=1):
>>> print(batch)
(array([['a', 'a'], ['b', 'b'], ['b', 'b'], ['c', 'c']], dtype='<U1'), array([0, 1, 1, 2]))
(array([['c', 'c'], ['d', 'd'], ['d', 'd'], ['e', 'e']], dtype='<U1'), array([2, 3, 3, 4]))
Many to One
>>> return_last = True
>>> num_steps = 2
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> Y = np.asarray([0,1,2,3,4,5])
>>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=Y, batch_size=2, seq_length=num_steps, stride=1):
>>> x, y = batch
>>> if return_last:
>>> tmp_y = y.reshape((-1, num_steps) + y.shape[1:])
>>> y = tmp_y[:, -1]
>>> print(x, y)
[['a' 'a']
['b' 'b']
['b' 'b']
['c' 'c']] [1 2]
[['c' 'c']
['d' 'd']
['d' 'd']
['e' 'e']] [3 4]
"""
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
n_loads = (batch_size * stride) + (seq_length - stride)
for start_idx in range(0, len(inputs) - n_loads + 1, (batch_size * stride)):
seq_inputs = np.zeros((batch_size, seq_length) + inputs.shape[1:], dtype=inputs.dtype)
seq_targets = np.zeros((batch_size, seq_length) + targets.shape[1:], dtype=targets.dtype)
for b_idx in range(batch_size):
start_seq_idx = start_idx + (b_idx * stride)
end_seq_idx = start_seq_idx + seq_length
seq_inputs[b_idx] = inputs[start_seq_idx:end_seq_idx]
seq_targets[b_idx] = targets[start_seq_idx:end_seq_idx]
flatten_inputs = seq_inputs.reshape((-1, ) + inputs.shape[1:])
flatten_targets = seq_targets.reshape((-1, ) + targets.shape[1:])
yield flatten_inputs, flatten_targets | Generate a generator that return a batch of sequence inputs and targets.
If `batch_size=100` and `seq_length=5`, one return will have 500 rows (examples).
Parameters
----------
inputs : numpy.array
The input features, every row is a example.
targets : numpy.array
The labels of inputs, every element is a example.
batch_size : int
The batch size.
seq_length : int
The sequence length.
stride : int
The stride step, default is 1.
Examples
--------
Synced sequence input and output.
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> y = np.asarray([0, 1, 2, 3, 4, 5])
>>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=y, batch_size=2, seq_length=2, stride=1):
>>> print(batch)
(array([['a', 'a'], ['b', 'b'], ['b', 'b'], ['c', 'c']], dtype='<U1'), array([0, 1, 1, 2]))
(array([['c', 'c'], ['d', 'd'], ['d', 'd'], ['e', 'e']], dtype='<U1'), array([2, 3, 3, 4]))
Many to One
>>> return_last = True
>>> num_steps = 2
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> Y = np.asarray([0,1,2,3,4,5])
>>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=Y, batch_size=2, seq_length=num_steps, stride=1):
>>> x, y = batch
>>> if return_last:
>>> tmp_y = y.reshape((-1, num_steps) + y.shape[1:])
>>> y = tmp_y[:, -1]
>>> print(x, y)
[['a' 'a']
['b' 'b']
['b' 'b']
['c' 'c']] [1 2]
[['c' 'c']
['d' 'd']
['d' 'd']
['e' 'e']] [3 4] |
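A short worked check of the index arithmetic used above (not part of the original entry): for the first docstring example, len(inputs) = 6, batch_size = 2, seq_length = 2, stride = 1, so
# n_loads is the number of input rows one batch window consumes
n_loads = (2 * 1) + (2 - 1)                # = 3
starts = list(range(0, 6 - 3 + 1, 2 * 1))  # [0, 2] -> exactly the two batches shown above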
def filter_for_ignored_ext(result, ignored_ext, ignore_copying):
"""
Will filter if instructed to do so the result to remove matching criteria
:param result: list of dicts returned by Snakebite ls
:type result: list[dict]
:param ignored_ext: list of ignored extensions
:type ignored_ext: list
:param ignore_copying: shall we ignore copying?
:type ignore_copying: bool
:return: list of dicts which were not removed
:rtype: list[dict]
"""
if ignore_copying:
log = LoggingMixin().log
regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext)
ignored_extensions_regex = re.compile(regex_builder)
log.debug(
'Filtering result for ignored extensions: %s in files %s',
ignored_extensions_regex.pattern, map(lambda x: x['path'], result)
)
result = [x for x in result if not ignored_extensions_regex.match(x['path'])]
log.debug('HdfsSensor.poke: after ext filter result is %s', result)
return result | Will filter if instructed to do so the result to remove matching criteria
:param result: list of dicts returned by Snakebite ls
:type result: list[dict]
:param ignored_ext: list of ignored extensions
:type ignored_ext: list
:param ignore_copying: shall we ignore copying?
:type ignore_copying: bool
:return: list of dicts which were not removed
:rtype: list[dict] |
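A minimal usage sketch, assuming the Airflow helper above is importable; the ls-style dicts are hypothetical:
result = [
    {'path': '/data/part-00000'},
    {'path': '/data/part-00001._COPYING_'},
    {'path': '/data/_SUCCESS.tmp'},
]
# With ignore_copying=True both the ._COPYING_ and .tmp entries are filtered out.
print(filter_for_ignored_ext(result, ['_COPYING_', 'tmp'], ignore_copying=True))
# [{'path': '/data/part-00000'}]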
def save(self, ts):
"""
Save timestamp to file.
"""
with open(self, 'w') as f:
Timestamp.wrap(ts).dump(f) | Save timestamp to file. |
def _cur_band_filled(self):
"""Checks if the current band is filled.
The size of the current band should be equal to s_max_1"""
cur_band = self._hyperbands[self._state["band_idx"]]
return len(cur_band) == self._s_max_1 | Checks if the current band is filled.
The size of the current band should be equal to s_max_1 |
def extract(cls, padded):
"""
Removes the surrounding "@{...}" from the name.
:param padded: the padded string
:type padded: str
:return: the extracted name
:rtype: str
"""
if padded.startswith("@{") and padded.endswith("}"):
return padded[2:len(padded)-1]
else:
return padded | Removes the surrounding "@{...}" from the name.
:param padded: the padded string
:type padded: str
:return: the extracted name
:rtype: str |
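The same stripping logic as a free function, for illustration only:
def extract(padded):
    # Remove a surrounding "@{...}" wrapper; anything else passes through unchanged.
    if padded.startswith("@{") and padded.endswith("}"):
        return padded[2:len(padded) - 1]
    return padded

print(extract("@{outdir}"))   # outdir
print(extract("plain_name"))  # plain_name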
def remove(self, data):
"""
Removes a data node from the list. If the list contains more than one
node having the same data that shall be removed, then the node having
the first occurrence of the data is removed.
:param data: the data to be removed in the new list node
:type data: object
"""
current_node = self._first_node
deleted = False
if self._size == 0:
return
if data == current_node.data():
# case 1: the list has only one item
if current_node.next() is None:
self._first_node = LinkedListNode(None, None)
self._last_node = self._first_node
self._size = 0
return
# case 2: the list has more than one item
current_node = current_node.next()
self._first_node = current_node
self._size -= 1
return
while True:
if current_node is None:
deleted = False
break
# Check next element's data
next_node = current_node.next()
if next_node is not None:
if data == next_node.data():
next_next_node = next_node.next()
current_node.update_next(next_next_node)
next_node = None
deleted = True
break
current_node = current_node.next()
if deleted:
self._size -= 1 | Removes a data node from the list. If the list contains more than one
node having the same data that shall be removed, then the node having
the first occurrence of the data is removed.
:param data: the data to be removed in the new list node
:type data: object |
def enqueue(self, item_type, item):
"""Queue a new data item, make item iterable"""
with self.enlock:
self.queue[item_type].append(item) | Queue a new data item, make item iterable |
def astra_projector(vol_interp, astra_vol_geom, astra_proj_geom, ndim, impl):
"""Create an ASTRA projector configuration dictionary.
Parameters
----------
vol_interp : {'nearest', 'linear'}
Interpolation type of the volume discretization. This determines
the projection model that is chosen.
astra_vol_geom : dict
ASTRA volume geometry.
astra_proj_geom : dict
ASTRA projection geometry.
ndim : {2, 3}
Number of dimensions of the projector.
impl : {'cpu', 'cuda'}
Implementation of the projector.
Returns
-------
proj_id : int
Handle for the created ASTRA internal projector object.
"""
if vol_interp not in ('nearest', 'linear'):
raise ValueError("`vol_interp` '{}' not understood"
''.format(vol_interp))
impl = str(impl).lower()
if impl not in ('cpu', 'cuda'):
raise ValueError("`impl` '{}' not understood"
''.format(impl))
if 'type' not in astra_proj_geom:
raise ValueError('invalid projection geometry dict {}'
''.format(astra_proj_geom))
if ndim == 3 and impl == 'cpu':
raise ValueError('3D projectors not supported on CPU')
ndim = int(ndim)
proj_type = astra_proj_geom['type']
if proj_type not in ('parallel', 'fanflat', 'fanflat_vec',
'parallel3d', 'parallel3d_vec', 'cone', 'cone_vec'):
raise ValueError('invalid geometry type {!r}'.format(proj_type))
# Mapping from interpolation type and geometry to ASTRA projector type.
# "I" means probably mathematically inconsistent. Some projectors are
# not implemented, e.g. CPU 3d projectors in general.
type_map_cpu = {'parallel': {'nearest': 'line',
'linear': 'linear'}, # I
'fanflat': {'nearest': 'line_fanflat',
'linear': 'line_fanflat'}, # I
'parallel3d': {'nearest': 'linear3d', # I
'linear': 'linear3d'}, # I
'cone': {'nearest': 'linearcone', # I
'linear': 'linearcone'}} # I
type_map_cpu['fanflat_vec'] = type_map_cpu['fanflat']
type_map_cpu['parallel3d_vec'] = type_map_cpu['parallel3d']
type_map_cpu['cone_vec'] = type_map_cpu['cone']
# GPU algorithms not necessarily require a projector, but will in future
# releases making the interface more coherent regarding CPU and GPU
type_map_cuda = {'parallel': 'cuda', # I
'parallel3d': 'cuda3d'} # I
type_map_cuda['fanflat'] = type_map_cuda['parallel']
type_map_cuda['fanflat_vec'] = type_map_cuda['fanflat']
type_map_cuda['cone'] = type_map_cuda['parallel3d']
type_map_cuda['parallel3d_vec'] = type_map_cuda['parallel3d']
type_map_cuda['cone_vec'] = type_map_cuda['cone']
# create config dict
proj_cfg = {}
if impl == 'cpu':
proj_cfg['type'] = type_map_cpu[proj_type][vol_interp]
else: # impl == 'cuda'
proj_cfg['type'] = type_map_cuda[proj_type]
proj_cfg['VolumeGeometry'] = astra_vol_geom
proj_cfg['ProjectionGeometry'] = astra_proj_geom
proj_cfg['options'] = {}
# Add the hacky 1/r^2 weighting exposed in intermediate versions of
# ASTRA
if (proj_type in ('cone', 'cone_vec') and
astra_supports('cone3d_hacky_density_weighting')):
proj_cfg['options']['DensityWeighting'] = True
if ndim == 2:
return astra.projector.create(proj_cfg)
else:
return astra.projector3d.create(proj_cfg) | Create an ASTRA projector configuration dictionary.
Parameters
----------
vol_interp : {'nearest', 'linear'}
Interpolation type of the volume discretization. This determines
the projection model that is chosen.
astra_vol_geom : dict
ASTRA volume geometry.
astra_proj_geom : dict
ASTRA projection geometry.
ndim : {2, 3}
Number of dimensions of the projector.
impl : {'cpu', 'cuda'}
Implementation of the projector.
Returns
-------
proj_id : int
Handle for the created ASTRA internal projector object. |
def capture_output(self, with_hook=True):
"""Steal stream output, return them in string, restore them"""
self.hooked = ''
def display_hook(obj):
# That's some dirty hack
self.hooked += self.safe_better_repr(obj)
self.last_obj = obj
stdout, stderr = sys.stdout, sys.stderr
if with_hook:
d_hook = sys.displayhook
sys.displayhook = display_hook
sys.stdout, sys.stderr = StringIO(), StringIO()
out, err = [], []
try:
yield out, err
finally:
out.extend(sys.stdout.getvalue().splitlines())
err.extend(sys.stderr.getvalue().splitlines())
if with_hook:
sys.displayhook = d_hook
sys.stdout, sys.stderr = stdout, stderr | Steal stream output, return it as strings, restore the streams
def process_placeholder_image(self):
"""
Process the field's placeholder image.
Ensures the placeholder image has been saved to the same storage class
as the field in a top level folder with a name specified by
settings.VERSATILEIMAGEFIELD_SETTINGS['placeholder_directory_name']
This should be called by the VersatileImageFileDescriptor __get__.
If self.placeholder_image_name is already set it just returns right away.
"""
if self.placeholder_image_name:
return
placeholder_image_name = None
placeholder_image = self.placeholder_image
if placeholder_image:
if isinstance(placeholder_image, OnStoragePlaceholderImage):
name = placeholder_image.path
else:
name = placeholder_image.image_data.name
placeholder_image_name = os.path.join(
VERSATILEIMAGEFIELD_PLACEHOLDER_DIRNAME, name
)
if not self.storage.exists(placeholder_image_name):
self.storage.save(
placeholder_image_name,
placeholder_image.image_data
)
self.placeholder_image_name = placeholder_image_name | Process the field's placeholder image.
Ensures the placeholder image has been saved to the same storage class
as the field in a top level folder with a name specified by
settings.VERSATILEIMAGEFIELD_SETTINGS['placeholder_directory_name']
This should be called by the VersatileImageFileDescriptor __get__.
If self.placeholder_image_name is already set it just returns right away. |
def search_related(self, request):
logger.debug("Cache Search Request")
if self.cache.is_empty() is True:
logger.debug("Empty Cache")
return None
"""
extracting everything from the cache
"""
result = []
items = list(self.cache.cache.items())
for key, item in items:
element = self.cache.get(item.key)
logger.debug("Element : {elm}".format(elm=str(element)))
if request.proxy_uri == element.uri:
result.append(item)
return result | extracting everything from the cache |
def random_string(length=6, alphabet=string.ascii_letters+string.digits):
"""
Return a random string of given length and alphabet.
Default alphabet is url-friendly (base62).
"""
return ''.join([random.choice(alphabet) for i in range(length)]) | Return a random string of given length and alphabet.
Default alphabet is url-friendly (base62). |
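A quick usage sketch, assuming the function above is importable; outputs are random and shown only as examples:
import string

print(random_string())                      # e.g. 'aZ3kQ9'  (6 chars, base62)
print(random_string(10, string.hexdigits))  # e.g. '3fA9c01bDe'
print(random_string(4, string.digits))      # e.g. '4821'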
def authenticate_server(self, response):
"""
Uses GSSAPI to authenticate the server.
Returns True on success, False on failure.
"""
log.debug("authenticate_server(): Authenticate header: {0}".format(
_negotiate_value(response)))
host = urlparse(response.url).hostname
try:
# If this is set pass along the struct to Kerberos
if self.cbt_struct:
result = kerberos.authGSSClientStep(self.context[host],
_negotiate_value(response),
channel_bindings=self.cbt_struct)
else:
result = kerberos.authGSSClientStep(self.context[host],
_negotiate_value(response))
except kerberos.GSSError:
log.exception("authenticate_server(): authGSSClientStep() failed:")
return False
if result < 1:
log.error("authenticate_server(): authGSSClientStep() failed: "
"{0}".format(result))
return False
log.debug("authenticate_server(): returning {0}".format(response))
return True | Uses GSSAPI to authenticate the server.
Returns True on success, False on failure. |
def get_jobs_from_argument(self, raw_job_string):
""" return a list of jobs corresponding to the raw_job_string """
jobs = []
if raw_job_string.startswith(":"):
job_keys = raw_job_string.strip(" :")
jobs.extend([job for job in self.jobs(job_keys)])
# we assume a job code
else:
assert "/" in raw_job_string, "Job Code {0} is improperly formatted!".format(raw_job_string)
host, job_name = raw_job_string.rsplit("/", 1)
host_url = self._config_dict.get(host, {}).get('url', host)
host = self.get_host(host_url)
if host.has_job(job_name):
jobs.append(JenksJob(None, host, job_name,
lambda: self._get_job_api_instance(host_url, job_name)))
else:
raise JenksDataException(
"Could not find Job {0}/{1}!".format(host, job_name))
return jobs | return a list of jobs corresponding to the raw_job_string |
def get_all_events(self):
"""Gather all event IDs in the REACH output by type.
These IDs are stored in the self.all_events dict.
"""
self.all_events = {}
events = self.tree.execute("$.events.frames")
if events is None:
return
for e in events:
event_type = e.get('type')
frame_id = e.get('frame_id')
try:
self.all_events[event_type].append(frame_id)
except KeyError:
self.all_events[event_type] = [frame_id] | Gather all event IDs in the REACH output by type.
These IDs are stored in the self.all_events dict. |
def chisquare(n_ij, weighted):
"""
Calculates the chisquare for a matrix of ind_v x dep_v
for the unweighted and SPSS weighted case
"""
if weighted:
m_ij = n_ij / n_ij
nan_mask = np.isnan(m_ij)
m_ij[nan_mask] = 0.000001 # otherwise it breaks the chi-squared test
w_ij = m_ij
n_ij_col_sum = n_ij.sum(axis=1)
n_ij_row_sum = n_ij.sum(axis=0)
alpha, beta, eps = (1, 1, 1)
while eps > 10e-6:
alpha = alpha * np.vstack(n_ij_col_sum / m_ij.sum(axis=1))
beta = n_ij_row_sum / (alpha * w_ij).sum(axis=0)
eps = np.max(np.absolute(w_ij * alpha * beta - m_ij))
m_ij = w_ij * alpha * beta
else:
m_ij = (np.vstack(n_ij.sum(axis=1)) * n_ij.sum(axis=0)) / n_ij.sum().astype(float)
dof = (n_ij.shape[0] - 1) * (n_ij.shape[1] - 1)
chi, p_val = stats.chisquare(n_ij, f_exp=m_ij, ddof=n_ij.size - 1 - dof, axis=None)
return (chi, p_val, dof) | Calculates the chisquare for a matrix of ind_v x dep_v
for the unweighted and SPSS weighted case |
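A small self-contained check of the unweighted expected-frequency step, using a hypothetical 2x2 table:
import numpy as np

n_ij = np.array([[10., 20.],
                 [30., 40.]])

# Expected counts under independence: row_total * column_total / grand_total.
m_ij = (np.vstack(n_ij.sum(axis=1)) * n_ij.sum(axis=0)) / n_ij.sum()
print(m_ij)
# [[12. 18.]
#  [28. 42.]]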
def Cp(self, T):
"""
Calculate the heat capacity of the compound phase.
:param T: [K] temperature
:returns: [J/mol/K] Heat capacity.
"""
result = 0.0
for c, e in zip(self._coefficients, self._exponents):
result += c*T**e
return result | Calculate the heat capacity of the compound phase.
:param T: [K] temperature
:returns: [J/mol/K] Heat capacity. |
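A minimal standalone sketch of the Cp polynomial evaluation, with made-up coefficients and exponents:
coefficients = [86.2, 0.0125, -1.2e6]   # hypothetical values
exponents = [0, 1, -2]                  # Cp(T) = 86.2 + 0.0125*T - 1.2e6/T**2

T = 1000.0  # K
Cp = sum(c * T**e for c, e in zip(coefficients, exponents))
print(Cp)   # 97.5 J/mol/K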
def url_join(base, *args):
"""
Helper function to join an arbitrary number of url segments together.
"""
scheme, netloc, path, query, fragment = urlsplit(base)
path = path if len(path) else "/"
path = posixpath.join(path, *[('%s' % x) for x in args])
return urlunsplit([scheme, netloc, path, query, fragment]) | Helper function to join an arbitrary number of url segments together. |
def startGraph(self):
"""Starts RDF graph and bing namespaces"""
g = r.Graph()
g.namespace_manager.bind("rdf", r.namespace.RDF)
g.namespace_manager.bind("foaf", r.namespace.FOAF)
g.namespace_manager.bind("xsd", r.namespace.XSD)
g.namespace_manager.bind("opa", "http://purl.org/socialparticipation/opa/")
g.namespace_manager.bind("ops", "http://purl.org/socialparticipation/ops/")
g.namespace_manager.bind("wsg", "http://www.w3.org/2003/01/geo/wgs84_pos#")
g.namespace_manager.bind("dc2", "http://purl.org/dc/elements/1.1/")
g.namespace_manager.bind("dc", "http://purl.org/dc/terms/")
g.namespace_manager.bind("sioc", "http://rdfs.org/sioc/ns#")
g.namespace_manager.bind("tsioc", "http://rdfs.org/sioc/types#")
g.namespace_manager.bind("schema", "http://schema.org/")
g.namespace_manager.bind("part", "http://participa.br/")
self.g=g | Starts RDF graph and binds namespaces
def GetDirections(self, origin, destination, sensor = False, mode = None, waypoints = None, alternatives = None, avoid = None, language = None, units = None,
region = None, departure_time = None, arrival_time = None):
'''Get Directions Service
Please refer to the Google Maps Web API for details of the remaining parameters
'''
params = {
'origin': origin,
'destination': destination,
'sensor': str(sensor).lower()
}
if mode:
params['mode'] = mode
if waypoints:
params['waypoints'] = waypoints
if alternatives:
params['alternatives'] = alternatives
if avoid:
params['avoid'] = avoid
if language:
params['language'] = language
if units:
params['units'] = units
if region:
params['region'] = region
if departure_time:
params['departure_time'] = departure_time
if arrival_time:
params['arrival_time'] = arrival_time
if not self.premier:
url = self.get_url(params)
else:
url = self.get_signed_url(params)
return self.GetService_url(url) | Get Directions Service
Please refer to the Google Maps Web API for details of the remaining parameters
def ToLatLng(self):
"""
Returns that latitude and longitude that this point represents
under a spherical Earth model.
"""
rad_lat = math.atan2(self.z, math.sqrt(self.x * self.x + self.y * self.y))
rad_lng = math.atan2(self.y, self.x)
return (rad_lat * 180.0 / math.pi, rad_lng * 180.0 / math.pi) | Returns that latitude and longitude that this point represents
under a spherical Earth model. |
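Two sanity checks for the conversion above; the class name Point is hypothetical and stands for whatever class defines ToLatLng:
# (1, 0, 0) is on the equator at the prime meridian, (0, 0, 1) is the north pole.
print(Point(1, 0, 0).ToLatLng())   # (0.0, 0.0)
print(Point(0, 0, 1).ToLatLng())   # (90.0, 0.0)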
def bind(self, prefix, namespace, *args, **kwargs):
""" Extends the function to add an attribute to the class for each
added namespace to allow for use of dot notation. All prefixes are
converted to lowercase
Args:
prefix: string of namespace name
namespace: rdflib.namespace instance
kwargs:
calc: whether or not create the lookup reference dictionaries
Example usage:
RdfNsManager.rdf.type =>
http://www.w3.org/1999/02/22-rdf-syntax-ns#type
"""
# RdfNamespace(prefix, namespace, **kwargs)
setattr(self, prefix, RdfNamespace(prefix, namespace, **kwargs))
if kwargs.pop('calc', True):
self.__make_dicts__ | Extends the function to add an attribute to the class for each
added namespace to allow for use of dot notation. All prefixes are
converted to lowercase
Args:
prefix: string of namespace name
namespace: rdflib.namespace instance
kwargs:
calc: whether or not create the lookup reference dictionaries
Example usage:
RdfNsManager.rdf.type =>
http://www.w3.org/1999/02/22-rdf-syntax-ns#type |
def xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spin2x, spin2y):
"""Returns the effective precession spin argument for the smaller mass.
This function assumes it's given spins of the secondary mass.
"""
q = q_from_mass1_mass2(mass1, mass2)
a1 = 2 + 3 * q / 2
a2 = 2 + 3 / (2 * q)
return a1 / (q**2 * a2) * chi_perp_from_spinx_spiny(spin2x, spin2y) | Returns the effective precession spin argument for the smaller mass.
This function assumes it's given spins of the secondary mass. |
def log(self, branch, remote):
""" Call a log-command, if set by git-up.fetch.all. """
log_hook = self.settings['rebase.log-hook']
if log_hook:
if ON_WINDOWS: # pragma: no cover
# Running a string in CMD from Python is not that easy on
# Windows. Running 'cmd /C log_hook' produces problems when
# using multiple statements or things like 'echo'. Therefore,
# we write the string to a bat file and execute it.
# In addition, we replace occurrences of $1 with %1 and so forth
# in case the user is used to Bash or sh.
# If there are occurrences of %something, we'll replace it with
# %%something. This is the case when running something like
# 'git log --pretty=format:"%Cred%h..."'.
# Also, we replace a semicolon with a newline, because if you
# start with 'echo' on Windows, it will simply echo the
# semicolon and the commands behind instead of echoing and then
# running other commands
# Prepare log_hook
log_hook = re.sub(r'\$(\d+)', r'%\1', log_hook)
log_hook = re.sub(r'%(?!\d)', '%%', log_hook)
log_hook = re.sub(r'; ?', r'\n', log_hook)
# Write log_hook to a temporary file and get its path
with NamedTemporaryFile(
prefix='PyGitUp.', suffix='.bat', delete=False
) as bat_file:
# Don't echo all commands
bat_file.file.write(b'@echo off\n')
# Run log_hook
bat_file.file.write(log_hook.encode('utf-8'))
# Run bat_file
state = subprocess.call(
[bat_file.name, branch.name, remote.name]
)
# Clean up file
os.remove(bat_file.name)
else: # pragma: no cover
# Run log_hook via 'shell -c'
state = subprocess.call(
[log_hook, 'git-up', branch.name, remote.name],
shell=True
)
if self.testing:
assert state == 0, 'log_hook returned != 0' | Call a log-command, if set by git-up.fetch.all. |
def _prefetch_items(self,change):
""" When the current_row in the model changes (whether from scrolling) or
set by the application. Make sure the results are loaded!
"""
if self.is_initialized:
view = self.item_view
upper_limit = view.iterable_index+view.iterable_fetch_size-view.iterable_prefetch
lower_limit = max(0,view.iterable_index+view.iterable_prefetch)
offset = int(view.iterable_fetch_size/2.0)
upper_visible_row = view.visible_rect[2]
lower_visible_row = view.visible_rect[0]
print("Visible rect = %s"%view.visible_rect)
if upper_visible_row >= upper_limit:
next_index = max(0,upper_visible_row-offset) # Center on current row
# Going up works...
if next_index>view.iterable_index:
print("Auto prefetch upper limit %s!"%upper_limit)
view.iterable_index = next_index
#view.model().reset()
# But going down doesn't?
elif view.iterable_index>0 and lower_visible_row < lower_limit:
next_index = max(0,lower_visible_row-offset) # Center on current row
# Going down works
if next_index<view.iterable_index:
print("Auto prefetch lower limit=%s, iterable=%s, setting next=%s!"%(lower_limit,view.iterable_index,next_index))
view.iterable_index = next_index | When the current_row in the model changes (whether from scrolling) or
set by the application. Make sure the results are loaded! |
def snap_to_nearest_config(x, tune_params):
"""helper func that for each param selects the closest actual value"""
params = []
for i, k in enumerate(tune_params.keys()):
values = numpy.array(tune_params[k])
idx = numpy.abs(values-x[i]).argmin()
params.append(int(values[idx]))
return params | helper func that for each param selects the closest actual value |
def merge_parameters(parameters, date_time, macros, types_and_values):
""" Merge Return a mapping from airflow macro names (prefixed with '_') to values
Args:
date_time: The timestamp at which the macro values need to be evaluated. This is only
applicable when types_and_values = True
macros: If true, the values in the returned dict are the macro strings (like '{{ ds }}')
Returns:
The resolved value, i.e. the value with the format modifiers replaced with the corresponding
parameter-values. E.g. if value is <project-id>.<dataset-id>.logs_%(_ds)s, the returned
value could be <project-id>.<dataset-id>.logs_2017-12-21.
"""
merged_parameters = Query._airflow_macro_formats(date_time=date_time, macros=macros,
types_and_values=types_and_values)
if parameters:
if types_and_values:
parameters = {
item['name']: {'value': item['value'], 'type': item['type']}
for item in parameters
}
else: # macros = True, or the default (i.e. just values)
parameters = {item['name']: item['value'] for item in parameters}
merged_parameters.update(parameters)
return merged_parameters | Merge and return a mapping from airflow macro names (prefixed with '_') to values
Args:
date_time: The timestamp at which the macro values need to be evaluated. This is only
applicable when types_and_values = True
macros: If true, the values in the returned dict are the macro strings (like '{{ ds }}')
Returns:
The resolved value, i.e. the value with the format modifiers replaced with the corresponding
parameter-values. E.g. if value is <project-id>.<dataset-id>.logs_%(_ds)s, the returned
value could be <project-id>.<dataset-id>.logs_2017-12-21. |
def load_modules_alignak_configuration(self): # pragma: no cover, not yet with unit tests.
"""Load Alignak configuration from the arbiter modules
If module implements get_alignak_configuration, call this function
:param raw_objects: raw objects we got from reading config files
:type raw_objects: dict
:return: None
"""
alignak_cfg = {}
# Ask configured modules if they got configuration for us
for instance in self.modules_manager.instances:
if not hasattr(instance, 'get_alignak_configuration'):
return
try:
logger.info("Getting Alignak global configuration from module '%s'", instance.name)
cfg = instance.get_alignak_configuration()
alignak_cfg.update(cfg)
except Exception as exp: # pylint: disable=broad-except
logger.error("Module %s get_alignak_configuration raised an exception %s. "
"Log and continue to run", instance.name, str(exp))
output = io.StringIO()
traceback.print_exc(file=output)
logger.error("Back trace of this remove: %s", output.getvalue())
output.close()
continue
params = []
if alignak_cfg:
logger.info("Got Alignak global configuration:")
for key, value in sorted(alignak_cfg.items()):
logger.info("- %s = %s", key, value)
# properties starting with an _ character are "transformed" to macro variables
if key.startswith('_'):
key = '$' + key[1:].upper() + '$'
# properties valued as None are filtered
if value is None:
continue
# properties valued as None string are filtered
if value == 'None':
continue
# properties valued as empty strings are filtered
if value == '':
continue
# set properties as legacy Shinken configuration files
params.append("%s=%s" % (key, value))
self.conf.load_params(params) | Load Alignak configuration from the arbiter modules
If module implements get_alignak_configuration, call this function
:param raw_objects: raw objects we got from reading config files
:type raw_objects: dict
:return: None |
def surface_of_section(orbit, plane_ix, interpolate=False):
"""
Generate and return a surface of section from the given orbit.
.. warning::
This is an experimental function and the API may change.
Parameters
----------
orbit : `~gala.dynamics.Orbit`
plane_ix : int
Integer that represents the coordinate to record crossings in. For
example, for a 2D Hamiltonian where you want to make a SoS in
:math:`y-p_y`, you would specify ``plane_ix=0`` (crossing the
:math:`x` axis), and this will only record crossings for which
:math:`p_x>0`.
interpolate : bool (optional)
Whether or not to interpolate on to the plane of interest. This
makes it much slower, but will work for orbits with a coarser
sampling.
Returns
-------
Examples
--------
If your orbit of interest is a tube orbit, it probably conserves (at
least approximately) some equivalent to angular momentum in the direction
of the circulation axis. Therefore, a surface of section in R-z should
be instructive for classifying these orbits. TODO...show how to convert
an orbit to Cylindrical..etc...
"""
w = orbit.w()
if w.ndim == 2:
w = w[...,None]
ndim,ntimes,norbits = w.shape
H_dim = ndim // 2
p_ix = plane_ix + H_dim
if interpolate:
raise NotImplementedError("Not yet implemented, sorry!")
# record position on specified plane when orbit crosses
all_sos = np.zeros((ndim,norbits), dtype=object)
for n in range(norbits):
cross_ix = argrelmin(w[plane_ix,:,n]**2)[0]
cross_ix = cross_ix[w[p_ix,cross_ix,n] > 0.]
sos = w[:,cross_ix,n]
for j in range(ndim):
all_sos[j,n] = sos[j,:]
return all_sos | Generate and return a surface of section from the given orbit.
.. warning::
This is an experimental function and the API may change.
Parameters
----------
orbit : `~gala.dynamics.Orbit`
plane_ix : int
Integer that represents the coordinate to record crossings in. For
example, for a 2D Hamiltonian where you want to make a SoS in
:math:`y-p_y`, you would specify ``plane_ix=0`` (crossing the
:math:`x` axis), and this will only record crossings for which
:math:`p_x>0`.
interpolate : bool (optional)
Whether or not to interpolate on to the plane of interest. This
makes it much slower, but will work for orbits with a coarser
sampling.
Returns
-------
Examples
--------
If your orbit of interest is a tube orbit, it probably conserves (at
least approximately) some equivalent to angular momentum in the direction
of the circulation axis. Therefore, a surface of section in R-z should
be instructive for classifying these orbits. TODO...show how to convert
an orbit to Cylindrical..etc... |
def getinfo(self, member):
"""Return RarInfo for filename
"""
if isinstance(member, RarInfo):
fname = member.filename
else:
fname = member
# accept both ways here
if PATH_SEP == '/':
fname2 = fname.replace("\\", "/")
else:
fname2 = fname.replace("/", "\\")
try:
return self._info_map[fname]
except KeyError:
try:
return self._info_map[fname2]
except KeyError:
raise NoRarEntry("No such file: %s" % fname) | Return RarInfo for filename |
def vlan_classifier_rule_class_type_proto_proto_proto_val(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan")
classifier = ET.SubElement(vlan, "classifier")
rule = ET.SubElement(classifier, "rule")
ruleid_key = ET.SubElement(rule, "ruleid")
ruleid_key.text = kwargs.pop('ruleid')
class_type = ET.SubElement(rule, "class-type")
proto = ET.SubElement(class_type, "proto")
proto = ET.SubElement(proto, "proto")
proto_val = ET.SubElement(proto, "proto-val")
proto_val.text = kwargs.pop('proto_val')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def on_nick(self, connection, event):
"""
Someone changed their nickname - send the nicknames list to the
WebSocket.
"""
old_nickname = self.get_nickname(event)
old_color = self.nicknames.pop(old_nickname)
new_nickname = event.target()
message = "is now known as %s" % new_nickname
self.namespace.emit("message", old_nickname, message, old_color)
new_color = color(new_nickname)
self.nicknames[new_nickname] = new_color
self.emit_nicknames()
if self.nickname == old_nickname:
self.nickname = new_nickname | Someone changed their nickname - send the nicknames list to the
WebSocket. |
def _check_available_data(archive, arc_type, day):
"""
Function to check what stations are available in the archive for a given \
day.
:type archive: str
:param archive: The archive source
:type arc_type: str
:param arc_type: The type of archive, can be:
:type day: datetime.date
:param day: Date to retrieve data for
:returns: list of tuples of (station, channel) as available.
.. note:: Currently the seishub options are untested.
"""
available_stations = []
if arc_type.lower() == 'day_vols':
wavefiles = glob.glob(os.path.join(archive, day.strftime('Y%Y'),
day.strftime('R%j.01'), '*'))
for wavefile in wavefiles:
header = read(wavefile, headonly=True)
available_stations.append((header[0].stats.station,
header[0].stats.channel))
elif arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st = client.get_previews(starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + 86400)
for tr in st:
available_stations.append((tr.stats.station, tr.stats.channel))
elif arc_type.lower() == 'fdsn':
client = FDSNClient(archive)
inventory = client.get_stations(starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + 86400,
level='channel')
for network in inventory:
for station in network:
for channel in station:
available_stations.append((station.code,
channel.code))
return available_stations | Function to check what stations are available in the archive for a given \
day.
:type archive: str
:param archive: The archive source
:type arc_type: str
:param arc_type: The type of archive, can be:
:type day: datetime.date
:param day: Date to retrieve data for
:returns: list of tuples of (station, channel) as available.
.. note:: Currently the seishub options are untested. |
def render_file(self, filename):
"""Convert a reST file to HTML.
"""
dirname, basename = split(filename)
with changedir(dirname):
infile = abspath(basename)
outfile = abspath('.%s.html' % basename)
self.docutils.publish_file(infile, outfile, self.styles)
return outfile | Convert a reST file to HTML. |
def remove_config_to_machine_group(self, project_name, config_name, group_name):
""" remove a logtail config to a machine group
Unsuccessful operation will cause a LogException.
:type project_name: string
:param project_name: the Project name
:type config_name: string
:param config_name: the logtail config name to apply
:type group_name: string
:param group_name: the machine group name
:return: RemoveConfigToMachineGroupResponse
:raise: LogException
"""
headers = {}
params = {}
resource = "/machinegroups/" + group_name + "/configs/" + config_name
(resp, header) = self._send("DELETE", project_name, None, resource, params, headers)
return RemoveConfigToMachineGroupResponse(header, resp) | remove a logtail config to a machine group
Unsuccessful operation will cause a LogException.
:type project_name: string
:param project_name: the Project name
:type config_name: string
:param config_name: the logtail config name to apply
:type group_name: string
:param group_name: the machine group name
:return: RemoveConfigToMachineGroupResponse
:raise: LogException |
def build_values(name, values_mods):
"""Update name/values.yaml with modifications"""
values_file = os.path.join(name, 'values.yaml')
with open(values_file) as f:
values = yaml.load(f)
for key, value in values_mods.items():
parts = key.split('.')
mod_obj = values
for p in parts:
mod_obj = mod_obj[p]
print(f"Updating {values_file}: {key}: {value}")
if isinstance(mod_obj, MutableMapping):
keys = IMAGE_REPOSITORY_KEYS & mod_obj.keys()
if keys:
for key in keys:
mod_obj[key] = value['repository']
else:
possible_keys = ' or '.join(IMAGE_REPOSITORY_KEYS)
raise KeyError(
f'Could not find {possible_keys} in {values_file}:{key}'
)
mod_obj['tag'] = value['tag']
else:
raise TypeError(
f'The key {key} in {values_file} must be a mapping.'
)
with open(values_file, 'w') as f:
yaml.dump(values, f) | Update name/values.yaml with modifications |
def create_in_cluster(self):
"""
call Kubernetes API and create this Service in cluster,
raise ConuExeption if the API call fails
:return: None
"""
try:
self.api.create_namespaced_service(self.namespace, self.body)
except ApiException as e:
raise ConuException(
"Exception when calling Kubernetes API - create_namespaced_service: {}\n".format(e))
logger.info(
"Creating Service %s in namespace: %s", self.name, self.namespace) | call Kubernetes API and create this Service in cluster,
raise ConuExeption if the API call fails
:return: None |
def intercept_(self):
"""
Intercept (bias) property
.. note:: Intercept is defined only for linear learners
Intercept (bias) is only defined when the linear model is chosen as base
learner (`booster=gblinear`). It is not defined for other base learner types, such
as tree learners (`booster=gbtree`).
Returns
-------
intercept_ : array of shape ``(1,)`` or ``[n_classes]``
"""
if getattr(self, 'booster', None) is not None and self.booster != 'gblinear':
raise AttributeError('Intercept (bias) is not defined for Booster type {}'
.format(self.booster))
b = self.get_booster()
return np.array(json.loads(b.get_dump(dump_format='json')[0])['bias']) | Intercept (bias) property
.. note:: Intercept is defined only for linear learners
Intercept (bias) is only defined when the linear model is chosen as base
learner (`booster=gblinear`). It is not defined for other base learner types, such
as tree learners (`booster=gbtree`).
Returns
-------
intercept_ : array of shape ``(1,)`` or ``[n_classes]`` |
def delete_vmss(access_token, subscription_id, resource_group, vmss_name):
'''Delete a virtual machine scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
return do_delete(endpoint, access_token) | Delete a virtual machine scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response. |
def csoftmax_for_slice(input):
""" It is a implementation of the constrained softmax (csoftmax) for slice.
Based on the paper:
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers" (page 4)
Args:
input: A list of [input tensor, cumulative attention].
Returns:
output: A list of [csoftmax results, masks]
"""
[ten, u] = input
shape_t = ten.shape
shape_u = u.shape
ten -= tf.reduce_mean(ten)
q = tf.exp(ten)
active = tf.ones_like(u, dtype=tf.int32)
mass = tf.constant(0, dtype=tf.float32)
found = tf.constant(True, dtype=tf.bool)
def loop(q_, mask, mass_, found_):
q_list = tf.dynamic_partition(q_, mask, 2)
condition_indices = tf.dynamic_partition(tf.range(tf.shape(q_)[0]), mask, 2) # 0 element it False,
# 1 element if true
p = q_list[1] * (1.0 - mass_) / tf.reduce_sum(q_list[1])
p_new = tf.dynamic_stitch(condition_indices, [q_list[0], p])
# condition verification and mask modification
less_mask = tf.cast(tf.less(u, p_new), tf.int32) # 0 when u is bigger than p, 1 when u is less than p
condition_indices = tf.dynamic_partition(tf.range(tf.shape(p_new)[0]), less_mask,
2) # 0 when u is bigger than p, 1 when u is less than p
split_p_new = tf.dynamic_partition(p_new, less_mask, 2)
split_u = tf.dynamic_partition(u, less_mask, 2)
alpha = tf.dynamic_stitch(condition_indices, [split_p_new[0], split_u[1]])
mass_ += tf.reduce_sum(split_u[1])
mask = mask * (tf.ones_like(less_mask) - less_mask)
found_ = tf.cond(tf.equal(tf.reduce_sum(less_mask), 0),
lambda: False,
lambda: True)
alpha = tf.reshape(alpha, q_.shape)
return alpha, mask, mass_, found_
(csoft, mask_, _, _) = tf.while_loop(cond=lambda _0, _1, _2, f: f,
body=loop,
loop_vars=(q, active, mass, found))
return [csoft, mask_] | It is an implementation of the constrained softmax (csoftmax) for a slice.
Based on the paper:
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers" (page 4)
Args:
input: A list of [input tensor, cumulative attention].
Returns:
output: A list of [csoftmax results, masks] |
def classify(self, n_jobs=-1, configure=None):
"""
Returns input-output behaviors for the list of logical networks in the attribute :attr:`networks`
Example::
>>> from caspo import core, classify
>>> networks = core.LogicalNetworkList.from_csv('networks.csv')
>>> setup = core.Setup.from_json('setup.json')
>>> classifier = classify.Classifier(networks, setup)
>>> behaviors = classifier.classify()
>>> behaviors.to_csv('behaviors.csv', networks=True)
n_jobs : int
Number of jobs to run in parallel. Default to -1 (all cores available)
configure : callable
Callable object responsible of setting clingo configuration
Returns
-------
caspo.core.logicalnetwork.LogicalNetworkList
The list of networks with one representative for each behavior
"""
start = timeit.default_timer()
networks = self.networks
n = len(networks)
cpu = n_jobs if n_jobs > -1 else mp.cpu_count()
if cpu > 1:
lpart = int(np.ceil(n / float(cpu))) if n > cpu else 1
parts = networks.split(np.arange(lpart, n, lpart))
behaviors_parts = Parallel(n_jobs=n_jobs)(delayed(__learn_io__)(part, self.setup, configure) for part in parts)
networks = core.LogicalNetworkList.from_hypergraph(networks.hg)
for behavior in behaviors_parts:
networks = networks.concat(behavior)
behaviors = __learn_io__(networks, self.setup, configure)
self.stats['time_io'] = timeit.default_timer() - start
self._logger.info("%s input-output logical behaviors found in %.4fs", len(behaviors), self.stats['time_io'])
return behaviors | Returns input-output behaviors for the list of logical networks in the attribute :attr:`networks`
Example::
>>> from caspo import core, classify
>>> networks = core.LogicalNetworkList.from_csv('networks.csv')
>>> setup = core.Setup.from_json('setup.json')
>>> classifier = classify.Classifier(networks, setup)
>>> behaviors = classifier.classify()
>>> behaviors.to_csv('behaviors.csv', networks=True)
n_jobs : int
Number of jobs to run in parallel. Default to -1 (all cores available)
configure : callable
Callable object responsible of setting clingo configuration
Returns
-------
caspo.core.logicalnetwork.LogicalNetworkList
The list of networks with one representative for each behavior |
def highlight(self, *args):
""" Highlights the region with a colored frame. Accepts the following parameters:
highlight([toEnable], [seconds], [color])
* toEnable (boolean): Enables or disables the overlay
* seconds (number): Seconds to show overlay
* color (string): Hex code ("#XXXXXX") or color name ("black")
"""
toEnable = (self._highlighter is None)
seconds = 3
color = "red"
if len(args) > 3:
raise TypeError("Unrecognized argument(s) for highlight()")
for arg in args:
if type(arg) == bool:
toEnable = arg
elif isinstance(arg, Number):
seconds = arg
elif isinstance(arg, basestring):
color = arg
if self._highlighter is not None:
self._highlighter.close()
if toEnable:
self._highlighter = PlatformManager.highlight((self.getX(), self.getY(), self.getW(), self.getH()), color, seconds) | Highlights the region with a colored frame. Accepts the following parameters:
highlight([toEnable], [seconds], [color])
* toEnable (boolean): Enables or disables the overlay
* seconds (number): Seconds to show overlay
* color (string): Hex code ("#XXXXXX") or color name ("black") |
def run(self):
""" Run consumer
"""
if KSER_METRICS_ENABLED == "yes":
from prometheus_client import start_http_server
logger.info("Metric.Starting...")
start_http_server(
os.getenv("KSER_METRICS_PORT", 8888),
os.getenv("KSER_METRICS_ADDRESS", "0.0.0.0")
)
logger.info("{}.Starting...".format(self.__class__.__name__))
running = True
while running:
msg = self.client.poll()
if msg:
# noinspection PyProtectedMember
if not msg.error():
self.REGISTRY.run(msg.value().decode('utf-8'))
elif msg.error().code() != KafkaError._PARTITION_EOF:
logger.error(msg.error())
running = False
self.client.close() | Run consumer |