text (string, lengths 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def clear(self):
""" Clear Screen """
widgets.StringWidget(self, ref="_w1_", text=" " * 20, x=1, y=1)
widgets.StringWidget(self, ref="_w2_", text=" " * 20, x=1, y=2)
widgets.StringWidget(self, ref="_w3_", text=" " * 20, x=1, y=3)
widgets.StringWidget(self, ref="_w4_", text=" " * 20, x=1, y=4) | 0.006006 |
def chemical_formula(self):
"""the chemical formula of the molecule"""
counts = {}
for number in self.numbers:
counts[number] = counts.get(number, 0)+1
items = []
for number, count in sorted(counts.items(), reverse=True):
if count == 1:
items.append(periodic[number].symbol)
else:
items.append("%s%i" % (periodic[number].symbol, count))
return "".join(items) | 0.004219 |
def follow_user(self, user, delegate):
"""Follow the given user.
Returns the user info back to the given delegate
"""
parser = txml.Users(delegate)
return self.__postPage('/friendships/create/%s.xml' % (user), parser) | 0.007752 |
def set_numeric_score_increment(self, increment):
"""Sets the numeric score increment.
arg: increment (decimal): the numeric score increment
raise: InvalidArgument - ``increment`` is invalid
raise: NoAccess - ``increment`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.set_lowest_numeric_score
if self.get_numeric_score_increment_metadata().is_read_only():
raise errors.NoAccess()
try:
increment = float(increment)
except ValueError:
raise errors.InvalidArgument()
if not self._is_valid_decimal(increment, self.get_numeric_score_increment_metadata()):
raise errors.InvalidArgument()
self._my_map['numericScoreIncrement'] = increment | 0.00454 |
def get_next_interval_histogram(self,
range_start_time_sec=0.0,
range_end_time_sec=sys.maxsize,
absolute=False):
'''Read the next interval histogram from the log, if interval falls
within an absolute or relative time range.
Timestamps are assumed to appear in order in the log file, and as such
        this method will return None upon encountering a timestamp larger than
range_end_time_sec.
Relative time range:
the range is assumed to be in seconds relative to
the actual timestamp value found in each interval line in the log
Absolute time range:
Absolute timestamps are calculated by adding the timestamp found
with the recorded interval to the [latest, optional] start time
found in the log. The start time is indicated in the log with
a "#[StartTime: " followed by the start time in seconds.
        Params:
        range_start_time_sec The absolute or relative start of the expected
                             time range, in seconds.
        range_end_time_sec   The absolute or relative end of the expected
                             time range, in seconds.
        absolute Defines if the passed range is absolute or relative
        Return:
        Returns a histogram object if an interval line was found with an
        associated start timestamp value that falls between start_time_sec and
        end_time_sec,
        or None if no such interval line is found.
        Upon encountering any unexpected format errors in reading the next
        interval from the file, this method will return None.
        The histogram returned will have its timestamp set to the absolute
timestamp calculated from adding the interval's indicated timestamp
value to the latest [optional] start time found in the log.
Exceptions:
ValueError if there is a syntax error in one of the float fields
'''
return self._decode_next_interval_histogram(None,
range_start_time_sec,
range_end_time_sec,
absolute) | 0.002907 |
def inverse_transform(self, X, copy=None):
"""
Scale back the data to the original representation.
:param X: Scaled data matrix.
:type X: numpy.ndarray, shape [n_samples, n_features]
:param bool copy: Copy the X data matrix.
:return: X data matrix with the scaling operation reverted.
:rtype: numpy.ndarray, shape [n_samples, n_features]
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = numpy.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X | 0.001609 |
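For the dense branch above, the reversal is just an elementwise multiply-then-add; a small self-contained numpy sketch with made-up scale and mean values:
import numpy
scale_, mean_ = numpy.array([2.0, 0.5]), numpy.array([1.0, -3.0])
X_original = numpy.array([[3.0, 4.0], [5.0, 6.0]])
X_scaled = (X_original - mean_) / scale_
# Revert the scaling: multiply by scale_, then add the mean back.
X_restored = X_scaled * scale_ + mean_
assert numpy.allclose(X_restored, X_original)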
def resource_qualifier(resource):
""" Split a resource in (filename, directory) tuple with taking care of external resources
:param resource: A file path or a URI
:return: (Filename, Directory) for files, (URI, None) for URI
"""
if resource.startswith("//") or resource.startswith("http"):
return resource, None
else:
return reversed(op.split(resource)) | 0.005076 |
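A self-contained sketch mirroring the function above (the function name here is illustrative); note the original returns a reversed iterator for file paths, which unpacks the same way as the tuple shown here:
import os.path as op
def split_resource(resource):
    # URIs pass through untouched; local paths are split into (filename, directory).
    if resource.startswith("//") or resource.startswith("http"):
        return resource, None
    return tuple(reversed(op.split(resource)))
print(split_resource("/data/texts/phi1294/phi002.xml"))  # ('phi002.xml', '/data/texts/phi1294')
print(split_resource("http://example.org/style.css"))    # ('http://example.org/style.css', None)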
def get_params_parser():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-g', '--debug', dest='debug',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument("--arthur", action='store_true', dest='arthur',
help="Enable arthur to collect raw data")
parser.add_argument("--raw", action='store_true', dest='raw',
help="Activate raw task")
parser.add_argument("--enrich", action='store_true', dest='enrich',
help="Activate enrich task")
parser.add_argument("--identities", action='store_true', dest='identities',
help="Activate merge identities task")
parser.add_argument("--panels", action='store_true', dest='panels',
help="Activate panels task")
parser.add_argument("--cfg", dest='cfg_path',
help="Configuration file path")
parser.add_argument("--backends", dest='backend_sections', default=[],
nargs='*', help="Backend sections to execute")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser | 0.000793 |
def df_filter_col_sum(df, threshold, take_abs=True):
''' filter columns in matrix at some threshold
and remove rows that have all zero values '''
from copy import deepcopy
from .__init__ import Network
net = Network()
if take_abs is True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > threshold]
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > 0]
if take_abs is True:
inst_rows = df_copy.index.tolist()
inst_cols = df_copy.columns.tolist()
df['mat'] = grab_df_subset(df['mat'], inst_rows, inst_cols)
if 'mat_up' in df:
df['mat_up'] = grab_df_subset(df['mat_up'], inst_rows, inst_cols)
df['mat_dn'] = grab_df_subset(df['mat_dn'], inst_rows, inst_cols)
if 'mat_orig' in df:
df['mat_orig'] = grab_df_subset(df['mat_orig'], inst_rows, inst_cols)
else:
df['mat'] = df_copy
return df | 0.017329 |
def set_logger(self):
"""
Prepare the logger, using self.logger_name and self.logger_level
"""
self.logger = logging.getLogger(self.logger_name)
self.logger.setLevel(self.logger_level) | 0.008929 |
def eta_hms(seconds, always_show_hours=False, always_show_minutes=False, hours_leading_zero=False):
"""Converts seconds remaining into a human readable timestamp (e.g. hh:mm:ss, h:mm:ss, mm:ss, or ss).
Positional arguments:
seconds -- integer/float indicating seconds remaining.
Keyword arguments:
always_show_hours -- don't hide the 0 hours.
always_show_minutes -- don't hide the 0 minutes.
hours_leading_zero -- show 01:00:00 instead of 1:00:00.
Returns:
Human readable string.
"""
# Convert seconds to other units.
final_hours, final_minutes, final_seconds = 0, 0, seconds
if final_seconds >= 3600:
final_hours = int(final_seconds / 3600.0)
final_seconds -= final_hours * 3600
if final_seconds >= 60:
final_minutes = int(final_seconds / 60.0)
final_seconds -= final_minutes * 60
final_seconds = int(ceil(final_seconds))
# Determine which string template to use.
if final_hours or always_show_hours:
if hours_leading_zero:
template = '{hour:02.0f}:{minute:02.0f}:{second:02.0f}'
else:
template = '{hour}:{minute:02.0f}:{second:02.0f}'
elif final_minutes or always_show_minutes:
template = '{minute:02.0f}:{second:02.0f}'
else:
template = '{second:02.0f}'
return template.format(hour=final_hours, minute=final_minutes, second=final_seconds) | 0.002829 |
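A few illustrative calls, assuming eta_hms above is in scope (it relies on ceil from the math module):
assert eta_hms(45) == '45'
assert eta_hms(125) == '02:05'
assert eta_hms(3725) == '1:02:05'
assert eta_hms(3725, hours_leading_zero=True) == '01:02:05'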
def discover_files(base_path, sub_path='', ext='', trim_base_path=False):
"""Discovers all files with certain extension in given paths."""
file_list = []
for root, dirs, files in walk(path.join(base_path, sub_path)):
if trim_base_path:
root = path.relpath(root, base_path)
file_list.extend([path.join(root, file_name)
for file_name in files
if file_name.endswith(ext)])
return sorted(file_list) | 0.002041 |
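A usage sketch, assuming the function above is in scope and the directory layout is hypothetical:
yaml_files = discover_files('/srv/playbooks', sub_path='roles', ext='.yaml', trim_base_path=True)
# e.g. ['roles/common/tasks/main.yaml', 'roles/web/tasks/main.yaml']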
def on_message(self, client_conn, msg):
"""Handle message.
Returns
-------
ready : Future
A future that will resolve once we're ready, else None.
Notes
-----
*on_message* should not be called again until *ready* has resolved.
"""
MAX_QUEUE_SIZE = 30
if len(self._msg_queue) >= MAX_QUEUE_SIZE:
# This should never happen if callers to handle_message wait
# for its futures to resolve before sending another message.
# NM 2014-10-06: Except when there are multiple clients. Oops.
raise RuntimeError('MessageHandlerThread unhandled '
'message queue full, not handling message')
ready_future = Future()
self._msg_queue.append((ready_future, client_conn, msg))
self._wake.set()
return ready_future | 0.002227 |
def remove_dbs(self, double):
"""Remove double item from list
"""
one = []
for dup in double:
if dup not in one:
one.append(dup)
return one | 0.009662 |
def display(self, stats, cs_status=None):
"""Display stats on the screen.
stats: Stats database to display
cs_status:
"None": standalone or server mode
"Connected": Client is connected to a Glances server
"SNMP": Client is connected to a SNMP server
"Disconnected": Client is disconnected from the server
Return:
True if the stats have been displayed
        False if the help has been displayed
"""
# Init the internal line/column for Glances Curses
self.init_line_column()
# Update the stats messages
###########################
        # Get all the plugins but quicklook and processlist
self.args.cs_status = cs_status
__stat_display = self.__get_stat_display(stats, layer=cs_status)
# Adapt number of processes to the available space
max_processes_displayed = (
self.screen.getmaxyx()[0] - 11 -
(0 if 'docker' not in __stat_display else
self.get_stats_display_height(__stat_display["docker"])) -
(0 if 'processcount' not in __stat_display else
self.get_stats_display_height(__stat_display["processcount"])) -
(0 if 'amps' not in __stat_display else
self.get_stats_display_height(__stat_display["amps"])) -
(0 if 'alert' not in __stat_display else
self.get_stats_display_height(__stat_display["alert"])))
try:
if self.args.enable_process_extended:
max_processes_displayed -= 4
except AttributeError:
pass
if max_processes_displayed < 0:
max_processes_displayed = 0
if (glances_processes.max_processes is None or
glances_processes.max_processes != max_processes_displayed):
logger.debug("Set number of displayed processes to {}".format(max_processes_displayed))
glances_processes.max_processes = max_processes_displayed
# Get the processlist
__stat_display["processlist"] = stats.get_plugin(
'processlist').get_stats_display(args=self.args)
# Display the stats on the curses interface
###########################################
# Help screen (on top of the other stats)
if self.args.help_tag:
# Display the stats...
self.display_plugin(
stats.get_plugin('help').get_stats_display(args=self.args))
# ... and exit
return False
# =====================================
# Display first line (system+ip+uptime)
        # Optionally: Cloud on second line
# =====================================
self.__display_header(__stat_display)
# ==============================================================
# Display second line (<SUMMARY>+CPU|PERCPU+<GPU>+LOAD+MEM+SWAP)
# ==============================================================
self.__display_top(__stat_display, stats)
# ==================================================================
# Display left sidebar (NETWORK+PORTS+DISKIO+FS+SENSORS+Current time)
# ==================================================================
self.__display_left(__stat_display)
# ====================================
# Display right stats (process and co)
# ====================================
self.__display_right(__stat_display)
# =====================
# Others popup messages
# =====================
# Display edit filter popup
# Only in standalone mode (cs_status is None)
if self.edit_filter and cs_status is None:
new_filter = self.display_popup(
'Process filter pattern: \n\n' +
'Examples:\n' +
'- python\n' +
'- .*python.*\n' +
'- /usr/lib.*\n' +
'- name:.*nautilus.*\n' +
'- cmdline:.*glances.*\n' +
'- username:nicolargo\n' +
'- username:^root ',
is_input=True,
input_value=glances_processes.process_filter_input)
glances_processes.process_filter = new_filter
elif self.edit_filter and cs_status is not None:
self.display_popup('Process filter only available in standalone mode')
self.edit_filter = False
# Display graph generation popup
if self.args.generate_graph:
self.display_popup('Generate graph in {}'.format(self.args.export_graph_path))
return True | 0.001275 |
def _setup_model_loss(self, lr):
"""
Setup loss and optimizer for PyTorch model.
"""
# Setup loss
if not hasattr(self, "loss"):
self.loss = SoftCrossEntropyLoss()
# Setup optimizer
if not hasattr(self, "optimizer"):
self.optimizer = optim.Adam(self.parameters(), lr=lr) | 0.005714 |
def _apply_scales(array, scales, dtype):
"""Apply scales to the array.
"""
new_array = np.empty(array.shape, dtype)
for i in array.dtype.names:
try:
new_array[i] = array[i] * scales[i]
except TypeError:
if np.all(scales[i] == 1):
new_array[i] = array[i]
else:
raise
return new_array | 0.002591 |
def does_not_contain(self, element):
"""
Ensures :attr:`subject` does not contain *element*.
"""
self._run(unittest_case.assertNotIn, (element, self._subject))
return ChainInspector(self._subject) | 0.008475 |
def get_profane_words(self):
"""Returns all profane words currently in use."""
profane_words = []
if self._custom_censor_list:
profane_words = [w for w in self._custom_censor_list] # Previous versions of Python don't have list.copy()
else:
profane_words = [w for w in self._censor_list]
profane_words.extend(self._extra_censor_list)
profane_words.extend([inflection.pluralize(word) for word in profane_words])
profane_words = list(set(profane_words))
# We sort the list based on decreasing word length so that words like
# 'fu' aren't substituted before 'fuck' if no_word_boundaries = true
profane_words.sort(key=len)
profane_words.reverse()
return profane_words | 0.006289 |
def runSearchReferenceSets(self, request):
"""
Runs the specified SearchReferenceSetsRequest.
"""
return self.runSearchRequest(
request, protocol.SearchReferenceSetsRequest,
protocol.SearchReferenceSetsResponse,
self.referenceSetsGenerator) | 0.006494 |
def to_feather(self, fname):
"""
Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname) | 0.006211 |
def process_commmon(self):
'''
Some data processing common for all services.
No need to override this.
'''
data = self.data
data_content = data['content'][0]
## Paste the output of a command
# This is deprecated after piping support
if data['command']:
try:
call = subprocess.Popen(data_content.split(),
stderr=subprocess.PIPE,
stdout = subprocess.PIPE)
out, err = call.communicate()
content = out
except OSError:
logging.exception('Cannot execute the command')
content = ''
if not data['title']:
data['title'] = 'Output of command: `%s`' %(data_content)
## Paste the output of a file
# This is deprecated after piping support
elif data['file']:
try:
f = file(data_content)
content = f.read()
f.close()
except IOError:
logging.exception('File not present or unreadable')
content = ''
if not data['title']:
data['title'] = 'File: `%s`' %(data_content)
else:
content = data_content
self.data['content'] = content
self.data['syntax'] = self.SYNTAX_DICT.get(self.data['syntax'], '')
# Excluded data not useful in paste information
for key in ['func', 'verbose', 'service', 'extra', 'command', 'file']:
del self.data[key] | 0.00492 |
def _readMultiple(self, start, end, db):
"""
Returns a list of hashes with serial numbers between start
and end, both inclusive.
"""
self._validatePos(start, end)
# Converting any bytearray to bytes
return [bytes(db.get(str(pos))) for pos in range(start, end + 1)] | 0.006211 |
def info_factory(name, libnames, headers, frameworks=None,
section=None, classname=None):
"""Create a system_info class.
Parameters
----------
name : str
name of the library
libnames : seq
list of libraries to look for
headers : seq
list of headers to look for
classname : str
name of the returned class
section : str
section name in the site.cfg
Returns
-------
a system_info-derived class with the given meta-parameters
"""
if not classname:
classname = '%s_info' % name
if not section:
section = name
if not frameworks:
        frameworks = []
class _ret(system_info):
def __init__(self):
system_info.__init__(self)
def library_extensions(self):
return system_info.library_extensions(self)
def calc_info(self):
""" Compute the informations of the library """
if libnames:
libs = self.get_libs('libraries', '')
if not libs:
libs = libnames
# Look for the shared library
lib_dirs = self.get_lib_dirs()
tmp = None
for d in lib_dirs:
tmp = self.check_libs(d, libs)
if tmp is not None:
info = tmp
break
if tmp is None:
return
# Look for the header file
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, headers)
if p:
inc_dir = os.path.dirname(p[0])
dict_append(info, include_dirs=[d])
break
if inc_dir is None:
log.info(' %s not found' % name)
return
self.set_info(**info)
else:
# Look for frameworks
if frameworks:
fargs = []
for f in frameworks:
p = "/System/Library/Frameworks/%s.framework" % f
if os.path.exists(p):
fargs.append("-framework")
fargs.append(f)
if fargs:
self.set_info(extra_link_args=fargs)
return
_ret.__name__ = classname
_ret.section = section
return _ret | 0.001136 |
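A hedged usage sketch (library, header, and section names are hypothetical); numpy.distutils would normally instantiate the returned class and call calc_info() to locate the library and its header:
fftw_like_info = info_factory('fftw3', ['fftw3'], ['fftw3.h'], section='fftw3')
info_instance = fftw_like_info()   # a system_info subclass instance
info_instance.calc_info()          # populates the info via set_info() when the library is found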
def refresh_console(self, console: tcod.console.Console) -> None:
"""Update an Image created with :any:`tcod.image_from_console`.
The console used with this function should have the same width and
height as the Console given to :any:`tcod.image_from_console`.
The font width and height must also be the same as when
:any:`tcod.image_from_console` was called.
Args:
console (Console): A Console with a pixel width and height
matching this Image.
"""
lib.TCOD_image_refresh_console(self.image_c, _console(console)) | 0.003221 |
def ProcessHuntClientCrash(flow_obj, client_crash_info):
"""Processes client crash triggerted by a given hunt-induced flow."""
if not hunt.IsLegacyHunt(flow_obj.parent_hunt_id):
hunt.StopHuntIfCrashLimitExceeded(flow_obj.parent_hunt_id)
return
hunt_urn = rdfvalue.RDFURN("hunts").Add(flow_obj.parent_hunt_id)
with aff4.FACTORY.Open(hunt_urn, mode="rw") as fd:
# Legacy AFF4 code expects token to be set.
fd.token = access_control.ACLToken(username=fd.creator)
fd.RegisterCrash(client_crash_info) | 0.009506 |
def call_plugins(self, step):
'''
For each plugins, check if a "step" method exist on it, and call it
Args:
step (str): The method to search and call on each plugin
'''
for plugin in self.plugins:
try:
getattr(plugin, step)()
except AttributeError:
self.logger.debug("{} doesn't exist on plugin {}".format(step, plugin))
except TypeError:
self.logger.debug("{} on plugin {} is not callable".format(step, plugin)) | 0.007286 |
def extend(dict_, *dicts, **kwargs):
"""Extend a dictionary with keys and values from other dictionaries.
:param dict_: Dictionary to extend
Optional keyword arguments allow to control the exact way
in which ``dict_`` will be extended.
:param overwrite:
Whether repeated keys should have their values overwritten,
retaining the last value, as per given order of dictionaries.
This is the default behavior (equivalent to ``overwrite=True``).
If ``overwrite=False``, repeated keys are simply ignored.
Example::
>> foo = {'a': 1}
>> extend(foo, {'a': 10, 'b': 2}, overwrite=True)
{'a': 10, 'b': 2}
>> foo = {'a': 1}
>> extend(foo, {'a': 10, 'b': 2}, overwrite=False)
{'a': 1, 'b': 2}
:param deep:
Whether extending should proceed recursively, and cause
corresponding subdictionaries to be merged into each other.
By default, this does not happen (equivalent to ``deep=False``).
Example::
>> foo = {'a': {'b': 1}}
>> extend(foo, {'a': {'c': 2}}, deep=False)
{'a': {'c': 2}}
>> foo = {'a': {'b': 1}}
>> extend(foo, {'a': {'c': 2}}, deep=True)
{'a': {'b': 1, 'c': 2}}
:return: Extended ``dict_``
.. versionadded:: 0.0.2
"""
ensure_mapping(dict_)
dicts = list(imap(ensure_mapping, dicts))
ensure_keyword_args(kwargs, optional=('deep', 'overwrite'))
return _nary_dict_update([dict_] + dicts, copy=False,
deep=kwargs.get('deep', False),
overwrite=kwargs.get('overwrite', True)) | 0.000585 |
def cmd_iter(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type='glob',
ret='',
kwarg=None,
**kwargs):
'''
Execute a single command via the salt-ssh subsystem and return a
generator
.. versionadded:: 2015.5.0
'''
ssh = self._prep_ssh(
tgt,
fun,
arg,
timeout,
tgt_type,
kwarg,
**kwargs)
for ret in ssh.run_iter(jid=kwargs.get('jid', None)):
yield ret | 0.00315 |
def get_parser(cfg_file=cfg_file):
""" Returns a ConfigParser.ConfigParser() object for our cfg_file """
if not os.path.exists(cfg_file):
generate_configfile(cfg_file=cfg_file, defaults=defaults)
config = ConfigParser.ConfigParser()
config.read(cfg_file)
return config | 0.003378 |
def load(cls, cache_file, backend=None):
"""Instantiate AsyncResult from dumped `cache_file`.
This is the inverse of :meth:`dump`.
Parameters
----------
cache_file: str
Name of file from which the run should be read.
backend: clusterjob.backends.ClusterjobBackend or None
The backend instance for the job. If None, the backend will be
determined by the *name* of the dumped job's backend.
"""
with open(cache_file, 'rb') as pickle_fh:
(remote, backend_name, max_sleep_interval, job_id, status,
epilogue, ssh, scp) = pickle.load(pickle_fh)
if backend is None:
backend = JobScript._backends[backend_name]
ar = cls(backend)
(ar.remote, ar.max_sleep_interval, ar.job_id, ar._status, ar.epilogue,
ar.ssh, ar.scp) \
= (remote, max_sleep_interval, job_id, status, epilogue, ssh, scp)
ar.cache_file = cache_file
return ar | 0.001931 |
def least_upper_bound(*intervals_to_join):
"""
Pseudo least upper bound.
    Join the given set of intervals into a big interval. The resulting strided interval is the one which, among
    all the possible joins of the presented SIs, presents the least number of values.
    The number of joins to compute is linear with the number of intervals to join.
    Draft of proof:
    Considering three generic SIs (a, b, and c) ordered by their lower bounds, such that
    a.lower_bound <= b.lower_bound <= c.lower_bound, where <= is the lexicographic less or equal.
    The only joins which make sense to compute are:
    * a U b U c
    * b U c U a
    * c U a U b
    All the other combinations fall in either one of these cases. For example: b U a U c does not make sense
    to be calculated. In fact, if one draws this union, the result is exactly either (b U c U a) or (a U b U c) or
    (c U a U b).
:param intervals_to_join: Intervals to join
:return: Interval that contains all intervals
"""
assert len(intervals_to_join) > 0, "No intervals to join"
# Check if all intervals are of same width
all_same = all(x.bits == intervals_to_join[0].bits for x in intervals_to_join)
assert all_same, "All intervals to join should be same"
# Optimization: If we have only one interval, then return that interval as result
if len(intervals_to_join) == 1:
return intervals_to_join[0].copy()
# Optimization: If we have only two intervals, the pseudo-join is fine and more precise
if len(intervals_to_join) == 2:
return StridedInterval.pseudo_join(intervals_to_join[0], intervals_to_join[1])
# sort the intervals in increasing left bound
sorted_intervals = sorted(intervals_to_join, key=lambda x: x.lower_bound)
# Fig 3 of the paper
ret = None
# we try all possible joins (linear with the number of SI to join)
# and we return the one with the least number of values.
for i in xrange(len(sorted_intervals)):
# let's join all of them
si = reduce(lambda x, y: StridedInterval.pseudo_join(x, y, False), sorted_intervals[i:] + sorted_intervals[0:i])
if ret is None or ret.n_values > si.n_values:
ret = si
if any([x for x in intervals_to_join if x.uninitialized]):
ret.uninitialized = True
return ret | 0.005978 |
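A tiny self-contained sketch of the rotation pattern described in the proof above (plain strings stand in for strided intervals):
sorted_intervals = ['a', 'b', 'c']
rotations = [sorted_intervals[i:] + sorted_intervals[:i] for i in range(len(sorted_intervals))]
print(rotations)  # [['a', 'b', 'c'], ['b', 'c', 'a'], ['c', 'a', 'b']] -- the only joins worth computing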
def pull(self, repository, tag=None, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
If no tag is specified, all tags from that repository will be
pulled.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
low-level API.
Args:
repository (str): The repository to pull
tag (str): The tag to pull
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]``
Returns:
(:py:class:`Image` or list): The image that has been pulled.
If no ``tag`` was specified, the method will return a list
of :py:class:`Image` objects belonging to this repository.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> # Pull the image tagged `latest` in the busybox repo
>>> image = client.images.pull('busybox:latest')
>>> # Pull all tags in the busybox repo
>>> images = client.images.pull('busybox')
"""
if not tag:
repository, tag = parse_repository_tag(repository)
if 'stream' in kwargs:
warnings.warn(
'`stream` is not a valid parameter for this method'
' and will be overridden'
)
del kwargs['stream']
pull_log = self.client.api.pull(
repository, tag=tag, stream=True, **kwargs
)
for _ in pull_log:
# We don't do anything with the logs, but we need
# to keep the connection alive and wait for the image
# to be pulled.
pass
if tag:
return self.get('{0}{2}{1}'.format(
repository, tag, '@' if tag.startswith('sha256:') else ':'
))
return self.list(repository) | 0.000901 |
def export(self, top=True):
"""Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
                All non-list objects should be exported with top=True;
                all list objects that are embedded as fields in list objects
                should be exported with top=False.
Returns:
str: The objects string representation
"""
out = []
if top:
out.append(self._internal_name)
out.append(self._to_str(self.typical_or_extreme_period_name))
out.append(self._to_str(self.typical_or_extreme_period_type))
out.append(self._to_str(self.period_start_day))
out.append(self._to_str(self.period_end_day))
return ",".join(out) | 0.002448 |
def _registerInterface(self, iName, intf, isPrivate=False):
"""
Register interface object on interface level object
"""
nameAvailabilityCheck(self, iName, intf)
assert intf._parent is None
intf._parent = self
intf._name = iName
intf._ctx = self._ctx
if isPrivate:
self._private_interfaces.append(intf)
intf._isExtern = False
else:
self._interfaces.append(intf)
intf._isExtern = True | 0.003914 |
def _K_computations(self, X, X2=None):
"""Pre-computations for the covariance function (used both when computing the covariance and its gradients). Here self._dK_dvar and self._K_dist2 are updated."""
self._lengthscales=self.mapping.f(X)
self._lengthscales2=np.square(self._lengthscales)
        if X2 is None:
self._lengthscales_two = self._lengthscales
self._lengthscales_two2 = self._lengthscales2
Xsquare = np.square(X).sum(1)
self._K_dist2 = -2.*tdot(X) + Xsquare[:, None] + Xsquare[None, :]
else:
self._lengthscales_two = self.mapping.f(X2)
self._lengthscales_two2 = np.square(self._lengthscales_two)
self._K_dist2 = -2.*np.dot(X, X2.T) + np.square(X).sum(1)[:, None] + np.square(X2).sum(1)[None, :]
self._w2 = self._lengthscales2 + self._lengthscales_two2.T
prod_length = self._lengthscales*self._lengthscales_two.T
self._K_exponential = np.exp(-self._K_dist2/self._w2)
self._K_dvar = np.sign(prod_length)*(2*np.abs(prod_length)/self._w2)**(self.input_dim/2.)*np.exp(-self._K_dist2/self._w2) | 0.007867 |
def get_experiments(base, load=False):
''' get_experiments will return loaded json for all valid experiments from an experiment folder
:param base: full path to the base folder with experiments inside
:param load: if True, returns a list of loaded config.json objects. If False (default) returns the paths to the experiments
'''
experiments = find_directories(base)
valid_experiments = [e for e in experiments if validate(e,cleanup=False)]
bot.info("Found %s valid experiments" %(len(valid_experiments)))
if load is True:
valid_experiments = load_experiments(valid_experiments)
#TODO at some point in this workflow we would want to grab instructions from help
# and variables from labels, environment, etc.
return valid_experiments | 0.008929 |
def is_super_admin(self, req):
"""Returns True if the admin specified in the request represents the
.super_admin.
:param req: The swob.Request to check.
        :returns: True if .super_admin.
"""
return req.headers.get('x-auth-admin-user') == '.super_admin' and \
self.super_admin_key and \
req.headers.get('x-auth-admin-key') == self.super_admin_key | 0.004739 |
def isoformat(self, strict=False):
'''Return date in isoformat (same as __str__ but without qualifier).
WARNING: does not replace '?' in dates unless strict=True.
'''
out = self.year
# what do we do when no year ...
for val in [self.month, self.day]:
if not val:
break
out += u'-' + val
if strict:
out = out.replace('?', '0')
if self.hour:
out += u' '
out += self.hour
for val in [self.minute, self.second]:
if not val:
break
out += u':' + val
if self.microsecond:
out += u'.' + self.microsecond
return out | 0.002674 |
def replicator_eigenmaps(adjacency_matrix, k):
"""
Performs spectral graph embedding on the centrality reweighted adjacency matrix
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a scipy.sparse.coo_matrix
- k: The number of social dimensions/eigenvectors to extract
- max_iter: The maximum number of iterations for the iterative eigensolution method
Outputs: - S in R^(nxk): The social dimensions represented as a numpy.array matrix
"""
number_of_nodes = adjacency_matrix.shape[0]
max_eigenvalue = spla.eigsh(adjacency_matrix,
k=1,
which='LM',
return_eigenvectors=False)
# Calculate Replicator matrix
eye_matrix = sparse.eye(number_of_nodes, number_of_nodes, dtype=np.float64)
eye_matrix = eye_matrix.tocsr()
eye_matrix.data = eye_matrix.data*max_eigenvalue
replicator = eye_matrix - adjacency_matrix
# Calculate bottom k+1 eigenvalues and eigenvectors of normalised Laplacian
try:
eigenvalues, eigenvectors = spla.eigsh(replicator,
k=k+1,
which='SM',
return_eigenvectors=True)
except spla.ArpackNoConvergence as e:
print("ARPACK has not converged.")
eigenvalue = e.eigenvalues
eigenvectors = e.eigenvectors
eigenvectors = eigenvectors[:, 1:]
return eigenvectors | 0.003793 |
def get_commit_tree(profile, sha):
"""Get the SHA of a commit's tree.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
sha
The SHA of a commit.
Returns:
The SHA of the commit's tree.
"""
data = commits.get_commit(profile, sha)
tree = data.get("tree")
sha = tree.get("sha")
return sha | 0.001901 |
def naccess_available():
"""True if naccess is available on the path."""
available = False
try:
subprocess.check_output(['naccess'], stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
available = True
except FileNotFoundError:
print("naccess has not been found on your path. If you have already "
"installed naccess but are unsure how to add it to your path, "
"check out this: https://stackoverflow.com/a/14638025")
return available | 0.001912 |
def get_workspace_activities(brain, limit=1):
""" Return the workspace activities sorted by reverse chronological
order
Regarding the time value:
- the datetime value contains the time in international format
(machine readable)
- the title value contains the absolute date and time of the post
"""
mb = queryUtility(IMicroblogTool)
items = mb.context_values(brain.getObject(), limit=limit)
return [
{
'subject': item.creator,
'verb': 'published',
'object': item.text,
'time': {
'datetime': item.date.strftime('%Y-%m-%d'),
'title': item.date.strftime('%d %B %Y, %H:%M'),
}
} for item in items
] | 0.001332 |
def http_get_provider(provider,
request_url, params, token_secret, token_cookie = None):
'''Handle HTTP GET requests on an authentication endpoint.
Authentication flow begins when ``params`` has a ``login`` key with a value
of ``start``. For instance, ``/auth/twitter?login=start``.
    :param str provider: A provider to obtain a user ID from.
:param str request_url: The authentication endpoint/callback.
:param dict params: GET parameters from the query string.
:param str token_secret: An app secret to encode/decode JSON web tokens.
:param str token_cookie: The current JSON web token, if available.
:return: A dict containing any of the following possible keys:
        ``status``: an HTTP status code the server should send
``redirect``: where the client should be directed to continue the flow
``set_token_cookie``: contains a JSON web token and should be stored by
the client and passed in the next call.
``provider_user_id``: the user ID from the login provider
``provider_user_name``: the user name from the login provider
'''
if not validate_provider(provider):
raise InvalidUsage('Provider not supported')
klass = getattr(socialauth.providers, provider.capitalize())
provider = klass(request_url, params, token_secret, token_cookie)
if provider.status == 302:
ret = dict(status = 302, redirect = provider.redirect)
tc = getattr(provider, 'set_token_cookie', None)
if tc is not None:
ret['set_token_cookie'] = tc
return ret
if provider.status == 200 and provider.user_id is not None:
ret = dict(status = 200, provider_user_id = provider.user_id)
if provider.user_name is not None:
ret['provider_user_name'] = provider.user_name
return ret
raise InvalidUsage('Invalid request') | 0.006785 |
def commit_deposit(self, deposit_id, **params):
"""https://developers.coinbase.com/api/v2#commit-a-deposit"""
return self.api_client.commit_deposit(self.id, deposit_id, **params) | 0.010309 |
def local_dt(dt):
"""Return an aware datetime in system timezone, from a naive or aware
datetime.
Naive datetime are assumed to be in UTC TZ.
"""
if not dt.tzinfo:
dt = pytz.utc.localize(dt)
return LOCALTZ.normalize(dt.astimezone(LOCALTZ)) | 0.003676 |
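A self-contained sketch of the same conversion, with Europe/Paris standing in for the module-level LOCALTZ above (the zone choice is illustrative):
import pytz
from datetime import datetime
LOCALTZ = pytz.timezone('Europe/Paris')
naive_utc = datetime(2020, 1, 1, 12, 0)              # naive datetimes are assumed to be UTC
aware = pytz.utc.localize(naive_utc)
print(LOCALTZ.normalize(aware.astimezone(LOCALTZ)))  # 2020-01-01 13:00:00+01:00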
def end_time(self):
""" Return the end time of the current valid segment of data """
return float(self.strain.start_time + (len(self.strain) - self.total_corruption) / self.sample_rate) | 0.014925 |
def _check_psutil(self, instance):
"""
Gather metrics about connections states and interfaces counters
using psutil facilities
"""
custom_tags = instance.get('tags', [])
if self._collect_cx_state:
self._cx_state_psutil(tags=custom_tags)
self._cx_counters_psutil(tags=custom_tags) | 0.005747 |
def _GetFileByPath(self, key_path_upper):
"""Retrieves a Windows Registry file for a specific path.
Args:
key_path_upper (str): Windows Registry key path, in upper case with
a resolved root key alias.
Returns:
      tuple: consisting of:
str: upper case key path prefix
WinRegistryFile: corresponding Windows Registry file or None if not
available.
"""
# TODO: handle HKEY_USERS in both 9X and NT.
key_path_prefix, registry_file = self._GetCachedFileByPath(key_path_upper)
if not registry_file:
for mapping in self._GetFileMappingsByPath(key_path_upper):
try:
registry_file = self._OpenFile(mapping.windows_path)
except IOError:
registry_file = None
if not registry_file:
continue
if not key_path_prefix:
key_path_prefix = mapping.key_path_prefix
self.MapFile(key_path_prefix, registry_file)
key_path_prefix = key_path_prefix.upper()
break
return key_path_prefix, registry_file | 0.005671 |
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
        @return: True if valid, error string if invalid parameters were
                 encountered.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation) | 0.003241 |
def evaluation_metrics(predicted, actual, bow=True):
"""
Input:
predicted, actual = lists of the predicted and actual tokens
bow: if true use bag of words assumption
Returns:
precision, recall, F1, Levenshtein distance
"""
if bow:
p = set(predicted)
a = set(actual)
true_positive = 0
for token in p:
if token in a:
true_positive += 1
else:
        # shove actual into a hash, count up the unique occurrences of each token
# iterate through predicted, check which occur in actual
from collections import defaultdict
act = defaultdict(lambda: 0)
for token in actual:
act[token] += 1
true_positive = 0
for token in predicted:
if act[token] > 0:
true_positive += 1
act[token] -= 1
# for shared logic below
p = predicted
a = actual
try:
precision = true_positive / len(p)
except ZeroDivisionError:
precision = 0.0
try:
recall = true_positive / len(a)
except ZeroDivisionError:
recall = 0.0
try:
f1 = 2.0 * (precision * recall) / (precision + recall)
except ZeroDivisionError:
f1 = 0.0
# return (precision, recall, f1, dameraulevenshtein(predicted, actual))
return (precision, recall, f1) | 0.001427 |
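A worked example of the bag-of-words branch, assuming the function above is in scope:
predicted = ['the', 'cat', 'cat', 'sat']
actual = ['the', 'cat', 'mat']
# Unique predicted tokens: {'the', 'cat', 'sat'}; two of them appear in actual.
# precision = 2/3, recall = 2/3, F1 = 2 * (2/3 * 2/3) / (2/3 + 2/3) = 2/3
precision, recall, f1 = evaluation_metrics(predicted, actual, bow=True)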
def _upgrade(self):
"""
Upgrade the serialized object if necessary.
Raises:
FutureVersionError: file was written by a future version of the
software.
"""
logging.debug("[FeedbackResultsSeries]._upgrade()")
version = Version.fromstring(self.version)
logging.debug('[FeedbackResultsSeries] version=%s, class_version=%s',
str(version), self.class_version)
if version > Version.fromstring(self.class_version):
logging.debug('[FeedbackResultsSeries] version>class_version')
raise FutureVersionError(Version.fromstring(self.class_version),
version)
elif version < Version.fromstring(self.class_version):
if version < Version(0, 1):
self.time = [None]*len(self.data)
self.version = str(Version(0, 1)) | 0.002172 |
def add_proxy(self, proxy):
"""Add a valid proxy into pool
You must call `add_proxy` method to add a proxy into pool instead of
directly operate the `proxies` variable.
"""
protocol = proxy.protocol
addr = proxy.addr
if addr in self.proxies:
self.proxies[protocol][addr].last_checked = proxy.last_checked
else:
self.proxies[protocol][addr] = proxy
self.addr_list[protocol].append(addr) | 0.004115 |
def makebunches_alter(data, commdct, theidf):
"""make bunches with data"""
bunchdt = {}
dt, dtls = data.dt, data.dtls
for obj_i, key in enumerate(dtls):
key = key.upper()
objs = dt[key]
list1 = []
for obj in objs:
bobj = makeabunch(commdct, obj, obj_i)
list1.append(bobj)
bunchdt[key] = Idf_MSequence(list1, objs, theidf)
return bunchdt | 0.002381 |
def get_window():
"""Get IDA's top level window."""
tform = idaapi.get_current_tform()
# Required sometimes when closing IDBs and not IDA.
if not tform:
tform = idaapi.find_tform("Output window")
widget = form_to_widget(tform)
window = widget.window()
return window | 0.0033 |
def POST_query(self, req_hook, req_args):
''' Generic POST query method '''
# HTTP POST queries require keyManagerTokens and sessionTokens
headers = {'Content-Type': 'application/json',
'sessionToken': self.__session__,
'keyManagerToken': self.__keymngr__}
# HTTP POST query to keymanager authenticate API
try:
if req_args is None:
response = requests.post(self.__url__ + req_hook,
headers=headers,
verify=True)
else:
response = requests.post(self.__url__ + req_hook,
headers=headers,
data=req_args,
verify=True)
except requests.exceptions.RequestException as err:
self.logger.error(err)
return '500', 'Internal Error in RESTful.POST_query()'
# return the token
return response.status_code, response.text | 0.001832 |
def chain(self, other_task):
""" Add a chain listener to the execution of this task. Whenever
an item has been processed by the task, the registered listener
task will be queued to be executed with the output of this task.
Can also be written as::
pipeline = task1 > task2
"""
other_task._source = self
self._listeners.append(ChainListener(other_task))
return other_task | 0.004464 |
def simulate(self, nsites, transition_matrix, tree, ncat=1, alpha=1):
"""
Return sequences simulated under the transition matrix's model
"""
sim = SequenceSimulator(transition_matrix, tree, ncat, alpha)
return list(sim.simulate(nsites).items()) | 0.010526 |
def menu_item(self, sub_assistant, path):
"""
        Create a menu item for the given sub_assistant and path, and connect
        signals such as 'select' and 'button-press-event' for manipulating
        the menu item.
"""
if not sub_assistant[0].icon_path:
menu_item = self.create_menu_item(sub_assistant[0].fullname)
else:
menu_item = self.create_image_menu_item(
sub_assistant[0].fullname, sub_assistant[0].icon_path
)
if sub_assistant[0].description:
menu_item.set_has_tooltip(True)
menu_item.connect("query-tooltip",
self.parent.tooltip_queries,
self.get_formatted_description(sub_assistant[0].description),
)
menu_item.connect("select", self.parent.sub_menu_select, path)
menu_item.connect("button-press-event", self.parent.sub_menu_pressed)
menu_item.show()
return menu_item | 0.00402 |
def free_slave(**connection_args):
'''
Frees a slave from its master. This is a WIP, do not use.
CLI Example:
.. code-block:: bash
salt '*' mysql.free_slave
'''
slave_db = _connect(**connection_args)
if slave_db is None:
return ''
slave_cur = slave_db.cursor(MySQLdb.cursors.DictCursor)
slave_cur.execute('show slave status')
slave_status = slave_cur.fetchone()
master = {'host': slave_status['Master_Host']}
try:
# Try to connect to the master and flush logs before promoting to
# master. This may fail if the master is no longer available.
# I am also assuming that the admin password is the same on both
# servers here, and only overriding the host option in the connect
# function.
master_db = _connect(**master)
if master_db is None:
return ''
master_cur = master_db.cursor()
master_cur.execute('flush logs')
master_db.close()
except MySQLdb.OperationalError:
pass
slave_cur.execute('stop slave')
slave_cur.execute('reset master')
    slave_cur.execute("change master to MASTER_HOST=''")
slave_cur.execute('show slave status')
results = slave_cur.fetchone()
if results is None:
return 'promoted'
else:
return 'failed' | 0.000747 |
def unique_field_data_types(self):
"""
Checks if all variants have different data types.
If so, the selected variant can be determined just by the data type of
the value without needing a field name / tag. In some languages, this
lets us make a shortcut
"""
data_type_names = set()
for field in self.fields:
if not is_void_type(field.data_type):
if field.data_type.name in data_type_names:
return False
else:
data_type_names.add(field.data_type.name)
        return True
def clean_content(content):
"""\
Removes paragraph numbers, section delimiters, xxxx etc. from the content.
This function can be used to clean-up the cable's content before it
is processed by NLP tools or to create a search engine index.
`content`
The content of the cable.
"""
for pattern, subst in _CLEAN_PATTERNS:
content = pattern.sub(subst, content)
return content | 0.007026 |
def ensure_index(index_like, copy=False):
"""
Ensure that we have an index from some index-like object.
Parameters
----------
index : sequence
An Index or other sequence
copy : bool
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index(['a', 'b'])
Index(['a', 'b'], dtype='object')
>>> ensure_index([('a', 'a'), ('b', 'c')])
Index([('a', 'a'), ('b', 'c')], dtype='object')
>>> ensure_index([['a', 'a'], ['b', 'c']])
MultiIndex(levels=[['a'], ['b', 'c']],
codes=[[0, 0], [0, 1]])
See Also
--------
ensure_index_from_sequences
"""
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
if is_iterator(index_like):
index_like = list(index_like)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from .multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like) | 0.000596 |
def set_daemon_name(self, daemon_name):
"""Set the daemon name of the daemon which this manager is attached to
and propagate this daemon name to our managed modules
:param daemon_name:
:return:
"""
self.daemon_name = daemon_name
for instance in self.instances:
instance.set_loaded_into(daemon_name) | 0.00545 |
def ordereddict_push_front(dct, key, value):
"""Set a value at the front of an OrderedDict
The original dict isn't modified, instead a copy is returned
"""
d = OrderedDict()
d[key] = value
d.update(dct)
return d | 0.004167 |
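A quick usage example, assuming the function above is in scope; the original mapping is left untouched and the new key ends up first:
from collections import OrderedDict
d = OrderedDict([('b', 2), ('c', 3)])
print(ordereddict_push_front(d, 'a', 1))  # OrderedDict([('a', 1), ('b', 2), ('c', 3)])
print(d)                                  # OrderedDict([('b', 2), ('c', 3)])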
def wait_for_plug_update(self, plug_name, remote_state, timeout_s):
"""Wait for a change in the state of a frontend-aware plug.
Args:
plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
remote_state: The last observed state.
timeout_s: Number of seconds to wait for an update.
Returns:
An updated state, or None if the timeout runs out.
Raises:
InvalidPlugError: The plug can't be waited on either because it's not in
use or it's not a frontend-aware plug.
"""
plug = self._plugs_by_name.get(plug_name)
if plug is None:
raise InvalidPlugError('Cannot wait on unknown plug "%s".' % plug_name)
if not isinstance(plug, FrontendAwareBasePlug):
raise InvalidPlugError('Cannot wait on a plug %s that is not an subclass '
'of FrontendAwareBasePlug.' % plug_name)
state, update_event = plug.asdict_with_event()
if state != remote_state:
return state
if update_event.wait(timeout_s):
return plug._asdict() | 0.005698 |
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
if self._state == self.RUNNING:
self._context.wait_all_futures([self], timeout)
return self._exception | 0.004167 |
def shadowRegisterDeltaCallback(self, srcCallback):
"""
**Description**
Listen on delta topics for this device shadow by subscribing to delta topics. Whenever there
is a difference between the desired and reported state, the registered callback will be called
and the delta payload will be available in the callback.
**Syntax**
.. code:: python
# Listen on delta topics for BotShadow
BotShadow.shadowRegisterDeltaCallback(customCallback)
**Parameters**
*srcCallback* - Function to be called when the response for this shadow request comes back. Should
be in form :code:`customCallback(payload, responseStatus, token)`, where :code:`payload` is the
JSON document returned, :code:`responseStatus` indicates whether the request has been accepted,
rejected or is a delta message, :code:`token` is the token used for tracing in this request.
**Returns**
None
"""
with self._dataStructureLock:
# Update callback data structure
self._shadowSubscribeCallbackTable["delta"] = srcCallback
# One subscription
self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "delta", self.generalCallback)
self._logger.info("Subscribed to delta topic for deviceShadow: " + self._shadowName) | 0.010799 |
def outline(dataset, generate_faces=False):
"""Produces an outline of the full extent for the input dataset.
Parameters
----------
generate_faces : bool, optional
Generate solid faces for the box. This is off by default
"""
alg = vtk.vtkOutlineFilter()
alg.SetInputDataObject(dataset)
alg.SetGenerateFaces(generate_faces)
alg.Update()
return wrap(alg.GetOutputDataObject(0)) | 0.004274 |
def custom_classfunc_rule(self, opname, token, customize, next_token):
"""
call ::= expr {expr}^n CALL_FUNCTION_n
call ::= expr {expr}^n CALL_FUNCTION_VAR_n
call ::= expr {expr}^n CALL_FUNCTION_VAR_KW_n
call ::= expr {expr}^n CALL_FUNCTION_KW_n
classdefdeco2 ::= LOAD_BUILD_CLASS mkfunc {expr}^n-1 CALL_FUNCTION_n
"""
args_pos, args_kw = self.get_pos_kw(token)
# Additional exprs for * and ** args:
# 0 if neither
# 1 for CALL_FUNCTION_VAR or CALL_FUNCTION_KW
# 2 for * and ** args (CALL_FUNCTION_VAR_KW).
        # Yes, this computation based on instruction name is a little bit hokey.
nak = ( len(opname)-len('CALL_FUNCTION') ) // 3
token.kind = self.call_fn_name(token)
uniq_param = args_kw + args_pos
# Note: 3.5+ have subclassed this method; so we don't handle
# 'CALL_FUNCTION_VAR' or 'CALL_FUNCTION_EX' here.
rule = ('call ::= expr ' +
('pos_arg ' * args_pos) +
('kwarg ' * args_kw) +
'expr ' * nak + token.kind)
self.add_unique_rule(rule, token.kind, uniq_param, customize)
if 'LOAD_BUILD_CLASS' in self.seen_ops:
if (next_token == 'CALL_FUNCTION' and next_token.attr == 1
and args_pos > 1):
rule = ('classdefdeco2 ::= LOAD_BUILD_CLASS mkfunc %s%s_%d'
% (('expr ' * (args_pos-1)), opname, args_pos))
self.add_unique_rule(rule, token.kind, uniq_param, customize) | 0.005685 |
def element_if_exists(self, using, value):
"""Check if an element in the current context.
Support:
Android iOS Web(WebView)
Args:
using(str): The element location strategy.
value(str): The value of the location strategy.
Returns:
Return True if the element does exists and return False otherwise.
Raises:
WebDriverException.
"""
try:
self._execute(Command.FIND_ELEMENT, {
'using': using,
'value': value
})
return True
except:
return False | 0.004644 |
def get_band(cls, b, **kwargs):
"""Defines what a "shortcut" band name refers to. Returns phot_system, band
"""
phot = None
# Default to SDSS for these
if b in ['u','g','r','i','z']:
phot = 'SDSS'
band = 'SDSS_{}'.format(b)
elif b in ['U','B','V','R','I']:
phot = 'UBVRIplus'
band = 'Bessell_{}'.format(b)
elif b in ['J','H','Ks']:
phot = 'UBVRIplus'
band = '2MASS_{}'.format(b)
elif b=='K':
phot = 'UBVRIplus'
band = '2MASS_Ks'
elif b in ['kep','Kepler','Kp']:
phot = 'UBVRIplus'
band = 'Kepler_Kp'
elif b=='TESS':
phot = 'UBVRIplus'
band = 'TESS'
elif b in ['W1','W2','W3','W4']:
phot = 'WISE'
band = 'WISE_{}'.format(b)
elif b in ('G', 'BP', 'RP'):
phot = 'UBVRIplus'
band = 'Gaia_{}'.format(b)
if 'version' in kwargs:
if kwargs['version']=='1.1':
band += '_DR2Rev'
else:
m = re.match('([a-zA-Z]+)_([a-zA-Z_]+)',b)
if m:
if m.group(1) in cls.phot_systems:
phot = m.group(1)
if phot=='PanSTARRS':
band = 'PS_{}'.format(m.group(2))
else:
band = m.group(0)
elif m.group(1) in ['UK','UKIRT']:
phot = 'UKIDSS'
band = 'UKIDSS_{}'.format(m.group(2))
if phot is None:
for system, bands in cls.phot_bands.items():
if b in bands:
phot = system
band = b
break
if phot is None:
raise ValueError('MIST grids cannot resolve band {}!'.format(b))
return phot, band | 0.01343 |
def get_incomings_per_page(self, per_page=1000, page=1, params=None):
"""
Get incomings per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=INCOMINGS, per_page=per_page, page=page, params=params) | 0.00716 |
def title(self):
"""Extract title from a release."""
if self.event:
if self.release['name']:
return u'{0}: {1}'.format(
self.repository['full_name'], self.release['name']
)
return u'{0} {1}'.format(self.repo_model.name, self.model.tag) | 0.006192 |
def register(self, username, password, attr_map=None):
"""
Register the user. Other base attributes from AWS Cognito User Pools
are address, birthdate, email, family_name (last name), gender,
given_name (first name), locale, middle_name, name, nickname,
phone_number, picture, preferred_username, profile, zoneinfo,
updated at, website
:param username: User Pool username
:param password: User Pool password
:param attr_map: Attribute map to Cognito's attributes
:return response: Response from Cognito
Example response::
{
'UserConfirmed': True|False,
'CodeDeliveryDetails': {
'Destination': 'string', # This value will be obfuscated
'DeliveryMedium': 'SMS'|'EMAIL',
'AttributeName': 'string'
}
}
"""
attributes = self.base_attributes.copy()
if self.custom_attributes:
attributes.update(self.custom_attributes)
cognito_attributes = dict_to_cognito(attributes, attr_map)
params = {
'ClientId': self.client_id,
'Username': username,
'Password': password,
'UserAttributes': cognito_attributes
}
self._add_secret_hash(params, 'SecretHash')
response = self.client.sign_up(**params)
attributes.update(username=username, password=password)
self._set_attributes(response, attributes)
response.pop('ResponseMetadata')
return response | 0.001271 |
def get_args():
""" get args from command line
"""
parser = argparse.ArgumentParser("FashionMNIST")
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer")
parser.add_argument("--epochs", type=int, default=200, help="epoch limit")
parser.add_argument(
"--learning_rate", type=float, default=0.001, help="learning rate"
)
parser.add_argument("--cutout", action="store_true", default=False, help="use cutout")
parser.add_argument("--cutout_length", type=int, default=8, help="cutout length")
parser.add_argument(
"--model_path", type=str, default="./", help="Path to save the destination model"
)
return parser.parse_args() | 0.007605 |
def set_level(name, level):
""" Set level for given logger
:param name: Name of logger to set the level for
:param level: The new level, see possible levels from python logging library
"""
if name is None or name == "" or name == "bench":
logging.getLogger("bench").setLevel(level)
loggername = "bench." + name
logging.getLogger(loggername).setLevel(level) | 0.005102 |
def dump_values(self, with_defaults=True, dict_cls=dict, flat=False):
"""
Export values of all items contained in this section to a dictionary.
Items with no values set (and no defaults set if ``with_defaults=True``) will be excluded.
Returns:
dict: A dictionary of key-value pairs, where for sections values are dictionaries
of their contents.
"""
values = dict_cls()
if flat:
for str_path, item in self.iter_items(recursive=True, key='str_path'):
if item.has_value:
if with_defaults or not item.is_default:
values[str_path] = item.value
else:
for item_name, item in self._tree.items():
if is_config_section(item):
section_values = item.dump_values(with_defaults=with_defaults, dict_cls=dict_cls)
if section_values:
values[item_name] = section_values
else:
if item.has_value:
if with_defaults or not item.is_default:
values[item.name] = item.value
return values | 0.004942 |
def move_page(self, direction, n_windows):
"""
Move the page down (positive direction) or up (negative direction).
Paging down:
The post on the bottom of the page becomes the post at the top of
the page and the cursor is moved to the top.
Paging up:
The post at the top of the page becomes the post at the bottom of
the page and the cursor is moved to the bottom.
"""
assert direction in (-1, 1)
assert n_windows >= 0
# top of subreddit/submission page or only one
# submission/reply on the screen: act as normal move
if (self.absolute_index < 0) | (n_windows == 0):
valid, redraw = self.move(direction, n_windows)
else:
# first page
if self.absolute_index < n_windows and direction < 0:
self.page_index = -1
self.cursor_index = 0
self.inverted = False
# not submission mode: starting index is 0
if not self._is_valid(self.absolute_index):
self.page_index = 0
valid = True
else:
# flip to the direction of movement
if ((direction > 0) & (self.inverted is True)) \
| ((direction < 0) & (self.inverted is False)):
self.page_index += (self.step * (n_windows - 1))
self.inverted = not self.inverted
self.cursor_index \
= (n_windows - (direction < 0)) - self.cursor_index
valid = False
adj = 0
# check if reached the bottom
while not valid:
n_move = n_windows - adj
if n_move == 0:
break
self.page_index += n_move * direction
valid = self._is_valid(self.absolute_index)
if not valid:
self.page_index -= n_move * direction
adj += 1
redraw = True
return valid, redraw | 0.000924 |
def command_canonize(string, vargs):
"""
Print the canonical representation of the given string.
It will replace non-canonical compound characters
with their canonical synonym.
:param str string: the string to act upon
:param dict vargs: the command line arguments
"""
try:
ipa_string = IPAString(
unicode_string=string,
ignore=vargs["ignore"],
single_char_parsing=vargs["single_char_parsing"]
)
print(vargs["separator"].join([(u"%s" % c) for c in ipa_string]))
except ValueError as exc:
print_error(str(exc)) | 0.003247 |
def check_data_port_connection(self, check_data_port):
"""Checks the connection validity of a data port
The method is called by a child state to check the validity of a data port in case it is connected with data
flows. The data port does not belong to 'self', but to one of self.states.
        If the data port is connected to a data flow, the method checks whether the connected ports have consistent
        data types.
:param rafcon.core.data_port.DataPort check_data_port: The port to check
:return: valid, message
"""
for data_flow in self.data_flows.values():
# Check whether the data flow connects the given port
from_port = self.get_data_port(data_flow.from_state, data_flow.from_key)
to_port = self.get_data_port(data_flow.to_state, data_flow.to_key)
if check_data_port is from_port or check_data_port is to_port:
# check if one of the data_types if type 'object'; in this case the data flow is always valid
if not (from_port.data_type is object or to_port.data_type is object):
if not type_inherits_of_type(from_port.data_type, to_port.data_type):
return False, "Connection of two non-compatible data types"
return True, "valid" | 0.008277 |
def dump_to_response(request, app_label=None, exclude=None,
filename_prefix=None):
"""Utility function that dumps the given app/model to an HttpResponse.
"""
app_label = app_label or []
    exclude = exclude or []
try:
filename = '%s.%s' % (datetime.now().isoformat(),
settings.SMUGGLER_FORMAT)
if filename_prefix:
filename = '%s_%s' % (filename_prefix, filename)
if not isinstance(app_label, list):
app_label = [app_label]
response = serialize_to_response(app_label, exclude)
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
except CommandError as e:
messages.error(
request,
_('An exception occurred while dumping data: %s') % force_text(e))
return HttpResponseRedirect(request.build_absolute_uri().split('dump')[0]) | 0.001076 |
def segment_kmeans(self, rgb_weight, num_clusters, hue_weight=0.0):
"""
Segment a color image using KMeans based on spatial and color distances.
Black pixels will automatically be assigned to their own 'background' cluster.
Parameters
----------
rgb_weight : float
weighting of RGB distance relative to spatial and hue distance
num_clusters : int
number of clusters to use
hue_weight : float
weighting of hue from hsv relative to spatial and RGB distance
Returns
-------
:obj:`SegmentationImage`
image containing the segment labels
"""
# form features array
label_offset = 1
nonzero_px = np.where(self.data != 0.0)
nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]]
# get hsv data if specified
color_vals = rgb_weight * \
self._data[nonzero_px[:, 0], nonzero_px[:, 1], :]
if hue_weight > 0.0:
hsv_data = cv2.cvtColor(self.data, cv2.COLOR_BGR2HSV)
color_vals = np.c_[color_vals, hue_weight *
hsv_data[nonzero_px[:, 0], nonzero_px[:, 1], :1]]
features = np.c_[nonzero_px, color_vals.astype(np.float32)]
# perform KMeans clustering
kmeans = sc.KMeans(n_clusters=num_clusters)
labels = kmeans.fit_predict(features)
# create output label array
label_im = np.zeros([self.height, self.width]).astype(np.uint8)
label_im[nonzero_px[:, 0], nonzero_px[:, 1]] = labels + label_offset
return SegmentationImage(label_im, frame=self.frame) | 0.003016 |
def _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):
"""
2D fft-based Gaussian kernel density estimate (KDE).
The code was adapted from https://github.com/mfouesneau/faststats
Parameters
----------
x : Numpy array or list
y : Numpy array or list
gridsize : tuple
Number of points used to discretize data. Use powers of 2 for fft optimization
circular: bool
If True, use circular boundaries. Defaults to False
Returns
-------
grid: A gridded 2D KDE of the input points (x, y)
xmin: minimum value of x
xmax: maximum value of x
ymin: minimum value of y
ymax: maximum value of y
"""
    # Drop non-finite samples and work with float arrays
    x = np.asarray(x, dtype=float)
    x = x[np.isfinite(x)]
    y = np.asarray(y, dtype=float)
    y = y[np.isfinite(y)]
    xmin, xmax = x.min(), x.max()
    ymin, ymax = y.min(), y.max()
    len_x = len(x)
    weights = np.ones(len_x)
    n_x, n_y = gridsize
    # Map each (x, y) sample onto integer grid-cell indices
    d_x = (xmax - xmin) / (n_x - 1)
    d_y = (ymax - ymin) / (n_y - 1)
    xyi = np.vstack((x, y)).T
    xyi -= [xmin, ymin]
    xyi /= [d_x, d_y]
    xyi = np.floor(xyi, xyi).T
    # Bandwidth via Scott's rule and the sample covariance (in grid units)
    scotts_factor = len_x ** (-1 / 6)
    cov = np.cov(xyi)
    std_devs = np.diag(cov ** 0.5)
    kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)
    inv_cov = np.linalg.inv(cov * scotts_factor ** 2)
    # Build the Gaussian kernel evaluated on its own small grid
    x_x = np.arange(kern_nx) - kern_nx / 2
    y_y = np.arange(kern_ny) - kern_ny / 2
    x_x, y_y = np.meshgrid(x_x, y_y)
    kernel = np.vstack((x_x.flatten(), y_y.flatten()))
    kernel = np.dot(inv_cov, kernel) * kernel
    kernel = np.exp(-kernel.sum(axis=0) / 2)
    kernel = kernel.reshape((int(kern_ny), int(kern_nx)))
    # Histogram the samples onto the grid and smooth by convolving with the kernel
    boundary = "wrap" if circular else "symm"
    grid = coo_matrix((weights, xyi), shape=(n_x, n_y)).toarray()
    grid = convolve2d(grid, kernel, mode="same", boundary=boundary)
    # Normalize so the result integrates (approximately) to one
    norm_factor = np.linalg.det(2 * np.pi * cov * scotts_factor ** 2)
    norm_factor = len_x * d_x * d_y * norm_factor ** 0.5
    grid /= norm_factor
    return grid, xmin, xmax, ymin, ymax | 0.000979
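A hedged usage sketch for _fast_kde_2d above, assuming the function and its imports (numpy as np, scipy.sparse.coo_matrix, scipy.signal.convolve2d) are in scope; the synthetic data and grid size are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, size=5000)
y = 0.5 * x + rng.normal(0.0, 0.5, size=5000)

grid, xmin, xmax, ymin, ymax = _fast_kde_2d(x, y, gridsize=(128, 128))
print(grid.shape)               # (128, 128) gridded density estimate
print(xmin, xmax, ymin, ymax)   # data extent, handy for plt.imshow(..., extent=...)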
def link(self, source, target):
'creates a hard link `target -> source` (e.g. ln source target)'
return self.operations('link', target.decode(self.encoding),
source.decode(self.encoding)) | 0.012346 |
def zip_pack(filepath, options):
"""
Creates a zip archive containing the script at *filepath* along with all
imported modules that are local to *filepath* as a self-extracting python
script. A shebang will be appended to the beginning of the resulting
    zip archive which will allow it to be executed directly like a regular
    Python script.
    If being run inside Python 3 and the `lzma` module is available the
    resulting 'pyz' file will use ZIP_LZMA compression to maximize compression.
    *options* is expected to be the same options parsed from pyminifier.py
on the command line.
.. note::
* The file resulting from this method cannot be imported as a module into another python program (command line execution only).
* Any required local (implied path) modules will be automatically included (well, it does its best).
* The result will be saved as a .pyz file (which is an extension I invented for this format).
"""
import zipfile
# Hopefully some day we'll be able to use ZIP_LZMA too as the compression
# format to save even more space...
compression_format = zipfile.ZIP_DEFLATED
cumulative_size = 0 # For tracking size reduction stats
# Record the filesize for later comparison
cumulative_size += os.path.getsize(filepath)
dest = options.pyz
z = zipfile.ZipFile(dest, "w", compression_format)
# Take care of minifying our primary script first:
source = open(filepath).read()
primary_tokens = token_utils.listified_tokenizer(source)
# Preserve shebangs (don't care about encodings for this)
shebang = analyze.get_shebang(primary_tokens)
if not shebang:
# We *must* have a shebang for this to work so make a conservative default:
shebang = "#!/usr/bin/env python"
if py3:
if shebang.rstrip().endswith('python'): # Make it python3 (to be safe)
shebang = shebang.rstrip()
shebang += '3\n' #!/usr/bin/env python3
if not options.nominify: # Minify as long as we don't have this option set
source = minification.minify(primary_tokens, options)
# Write out to a temporary file to add to our zip
temp = tempfile.NamedTemporaryFile(mode='w')
temp.write(source)
temp.flush()
# Need the path where the script lives for the next steps:
path = os.path.split(filepath)[0]
if not path:
path = os.getcwd()
main_py = path + '/__main__.py'
if os.path.exists(main_py):
# There's an existing __main__.py, use it
z.write(main_py, '__main__.py')
z.write(temp.name, os.path.split(filepath)[1])
else:
# No __main__.py so we rename our main script to be the __main__.py
# This is so it will still execute as a zip
z.write(filepath, '__main__.py')
temp.close()
# Now write any required modules into the zip as well
local_modules = analyze.enumerate_local_modules(primary_tokens, path)
name_generator = None # So we can tell if we need to obfuscate
if options.obfuscate or options.obf_classes \
or options.obf_functions or options.obf_variables \
or options.obf_builtins or options.obf_import_methods:
        # Put together the settings that will be used for all obfuscation functions:
identifier_length = int(options.replacement_length)
if options.use_nonlatin:
if sys.version_info[0] == 3:
name_generator = obfuscate.obfuscation_machine(
use_unicode=True, identifier_length=identifier_length
)
else:
print(
"ERROR: You can't use nonlatin characters without Python 3")
sys.exit(2)
else:
name_generator = obfuscate.obfuscation_machine(
identifier_length=identifier_length)
    table = [{}]
included_modules = []
for module in local_modules:
module = module.replace('.', '/')
module = "%s.py" % module
# Add the filesize to our total
cumulative_size += os.path.getsize(module)
# Also record that we've added it to the archive
included_modules.append(module)
# Minify these files too
source = open(os.path.join(path, module)).read()
tokens = token_utils.listified_tokenizer(source)
maybe_more_modules = analyze.enumerate_local_modules(tokens, path)
for mod in maybe_more_modules:
if mod not in local_modules:
local_modules.append(mod) # Extend the current loop, love it =)
if not options.nominify:
# Perform minification (this also handles obfuscation)
source = minification.minify(tokens, options)
# Have to re-tokenize for obfucation (it's quick):
tokens = token_utils.listified_tokenizer(source)
# Perform obfuscation if any of the related options were set
if name_generator:
obfuscate.obfuscate(
module,
tokens,
options,
name_generator=name_generator,
table=table
)
# Convert back to text
result = token_utils.untokenize(tokens)
result += (
"# Created by pyminifier "
"(https://github.com/liftoff/pyminifier)\n")
# Write out to a temporary file to add to our zip
temp = tempfile.NamedTemporaryFile(mode='w')
temp.write(source)
temp.flush()
z.write(temp.name, module)
temp.close()
z.close()
# Finish up by writing the shebang to the beginning of the zip
prepend(shebang, dest)
os.chmod(dest, 0o755) # Make it executable (since we added the shebang)
pyz_filesize = os.path.getsize(dest)
percent_saved = round(float(pyz_filesize) / float(cumulative_size) * 100, 2)
print('%s saved as compressed executable zip: %s' % (filepath, dest))
print('The following modules were automatically included (as automagic '
'dependencies):\n')
for module in included_modules:
print('\t%s' % module)
print('\nOverall size reduction: %s%% of original size' % percent_saved) | 0.002774 |
def generate_valid_keys():
""" create a list of valid keys """
valid_keys = []
for minimum, maximum in RANGES:
for i in range(ord(minimum), ord(maximum) + 1):
valid_keys.append(chr(i))
return valid_keys | 0.004202 |
def _print_general_vs_table(self, idset1, idset2):
"""
:param idset1:
:param idset2:
"""
ref1name = ''
set1_hasref = isinstance(idset1, idset_with_reference)
if set1_hasref:
ref1arr = np.array(idset1.reflst)
ref1name = idset1.refname
ref2name = ref1name
set2_hasref = isinstance(idset2, idset_with_reference)
if set2_hasref:
ref2arr = np.array(idset2.reflst)
ref2name = idset2.refname
else:
ref2name = ref1name
        # First show a general table
hdr11 = '{0} > {1}'.format(idset1.name, idset2.name)
hdr12 = '{0} > {1} {2}'.format(idset1.name, idset2.name, ref2name)
hdr13 = '{0} < {1}'.format(idset1.name, idset2.name)
hdr14 = '{0} < {1} {2}'.format(idset1.name, idset2.name, ref1name)
table = [[hdr11, hdr12, hdr13, hdr14]]
set1 = set(idset1)
set2 = set(idset2)
row11 = list(set1 - set2)
if set1_hasref:
row12 = [ref1arr[np.where(idset1 == nom)][0] for nom in row11]
else:
row12 = ['Not found' for _ in row11]
row13 = list(set2 - set1)
if set2_hasref:
row14 = [ref2arr[np.where(idset2 == nom)][0] for nom in row13]
else:
row14 = ['Not found' for _ in row13]
tablst = self._tabulate_4_lists(row11, row12, row13, row14)
table.extend(tablst)
if len(table) > 1:
print(tabulate(table, headers='firstrow'))
print('\n') | 0.001913 |
def main(**options):
"""Slurp up linter output and send it to a GitHub PR review."""
configure_logging(log_all=options.get('log'))
stdin_stream = click.get_text_stream('stdin')
stdin_text = stdin_stream.read()
click.echo(stdin_text)
ci = find_ci_provider()
config = Config(options, ci=ci)
build = LintlyBuild(config, stdin_text)
try:
build.execute()
except NotPullRequestException:
logger.info('Not a PR. Lintly is exiting.')
sys.exit(0)
# Exit with the number of files that have violations
sys.exit(len(build.violations)) | 0.001669 |
def depends(self, *nodes):
""" Adds nodes as relatives to this one, and
updates the relatives with self as children.
:param nodes: GraphNode(s)
"""
for node in nodes:
self.add_relative(node)
node.add_children(self) | 0.007194 |
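A minimal sketch of the wiring depends above performs; TinyNode is a hypothetical stand-in exposing the add_relative/add_children hooks the real GraphNode is assumed to provide.

class TinyNode:
    def __init__(self, name):
        self.name = name
        self.relatives = []
        self.children = []

    def add_relative(self, node):
        self.relatives.append(node)

    def add_children(self, node):
        self.children.append(node)

TinyNode.depends = depends  # reuse the method defined above on the stub

a, b, c = TinyNode("a"), TinyNode("b"), TinyNode("c")
c.depends(a, b)
print([n.name for n in c.relatives])  # ['a', 'b'] -- c depends on a and b
print([n.name for n in a.children])   # ['c']      -- each dependency knows c as a child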
def present(name,
pattern,
definition,
priority=0,
vhost='/',
runas=None,
apply_to=None):
'''
Ensure the RabbitMQ policy exists.
Reference: http://www.rabbitmq.com/ha.html
name
Policy name
pattern
A regex of queues to apply the policy to
definition
A json dict describing the policy
priority
Priority (defaults to 0)
vhost
Virtual host to apply to (defaults to '/')
runas
Name of the user to run the command as
apply_to
Apply policy to 'queues', 'exchanges' or 'all' (default to 'all')
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
result = {}
policies = __salt__['rabbitmq.list_policies'](vhost=vhost, runas=runas)
policy = policies.get(vhost, {}).get(name)
updates = []
if policy:
if policy.get('pattern') != pattern:
updates.append('Pattern')
current_definition = policy.get('definition')
current_definition = json.loads(current_definition) if current_definition else ''
new_definition = json.loads(definition) if definition else ''
if current_definition != new_definition:
updates.append('Definition')
if apply_to and (policy.get('apply-to') != apply_to):
updates.append('Applyto')
if int(policy.get('priority')) != priority:
updates.append('Priority')
if policy and not updates:
ret['comment'] = 'Policy {0} {1} is already present'.format(vhost, name)
return ret
if not policy:
ret['changes'].update({'old': {}, 'new': name})
if __opts__['test']:
ret['comment'] = 'Policy {0} {1} is set to be created'.format(vhost, name)
else:
log.debug('Policy doesn\'t exist - Creating')
result = __salt__['rabbitmq.set_policy'](vhost,
name,
pattern,
definition,
priority=priority,
runas=runas,
apply_to=apply_to)
elif updates:
ret['changes'].update({'old': policy, 'new': updates})
if __opts__['test']:
ret['comment'] = 'Policy {0} {1} is set to be updated'.format(vhost, name)
else:
log.debug('Policy exists but needs updating')
result = __salt__['rabbitmq.set_policy'](vhost,
name,
pattern,
definition,
priority=priority,
runas=runas,
apply_to=apply_to)
if 'Error' in result:
ret['result'] = False
ret['comment'] = result['Error']
elif ret['changes'] == {}:
ret['comment'] = '\'{0}\' is already in the desired state.'.format(name)
elif __opts__['test']:
ret['result'] = None
elif 'Set' in result:
ret['comment'] = result['Set']
return ret | 0.001761 |
def file_hash(path, hash_type="md5", block_size=65536, hex_digest=True):
"""
    Hash a given file with md5, or any other algorithm, and return the hex
    digest. You can run `hashlib.algorithms_available` to see which are
    available on your system (unless you have an archaic python version,
    you poor soul).
    This function is designed to be non memory intensive.
    .. code:: python
        reusables.file_hash("test_structure.zip")
# '61e387de305201a2c915a4f4277d6663'
:param path: location of the file to hash
:param hash_type: string name of the hash to use
:param block_size: amount of bytes to add to hasher at a time
:param hex_digest: returned as hexdigest, false will return digest
:return: file's hash
"""
hashed = hashlib.new(hash_type)
with open(path, "rb") as infile:
buf = infile.read(block_size)
while len(buf) > 0:
hashed.update(buf)
buf = infile.read(block_size)
return hashed.hexdigest() if hex_digest else hashed.digest() | 0.000974 |
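A hedged usage sketch for file_hash above, cross-checking the block-wise digest against hashing all the bytes at once; the temp file content is arbitrary and the helper is assumed to be in scope.

import hashlib
import tempfile

payload = b"hello world" * 1000
with tempfile.NamedTemporaryFile(delete=False, suffix=".bin") as tmp:
    tmp.write(payload)
    path = tmp.name

digest = file_hash(path, hash_type="sha256")
assert digest == hashlib.sha256(payload).hexdigest()  # block-wise == one-shot
print(digest)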
def cartopy_globe(self):
"""Initialize a `cartopy.crs.Globe` from the metadata."""
if 'earth_radius' in self._attrs:
kwargs = {'ellipse': 'sphere', 'semimajor_axis': self._attrs['earth_radius'],
'semiminor_axis': self._attrs['earth_radius']}
else:
attr_mapping = [('semimajor_axis', 'semi_major_axis'),
('semiminor_axis', 'semi_minor_axis'),
('inverse_flattening', 'inverse_flattening')]
kwargs = self._map_arg_names(self._attrs, attr_mapping)
# WGS84 with semi_major==semi_minor is NOT the same as spherical Earth
# Also need to handle the case where we're not given any spheroid
kwargs['ellipse'] = None if kwargs else 'sphere'
return ccrs.Globe(**kwargs) | 0.004756 |
def contents_of(f, encoding='utf-8'):
"""Helper to read the contents of the given file or path into a string with the given encoding.
Encoding defaults to 'utf-8', other useful encodings are 'ascii' and 'latin-1'."""
try:
contents = f.read()
except AttributeError:
try:
with open(f, 'r') as fp:
contents = fp.read()
except TypeError:
raise ValueError('val must be file or path, but was type <%s>' % type(f).__name__)
except OSError:
if not isinstance(f, str_types):
raise ValueError('val must be file or path, but was type <%s>' % type(f).__name__)
raise
if sys.version_info[0] == 3 and type(contents) is bytes:
# in PY3 force decoding of bytes to target encoding
return contents.decode(encoding, 'replace')
elif sys.version_info[0] == 2 and encoding == 'ascii':
# in PY2 force encoding back to ascii
return contents.encode('ascii', 'replace')
else:
# in all other cases, try to decode to target encoding
try:
return contents.decode(encoding, 'replace')
except AttributeError:
pass
# if all else fails, just return the contents "as is"
return contents | 0.003903 |
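A short usage sketch for contents_of above, assuming the helper is in scope; it accepts either an already-open file object or a filesystem path.

import io
import tempfile

with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as tmp:
    tmp.write("hello from a file")
    path = tmp.name

print(contents_of(path))                        # read by path, decoded as utf-8
print(contents_of(open(path, "rb")))            # bytes from a file object get decoded
print(contents_of(io.StringIO("in-memory")))    # anything with .read() works too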
def get_last_doc(self):
"""Searches through the doc dict to find the document that was
modified or deleted most recently."""
return max(self.doc_dict.values(), key=lambda x: x.ts).meta_dict | 0.00939 |
def _try_to_compute_deterministic_class_id(cls, depth=5):
"""Attempt to produce a deterministic class ID for a given class.
The goal here is for the class ID to be the same when this is run on
different worker processes. Pickling, loading, and pickling again seems to
produce more consistent results than simply pickling. This is a bit crazy
and could cause problems, in which case we should revert it and figure out
something better.
Args:
cls: The class to produce an ID for.
depth: The number of times to repeatedly try to load and dump the
string while trying to reach a fixed point.
Returns:
A class ID for this class. We attempt to make the class ID the same
when this function is run on different workers, but that is not
guaranteed.
Raises:
Exception: This could raise an exception if cloudpickle raises an
exception.
"""
# Pickling, loading, and pickling again seems to produce more consistent
    # results than simply pickling. This is a bit crazy, but seems to work in practice.
class_id = pickle.dumps(cls)
for _ in range(depth):
new_class_id = pickle.dumps(pickle.loads(class_id))
if new_class_id == class_id:
# We appear to have reached a fix point, so use this as the ID.
return hashlib.sha1(new_class_id).digest()
class_id = new_class_id
# We have not reached a fixed point, so we may end up with a different
# class ID for this custom class on each worker, which could lead to the
# same class definition being exported many many times.
logger.warning(
"WARNING: Could not produce a deterministic class ID for class "
"{}".format(cls))
return hashlib.sha1(new_class_id).digest() | 0.000562 |
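A small illustration of the pickle fixed-point idea used above, assuming the helper and its pickle/hashlib imports are in scope; for a plain module-level class the payload is just a name reference, so it stabilizes after one round-trip.

import hashlib  # needed by the helper above if not already imported
import pickle

class Example:
    pass

first = pickle.dumps(Example)
second = pickle.dumps(pickle.loads(first))
print(first == second)  # True: a fixed point is reached immediately

class_id = _try_to_compute_deterministic_class_id(Example)
print(len(class_id))  # 20 -- the SHA-1 digest used as the class ID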
def _finaliseRequest(self, request, status, content, mimetype='text/plain'):
"""
Finalises the request.
@param request: The HTTP Request.
@type request: C{http.Request}
@param status: The HTTP status code.
@type status: C{int}
@param content: The content of the response.
@type content: C{str}
@param mimetype: The MIME type of the request.
@type mimetype: C{str}
"""
request.setResponseCode(status)
request.setHeader("Content-Type", mimetype)
request.setHeader("Content-Length", str(len(content)))
request.setHeader("Server", gateway.SERVER_NAME)
request.write(content)
request.finish() | 0.002755 |
def filter(self, func):
"""
Filter array along an axis.
Applies a function which should evaluate to boolean,
along a single axis or multiple axes. Array will be
aligned so that the desired set of axes are in the
keys, which may require a transpose/reshape.
Parameters
----------
func : function
Function to apply, should return boolean
"""
if self.mode == 'local':
reshaped = self._align(self.baseaxes)
filtered = asarray(list(filter(func, reshaped)))
if self.labels is not None:
mask = asarray(list(map(func, reshaped)))
if self.mode == 'spark':
sort = False if self.labels is None else True
filtered = self.values.filter(func, axis=self.baseaxes, sort=sort)
if self.labels is not None:
keys, vals = zip(*self.values.map(func, axis=self.baseaxes, value_shape=(1,)).tordd().collect())
perm = sorted(range(len(keys)), key=keys.__getitem__)
mask = asarray(vals)[perm]
if self.labels is not None:
s1 = prod(self.baseshape)
newlabels = self.labels.reshape(s1, 1)[mask].squeeze()
else:
newlabels = None
return self._constructor(filtered, labels=newlabels).__finalize__(self, noprop=('labels',)) | 0.002853 |
def _write_mtlist_ins(ins_filename,df,prefix):
""" write an instruction file for a MODFLOW list file
Parameters
----------
ins_filename : str
name of the instruction file to write
df : pandas.DataFrame
the dataframe of list file entries
prefix : str
the prefix to add to the column names to form
        observation names
"""
try:
dt_str = df.index.map(lambda x: x.strftime("%Y%m%d"))
    except Exception:
dt_str = df.index.map(lambda x: "{0:08.1f}".format(x).strip())
if prefix == '':
name_len = 11
else:
name_len = 11 - (len(prefix)+1)
with open(ins_filename,'w') as f:
f.write('pif ~\nl1\n')
for dt in dt_str:
f.write("l1 ")
for col in df.columns:
col = col.replace("(",'').replace(")",'')
raw = col.split('_')
name = ''.join([r[:2] for r in raw[:-2]])[:6] + raw[-2] + raw[-1][0]
#raw[0] = raw[0][:6]
#name = ''.join(raw)
if prefix == '':
obsnme = "{1}_{2}".format(prefix,name[:name_len],dt)
else:
obsnme = "{0}_{1}_{2}".format(prefix, name[:name_len], dt)
f.write(" w !{0}!".format(obsnme))
f.write("\n") | 0.00907 |
def apply_mapping(raw_row, mapping):
'''
Override this to hand craft conversion of row.
'''
row = {target: mapping_func(raw_row[source_key])
for target, (mapping_func, source_key)
in mapping.fget().items()}
return row | 0.003861 |
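A hedged sketch of the mapping object apply_mapping above expects: a property whose zero-argument getter returns {target: (converter, source_key)}; the field names here are invented.

# property with a no-argument getter, matching the mapping.fget() call above
mapping = property(lambda: {
    "name": (str.strip, "raw_name"),
    "age": (int, "raw_age"),
})

raw_row = {"raw_name": "  Ada Lovelace ", "raw_age": "36"}
print(apply_mapping(raw_row, mapping))
# {'name': 'Ada Lovelace', 'age': 36}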