text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
def ready(self):
"""
        Assumes postgres now responds to pg_ctl, but might not yet be listening
        for connections from psql. Test that psql is able to connect, as
it occasionally takes 5-10 seconds for postgresql to start listening.
"""
cmd = self._psql_cmd()
for i in range(50, -1, -1):
res = subprocess.call(
cmd, stdin=DEV_NULL, stdout=DEV_NULL,
stderr=DEV_NULL)
if res == 0:
break
time.sleep(0.2)
return i != 0 | 0.00361 |
def console_host(self, new_host):
"""
        If remote console connections are allowed, we need to bind the console host to 0.0.0.0
"""
server_config = Config.instance().get_section_config("Server")
remote_console_connections = server_config.getboolean("allow_remote_console")
if remote_console_connections:
log.warning("Remote console connections are allowed")
self._console_host = "0.0.0.0"
else:
self._console_host = new_host | 0.006085 |
def run_all(self, delay_seconds=0):
"""
        Run all jobs regardless of whether they are scheduled to run or not.
        A delay of `delay_seconds` seconds is added between each job. This helps
distribute system load generated by the jobs more evenly
over time.
:param delay_seconds: A delay added between every executed job
"""
        logger.info('Running *all* %i jobs with %is delay in between',
len(self.jobs), delay_seconds)
for job in self.jobs[:]:
self._run_job(job)
time.sleep(delay_seconds) | 0.003442 |
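# Hedged usage sketch for run_all(): `scheduler` is an assumed instance of the class above
# whose .jobs list has already been populated; the names and values are illustrative only.
# scheduler.run_all()                  # run every job back to back
# scheduler.run_all(delay_seconds=10)  # run every job, sleeping 10s between them to spread load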
def clear_text(self, label):
"""stub"""
if label not in self.my_osid_object_form._my_map['texts']:
raise NotFound()
del self.my_osid_object_form._my_map['texts'][label] | 0.009804 |
def _get_device_grain(name, proxy=None):
'''
Retrieves device-specific grains.
'''
device = _retrieve_device_cache(proxy=proxy)
return device.get(name.upper()) | 0.005587 |
def create_project(self, key, name, description=""):
"""
Create project
        :param key: the project key
        :param name: the project name
        :param description: an optional project description
        :return: the API response from the POST request
"""
url = 'rest/api/1.0/projects'
data = {"key": key,
"name": name,
"description": description
}
return self.post(url, data=data) | 0.005222 |
def list(self, name, platform='', genre=''):
""" The name argument is required for this method as per the API
server specification. This method also provides the platform and genre
optional arguments as filters.
"""
data_list = self.db.get_data(self.list_path, name=name,
platform=platform, genre=genre)
data_list = data_list.get('Data') or {}
games = data_list.get('Game') or []
return [self._build_item(**i) for i in games] | 0.003802 |
def sort_seq_records(self, seq_records):
"""Checks that SeqExpandedRecords are sorted by gene_code and then by voucher code.
The dashes in taxon names need to be converted to underscores so the
dataset will be accepted by Biopython to do format conversions.
"""
for seq_record in seq_records:
seq_record.voucher_code = seq_record.voucher_code.replace("-", "_")
unsorted_gene_codes = set([i.gene_code for i in seq_records])
sorted_gene_codes = list(unsorted_gene_codes)
sorted_gene_codes.sort(key=lambda x: x.lower())
unsorted_voucher_codes = set([i.voucher_code for i in seq_records])
sorted_voucher_codes = list(unsorted_voucher_codes)
sorted_voucher_codes.sort(key=lambda x: x.lower())
sorted_seq_records = []
for gene_code in sorted_gene_codes:
for voucher_code in sorted_voucher_codes:
for seq_record in seq_records:
should_be_done = (
seq_record.gene_code == gene_code and
seq_record.voucher_code == voucher_code
)
if should_be_done:
sorted_seq_records.append(seq_record)
return sorted_seq_records | 0.002324 |
def _really_parse_entity(self):
"""Actually parse an HTML entity and ensure that it is valid."""
self._emit(tokens.HTMLEntityStart())
self._head += 1
this = self._read(strict=True)
if this == "#":
numeric = True
self._emit(tokens.HTMLEntityNumeric())
self._head += 1
this = self._read(strict=True)
if this[0].lower() == "x":
hexadecimal = True
self._emit(tokens.HTMLEntityHex(char=this[0]))
this = this[1:]
if not this:
self._fail_route()
else:
hexadecimal = False
else:
numeric = hexadecimal = False
valid = "0123456789abcdefABCDEF" if hexadecimal else "0123456789"
if not numeric and not hexadecimal:
valid += "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
if not all([char in valid for char in this]):
self._fail_route()
self._head += 1
if self._read() != ";":
self._fail_route()
if numeric:
test = int(this, 16) if hexadecimal else int(this)
if test < 1 or test > 0x10FFFF:
self._fail_route()
else:
if this not in htmlentities.entitydefs:
self._fail_route()
self._emit(tokens.Text(text=this))
self._emit(tokens.HTMLEntityEnd()) | 0.001378 |
def checkUserAccess(self):
""" Checks if the current user has granted access to this worksheet.
Returns False if the user has no access, otherwise returns True
"""
# Deny access to foreign analysts
allowed = True
pm = getToolByName(self, "portal_membership")
member = pm.getAuthenticatedMember()
analyst = self.getAnalyst().strip()
if analyst != _c(member.getId()):
roles = member.getRoles()
restrict = 'Manager' not in roles \
and 'LabManager' not in roles \
and 'LabClerk' not in roles \
and 'RegulatoryInspector' not in roles \
and self.bika_setup.getRestrictWorksheetUsersAccess()
allowed = not restrict
return allowed | 0.002475 |
def convert_units(self, from_units, to_units):
'''
Convert the mesh from one set of units to another.
These calls are equivalent:
- mesh.convert_units(from_units='cm', to_units='m')
- mesh.scale(.01)
'''
from blmath import units
factor = units.factor(
from_units=from_units,
to_units=to_units,
units_class='length'
)
self.scale(factor) | 0.004405 |
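# Hedged usage sketch mirroring the docstring's equivalence; `mesh` is an assumed object
# exposing convert_units()/scale() as above, and blmath is assumed to be installed.
# mesh.convert_units(from_units='cm', to_units='m')   # same effect as mesh.scale(0.01)
# mesh.convert_units(from_units='mm', to_units='cm')  # same effect as mesh.scale(0.1)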
def run_command_under_r_root(self, cmd, catched=True):
"""
        Run `cmd` with subprocess from this project's root path; when `catched` is
        True, the command's stdout and stderr are captured.
"""
RPATH = self.path
with self.cd(newdir=RPATH):
if catched:
process = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
else:
process = sp.run(cmd)
return process | 0.005747 |
def _resolveambig(subseq):
"""
Randomly resolves iupac hetero codes. This is a shortcut
for now, we could instead use the phased alleles in RAD loci.
"""
N = []
for col in subseq:
rand = np.random.binomial(1, 0.5)
N.append([_AMBIGS[i][rand] for i in col])
return np.array(N) | 0.00627 |
def setup(self, name_filters=['*.py', '*.pyw'], show_all=False,
single_click_to_open=False):
"""Setup tree widget"""
self.setup_view()
self.set_name_filters(name_filters)
self.show_all = show_all
self.single_click_to_open = single_click_to_open
# Setup context menu
self.menu = QMenu(self)
self.common_actions = self.setup_common_actions() | 0.009132 |
def for_object(self, instance, flag=''):
"""
Filter to a specific instance.
"""
check(instance)
content_type = ContentType.objects.get_for_model(instance).pk
queryset = self.filter(content_type=content_type, object_id=instance.pk)
if flag:
queryset = queryset.filter(flag=flag)
return queryset | 0.00813 |
def set_focus(self, focus_stage=None, samples=None, subset=None):
"""
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
        focus_stage : str
The name of the analysis stage desired.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
if subset is None:
subset = 'All_Analyses'
samples = self._get_samples(subset)
if focus_stage is None:
focus_stage = self.focus_stage
else:
self.focus_stage = focus_stage
for s in samples:
self.data[s].setfocus(focus_stage) | 0.002454 |
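# Hedged usage sketch for set_focus(); `analysis` is an assumed object carrying the method
# above, and the stage names come from the docstring.
# analysis.set_focus('despiked')                       # work on despiked data for all analyses
# analysis.set_focus('rawdata', samples=['Sample-1'])  # point a single sample back at raw data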
def start(self):
"""Gets the rtm ws_host and user information
Returns:
None if request failed,
else a dict containing "user"(User) and "ws_host"
"""
resp = self.post('start')
if resp.is_fail():
return None
if 'result' not in resp.data:
return None
result = resp.data['result']
return {
'user': result['user'],
'ws_host': result['ws_host'],
} | 0.004098 |
def _sample(self, n_samples):
"""
Sample
Generate samples for posterior distribution using Gauss-Newton
proposal parameters
Inputs :
n_samples :
number of samples to generate
Hidden Outputs :
chain :
chain of samples
n_samples :
length of chain
n_accepted :
number of proposals accepted
step_count :
count of the steps accepted
"""
try :
n_samples = int(n_samples)
except :
raise TypeError("number of samples has to be an integer")
# fetch info
X = self._proposal_params(self._X)
k_max = self._max_steps
# initialize
chain = np.zeros((n_samples, self._n))
n_accepted = 0
step_count = np.zeros(k_max+2)
# begin outer loop
for i in xrange(n_samples):
accepted = False # check if sample is accepted
r_ = [1] # list of step sizes
Z_ = [X] # initialize list of Z s
self._r_ = r_
log_P_z_x = 0. + X['log_p']
k = 0 # back-off steps taken so far
while k <= k_max:
# get proposal
chi_z = False
while not chi_z:
z = multi_normal(X, r_[-1])
chi_z, f_z, J_z = self._f(z)
Z = self._proposal_params({'x':z,'f':f_z,'J':J_z})
Z_.append(Z)
self._Z_ = Z_
log_P_z_x += log_K(Z, X, r_[-1])
# N is the Numerator of the acceptance, N = P_x_z
self._N_is_0 = False # check to see if N = 0, to use in _log_P
log_N = self._log_P(X, Z, k)
# calculating acceptance probability
if self._N_is_0 == True :
A_z_x = 0.
elif log_N >= log_P_z_x :
A_z_x = 1.
else :
A_z_x = np.exp(log_N - log_P_z_x)
# acceptance rejection
if np.random.rand() <= A_z_x:
accepted = True
break
else :
log_P_z_x += np.log(1. - A_z_x)
self._back_off()
k += 1
# end of steps for loop
if accepted == True :
chain[i,:] = z[:,0]
X = Z
# for statistics
n_accepted += 1
step_count[k+1] += 1
else :
chain[i,:] = X['x'][:,0]
# for statistics
step_count[0] += 1
# end outer loop
# update stored info
self._X = X
# outputs
if self._n_samples == 0 :
self._chain = chain
self._step_count = step_count
else :
self._chain = np.append(self._chain, chain, axis=0)
self._step_count = np.add(self._step_count, step_count)
self._n_samples += n_samples
self._n_accepted += n_accepted | 0.013446 |
def create_site(sitename):
"""Create a new site directory and init Yass"""
sitepath = os.path.join(CWD, sitename)
if os.path.isdir(sitepath):
print("Site directory '%s' exists already!" % sitename)
else:
print("Creating site: %s..." % sitename)
os.makedirs(sitepath)
copy_resource("skel/", sitepath)
stamp_yass_current_version(sitepath)
print("Site created successfully!")
print("CD into '%s' and run 'yass serve' to view the site" % sitename)
footer() | 0.00189 |
def _move_agent(self, agent, direction, wrap_allowed=True):
"""
moves agent 'agent' in 'direction'
"""
x,y = agent.coords['x'], agent.coords['y']
        print('moving agent ', agent.name, 'in direction dx,dy=', direction, 'wrap_allowed = ', wrap_allowed)
agent.coords['x'] = x + direction[0]
agent.coords['y'] = y + direction[1] | 0.010959 |
def delete(self):
""" Delete the table.
Returns:
True if the Table no longer exists; False otherwise.
"""
try:
self._api.table_delete(self._name_parts)
except google.datalab.utils.RequestException:
# TODO(gram): May want to check the error reasons here and if it is not
# because the file didn't exist, return an error.
pass
except Exception as e:
raise e
return not self.exists() | 0.013483 |
def _handle_shutdown_reply(self, msg):
""" Handle shutdown signal, only if from other console.
"""
self.log.debug("shutdown: %s", msg.get('content', ''))
if not self._hidden and not self._is_from_this_session(msg):
if self._local_kernel:
if not msg['content']['restart']:
self.exit_requested.emit(self)
else:
# we just got notified of a restart!
time.sleep(0.25) # wait 1/4 sec to reset
# lest the request for a new prompt
# goes to the old kernel
self.reset()
else: # remote kernel, prompt on Kernel shutdown/reset
title = self.window().windowTitle()
if not msg['content']['restart']:
reply = QtGui.QMessageBox.question(self, title,
"Kernel has been shutdown permanently. "
"Close the Console?",
QtGui.QMessageBox.Yes,QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.exit_requested.emit(self)
else:
# XXX: remove message box in favor of using the
# clear_on_kernel_restart setting?
reply = QtGui.QMessageBox.question(self, title,
"Kernel has been reset. Clear the Console?",
QtGui.QMessageBox.Yes,QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
time.sleep(0.25) # wait 1/4 sec to reset
# lest the request for a new prompt
# goes to the old kernel
self.reset() | 0.010718 |
def getStatus(rh):
"""
Get the power (logon/off) status of a virtual machine.
Input:
Request Handle with the following properties:
function - 'POWERVM'
subfunction - 'STATUS'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
results['overallRC'] - 0: ok, non-zero: error
if ok:
results['rc'] - 0: for both on and off cases
results['rs'] - 0: powered on
results['rs'] - 1: powered off
"""
rh.printSysLog("Enter powerVM.getStatus, userid: " +
rh.userid)
results = isLoggedOn(rh, rh.userid)
if results['overallRC'] != 0:
# Unexpected error
pass
elif results['rs'] == 0:
rh.printLn("N", rh.userid + ": on")
else:
rh.printLn("N", rh.userid + ": off")
rh.updateResults(results)
rh.printSysLog("Exit powerVM.getStatus, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | 0.002941 |
def items( self ):
"""
Returns all the rollout items for this widget.
:return [<XRolloutItem>, ..]
"""
layout = self.widget().layout()
return [layout.itemAt(i).widget() for i in range(layout.count()-1)] | 0.019011 |
def format_op_row(ipFile, totLines, totWords, uniqueWords):
"""
Format the output row with stats
"""
txt = os.path.basename(ipFile).ljust(36) + ' '
txt += str(totLines).rjust(7) + ' '
txt += str(totWords).rjust(7) + ' '
txt += str(len(uniqueWords)).rjust(7) + ' '
return txt | 0.003268 |
def getAvailableMethods(self):
""" Returns the methods available for this analysis.
If the service has the getInstrumentEntryOfResults(), returns
the methods available from the instruments capable to perform
the service, as well as the methods set manually for the
analysis on its edit view. If getInstrumentEntryOfResults()
is unset, only the methods assigned manually to that service
are returned.
"""
methods = self.getMethods()
muids = [m.UID() for m in methods]
if self.getInstrumentEntryOfResults():
# Add the methods from the instruments capable to perform
# this analysis service
for ins in self.getInstruments():
for method in ins.getMethods():
if method and method.UID() not in muids:
methods.append(method)
muids.append(method.UID())
return methods | 0.002 |
def get_query(query_id, session, retry_count=5):
"""attemps to get the query and retry if it cannot"""
query = None
attempt = 0
while not query and attempt < retry_count:
try:
query = session.query(Query).filter_by(id=query_id).one()
except Exception:
attempt += 1
logging.error(
'Query with id `{}` could not be retrieved'.format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
logging.error('Sleeping for a sec before retrying...')
sleep(1)
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
raise SqlLabException('Failed at getting query')
return query | 0.001344 |
def add_years(dateobj, nb_years):
"""return `dateobj` + `nb_years`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_years(date(2018, 1, 1), 1)
datetime.date(2019, 1, 1)
>>> add_years(date(2018, 1, 1), -1)
datetime.date(2017, 1, 1)
>>> add_years(date(2020, 2, 29), 1)
datetime.date(2021, 2, 28)
>>> add_years(date(2020, 2, 29), -1)
datetime.date(2019, 2, 28)
"""
year = dateobj.year + nb_years
lastday = monthrange(year, dateobj.month)[1]
return dateobj.replace(year=year, day=min(lastday, dateobj.day)) | 0.001618 |
def monitor(name, callback):
'''
    monitors actions on the specified container;
    callback is a function to be called on each monitored container event
'''
global _monitor
if not exists(name):
raise ContainerNotExists("The container (%s) does not exist!" % name)
if _monitor:
if _monitor.is_monitored(name):
raise Exception("You are already monitoring this container (%s)" % name)
else:
_monitor = _LXCMonitor()
logging.info("Starting LXC Monitor")
_monitor.start()
def kill_handler(sg, fr):
stop_monitor()
signal.signal(signal.SIGTERM, kill_handler)
signal.signal(signal.SIGINT, kill_handler)
_monitor.add_monitor(name, callback) | 0.006757 |
def normalize_signature(func):
"""Decorator. Combine args and kwargs. Unpack single item tuples."""
@wraps(func)
def wrapper(*args, **kwargs):
if kwargs:
args = args, kwargs
        if len(args) == 1:
args = args[0]
return func(args)
return wrapper | 0.003236 |
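# A minimal usage sketch of normalize_signature; it assumes the decorator above is in scope
# (its only dependency is functools.wraps, imported in the original module).
@normalize_signature
def echo(payload):
    return payload

echo(1)             # a single positional arg is unpacked: returns 1
echo(1, 2)          # several positional args arrive as one tuple: returns (1, 2)
echo(1, flag=True)  # kwargs fold in: returns ((1,), {'flag': True})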
def reverse_func(apps, schema_editor):
"""
manage migrate backup_app 0003_auto_20160127_2002
"""
print("\n")
remove_count = 0
BackupRun = apps.get_model("backup_app", "BackupRun")
backup_runs = BackupRun.objects.all()
for backup_run in backup_runs:
        # Use the original BackupRun model to get access to get_config_path()
temp = OriginBackupRun(name=backup_run.name, backup_datetime=backup_run.backup_datetime)
config_path = temp.get_config_path()
try:
config_path.unlink()
except OSError as err:
print("ERROR removing config file: %s" % err)
else:
remove_count += 1
# print("%r removed." % config_path.path)
print("%i config files removed.\n" % remove_count) | 0.002541 |
def export_as_string(self):
"""
Returns a string of CQL queries that can be used to recreate this table
along with all indexes on it. The returned string is formatted to
be human readable.
"""
if self._exc_info:
import traceback
ret = "/*\nWarning: Table %s.%s is incomplete because of an error processing metadata.\n" % \
(self.keyspace_name, self.name)
for line in traceback.format_exception(*self._exc_info):
ret += line
ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self._all_as_cql()
elif not self.is_cql_compatible:
# If we can't produce this table with CQL, comment inline
ret = "/*\nWarning: Table %s.%s omitted because it has constructs not compatible with CQL (was created via legacy API).\n" % \
(self.keyspace_name, self.name)
ret += "\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/" % self._all_as_cql()
elif self.virtual:
ret = ('/*\nWarning: Table {ks}.{tab} is a virtual table and cannot be recreated with CQL.\n'
'Structure, for reference:\n'
'{cql}\n*/').format(ks=self.keyspace_name, tab=self.name, cql=self._all_as_cql())
else:
ret = self._all_as_cql()
return ret | 0.005387 |
def task_done(self, **kw):
"""
Marks a pending task as done, optionally specifying a completion
date with the 'end' argument.
"""
def validate(task):
if not Status.is_pending(task['status']):
raise ValueError("Task is not pending.")
return self._task_change_status(Status.COMPLETED, validate, **kw) | 0.005333 |
def binaryRecordsStream(self, directory, recordLength):
"""
Create an input stream that monitors a Hadoop-compatible file system
for new files and reads them as flat binary files with records of
fixed length. Files must be written to the monitored directory by "moving"
them from another location within the same file system.
File names starting with . are ignored.
@param directory: Directory to load data from
@param recordLength: Length of each record in bytes
"""
return DStream(self._jssc.binaryRecordsStream(directory, recordLength), self,
NoOpSerializer()) | 0.005917 |
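# Hedged usage sketch: `ssc` is an assumed running pyspark StreamingContext exposing the
# method above; the directory path and record length are illustrative only.
# stream = ssc.binaryRecordsStream("hdfs:///data/incoming", recordLength=512)
# stream.count().pprint()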
def parse_values_from_lines(self,lines,iskeyword=False):
""" cast the string lines for a pest control file into actual inputs
Parameters
----------
lines : list
strings from pest control file
"""
if iskeyword:
extra = {}
for line in lines:
raw = line.strip().split()
if len(raw) == 0 or raw[0] == "#":
continue
name = raw[0].strip().lower()
value = raw[1].strip()
v,t,f = self._parse_value(value)
if name not in self._df.index:
extra[name] = v
else:
# if the parsed values type isn't right
if t != self._df.loc[name, "type"]:
# if a float was expected and int return, not a problem
if t == np.int32 and self._df.loc[name, "type"] == np.float64:
self._df.loc[name, "value"] = np.float64(v)
# if this is a required input, throw
elif self._df.loc[name, "required"]:
raise Exception("wrong type found for variable " + name + ":" + str(t))
else:
# else, since this problem is usually a string, check for acceptable values
found = False
for nname, avalues in self.accept_values.items():
if v in avalues:
if t == self._df.loc[nname, "type"]:
self._df.loc[nname, "value"] = v
found = True
break
if not found:
warnings.warn("non-conforming value found for " + \
name + ":" + str(v) + "...ignoring", PyemuWarning)
else:
self._df.loc[name, "value"] = v
return extra
assert len(lines) == len(CONTROL_VARIABLE_LINES),\
"ControlData error: len of lines not equal to " +\
str(len(CONTROL_VARIABLE_LINES))
for iline,line in enumerate(lines):
vals = line.strip().split()
names = CONTROL_VARIABLE_LINES[iline].strip().split()
for name,val in zip(names,vals):
v,t,f = self._parse_value(val)
name = name.replace('[','').replace(']','')
#if the parsed values type isn't right
if t != self._df.loc[name,"type"]:
# if a float was expected and int return, not a problem
if t == np.int32 and self._df.loc[name,"type"] == np.float64:
self._df.loc[name,"value"] = np.float64(v)
# if this is a required input, throw
elif self._df.loc[name,"required"]:
raise Exception("wrong type found for variable " + name + ":" + str(t))
else:
#else, since this problem is usually a string, check for acceptable values
found = False
for nname,avalues in self.accept_values.items():
if v in avalues:
if t == self._df.loc[nname,"type"]:
self._df.loc[nname,"value"] = v
found = True
break
if not found:
warnings.warn("non-conforming value found for " +\
name + ":" + str(v) + "...ignoring",PyemuWarning)
else:
self._df.loc[name,"value"] = v | 0.011292 |
def scan_list(self, start_time=None, end_time=None, **kwargs):
"""List scans stored in Security Center in a given time range.
Time is given in UNIX timestamps, assumed to be UTC. If a `datetime` is
passed it is converted. If `end_time` is not specified it is NOW. If
`start_time` is not specified it is 30 days previous from `end_time`.
:param start_time: start of range to filter
:type start_time: date, datetime, int
:param end_time: end of range to filter
        :type end_time: date, datetime, int
:return: list of dictionaries representing scans
"""
try:
end_time = datetime.utcfromtimestamp(int(end_time))
except TypeError:
if end_time is None:
end_time = datetime.utcnow()
try:
start_time = datetime.utcfromtimestamp(int(start_time))
except TypeError:
if start_time is None:
start_time = end_time - timedelta(days=30)
data = {"startTime": calendar.timegm(start_time.utctimetuple()),
"endTime": calendar.timegm(end_time.utctimetuple())}
data.update(kwargs)
result = self.raw_query("scanResult", "getRange", data=data)
return result["scanResults"] | 0.001544 |
def get_default(self, node):
"""
If not explicitly set, check if onwrite sets the equivalent
"""
if node.inst.properties.get("onwrite", None) == rdltypes.OnWriteType.woclr:
return True
else:
return self.default | 0.010949 |
def ddel_tasks(provider,
user_ids=None,
job_ids=None,
task_ids=None,
labels=None,
create_time_min=None,
create_time_max=None):
"""Kill jobs or job tasks.
This function separates ddel logic from flag parsing and user output. Users
of ddel who intend to access the data programmatically should use this.
Args:
provider: an instantiated dsub provider.
user_ids: a set of user ids who "own" the job(s) to delete.
job_ids: a set of job ids to delete.
task_ids: a set of task ids to delete.
labels: a set of LabelParam, each must match the job(s) to be cancelled.
create_time_min: a timezone-aware datetime value for the earliest create
time of a task, inclusive.
create_time_max: a timezone-aware datetime value for the most recent create
time of a task, inclusive.
Returns:
list of job ids which were deleted.
"""
# Delete the requested jobs
deleted_tasks, error_messages = provider.delete_jobs(
user_ids, job_ids, task_ids, labels, create_time_min, create_time_max)
# Emit any errors canceling jobs
for msg in error_messages:
print(msg)
return deleted_tasks | 0.005578 |
def fastrcnn_2fc_head(feature):
"""
Args:
feature (any shape):
Returns:
2D head feature
"""
dim = cfg.FPN.FRCNN_FC_HEAD_DIM
init = tf.variance_scaling_initializer()
hidden = FullyConnected('fc6', feature, dim, kernel_initializer=init, activation=tf.nn.relu)
hidden = FullyConnected('fc7', hidden, dim, kernel_initializer=init, activation=tf.nn.relu)
return hidden | 0.007212 |
def next(self) -> Future:
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = Future()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future | 0.005464 |
def getc(self, block=True):
"""Return one character from the input queue"""
if not block:
if not len(self.cookedq):
return ''
while not len(self.cookedq):
time.sleep(0.05)
self.IQUEUELOCK.acquire()
ret = self.cookedq[0]
self.cookedq = self.cookedq[1:]
self.IQUEUELOCK.release()
return ret | 0.005102 |
def require(self, value):
"""
Setter for **self.__require** attribute.
:param value: Attribute value.
:type value: tuple or list
"""
if value is not None:
assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
"require", value)
self.__require = value | 0.007853 |
def load(self, d3mds):
"""Load X, y and context from D3MDS."""
X, y = d3mds.get_data()
resource_columns = d3mds.get_related_resources(self.data_modality)
for resource_column in resource_columns:
X = self.load_resources(X, resource_column, d3mds)
context = self.get_context(X, y)
return Dataset(d3mds.dataset_id, X, y, context=context) | 0.005038 |
def available_files(self):
"""
The filenames of the available configuration files (a list of strings).
The value of :attr:`available_files` is computed the first time its
needed by searching for available configuration files that match
:attr:`filename_patterns` using :func:`~glob.glob()`. If you set
:attr:`available_files` this effectively disables searching for
configuration files.
"""
matches = []
for pattern in self.filename_patterns:
logger.debug("Matching filename pattern: %s", pattern)
matches.extend(natsort(glob.glob(parse_path(pattern))))
return matches | 0.002941 |
def _write(self, session, openFile, replaceParamFile):
"""
Replace Param File Write to File Method
"""
# Retrieve TargetParameter objects
targets = self.targetParameters
# Write lines
openFile.write('%s\n' % self.numParameters)
for target in targets:
openFile.write('%s %s\n' % (target.targetVariable, target.varFormat)) | 0.007538 |
def diff_info(data):
"""
>>> diff_info([5,5,10,10,5,5,10,10])
(0, 15)
>>> diff_info([5,10,10,5,5,10,10,5])
(15, 0)
"""
def get_diff(l):
diff = 0
for no1, no2 in iter_steps(l, steps=2):
diff += abs(no1 - no2)
return diff
data1 = data[2:]
diff1 = get_diff(data1)
data2 = data[1:-1]
diff2 = get_diff(data2)
return diff1, diff2 | 0.004878 |
def model_deleted(sender, instance,
using,
**kwargs):
"""
Automatically triggers "deleted" actions.
"""
opts = get_opts(instance)
model = '.'.join([opts.app_label, opts.object_name])
distill_model_event(instance, model, 'deleted') | 0.009804 |
def configured_logger(self, name=None):
"""Configured logger.
"""
log_handlers = self.log_handlers
# logname
if not name:
# base name is always pulsar
basename = 'pulsar'
# the namespace name for this config
name = self.name
if name and name != basename:
name = '%s.%s' % (basename, name)
else:
name = basename
#
namespaces = {}
for log_level in self.log_level or ():
bits = log_level.split('.')
namespaces['.'.join(bits[:-1]) or ''] = bits[-1]
for namespace in sorted(namespaces):
if self.daemon: # pragma nocover
handlers = []
for hnd in log_handlers:
if hnd != 'console':
handlers.append(hnd)
if not handlers:
handlers.append('file')
log_handlers = handlers
configured_logger(namespace,
config=self.log_config,
level=namespaces[namespace],
handlers=log_handlers)
return logging.getLogger(name) | 0.001594 |
def handler_for_name(fq_name):
"""Resolves and instantiates handler by fully qualified name.
First resolves the name using for_name call. Then if it resolves to a class,
instantiates a class, if it resolves to a method - instantiates the class and
binds method to the instance.
Args:
fq_name: fully qualified name of something to find.
Returns:
handler instance which is ready to be called.
"""
resolved_name = for_name(fq_name)
if isinstance(resolved_name, (type, types.ClassType)):
# create new instance if this is type
return resolved_name()
elif isinstance(resolved_name, types.MethodType):
# bind the method
return getattr(resolved_name.im_class(), resolved_name.__name__)
else:
return resolved_name | 0.007926 |
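# Hedged usage sketch for handler_for_name; for_name() (defined elsewhere in the module) is
# assumed to resolve dotted names to objects, so the names below are illustrative only.
# handler = handler_for_name('mypkg.handlers.CleanupHandler')      # class -> new instance
# handler = handler_for_name('mypkg.handlers.CleanupHandler.run')  # method -> bound to a fresh instance
# handler()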
def connect(self, db_uri, debug=False):
"""Configure connection to a SQL database.
Args:
db_uri (str): path/URI to the database to connect to
debug (Optional[bool]): whether to output logging information
"""
kwargs = {'echo': debug, 'convert_unicode': True}
# connect to the SQL database
if 'mysql' in db_uri:
kwargs['pool_recycle'] = 3600
elif '://' not in db_uri:
logger.debug("detected sqlite path URI: {}".format(db_uri))
db_path = os.path.abspath(os.path.expanduser(db_uri))
db_uri = "sqlite:///{}".format(db_path)
self.engine = create_engine(db_uri, **kwargs)
logger.debug('connection established successfully')
# make sure the same engine is propagated to the BASE classes
BASE.metadata.bind = self.engine
# start a session
self.session = scoped_session(sessionmaker(bind=self.engine))
# shortcut to query method
self.query = self.session.query
return self | 0.001878 |
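# Hedged usage sketch for connect(); `adapter` is an assumed instance of the class above.
# Paths without '://' are expanded into absolute sqlite:/// URIs, per the code.
# adapter.connect('~/data/store.db', debug=True)           # expanded to a sqlite:/// URI
# adapter.connect('mysql://user:pw@localhost/clinical')    # gets pool_recycle=3600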
def _parse_node(graph, text, condition_node_params, leaf_node_params):
"""parse dumped node"""
match = _NODEPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), **condition_node_params)
return node
match = _LEAFPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), **leaf_node_params)
return node
raise ValueError('Unable to parse node: {0}'.format(text)) | 0.001931 |
def delete_item(self, table_name, key,
expected=None, return_values=None,
object_hook=None):
"""
Delete an item and all of it's attributes by primary key.
You can perform a conditional delete by specifying an
expected rule.
:type table_name: str
:param table_name: The name of the table containing the item.
:type key: dict
:param key: A Python version of the Key data structure
defined by DynamoDB.
:type expected: dict
:param expected: A Python version of the Expected
data structure defined by DynamoDB.
:type return_values: str
:param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
values are: None or 'ALL_OLD'. If 'ALL_OLD' is
specified and the item is overwritten, the content
of the old item is returned.
"""
data = {'TableName' : table_name,
'Key' : key}
if expected:
data['Expected'] = expected
if return_values:
data['ReturnValues'] = return_values
json_input = json.dumps(data)
return self.make_request('DeleteItem', json_input,
object_hook=object_hook) | 0.004438 |
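# Hedged usage sketch for delete_item(); `layer1` is an assumed client instance exposing the
# method above, and the key/expected dicts are illustrative shapes of the structures the
# docstring describes, not authoritative wire formats.
# key = {'HashKeyElement': {'S': 'user-123'}}
# expected = {'status': {'Value': {'S': 'inactive'}}}
# layer1.delete_item('users', key, expected=expected, return_values='ALL_OLD')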
def btc_is_multisig_segwit(privkey_info):
"""
Does the given private key info represent
a multisig bundle?
For Bitcoin, this is true for multisig p2sh (not p2sh-p2wsh)
"""
try:
jsonschema.validate(privkey_info, PRIVKEY_MULTISIG_SCHEMA)
if len(privkey_info['private_keys']) == 1:
return False
return privkey_info.get('segwit', False)
except ValidationError as e:
return False | 0.002232 |
def use(wcspkg, raise_err=True):
"""Choose WCS package."""
global coord_types, wcs_configured, WCS
if wcspkg not in common.custom_wcs:
# Try to dynamically load WCS
modname = 'wcs_%s' % (wcspkg)
path = os.path.join(wcs_home, '%s.py' % (modname))
try:
my_import(modname, path)
except ImportError:
return False
if wcspkg in common.custom_wcs:
bnch = common.custom_wcs[wcspkg]
WCS = bnch.wrapper_class
coord_types = bnch.coord_types
wcs_configured = True
return True
return False | 0.001658 |
def do_batch(value, linecount, fill_with=None):
"""
A filter that batches items. It works pretty much like `slice`
just the other way round. It returns a list of lists with the
given number of items. If you provide a second parameter this
is used to fill up missing items. See this example:
.. sourcecode:: html+jinja
<table>
{%- for row in items|batch(3, ' ') %}
<tr>
{%- for column in row %}
<td>{{ column }}</td>
{%- endfor %}
</tr>
{%- endfor %}
</table>
"""
tmp = []
for item in value:
if len(tmp) == linecount:
yield tmp
tmp = []
tmp.append(item)
if tmp:
if fill_with is not None and len(tmp) < linecount:
tmp += [fill_with] * (linecount - len(tmp))
yield tmp | 0.001157 |
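# A small, self-contained check of do_batch's behaviour (pure Python, no dependencies):
rows = list(do_batch(range(7), 3, fill_with=0))
# rows == [[0, 1, 2], [3, 4, 5], [6, 0, 0]]  -- the last batch is padded up to linecount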
def create_or_update_vmextension(call=None, kwargs=None): # pylint: disable=unused-argument
'''
.. versionadded:: 2019.2.0
Create or update a VM extension object "inside" of a VM object.
required kwargs:
.. code-block:: yaml
extension_name: myvmextension
virtual_machine_name: myvm
settings: {"commandToExecute": "hostname"}
optional kwargs:
.. code-block:: yaml
resource_group: < inferred from cloud configs >
location: < inferred from cloud configs >
publisher: < default: Microsoft.Azure.Extensions >
virtual_machine_extension_type: < default: CustomScript >
type_handler_version: < default: 2.0 >
auto_upgrade_minor_version: < default: True >
protected_settings: < default: None >
'''
if kwargs is None:
kwargs = {}
if 'extension_name' not in kwargs:
raise SaltCloudSystemExit(
'An extension name must be specified'
)
if 'virtual_machine_name' not in kwargs:
raise SaltCloudSystemExit(
'A virtual machine name must be specified'
)
compconn = get_conn(client_type='compute')
# pylint: disable=invalid-name
VirtualMachineExtension = getattr(
compute_models, 'VirtualMachineExtension'
)
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
location = kwargs.get('location') or get_location()
if not location:
raise SaltCloudSystemExit(
'A location must be specified'
)
publisher = kwargs.get('publisher', 'Microsoft.Azure.Extensions')
virtual_machine_extension_type = kwargs.get('virtual_machine_extension_type', 'CustomScript')
type_handler_version = kwargs.get('type_handler_version', '2.0')
auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', True)
settings = kwargs.get('settings', {})
protected_settings = kwargs.get('protected_settings')
if not isinstance(settings, dict):
raise SaltCloudSystemExit(
'VM extension settings are not valid'
)
elif 'commandToExecute' not in settings and 'script' not in settings:
raise SaltCloudSystemExit(
'VM extension settings are not valid. Either commandToExecute or script must be specified.'
)
log.info('Creating VM extension %s', kwargs['extension_name'])
ret = {}
try:
params = VirtualMachineExtension(
location=location,
publisher=publisher,
virtual_machine_extension_type=virtual_machine_extension_type,
type_handler_version=type_handler_version,
auto_upgrade_minor_version=auto_upgrade_minor_version,
settings=settings,
protected_settings=protected_settings
)
poller = compconn.virtual_machine_extensions.create_or_update(
resource_group,
kwargs['virtual_machine_name'],
kwargs['extension_name'],
params
)
ret = poller.result()
ret = ret.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', 'Error attempting to create the VM extension: {0}'.format(exc.message))
ret = {'error': exc.message}
return ret | 0.001957 |
def _get_policies(self, resource_properties):
"""
        Returns a list of policies from the resource properties. This method knows how to interpret and handle
        the polymorphic nature of the policies property.
Policies can be one of the following:
* Managed policy name: string
* List of managed policy names: list of strings
* IAM Policy document: dict containing Statement key
* List of IAM Policy documents: list of IAM Policy Document
* Policy Template: dict with only one key where key is in list of supported policy template names
* List of Policy Templates: list of Policy Template
:param dict resource_properties: Dictionary of resource properties containing the policies property.
It is assumed that this is already a dictionary and contains policies key.
:return list of PolicyEntry: List of policies, where each item is an instance of named tuple `PolicyEntry`
"""
policies = None
if self._contains_policies(resource_properties):
policies = resource_properties[self.POLICIES_PROPERTY_NAME]
if not policies:
# Policies is None or empty
return []
if not isinstance(policies, list):
# Just a single entry. Make it into a list of convenience
policies = [policies]
result = []
for policy in policies:
policy_type = self._get_type(policy)
entry = PolicyEntry(data=policy, type=policy_type)
result.append(entry)
return result | 0.004324 |
def alias_absent(name, index):
'''
Ensure that the index alias is absent.
name
Name of the index alias to remove
index
Name of the index for the alias
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
try:
alias = __salt__['elasticsearch.alias_get'](aliases=name, indices=index)
if alias and alias.get(index, {}).get("aliases", {}).get(name, None) is not None:
if __opts__['test']:
ret['comment'] = 'Alias {0} for index {1} will be removed'.format(name, index)
ret['changes']['old'] = alias.get(index, {}).get("aliases", {}).get(name, {})
ret['result'] = None
else:
ret['result'] = __salt__['elasticsearch.alias_delete'](aliases=name, indices=index)
if ret['result']:
ret['comment'] = 'Successfully removed alias {0} for index {1}'.format(name, index)
ret['changes']['old'] = alias.get(index, {}).get("aliases", {}).get(name, {})
else:
ret['comment'] = 'Failed to remove alias {0} for index {1} for unknown reasons'.format(name, index)
else:
ret['comment'] = 'Alias {0} for index {1} is already absent'.format(name, index)
except Exception as err:
ret['result'] = False
ret['comment'] = six.text_type(err)
return ret | 0.007018 |
def get_gender(self, name, country=None):
"""Returns best gender for the given name and country pair"""
if not self.case_sensitive:
name = name.lower()
if name not in self.names:
return self.unknown_value
elif not country:
def counter(country_values):
country_values = map(ord, country_values.replace(" ", ""))
return (len(country_values),
sum(map(lambda c: c > 64 and c-55 or c-48, country_values)))
return self._most_popular_gender(name, counter)
elif country in self.__class__.COUNTRIES:
index = self.__class__.COUNTRIES.index(country)
counter = lambda e: (ord(e[index])-32, 0)
return self._most_popular_gender(name, counter)
else:
raise NoCountryError("No such country: %s" % country) | 0.004499 |
def _is_mandatory_method_param(self, node):
"""Check if astroid.Name corresponds to first attribute variable name
Name is `self` for method, `cls` for classmethod and `mcs` for metaclass.
"""
return (
self._first_attrs
and isinstance(node, astroid.Name)
and node.name == self._first_attrs[-1]
) | 0.008086 |
def modifie_many(self, dic: dict):
"""Convenience function which calls modifie on each element of dic"""
for i, v in dic.items():
self.modifie(i, v) | 0.011364 |
def keys_with_value(dictionary, value):
"Returns a subset of keys from the dict with the value supplied."
subset = [key for key in dictionary if dictionary[key] == value]
return subset | 0.009524 |
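# A small, self-contained example of keys_with_value:
ages = {'ann': 30, 'bob': 25, 'cid': 30}
keys_with_value(ages, 30)  # -> ['ann', 'cid'] (order follows the dict's insertion order)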
def _anime_add(self, data):
"""
Adds an anime to a user's list.
:param data: A :class:`Pymoe.Mal.Objects.Anime` object with the anime data
:raises: SyntaxError on invalid data type
:raises: ServerError on failure to add
:rtype: Bool
:return: True on success
"""
if isinstance(data, Anime):
xmlstr = data.to_xml()
r = requests.get(self.apiurl + "animelist/add/{}.xml".format(data.id),
params={'data': xmlstr},
auth=HTTPBasicAuth(self._username, self._password),
headers=self.header)
if r.status_code != 201:
raise ServerError(r.text, r.status_code)
return True
else:
raise SyntaxError(
"Invalid type: data should be a Pymoe.Mal.Objects.Anime object. Got a {}".format(type(data))) | 0.006397 |
def get_adcm(self):
"""
Absolute deviation around class median (ADCM).
Calculates the absolute deviations of each observation about its class
median as a measure of fit for the classification method.
Returns sum of ADCM over all classes
"""
adcm = 0
for class_def in self.classes:
if len(class_def) > 0:
yc = self.y[class_def]
yc_med = np.median(yc)
ycd = np.abs(yc - yc_med)
adcm += sum(ycd)
return adcm | 0.00361 |
def voxel_count(dset,p=None,positive_only=False,mask=None,ROI=None):
''' returns the number of non-zero voxels
:p: threshold the dataset at the given *p*-value, then count
:positive_only: only count positive values
:mask: count within the given mask
:ROI: only use the ROI with the given value (or list of values) within the mask
if ROI is 'all' then return the voxel count of each ROI
as a dictionary
'''
if p:
dset = nl.thresh(dset,p,positive_only)
else:
if positive_only:
dset = nl.calc(dset,'step(a)')
    count = 0
    count_dict = None  # initialised here so the final check works when no mask is given
devnull = open(os.devnull,"w")
if mask:
cmd = ['3dROIstats','-1Dformat','-nomeanout','-nobriklab', '-nzvoxels']
cmd += ['-mask',str(mask),str(dset)]
out = subprocess.check_output(cmd,stderr=devnull).split('\n')
if len(out)<4:
return 0
rois = [int(x.replace('NZcount_','')) for x in out[1].strip()[1:].split()]
counts = [int(x.replace('NZcount_','')) for x in out[3].strip().split()]
count_dict = None
if ROI==None:
ROI = rois
if ROI=='all':
count_dict = {}
ROI = rois
else:
if not isinstance(ROI,list):
ROI = [ROI]
for r in ROI:
if r in rois:
roi_count = counts[rois.index(r)]
if count_dict!=None:
count_dict[r] = roi_count
else:
count += roi_count
else:
cmd = ['3dBrickStat', '-slow', '-count', '-non-zero', str(dset)]
count = int(subprocess.check_output(cmd,stderr=devnull).strip())
if count_dict:
return count_dict
return count | 0.015634 |
def defaults(self):
"""
Reset the chart options and style to defaults
"""
self.chart_style = {}
self.chart_opts = {}
self.style("color", "#30A2DA")
self.width(900)
self.height(250) | 0.05102 |
def update(self, turret_data):
"""Update a given turret
:param dict turret_data: the data of the turret to update
"""
if turret_data.get('uuid') not in self.turrets:
return False
turret = self.turrets[turret_data.get('uuid')]
turret.update(**turret_data)
self.write(turret)
return True | 0.005525 |
def get_queryset(self):
"""Replicates Django CBV `get_queryset()` method, but for MongoEngine.
"""
if hasattr(self, "queryset") and self.queryset:
return self.queryset
self.set_mongonaut_base()
self.set_mongoadmin()
self.document = getattr(self.models, self.document_name)
queryset = self.document.objects.all()
if self.mongoadmin.ordering:
queryset = queryset.order_by(*self.mongoadmin.ordering)
        # search. move this to get_queryset
q = self.request.GET.get('q')
queryset = self.get_qset(queryset, q)
### Start pagination
### Note:
### Can't use Paginator in Django because mongoengine querysets are
### not the same as Django ORM querysets and it broke.
# Make sure page request is an int. If not, deliver first page.
try:
self.page = int(self.request.GET.get('page', '1'))
except ValueError:
self.page = 1
obj_count = queryset.count()
self.total_pages = math.ceil(obj_count / self.documents_per_page)
if self.page > self.total_pages:
self.page = self.total_pages
if self.page < 1:
self.page = 1
start = (self.page - 1) * self.documents_per_page
end = self.page * self.documents_per_page
queryset = queryset[start:end] if obj_count else queryset
self.queryset = queryset
return queryset | 0.003911 |
def add_dependency (self, targets, sources):
"""Adds a dependency from 'targets' to 'sources'
Both 'targets' and 'sources' can be either list
of target names, or a single target name.
"""
if isinstance (targets, str):
targets = [targets]
if isinstance (sources, str):
sources = [sources]
assert is_iterable(targets)
assert is_iterable(sources)
for target in targets:
for source in sources:
self.do_add_dependency (target, source) | 0.010811 |
def getMetadata(self, remote, address, key):
"""Get metadata of device"""
if self._server is not None:
# pylint: disable=E1121
return self._server.getAllMetadata(remote, address, key) | 0.008969 |
def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio):
"""This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify
and remove duplicates. Specifically, it uses the process.extract to identify duplicates that
score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list
since we assume this item contains the most entity information and returns that. It breaks string
length ties on an alphabetical sort.
Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the
returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less
sensitive.
Args:
contains_dupes: A list of strings that we would like to dedupe.
threshold: the numerical value (0,100) point at which we expect to find duplicates.
Defaults to 70 out of 100
scorer: Optional function for scoring matches between the query and
an individual processed choice. This should be a function
of the form f(query, choice) -> int.
By default, fuzz.token_set_ratio() is used and expects both query and
choice to be strings.
Returns:
A deduplicated list. For example:
In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins']
In: fuzzy_dedupe(contains_dupes)
Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf']
"""
extractor = []
# iterate over items in *contains_dupes*
for item in contains_dupes:
# return all duplicate matches found
matches = extract(item, contains_dupes, limit=None, scorer=scorer)
# filter matches based on the threshold
filtered = [x for x in matches if x[1] > threshold]
# if there is only 1 item in *filtered*, no duplicates were found so append to *extracted*
if len(filtered) == 1:
extractor.append(filtered[0][0])
else:
# alpha sort
filtered = sorted(filtered, key=lambda x: x[0])
# length sort
filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True)
# take first item as our 'canonical example'
extractor.append(filter_sort[0][0])
# uniquify *extractor* list
keys = {}
for e in extractor:
keys[e] = 1
extractor = keys.keys()
# check that extractor differs from contain_dupes (e.g. duplicates were found)
# if not, then return the original list
if len(extractor) == len(contains_dupes):
return contains_dupes
else:
return extractor | 0.004681 |
def seek(self, offset, whence=Seek.set):
# type: (int, SupportsInt) -> int
"""Change stream position.
Change the stream position to the given byte offset. The
offset is interpreted relative to the position indicated by
``whence``.
Arguments:
offset (int): the offset to the new position, in bytes.
whence (int): the position reference. Possible values are:
* `Seek.set`: start of stream (the default).
* `Seek.current`: current position; offset may be negative.
* `Seek.end`: end of stream; offset must be negative.
Returns:
int: the new absolute position.
Raises:
ValueError: when ``whence`` is not known, or ``offset``
is invalid.
Note:
Zip compression does not support seeking, so the seeking
is emulated. Seeking somewhere else than the current position
will need to either:
* reopen the file and restart decompression
* read and discard data to advance in the file
"""
_whence = int(whence)
if _whence == Seek.current:
offset += self._pos
if _whence == Seek.current or _whence == Seek.set:
if offset < 0:
raise ValueError("Negative seek position {}".format(offset))
elif _whence == Seek.end:
if offset > 0:
raise ValueError("Positive seek position {}".format(offset))
offset += self._end
else:
raise ValueError(
"Invalid whence ({}, should be {}, {} or {})".format(
_whence, Seek.set, Seek.current, Seek.end
)
)
if offset < self._pos:
self._f = self._zip.open(self.name) # type: ignore
self._pos = 0
self.read(offset - self._pos)
return self._pos | 0.001528 |
def get_notify_observers_kwargs(self):
""" Return the mapping between the metrics call and the iterated
variables.
Return
----------
notify_observers_kwargs: dict,
the mapping between the iterated variables.
"""
return {'x_new': self._linear.adj_op(self._x_new),
'z_new': self._z_new, 'idx': self.idx} | 0.005195 |
def get_vulnerability_functions_04(node, fname):
"""
:param node:
a vulnerabilityModel node
:param fname:
path to the vulnerability file
:returns:
a dictionary imt, vf_id -> vulnerability function
"""
logging.warning('Please upgrade %s to NRML 0.5', fname)
# NB: the IMTs can be duplicated and with different levels, each
# vulnerability function in a set will get its own levels
imts = set()
vf_ids = set()
# imt, vf_id -> vulnerability function
vmodel = scientific.VulnerabilityModel(**node.attrib)
for vset in node:
imt_str = vset.IML['IMT']
imls = ~vset.IML
imts.add(imt_str)
for vfun in vset.getnodes('discreteVulnerability'):
vf_id = vfun['vulnerabilityFunctionID']
if vf_id in vf_ids:
raise InvalidFile(
'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
(vf_id, fname, vfun.lineno))
vf_ids.add(vf_id)
with context(fname, vfun):
loss_ratios = ~vfun.lossRatio
coefficients = ~vfun.coefficientsVariation
if len(loss_ratios) != len(imls):
raise InvalidFile(
'There are %d loss ratios, but %d imls: %s, line %d' %
(len(loss_ratios), len(imls), fname,
vfun.lossRatio.lineno))
if len(coefficients) != len(imls):
raise InvalidFile(
'There are %d coefficients, but %d imls: %s, line %d' %
(len(coefficients), len(imls), fname,
vfun.coefficientsVariation.lineno))
with context(fname, vfun):
vmodel[imt_str, vf_id] = scientific.VulnerabilityFunction(
vf_id, imt_str, imls, loss_ratios, coefficients,
vfun['probabilisticDistribution'])
return vmodel | 0.000514 |
def allow_blank(self, form, name):
"""
Allow blank determines if the form might be completely empty. If it's
empty it will result in a None as the saved value for the ForeignKey.
"""
if self.blank is not None:
return self.blank
model = form._meta.model
field = model._meta.get_field(self.get_field_name(form, name))
return field.blank | 0.004878 |
def extract_angular(fileobj, keywords, comment_tags, options):
"""Extract messages from angular template (HTML) files.
It extract messages from angular template (HTML) files that use
angular-gettext translate directive as per
https://angular-gettext.rocketeer.be/
:param fileobj: the file-like object the messages should be extracted
from
    :param keywords: This is a standard parameter so it is accepted but ignored.
:param comment_tags: This is a standard parameter so it is accepted but
ignored.
:param options: Another standard parameter that is accepted but ignored.
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: ``iterator``
"""
parser = AngularGettextHTMLParser()
for line in fileobj:
parser.feed(encodeutils.safe_decode(line))
for string in parser.strings:
yield(string) | 0.001056 |
def cmd_reload(args):
'''reload graphs'''
mestate.console.writeln('Reloading graphs', fg='blue')
load_graphs()
setup_menus()
mestate.console.write("Loaded %u graphs\n" % len(mestate.graphs)) | 0.004762 |
def extract_dynamic_part(uri):
""" Extract dynamic url part from :uri: string.
:param uri: URI string that may contain dynamic part.
"""
for part in uri.split('/'):
part = part.strip()
if part.startswith('{') and part.endswith('}'):
return clean_dynamic_uri(part) | 0.003247 |
def _EccZmaxRperiRap(self,*args,**kwargs):
"""
NAME:
EccZmaxRperiRap (_EccZmaxRperiRap)
PURPOSE:
evaluate the eccentricity, maximum height above the plane, peri- and apocenter for a spherical potential
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
OUTPUT:
(e,zmax,rperi,rap)
HISTORY:
2017-12-22 - Written - Bovy (UofT)
"""
if len(args) == 5: #R,vR.vT, z, vz
R,vR,vT, z, vz= args
elif len(args) == 6: #R,vR.vT, z, vz, phi
R,vR,vT, z, vz, phi= args
else:
self._parse_eval_args(*args)
R= self._eval_R
vR= self._eval_vR
vT= self._eval_vT
z= self._eval_z
vz= self._eval_vz
if isinstance(R,float):
R= nu.array([R])
vR= nu.array([vR])
vT= nu.array([vT])
z= nu.array([z])
vz= nu.array([vz])
if self._c: #pragma: no cover
pass
else:
Lz= R*vT
Lx= -z*vT
Ly= z*vR-R*vz
L2= Lx*Lx+Ly*Ly+Lz*Lz
L= nu.sqrt(L2)
#Set up an actionAngleAxi object for EL and rap/rperi calculations
axiR= nu.sqrt(R**2.+z**2.)
axivT= L/axiR
axivR= (R*vR+z*vz)/axiR
rperi, rap= [], []
for ii in range(len(axiR)):
axiaA= actionAngleAxi(axiR[ii],axivR[ii],axivT[ii],
pot=self._2dpot)
trperi,trap= axiaA.calcRapRperi()
rperi.append(trperi)
rap.append(trap)
rperi= nu.array(rperi)
rap= nu.array(rap)
return ((rap-rperi)/(rap+rperi),rap*nu.sqrt(1.-Lz**2./L2),
rperi,rap) | 0.022862 |
def get_item_project(self, eitem):
"""
Get the project name related to the eitem
:param eitem: enriched item for which to find the project
:return: a dictionary with the project data
"""
eitem_project = {}
project = self.find_item_project(eitem)
if project is None:
project = DEFAULT_PROJECT
eitem_project = {"project": project}
# Time to add the project levels: eclipse.platform.releng.aggregator
eitem_project.update(self.add_project_levels(project))
# And now time to add the metadata
eitem_project.update(self.get_item_metadata(eitem))
return eitem_project | 0.002907 |
def benchmark(store, n=10000):
"""
Increments an integer count n times.
"""
x = UpdatableItem(store=store, count=0)
for _ in xrange(n):
x.count += 1 | 0.005682 |
def poke_native(getstate):
"""
    Serializer factory for types whose state can be natively serialized.
Arguments:
getstate (callable): takes an object and returns the object's state
to be passed to `pokeNative`.
Returns:
callable: serializer (`poke` routine).
"""
def poke(service, objname, obj, container, visited=None, _stack=None):
service.pokeNative(objname, getstate(obj), container)
return poke | 0.002146 |
def manifold(self, transformer):
"""
Creates the manifold estimator if a string value is passed in,
validates other objects passed in.
"""
if not is_estimator(transformer):
if transformer not in self.ALGORITHMS:
raise YellowbrickValueError(
"could not create manifold for '%s'".format(str(transformer))
)
# Create a new transformer with the specified params
self._name = MANIFOLD_NAMES[transformer]
transformer = clone(self.ALGORITHMS[transformer])
params = {
"n_components": 2,
"n_neighbors": self.n_neighbors,
"random_state": self.random_state,
}
for param in list(params.keys()):
if param not in transformer.get_params():
del params[param]
transformer.set_params(**params)
self._manifold = transformer
if self._name is None:
self._name = self._manifold.__class__.__name__ | 0.002783 |
def plot4_nolog(self, num):
"""
Plots the abundances of H-1, He-4, C-12 and O-16.
"""
self.plot_prof_2(num,'H-1',0.,5.)
self.plot_prof_2(num,'He-4',0.,5.)
self.plot_prof_2(num,'C-12',0.,5.)
self.plot_prof_2(num,'O-16',0.,5.)
pyl.legend(loc=3) | 0.045307 |
def Upload(self, fd, sign_fn=None):
"""Uploads data from a given stream and signs them with a given key."""
if not sign_fn:
raise ValueError("sign_fn can't be empty. "
"See DefaultUploadSigner as a possible option.")
args = binary_management_pb2.ApiUploadGrrBinaryArgs(
type=self.binary_type, path=self.path)
while True:
data = fd.read(self.__class__.CHUNK_SIZE)
if not data:
break
blob = args.blobs.add()
blob.signature = sign_fn(data)
blob.signature_type = blob.RSA_PKCS1v15
blob.digest = hashlib.sha256(data).digest()
blob.digest_type = blob.SHA256
blob.data = data
self._context.SendRequest("UploadGrrBinary", args) | 0.013514 |
def geo(*params):
"""
    Look up an address from latitude and longitude coordinates.
    :param params: latitude and longitude
    :return: the address as a string
"""
api = 'http://www.gpsspg.com/apis/maps/geo/'
headers = {
'Accept':'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.8,en;q=0.6',
'Cookie':'ARRAffinity=7996acd7385beb55da51ad553ab9df60d6b742a8d0840faa612d0bd27f840017; Hm_lvt_15b1a40a8d25f43208adae1c1e12a514=1507564728; Hm_lpvt_15b1a40a8d25f43208adae1c1e12a514=1507564728; AJSTAT_ok_pages=1; AJSTAT_ok_times=1',
'Host':'www.gpsspg.com',
'Referer':'http://www.gpsspg.com/iframe/maps/qq_161128.htm?mapi=2',
'User-Agent': random_user_agent(),
'X-Requested-With':'XMLHttpRequest',
}
query_params = {
'output': 'jsonp',
'lat': params[0],
'lng': params[1],
'type': '0',
'callback': 'jQuery110207091189337888508_1507564728439',
'_': int(get_current_timestamp()),
}
http = HTTP()
t, h, c, hi, s = http.get(api, headers=headers, params=query_params)
rp = ReParser()
real_data = rp.compute(r'address":".*","rids', t).replace('address":"', '').replace('","rids', '')
return real_data | 0.008554 |
def _graph(self):
""""Return a graph containing the dependencies of this expression
Structure is:
[<string expression>, <function name if callable>, <function object if callable>, [subgraph/dependencies, ....]]
"""
expression = self.expression
def walk(node):
if isinstance(node, six.string_types):
if node in self.ds.virtual_columns:
ex = Expression(self.ds, self.ds.virtual_columns[node])
return [node, None, None, [ex._graph()]]
else:
return node
else:
fname, node_repr, deps = node
if len(node_repr) > 30: # clip too long expressions
node_repr = node_repr[:26] + ' ....'
deps = [walk(dep) for dep in deps]
obj = self.ds.functions.get(fname)
# we don't want the wrapper, we want the underlying object
if isinstance(obj, Function):
obj = obj.f
if isinstance(obj, FunctionSerializablePickle):
obj = obj.f
return [node_repr, fname, obj, deps]
return walk(expresso._graph(expression)) | 0.002402 |
def get_cartesian(r, theta):
"""
Given a radius and theta, return the cartesian (x, y) coordinates.
"""
x = r*np.sin(theta)
y = r*np.cos(theta)
return x, y | 0.005556 |
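Note that with x = r*sin(theta) and y = r*cos(theta) the angle is measured from the positive y-axis rather than the usual mathematical convention; a short usage sketch:
import numpy as np
r = np.array([1.0, 2.0])
theta = np.array([0.0, np.pi / 2])
x, y = get_cartesian(r, theta)
# x ~ [0., 2.], y ~ [1., 0.]  (theta = 0 points along +y under this convention)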
def reserve(cls, queues, res, worker=None, timeout=10):
"""Reserve a job on one of the queues. This marks this job so
that other workers will not pick it up.
"""
if isinstance(queues, string_types):
queues = [queues]
queue, payload = res.pop(queues, timeout=timeout)
if payload:
return cls(queue, payload, res, worker) | 0.005115 |
def detect_encoding(string):
"""
Tries to detect the encoding of the passed string.
Defaults to UTF-8.
"""
assert isinstance(string, bytes)
try:
detected = chardet.detect(string)
if detected:
return detected.get('encoding') or 'utf-8'
except Exception as e:
pass
return 'utf-8' | 0.00289 |
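A quick usage sketch, assuming the chardet package is installed:
import chardet
raw = "naïve café".encode("latin-1")
guess = chardet.detect(raw)  # e.g. {'encoding': 'ISO-8859-1', 'confidence': ...}
text = raw.decode(guess.get("encoding") or "utf-8", errors="replace")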
def update_expiry(self, commit=True):
"""Update token's expiration datetime on every auth action."""
self.expires = update_expiry(self.created)
if commit:
self.save() | 0.009901 |
def _create_tag_posts_table(self):
"""
Creates the table to store association info between blog posts and
tags.
:return:
"""
with self._engine.begin() as conn:
tag_posts_table_name = self._table_name("tag_posts")
if not conn.dialect.has_table(conn, tag_posts_table_name):
tag_id_key = self._table_name("tag") + ".id"
post_id_key = self._table_name("post") + ".id"
self._tag_posts_table = sqla.Table(
tag_posts_table_name, self._metadata,
sqla.Column('tag_id', sqla.Integer,
sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.Column('post_id', sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_posts_table_name)
else:
self._tag_posts_table = \
self._metadata.tables[tag_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_posts_table_name) | 0.001214 |
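The same association-table shape can be built standalone with SQLAlchemy Core; a minimal sketch using an in-memory SQLite engine and simplified tag/post tables (the table-name prefixing and reflection from the snippet are omitted):
import sqlalchemy as sqla
engine = sqla.create_engine("sqlite://")
metadata = sqla.MetaData()
tag = sqla.Table("tag", metadata, sqla.Column("id", sqla.Integer, primary_key=True))
post = sqla.Table("post", metadata, sqla.Column("id", sqla.Integer, primary_key=True))
tag_posts = sqla.Table(
    "tag_posts", metadata,
    sqla.Column("tag_id", sqla.Integer,
                sqla.ForeignKey("tag.id", onupdate="CASCADE", ondelete="CASCADE"),
                index=True),
    sqla.Column("post_id", sqla.Integer,
                sqla.ForeignKey("post.id", onupdate="CASCADE", ondelete="CASCADE"),
                index=True),
    sqla.UniqueConstraint("tag_id", "post_id", name="uix_1"),
)
metadata.create_all(engine)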
def use_comparative_asset_view(self):
"""Pass through to provider AssetLookupSession.use_comparative_asset_view"""
self._object_views['asset'] = COMPARATIVE
# self._get_provider_session('asset_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_comparative_asset_view()
except AttributeError:
pass | 0.008869 |
def get_metadata_value(self, key: str) -> typing.Any:
"""Get the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
.. versionadded:: 1.0
Scriptable: Yes
"""
return self._data_item.get_metadata_value(key) | 0.007398 |
def prj_created_data(project, role):
"""Return the data for created
:param project: the project that holds the data
:type project: :class:`jukeboxcore.djadapter.models.Project`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the created
:rtype: depending on role
:raises: None
"""
if role == QtCore.Qt.DisplayRole:
return project.date_created.isoformat(' ') | 0.002273 |
def pipe(cmd, *arguments, **kwargs):
"""
Pipe many commands::
>>> noop = pipe(['gzip'], ['gzip'], ['zcat'], ['zcat'])
>>> _ = noop.stdin.write('foo'.encode()) # Ignore output in Python 3
>>> noop.stdin.close()
>>> print(noop.stdout.read().decode())
foo
Returns a Subprocess.
"""
acc = run(*cmd, **kwargs)
for cmd in arguments:
if isinstance(cmd, Subprocess):
acc = acc.pipe(cmd)
else:
acc = acc.pipe(*cmd, **kwargs)
return acc | 0.001862 |
def edit_message_live_location(self, *args, **kwargs):
"""See :func:`edit_message_live_location`"""
return edit_message_live_location(*args, **self._merge_overrides(**kwargs)).run() | 0.015228 |
def parse_nestings(string, only_curl=False):
r"""
References:
http://stackoverflow.com/questions/4801403/pyparsing-nested-mutiple-opener-clo
CommandLine:
python -m utool.util_gridsearch parse_nestings:1 --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> string = r'lambda u: sign(u) * abs(u)**3.0 * greater(u, 0)'
>>> parsed_blocks = parse_nestings(string)
>>> recombined = recombine_nestings(parsed_blocks)
>>> print('PARSED_BLOCKS = ' + ut.repr3(parsed_blocks, nl=1))
>>> print('recombined = %r' % (recombined,))
>>> print('orig = %r' % (string,))
PARSED_BLOCKS = [
('nonNested', 'lambda u: sign'),
('paren', [('ITEM', '('), ('nonNested', 'u'), ('ITEM', ')')]),
('nonNested', '* abs'),
('paren', [('ITEM', '('), ('nonNested', 'u'), ('ITEM', ')')]),
('nonNested', '**3.0 * greater'),
('paren', [('ITEM', '('), ('nonNested', 'u, 0'), ('ITEM', ')')]),
]
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> string = r'\chapter{Identification \textbf{foobar} workflow}\label{chap:application}'
>>> parsed_blocks = parse_nestings(string)
>>> print('PARSED_BLOCKS = ' + ut.repr3(parsed_blocks, nl=1))
PARSED_BLOCKS = [
('nonNested', '\\chapter'),
('curl', [('ITEM', '{'), ('nonNested', 'Identification \\textbf'), ('curl', [('ITEM', '{'), ('nonNested', 'foobar'), ('ITEM', '}')]), ('nonNested', 'workflow'), ('ITEM', '}')]),
('nonNested', '\\label'),
('curl', [('ITEM', '{'), ('nonNested', 'chap:application'), ('ITEM', '}')]),
]
"""
import utool as ut # NOQA
import pyparsing as pp
def as_tagged(parent, doctag=None):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
namedItems = dict((v[1], k) for (k, vlist) in parent._ParseResults__tokdict.items()
for v in vlist)
# collapse out indents if formatting is not desired
parentTag = None
if doctag is not None:
parentTag = doctag
else:
if parent._ParseResults__name:
parentTag = parent._ParseResults__name
if not parentTag:
parentTag = "ITEM"
out = []
for i, res in enumerate(parent._ParseResults__toklist):
if isinstance(res, pp.ParseResults):
if i in namedItems:
child = as_tagged(res, namedItems[i])
else:
child = as_tagged(res, None)
out.append(child)
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
resTag = "ITEM"
child = (resTag, pp._ustr(res))
out += [child]
return (parentTag, out)
def combine_nested(opener, closer, content, name=None):
r"""
opener, closer, content = '(', ')', nest_body
"""
import utool as ut # NOQA
ret1 = pp.Forward()
_NEST = ut.identity
#_NEST = pp.Suppress
opener_ = _NEST(opener)
closer_ = _NEST(closer)
group = pp.Group(opener_ + pp.ZeroOrMore(content) + closer_)
ret2 = ret1 << group
if ret2 is None:
ret2 = ret1
else:
pass
#raise AssertionError('Weird pyparsing behavior. Comment this line if encountered. pp.__version__ = %r' % (pp.__version__,))
if name is None:
ret3 = ret2
else:
ret3 = ret2.setResultsName(name)
assert ret3 is not None, 'cannot have a None return'
return ret3
# Current Best Grammar
nest_body = pp.Forward()
nestedParens = combine_nested('(', ')', content=nest_body, name='paren')
nestedBrackets = combine_nested('[', ']', content=nest_body, name='brak')
nestedCurlies = combine_nested('{', '}', content=nest_body, name='curl')
nonBracePrintables = ''.join(c for c in pp.printables if c not in '(){}[]') + ' '
nonNested = pp.Word(nonBracePrintables).setResultsName('nonNested')
nonNested = nonNested.leaveWhitespace()
# if with_curl and not with_paren and not with_brak:
if only_curl:
# TODO figure out how to chain |
nest_body << (nonNested | nestedCurlies)
else:
nest_body << (nonNested | nestedParens | nestedBrackets | nestedCurlies)
nest_body = nest_body.leaveWhitespace()
parser = pp.ZeroOrMore(nest_body)
debug_ = ut.VERBOSE
if len(string) > 0:
tokens = parser.parseString(string)
if debug_:
print('string = %r' % (string,))
print('tokens List: ' + ut.repr3(tokens.asList()))
print('tokens XML: ' + tokens.asXML())
parsed_blocks = as_tagged(tokens)[1]
if debug_:
print('PARSED_BLOCKS = ' + ut.repr3(parsed_blocks, nl=1))
else:
parsed_blocks = []
return parsed_blocks | 0.001864 |
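For comparison, pyparsing also ships a built-in helper for simple cases of the same problem; a minimal sketch, assuming only pyparsing (it suppresses the delimiters rather than tagging them as the snippet above does):
import pyparsing as pp
nested = pp.nestedExpr('(', ')')
print(nested.parseString("(a (b c) d)").asList())
# [['a', ['b', 'c'], 'd']]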
def redirect(self, redirect_error, auth):
"""Redirect the connection to an alternative endpoint.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
try:
self.lock()
_logger.info("Redirecting connection %r.", self.container_id)
if self.hostname == redirect_error.hostname:
return
if self._state != c_uamqp.ConnectionState.END:
_logger.info("Connection not closed yet - shutting down.")
self._close()
self.hostname = redirect_error.hostname
self.auth = auth
self._conn = self._create_connection(auth)
for setting, value in self._settings.items():
setattr(self, setting, value)
self._error = None
self._closing = False
finally:
_logger.info("Finished redirecting connection %r.", self.container_id)
self.release() | 0.002616 |
def prerequisite_check():
"""
Check prerequisites of the framework, including Python version, installation of
modules, etc.
Returns:
Optional[str]: If the check is not passed, return error message regarding
failed test case. None is returned otherwise.
"""
# Check Python version
if sys.version_info < (3, 6):
version_str = "%s.%s.%s" % sys.version_info[:3]
# TRANSLATORS: This word is used as a part of search query suggested to users,
        # it may appear in context like "Ubuntu 16.04 install Python 3.7"
search_url = build_search_query(_("install") + " Python 3.7")
return _("EH Forwarder Bot requires a minimum of Python 3.6 to run. You "
"are currently using Python {version}. \n"
"\n"
"You may want to try:\n"
"{url}").format(version=version_str, url=search_url)
# Check installations of modules
modules_err = _("You may want to visit the modules repository to find a list of "
"available modules to install.\n"
"https://github.com/blueset/ehForwarderBot/wiki/Channels-Repository")
# 1. At least 1 master channel must be installed
try:
next(pkg_resources.iter_entry_points("ehforwarderbot.master"))
except StopIteration:
return _("No master channel detected. EH Forwarder Bot requires at least one "
"master channel installed to run.") + "\n\n" + modules_err
# 2. At least 1 slave channel must be installed
try:
next(pkg_resources.iter_entry_points("ehforwarderbot.slave"))
except StopIteration:
return _("No slave channel detected. EH Forwarder Bot requires at least one "
"slave channel installed to run.") + "\n\n" + modules_err | 0.004902 |