text (string, length 78 to 104k) | score (float64, 0 to 0.18) |
---|---|
def jcal2jd(year, month, day):
"""Julian calendar date to Julian date.
The input and output are for the proleptic Julian calendar,
i.e., no consideration of historical usage of the calendar is
made.
Parameters
----------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
Returns
-------
jd1, jd2: 2-element tuple of floats
When added together, the numbers give the Julian date for the
given Julian calendar date. The first number is always
MJD_0 i.e., 2400000.5. So the second is the MJD.
Examples
--------
>>> jcal2jd(2000, 1, 1)
(2400000.5, 51557.0)
>>> year = [-4699, -2114, -1050, -123, -1, 0, 1, 123, 1678, 2000,
...  2012, 2245]
>>> month = [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12]
>>> day = [1, 12, 23, 14, 25, 16, 27, 8, 9, 10, 11, 31]
>>> x = [jcal2jd(y, m, d) for y, m, d in zip(year, month, day)]
>>> for i in x: print(i)
(2400000.5, -2395252.0)
(2400000.5, -1451039.0)
(2400000.5, -1062374.0)
(2400000.5, -723765.0)
(2400000.5, -679164.0)
(2400000.5, -678776.0)
(2400000.5, -678370.0)
(2400000.5, -633798.0)
(2400000.5, -65772.0)
(2400000.5, 51871.0)
(2400000.5, 56285.0)
Notes
-----
Unlike `gcal2jd`, negative months and days can result in incorrect
Julian dates.
"""
year = int(year)
month = int(month)
day = int(day)
jd = 367 * year
x = ipart((month - 9) / 7.0)
jd -= ipart((7 * (year + 5001 + x)) / 4.0)
jd += ipart((275 * month) / 9.0)
jd += day
    jd += 1729777 - 2400000.5  # Return 2400000.5 as the first part of the JD.
jd -= 0.5 # Convert midday to midnight.
return MJD_0, jd | 0.000561 |
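A quick check of the two-part convention returned above, assuming jcal2jd is importable as defined: the two floats sum to the full Julian date, and keeping them split as (MJD_0, MJD) preserves floating-point precision.

jd1, jd2 = jcal2jd(2000, 1, 1)
assert jd1 == 2400000.5          # MJD_0, the fixed offset
assert jd2 == 51557.0            # the Modified Julian Date
assert jd1 + jd2 == 2451557.5    # full Julian date at midnight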
def getAdditionalImages(self):
'''
The same as calling ``client.getAdditionalImages(build.setID)``.
:returns: A list of URL strings.
:rtype: list
'''
self._additionalImages = self._client.getAdditionalImages(self.setID)
return self._additionalImages | 0.006515 |
def write_dict_to_file(file_path, obj):
"""
Write a dictionary of string keys to a file
"""
lines = []
for key, value in obj.items():
lines.append(key + ':' + repr(value) + '\n')
with open(file_path, 'w+') as file:
file.writelines(lines)
return None | 0.00339 |
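Because each value is written with repr(), a matching reader can recover Python literals with ast.literal_eval. A minimal round-trip sketch; the reader below is hypothetical and assumes keys contain no ':' characters, which the writer above does not escape.

import ast

def read_dict_from_file(file_path):
    # Hypothetical companion reader for the key:repr(value) format above.
    result = {}
    with open(file_path) as file:
        for line in file:
            key, _, value = line.rstrip('\n').partition(':')
            result[key] = ast.literal_eval(value)
    return result

write_dict_to_file('settings.txt', {'retries': 3, 'name': 'demo'})
assert read_dict_from_file('settings.txt') == {'retries': 3, 'name': 'demo'}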
def save_file(data, export_file):
"""Write data to a file."""
create_dir(os.path.dirname(export_file))
try:
with open(export_file, "w") as file:
file.write(data)
except PermissionError:
logging.warning("Couldn't write to %s.", export_file) | 0.003521 |
def refresh_stats(self):
"""
        Only needed when generating terrain (sea = 100 - perc_land at start).
        This function forces a recount; otherwise just read the count attributes directly.
"""
self.tot_pix = 0
self.tot_sea = 0
self.tot_land = 0
self.tot_blocked = 0
for row in range(self.grd.grid_height):
for col in range(self.grd.grid_width):
self.tot_pix += 1
val = self.grd.get_tile(row, col)
if val == TERRAIN_SEA:
self.tot_sea += 1
elif val == TERRAIN_LAND:
self.tot_land += 1
else:
self.tot_blocked += 1 | 0.006993 |
def slice(self, x, y, width):
"""
Provide a slice of data from the buffer at the specified location
:param x: The X origin
:param y: The Y origin
:param width: The width of slice required
:return: The slice of tuples from the current double-buffer
"""
return self._double_buffer[y][x:x + width] | 0.005571 |
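The slice is a plain row-wise cut: row y, columns x through x + width. A tiny illustration with a hypothetical two-row list standing in for the real double-buffer:

buffer = [
    [(0, 0), (0, 1), (0, 2), (0, 3)],
    [(1, 0), (1, 1), (1, 2), (1, 3)],
]
y, x, width = 1, 1, 2
assert buffer[y][x:x + width] == [(1, 1), (1, 2)]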
def execute(self):
"""Generate local DB, pulling metadata and data from RWSConnection"""
logging.info('Requesting view metadata for project %s' % self.project_name)
project_csv_meta = self.rws_connection.send_request(ProjectMetaDataRequest(self.project_name))
# Process it into a set of tables
self.db_adapter.processMetaData(project_csv_meta)
# Get the data for the study
for dataset_name in self.db_adapter.datasets.keys():
logging.info('Requesting data from dataset %s' % dataset_name)
form_name, _type = self.name_type_from_viewname(dataset_name)
form_data = self.rws_connection.send_request(
FormDataRequest(self.project_name, self.environment, _type, form_name))
# Now process the form_data into the db of choice
logging.info('Populating dataset %s' % dataset_name)
self.db_adapter.processFormData(form_data, dataset_name)
logging.info('Process complete') | 0.004912 |
def update_keywords(self):
"""653 Free Keywords."""
for field in record_get_field_instances(self.record, '653', ind1='1'):
subs = field_get_subfields(field)
new_subs = []
if 'a' in subs:
for val in subs['a']:
new_subs.extend([('9', 'author'), ('a', val)])
new_field = create_field(subfields=new_subs, ind1='1')
record_replace_field(
self.record, '653', new_field, field_position_global=field[4]) | 0.003824 |
def readShocks(self):
'''
        Reads values of shock variables for the current period from history arrays. For each
        variable X named in self.shock_vars, this attribute of self is set to self.X_hist[self.t_sim,:].
This method is only ever called if self.read_shocks is True. This can be achieved by using
the method makeShockHistory() (or manually after storing a "handcrafted" shock history).
Parameters
----------
None
Returns
-------
None
'''
for var_name in self.shock_vars:
setattr(self,var_name,getattr(self,var_name+'_hist')[self.t_sim,:]) | 0.015083 |
def _read_mulliken(self):
"""
Parses Mulliken charges. Also parses spins given an unrestricted SCF.
"""
if self.data.get('unrestricted', []):
header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+Spin\s\(a\.u\.\)\s+\-+"
table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer_pattern = r"\s\s\-+\s+Sum of atomic charges"
else:
header_pattern = r"\-+\s+Ground-State Mulliken Net Atomic Charges\s+Atom\s+Charge \(a\.u\.\)\s+\-+"
table_pattern = r"\s+\d+\s\w+\s+([\d\-\.]+)"
footer_pattern = r"\s\s\-+\s+Sum of atomic charges"
temp_mulliken = read_table_pattern(self.text, header_pattern,
table_pattern, footer_pattern)
real_mulliken = []
for one_mulliken in temp_mulliken:
if self.data.get('unrestricted', []):
temp = np.zeros(shape=(len(one_mulliken), 2))
for ii, entry in enumerate(one_mulliken):
temp[ii, 0] = float(entry[0])
temp[ii, 1] = float(entry[1])
else:
temp = np.zeros(len(one_mulliken))
for ii, entry in enumerate(one_mulliken):
temp[ii] = float(entry[0])
real_mulliken += [temp]
self.data["Mulliken"] = real_mulliken | 0.002801 |
def isentropic_efficiency(P1, P2, k, eta_s=None, eta_p=None):
r'''Calculates either isentropic or polytropic efficiency from the other
type of efficiency.
.. math::
\eta_s = \frac{(P_2/P_1)^{(k-1)/k}-1}
{(P_2/P_1)^{\frac{k-1}{k\eta_p}}-1}
.. math::
\eta_p = \frac{\left(k - 1\right) \log{\left (\frac{P_{2}}{P_{1}}
\right )}}{k \log{\left (\frac{1}{\eta_{s}} \left(\eta_{s}
+ \left(\frac{P_{2}}{P_{1}}\right)^{\frac{1}{k} \left(k - 1\right)}
- 1\right) \right )}}
Parameters
----------
P1 : float
Initial pressure of gas [Pa]
P2 : float
Final pressure of gas [Pa]
k : float
Isentropic exponent of the gas (Cp/Cv) [-]
eta_s : float, optional
Isentropic (adiabatic) efficiency of the process, [-]
eta_p : float, optional
Polytropic efficiency of the process, [-]
Returns
-------
eta_s or eta_p : float
Isentropic or polytropic efficiency, depending on input, [-]
Notes
-----
The form for obtained `eta_p` from `eta_s` was derived with SymPy.
Examples
--------
>>> isentropic_efficiency(1E5, 1E6, 1.4, eta_p=0.78)
0.7027614191263858
References
----------
.. [1] Couper, James R., W. Roy Penney, and James R. Fair. Chemical Process
Equipment: Selection and Design. 2nd ed. Amsterdam ; Boston: Gulf
Professional Publishing, 2009.
'''
if eta_s is None and eta_p:
return ((P2/P1)**((k-1.0)/k)-1.0)/((P2/P1)**((k-1.0)/(k*eta_p))-1.0)
elif eta_p is None and eta_s:
return (k - 1.0)*log(P2/P1)/(k*log(
(eta_s + (P2/P1)**((k - 1.0)/k) - 1.0)/eta_s))
else:
raise Exception('Either eta_s or eta_p is required') | 0.000568 |
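The two formulas are exact inverses of each other, so converting a polytropic efficiency to isentropic and back should recover the input. A small round-trip check, assuming the function above is importable:

from math import isclose

eta_s = isentropic_efficiency(1E5, 1E6, 1.4, eta_p=0.78)   # 0.7027... per the docstring
eta_p = isentropic_efficiency(1E5, 1E6, 1.4, eta_s=eta_s)  # invert the conversion
assert isclose(eta_p, 0.78)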
def create_data_types(self):
"""Map of standard playbook variable types to create method."""
return {
'Binary': self.create_binary,
'BinaryArray': self.create_binary_array,
'KeyValue': self.create_key_value,
'KeyValueArray': self.create_key_value_array,
'String': self.create_string,
'StringArray': self.create_string_array,
'TCEntity': self.create_tc_entity,
'TCEntityArray': self.create_tc_entity_array,
} | 0.003795 |
def decompress(content, encoding, filename='N/A'):
"""
Decompress file content.
Required:
        content (bytes): the file content to be decompressed
encoding: None (no compression) or 'gzip'
Optional:
filename (str:default:'N/A'): Used for debugging messages
Raises:
NotImplementedError if an unsupported codec is specified.
compression.EncodeError if the encoder has an issue
Return: decompressed content
"""
try:
encoding = (encoding or '').lower()
if encoding == '':
return content
elif encoding == 'gzip':
return gunzip(content)
except DecompressionError as err:
print("Filename: " + str(filename))
raise
raise NotImplementedError(str(encoding) + ' is not currently supported. Supported Options: None, gzip') | 0.017995 |
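A self-contained sketch of the same dispatch pattern using only the standard library; gzip.decompress stands in for the module's gunzip helper, which is not shown above.

import gzip

def decompress_stdlib(content, encoding):
    # Stand-in for the function above: None/'' means pass-through, 'gzip' inflates.
    encoding = (encoding or '').lower()
    if encoding == '':
        return content
    if encoding == 'gzip':
        return gzip.decompress(content)
    raise NotImplementedError(str(encoding) + ' is not currently supported. Supported Options: None, gzip')

payload = b'hello world'
assert decompress_stdlib(payload, None) == payload
assert decompress_stdlib(gzip.compress(payload), 'gzip') == payload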
def set(self, opts, popsize=None, ccovfac=1, verbose=True):
"""Compute strategy parameters as a function
of dimension and population size """
alpha_cc = 1.0 # cc-correction for mueff, was zero before
def conedf(df, mu, N):
"""used for computing separable learning rate"""
return 1. / (df + 2.*sqrt(df) + float(mu) / N)
def cmudf(df, mu, alphamu):
"""used for computing separable learning rate"""
return (alphamu + mu - 2. + 1. / mu) / (df + 4.*sqrt(df) + mu / 2.)
sp = self
N = sp.N
if popsize:
opts.evalall({'N':N, 'popsize':popsize})
else:
popsize = opts.evalall({'N':N})['popsize'] # the default popsize is computed in CMAOptions()
## meta_parameters.lambda_exponent == 0.0
popsize = int(popsize + N** 0.0 - 1)
sp.popsize = popsize
if opts['CMA_mirrors'] < 0.5:
sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize)
elif opts['CMA_mirrors'] > 1:
sp.lam_mirr = int(0.5 + opts['CMA_mirrors'])
else:
sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29) # 0.158650... * popsize is optimal
# lam = arange(2,22)
# mirr = 0.16 + 0.29/lam
# print(lam); print([int(0.5 + l) for l in mirr*lam])
# [ 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21]
# [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4]
## meta_parameters.parent_fraction == 0.5
sp.mu_f = 0.5 * sp.popsize # float value of mu
if opts['CMA_mu'] is not None:
sp.mu_f = opts['CMA_mu']
sp.mu = int(sp.mu_f + 0.499999) # round down for x.5
sp.mu = max((sp.mu, 1))
# in principle we have mu_opt = popsize/2 + lam_mirr/2,
# which means in particular weights should only be negative for q > 0.5+mirr_frac/2
if sp.mu > sp.popsize - 2 * sp.lam_mirr + 1:
_print_warning("pairwise selection is not implemented, therefore " +
" mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias" % (
sp.mu, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr))
if sp.lam_mirr > sp.popsize // 2:
raise _Error("fraction of mirrors in the population as read from option CMA_mirrors cannot be larger 0.5, " +
"theoretically optimal is 0.159")
sp.weights = log(max([sp.mu, sp.popsize / 2.0]) + 0.5) - log(1 + np.arange(sp.mu))
sp.weights /= sum(sp.weights)
sp.mueff = 1 / sum(sp.weights**2)
# TODO: this will disappear, as it is done in class CMAAdaptSigmaCSA
## meta_parameters.cs_exponent == 1.0
b = 1.0
## meta_parameters.cs_multiplier == 1.0
sp.cs = 1.0 * (sp.mueff + 2)**b / (N + (sp.mueff + 3)**b) # TODO: this doesn't change dependency of dimension
# sp.cs = (sp.mueff + 2) / (N + 1.5*sp.mueff + 1)
## meta_parameters.cc_exponent == 1.0
b = 1.0
## meta_parameters.cc_multiplier == 1.0
sp.cc = 1.0 * \
(4 + alpha_cc * sp.mueff / N)**b / \
(N**b + (4 + alpha_cc * 2 * sp.mueff / N)**b)
sp.cc_sep = (1 + 1 / N + alpha_cc * sp.mueff / N) / (N**0.5 + 1 / N + alpha_cc * 2 * sp.mueff / N) # \not\gg\cc
if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov1':
## meta_parameters.cc_multiplier == 1.0
sp.cc = 1.0 * (4 + sp.mueff / N)**0.5 / ((N + 4)**0.5 + (2 * sp.mueff / N)**0.5)
sp.rankmualpha = opts['CMA_rankmualpha']
# sp.rankmualpha = _evalOption(opts['CMA_rankmualpha'], 0.3)
## meta_parameters.c1_multiplier == 1.0
sp.c1 = ( 1.0 * ccovfac * min(1, sp.popsize / 6) *
## meta_parameters.c1_exponent == 2.0
2 / ((N + 1.3)** 2.0 + sp.mueff))
# 1/0
sp.c1_sep = ccovfac * conedf(N, sp.mueff, N)
if opts['CMA_rankmu'] != 0: # also empty
## meta_parameters.cmu_multiplier == 2.0
alphacov, mu = 2.0 , sp.mueff
sp.cmu = min(1 - sp.c1, ccovfac * alphacov *
## meta_parameters.cmu_exponent == 2.0
(sp.rankmualpha + mu - 2 + 1 / mu) / ((N + 2)** 2.0 + alphacov * mu / 2))
if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov':
sp.cmu = opts['vv'][1]
sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, sp.mueff, sp.rankmualpha))
else:
sp.cmu = sp.cmu_sep = 0
if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov1':
sp.c1 = opts['vv'][1]
sp.neg = _BlancClass()
if opts['CMA_active'] and opts['CMA_on']:
# in principle we have mu_opt = popsize/2 + lam_mirr/2,
# which means in particular weights should only be negative for q > 0.5+mirr_frac/2
if 1 < 3: # seems most natural: continuation of log(lambda/2) - log(k) qqqqqqqqqqqqqqqqqqqqqqqqqq
sp.neg.mu_f = popsize // 2 # not sure anymore what this is good for
sp.neg.weights = array([log(k) - log(popsize/2 + 1/2) for k in np.arange(np.ceil(popsize/2 + 1.1/2), popsize + .1)])
sp.neg.mu = len(sp.neg.weights)
sp.neg.weights /= sum(sp.neg.weights)
sp.neg.mueff = 1 / sum(sp.neg.weights**2)
## meta_parameters.cact_exponent == 1.5
sp.neg.cmuexp = opts['CMA_active'] * 0.3 * sp.neg.mueff / ((N + 2)** 1.5 + 1.0 * sp.neg.mueff)
if hasattr(opts['vv'], '__getitem__') and opts['vv'][0] == 'sweep_ccov_neg':
sp.neg.cmuexp = opts['vv'][1]
# reasoning on learning rate cmuexp: with sum |w| == 1 and
# length-normalized vectors in the update, the residual
# variance in any direction exceeds exp(-N*cmuexp)
assert sp.neg.mu >= sp.lam_mirr # not really necessary
# sp.neg.minresidualvariance = 0.66 # not it use, keep at least 0.66 in all directions, small popsize is most critical
else:
sp.neg.cmuexp = 0
sp.CMA_on = sp.c1 + sp.cmu > 0
# print(sp.c1_sep / sp.cc_sep)
if not opts['CMA_on'] and opts['CMA_on'] not in (None, [], (), ''):
sp.CMA_on = False
# sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0
mueff_exponent = 0.5
if 1 < 3:
mueff_exponent = opts['CSA_damp_mueff_exponent']
# TODO: this will disappear, as it is done in class CMAAdaptSigmaCSA
sp.damps = opts['CSA_dampfac'] * (0.5 +
0.5 * min([1, (sp.lam_mirr / (0.159 * sp.popsize) - 1)**2])**1 +
2 * max([0, ((sp.mueff - 1) / (N + 1))**mueff_exponent - 1]) + sp.cs
)
sp.cmean = float(opts['CMA_cmean'])
# sp.kappa = 1 # 4-D, lam=16, rank1, kappa < 4 does not influence convergence rate
# in larger dim it does, 15-D with defaults, kappa=8 factor 2
if verbose:
if not sp.CMA_on:
print('covariance matrix adaptation turned off')
if opts['CMA_mu'] != None:
print('mu = %f' % (sp.mu_f)) | 0.007708 |
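For reference, the recombination weights and mueff computed above follow the standard CMA-ES recipe: log-spaced positive weights for the best mu offspring, normalised to sum to one, with mueff = 1/sum(w_i**2) as the variance-effective selection mass. A stand-alone numeric sketch for popsize=10, mu=5 (values approximate):

import numpy as np

popsize, mu = 10, 5
weights = np.log(max(mu, popsize / 2.0) + 0.5) - np.log(1 + np.arange(mu))
weights /= weights.sum()
mueff = 1.0 / np.sum(weights**2)
# weights fall from roughly 0.46 to 0.03, and mueff comes out around 3.2 here
print(weights, mueff)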
def _bselect(self, selection, start_bindex, end_bindex):
""" add the given buffer indices to the given QItemSelection, both byte and char panes """
selection.select(self._model.index2qindexb(start_bindex), self._model.index2qindexb(end_bindex))
selection.select(self._model.index2qindexc(start_bindex), self._model.index2qindexc(end_bindex)) | 0.013699 |
def parse(self):
"""
The function for parsing the JSON response to the vars dictionary.
"""
try:
self.vars['handle'] = self.json['handle'].strip()
except (KeyError, ValueError):
log.debug('Handle missing, json_output: {0}'.format(json.dumps(
self.json)))
raise InvalidNetworkObject('Handle is missing for RDAP network '
'object')
try:
self.vars['ip_version'] = self.json['ipVersion'].strip()
# RDAP IPv4 addresses are padded to 3 digits per octet, remove
# the leading 0's.
if self.vars['ip_version'] == 'v4':
self.vars['start_address'] = ip_address(
ipv4_lstrip_zeros(self.json['startAddress'])
).__str__()
self.vars['end_address'] = ip_address(
ipv4_lstrip_zeros(self.json['endAddress'])
).__str__()
# No bugs found for IPv6 yet, proceed as normal.
else:
self.vars['start_address'] = self.json['startAddress'].strip()
self.vars['end_address'] = self.json['endAddress'].strip()
except (KeyError, ValueError, TypeError):
log.debug('IP address data incomplete. Data parsed prior to '
'exception: {0}'.format(json.dumps(self.vars)))
raise InvalidNetworkObject('IP address data is missing for RDAP '
'network object.')
try:
self.vars['cidr'] = ', '.join(calculate_cidr(
self.vars['start_address'], self.vars['end_address']
))
except (KeyError, ValueError, TypeError, AttributeError) as \
e: # pragma: no cover
log.debug('CIDR calculation failed: {0}'.format(e))
pass
for v in ['name', 'type', 'country']:
try:
self.vars[v] = self.json[v].strip()
except (KeyError, ValueError):
pass
try:
self.vars['parent_handle'] = self.json['parentHandle'].strip()
except (KeyError, ValueError):
pass
self._parse() | 0.00088 |
def signal(signal=None):
'''
Signals Apache Solr to start, stop, or restart. Obviously this is only
going to work if the minion resides on the solr host. Additionally Solr
doesn't ship with an init script so one must be created.
signal : str (None)
The command to pass to the apache solr init valid values are 'start',
'stop', and 'restart'
CLI Example:
.. code-block:: bash
salt '*' solr.signal restart
'''
valid_signals = ('start', 'stop', 'restart')
# Give a friendly error message for invalid signals
# TODO: Fix this logic to be reusable and used by apache.signal
if signal not in valid_signals:
msg = valid_signals[:-1] + ('or {0}'.format(valid_signals[-1]),)
return '{0} is an invalid signal. Try: one of: {1}'.format(
signal, ', '.join(msg))
cmd = "{0} {1}".format(__opts__['solr.init_script'], signal)
__salt__['cmd.run'](cmd, python_shell=False) | 0.001032 |
def summary_permutation(context_counts,
context_to_mut,
seq_context,
gene_seq,
score_dir,
num_permutations=10000,
min_frac=0.0,
min_recur=2,
drop_silent=False):
"""Performs null-permutations and summarizes the results as features over
the gene.
Parameters
----------
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
drop_silent : bool, default=False
        Flag on whether to drop all silent mutations. Some data sources
do not report silent mutations, and the simulations should match this.
Returns
-------
summary_info_list : list of lists
list of non-silent and silent mutation counts under the null along
with information on recurrent missense counts and missense positional
entropy.
"""
mycontexts = context_counts.index.tolist()
somatic_base = [base
for one_context in mycontexts
for base in context_to_mut[one_context]]
# get random positions determined by sequence context
tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
num_permutations)
    tmp_mut_pos = np.hstack([pos_array for base, pos_array in tmp_contxt_pos])
# determine result of random positions
gene_name = gene_seq.bed.gene_name
gene_len = gene_seq.bed.cds_len
summary_info_list = []
for i, row in enumerate(tmp_mut_pos):
# get info about mutations
tmp_mut_info = mc.get_aa_mut_info(row,
somatic_base,
gene_seq)
# Get all metrics summarizing each gene
tmp_summary = cutils.calc_summary_info(tmp_mut_info['Reference AA'],
tmp_mut_info['Somatic AA'],
tmp_mut_info['Codon Pos'],
gene_name,
score_dir,
min_frac=min_frac,
min_recur=min_recur)
# drop silent if needed
if drop_silent:
# silent mutation count is index 1
tmp_summary[1] = 0
# limit the precision of floats
#pos_ent = tmp_summary[-1]
#tmp_summary[-1] = '{0:.5f}'.format(pos_ent)
summary_info_list.append([gene_name, i+1, gene_len]+tmp_summary)
return summary_info_list | 0.000938 |
def create_action(self):
"""Create actions associated with this widget."""
actions = {}
act = QAction(QIcon(ICON['step_prev']), 'Previous Step', self)
act.setShortcut('[')
act.triggered.connect(self.step_prev)
actions['step_prev'] = act
act = QAction(QIcon(ICON['step_next']), 'Next Step', self)
act.setShortcut(']')
act.triggered.connect(self.step_next)
actions['step_next'] = act
act = QAction(QIcon(ICON['page_prev']), 'Previous Page', self)
act.setShortcut(QKeySequence.MoveToPreviousChar)
act.triggered.connect(self.page_prev)
actions['page_prev'] = act
act = QAction(QIcon(ICON['page_next']), 'Next Page', self)
act.setShortcut(QKeySequence.MoveToNextChar)
act.triggered.connect(self.page_next)
actions['page_next'] = act
act = QAction('Go to Epoch', self)
act.setShortcut(QKeySequence.FindNext)
act.triggered.connect(self.go_to_epoch)
actions['go_to_epoch'] = act
act = QAction('Line Up with Epoch', self)
act.setShortcut('F4')
act.triggered.connect(self.line_up_with_epoch)
actions['line_up_with_epoch'] = act
act = QAction(QIcon(ICON['zoomprev']), 'Wider Time Window', self)
act.setShortcut(QKeySequence.ZoomIn)
act.triggered.connect(self.X_more)
actions['X_more'] = act
act = QAction(QIcon(ICON['zoomnext']), 'Narrower Time Window', self)
act.setShortcut(QKeySequence.ZoomOut)
act.triggered.connect(self.X_less)
actions['X_less'] = act
act = QAction(QIcon(ICON['zoomin']), 'Larger Scaling', self)
act.setShortcut(QKeySequence.MoveToPreviousLine)
act.triggered.connect(self.Y_more)
actions['Y_less'] = act
act = QAction(QIcon(ICON['zoomout']), 'Smaller Scaling', self)
act.setShortcut(QKeySequence.MoveToNextLine)
act.triggered.connect(self.Y_less)
actions['Y_more'] = act
act = QAction(QIcon(ICON['ydist_more']), 'Larger Y Distance', self)
act.triggered.connect(self.Y_wider)
actions['Y_wider'] = act
act = QAction(QIcon(ICON['ydist_less']), 'Smaller Y Distance', self)
act.triggered.connect(self.Y_tighter)
actions['Y_tighter'] = act
act = QAction(QIcon(ICON['chronometer']), '6 Hours Earlier', self)
act.triggered.connect(partial(self.add_time, -6 * 60 * 60))
actions['addtime_-6h'] = act
act = QAction(QIcon(ICON['chronometer']), '1 Hour Earlier', self)
act.triggered.connect(partial(self.add_time, -60 * 60))
actions['addtime_-1h'] = act
act = QAction(QIcon(ICON['chronometer']), '10 Minutes Earlier', self)
act.triggered.connect(partial(self.add_time, -10 * 60))
actions['addtime_-10min'] = act
act = QAction(QIcon(ICON['chronometer']), '10 Minutes Later', self)
act.triggered.connect(partial(self.add_time, 10 * 60))
actions['addtime_10min'] = act
act = QAction(QIcon(ICON['chronometer']), '1 Hour Later', self)
act.triggered.connect(partial(self.add_time, 60 * 60))
actions['addtime_1h'] = act
act = QAction(QIcon(ICON['chronometer']), '6 Hours Later', self)
act.triggered.connect(partial(self.add_time, 6 * 60 * 60))
actions['addtime_6h'] = act
act = QAction('Go to Next Event', self)
act.setShortcut('s')
act.triggered.connect(self.next_event)
actions['next_event'] = act
act = QAction('Delete Event and Go to Next', self)
act.setShortcut('d')
act.triggered.connect(partial(self.next_event, True))
actions['del_and_next_event'] = act
act = QAction('Next Event of Same Type', self)
act.setCheckable(True)
act.setChecked(True)
actions['next_of_same_type'] = act
act = QAction('Change Event Type', self)
act.setShortcut('e')
act.triggered.connect(self.change_event_type)
actions['change_event_type'] = act
act = QAction('Centre Window Around Event', self)
act.setCheckable(True)
act.setChecked(True)
actions['centre_event'] = act
act = QAction('Full-length Markers', self)
act.setCheckable(True)
act.setChecked(True)
act.triggered.connect(self.display_annotations)
actions['cross_chan_mrk'] = act
# Misc
act = QAction('Export to svg...', self)
act.triggered.connect(partial(export_graphics, MAIN=self.parent))
actions['export_svg'] = act
self.action = actions | 0.001487 |
def use_plenary_asset_composition_view(self):
"""Pass through to provider AssetCompositionSession.use_plenary_asset_composition_view"""
self._object_views['asset_composition'] = PLENARY
# self._get_provider_session('asset_composition_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_asset_composition_view()
except AttributeError:
pass | 0.008114 |
def _create_for_element(cls, element_obj, conn, namespace, classname,
propname=None, methodname=None, parametername=None):
# pylint: disable=line-too-long
"""
Return a new :class:`~pywbem.ValueMapping` instance for the specified
CIM element.
If a `Values` qualifier is defined but no `ValueMap` qualifier, a
default of 0-based consecutive numbers is applied (that is the default
defined in :term:`DSP0004`).
Parameters:
element_obj (:class:`~pywbem.CIMProperty`, :class:`~pywbem.CIMMethod`, or :class:`~pywbem.CIMParameter`):
The CIM element on which the qualifiers are defined.
conn (:class:`~pywbem.WBEMConnection`):
The connection to the WBEM server containing the namespace.
namespace (:term:`string`):
Name of the CIM namespace containing the class.
classname (:term:`string`):
Name of the CIM class exposing the method. The method can be
defined in that class or inherited into that class.
propname (:term:`string`):
Name of the CIM property that defines the `Values` / `ValueMap`
qualifiers.
methodname (:term:`string`):
Name of the CIM method that has the parameter.
parametername (:term:`string`):
Name of the CIM parameter that defines the `Values` / `ValueMap`
qualifiers.
Returns:
The created :class:`~pywbem.ValueMapping` instance for the specified
CIM element.
Raises:
TypeError: The CIM element is not integer-typed.
ValueError: No `Values` qualifier defined on the CIM element.
ValueError: Invalid integer representation in `ValueMap` qualifier
defined on the CIM element.
""" # noqa: E501
# pylint: enable=line-too-long
# pylint: disable=protected-access
vm = ValueMapping()
vm._element_obj = element_obj
vm._conn = conn
vm._namespace = namespace
vm._classname = classname
vm._propname = propname
vm._methodname = methodname
vm._parametername = parametername
try:
typename = element_obj.type # Property, Parameter
except AttributeError:
typename = element_obj.return_type # Method
cimtype = type_from_name(typename)
if not issubclass(cimtype, CIMInt):
raise TypeError(
_format("The value-mapped {0} is not integer-typed, but "
"has CIM type: {1}", vm._element_str(), typename))
values_qual = element_obj.qualifiers.get('Values', None)
if values_qual is None:
# DSP0004 defines no default for a missing Values qualifier
raise ValueError(
_format("The value-mapped {0} has no Values qualifier "
"defined", vm._element_str()))
values_list = values_qual.value
valuemap_qual = element_obj.qualifiers.get('ValueMap', None)
if valuemap_qual is None:
# DSP0004 defines a default of consecutive index numbers
vm._b2v_single_dict = dict(zip(range(0, len(values_list)),
values_list))
vm._b2v_range_tuple_list = []
vm._b2v_unclaimed = None
vm._v2b_dict = OrderedDict(zip(values_list,
range(0, len(values_list))))
else:
vm._b2v_single_dict = {}
vm._b2v_range_tuple_list = []
vm._b2v_unclaimed = None
vm._v2b_dict = OrderedDict()
valuemap_list = valuemap_qual.value
for i, valuemap_str in enumerate(valuemap_list):
values_str = values_list[i]
if valuemap_str == '..':
vm._b2v_unclaimed = values_str
vm._v2b_dict[values_str] = None
else:
lo, hi, values_str = vm._values_tuple(
i, valuemap_list, values_list, cimtype)
if lo == hi:
# single value
vm._b2v_single_dict[lo] = values_str
vm._v2b_dict[values_str] = lo
else:
# value range
vm._b2v_range_tuple_list.append((lo, hi, values_str))
vm._v2b_dict[values_str] = (lo, hi)
return vm | 0.001092 |
def get_handler_stats(self):
''' Return handler read statistics
Returns a dictionary of managed handler data read statistics. The
format is primarily controlled by the
:func:`SocketStreamCapturer.dump_all_handler_stats` function::
{
<capture address>: <list of handler capture statistics>
}
'''
return {
address : stream_capturer[0].dump_all_handler_stats()
for address, stream_capturer in self._stream_capturers.iteritems()
} | 0.005464 |
def describe_instance_health(self, load_balancer_name, instances=None):
"""
Get current state of all Instances registered to an Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances
to return status for. If not provided,
the state of all instances will be returned.
:rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
:return: list of state info for instances in this Load Balancer.
"""
params = {'LoadBalancerName' : load_balancer_name}
if instances:
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DescribeInstanceHealth', params,
[('member', InstanceState)]) | 0.003015 |
def download(name, filenames):
'''
    Download a file from the virtual folder to the current working directory.
    Files with the same names will be overwritten.
    \b
    NAME: Name of a virtual folder.
    FILENAMES: Paths of the files to be downloaded.
'''
with Session() as session:
try:
session.VFolder(name).download(filenames, show_progress=True)
print_done('Done.')
except Exception as e:
print_error(e)
sys.exit(1) | 0.00198 |
def generate_antonym(self, input_word):
""" Generate an antonym using a Synset
and its lemmas.
"""
results = []
synset = wordnet.synsets(input_word)
for i in synset:
if i.pos in ['n','v']:
for j in i.lemmas:
if j.antonyms():
name = j.antonyms()[0].name
results.append(PataLib().strip_underscore(name))
results = {'input' : input_word, 'results' : results, 'category' : 'antonym'}
return results | 0.019928 |
def license(self, license):
"""
Sets the license of this DatasetPatchRequest.
Dataset license. Find additional info for allowed values [here](https://data.world/license-help).
:param license: The license of this DatasetPatchRequest.
:type: str
"""
allowed_values = ["Public Domain", "PDDL", "CC-0", "CC-BY", "ODC-BY", "CC-BY-SA", "ODC-ODbL", "CC BY-NC", "CC BY-NC-SA", "Other"]
if license not in allowed_values:
raise ValueError(
"Invalid value for `license` ({0}), must be one of {1}"
.format(license, allowed_values)
)
self._license = license | 0.005935 |
def reset(self, clear=False):
""" Resets the widget to its initial state if ``clear`` parameter or
``clear_on_kernel_restart`` configuration setting is True, otherwise
prints a visual indication of the fact that the kernel restarted, but
does not clear the traces from previous usage of the kernel before it
was restarted. With ``clear=True``, it is similar to ``%clear``, but
also re-writes the banner and aborts execution if necessary.
"""
if self._executing:
self._executing = False
self._request_info['execute'] = {}
self._reading = False
self._highlighter.highlighting_on = False
if self.clear_on_kernel_restart or clear:
self._control.clear()
self._append_plain_text(self.banner)
else:
self._append_plain_text("# restarting kernel...")
self._append_html("<hr><br>")
# XXX: Reprinting the full banner may be too much, but once #1680 is
# addressed, that will mitigate it.
#self._append_plain_text(self.banner)
# update output marker for stdout/stderr, so that startup
# messages appear after banner:
self._append_before_prompt_pos = self._get_cursor().position()
self._show_interpreter_prompt() | 0.002992 |
def version_upload(fname,username="nibjb"):
"""Only scott should do this. Upload new version to site."""
print("popping up pasword window...")
password=TK_askPassword("FTP LOGIN","enter password for %s"%username)
if not password:
return
print("username:",username)
print("password:","*"*(len(password)))
print("connecting...")
ftp = ftplib.FTP("swharden.com")
ftp.login(username, password)
print("successful login!")
ftp.cwd("/software/swhlab/versions") #IMMEDIATELY GO HERE!!!
print("uploading",os.path.basename(fname))
ftp.storbinary("STOR " + os.path.basename(fname), open(fname, "rb"), 1024) #for binary files
print("disconnecting...")
ftp.quit() | 0.018106 |
def install(ctx, services, delete_after_install=False):
"""Install a honeypot service from the online library, local path or zipfile."""
logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
extra={"command": ctx.command.name, "params": ctx.params})
home = ctx.obj["HOME"]
services_path = os.path.join(home, SERVICES)
installed_all_plugins = True
for service in services:
try:
plugin_utils.install_plugin(service, SERVICE, services_path, register_service)
except exceptions.PluginAlreadyInstalled as exc:
click.echo(exc)
installed_all_plugins = False
if not installed_all_plugins:
raise ctx.exit(errno.EEXIST) | 0.004093 |
def general_acquisition_info(metadata):
"""
General sentence on data acquisition. Should be first sentence in MRI data
acquisition section.
Parameters
----------
metadata : :obj:`dict`
The metadata for the dataset.
Returns
-------
out_str : :obj:`str`
Output string with scanner information.
"""
out_str = ('MR data were acquired using a {tesla}-Tesla {manu} {model} '
'MRI scanner.')
out_str = out_str.format(tesla=metadata.get('MagneticFieldStrength',
'UNKNOWN'),
manu=metadata.get('Manufacturer', 'MANUFACTURER'),
model=metadata.get('ManufacturersModelName',
'MODEL'))
return out_str | 0.001212 |
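A quick usage sketch with a hypothetical metadata dict; missing keys fall back to the placeholder strings:

metadata = {'MagneticFieldStrength': 3,
            'Manufacturer': 'Siemens',
            'ManufacturersModelName': 'Prisma'}
print(general_acquisition_info(metadata))
# -> MR data were acquired using a 3-Tesla Siemens Prisma MRI scanner.
print(general_acquisition_info({}))
# -> MR data were acquired using a UNKNOWN-Tesla MANUFACTURER MODEL MRI scanner.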
def mag_to_fnu(self, band, mag):
"""SDSS *primed* magnitudes to F_ν. The primed magnitudes are the "USNO"
standard-star system defined in Smith+ (2002AJ....123.2121S) and
Fukugita+ (1996AJ....111.1748F). This system is anchored to the AB
magnitude system, and as far as I can tell it is not known to have
measurable offsets from that system. (As of DR10, the *unprimed* SDSS
system is known to have small offsets from AB, but I do not believe
that that necessarily has implications for u'g'r'i'z'.)
However, as far as I can tell the filter responses of the USNO
telescope are not published -- only those of the main SDSS 2.5m
telescope. The whole reason for the existence of both the primed and
unprimed ugriz systems is that their responses do not quite match. For
my current application, which involves a completely different
telescope anyway, the difference shouldn't matter.
"""
# `band` should be 'up', 'gp', 'rp', 'ip', or 'zp'.
if len(band) != 2 or band[1] != 'p':
raise ValueError('band: ' + band)
return abmag_to_fnu_cgs(mag) | 0.002547 |
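The conversion delegated to abmag_to_fnu_cgs is presumably the standard AB-magnitude definition, F_nu = 10**(-0.4 * (mag + 48.6)) in erg/s/cm^2/Hz; the helper's exact behaviour is an assumption here, but the definition itself can be sketched and checked against the 3631 Jy zero point.

def abmag_to_fnu_cgs_sketch(mag):
    # AB zero point: m_AB = 0 corresponds to 3631 Jy = 3.631e-20 erg/s/cm^2/Hz.
    return 10.0 ** (-0.4 * (mag + 48.6))

assert abs(abmag_to_fnu_cgs_sketch(0.0) - 3.631e-20) / 3.631e-20 < 1e-2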
def temporary_path(self):
"""
A context manager that enables a reasonably short, general and
magic-less way to solve the :ref:`AtomicWrites`.
* On *entering*, it will create the parent directories so the
temporary_path is writeable right away.
This step uses :py:meth:`FileSystem.mkdir`.
* On *exiting*, it will move the temporary file if there was no exception thrown.
This step uses :py:meth:`FileSystem.rename_dont_move`
The file system operations will be carried out by calling them on :py:attr:`fs`.
The typical use case looks like this:
.. code:: python
class MyTask(luigi.Task):
def output(self):
return MyFileSystemTarget(...)
def run(self):
with self.output().temporary_path() as self.temp_output_path:
run_some_external_command(output_path=self.temp_output_path)
"""
num = random.randrange(0, 1e10)
slashless_path = self.path.rstrip('/').rstrip("\\")
_temp_path = '{}-luigi-tmp-{:010}{}'.format(
slashless_path,
num,
self._trailing_slash())
# TODO: os.path doesn't make sense here as it's os-dependent
tmp_dir = os.path.dirname(slashless_path)
if tmp_dir:
self.fs.mkdir(tmp_dir, parents=True, raise_if_exists=False)
yield _temp_path
# We won't reach here if there was an user exception.
self.fs.rename_dont_move(_temp_path, self.path) | 0.003153 |
def pyxwriter(self):
"""Update the pyx file."""
model = self.Model()
if hasattr(self, 'Parameters'):
model.parameters = self.Parameters(vars(self))
else:
model.parameters = parametertools.Parameters(vars(self))
if hasattr(self, 'Sequences'):
model.sequences = self.Sequences(model=model, **vars(self))
else:
model.sequences = sequencetools.Sequences(model=model,
**vars(self))
return PyxWriter(self, model, self.pyxfilepath) | 0.003436 |
def get_cookbook_dirs(self, base_dir=None):
"""Find cookbook directories."""
if base_dir is None:
base_dir = self.env_root
cookbook_dirs = []
dirs_to_skip = set(['.git'])
for root, dirs, files in os.walk(base_dir): # pylint: disable=W0612
dirs[:] = [d for d in dirs if d not in dirs_to_skip]
for name in files:
if name == 'metadata.rb':
if 'cookbook' in os.path.basename(os.path.dirname(root)):
cookbook_dirs.append(root)
return cookbook_dirs | 0.003401 |
def _pull_out_unaffected_blocks_rhs(rest, rhs, out_port, in_port):
"""Similar to :func:`_pull_out_unaffected_blocks_lhs` but on the RHS of a
series product self-feedback.
"""
_, block_index = rhs.index_in_block(in_port)
rest = tuple(rest)
bs = rhs.block_structure
(nbefore, nblock, nafter) = (sum(bs[:block_index]),
bs[block_index],
sum(bs[block_index + 1:]))
before, block, after = rhs.get_blocks((nbefore, nblock, nafter))
if before != cid(nbefore) or after != cid(nafter):
outer_rhs = before + cid(nblock - 1) + after
inner_rhs = cid(nbefore) + block + cid(nafter)
return Feedback.create(SeriesProduct.create(*(rest + (inner_rhs,))),
out_port=out_port, in_port=in_port) << outer_rhs
elif block == cid(nblock):
outer_rhs = before + cid(nblock - 1) + after
return Feedback.create(SeriesProduct.create(*rest),
out_port=out_port, in_port=in_port) << outer_rhs
raise CannotSimplify() | 0.000915 |
def existing_analysis(using):
"""
Get the existing analysis for the `using` Elasticsearch connection
"""
es = connections.get_connection(using)
index_name = settings.ELASTICSEARCH_CONNECTIONS[using]['index_name']
if es.indices.exists(index=index_name):
return stringer(es.indices.get_settings(index=index_name)[index_name]['settings']['index'].get('analysis', {}))
return DOES_NOT_EXIST | 0.004739 |
def _is_xml_mode(self):
'''
        Is Zypper's output in XML format?
:return:
'''
return [itm for itm in self.XML_DIRECTIVES if itm in self.__cmd] and True or False | 0.014925 |
def remove_subproducts(self):
"""Removes all archived files subproducts associated with this DP"""
if not self.fullpath or not self.archived:
raise RuntimeError("""Can't remove a non-archived data product""")
for root, dirs, files in os.walk(self.subproduct_dir(), topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except:
pass
for name in dirs:
try:
os.remove(os.path.join(root, name))
except:
pass | 0.006359 |
def _glob_events_files(self, paths, recursive):
"""Find all tf events files under a list of paths recursively. """
event_files = []
for path in paths:
dirs = tf.gfile.Glob(path)
dirs = filter(lambda x: tf.gfile.IsDirectory(x), dirs)
for dir in dirs:
if recursive:
dir_files_pair = [(root, filenames) for root, _, filenames in tf.gfile.Walk(dir)]
else:
dir_files_pair = [(dir, tf.gfile.ListDirectory(dir))]
for root, filenames in dir_files_pair:
file_names = fnmatch.filter(filenames, '*.tfevents.*')
file_paths = [os.path.join(root, x) for x in file_names]
file_paths = filter(lambda x: not tf.gfile.IsDirectory(x), file_paths)
event_files += file_paths
return event_files | 0.015152 |
def load(xmlstr):
"""
Loads the contents for this walkthrough from XML.
:param xmlstr | <str>
:return <XWalkthrough> || None
"""
try:
xml = ElementTree.fromstring(xmlstr)
except StandardError:
return None
return XWalkthrough.fromXml(xml) | 0.010929 |
def divide(self, data_source_factory):
"""Divides the task according to the number of workers."""
data_length = data_source_factory.length()
data_interval_length = data_length / self.workers_number() + 1
current_index = 0
self.responses = []
while current_index < data_length:
self.responses.append(0)
offset = current_index
limit = min((data_length - current_index, data_interval_length))
yield data_source_factory.part(limit, offset)
current_index += limit | 0.003527 |
def get_shark_field(self, fields):
"""
:fields: str[]
"""
out = super(BACK, self).get_shark_field(fields)
out.update({'acked_seqs': self.acked_seqs,
'bitmap_str': self.bitmap_str})
return out | 0.007722 |
def define_attribute(self, name, atype, data=None):
"""
Define a new attribute. atype has to be one of 'integer', 'real', 'numeric', 'string', 'date' or 'nominal'.
For nominal attributes, pass the possible values as data.
For date attributes, pass the format as data.
"""
self.attributes.append(name)
assert atype in TYPES, "Unknown type '%s'. Must be one of: %s" % (atype, ', '.join(TYPES),)
self.attribute_types[name] = atype
self.attribute_data[name] = data | 0.007519 |
def channel_ready_future(channel):
"""Creates a Future that tracks when a Channel is ready.
Cancelling the Future does not affect the channel's state machine.
It merely decouples the Future from channel state machine.
Args:
channel: A Channel object.
Returns:
A Future object that matures when the channel connectivity is
ChannelConnectivity.READY.
"""
fut = channel._loop.create_future()
def _set_result(state):
if not fut.done() and state is _grpc.ChannelConnectivity.READY:
fut.set_result(None)
fut.add_done_callback(lambda f: channel.unsubscribe(_set_result))
channel.subscribe(_set_result, try_to_connect=True)
return fut | 0.002878 |
def validate(self, val):
"""
Validates that the val is in the list of values for this Enum.
Returns two element tuple: (bool, string)
- `bool` - True if valid, False if not
- `string` - Description of validation error, or None if valid
:Parameters:
val
Value to validate. Should be a string.
"""
if val in self.values:
return True, None
else:
return False, "'%s' is not in enum: %s" % (val, str(self.values)) | 0.003774 |
def optimise_signal(self, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0, filt=True,
weights=None, mode='minimise',
samples=None, subset=None):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
        of the longest contiguous region where the std is below the threshold,
        and the amplitude is below the threshold.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
d : latools.D object
An latools data object.
analytes : str or array-like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
weights : array-like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
if isinstance(analytes, str):
analytes = [analytes]
self.minimal_analytes.update(analytes)
errs = []
with self.pbar.set(total=len(samples), desc='Optimising Data selection') as prog:
for s in samples:
e = self.data[s].signal_optimiser(analytes=analytes, min_points=min_points,
threshold_mode=threshold_mode, threshold_mult=threshold_mult,
x_bias=x_bias, weights=weights, filt=filt, mode=mode)
if e != '':
errs.append(e)
prog.update()
if len(errs) > 0:
print('\nA Few Problems:\n' + '\n'.join(errs) + '\n\n *** Check Optimisation Plots ***') | 0.005905 |
def _docstring(self):
"""
Generate a docstring for the generated source file.
:return: new docstring
:rtype: str
"""
s = '"""' + "\n"
s += "webhook2lambda2sqs generated function source\n"
s += "this code was generated by webhook2lambda2sqs v%s\n" % VERSION
s += "<%s>\n" % PROJECT_URL
s += "this project is licensed under the AGPLv3 open source license.\n"
s += "\n"
s += "DO NOT MODIFY this function; modifications should be made by "
s += "re-running\n"
s += "the project with an updated configuration.\n"
s += '"""' + "\n"
return s | 0.003017 |
def parse(binary, **params):
"""Turns a TAR file into a frozen sample."""
binary = io.BytesIO(binary)
collection = list()
with tarfile.TarFile(fileobj=binary, mode='r') as tar:
for tar_info in tar.getmembers():
content_type, encoding = mimetypes.guess_type(tar_info.name)
content = tar.extractfile(tar_info)
content = content_encodings.get(encoding).decode(content)
content = content_types.get(content_type).parse(content, **params)
            collection.append((tar_info.name, content))
return collection | 0.001715 |
def do(self, **kwargs):
"""
Here for compatibility with legacy clients only - DO NOT USE!!!
This is sort of mix of "append" and "insert": it puts commands in the list,
with some half smarts about which commands go at the front or back.
If you add multiple commands to the back in one call, they will get added sorted by command name.
:param kwargs: the commands in key=val format
:return: the Action, so you can do Action(...).do(...).do(...)
"""
# add "create" / "add" / "removeFrom" first
for k, v in list(six.iteritems(kwargs)):
if k.startswith("create") or k.startswith("addAdobe") or k.startswith("removeFrom"):
self.commands.append({k: v})
del kwargs[k]
# now do the other actions, in a canonical order (to avoid py2/py3 variations)
for k, v in sorted(six.iteritems(kwargs)):
if k in ['add', 'remove']:
self.commands.append({k: {"product": v}})
else:
self.commands.append({k: v})
return self | 0.005445 |
def SetModel( self, model, adapter=None ):
"""Set our model object (root of the tree)"""
self.model = model
if adapter is not None:
self.adapter = adapter
self.UpdateDrawing() | 0.018265 |
def update_default(self, new_default, respect_none=False):
"""Update our current default with the new_default.
Args:
new_default: New default to set.
respect_none: Flag to determine if ``None`` is a valid value.
"""
if new_default is not None:
self.default = new_default
elif new_default is None and respect_none:
self.default = None | 0.004728 |
def visit_delete(self, node): # XXX check if correct
"""return an astroid.Delete node as string"""
return "del %s" % ", ".join(child.accept(self) for child in node.targets) | 0.015873 |
def simplify(self) -> None:
"""Simplify this expression."""
self.raw = cast(T, z3.simplify(self.raw)) | 0.017094 |
def relevant_items(df):
"""
Dataframe with items used by cultural projects,
filtered by date and price.
"""
start_date = datetime(2013, 1, 1)
df['DataProjeto'] = pd.to_datetime(df['DataProjeto'])
# get only projects newer than start_date
# and items with price > 0
df = df[df.DataProjeto >= start_date]
df = df[df.VlUnitarioAprovado > 0.0]
return df | 0.002525 |
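A small worked example of the filter with a hypothetical two-row frame, assuming the function above and pandas are importable; only the post-2013 row with a positive approved unit price survives.

import pandas as pd

df = pd.DataFrame({
    'DataProjeto': ['2012-06-01', '2015-03-10'],
    'VlUnitarioAprovado': [150.0, 99.9],
})
filtered = relevant_items(df)
assert list(filtered['DataProjeto'].dt.year) == [2015]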
def user_provenance(self, document): # type: (ProvDocument) -> None
"""Add the user provenance."""
self.self_check()
(username, fullname) = _whoami()
if not self.full_name:
self.full_name = fullname
document.add_namespace(UUID)
document.add_namespace(ORCID)
document.add_namespace(FOAF)
account = document.agent(
ACCOUNT_UUID, {provM.PROV_TYPE: FOAF["OnlineAccount"],
"prov:label": username,
FOAF["accountName"]: username})
user = document.agent(
self.orcid or USER_UUID,
{provM.PROV_TYPE: PROV["Person"],
"prov:label": self.full_name,
FOAF["name"]: self.full_name,
FOAF["account"]: account})
# cwltool may be started on the shell (directly by user),
# by shell script (indirectly by user)
# or from a different program
# (which again is launched by any of the above)
#
# We can't tell in which way, but ultimately we're still
# acting in behalf of that user (even if we might
# get their name wrong!)
document.actedOnBehalfOf(account, user) | 0.001623 |
def get_appapi_params(self, prepay_id, timestamp=None, nonce_str=None):
"""
        Get the APP payment parameters.
        :param prepay_id: the prepay_id value returned by the unified order API
        :param timestamp: optional, timestamp; defaults to the current timestamp
        :param nonce_str: optional, random string; generated automatically by default
        :return: the parameter dict including the signature
"""
data = {
'appid': self.appid,
'partnerid': self.mch_id,
'prepayid': prepay_id,
'package': 'Sign=WXPay',
'timestamp': timestamp or to_text(int(time.time())),
'noncestr': nonce_str or random_string(32)
}
sign = calculate_signature(data, self._client.api_key)
data['sign'] = sign
return data | 0.002981 |
def get_release_task_attachments(self, project, release_id, environment_id, attempt_id, plan_id, type):
"""GetReleaseTaskAttachments.
[Preview API]
:param str project: Project ID or project name
:param int release_id:
:param int environment_id:
:param int attempt_id:
:param str plan_id:
:param str type:
:rtype: [ReleaseTaskAttachment]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
if attempt_id is not None:
route_values['attemptId'] = self._serialize.url('attempt_id', attempt_id, 'int')
if plan_id is not None:
route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
response = self._send(http_method='GET',
location_id='a4d06688-0dfa-4895-82a5-f43ec9452306',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('[ReleaseTaskAttachment]', self._unwrap_collection(response)) | 0.00657 |
def format_lines(statements, lines):
"""Nicely format a list of line numbers.
Format a list of line numbers for printing by coalescing groups of lines as
long as the lines represent consecutive statements. This will coalesce
even if there are gaps between statements.
For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
`lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
"""
pairs = []
i = 0
j = 0
start = None
statements = sorted(statements)
lines = sorted(lines)
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
            if start is None:
start = lines[j]
end = lines[j]
j += 1
elif start:
pairs.append((start, end))
start = None
i += 1
if start:
pairs.append((start, end))
ret = ', '.join(map(nice_pair, pairs))
return ret | 0.002088 |
def get_instance(self, payload):
"""
Build an instance of UserChannelInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance
:rtype: twilio.rest.chat.v2.service.user.user_channel.UserChannelInstance
"""
return UserChannelInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
) | 0.007547 |
def getTypeDefinition(self, attribute=None):
"""If attribute is None, "type" is assumed, return the corresponding
representation of the global type definition (TypeDefinition),
or the local definition if don't find "type". To maintain backwards
compat, if attribute is provided call base class method.
"""
if attribute:
return XMLSchemaComponent.getTypeDefinition(self, attribute)
gt = XMLSchemaComponent.getTypeDefinition(self, 'type')
if gt:
return gt
return self.content | 0.003509 |
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = ['gpasswd', '-a', username, group]
log("Adding user {} to group {}".format(username, group))
subprocess.check_call(cmd) | 0.004785 |
def get_sphere(coords, r=4, vox_dims=(2, 2, 2), dims=(91, 109, 91)):
""" # Return all points within r mm of coordinates. Generates a cube
and then discards all points outside sphere. Only returns values that
fall within the dimensions of the image."""
r = float(r)
xx, yy, zz = [slice(-r / vox_dims[i], r / vox_dims[
i] + 0.01, 1) for i in range(len(coords))]
cube = np.vstack([row.ravel() for row in np.mgrid[xx, yy, zz]])
sphere = cube[:, np.sum(np.dot(np.diag(
vox_dims), cube) ** 2, 0) ** .5 <= r]
sphere = np.round(sphere.T + coords)
return sphere[(np.min(sphere, 1) >= 0) &
(np.max(np.subtract(sphere, dims), 1) <= -1), :].astype(int) | 0.001377 |
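A sanity check of the geometry, assuming the function above and numpy (as np) are importable: every returned voxel lies within r millimetres of the centre once the voxel dimensions are applied.

import numpy as np

coords = np.array([45, 54, 45])
vox_dims = (2, 2, 2)
pts = get_sphere(coords, r=4, vox_dims=vox_dims, dims=(91, 109, 91))
dists_mm = np.sqrt(np.sum(((pts - coords) * vox_dims) ** 2, axis=1))
assert np.all(dists_mm <= 4)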
def p_type_ref(self, p):
'type_ref : ID args nullable'
p[0] = AstTypeRef(
path=self.path,
lineno=p.lineno(1),
lexpos=p.lexpos(1),
name=p[1],
args=p[2],
nullable=p[3],
ns=None,
) | 0.007018 |
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.children) == (other.type, other.children) | 0.014184 |
def query_target_count(self, target):
"""Return the target count"""
reply = NVCtrlQueryTargetCountReplyRequest(display=self.display,
opcode=self.display.get_extension_major(extname),
target_type=target.type())
return int(reply._data.get('count')) | 0.005682 |
def _udf_name_and_parent_from_path(self, udf_path):
# type: (bytes) -> Tuple[bytes, udfmod.UDFFileEntry]
'''
An internal method to find the parent directory record and name given a
UDF path. If the parent is found, return a tuple containing the basename
of the path and the parent UDF File Entry object.
Parameters:
udf_path - The absolute UDF path to the entry on the ISO.
Returns:
A tuple containing just the name of the entry and a UDF File Entry
object representing the parent of the entry.
'''
splitpath = utils.split_path(udf_path)
name = splitpath.pop()
(parent_ident_unused, parent) = self._find_udf_record(b'/' + b'/'.join(splitpath))
return (name.decode('utf-8').encode('utf-8'), parent) | 0.006075 |
def get_vnetwork_vms_input_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vms = ET.Element("get_vnetwork_vms")
config = get_vnetwork_vms
input = ET.SubElement(get_vnetwork_vms, "input")
name = ET.SubElement(input, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004425 |
def p_definition_list(p):
"""
definition_list : definition definition_list
| definition
"""
if len(p) == 3:
p[0] = p[1] + p[2]
elif len(p) == 2:
p[0] = p[1]
else:
raise RuntimeError("Invalid production rules 'p_action_list'") | 0.003413 |
def pairwise_align_sequences_to_representative(self, gapopen=10, gapextend=0.5, outdir=None,
engine='needle', parse=True, force_rerun=False):
"""Pairwise all sequences in the sequences attribute to the representative sequence. Stores the alignments
in the ``sequence_alignments`` DictList attribute.
Args:
gapopen (int): Only for ``engine='needle'`` - Gap open penalty is the score taken away when a gap is created
gapextend (float): Only for ``engine='needle'`` - Gap extension penalty is added to the standard gap penalty
for each base or residue in the gap
outdir (str): Only for ``engine='needle'`` - Path to output directory. Default is the protein sequence
directory.
engine (str): ``biopython`` or ``needle`` - which pairwise alignment program to use.
``needle`` is the standard EMBOSS tool to run pairwise alignments.
``biopython`` is Biopython's implementation of needle. Results can differ!
parse (bool): Store locations of mutations, insertions, and deletions in the alignment object (as an
annotation)
force_rerun (bool): Only for ``engine='needle'`` - Default False, set to True if you want to rerun the
alignment if outfile exists.
"""
if not self.representative_sequence:
raise ValueError('{}: no representative sequence set'.format(self.id))
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
for seq in self.sequences:
aln_id = '{}_{}'.format(self.id, seq.id)
outfile = '{}.needle'.format(aln_id)
if self.sequence_alignments.has_id(aln_id):
log.debug('{}: alignment already completed'.format(seq.id))
continue
if not seq.seq_str:
log.error('{}: no sequence stored, skipping alignment'.format(seq.id))
continue
# Don't need to compare sequence to itself
if seq.id == self.representative_sequence.id:
continue
aln = ssbio.protein.sequence.utils.alignment.pairwise_sequence_alignment(a_seq=self.representative_sequence.seq_str,
a_seq_id=self.id,
b_seq=seq.seq_str,
b_seq_id=seq.id,
gapopen=gapopen, gapextend=gapextend,
engine=engine,
outdir=outdir,
outfile=outfile,
force_rerun=force_rerun)
# Add an identifier to the MultipleSeqAlignment object for storage in a DictList
aln.id = aln_id
aln.annotations['a_seq'] = self.representative_sequence.id
aln.annotations['b_seq'] = seq.id
if parse:
aln_df = ssbio.protein.sequence.utils.alignment.get_alignment_df(a_aln_seq=str(list(aln)[0].seq),
b_aln_seq=str(list(aln)[1].seq))
aln.annotations['ssbio_type'] = 'seqalign'
aln.annotations['mutations'] = ssbio.protein.sequence.utils.alignment.get_mutations(aln_df)
aln.annotations['deletions'] = ssbio.protein.sequence.utils.alignment.get_deletions(aln_df)
aln.annotations['insertions'] = ssbio.protein.sequence.utils.alignment.get_insertions(aln_df)
self.sequence_alignments.append(aln) | 0.007434 |
def _compute_lcptab(self, string, suftab):
"""Computes the LCP array in O(n) based on the input string & its suffix array.
Kasai et al. (2001).
"""
n = len(suftab)
        rank = [0] * n
        for i in range(n):
            rank[suftab[i]] = i
        lcptab = np.zeros(n, dtype=int)
        h = 0
        for i in range(n):
            if rank[i] >= 1:
                j = suftab[rank[i] - 1]
                # Bounds checks keep the comparison from running past the end
                # of the string when it is not terminated by a unique sentinel.
                while i + h < n and j + h < n and string[i + h] == string[j + h]:
                    h += 1
                lcptab[rank[i]] = h
                if h > 0:
                    h -= 1
return lcptab | 0.0048 |
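A self-contained sketch of the same Kasai-style LCP construction shown above, paired with a naively built suffix array so it can run on its own; the helper names are illustrative and not part of the original class.

def naive_suffix_array(s):
    # Sort suffix start positions by the suffix each one begins.
    return sorted(range(len(s)), key=lambda i: s[i:])

def lcp_array(s, suftab):
    n = len(s)
    rank = [0] * n
    for i, suf in enumerate(suftab):
        rank[suf] = i
    lcp = [0] * n
    h = 0
    for i in range(n):
        if rank[i] > 0:
            j = suftab[rank[i] - 1]
            while i + h < n and j + h < n and s[i + h] == s[j + h]:
                h += 1
            lcp[rank[i]] = h
            if h > 0:
                h -= 1
    return lcp

if __name__ == "__main__":
    text = "banana"
    sa = naive_suffix_array(text)   # [5, 3, 1, 0, 4, 2]
    print(lcp_array(text, sa))      # [0, 1, 3, 0, 0, 2]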
def resource_from_data(self, data_element, resource=None):
"""
Converts the given data element to a resource.
:param data_element: object implementing
:class:`everest.representers.interfaces.IExplicitDataElement`
"""
return self._mapping.map_to_resource(data_element, resource=resource) | 0.0059 |
def exclude_items(items, any_all=any, ignore_case=False, normalize_values=False, **kwargs):
"""Exclude items by matching metadata.
Note:
        Metadata values are lowercased when ``normalize_values`` is ``True``,
so ``ignore_case`` is automatically set to ``True``.
Parameters:
items (list): A list of item dicts or filepaths.
any_all (callable): A callable to determine if any or all filters must match to exclude items.
Expected values :obj:`any` (default) or :obj:`all`.
ignore_case (bool): Perform case-insensitive matching.
Default: ``False``
normalize_values (bool): Normalize metadata values to remove common differences between sources.
Default: ``False``
kwargs (list): Lists of values to match the given metadata field.
Yields:
dict: The next item to be included.
Example:
>>> from google_music_utils import exclude_items
>>> list(exclude_items(song_list, any_all=all, ignore_case=True, normalize_values=True, artist=['Beck'], album=['Golden Feelings']))
"""
if kwargs:
match = functools.partial(
_match_item, any_all=any_all, ignore_case=ignore_case, normalize_values=normalize_values, **kwargs
)
return filterfalse(match, items)
else:
return iter(items) | 0.026359 |
def max_len(iterable, minimum=0):
"""Return the len() of the longest item in ``iterable`` or ``minimum``.
>>> max_len(['spam', 'ham'])
4
>>> max_len([])
0
>>> max_len(['ham'], 4)
4
"""
try:
result = max(map(len, iterable))
except ValueError:
result = minimum
return minimum if result < minimum else result | 0.002717 |
async def read(self, *, decode: bool=False) -> Any:
"""Reads body part data.
        decode: Decodes the data according to the encoding
                method given in the Content-Encoding header. If the header
                is missing, the data is returned untouched.
"""
if self._at_eof:
return b''
data = bytearray()
while not self._at_eof:
data.extend((await self.read_chunk(self.chunk_size)))
if decode:
return self.decode(data)
return data | 0.007968 |
def is_method_of(method, object):
"""Decide whether ``method`` is contained within the MRO of ``object``."""
if not callable(method) or not hasattr(method, "__name__"):
return False
if inspect.ismethod(method):
return method.__self__ is object
for cls in inspect.getmro(object.__class__):
if cls.__dict__.get(method.__name__, None) is method:
return True
return False | 0.002364 |
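A small usage sketch for the helper above, assuming `is_method_of` (and the `inspect` import it relies on) is in scope; the `Greeter` class is made up for illustration.

class Greeter:
    def hello(self):
        return "hi"

g = Greeter()
print(is_method_of(g.hello, g))        # True: bound method of this instance
print(is_method_of(Greeter.hello, g))  # True: found in the class __dict__ via the MRO
print(is_method_of(len, g))            # False: unrelated callable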
def update(self, other_context):
"""
        Updates this lookup set with the given options.
:param other_context | <dict> || <orb.Context>
"""
# convert a context instance into a dictionary
if isinstance(other_context, orb.Context):
other_context = copy.copy(other_context.raw_values)
ignore = ('where', 'columns', 'scope')
inherit_kwds = {}
inherit_scope = {}
inherit_columns = []
inherit_where = orb.Query()
# update from the base context
base_context = other_context.pop('context', None)
if base_context is not None:
inherit_kwds = base_context.raw_values
# use the default contexts
else:
for default in self.defaultContexts():
if default is not None:
# extract expandable information
for k, v in default.raw_values.items():
if k not in ignore:
inherit_kwds[k] = copy.copy(v)
# merge where queries
where = default.where
if where is not None:
inherit_where &= where
# merge column queries
columns = default.columns
if columns is not None:
inherit_columns += list(columns)
# merge scope
scope = default.scope
if scope:
inherit_scope.update(scope)
# update the inherited kwds
for k, v in inherit_kwds.items():
other_context.setdefault(k, v)
# update the inherited query
if inherit_where:
other_context.setdefault('where', orb.Query())
other_context['where'] &= inherit_where
# update the inherited columns
if inherit_columns:
other_context['columns'] = inherit_columns + (other_context.get('columns') or [])
# update the inherited scope
if inherit_scope:
new_scope = {}
new_scope.update(inherit_scope)
new_scope.update(other_context.get('scope') or {})
other_context['scope'] = new_scope
# convert the columns to a list
if 'columns' in other_context and isinstance(other_context['columns'], (str, unicode)):
other_context['columns'] = other_context['columns'].split(',')
# convert where to query
where = other_context.get('where')
if isinstance(where, dict):
other_context['where'] = orb.Query.fromJSON(where)
if isinstance(where, (orb.Query, orb.QueryCompound)):
other_context['where'] &= self.where
# validate values
if other_context.get('start') is not None and (type(other_context['start']) != int or other_context['start'] < 0):
            msg = 'Start needs to be a non-negative number, got {0} instead'
            raise orb.errors.ContextError(msg.format(other_context.get('start')))
if other_context.get('page') is not None and (type(other_context['page']) != int or other_context['page'] < 1):
msg = 'Page needs to be a number equal to or greater than 1, got {0} instead'
raise orb.errors.ContextError(msg.format(other_context.get('page')))
if other_context.get('limit') is not None and (type(other_context['limit']) != int or other_context['limit'] < 1):
msg = 'Limit needs to be a number equal to or greater than 1, got {0} instead'
raise orb.errors.ContextError(msg.format(other_context.get('limit')))
if other_context.get('pageSize') is not None and (type(other_context['pageSize']) != int or other_context['pageSize'] < 1):
msg = 'Page size needs to be a number equal to or greater than 1, got {0} instead'
raise orb.errors.ContextError(msg.format(other_context.get('pageSize')))
# update the raw values
self.raw_values.update({k: v for k, v in other_context.items() if k in self.Defaults}) | 0.003887 |
def TK_message(title,msg):
"""use the GUI to pop up a message."""
root = tkinter.Tk()
root.withdraw() #hide tk window
root.attributes("-topmost", True) #always on top
root.lift() #bring to top
tkinter.messagebox.showwarning(title, msg)
root.destroy() | 0.028777 |
def _create_target_dir_if_needed(self, target, depth_limit=20):
"""Creates the directory for the path given, recursively creating
parent directories when needed"""
if depth_limit <= 0:
raise FtpCreateDirsException('Depth limit exceeded')
if not target:
return
target_dir = os.path.dirname(target)
parent_dir, dir_name = os.path.split(target_dir)
parent_dir_ls = []
try:
parent_dir_ls = self.ftp.nlst(parent_dir)
        except Exception:
# Possibly a microsoft server
# They throw exceptions when we try to ls non-existing folders
pass
parent_dir_files = [os.path.basename(d) for d in parent_dir_ls]
if dir_name not in parent_dir_files:
if parent_dir and target_dir != '/':
self._create_target_dir_if_needed(target_dir, depth_limit=depth_limit - 1)
            self.logger.info('Will create dir: %s' % target_dir)
self.ftp.mkd(target_dir) | 0.003922 |
def setPlainText(self, txt, mimetype='text/x-python', encoding='utf-8'):
"""
        Extends QCodeEdit.setPlainText to allow the user to set plain text
        without a mimetype (since the python syntax highlighter does not use it).
"""
try:
self.syntax_highlighter.docstrings[:] = []
self.syntax_highlighter.import_statements[:] = []
except AttributeError:
pass
super(PyCodeEditBase, self).setPlainText(txt, mimetype, encoding) | 0.004024 |
def derivatives(self, x, y, n_sersic, R_sersic, k_eff, center_x=0, center_y=0):
"""
returns df/dx and df/dy of the function
"""
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
if isinstance(r, int) or isinstance(r, float):
r = max(self._s, r)
else:
r[r < self._s] = self._s
alpha = -self.alpha_abs(x, y, n_sersic, R_sersic, k_eff, center_x, center_y)
f_x = alpha * x_ / r
f_y = alpha * y_ / r
return f_x, f_y | 0.005525 |
def ordinal_encoding(X_in, mapping=None, cols=None, handle_unknown='value', handle_missing='value'):
"""
    Ordinal encoding uses a single column of integers to represent the classes. An optional mapping dict can be passed
    in; in that case the known true ordering of the classes is used. Otherwise, the classes are assumed to have no
    true order and integers are assigned arbitrarily, in the order the categories are encountered.
"""
return_nan_series = pd.Series(data=[np.nan], index=[-2])
X = X_in.copy(deep=True)
if cols is None:
cols = X.columns.values
if mapping is not None:
mapping_out = mapping
for switch in mapping:
column = switch.get('col')
X[column] = X[column].map(switch['mapping'])
try:
X[column] = X[column].astype(int)
except ValueError as e:
X[column] = X[column].astype(float)
if handle_unknown == 'value':
X[column].fillna(-1, inplace=True)
elif handle_unknown == 'error':
missing = X[column].isnull()
if any(missing):
raise ValueError('Unexpected categories found in column %s' % column)
if handle_missing == 'return_nan':
X[column] = X[column].map(return_nan_series).where(X[column] == -2, X[column])
else:
mapping_out = []
for col in cols:
nan_identity = np.nan
if util.is_category(X[col].dtype):
categories = X[col].cat.categories
else:
categories = X[col].unique()
index = pd.Series(categories).fillna(nan_identity).unique()
data = pd.Series(index=index, data=range(1, len(index) + 1))
if handle_missing == 'value' and ~data.index.isnull().any():
data.loc[nan_identity] = -2
elif handle_missing == 'return_nan':
data.loc[nan_identity] = -2
mapping_out.append({'col': col, 'mapping': data, 'data_type': X[col].dtype}, )
return X, mapping_out | 0.003521 |
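A minimal, self-contained pandas sketch of the same idea when no mapping is supplied (each observed category gets the next integer); it does not call the function above and omits the unknown/missing handling.

import pandas as pd

X = pd.DataFrame({"color": ["red", "green", "red", "blue"]})
categories = pd.Series(X["color"].unique())
mapping = pd.Series(data=range(1, len(categories) + 1), index=categories)
X["color_encoded"] = X["color"].map(mapping)
print(X)
#    color  color_encoded
# 0    red              1
# 1  green              2
# 2    red              1
# 3   blue              3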
def has_add_permission(self):
"""
        Returns whether the current user has permission to add another object of the same
        type as the one being viewed/edited.
"""
has_permission = False
if self.user is not None:
# We don't check for the object level permission - as the add permission doesn't make
# sense on a per object level here.
has_permission = self.user.has_perm(
'{}.add_{}'.format(self.opts.app_label, self.opts.model_name)
)
return has_permission | 0.006957 |
def spelling(self):
"""Return the spelling of the entity pointed at by the cursor."""
if not hasattr(self, '_spelling'):
self._spelling = conf.lib.clang_getCursorSpelling(self)
return self._spelling | 0.008511 |
def refresh_oauth_credential(self):
"""Refresh session's OAuth 2.0 credentials if they are stale."""
if self.session.token_type == auth.SERVER_TOKEN_TYPE:
return
credential = self.session.oauth2credential
if credential.is_stale():
refresh_session = refresh_access_token(credential)
self.session = refresh_session | 0.005249 |
def set_status(self, message=None, console_url=None, status_links=None):
"""Sets the current status of this pipeline.
This method is purposefully non-transactional. Updates are written to the
datastore immediately and overwrite all existing statuses.
Args:
message: (optional) Overall status message.
console_url: (optional) Relative URL to use for the "console" of this
pipeline that displays current progress. When None, no console will
be displayed.
status_links: (optional) Dictionary of readable link names to relative
URLs that should be associated with this pipeline as it runs. These links
provide convenient access to other dashboards, consoles, etc associated
with the pipeline.
Raises:
PipelineRuntimeError if the status could not be set for any reason.
"""
if _TEST_MODE:
logging.info(
'New status for %s#%s: message=%r, console_url=%r, status_links=%r',
self, self.pipeline_id, message, console_url, status_links)
return
status_key = db.Key.from_path(_StatusRecord.kind(), self.pipeline_id)
root_pipeline_key = db.Key.from_path(
_PipelineRecord.kind(), self.root_pipeline_id)
status_record = _StatusRecord(
key=status_key, root_pipeline=root_pipeline_key)
try:
if message:
status_record.message = message
if console_url:
status_record.console_url = console_url
if status_links:
        # Alphabetize the list.
status_record.link_names = sorted(
db.Text(s) for s in status_links.iterkeys())
status_record.link_urls = [
db.Text(status_links[name]) for name in status_record.link_names]
status_record.status_time = datetime.datetime.utcnow()
status_record.put()
except Exception, e:
raise PipelineRuntimeError('Could not set status for %s#%s: %s' %
(self, self.pipeline_id, str(e))) | 0.005598 |
def _lnk_delete_link(self, link_name):
"""Removes a link from disk"""
translated_name = '/' + self._trajectory_name + '/' + link_name.replace('.','/')
link = self._hdf5file.get_node(where=translated_name)
link._f_remove() | 0.01581 |
def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
formatstr = str(self.resolve_option("format"))
expanded = self.storagehandler.expand(formatstr)
self._output.append(Token(expanded))
return None | 0.005764 |
def _generate_ngram_table(self, output_dir, labels, results):
"""Returns an HTML table containing data on each n-gram in
`results`."""
html = []
grouped = results.groupby(constants.NGRAM_FIELDNAME)
row_template = self._generate_ngram_row_template(labels)
for name, group in grouped:
html.append(self._render_ngram_row(name, group, row_template,
labels))
return '\n'.join(html) | 0.004073 |
def to_bytes(self):
'''
Create bytes from properties
'''
# Verify that the properties make sense
self.sanitize()
# Write the next header type
bitstream = BitStream('uint:8=%d' % self.next_header)
# Add the reserved bits
bitstream += BitStream(8)
# Add the fragment offset
bitstream += BitStream('uint:13=%d' % self.fragment_offset)
# Add the reserved bits
bitstream += BitStream(2)
# Add the flags
bitstream += BitStream('bool=%d' % self.more_fragments)
# Add the identification
bitstream += BitStream('uint:32=%d' % self.identification)
return bitstream.bytes + bytes(self.payload) | 0.002729 |
def find_characteristic(self, uuid):
"""Return the first child characteristic found that has the specified
UUID. Will return None if no characteristic that matches is found.
"""
for char in self.list_characteristics():
if char.uuid == uuid:
return char
return None | 0.006006 |
async def probe_message(self, _message, context):
"""Handle a probe message.
See :meth:`AbstractDeviceAdapter.probe`.
"""
client_id = context.user_data
await self.probe(client_id) | 0.00905 |
def getActiveProperties(self):
""" Returns the non-zero accidental dignities. """
score = self.getScoreProperties()
return {key: value for (key, value) in score.items()
if value != 0} | 0.008969 |
def f0(E, fermi, T):
"""
    Returns the equilibrium Fermi-Dirac occupation probability.
Args:
E (float): energy in eV
fermi (float): the fermi level in eV
T (float): the temperature in kelvin
"""
return 1. / (1. + np.exp((E - fermi) / (_cd("Boltzmann constant in eV/K") * T))) | 0.006803 |
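A small self-contained check of the same Fermi-Dirac expression, with the Boltzmann constant in eV/K written out instead of the `_cd()` lookup used above.

import numpy as np

K_B_EV = 8.617333262e-05  # Boltzmann constant in eV/K

def fermi_dirac(E, fermi, T):
    # Occupation probability of a state at energy E for Fermi level `fermi` and temperature T.
    return 1.0 / (1.0 + np.exp((E - fermi) / (K_B_EV * T)))

print(fermi_dirac(E=0.5, fermi=0.5, T=300.0))  # 0.5 exactly at the Fermi level
print(fermi_dirac(E=0.6, fermi=0.5, T=300.0))  # ~0.02 at 0.1 eV above the Fermi level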
def limit (s, length=72):
"""If the length of the string exceeds the given limit, it will be cut
off and three dots will be appended.
@param s: the string to limit
@type s: string
@param length: maximum length
@type length: non-negative integer
@return: limited string, at most length+3 characters long
"""
assert length >= 0, "length limit must be a non-negative integer"
if not s or len(s) <= length:
return s
if length == 0:
return ""
return "%s..." % s[:length] | 0.003774 |
def segment(text: str) -> list:
    """
    Enhanced Thai Character Cluster (ETCC)
    :param str text: word input
    :return: list of Enhanced Thai Character Clusters
    """
    if not text or not isinstance(text, str):
        return []
if re.search(r"[เแ]" + _C + r"[" + "".join(_UV) + r"]" + r"\w", text):
search = re.findall(r"[เแ]" + _C + r"[" + "".join(_UV) + r"]" + r"\w", text)
for i in search:
text = re.sub(i, "/" + i + "/", text)
if re.search(_C + r"[" + "".join(_UV1) + r"]" + _C + _C + r"ุ" + r"์", text):
search = re.findall(
_C + r"[" + "".join(_UV1) + r"]" + _C + _C + r"ุ" + r"์", text
)
for i in search:
text = re.sub(i, "//" + i + "/", text)
if re.search(_C + _UV2 + _C, text):
search = re.findall(_C + _UV2 + _C, text)
for i in search:
text = re.sub(i, "/" + i + "/", text)
re.sub("//", "/", text)
if re.search("เ" + _C + "า" + "ะ", text):
search = re.findall("เ" + _C + "า" + "ะ", text)
for i in search:
text = re.sub(i, "/" + i + "/", text)
if re.search("เ" + r"\w\w" + "า" + "ะ", text):
search = re.findall("เ" + r"\w\w" + "า" + "ะ", text)
for i in search:
text = re.sub(i, "/" + i + "/", text)
text = re.sub("//", "/", text)
if re.search(_C + "[" + "".join(_UV1) + "]" + _C + _C + "์", text):
search = re.findall(_C + "[" + "".join(_UV1) + "]" + _C + _C + "์", text)
for i in search:
text = re.sub(i, "/" + i + "/", text)
if re.search("/" + _C + "".join(["ุ", "์"]) + "/", text):
        # Fix the case of พัน/ธุ์ (rejoin the split cluster with the preceding syllable)
search = re.findall("/" + _C + "".join(["ุ", "์"]) + "/", text)
for i in search:
ii = re.sub("/", "", i)
text = re.sub(i, ii + "/", text)
text = re.sub("//", "/", text)
return text.split("/") | 0.00213 |
def expr_labelfunc(leaf_renderer=str, fallback=str):
"""Factory for function ``labelfunc(expr, is_leaf)``
It has the following behavior:
* If ``is_leaf`` is True, return ``leaf_renderer(expr)``.
* Otherwise,
- if `expr` is an Expression, return a custom string similar to
:func:`~qnet.printing.srepr`, but with an ellipsis for ``args``
- otherwise, return ``fallback(expr)``
"""
def _labelfunc(expr, is_leaf):
if is_leaf:
label = leaf_renderer(expr)
elif isinstance(expr, Expression):
if len(expr.kwargs) == 0:
label = expr.__class__.__name__
else:
label = "%s(..., %s)" % (
expr.__class__.__name__,
", ".join([
"%s=%s" % (key, val)
for (key, val) in expr.kwargs.items()]))
else:
label = fallback(expr)
return label
return _labelfunc | 0.001013 |
def parse(expected, query):
"""
Parse query parameters.
:type expected: `dict` mapping `bytes` to `callable`
:param expected: Mapping of query argument names to argument parsing
callables.
:type query: `dict` mapping `bytes` to `list` of `bytes`
:param query: Mapping of query argument names to lists of argument values,
this is the form that Twisted Web's `IRequest.args
<twisted:twisted.web.iweb.IRequest.args>` value takes.
:rtype: `dict` mapping `bytes` to `object`
:return: Mapping of query argument names to parsed argument values.
"""
return dict(
(key, parser(query.get(key, [])))
for key, parser in expected.items()) | 0.001408 |
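A hypothetical usage sketch for `parse` above; the `one` and `many` parser helpers are made up for illustration and are not part of the original module.

def one(default=None):
    # Take the first value from the list, or the default if the argument is absent.
    return lambda values: values[0] if values else default

def many(values):
    return list(values)

expected = {b"name": one(), b"tag": many}
query = {b"name": [b"spam"], b"tag": [b"a", b"b"]}
print(parse(expected, query))   # {b'name': b'spam', b'tag': [b'a', b'b']}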
def _make_marker_token(self, type_):
"""Make a token that has no content"""
tok = Token(type_,
'',
self.line,
self.line_num,
self.start,
self.start)
return tok | 0.007067 |
def create_file_from_stream(
self, share_name, directory_name, file_name, stream, count,
content_settings=None, metadata=None, progress_callback=None,
max_connections=1, max_retries=5, retry_wait=1.0, timeout=None):
'''
Creates a new file from a file/stream, or updates the content of an
existing file, with automatic chunking and progress notifications.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of file to create or update.
:param io.IOBase stream:
Opened file/stream to upload as the file content.
:param int count:
Number of bytes to read from the stream. This is required, a
file cannot be created if the count is unknown.
:param ~azure.storage.file.models.ContentSettings content_settings:
ContentSettings object used to set file properties.
:param metadata:
Name-value pairs associated with the file as metadata.
:type metadata: a dict mapping str to str
:param progress_callback:
Callback for progress with signature function(current, total) where
            current is the number of bytes transferred so far and total is the
size of the file, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
Maximum number of parallel connections to use when the file size
exceeds 64MB.
Set to 1 to upload the file chunks sequentially.
Set to 2 or more to upload the file chunks in parallel. This uses
more system resources but will upload faster.
Note that parallel upload requires the stream to be seekable.
:param int max_retries:
Number of times to retry upload of file chunk if an error occurs.
:param int retry_wait:
Sleep time in secs between retries.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('stream', stream)
_validate_not_none('count', count)
if count < 0:
raise TypeError(_ERROR_VALUE_NEGATIVE.format('count'))
self.create_file(
share_name,
directory_name,
file_name,
count,
content_settings,
metadata,
timeout
)
_upload_file_chunks(
self,
share_name,
directory_name,
file_name,
count,
self.MAX_RANGE_SIZE,
stream,
max_connections,
max_retries,
retry_wait,
progress_callback,
timeout
) | 0.001585 |
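A hedged usage sketch for the chunked upload above, assuming the legacy azure-storage-file SDK's `FileService` client; the account name, key, share, and paths are placeholders.

import io
from azure.storage.file import FileService
from azure.storage.file.models import ContentSettings

service = FileService(account_name='myaccount', account_key='<account-key>')
data = b'hello azure files' * 1024
service.create_file_from_stream(
    share_name='myshare',
    directory_name='logs',
    file_name='hello.bin',
    stream=io.BytesIO(data),
    count=len(data),
    content_settings=ContentSettings(content_type='application/octet-stream'),
    max_connections=2,
)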
def check_categories(lines):
'''
find out how many row and col categories are available
'''
# count the number of row categories
rcat_line = lines[0].split('\t')
# calc the number of row names and categories
num_rc = 0
found_end = False
# skip first tab
for inst_string in rcat_line[1:]:
if inst_string == '':
if found_end is False:
num_rc = num_rc + 1
else:
found_end = True
max_rcat = 15
if max_rcat > len(lines):
max_rcat = len(lines) - 1
num_cc = 0
for i in range(max_rcat):
ccat_line = lines[i + 1].split('\t')
# make sure that line has length greater than one to prevent false cats from
# trailing new lines at end of matrix
if ccat_line[0] == '' and len(ccat_line) > 1:
num_cc = num_cc + 1
num_labels = {}
num_labels['row'] = num_rc + 1
num_labels['col'] = num_cc + 1
return num_labels | 0.023622 |