text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def build(cls, data, *args, **kwargs):
"""
Constructs a classification or regression tree in a single batch by
analyzing the given data.
"""
assert isinstance(data, Data)
if data.is_continuous_class:
fitness_func = gain_variance
else:
fitness_func = get_gain
t = cls(data=data, *args, **kwargs)
t._data = data
t.sample_count = len(data)
t._tree = create_decision_tree(
data=data,
attributes=data.attribute_names,
class_attr=data.class_attribute_name,
fitness_func=fitness_func,
wrapper=t,
)
return t | 0.00431 |
def pb_id(self, pb_id: str):
"""Set the PB Id for this device."""
# FIXME(BMo) instead of creating the object to check if the PB exists
# use a method on PB List?
# ProcessingBlock(pb_id)
self.set_state(DevState.ON)
self._pb_id = pb_id | 0.006803 |
def is_prime( n ):
"""Return True if x is prime, False otherwise.
We use the Miller-Rabin test, as given in Menezes et al. p. 138.
This test is not exact: there are composite values n for which
it returns True.
In testing the odd numbers from 10000001 to 19999999,
about 66 composites got past the first test,
5 got past the second test, and none got past the third.
Since factors of 2, 3, 5, 7, and 11 were detected during
preliminary screening, the number of numbers tested by
Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
= 4.57 million.
"""
# (This is used to study the risk of false positives:)
global miller_rabin_test_count
miller_rabin_test_count = 0
if n <= smallprimes[-1]:
if n in smallprimes: return True
else: return False
if gcd( n, 2*3*5*7*11 ) != 1: return False
# Choose a number of iterations sufficient to reduce the
# probability of accepting a composite below 2**-80
# (from Menezes et al. Table 4.4):
t = 40
n_bits = 1 + int( math.log( n, 2 ) )
for k, tt in ( ( 100, 27 ),
( 150, 18 ),
( 200, 15 ),
( 250, 12 ),
( 300, 9 ),
( 350, 8 ),
( 400, 7 ),
( 450, 6 ),
( 550, 5 ),
( 650, 4 ),
( 850, 3 ),
( 1300, 2 ),
):
if n_bits < k: break
t = tt
# Run the test t times:
s = 0
r = n - 1
while ( r % 2 ) == 0:
s = s + 1
r = r // 2
for i in range( t ):
a = smallprimes[ i ]
y = modular_exp( a, r, n )
if y != 1 and y != n-1:
j = 1
while j <= s - 1 and y != n - 1:
y = modular_exp( y, 2, n )
if y == 1:
miller_rabin_test_count = i + 1
return False
j = j + 1
if y != n-1:
miller_rabin_test_count = i + 1
return False
return True | 0.036541 |
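smallprimes, gcd, and modular_exp are helpers defined elsewhere in that module. As a rough, self-contained illustration of the same witness loop, here is a minimal Miller-Rabin sketch built on Python's built-in pow (the fixed witness list and the function name are assumptions for illustration, not the original API):

def miller_rabin_sketch(n, witnesses=(2, 3, 5, 7, 11, 13)):
    """Minimal Miller-Rabin sketch; probabilistic, like the function above."""
    if n < 2:
        return False
    for p in (2, 3, 5, 7, 11, 13):           # trial division by the witnesses themselves
        if n % p == 0:
            return n == p
    s, r = 0, n - 1                          # write n - 1 as 2**s * r with r odd
    while r % 2 == 0:
        s, r = s + 1, r // 2
    for a in witnesses:
        y = pow(a, r, n)                     # modular_exp(a, r, n) in the original
        if y in (1, n - 1):
            continue
        for _ in range(s - 1):
            y = pow(y, 2, n)
            if y == n - 1:
                break
        else:
            return False                     # a is a witness that n is composite
    return True                              # probably prime

assert miller_rabin_sketch(97)
assert not miller_rabin_sketch(91)           # 7 * 13
assert not miller_rabin_sketch(2047)         # 23 * 89, a strong pseudoprime to base 2 only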
def create_from_table(cls, tab_e):
"""
Parameters
----------
tab_e : `~astropy.table.Table`
EBOUNDS table.
"""
convert_sed_cols(tab_e)
try:
emin = np.array(tab_e['e_min'].to(u.MeV))
emax = np.array(tab_e['e_max'].to(u.MeV))
    except Exception:
emin = np.array(tab_e['e_min'])
emax = np.array(tab_e['e_max'])
ne = len(emin)
try:
ref_dnde = np.array(tab_e['ref_dnde'])
    except Exception:
ref_dnde = np.ones((ne))
try:
ref_flux = np.array(tab_e['ref_flux'])
    except Exception:
ref_flux = np.ones((ne))
try:
ref_eflux = np.array(tab_e['ref_eflux'])
    except Exception:
ref_eflux = np.ones((ne))
try:
ref_npred = np.array(tab_e['ref_npred'])
    except Exception:
ref_npred = np.ones((ne))
return cls(emin, emax, ref_dnde, ref_flux, ref_eflux, ref_npred) | 0.007976 |
def fetch_official_missions(data_dir, start_date, end_date):
"""
:param data_dir: (str) directory in which the output file will be saved
:param start_date: (datetime) first date of the range to be scraped
:param end_date: (datetime) last date of the range to be scraped
"""
official_missions = OfficialMissionsDataset()
df = official_missions.fetch(start_date, end_date)
save_to_csv(df, data_dir, "official-missions")
return df | 0.002155 |
def tf_if(condition, a, b):
"""
Implements an if condition in tensorflow.
:param condition: A boolean condition.
:param a: Case a.
:param b: Case b.
    :return: a if the condition is true, b otherwise.
"""
int_condition = tf.to_float(tf.to_int64(condition))
return a * int_condition + (1 - int_condition) * b | 0.002959 |
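The function targets the TensorFlow 1.x API (tf.to_float / tf.to_int64); the select-by-multiplication trick itself can be sanity-checked with plain floats (a sketch, not TensorFlow code):

def blend_if(condition, a, b):
    # cast the condition to 0.0 or 1.0 and use it as a blend weight
    c = float(bool(condition))
    return a * c + (1 - c) * b

assert blend_if(True, 10.0, 20.0) == 10.0
assert blend_if(False, 10.0, 20.0) == 20.0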
def get_from(input_file, property_names):
'''
Reads a geojson and returns a list of value tuples, each value corresponding to a
property in property_names.
Args:
input_file (str): File name.
property_names: List of strings; each string is a property name.
Returns:
List of value tuples.
'''
# get feature collections
with open(input_file) as f:
feature_collection = geojson.load(f)
features = feature_collection['features']
values = [tuple([feat['properties'].get(x)
for x in property_names]) for feat in features]
return values | 0.003155 |
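The extraction step mirrors the list comprehension above; the same logic applied to an in-memory feature collection (a sketch using plain dicts, so the geojson dependency is not needed):

feature_collection = {
    "type": "FeatureCollection",
    "features": [
        {"type": "Feature", "properties": {"id": "a1", "class": "residential"}, "geometry": None},
        {"type": "Feature", "properties": {"id": "a2", "class": "industrial"}, "geometry": None},
    ],
}
property_names = ["id", "class"]
values = [tuple(feat["properties"].get(x) for x in property_names)
          for feat in feature_collection["features"]]
assert values == [("a1", "residential"), ("a2", "industrial")]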
def api_key_post(params, request_path, _async=False):
"""
    From the Huobi (火币) demo: build a signed POST request and send it via the post method.
    :param params: request body parameters
    :param request_path: API endpoint path
    :return: response from http_post_request
"""
method = 'POST'
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
params_to_sign = {
'AccessKeyId': ACCESS_KEY,
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'Timestamp': timestamp
}
host_url = TRADE_URL
host_name = urllib.parse.urlparse(host_url).hostname
host_name = host_name.lower()
secret_sign = createSign(params_to_sign, method, host_name,
request_path, SECRET_KEY)
params_to_sign['Signature'] = secret_sign
if PRIVATE_KEY:
params_to_sign['PrivateSignature'] = createPrivateSign(secret_sign, PRIVATE_KEY)
url = host_url + request_path + '?' + urllib.parse.urlencode(params_to_sign)
return http_post_request(url, params, _async=_async) | 0.004132 |
def _redirect_edge(self, u_id, v_id, new_v_id):
"""Redirect the layer to a new node.
Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id`
while keeping all other property of the edge the same.
"""
layer_id = None
for index, edge_tuple in enumerate(self.adj_list[u_id]):
if edge_tuple[0] == v_id:
layer_id = edge_tuple[1]
self.adj_list[u_id][index] = (new_v_id, layer_id)
self.layer_list[layer_id].output = self.node_list[new_v_id]
break
for index, edge_tuple in enumerate(self.reverse_adj_list[v_id]):
if edge_tuple[0] == u_id:
layer_id = edge_tuple[1]
self.reverse_adj_list[v_id].remove(edge_tuple)
break
self.reverse_adj_list[new_v_id].append((u_id, layer_id))
for index, value in enumerate(self.layer_id_to_output_node_ids[layer_id]):
if value == v_id:
self.layer_id_to_output_node_ids[layer_id][index] = new_v_id
break | 0.003604 |
def append_data(self, data_buffer):
""" Append data to this audio stream
:Parameters:
`data_buffer` : str, basestring, Bytes
a buffer with a length multiple of (sample_width * channels)
"""
if len(data_buffer) % (self.sample_width * self.channels) != 0:
raise ValueError("length of data_buffer must be a multiple of (sample_width * channels)")
self._buffer += data_buffer
self._left += len(data_buffer) | 0.006073 |
def hash_func(name):
"""Hash the string using a hash algorithm found in
tombkeeper/Shellcode_Template_in_C.
"""
ret = 0
for char in name:
ret = ((ret << 5) + ret + ord(char)) & 0xffffffff
return hex(ret) | 0.004255 |
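This is the classic DJB2-style rolling hash (multiply by 33, add the character code, truncate to 32 bits), returned as a hex string; two quick checks of the values it produces:

assert hash_func("A") == hex(ord("A"))                                   # '0x41'
assert hash_func("AB") == hex((ord("A") * 33 + ord("B")) & 0xffffffff)   # '0x8a3'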
def _parse(self):
"""
Parse atomic data of the XML file.
"""
atom_counter = 0
structure_build = self.structure_builder
residues = self._extract_residues()
cur_model = None
cur_chain = None
structure_build.init_seg(' ') # There is never a SEGID present
for r in residues:
# New model?
if cur_model != r['model']:
cur_model = r['model']
try:
structure_build.init_model(cur_model)
                except PDBConstructionException as message:
self._handle_builder_exception(message, r)
# New chain?
if cur_chain != r['chain']:
cur_chain = r['chain']
try:
structure_build.init_chain(cur_chain)
                except PDBConstructionException as message:
self._handle_builder_exception(message, r)
# Create residue
if r['name'] in AA_LIST: # Get residue type crudely since there is no HETATM / ATOM
hetero_flag = ' '
elif r['name'] == 'WAT' or r['name'] == 'HOH':
hetero_flag = 'W'
else:
hetero_flag = 'H'
# Some terminal atoms are added at residue 0. This residue has a small number of atoms.
# Protonated non-terminal glycine has 7 atoms. Any of these residues is smaller.
# HETATMs have only a couple of atoms (3 for water for example) and they are ok.
if (len(r['atoms']) >= 7) or (hetero_flag != " "):
try:
structure_build.init_residue(r['name'], hetero_flag, r['number'], r['icode'])
                except PDBConstructionException as message:
self._handle_builder_exception(message, r)
# Create Atoms
for atom in r['atoms']:
a = self._parse_atom(atom)
if not sum(a['coord']): # e.g. HG of metal bound CYS coords are 0,0,0.
continue
try:
atom_counter += 1
# fullname = name; altloc is empty;
structure_build.init_atom(a['name'], a['coord'], a['bfactor'], a['occupancy'], ' ',
a['name'], atom_counter, a['element'], hetero_flag)
                    except PDBConstructionException as message:
self._handle_builder_exception(message, r)
elif len(r['atoms']) < 7: # Terminal Residues
for atom in r['atoms']:
a = self._parse_atom(atom)
if not sum(a['coord']): # e.g. HG of metal bound CYS coords are 0,0,0.
continue
atom_counter += 1
ter_atom = Atom(a['name'], a['coord'], a['bfactor'], a['occupancy'], ' ',
a['name'], atom_counter, a['element'], hetero_flag)
if a['name'] in N_TERMINAL_ATOMS:
inc_struct = self.structure_builder.get_structure()
for model in inc_struct:
for chain in model:
if chain.id == r['chain']:
for residue in chain: # Find First residue matching name
if residue.resname == r['name']:
residue.add(ter_atom)
break
elif a['name'] in C_TERMINAL_ATOMS:
inc_struct = self.structure_builder.get_structure()
c_ter = None
for model in inc_struct:
for chain in model:
if chain.id == r['chain']:
for residue in chain: # Find Last residue matching name
if residue.resname == r['name']:
c_ter = residue
if c_ter:
c_ter.add(ter_atom) | 0.00938 |
def set_coords(self, x=0, y=0, z=0, t=0):
"""
set coords of agent in an arbitrary world
"""
self.coords = {}
self.coords['x'] = x
self.coords['y'] = y
self.coords['z'] = z
self.coords['t'] = t | 0.007813 |
def _aload16(ins):
''' Loads a 16 bit value from a memory address
    If the 2nd argument starts with '*', it is always treated as
an indirect value.
'''
output = _addr(ins.quad[2])
output.append('ld e, (hl)')
output.append('inc hl')
output.append('ld d, (hl)')
output.append('ex de, hl')
output.append('push hl')
return output | 0.002778 |
def InitLocCheck(self):
"""
make an interactive grid in which users can edit locations
"""
# if there is a location without a name, name it 'unknown'
self.contribution.rename_item('locations', 'nan', 'unknown')
# propagate lat/lon values from sites table
self.contribution.get_min_max_lat_lon()
# propagate lithologies & geologic classes from sites table
self.contribution.propagate_cols_up(['lithologies',
'geologic_classes'], 'locations', 'sites')
res = self.contribution.propagate_min_max_up()
if cb.not_null(res):
self.contribution.propagate_cols_up(['age_unit'], 'locations', 'sites')
# set up frame
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
self.grid_frame = grid_frame3.GridFrame(self.contribution, self.WD,
'locations', 'locations', self.panel,
main_frame=self.main_frame)
# redefine default 'save & exit grid' button to go to next dialog instead
self.grid_frame.exitButton.SetLabel('Save and continue')
grid = self.grid_frame.grid
self.grid_frame.Bind(wx.EVT_BUTTON,
lambda event: self.onContinue(event, grid, self.InitAgeCheck),
self.grid_frame.exitButton)
# add back button
self.backButton = wx.Button(self.grid_frame.panel, id=-1, label='Back',
name='back_btn')
self.Bind(wx.EVT_BUTTON,
lambda event: self.onbackButton(event, self.InitSiteCheck),
self.backButton)
self.grid_frame.main_btn_vbox.Add(self.backButton, flag=wx.ALL, border=5)
# re-do fit
self.grid_frame.do_fit(None, min_size=self.min_size)
# center
self.grid_frame.Centre()
return | 0.004575 |
def select(self):
"""
        Makes this fit the selected fit on the GUI that is its parent
(Note: may be moved into GUI soon)
"""
        if self.GUI is None: return
        self.GUI.current_fit = self
        if self.tmax is not None and self.tmin is not None:
            self.GUI.update_bounds_boxes()
        if self.PCA_type is not None:
            self.GUI.update_PCA_box()
try: self.GUI.zijplot
except AttributeError: self.GUI.draw_figure(self.GUI.s)
self.GUI.fit_box.SetStringSelection(self.name)
self.GUI.get_new_PCA_parameters(-1) | 0.017094 |
def take_complement(list_, index_list):
""" Returns items in ``list_`` not indexed by index_list """
mask = not_list(index_to_boolmask(index_list, len(list_)))
return compress(list_, mask) | 0.005 |
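not_list, index_to_boolmask, and compress are helpers imported elsewhere in that module; the same effect with plain Python (a sketch, not the original helpers):

list_ = ['a', 'b', 'c', 'd', 'e']
index_list = [1, 3]
complement = [item for i, item in enumerate(list_) if i not in set(index_list)]
assert complement == ['a', 'c', 'e']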
def set_time(self, timestamp):
"""
set Device time (pass datetime object)
:param timestamp: python datetime object
"""
command = const.CMD_SET_TIME
command_string = pack(b'I', self.__encode_time(timestamp))
cmd_response = self.__send_command(command, command_string)
if cmd_response.get('status'):
return True
else:
raise ZKErrorResponse("can't set time") | 0.004425 |
def add_state(self):
"""Adds a new state"""
sid = len(self.states)
self.states.append(DFAState(sid))
return sid | 0.013986 |
async def CreateModel(self, cloud_tag, config, credential, name, owner_tag, region):
'''
cloud_tag : str
config : typing.Mapping[str, typing.Any]
credential : str
name : str
owner_tag : str
region : str
Returns -> typing.Union[_ForwardRef('Number'), str, typing.Sequence[~ModelMachineInfo], _ForwardRef('ModelMigrationStatus'), _ForwardRef('ModelSLAInfo'), _ForwardRef('EntityStatus'), typing.Sequence[~ModelUserInfo]]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='ModelManager',
request='CreateModel',
version=5,
params=_params)
_params['cloud-tag'] = cloud_tag
_params['config'] = config
_params['credential'] = credential
_params['name'] = name
_params['owner-tag'] = owner_tag
_params['region'] = region
reply = await self.rpc(msg)
return reply | 0.004057 |
def get_websensors(self):
"""
Get sensors with defined tag as a dictionary of format ``{name: status}``
"""
return {i.name: i.status for i in self.system.sensors if self.tag & i.tags} | 0.018265 |
def json_clean(obj):
"""Clean an object to ensure it's safe to encode in JSON.
Atomic, immutable objects are returned unmodified. Sets and tuples are
converted to lists, lists are copied and dicts are also copied.
Note: dicts whose keys could cause collisions upon encoding (such as a dict
with both the number 1 and the string '1' as keys) will cause a ValueError
to be raised.
Parameters
----------
obj : any python object
Returns
-------
out : object
A version of the input which will not cause an encoding error when
encoded as JSON. Note that this function does not *encode* its inputs,
it simply sanitizes it so that there will be no encoding errors later.
Examples
--------
>>> json_clean(4)
4
>>> json_clean(range(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> sorted(json_clean(dict(x=1, y=2)).items())
[('x', 1), ('y', 2)]
>>> sorted(json_clean(dict(x=1, y=2, z=[1,2,3])).items())
[('x', 1), ('y', 2), ('z', [1, 2, 3])]
>>> json_clean(True)
True
"""
# types that are 'atomic' and ok in json as-is. bool doesn't need to be
# listed explicitly because bools pass as int instances
atomic_ok = (unicode, int, types.NoneType)
# containers that we need to convert into lists
container_to_list = (tuple, set, types.GeneratorType)
if isinstance(obj, float):
# cast out-of-range floats to their reprs
if math.isnan(obj) or math.isinf(obj):
return repr(obj)
return obj
if isinstance(obj, atomic_ok):
return obj
if isinstance(obj, bytes):
return obj.decode(DEFAULT_ENCODING, 'replace')
if isinstance(obj, container_to_list) or (
hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)):
obj = list(obj)
if isinstance(obj, list):
return [json_clean(x) for x in obj]
if isinstance(obj, dict):
# First, validate that the dict won't lose data in conversion due to
# key collisions after stringification. This can happen with keys like
# True and 'true' or 1 and '1', which collide in JSON.
nkeys = len(obj)
nkeys_collapsed = len(set(map(str, obj)))
if nkeys != nkeys_collapsed:
raise ValueError('dict can not be safely converted to JSON: '
'key collision would lead to dropped values')
# If all OK, proceed by making the new dict that will be json-safe
out = {}
for k,v in obj.iteritems():
out[str(k)] = json_clean(v)
return out
# If we get here, we don't know how to handle the object, so we just get
# its repr and return that. This will catch lambdas, open sockets, class
# objects, and any other complicated contraption that json can't encode
return repr(obj) | 0.003117 |
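The key-collision guard is the subtle part of json_clean; the same check in isolation (a sketch in modern Python, independent of the Py2 helpers above):

def has_json_key_collision(d):
    # Keys that stringify to the same text (e.g. 1 and '1') would collide after JSON encoding.
    return len(d) != len(set(map(str, d)))

assert has_json_key_collision({1: 'a', '1': 'b'})
assert not has_json_key_collision({'x': 1, 'y': 2})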
def daynum_to_date(daynum, max_days=1000000):
""" Convert a number of days to a date. If it's out of range, default to a
max date. If it is not a number (or a numeric string), return None. Using
a max_days of more than 2932896 (9999-12-31) will throw an exception if the
specified daynum exceeds the max.
:param daynum: A number of days since Jan 1, 1970
"""
if daynum is None:
return None
try:
daycount = int(daynum)
except ValueError:
return None
if daycount > max_days:
# Using default: some time in the 48th century, clearly bogus.
daycount = max_days
return date(1970, 1, 1) + timedelta(daycount) | 0.001458 |
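A few quick checks (the function itself relies on date and timedelta from the datetime module):

from datetime import date

assert daynum_to_date(0) == date(1970, 1, 1)
assert daynum_to_date("10") == date(1970, 1, 11)    # numeric strings are accepted
assert daynum_to_date("not a number") is None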
def append_new_text(destination, text, join_str=None):
"""
This method provides the functionality of adding text appropriately
underneath the destination node. This will be either to the destination's
text attribute or to the tail attribute of the last child.
"""
if join_str is None:
join_str = ' '
if len(destination) > 0: # Destination has children
last = destination[-1]
if last.tail is None: # Last child has no tail
last.tail = text
else: # Last child has a tail
last.tail = join_str.join([last.tail, text])
else: # Destination has no children
if destination.text is None: # Destination has no text
destination.text = text
else: # Destination has a text
destination.text = join_str.join([destination.text, text]) | 0.001174 |
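A usage sketch with the standard-library xml.etree.ElementTree (lxml elements behave the same for the .text/.tail handling used here):

import xml.etree.ElementTree as ET

destination = ET.fromstring('<p>Hello <b>world</b></p>')
append_new_text(destination, 'again')
# <b> is the last child and had no tail, so the new text becomes its tail:
assert destination[-1].tail == 'again'

empty = ET.fromstring('<p/>')
append_new_text(empty, 'first')
assert empty.text == 'first'    # no children and no text: goes to .text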
def get_modes(self, zone):
"""Returns the set of modes the device can be assigned."""
self._populate_full_data()
device = self._get_device(zone)
return device['thermostat']['allowedModes'] | 0.009091 |
def nearby(self, expand=50):
""" Returns a new Region that includes the nearby neighbourhood of the the current region.
The new region is defined by extending the current region's dimensions
all directions by range number of pixels. The center of the new region remains the
same.
"""
return Region(
self.x-expand,
self.y-expand,
self.w+(2*expand),
self.h+(2*expand)).clipRegionToScreen() | 0.008264 |
def get_variable_value_for_variation(self, variable, variation):
""" Get the variable value for the given variation.
Args:
variable: The Variable for which we are getting the value.
variation: The Variation for which we are getting the variable value.
Returns:
The variable value or None if any of the inputs are invalid.
"""
if not variable or not variation:
return None
if variation.id not in self.variation_variable_usage_map:
self.logger.error('Variation with ID "%s" is not in the datafile.' % variation.id)
return None
# Get all variable usages for the given variation
variable_usages = self.variation_variable_usage_map[variation.id]
# Find usage in given variation
variable_usage = None
if variable_usages:
variable_usage = variable_usages.get(variable.id)
if variable_usage:
variable_value = variable_usage.value
self.logger.info('Value for variable "%s" for variation "%s" is "%s".' % (
variable.key,
variation.key,
variable_value
))
else:
variable_value = variable.defaultValue
self.logger.info('Variable "%s" is not used in variation "%s". Assigning default value "%s".' % (
variable.key,
variation.key,
variable_value
))
return variable_value | 0.008915 |
def broadcast_event(self, event, *args):
"""
        This is sent to all the sockets in this particular Namespace,
including itself.
"""
pkt = dict(type="event",
name=event,
args=args,
endpoint=self.ns_name)
for sessid, socket in six.iteritems(self.socket.server.sockets):
socket.send_packet(pkt) | 0.00489 |
def fetch_arc_errors(self):
'''
Evaluates the current tree of the arc and provides a list of errors that
the user should correct.
'''
error_list = []
hnode = self.validate_first_element()
if hnode:
error_list.append({'hook_error': hnode})
rnode = self.validate_last_element()
if rnode:
error_list.append({'reso_error': rnode})
try:
self.validate_generations()
except ArcGenerationError as ag:
error_list.append({'generation_error': str(ag)})
milecheck = self.validate_milestones()
if milecheck:
error_list.append({'mseq_error': milecheck})
return error_list | 0.004115 |
def add_at(self, index: int, requester: int, track: dict):
""" Adds a track at a specific index in the queue. """
self.queue.insert(min(index, len(self.queue) - 1), AudioTrack().build(track, requester)) | 0.013636 |
def _validate_volumes(input_volumes):
'''Check input_volumes contains a valid list of volumes.
Parameters
----------
input_volumes : list
list of volume values. Castable to numbers.
'''
if not (input_volumes is None or isinstance(input_volumes, list)):
raise TypeError("input_volumes must be None or a list.")
if isinstance(input_volumes, list):
for vol in input_volumes:
if not core.is_number(vol):
raise ValueError(
"Elements of input_volumes must be numbers: found {}"
.format(vol)
) | 0.0016 |
def mad(y_true, y_pred):
"""Median absolute deviation
"""
y_true, y_pred = _mask_nan(y_true, y_pred)
return np.mean(np.abs(y_true - y_pred)) | 0.00641 |
def load_SUEWS_Forcing_met_df_pattern(path_input, forcingfile_met_pattern):
"""Short summary.
Parameters
----------
forcingfile_met_pattern : type
Description of parameter `forcingfile_met_pattern`.
Returns
-------
type
Description of returned object.
"""
# list of met forcing files
path_input = path_input.resolve()
# forcingfile_met_pattern = os.path.abspath(forcingfile_met_pattern)
list_file_MetForcing = sorted([
f for f in path_input.glob(forcingfile_met_pattern)
if 'ESTM' not in f.name])
# print(forcingfile_met_pattern)
# print(list_file_MetForcing)
# load raw data
# read in forcing with dask.dataframe in parallel
dd_forcing_met = dd.read_csv(
list_file_MetForcing,
delim_whitespace=True,
comment='!',
error_bad_lines=True
)
# convert to normal pandas dataframe
df_forcing_met = dd_forcing_met.compute()
# `drop_duplicates` in case some duplicates mixed
df_forcing_met = df_forcing_met.drop_duplicates()
col_suews_met_forcing = [
'iy', 'id', 'it', 'imin',
'qn', 'qh', 'qe', 'qs', 'qf',
'U', 'RH', 'Tair', 'pres', 'rain', 'kdown',
'snow', 'ldown', 'fcld',
'Wuh', 'xsmd', 'lai', 'kdiff', 'kdir', 'wdir'
]
# rename these columns to match variables via the driver interface
df_forcing_met.columns = col_suews_met_forcing
# convert unit from kPa to hPa
df_forcing_met['pres'] *= 10
# add `isec` for WRF-SUEWS interface
df_forcing_met['isec'] = 0
# set correct data types
df_forcing_met[['iy', 'id', 'it', 'imin', 'isec']] = df_forcing_met[[
'iy', 'id', 'it', 'imin', 'isec']].astype(np.int64)
# set timestamp as index
idx_dt = pd.date_range(
*df_forcing_met.iloc[[0, -1], :4].astype(int).astype(str).apply(
lambda ser: ser.str.cat(sep=' '), axis=1).map(
lambda dt: pd.Timestamp.strptime(dt, '%Y %j %H %M')),
periods=df_forcing_met.shape[0])
df_forcing_met = df_forcing_met.set_index(idx_dt)
return df_forcing_met | 0.00047 |
def __regkey_value(self, path, name='', start_key=None):
r'''Return the data of value mecabrc at MeCab HKEY node.
On Windows, the path to the mecabrc as set in the Windows Registry is
used to deduce the path to libmecab.dll.
Returns:
The full path to the mecabrc on Windows.
Raises:
WindowsError: A problem was encountered in trying to locate the
value mecabrc at HKEY_CURRENT_USER\Software\MeCab.
'''
if sys.version < '3':
import _winreg as reg
else:
import winreg as reg
def _fn(path, name='', start_key=None):
if isinstance(path, str):
path = path.split('\\')
if start_key is None:
start_key = getattr(reg, path[0])
return _fn(path[1:], name, start_key)
else:
subkey = path.pop(0)
with reg.OpenKey(start_key, subkey) as handle:
if path:
return _fn(path, name, handle)
else:
desc, i = None, 0
while not desc or desc[0] != name:
desc = reg.EnumValue(handle, i)
i += 1
return desc[1]
return _fn(path, name, start_key) | 0.001458 |
def makeImages(self):
"""Make spiral images in sectors and steps.
Plain, reversed,
sectorialized, negative sectorialized
outline, outline reversed, lonely
only nodes, only edges, both
"""
# make layout
self.makeLayout()
self.setAgraph()
# make function that accepts a mode, a sector
# and nodes and edges True and False
self.plotGraph()
self.plotGraph("reversed",filename="tgraphR.png")
agents=n.concatenate(self.np.sectorialized_agents__)
for i, sector in enumerate(self.np.sectorialized_agents__):
self.plotGraph("plain", sector,"sector{:02}.png".format(i))
self.plotGraph("reversed",sector,"sector{:02}R.png".format(i))
self.plotGraph("plain", n.setdiff1d(agents,sector),"sector{:02}N.png".format(i))
self.plotGraph("reversed",n.setdiff1d(agents,sector),"sector{:02}RN.png".format(i))
self.plotGraph("plain", [],"BLANK.png") | 0.014881 |
def _parse_list(self, text, i):
"""Parse a list from source text starting at i."""
res = []
end_match = self.end_list_re.match(text, i)
old_current_type = self.current_type
while not end_match:
list_item, i = self._parse(text, i)
res.append(list_item)
end_match = self.end_list_re.match(text, i)
if not end_match:
m = self.list_delim_re.match(text, i)
if not m:
self._fail("Missing delimiter in list before content", text, i)
parsed = m.group(0)
i += len(parsed)
self.current_type = old_current_type
parsed = end_match.group(0)
i += len(parsed)
return res, i | 0.003896 |
def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5):
"""Non-differentiable Intersection over Union (IoU) for comparing the
similarity of two batch of data, usually be used for evaluating binary image segmentation.
The coefficient between 0 to 1, and 1 means totally match.
Parameters
-----------
output : tensor
A batch of distribution with shape: [batch_size, ....], (any dimensions).
target : tensor
The target distribution, format the same with `output`.
threshold : float
The threshold value to be true.
axis : tuple of integer
All dimensions are reduced, default ``(1,2,3)``.
smooth : float
This small value will be added to the numerator and denominator, see ``dice_coe``.
Notes
------
- IoU cannot be used as training loss, people usually use dice coefficient for training, IoU and hard-dice for evaluating.
"""
pre = tf.cast(output > threshold, dtype=tf.float32)
truth = tf.cast(target > threshold, dtype=tf.float32)
inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis) # AND
union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis) # OR
# old axis=[0,1,2,3]
# epsilon = 1e-5
# batch_iou = inse / (union + epsilon)
# new haodong
batch_iou = (inse + smooth) / (union + smooth)
iou = tf.reduce_mean(batch_iou, name='iou_coe')
return iou | 0.00417 |
def from_cli(opt, dyn_range_fac=1, precision='single',
inj_filter_rejector=None):
"""Parses the CLI options related to strain data reading and conditioning.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (gps-start-time, gps-end-time, strain-high-pass,
pad-data, sample-rate, (frame-cache or frame-files), channel-name,
fake-strain, fake-strain-seed, fake-strain-from-file, gating_file).
dyn_range_fac : {float, 1}, optional
A large constant to reduce the dynamic range of the strain.
precision : string
Precision of the returned strain ('single' or 'double').
inj_filter_rejector : InjFilterRejector instance; optional, default=None
If given send the InjFilterRejector instance to the inject module so
that it can store a reduced representation of injections if
necessary.
Returns
-------
strain : TimeSeries
The time series containing the conditioned strain data.
"""
gating_info = {}
if opt.frame_cache or opt.frame_files or opt.frame_type:
if opt.frame_cache:
frame_source = opt.frame_cache
if opt.frame_files:
frame_source = opt.frame_files
logging.info("Reading Frames")
if hasattr(opt, 'frame_sieve') and opt.frame_sieve:
sieve = opt.frame_sieve
else:
sieve = None
if opt.frame_type:
strain = pycbc.frame.query_and_read_frame(
opt.frame_type, opt.channel_name,
start_time=opt.gps_start_time-opt.pad_data,
end_time=opt.gps_end_time+opt.pad_data,
sieve=sieve)
else:
strain = pycbc.frame.read_frame(
frame_source, opt.channel_name,
start_time=opt.gps_start_time-opt.pad_data,
end_time=opt.gps_end_time+opt.pad_data,
sieve=sieve)
if opt.zpk_z and opt.zpk_p and opt.zpk_k:
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
logging.info("Applying zpk filter")
z = numpy.array(opt.zpk_z)
p = numpy.array(opt.zpk_p)
k = float(opt.zpk_k)
strain = filter_zpk(strain.astype(numpy.float64), z, p, k)
if opt.normalize_strain:
logging.info("Dividing strain by constant")
l = opt.normalize_strain
strain = strain / l
if opt.injection_file:
logging.info("Applying injections")
injector = InjectionSet(opt.injection_file)
injections = \
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor,
inj_filter_rejector=inj_filter_rejector)
if opt.sgburst_injection_file:
logging.info("Applying sine-Gaussian burst injections")
injector = SGBurstInjectionSet(opt.sgburst_injection_file)
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor)
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
if precision == 'single':
logging.info("Converting to float32")
strain = (strain * dyn_range_fac).astype(pycbc.types.float32)
elif precision == "double":
logging.info("Converting to float64")
strain = (strain * dyn_range_fac).astype(pycbc.types.float64)
else:
raise ValueError("Unrecognized precision {}".format(precision))
if opt.gating_file is not None:
logging.info("Gating times contained in gating file")
gate_params = numpy.loadtxt(opt.gating_file)
if len(gate_params.shape) == 1:
gate_params = [gate_params]
strain = gate_data(strain, gate_params)
gating_info['file'] = \
[gp for gp in gate_params \
if (gp[0] + gp[1] + gp[2] >= strain.start_time) \
and (gp[0] - gp[1] - gp[2] <= strain.end_time)]
if opt.autogating_threshold is not None:
# the + 0 is for making a copy
glitch_times = detect_loud_glitches(
strain + 0., threshold=opt.autogating_threshold,
cluster_window=opt.autogating_cluster,
low_freq_cutoff=opt.strain_high_pass,
high_freq_cutoff=opt.sample_rate/2,
corrupt_time=opt.pad_data+opt.autogating_pad)
gate_params = [[gt, opt.autogating_width, opt.autogating_taper] \
for gt in glitch_times]
if len(glitch_times) > 0:
logging.info('Autogating at %s',
', '.join(['%.3f' % gt for gt in glitch_times]))
strain = gate_data(strain, gate_params)
gating_info['auto'] = gate_params
logging.info("Resampling data")
strain = resample_to_delta_t(strain, 1.0/opt.sample_rate, method='ldas')
logging.info("Highpass Filtering")
strain = highpass(strain, frequency=opt.strain_high_pass)
if hasattr(opt, 'witness_frame_type') and opt.witness_frame_type:
stilde = strain.to_frequencyseries()
import h5py
tf_file = h5py.File(opt.witness_tf_file)
for key in tf_file:
witness = pycbc.frame.query_and_read_frame(opt.witness_frame_type, str(key),
start_time=strain.start_time, end_time=strain.end_time)
witness = (witness * dyn_range_fac).astype(strain.dtype)
tf = pycbc.types.load_frequencyseries(opt.witness_tf_file, group=key)
tf = tf.astype(stilde.dtype)
flen = int(opt.witness_filter_length * strain.sample_rate)
tf = pycbc.psd.interpolate(tf, stilde.delta_f)
tf_time = tf.to_timeseries()
window = Array(numpy.hanning(flen*2), dtype=strain.dtype)
tf_time[0:flen] *= window[flen:]
tf_time[len(tf_time)-flen:] *= window[0:flen]
tf = tf_time.to_frequencyseries()
kmax = min(len(tf), len(stilde)-1)
stilde[:kmax] -= tf[:kmax] * witness.to_frequencyseries()[:kmax]
strain = stilde.to_timeseries()
logging.info("Remove Padding")
start = opt.pad_data*opt.sample_rate
end = len(strain)-opt.sample_rate*opt.pad_data
strain = strain[start:end]
if opt.fake_strain or opt.fake_strain_from_file:
logging.info("Generating Fake Strain")
if not opt.low_frequency_cutoff:
raise ValueError('Please provide low frequency cutoff to '
'generate a fake strain')
duration = opt.gps_end_time - opt.gps_start_time
tlen = duration * opt.sample_rate
pdf = 1.0/128
        plen = int(opt.sample_rate / pdf) // 2 + 1
if opt.fake_strain_from_file:
logging.info("Reading ASD from file")
strain_psd = pycbc.psd.from_txt(opt.fake_strain_from_file, plen, pdf,
opt.low_frequency_cutoff, is_asd_file=True)
elif opt.fake_strain != 'zeroNoise':
logging.info("Making PSD for strain")
strain_psd = pycbc.psd.from_string(opt.fake_strain, plen, pdf,
opt.low_frequency_cutoff)
if opt.fake_strain == 'zeroNoise':
logging.info("Making zero-noise time series")
strain = TimeSeries(pycbc.types.zeros(tlen),
delta_t=1.0/opt.sample_rate,
epoch=opt.gps_start_time)
else:
logging.info("Making colored noise")
from pycbc.noise.reproduceable import colored_noise
lowfreq = opt.low_frequency_cutoff / 2.
strain = colored_noise(strain_psd, opt.gps_start_time,
opt.gps_end_time,
seed=opt.fake_strain_seed,
low_frequency_cutoff=lowfreq)
strain = resample_to_delta_t(strain, 1.0/opt.sample_rate)
if not opt.channel_name and (opt.injection_file \
or opt.sgburst_injection_file):
raise ValueError('Please provide channel names with the format '
'ifo:channel (e.g. H1:CALIB-STRAIN) to inject '
'simulated signals into fake strain')
if opt.injection_file:
logging.info("Applying injections")
injector = InjectionSet(opt.injection_file)
injections = \
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor,
inj_filter_rejector=inj_filter_rejector)
if opt.sgburst_injection_file:
logging.info("Applying sine-Gaussian burst injections")
injector = SGBurstInjectionSet(opt.sgburst_injection_file)
injector.apply(strain, opt.channel_name[0:2],
distance_scale=opt.injection_scale_factor)
if precision == 'single':
logging.info("Converting to float32")
strain = (dyn_range_fac * strain).astype(pycbc.types.float32)
elif precision == 'double':
logging.info("Converting to float64")
strain = (dyn_range_fac * strain).astype(pycbc.types.float64)
else:
raise ValueError("Unrecognized precision {}".format(precision))
if opt.taper_data:
logging.info("Tapering data")
# Use auto-gating stuff for this, a one-sided gate is a taper
pd_taper_window = opt.taper_data
gate_params = [(strain.start_time, 0., pd_taper_window)]
gate_params.append( (strain.end_time, 0.,
pd_taper_window) )
gate_data(strain, gate_params)
if opt.injection_file:
strain.injections = injections
strain.gating_info = gating_info
return strain | 0.002198 |
def threaded_per_region(q, params):
"""
Helper for multithreading on a per-region basis
:param q:
:param params:
:return:
"""
while True:
try:
params['region'] = q.get()
method = params['method']
method(params)
except Exception as e:
printException(e)
finally:
q.task_done() | 0.002577 |
def wif(self, s):
"""
Parse a WIF.
Return a :class:`Key <pycoin.key.Key>` or None.
"""
data = self.parse_b58_hashed(s)
if data is None or not data.startswith(self._wif_prefix):
return None
data = data[len(self._wif_prefix):]
is_compressed = (len(data) > 32)
if is_compressed:
data = data[:-1]
se = from_bytes_32(data)
return self._network.keys.private(se, is_compressed=is_compressed) | 0.004032 |
def get_vlan_brief_output_vlan_vlan_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vlan_brief = ET.Element("get_vlan_brief")
config = get_vlan_brief
output = ET.SubElement(get_vlan_brief, "output")
vlan = ET.SubElement(output, "vlan")
vlan_id_key = ET.SubElement(vlan, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
vlan_name = ET.SubElement(vlan, "vlan-name")
vlan_name.text = kwargs.pop('vlan_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003221 |
def write_buffers(self, conn, locked=True):
''' Write any buffer headers and payloads to the given connection.
Args:
conn (object) :
May be any object with a ``write_message`` method. Typically,
a Tornado ``WSHandler`` or ``WebSocketClientConnection``
locked (bool) :
Returns:
int : number of bytes sent
'''
if conn is None:
raise ValueError("Cannot write_buffers to connection None")
sent = 0
for header, payload in self._buffers:
yield conn.write_message(header, locked=locked)
yield conn.write_message(payload, binary=True, locked=locked)
sent += (len(header) + len(payload))
raise gen.Return(sent) | 0.002548 |
def query_list_of_words(target_word, list_of_words, edit_distance=1):
"""
Checks whether a target word is within editing distance of any one in a set of keywords.
Inputs: - target_word: A string containing the word we want to search in a list.
- list_of_words: A python list of words.
- edit_distance: For larger words, we also check for similar words based on edit_distance.
Outputs: - new_list_of_words: This is the input list of words minus any found keywords.
- found_list_of_words: This is the list of words that are within edit distance of the target word.
"""
# Initialize lists
new_list_of_words = list()
found_list_of_words = list()
append_left_keyword = new_list_of_words.append
append_found_keyword = found_list_of_words.append
# Iterate over the list of words
for word in list_of_words:
if len(word) > 6:
effective_edit_distance = edit_distance
else:
effective_edit_distance = 0 # No edit distance for small words.
if abs(len(word)-len(target_word)) <= effective_edit_distance:
if nltk.edit_distance(word, target_word) <= effective_edit_distance:
append_found_keyword(word)
else:
append_left_keyword(word)
else:
append_left_keyword(word)
return new_list_of_words, found_list_of_words | 0.00495 |
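Assuming nltk is importable, a quick check of the behaviour described above (the Levenshtein distance between 'keywords' and 'keyword' is 1, and words of six characters or fewer must match exactly):

remaining, found = query_list_of_words("keyword", ["keywords", "cat", "keyword"])
assert remaining == ["cat"]
assert found == ["keywords", "keyword"]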
def is_title(p):
"""
Certain p tags are denoted as ``Title`` tags. This function will return
True if the passed in p tag is considered a title.
"""
w_namespace = get_namespace(p, 'w')
styles = p.xpath('.//w:pStyle', namespaces=p.nsmap)
if len(styles) == 0:
return False
style = styles[0]
return style.get('%sval' % w_namespace) == 'Title' | 0.002618 |
def _setup_metric_group_values(self):
"""
Return the list of MetricGroupValues objects for this metrics response,
by processing its metrics response string.
The lines in the metrics response string are::
MetricsResponse: MetricsGroup{0,*}
<emptyline> a third empty line at the end
MetricsGroup: MetricsGroupName
ObjectValues{0,*}
<emptyline> a second empty line after each MG
ObjectValues: ObjectURI
Timestamp
ValueRow{1,*}
<emptyline> a first empty line after this blk
"""
mg_defs = self._metrics_context.metric_group_definitions
metric_group_name = None
resource_uri = None
dt_timestamp = None
object_values = None
metric_group_values = list()
state = 0
for mr_line in self._metrics_response_str.splitlines():
if state == 0:
if object_values is not None:
# Store the result from the previous metric group
mgv = MetricGroupValues(metric_group_name, object_values)
metric_group_values.append(mgv)
object_values = None
if mr_line == '':
# Skip initial (or trailing) empty lines
pass
else:
# Process the next metrics group
metric_group_name = mr_line.strip('"') # No " or \ inside
assert metric_group_name in mg_defs
m_defs = mg_defs[metric_group_name].metric_definitions
object_values = list()
state = 1
elif state == 1:
if mr_line == '':
# There are no (or no more) ObjectValues items in this
# metrics group
state = 0
else:
# There are ObjectValues items
resource_uri = mr_line.strip('"') # No " or \ inside
state = 2
elif state == 2:
# Process the timestamp
assert mr_line != ''
try:
dt_timestamp = datetime_from_timestamp(int(mr_line))
except ValueError:
# Sometimes, the returned epoch timestamp values are way
# too large, e.g. 3651584404810066 (which would translate
# to the year 115791 A.D.). Python datetime supports
# up to the year 9999. We circumvent this issue by
# simply using the current date&time.
# TODO: Remove the circumvention for too large timestamps.
dt_timestamp = datetime.now(pytz.utc)
state = 3
elif state == 3:
if mr_line != '':
# Process the metric values in the ValueRow line
str_values = mr_line.split(',')
metrics = dict()
for m_name in m_defs:
m_def = m_defs[m_name]
m_type = m_def.type
m_value_str = str_values[m_def.index]
m_value = _metric_value(m_value_str, m_type)
metrics[m_name] = m_value
ov = MetricObjectValues(
self._client, mg_defs[metric_group_name], resource_uri,
dt_timestamp, metrics)
object_values.append(ov)
# stay in this state, for more ValueRow lines
else:
# On the empty line after the last ValueRow line
state = 1
return metric_group_values | 0.000506 |
def whois(ip_address):
"""Whois client for Python"""
whois_ip = str(ip_address)
try:
query = socket.gethostbyname(whois_ip)
except Exception:
query = whois_ip
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("whois.ripe.net", 43))
s.send(query.encode("utf8") + b"\r\n")
answer = b""
while True:
d = s.recv(4096)
answer += d
if not d:
break
s.close()
ignore_tag = b"remarks:"
# ignore all lines starting with the ignore_tag
lines = [line for line in answer.split(b"\n") if not line or (line and not line.startswith(ignore_tag))] # noqa: E501
# remove empty lines at the bottom
for i in range(1, len(lines)):
if not lines[-i].strip():
del lines[-i]
else:
break
return b"\n".join(lines[3:]) | 0.001163 |
def bbox(self, out_crs=None):
"""
Return data bounding box.
Parameters
----------
out_crs : ``rasterio.crs.CRS``
rasterio CRS object (default: CRS of process pyramid)
Returns
-------
bounding box : geometry
Shapely geometry object
"""
return reproject_geometry(
self.process.config.area_at_zoom(),
src_crs=self.process.config.process_pyramid.crs,
dst_crs=self.pyramid.crs if out_crs is None else out_crs
) | 0.003604 |
def sitetree_breadcrumbs(parser, token):
"""Parses sitetree_breadcrumbs tag parameters.
Two notation types are possible:
1. Two arguments:
{% sitetree_breadcrumbs from "mytree" %}
Used to render breadcrumb path for "mytree" site tree.
2. Four arguments:
{% sitetree_breadcrumbs from "mytree" template "sitetree/mycrumb.html" %}
Used to render breadcrumb path for "mytree" site tree using specific
template "sitetree/mycrumb.html"
"""
tokens = token.split_contents()
use_template = detect_clause(parser, 'template', tokens)
tokens_num = len(tokens)
if tokens_num == 3:
tree_alias = parser.compile_filter(tokens[2])
return sitetree_breadcrumbsNode(tree_alias, use_template)
else:
raise template.TemplateSyntaxError(
'%r tag requires two arguments. E.g. {%% sitetree_breadcrumbs from "mytree" %%}.' % tokens[0]) | 0.003155 |
def get_fqn(base_fqn, delimiter, name=None):
"""Return the fully qualified name of an object within this context.
If the name passed already appears to be a fully qualified name, it
will be returned with no further processing.
"""
if name and name.startswith("%s%s" % (base_fqn, delimiter)):
return name
return delimiter.join([_f for _f in [base_fqn, name] if _f]) | 0.002506 |
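A few quick checks with made-up names (the base FQN, delimiter, and names below are illustrative only):

assert get_fqn("stack", "-", "bucket") == "stack-bucket"
assert get_fqn("stack", "-", "stack-bucket") == "stack-bucket"   # already fully qualified
assert get_fqn("stack", "-", None) == "stack"                    # empty name parts are dropped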
def iter_content(self, chunk_size=1024):
"""Return the file content as an iterable stream."""
r = self._session.get(self.content, stream=True)
return r.iter_content(chunk_size) | 0.01 |
def get(self, key, default=None):
"""Returns a token by text or local ID, with a default.
A given text image may be associated with more than one symbol ID. This will return the first definition.
Note:
User defined symbol IDs are always one-based. Symbol zero is a special symbol that
always has no text.
Args:
key (unicode | int): The key to lookup.
default(Optional[SymbolToken]): The default to return if the key is not found
Returns:
SymbolToken: The token associated with the key or the default if it doesn't exist.
"""
if isinstance(key, six.text_type):
            return self.__mapping.get(key, default)
if not isinstance(key, int):
raise TypeError('Key must be int or Unicode sequence.')
# TODO determine if $0 should be returned for all symbol tables.
if key == 0:
return SYMBOL_ZERO_TOKEN
# Translate one-based SID to zero-based intern table
index = key - 1
if index < 0 or key > len(self):
return default
return self.__symbols[index] | 0.005177 |
def get_parent_families(self, family_id):
"""Gets the parent families of the given ``id``.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` to
query
return: (osid.relationship.FamilyList) - the parent families of
the ``id``
raise: NotFound - a ``Family`` identified by ``Id is`` not
found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bins
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalogs(catalog_id=family_id)
return FamilyLookupSession(
self._proxy,
self._runtime).get_families_by_ids(
list(self.get_parent_family_ids(family_id))) | 0.002893 |
def is_venv(directory, executable='python'):
"""
:param directory: base directory of python environment
"""
path=os.path.join(directory, 'bin', executable)
return os.path.isfile(path) | 0.009852 |
def hosts_append(hostsfile='/etc/hosts', ip_addr=None, entries=None):
'''
Append a single line to the /etc/hosts file.
CLI Example:
.. code-block:: bash
salt '*' dnsutil.hosts_append /etc/hosts 127.0.0.1 ad1.yuk.co,ad2.yuk.co
'''
host_list = entries.split(',')
hosts = parse_hosts(hostsfile=hostsfile)
if ip_addr in hosts:
for host in host_list:
if host in hosts[ip_addr]:
host_list.remove(host)
if not host_list:
return 'No additional hosts were added to {0}'.format(hostsfile)
append_line = '\n{0} {1}'.format(ip_addr, ' '.join(host_list))
with salt.utils.files.fopen(hostsfile, 'a') as fp_:
fp_.write(salt.utils.stringutils.to_str(append_line))
return 'The following line was added to {0}:{1}'.format(hostsfile,
append_line) | 0.00222 |
def get_shake_info(self, ticket):
"""
        Get device and user information for WeChat Shake Around (摇周边).
        For details, see
        http://mp.weixin.qq.com/wiki/3/34904a5db3d0ec7bb5306335b8da1faf.html
        :param ticket: Shake Around ticket, obtained from the URL reached by shaking; valid for 30 minutes
        :return: device and user information
"""
res = self._post(
'shakearound/user/getshakeinfo',
data={
'ticket': ticket
},
result_processor=lambda x: x['data']
)
return res | 0.004202 |
async def clear(self):
"""Manually expire all sessions in the pool."""
for session in list(self.values()):
if session.state != STATE_CLOSED:
await session._remote_closed()
self.sessions.clear()
super(SessionManager, self).clear() | 0.006897 |
def text_to_speech(text, synthesizer, synth_args, sentence_break):
"""
Converts given text to a pydub AudioSegment using a specified speech
synthesizer. At the moment, IBM Watson's text-to-speech API is the only
available synthesizer.
:param text:
The text that will be synthesized to audio.
:param synthesizer:
The text-to-speech synthesizer to use. At the moment, 'watson' is the
only available input.
:param synth_args:
A dictionary of arguments to pass to the synthesizer. Parameters for
authorization (username/password) should be passed here.
:param sentence_break:
A string that identifies a sentence break or another logical break in
the text. Necessary for text longer than 50 words. Defaults to '. '.
"""
if len(text.split()) < 50:
if synthesizer == 'watson':
with open('.temp.wav', 'wb') as temp:
temp.write(watson_request(text=text, synth_args=synth_args).content)
response = AudioSegment.from_wav('.temp.wav')
os.remove('.temp.wav')
return response
else:
raise ValueError('"' + synthesizer + '" synthesizer not found.')
else:
segments = []
for i, sentence in enumerate(text.split(sentence_break)):
if synthesizer == 'watson':
with open('.temp' + str(i) + '.wav', 'wb') as temp:
temp.write(watson_request(text=sentence, synth_args=synth_args).content)
segments.append(AudioSegment.from_wav('.temp' + str(i) + '.wav'))
os.remove('.temp' + str(i) + '.wav')
else:
raise ValueError('"' + synthesizer + '" synthesizer not found.')
response = segments[0]
for segment in segments[1:]:
response = response + segment
return response | 0.002644 |
def _filter_version_specific_options(self, tmos_ver, **kwargs):
'''Filter version-specific optional parameters
        Some optional parameters only exist in v12.1.0 and greater;
        filter these out for earlier versions to allow backward compatibility.
'''
if LooseVersion(tmos_ver) < LooseVersion('12.1.0'):
for k, parms in self._meta_data['optional_parameters'].items():
for r in kwargs.get(k, []):
for parm in parms:
value = r.pop(parm, None)
if value is not None:
logger.info(
"Policy parameter %s:%s is invalid for v%s",
k, parm, tmos_ver) | 0.002625 |
def intersects(self, b):
""" Return True if a part of the two bounds overlaps.
"""
return max(self.x, b.x) < min(self.x+self.width, b.x+b.width) \
and max(self.y, b.y) < min(self.y+self.height, b.y+b.height) | 0.012195 |
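A standalone sketch of the same overlap test with a minimal rectangle type (the names below are illustrative, not the original class):

from collections import namedtuple

Rect = namedtuple('Rect', 'x y width height')

def rects_intersect(a, b):
    # Overlap on both axes: the later start must come strictly before the earlier end.
    return (max(a.x, b.x) < min(a.x + a.width, b.x + b.width)
            and max(a.y, b.y) < min(a.y + a.height, b.y + b.height))

assert rects_intersect(Rect(0, 0, 10, 10), Rect(5, 5, 10, 10))
assert not rects_intersect(Rect(0, 0, 10, 10), Rect(10, 0, 5, 5))   # touching edges do not count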
def reset_stats_history(self):
"""Reset the stats history (dict of GlancesAttribute)."""
if self.history_enable():
reset_list = [a['name'] for a in self.get_items_history_list()]
logger.debug("Reset history for plugin {} (items: {})".format(self.plugin_name, reset_list))
self.stats_history.reset() | 0.008571 |
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. expected <= 2."
% (array.ndim))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s."
% (dtype_orig, array.dtype))
warnings.warn(msg, DataConversionWarning)
return array | 0.000164 |
def start(self):
'''
        Start the fuzzing session.
        If the fuzzer is already running, it will return immediately.
'''
if self._started:
self.logger.warning('called while fuzzer is running. ignoring.')
return
self._started = True
assert(self.model)
assert(self.user_interface)
assert(self.target)
if self._load_session():
self._check_session_validity()
self._set_test_ranges(
self.session_info.start_index,
self.session_info.end_index,
self.session_info.test_list_str
)
else:
self.session_info.kitty_version = _get_current_version()
# TODO: write hash for high level
self.session_info.data_model_hash = self.model.hash()
# if self.session_info.end_index is None:
# self.session_info.end_index = self.model.last_index()
if self._test_list is None:
self._test_list = StartEndList(0, self.model.num_mutations())
else:
self._test_list.set_last(self.model.last_index())
list_count = self._test_list.get_count()
self._test_list.skip(list_count - 1)
self.session_info.end_index = self._test_list.current()
self._test_list.reset()
self._store_session()
self._test_list.skip(self.session_info.current_index)
self.session_info.test_list_str = self._test_list.as_test_list_str()
self._set_signal_handler()
self.user_interface.set_data_provider(self.dataman)
self.user_interface.set_continue_event(self._continue_event)
self.user_interface.start()
self.session_info.start_time = time.time()
try:
self._start_message()
self.target.setup()
start_from = self.session_info.current_index
if self._skip_env_test:
self.logger.info('Skipping environment test')
else:
self.logger.info('Performing environment test')
self._test_environment()
self._in_environment_test = False
self._test_list.reset()
self._test_list.skip(start_from)
self.session_info.current_index = start_from
self.model.skip(self._test_list.current())
self._start()
return True
except Exception as e:
self.logger.error('Error occurred while fuzzing: %s', repr(e))
self.logger.error(traceback.format_exc())
return False | 0.000773 |
def remove_perm(perm, user_or_group, forum=None):
""" Remove a permission to a user (anonymous or not) or a group. """
user, group = get_identity(user_or_group)
perm = ForumPermission.objects.get(codename=perm)
if user:
UserForumPermission.objects.filter(
forum=forum,
permission=perm,
user=user if not user.is_anonymous else None,
anonymous_user=user.is_anonymous,
).delete()
if group:
GroupForumPermission.objects.filter(forum=forum, permission=perm, group=group).delete() | 0.003534 |
def save_ckpt(
sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list=None, global_step=None, printable=False
):
"""Save parameters into `ckpt` file.
Parameters
------------
sess : Session
TensorFlow Session.
mode_name : str
The name of the model, default is ``model.ckpt``.
save_dir : str
The path / file directory to the `ckpt`, default is ``checkpoint``.
var_list : list of tensor
The parameters / variables (tensor) to be saved. If empty, save all global variables (default).
global_step : int or None
Step number.
printable : boolean
Whether to print all parameters information.
See Also
--------
load_ckpt
"""
if sess is None:
raise ValueError("session is None.")
if var_list is None:
var_list = []
ckpt_file = os.path.join(save_dir, mode_name)
if var_list == []:
var_list = tf.global_variables()
logging.info("[*] save %s n_params: %d" % (ckpt_file, len(var_list)))
if printable:
for idx, v in enumerate(var_list):
logging.info(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape())))
saver = tf.train.Saver(var_list)
saver.save(sess, ckpt_file, global_step=global_step) | 0.003098 |
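A minimal usage sketch for the helper above, assuming a TF1-style graph/session workflow (the variable, directory, and step number are illustrative):

import os
import tensorflow as tf  # TF1-style API, matching the snippet above

# build a trivial graph with one variable to save
w = tf.Variable(tf.zeros([3, 3]), name='w')
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# tf.train.Saver does not create missing directories, so create it first
os.makedirs('checkpoint', exist_ok=True)
save_ckpt(sess=sess, mode_name='model.ckpt', save_dir='checkpoint',
          global_step=100, printable=True)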
def _expectation(p, obj1, feat1, obj2, feat2, nghp=None):
"""
Nota Bene: if only one object is passed, obj1 is
associated with x_n, whereas obj2 with x_{n+1}
"""
if obj2 is None:
gaussian = Gaussian(p.mu[:-1], p.cov[0, :-1])
return expectation(gaussian, (obj1, feat1), nghp=nghp)
elif obj1 is None:
gaussian = Gaussian(p.mu[1:], p.cov[0, 1:])
return expectation(gaussian, (obj2, feat2), nghp=nghp)
else:
return expectation(p, (obj1, feat1), (obj2, feat2), nghp=nghp) | 0.001869 |
def execute(api):
"""Executes operation.
Args:
api: The base API object
Returns:
A response body object
"""
try:
return api.execute()
except Exception as exception:
now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
_print_error('%s: Exception %s: %s' % (now, type(exception).__name__,
str(exception)))
# Re-raise exception to be handled by retry logic
raise exception | 0.012371 |
def check(self, obj, prefix=None):
"""Recursively look for :class:`.LocalSetting`s in ``obj``.
``obj`` can be a dict, tuple, or list. Other types are skipped.
This will prompt to get the value of local settings (excluding
those that have had prompting disabled) that haven't already
been set locally.
Returns ``True`` or ``False`` to indicate whether settings were
successfully checked.
"""
self._populate_registry(obj, prefix)
settings_to_write, missing = self._check(obj, prefix, {}, {})
if settings_to_write:
self.strategy.write_settings(settings_to_write, self.file_name, self.section)
if missing:
for name, local_setting in missing.items():
printer.print_error('Local setting `{name}` must be set'.format(name=name))
return False
return True | 0.004415 |
def _generateFind(self, **kwargs):
"""Generator which yields matches on AXChildren."""
for needle in self._generateChildren():
if needle._match(**kwargs):
yield needle | 0.009479 |
def eurominder_explorer():
"""Generate interactive data exploration tool
Output is an html page, rendered to 'eurominder_explorer.html' in the output directory.
Data structure (only relevant items are shown below):
.. code-block:: none
{
"nuts2Features": [{"geometry": [<boundary geometry>],
"properties": { "NUTS_ID": "AT11" } },
{"geometry": [<boundary geometry>],
"properties": { "NUTS_ID": "AT12" } },
...],
"regionNames": {"AT11": "Burgenland",
"AT13": "Wien",
...
},
"indicatorDescriptions": {"000005": "some description",
"000010": "another description",
...},
"indicators": [ {"key": "AT11",
"000005 2010": 37.0
"000005 2011": 38.0
...
"000010 2010": 1234.0
"000010 2011": 1235.0
...
"Mean temperature (C) Apr-Sep": 16.0,
"Mean temperature (C) Oct-Mar": 9.0,
...
},
{"key": "AT12",
"000005 2010: 19.0,
..
},
...
]
"ranges": {"000005": [10, 20],
"000010": [1234, 5678],
...,
"Mean temperature (C) Apr-Sep": 16.0,
"Mean temperature (C) Oct-Mar": 9.0,
...
},
"years": ["1999", "2000", ...],
}
"""
# switch off some pandas warnings
import pandas as pd
pd.options.mode.chained_assignment = None
# page template
template = jenv.get_template("eurominder_explorer.html")
# container for template context
context = dict()
# plot data
data = dict()
# a database client/session to run queries in
cl = client.get_client()
session = cl.create_session()
#
# query data
#
# get region names and keys
query = session.query(models.NUTS2Region.key,
models.NUTS2Region.name).all()
data['regionNames'] = {t[0]: t[1] for t in query}
# get indicator descriptions
query = session.query(models.EuroStatIndicator.number,
models.EuroStatIndicator.description)
data['indicatorDescriptions'] = {t[0]: t[1] for t in query}
# build options for HTML selector for selecting which indicator to show
context['y_field_selector_options'] = ''
context['x_field_selector_options'] = ''
for i_field, number in enumerate(sorted(data['indicatorDescriptions'].keys())):
description = data['indicatorDescriptions'][number]
# first field is default option for y selector, second field for x selector
y_selected = " selected=\"selected\"" if i_field == 0 else ""
x_selected = " selected=\"selected\"" if i_field == 1 else ""
context['y_field_selector_options'] += "<option value=\"{:s}\" {:s}>{:s}</option>" \
"".format(number, y_selected, description)
context['x_field_selector_options'] += "<option value=\"{:s}\" {:s}>{:s}</option>" \
"".format(number, x_selected, description)
# get all eurostats data
query = session.query(models.EuroStatValue.value,
models.EuroStatValue.year,
models.EuroStatIndicator.number,
models.NUTS2Region.key) \
.join(models.EuroStatIndicator) \
.join(models.NUTS2Region)
eurostat = cl.df_query(query)
# reformat to list of value dicts per region, as described above
data['indicators'] = []
for region_key, region_data in eurostat.groupby('key'):
region_data['str_id'] = region_data['number'] + ' ' + region_data['year'].apply(lambda x: str(x))
value_dict = region_data.set_index('str_id')['value'].to_dict()
value_dict['key'] = region_key
data['indicators'].append(value_dict)
# store min/max values per indicator across all years, to keep plot scales fixed on playback
data['ranges'] = dict()
for number, indicator_data in eurostat.groupby('number'):
data['ranges'][number] = [indicator_data['value'].min(), indicator_data['value'].max()]
# build options for year selector (also store options in a list, for playback control)
data['years'] = sorted([str(y) for y in eurostat['year'].unique()])
context['year_selector_options'] = ''
for year in data['years']:
year_selected = " selected=\"selected\"" if year == "2010" else ""
context['year_selector_options'] += "<option {:s}>{:s}</option>".format(year_selected, year)
# query climate data
query = session.query(models.ClimateValue.value,
models.ClimateIndicator.description,
models.NUTS2Region.key) \
.join(models.ClimateIndicator) \
.join(models.NUTS2Region)
climate = cl.df_query(query).set_index('key')
# inject data into 'indicators' records
for record in data['indicators']:
try:
# index by a list of keys to be sure to get back a DataFrame even for one matching record
cdata = climate.loc[[record['key']]]
except KeyError:
# no data available
continue
for description, value in cdata.set_index('description')['value'].iteritems():
record[description] = value
# sorted list of climate descriptions
data['climateDescriptions'] = sorted(list(climate['description'].unique()))
# store ranges of climate indicators, lump summer and winter together (by stripping season indicator)
# to get comparable scales
climate['variable_description'] = climate['description'].map(lambda s: ' '.join(s.split()[:-1]))
for variable_description, indicator_data in climate.groupby('variable_description'):
r = [indicator_data['value'].min(), indicator_data['value'].max()]
data['ranges'][variable_description + pipeline.CLIMATE_SEASON_SUFFIXES['winter']] = r
data['ranges'][variable_description + pipeline.CLIMATE_SEASON_SUFFIXES['summer']] = r
# inject GeoJSON polygons
with open(pipeline.NUTS2GeoJSONInputFile().input_file) as in_file:
nuts2_boundaries = json.load(in_file)
data['nuts2Features'] = nuts2_boundaries['features']
#
# render template
#
# inject data
context['data'] = json.dumps(data, indent=4)
out_file = path.join(out_dir, "eurominder_explorer.html")
html_content = template.render(**context)
with open(out_file, 'w') as f:
f.write(html_content)
# done, clean up
session.close() | 0.002767 |
def dim_range_key(eldim):
"""
Returns the key to look up a dimension range.
"""
if isinstance(eldim, dim):
dim_name = repr(eldim)
if dim_name.startswith("'") and dim_name.endswith("'"):
dim_name = dim_name[1:-1]
else:
dim_name = eldim.name
return dim_name | 0.003175 |
def fromProfileName(cls, name):
"""Return an `Origin` from a given configuration profile name.
:see: `ProfileStore`.
"""
session = bones.SessionAPI.fromProfileName(name)
return cls(session) | 0.008696 |
def viewBoxAxisRange(viewBox, axisNumber):
""" Calculates the range of an axis of a viewBox.
"""
rect = viewBox.childrenBoundingRect() # taken from viewBox.autoRange()
if rect is not None:
if axisNumber == X_AXIS:
return rect.left(), rect.right()
elif axisNumber == Y_AXIS:
return rect.bottom(), rect.top()
else:
raise ValueError("axisNumber should be 0 or 1, got: {}".format(axisNumber))
else:
# Does this happen? Probably when the plot is empty.
raise AssertionError("No children bbox. Plot range not updated.") | 0.004926 |
def consistency(self, consistency):
"""
Sets the consistency level for the operation. See :class:`.ConsistencyLevel`.
.. code-block:: python
for user in User.objects(id=3).consistency(CL.ONE):
print(user)
"""
clone = copy.deepcopy(self)
clone._consistency = consistency
return clone | 0.008152 |
def batch_workflow_status(self, batch_workflow_id):
"""Checks GBDX batch workflow status.
Args:
batch workflow_id (str): Batch workflow id.
Returns:
Batch Workflow status (str).
"""
self.logger.debug('Get status of batch workflow: ' + batch_workflow_id)
url = '%(base_url)s/batch_workflows/%(batch_id)s' % {
'base_url': self.base_url, 'batch_id': batch_workflow_id
}
r = self.gbdx_connection.get(url)
return r.json() | 0.003766 |
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 4:
return self.print_help()
text_format = gf.safe_unicode(self.actual_arguments[0])
if text_format == u"list":
text = gf.safe_unicode(self.actual_arguments[1])
elif text_format in TextFileFormat.ALLOWED_VALUES:
text = self.actual_arguments[1]
if not self.check_input_file(text):
return self.ERROR_EXIT_CODE
else:
return self.print_help()
l1_id_regex = self.has_option_with_value(u"--l1-id-regex")
l2_id_regex = self.has_option_with_value(u"--l2-id-regex")
l3_id_regex = self.has_option_with_value(u"--l3-id-regex")
id_regex = self.has_option_with_value(u"--id-regex")
class_regex = self.has_option_with_value(u"--class-regex")
sort = self.has_option_with_value(u"--sort")
backwards = self.has_option([u"-b", u"--backwards"])
quit_after = gf.safe_float(self.has_option_with_value(u"--quit-after"), None)
start_fragment = gf.safe_int(self.has_option_with_value(u"--start"), None)
end_fragment = gf.safe_int(self.has_option_with_value(u"--end"), None)
parameters = {
gc.PPN_TASK_IS_TEXT_MUNPARSED_L1_ID_REGEX: l1_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L2_ID_REGEX: l2_id_regex,
gc.PPN_TASK_IS_TEXT_MUNPARSED_L3_ID_REGEX: l3_id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_CLASS_REGEX: class_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_REGEX: id_regex,
gc.PPN_TASK_IS_TEXT_UNPARSED_ID_SORT: sort,
}
if (text_format == TextFileFormat.MUNPARSED) and ((l1_id_regex is None) or (l2_id_regex is None) or (l3_id_regex is None)):
self.print_error(u"You must specify --l1-id-regex and --l2-id-regex and --l3-id-regex for munparsed format")
return self.ERROR_EXIT_CODE
if (text_format == TextFileFormat.UNPARSED) and (id_regex is None) and (class_regex is None):
self.print_error(u"You must specify --id-regex and/or --class-regex for unparsed format")
return self.ERROR_EXIT_CODE
language = gf.safe_unicode(self.actual_arguments[2])
output_file_path = self.actual_arguments[3]
if not self.check_output_file(output_file_path):
return self.ERROR_EXIT_CODE
text_file = self.get_text_file(text_format, text, parameters)
if text_file is None:
self.print_error(u"Unable to build a TextFile from the given parameters")
return self.ERROR_EXIT_CODE
elif len(text_file) == 0:
self.print_error(u"No text fragments found")
return self.ERROR_EXIT_CODE
text_file.set_language(language)
self.print_info(u"Read input text with %d fragments" % (len(text_file)))
if start_fragment is not None:
self.print_info(u"Slicing from index %d" % (start_fragment))
if end_fragment is not None:
self.print_info(u"Slicing to index %d" % (end_fragment))
text_slice = text_file.get_slice(start_fragment, end_fragment)
self.print_info(u"Synthesizing %d fragments" % (len(text_slice)))
if quit_after is not None:
self.print_info(u"Stop synthesizing upon reaching %.3f seconds" % (quit_after))
try:
synt = Synthesizer(rconf=self.rconf, logger=self.logger)
synt.synthesize(
text_slice,
output_file_path,
quit_after=quit_after,
backwards=backwards
)
self.print_success(u"Created file '%s'" % output_file_path)
synt.clear_cache()
return self.NO_ERROR_EXIT_CODE
except ImportError as exc:
tts = self.rconf[RuntimeConfiguration.TTS]
if tts == Synthesizer.AWS:
self.print_error(u"You need to install Python module boto3 to use the AWS Polly TTS API wrapper. Run:")
self.print_error(u"$ pip install boto3")
self.print_error(u"or, to install for all users:")
self.print_error(u"$ sudo pip install boto3")
elif tts == Synthesizer.NUANCE:
self.print_error(u"You need to install Python module requests to use the Nuance TTS API wrapper. Run:")
self.print_error(u"$ pip install requests")
self.print_error(u"or, to install for all users:")
self.print_error(u"$ sudo pip install requests")
else:
self.print_error(u"An unexpected error occurred while synthesizing text:")
self.print_error(u"%s" % exc)
except Exception as exc:
self.print_error(u"An unexpected error occurred while synthesizing text:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE | 0.002996 |
def find_resistance(record):
"""Infer the antibiotics resistance of the given record.
Arguments:
record (`~Bio.SeqRecord.SeqRecord`): an annotated sequence.
Raises:
RuntimeError: when there's not exactly one resistance cassette.
"""
for feature in record.features:
labels = set(feature.qualifiers.get("label", []))
cassettes = labels.intersection(_ANTIBIOTICS)
if len(cassettes) > 1:
raise RuntimeError("multiple resistance cassettes detected")
elif len(cassettes) == 1:
return _ANTIBIOTICS.get(cassettes.pop())
raise RuntimeError("could not find the resistance of '{}'".format(record.id)) | 0.002907 |
def create_profile(ctx, package):
"""Creates an importable Maltego profile (*.mtz) file."""
from canari.commands.create_profile import create_profile
create_profile(ctx.config_dir, ctx.project, package) | 0.004673 |
def add_node(self, n, layers=None, attr_dict=None, **attr):
"""Add a single node n and update node attributes.
Parameters
----------
n : node
A node can be any hashable Python object except None.
layers : set of str or None
the set of layers the node belongs to,
e.g. {'tiger:token', 'anaphoricity:annotation'}.
Will be set to {self.ns} if None.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of node attributes. Key/value pairs will
update existing data associated with the node.
attr : keyword arguments, optional
Set or change attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> from discoursegraphs import DiscourseDocumentGraph
>>> d = DiscourseDocumentGraph()
>>> d.add_node(1, {'node'})
# adding the same node with a different layer
>>> d.add_node(1, {'number'})
>>> d.nodes(data=True)
[(1, {'layers': {'node', 'number'}})]
Use keywords set/change node attributes:
>>> d.add_node(1, {'node'}, size=10)
>>> d.add_node(3, layers={'num'}, weight=0.4, UTM=('13S',382))
>>> d.nodes(data=True)
[(1, {'layers': {'node', 'number'}, 'size': 10}),
(3, {'UTM': ('13S', 382), 'layers': {'num'}, 'weight': 0.4})]
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
if not layers:
layers = {self.ns}
assert isinstance(layers, set), \
"'layers' parameter must be given as a set of strings."
assert all((isinstance(layer, str) for layer in layers)), \
"All elements of the 'layers' set must be strings."
# add layers to keyword arguments dict
attr.update({'layers': layers})
# set up attribute dict
if attr_dict is None:
attr_dict = attr
else:
assert isinstance(attr_dict, dict), \
"attr_dict must be a dictionary, not a '{}'".format(type(attr_dict))
attr_dict.update(attr)
# if there's no node with this ID in the graph, yet
if n not in self.succ:
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr_dict
else: # update attr even if node already exists
# if a node exists, its attributes will be updated, except
# for the layers attribute. the value of 'layers' will
# be the union of the existing layers set and the new one.
existing_layers = self.node[n]['layers']
all_layers = existing_layers.union(layers)
attrs_without_layers = {k: v for (k, v) in attr_dict.items()
if k != 'layers'}
self.node[n].update(attrs_without_layers)
self.node[n].update({'layers': all_layers}) | 0.000911 |
def deps_from_pydit_json(requires, runtime=True):
"""Parses dependencies returned by pydist.json, since versions
uses brackets we can't use pkg_resources to parse and we need a separate
method
Args:
requires: list of dependencies as written in pydist.json of the package
runtime: are the dependencies runtime (True) or build time (False)
Returns:
        List of semi-SPECFILE dependencies (see dependency_to_rpm for format)
"""
parsed = []
for req in requires:
        # req looks like 'some-name (>=X.Y,!=Y.X)' or 'some-name' where
# 'some-name' is the name of required package and '(>=X.Y,!=Y.X)'
# are specs
name, specs = None, None
# len(reqs) == 1 if there are not specified versions, 2 otherwise
reqs = req.split(' ')
name = reqs[0]
if len(reqs) == 2:
specs = reqs[1]
# try if there are more specs in spec part of the requires
specs = specs.split(",")
# strip brackets
specs = [re.sub('[()]', '', spec) for spec in specs]
# this will divide (>=0.1.2) to ['>=', '0', '.1.2']
# or (0.1.2) into ['', '0', '.1.2']
specs = [re.split('([0-9])', spec, 1) for spec in specs]
# we have separated specs based on number as delimiter
# so we need to join it back to rest of version number
# e.g ['>=', '0', '.1.2'] to ['>=', '0.1.2']
for spec in specs:
spec[1:3] = [''.join(spec[1:3])]
if specs:
for spec in specs:
if '!' in spec[0]:
parsed.append(['Conflicts', name, '=', spec[1]])
                elif spec[0] == '==':
parsed.append(['Requires', name, '=', spec[1]])
else:
parsed.append(['Requires', name, spec[0], spec[1]])
else:
parsed.append(['Requires', name])
if not runtime:
for pars in parsed:
pars[0] = 'Build' + pars[0]
return parsed | 0.000484 |
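A small illustration of the expected input and output shapes (package names and versions are made up):

requires = ['six (>=1.9,!=1.11)', 'requests']

print(deps_from_pydit_json(requires))
# [['Requires', 'six', '>=', '1.9'],
#  ['Conflicts', 'six', '=', '1.11'],
#  ['Requires', 'requests']]

print(deps_from_pydit_json(requires, runtime=False))
# same entries, prefixed with 'Build' (BuildRequires / BuildConflicts)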
def start_instances(instances, region):
    '''Start all the instances given by their ids'''
if not instances: return
conn = ec2_connect(region)
log("Starting instances {0}.".format(instances))
conn.start_instances(instances)
log("Done") | 0.007843 |
def cmd_cammsg(self, args):
'''cammsg'''
params = [0, 0, 0, 0, 1, 0, 0]
# fill in any args passed by user
for i in range(min(len(args),len(params))):
params[i] = float(args[i])
print("Sent DIGICAM_CONTROL CMD_LONG")
self.master.mav.command_long_send(
self.settings.target_system, # target_system
0, # target_component
mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL, # command
0, # confirmation
params[0], # param1
params[1], # param2
params[2], # param3
params[3], # param4
params[4], # param5
params[5], # param6
params[6]) | 0.01676 |
def save(self, path, verbose=False):
"""Save the suite to disk.
Args:
path (str): Path to save the suite to. If a suite is already saved
at `path`, then it will be overwritten. Otherwise, if `path`
exists, an error is raised.
"""
path = os.path.realpath(path)
if os.path.exists(path):
if self.load_path and self.load_path == path:
if verbose:
print "saving over previous suite..."
for context_name in self.context_names:
self.context(context_name) # load before dir deleted
shutil.rmtree(path)
else:
raise SuiteError("Cannot save, path exists: %r" % path)
contexts_path = os.path.join(path, "contexts")
os.makedirs(contexts_path)
# write suite data
data = self.to_dict()
filepath = os.path.join(path, "suite.yaml")
with open(filepath, "w") as f:
f.write(dump_yaml(data))
# write contexts
for context_name in self.context_names:
context = self.context(context_name)
context._set_parent_suite(path, context_name)
filepath = self._context_path(context_name, path)
if verbose:
print "writing %r..." % filepath
context.save(filepath)
# create alias wrappers
tools_path = os.path.join(path, "bin")
os.makedirs(tools_path)
if verbose:
print "creating alias wrappers in %r..." % tools_path
tools = self.get_tools()
for tool_alias, d in tools.iteritems():
tool_name = d["tool_name"]
context_name = d["context_name"]
data = self._context(context_name)
prefix_char = data.get("prefix_char")
if verbose:
print ("creating %r -> %r (%s context)..."
% (tool_alias, tool_name, context_name))
filepath = os.path.join(tools_path, tool_alias)
create_forwarding_script(filepath,
module="suite",
func_name="_FWD__invoke_suite_tool_alias",
context_name=context_name,
tool_name=tool_name,
prefix_char=prefix_char) | 0.001234 |
def _pre_process_cfg(self):
"""
Pre-process the acyclic CFG.
- Change all FakeRet edges to normal edges when necessary (e.g. the normal/expected return edge does not exist)
"""
for _, dst, data in self._acyclic_cfg.graph.edges(data=True):
if 'jumpkind' in data and data['jumpkind'] == 'Ijk_FakeRet':
all_edges_to_dst = self._acyclic_cfg.graph.in_edges([ dst ], data=True)
if not any((s, d) for s, d, da in all_edges_to_dst if da['jumpkind'] != 'Ijk_FakeRet' ):
# All in edges are FakeRets
# Change them to a normal edge
for _, _, data_ in all_edges_to_dst:
data_['jumpkind'] = 'Ijk_Boring' | 0.010568 |
def close(self):
"""
Close the connection to the AMQP compliant broker.
"""
if self.channel is not None: self.channel.close()
if self.__connection is not None: self.__connection.close() | 0.017778 |
def smart_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""Return a unicode object representing 's'.
Treats bytes using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, six.text_type):
return s
if strings_only and not isinstance(s, six.string_types):
return s
if not isinstance(s, six.string_types):
if hasattr(s, "__unicode__"):
s = s.__unicode__()
else:
if six.PY3:
if isinstance(s, six.binary_type):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
else:
s = six.text_type(six.binary_type(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
return s | 0.000941 |
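A few hedged examples of the conversion behavior (assumes the `six` compatibility library used by the snippet is installed and Python 3 is running):

print(smart_text(b'caf\xc3\xa9'))            # bytes decoded as UTF-8 -> 'café'
print(smart_text('already text'))            # text passes through unchanged
print(smart_text(42))                        # non-string coerced -> '42'
print(smart_text(42, strings_only=True))     # non-string left alone -> 42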
def heightmap_new(w: int, h: int, order: str = "C") -> np.ndarray:
"""Return a new numpy.ndarray formatted for use with heightmap functions.
`w` and `h` are the width and height of the array.
`order` is given to the new NumPy array, it can be 'C' or 'F'.
You can pass a NumPy array to any heightmap function as long as all the
following are true::
* The array is 2 dimensional.
* The array has the C_CONTIGUOUS or F_CONTIGUOUS flag.
* The array's dtype is :any:`dtype.float32`.
The returned NumPy array will fit all these conditions.
.. versionchanged:: 8.1
Added the `order` parameter.
"""
if order == "C":
return np.zeros((h, w), np.float32, order="C")
elif order == "F":
return np.zeros((w, h), np.float32, order="F")
else:
raise ValueError("Invalid order parameter, should be 'C' or 'F'.") | 0.001129 |
async def trigger_event(self, event, *args):
"""Dispatch an event to the proper handler method.
In the most common usage, this method is not overloaded by subclasses,
as it performs the routing of events to methods. However, this
        method can be overridden if special dispatching rules are needed, or if
having a single method that catches all events is desired.
Note: this method is a coroutine.
"""
handler_name = 'on_' + event
if hasattr(self, handler_name):
handler = getattr(self, handler_name)
if asyncio.iscoroutinefunction(handler) is True:
try:
ret = await handler(*args)
except asyncio.CancelledError: # pragma: no cover
ret = None
else:
ret = handler(*args)
return ret | 0.002255 |
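A self-contained sketch of the same 'on_<event>' dispatch convention; the Dispatcher class below is illustrative and not part of the library the snippet comes from:

import asyncio

class Dispatcher:
    async def trigger_event(self, event, *args):
        # route 'message' to self.on_message, 'connect' to self.on_connect, etc.
        handler = getattr(self, 'on_' + event, None)
        if handler is None:
            return None
        if asyncio.iscoroutinefunction(handler):
            return await handler(*args)
        return handler(*args)

class Chat(Dispatcher):
    async def on_message(self, sid, data):
        return 'got {!r} from {}'.format(data, sid)

print(asyncio.run(Chat().trigger_event('message', 'sid123', 'hello')))
# -> got 'hello' from sid123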
def toXml(self):
"""
Saves this profile toolbar as XML information.
:return <xml.etree.ElementTree.Element>
"""
xtoolbar = ElementTree.Element('toolbar')
prof = self._currentProfile
if prof is not None:
xtoolbar.set('current', prof.name())
for profile in self.profiles():
profile.toXml(xtoolbar)
return xtoolbar | 0.012987 |
def build_ingest_fs(self):
"""Return a pyfilesystem subdirectory for the ingested source files"""
base_path = 'ingest'
if not self.build_fs.exists(base_path):
self.build_fs.makedir(base_path, recursive=True, allow_recreate=True)
return self.build_fs.opendir(base_path) | 0.009524 |
def expand_entry(entry, ignore_xs=0x0):
"""Turn all Xs which are not marked in `ignore_xs` into ``0``\ s and
``1``\ s.
The following will expand any Xs in bits ``1..3``\ ::
>>> entry = RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100)
>>> list(expand_entry(entry, 0xfffffff1)) == [
... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X
... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X
... ]
True
Parameters
----------
entry : :py:class:`~rig.routing_table.RoutingTableEntry` or similar
The entry to expand.
ignore_xs : int
Bit-mask of Xs which should not be expanded.
Yields
------
:py:class:`~rig.routing_table.RoutingTableEntry`
Routing table entries which represent the original entry but with all
Xs not masked off by `ignore_xs` replaced with 1s and 0s.
"""
# Get all the Xs in the entry that are not ignored
xs = (~entry.key & ~entry.mask) & ~ignore_xs
# Find the most significant X
for bit in (1 << i for i in range(31, -1, -1)):
if bit & xs:
# Yield all the entries with this bit set as 0
entry_0 = RoutingTableEntry(entry.route, entry.key,
entry.mask | bit, entry.sources)
for new_entry in expand_entry(entry_0, ignore_xs):
yield new_entry
# And yield all the entries with this bit set as 1
entry_1 = RoutingTableEntry(entry.route, entry.key | bit,
entry.mask | bit, entry.sources)
for new_entry in expand_entry(entry_1, ignore_xs):
yield new_entry
# Stop looking for Xs
break
else:
# If there are no Xs then yield the entry we were given.
yield entry | 0.002121 |
def draw_circle(ctx, x, y, radius, cairo_color):
"""
Draw a circle.
:param radius: radius in pixels
:param cairo_color: normalized rgb color
"""
ctx.new_path()
ctx.set_source_rgb(cairo_color.red, cairo_color.green, cairo_color.blue)
ctx.arc(x, y, radius, 0, 2 * pi)
ctx.fill() | 0.003205 |
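A hedged usage sketch, assuming pycairo is installed; `cairo_color` can be any object exposing normalized .red/.green/.blue attributes, so a namedtuple stands in here:

import cairo
from collections import namedtuple

Color = namedtuple('Color', 'red green blue')

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 200)
ctx = cairo.Context(surface)
draw_circle(ctx, x=100, y=100, radius=40, cairo_color=Color(0.2, 0.4, 0.8))
surface.write_to_png('circle.png')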
def call_fdel(self, obj) -> None:
"""Remove the predefined custom value and call the delete function."""
self.fdel(obj)
try:
del vars(obj)[self.name]
except KeyError:
pass | 0.008811 |
def opensearch(self, query, results=10, redirect=True):
""" Execute a MediaWiki opensearch request, similar to search box
suggestions and conforming to the OpenSearch specification
Args:
query (str): Title to search for
            results (int): Number of results to return
redirect (bool): If **False** return the redirect itself, \
otherwise resolve redirects
Returns:
List: List of results that are stored in a tuple \
(Title, Summary, URL) """
self._check_query(query, "Query must be specified")
query_params = {
"action": "opensearch",
"search": query,
"limit": (100 if results > 100 else results),
"redirects": ("resolve" if redirect else "return"),
"warningsaserror": True,
"namespace": "",
}
results = self.wiki_request(query_params)
self._check_error_response(results, query)
res = list()
for i, item in enumerate(results[1]):
res.append((item, results[2][i], results[3][i]))
return res | 0.001642 |
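A hypothetical call through the pymediawiki client (the package import path and default English Wikipedia endpoint are assumptions):

from mediawiki import MediaWiki

wiki = MediaWiki()  # defaults to the English Wikipedia API
for title, summary, url in wiki.opensearch('python programming', results=5):
    print(title, '->', url)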
def line_nbr_from_position(self, y_pos):
"""
Returns the line number from the y_pos.
:param y_pos: Y pos in the editor
:return: Line number (0 based), -1 if out of range
"""
editor = self._editor
height = editor.fontMetrics().height()
for top, line, block in editor.visible_blocks:
if top <= y_pos <= top + height:
return line
return -1 | 0.004577 |
def get_interface_detail_output_interface_if_description(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
if_description = ET.SubElement(interface, "if-description")
if_description.text = kwargs.pop('if_description')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.002315 |
def onfini(self, func):
'''
Add a function/coroutine/Base to be called on fini().
'''
if isinstance(func, Base):
self.tofini.add(func)
return
assert self.anitted
self._fini_funcs.append(func) | 0.007576 |
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
logger.info('Declaring exchange', name=exchange_name)
self._channel.exchange_declare(self.on_exchange_declareok,
exchange_name,
self._exchange_type) | 0.003584 |
def modify_main_app(app, config: Config):
"""
Modify the app we're serving to make development easier, eg.
* modify responses to add the livereload snippet
* set ``static_root_url`` on the app
* setup the debug toolbar
"""
app._debug = True
dft_logger.debug('livereload enabled: %s', '✓' if config.livereload else '✖')
def get_host(request):
if config.infer_host:
return request.headers.get('host', 'localhost').split(':', 1)[0]
else:
return config.host
if config.livereload:
async def on_prepare(request, response):
if (not request.path.startswith('/_debugtoolbar') and
'text/html' in response.content_type and
getattr(response, 'body', False)):
lr_snippet = LIVE_RELOAD_HOST_SNIPPET.format(get_host(request), config.aux_port)
dft_logger.debug('appending live reload snippet "%s" to body', lr_snippet)
response.body += lr_snippet.encode()
app.on_response_prepare.append(on_prepare)
static_path = config.static_url.strip('/')
if config.infer_host and config.static_path is not None:
# we set the app key even in middleware to make the switch to production easier and for backwards compat.
@web.middleware
async def static_middleware(request, handler):
static_url = 'http://{}:{}/{}'.format(get_host(request), config.aux_port, static_path)
dft_logger.debug('settings app static_root_url to "%s"', static_url)
request.app['static_root_url'].change(static_url)
return await handler(request)
app.middlewares.insert(0, static_middleware)
if config.static_path is not None:
static_url = 'http://{}:{}/{}'.format(config.host, config.aux_port, static_path)
dft_logger.debug('settings app static_root_url to "%s"', static_url)
app['static_root_url'] = MutableValue(static_url)
if config.debug_toolbar and aiohttp_debugtoolbar:
aiohttp_debugtoolbar.setup(app, intercept_redirects=False) | 0.003799 |
def create_equipamento(self):
"""Get an instance of equipamento services facade."""
return Equipamento(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) | 0.008696 |
def _run_snpeff(snp_in, out_format, data):
"""Run effects prediction with snpEff, skipping if snpEff database not present.
"""
snpeff_db, datadir = get_db(data)
if not snpeff_db:
return None, None
assert os.path.exists(os.path.join(datadir, snpeff_db)), \
"Did not find %s snpEff genome data in %s" % (snpeff_db, datadir)
ext = utils.splitext_plus(snp_in)[1] if out_format == "vcf" else ".tsv"
out_file = "%s-effects%s" % (utils.splitext_plus(snp_in)[0], ext)
stats_file = "%s-stats.html" % utils.splitext_plus(out_file)[0]
csv_file = "%s-stats.csv" % utils.splitext_plus(out_file)[0]
if not utils.file_exists(out_file):
config_args = " ".join(_snpeff_args_from_config(data))
if ext.endswith(".gz"):
bgzip_cmd = "| %s -c" % tools.get_bgzip_cmd(data["config"])
else:
bgzip_cmd = ""
with file_transaction(data, out_file) as tx_out_file:
snpeff_cmd = _get_snpeff_cmd("eff", datadir, data, tx_out_file)
cmd = ("{snpeff_cmd} {config_args} -noLog -i vcf -o {out_format} "
"-csvStats {csv_file} -s {stats_file} {snpeff_db} {snp_in} {bgzip_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "snpEff effects", data)
if ext.endswith(".gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file, [stats_file, csv_file] | 0.002114 |