text (string, length 78–104k) | score (float64, 0–0.18)
---|---
def galcenrect_to_vxvyvz(vXg,vYg,vZg,vsun=[0.,1.,0.],Xsun=1.,Zsun=0.,
_extra_rot=True):
"""
NAME:
galcenrect_to_vxvyvz
PURPOSE:
transform rectangular Galactocentric coordinates to XYZ coordinates (wrt Sun) for velocities
INPUT:
vXg - Galactocentric x-velocity
vYg - Galactocentric y-velocity
vZg - Galactocentric z-velocity
vsun - velocity of the sun in the GC frame ndarray[3] (can be array of same length as vXg; shape [3,N])
Xsun - cylindrical distance to the GC (can be array of same length as vXg)
Zsun - Sun's height above the midplane (can be array of same length as vXg)
_extra_rot= (True) if True, perform an extra tiny rotation to align the Galactocentric coordinate frame with astropy's definition
OUTPUT:
[:,3]= vx, vy, vz
HISTORY:
2011-02-24 - Written - Bovy (NYU)
2016-05-12 - Edited to properly take into account the Sun's vertical position; dropped Ysun keyword - Bovy (UofT)
2017-10-24 - Allowed vsun/Xsun/Zsun to be arrays - Bovy (UofT)
2018-04-18 - Tweaked to be consistent with astropy's Galactocentric frame - Bovy (UofT)
"""
dgc= nu.sqrt(Xsun**2.+Zsun**2.)
costheta, sintheta= Xsun/dgc, Zsun/dgc
if isinstance(Xsun,nu.ndarray):
zero= nu.zeros(len(Xsun))
one= nu.ones(len(Xsun))
Carr= nu.rollaxis(nu.array([[-costheta,zero,-sintheta],
[zero,one,zero],
[-nu.sign(Xsun)*sintheta,zero,
nu.sign(Xsun)*costheta]]),2)
out= ((Carr
*nu.array([[vXg-vsun[0],vXg-vsun[0],vXg-vsun[0]],
[vYg-vsun[1],vYg-vsun[1],vYg-vsun[1]],
[vZg-vsun[2],vZg-vsun[2],vZg-vsun[2]]]).T).sum(-1))
else:
out= nu.dot(nu.array([[-costheta,0.,-sintheta],
[0.,1.,0.],
[-nu.sign(Xsun)*sintheta,0.,
nu.sign(Xsun)*costheta]]),
nu.array([vXg-vsun[0],vYg-vsun[1],vZg-vsun[2]])).T
if _extra_rot:
return nu.dot(galcen_extra_rot.T,out.T).T
else:
return out | 0.020632 |
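The scalar branch above amounts to subtracting the solar motion and applying a fixed rotation set by the Sun's position (Xsun, Zsun). A minimal standalone sketch of that math, with hypothetical solar-motion numbers and without the `_extra_rot` astropy alignment:

import numpy as np

def galcen_to_vxvyvz_sketch(vXg, vYg, vZg, vsun=(-11.1, 244.0, 7.25), Xsun=8.1, Zsun=0.025):
    # Subtract the Sun's velocity, then rotate by the tilt that brings (Xsun, Zsun) onto the plane.
    dgc = np.sqrt(Xsun**2 + Zsun**2)
    costheta, sintheta = Xsun / dgc, Zsun / dgc
    rot = np.array([[-costheta, 0., -sintheta],
                    [0., 1., 0.],
                    [-np.sign(Xsun) * sintheta, 0., np.sign(Xsun) * costheta]])
    return rot.dot(np.array([vXg - vsun[0], vYg - vsun[1], vZg - vsun[2]]))

print(galcen_to_vxvyvz_sketch(0., 220., 0.))  # rough heliocentric (vx, vy, vz) of a co-rotating star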
def _import_genos_from_oedb(network):
"""Import generator data from the Open Energy Database (OEDB).
The importer uses SQLAlchemy ORM objects.
These are defined in ego.io,
see https://github.com/openego/ego.io/tree/dev/egoio/db_tables
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
Notes
------
Right now only solar and wind generators can be imported.
"""
def _import_conv_generators():
"""Import conventional (conv) generators
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
List of medium-voltage generators
Notes
-----
You can find a full list of columns in
:func:`edisgo.data.import_data._update_grids`
"""
# build query
generators_sqla = session.query(
orm_conv_generators.columns.id,
orm_conv_generators.columns.subst_id,
orm_conv_generators.columns.la_id,
orm_conv_generators.columns.capacity,
orm_conv_generators.columns.type,
orm_conv_generators.columns.voltage_level,
orm_conv_generators.columns.fuel,
func.ST_AsText(func.ST_Transform(
orm_conv_generators.columns.geom, srid))
). \
filter(orm_conv_generators.columns.subst_id == network.mv_grid.id). \
filter(orm_conv_generators.columns.voltage_level.in_([4, 5, 6, 7])). \
filter(orm_conv_generators_version)
# read data from db
generators_mv = pd.read_sql_query(generators_sqla.statement,
session.bind,
index_col='id')
return generators_mv
def _import_res_generators(types_filter):
"""Import renewable (res) generators
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
List of medium-voltage generators
:pandas:`pandas.DataFrame<dataframe>`
List of low-voltage generators
Notes
-----
You can find a full list of columns in
:func:`edisgo.data.import_data._update_grids`
If subtype is not specified it's set to 'unknown'.
"""
# build basic query
generators_sqla = session.query(
orm_re_generators.columns.id,
orm_re_generators.columns.subst_id,
orm_re_generators.columns.la_id,
orm_re_generators.columns.mvlv_subst_id,
orm_re_generators.columns.electrical_capacity,
orm_re_generators.columns.generation_type,
orm_re_generators.columns.generation_subtype,
orm_re_generators.columns.voltage_level,
orm_re_generators.columns.w_id,
func.ST_AsText(func.ST_Transform(
orm_re_generators.columns.rea_geom_new, srid)).label('geom'),
func.ST_AsText(func.ST_Transform(
orm_re_generators.columns.geom, srid)).label('geom_em')). \
filter(orm_re_generators.columns.subst_id == network.mv_grid.id). \
filter(orm_re_generators_version). \
filter(types_filter)
# extend basic query for MV generators and read data from db
generators_mv_sqla = generators_sqla. \
filter(orm_re_generators.columns.voltage_level.in_([4, 5]))
generators_mv = pd.read_sql_query(generators_mv_sqla.statement,
session.bind,
index_col='id')
# define generators with unknown subtype as 'unknown'
generators_mv.loc[generators_mv[
'generation_subtype'].isnull(),
'generation_subtype'] = 'unknown'
# extend basic query for LV generators and read data from db
generators_lv_sqla = generators_sqla. \
filter(orm_re_generators.columns.voltage_level.in_([6, 7]))
generators_lv = pd.read_sql_query(generators_lv_sqla.statement,
session.bind,
index_col='id')
# define generators with unknown subtype as 'unknown'
generators_lv.loc[generators_lv[
'generation_subtype'].isnull(),
'generation_subtype'] = 'unknown'
return generators_mv, generators_lv
def _update_grids(network, generators_mv, generators_lv, remove_missing=True):
"""Update imported status quo DINGO-grid according to new generator dataset
It
* adds new generators to grid if they do not exist
* updates existing generators if parameters have changed
* removes existing generators from grid which do not exist in the imported dataset
Steps:
* Step 1: MV generators: Update existing, create new, remove decommissioned
* Step 2: LV generators (single units): Update existing, remove decommissioned
* Step 3: LV generators (in aggregated MV generators): Update existing,
remove decommissioned
(aggregated MV generators = originally LV generators from aggregated Load
Areas which were aggregated during import from ding0.)
* Step 4: LV generators (single units + aggregated MV generators): Create new
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
generators_mv: :pandas:`pandas.DataFrame<dataframe>`
List of MV generators
Columns:
* id: :obj:`int` (index column)
* electrical_capacity: :obj:`float` (unit: kW)
* generation_type: :obj:`str` (e.g. 'solar')
* generation_subtype: :obj:`str` (e.g. 'solar_roof_mounted')
* voltage_level: :obj:`int` (range: 4..7)
* geom: :shapely:`Shapely Point object<points>`
(CRS see config_grid.cfg)
* geom_em: :shapely:`Shapely Point object<points>`
(CRS see config_grid.cfg)
generators_lv: :pandas:`pandas.DataFrame<dataframe>`
List of LV generators
Columns:
* id: :obj:`int` (index column)
* mvlv_subst_id: :obj:`int` (id of the MV-LV substation, i.e. of the grid
which the generator will be connected to)
* electrical_capacity: :obj:`float` (unit: kW)
* generation_type: :obj:`str` (e.g. 'solar')
* generation_subtype: :obj:`str` (e.g. 'solar_roof_mounted')
* voltage_level: :obj:`int` (range: 4..7)
* geom: :shapely:`Shapely Point object<points>`
(CRS see config_grid.cfg)
* geom_em: :shapely:`Shapely Point object<points>`
(CRS see config_grid.cfg)
remove_missing: :obj:`bool`
If true, remove generators from grid which are not included in the imported dataset.
"""
# set capacity difference threshold
cap_diff_threshold = 10 ** -4
# get existing generators in MV and LV grids
g_mv, g_lv, g_mv_agg = _build_generator_list(network=network)
# print current capacity
capacity_grid = 0
capacity_grid += sum([row['obj'].nominal_capacity for id, row in g_mv.iterrows()])
capacity_grid += sum([row['obj'].nominal_capacity for id, row in g_lv.iterrows()])
capacity_grid += sum([row['obj'].nominal_capacity for id, row in g_mv_agg.iterrows()])
logger.debug('Cumulative generator capacity (existing): {} kW'
.format(str(round(capacity_grid, 1)))
)
# ======================================
# Step 1: MV generators (existing + new)
# ======================================
logger.debug('==> MV generators')
logger.debug('{} generators imported.'
.format(str(len(generators_mv))))
# get existing genos (status quo DF format)
g_mv_existing = g_mv[g_mv['id'].isin(list(generators_mv.index.values))]
# get existing genos (new genos DF format)
generators_mv_existing = generators_mv[generators_mv.index.isin(list(g_mv_existing['id']))]
# remove existing ones from grid's geno list
g_mv = g_mv[~g_mv.isin(g_mv_existing)].dropna()
# TEMP: BACKUP 1 GENO FOR TESTING
#temp_geno = generators_mv_existing.iloc[0]
#temp_geno['geom_em'] = temp_geno['geom_em'].replace('10.667', '10.64')
# iterate over existing generators and check whether capacity has changed
log_geno_count = 0
log_geno_cap = 0
for id, row in generators_mv_existing.iterrows():
geno_existing = g_mv_existing[g_mv_existing['id'] == id]['obj'].iloc[0]
# check if capacity equals; if not: update capacity
if abs(row['electrical_capacity'] - \
geno_existing.nominal_capacity) < cap_diff_threshold:
continue
else:
log_geno_cap += row['electrical_capacity'] - geno_existing.nominal_capacity
log_geno_count += 1
geno_existing.nominal_capacity = row['electrical_capacity']
# check if cap <= 0 (this may happen if dp is buggy)
if row['electrical_capacity'] <= 0:
geno_existing.grid.graph.remove_node(geno_existing)
logger.warning('Capacity of generator {} is <=0, generator removed. '
'Check your data source.'
.format(repr(geno_existing))
)
logger.debug('Capacities of {} of {} existing generators updated ({} kW).'
.format(str(log_geno_count),
str(len(generators_mv_existing) - log_geno_count),
str(round(log_geno_cap, 1))
)
)
# new genos
log_geno_count = 0
log_geno_cap = 0
generators_mv_new = generators_mv[~generators_mv.index.isin(
list(g_mv_existing['id']))]
# remove them from grid's geno list
g_mv = g_mv[~g_mv.isin(list(generators_mv_new.index.values))].dropna()
# TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
#generators_mv_new = generators_mv_new.append(temp_geno)
# iterate over new generators and create them
for id, row in generators_mv_new.iterrows():
# check if geom is available, skip otherwise
geom = _check_geom(id, row)
if not geom:
logger.warning('Generator {} has no geom entry at all and will '
'not be imported!'.format(id))
continue
# create generator object and add it to MV grid's graph
if row['generation_type'] in ['solar', 'wind']:
network.mv_grid.graph.add_node(
GeneratorFluctuating(
id=id,
grid=network.mv_grid,
nominal_capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
weather_cell_id=row['w_id'],
geom=wkt_loads(geom)),
type='generator')
else:
network.mv_grid.graph.add_node(
Generator(id=id,
grid=network.mv_grid,
nominal_capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
geom=wkt_loads(geom)
),
type='generator')
log_geno_cap += row['electrical_capacity']
log_geno_count += 1
logger.debug('{} of {} new generators added ({} kW).'
.format(str(log_geno_count),
str(len(generators_mv_new)),
str(round(log_geno_cap, 1))
)
)
# remove decommissioned genos
# (genos which exist in grid but not in the new dataset)
log_geno_cap = 0
if not g_mv.empty and remove_missing:
log_geno_count = 0
for _, row in g_mv.iterrows():
log_geno_cap += row['obj'].nominal_capacity
row['obj'].grid.graph.remove_node(row['obj'])
log_geno_count += 1
logger.debug('{} of {} decommissioned generators removed ({} kW).'
.format(str(log_geno_count),
str(len(g_mv)),
str(round(log_geno_cap, 1))
)
)
# =============================================
# Step 2: LV generators (single existing units)
# =============================================
logger.debug('==> LV generators')
logger.debug('{} generators imported.'.format(str(len(generators_lv))))
# get existing genos (status quo DF format)
g_lv_existing = g_lv[g_lv['id'].isin(list(generators_lv.index.values))]
# get existing genos (new genos DF format)
generators_lv_existing = generators_lv[generators_lv.index.isin(list(g_lv_existing['id']))]
# TEMP: BACKUP 1 GENO FOR TESTING
# temp_geno = g_lv.iloc[0]
# remove existing ones from grid's geno list
g_lv = g_lv[~g_lv.isin(g_lv_existing)].dropna()
# iterate over existing generators and check whether capacity has changed
log_geno_count = 0
log_geno_cap = 0
for id, row in generators_lv_existing.iterrows():
geno_existing = g_lv_existing[g_lv_existing['id'] == id]['obj'].iloc[0]
# check if capacity equals; if not: update capacity
if abs(row['electrical_capacity'] - \
geno_existing.nominal_capacity) < cap_diff_threshold:
continue
else:
log_geno_cap += row['electrical_capacity'] - geno_existing.nominal_capacity
log_geno_count += 1
geno_existing.nominal_capacity = row['electrical_capacity']
logger.debug('Capacities of {} of {} existing generators (single units) updated ({} kW).'
.format(str(log_geno_count),
str(len(generators_lv_existing) - log_geno_count),
str(round(log_geno_cap, 1))
)
)
# TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
# g_lv.loc[len(g_lv)] = temp_geno
# remove decommissioned genos
# (genos which exist in grid but not in the new dataset)
log_geno_cap = 0
if not g_lv.empty and remove_missing:
log_geno_count = 0
for _, row in g_lv.iterrows():
log_geno_cap += row['obj'].nominal_capacity
row['obj'].grid.graph.remove_node(row['obj'])
log_geno_count += 1
logger.debug('{} of {} decommissioned generators (single units) removed ({} kW).'
.format(str(log_geno_count),
str(len(g_lv)),
str(round(log_geno_cap, 1))
)
)
# ====================================================================================
# Step 3: LV generators (existing in aggregated units (originally from aggregated LA))
# ====================================================================================
g_lv_agg = network.dingo_import_data
g_lv_agg_existing = g_lv_agg[g_lv_agg['id'].isin(list(generators_lv.index.values))]
generators_lv_agg_existing = generators_lv[generators_lv.index.isin(list(g_lv_agg_existing['id']))]
# TEMP: BACKUP 1 GENO FOR TESTING
# temp_geno = g_lv_agg.iloc[0]
g_lv_agg = g_lv_agg[~g_lv_agg.isin(g_lv_agg_existing)].dropna()
log_geno_count = 0
log_agg_geno_list = []
log_geno_cap = 0
for id, row in generators_lv_agg_existing.iterrows():
# check if capacity equals; if not: update capacity of agg. geno
cap_diff = row['electrical_capacity'] - \
g_lv_agg_existing[g_lv_agg_existing['id'] == id]['capacity'].iloc[0]
if abs(cap_diff) < cap_diff_threshold:
continue
else:
agg_geno = g_lv_agg_existing[g_lv_agg_existing['id'] == id]['agg_geno'].iloc[0]
agg_geno.nominal_capacity += cap_diff
log_geno_cap += cap_diff
log_geno_count += 1
log_agg_geno_list.append(agg_geno)
logger.debug('Capacities of {} of {} existing generators (in {} of {} aggregated units) '
'updated ({} kW).'
.format(str(log_geno_count),
str(len(generators_lv_agg_existing) - log_geno_count),
str(len(set(log_agg_geno_list))),
str(len(g_lv_agg_existing['agg_geno'].unique())),
str(round(log_geno_cap, 1))
)
)
# TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
# g_lv_agg.loc[len(g_lv_agg)] = temp_geno
# remove decommissioned genos
# (genos which exist in grid but not in the new dataset)
log_geno_cap = 0
if not g_lv_agg.empty and remove_missing:
log_geno_count = 0
for _, row in g_lv_agg.iterrows():
row['agg_geno'].nominal_capacity -= row['capacity']
log_geno_cap += row['capacity']
# remove LV geno id from id string of agg. geno
id = row['agg_geno'].id.split('-')
ids = id[2].split('_')
ids.remove(str(int(row['id'])))
row['agg_geno'].id = '-'.join([id[0], id[1], '_'.join(ids)])
# after removing the LV geno from agg geno, is the agg. geno empty?
# if yes, remove it from grid
if not ids:
row['agg_geno'].grid.graph.remove_node(row['agg_geno'])
log_geno_count += 1
logger.debug('{} of {} decommissioned generators in aggregated generators removed ({} kW).'
.format(str(log_geno_count),
str(len(g_lv_agg)),
str(round(log_geno_cap, 1))
)
)
# ====================================================================
# Step 4: LV generators (new single units + genos in aggregated units)
# ====================================================================
# new genos
log_geno_count =\
log_agg_geno_new_count =\
log_agg_geno_upd_count = 0
# TEMP: BACKUP 1 GENO FOR TESTING
#temp_geno = generators_lv[generators_lv.index == g_lv_existing.iloc[0]['id']]
generators_lv_new = generators_lv[~generators_lv.index.isin(list(g_lv_existing['id'])) &
~generators_lv.index.isin(list(g_lv_agg_existing['id']))]
# TEMP: INSERT BACKUPPED GENO IN DF FOR TESTING
#generators_lv_new = generators_lv_new.append(temp_geno)
# dict for new agg. generators
agg_geno_new = {}
# get LV grid districts
lv_grid_dict = _build_lv_grid_dict(network)
# get predefined random seed and initialize random generator
seed = int(network.config['grid_connection']['random_seed'])
random.seed(a=seed)
# check if none of the new generators can be allocated to an existing LV grid
if not any([_ in lv_grid_dict.keys()
for _ in list(generators_lv_new['mvlv_subst_id'])]):
logger.warning('None of the imported generators can be allocated '
'to an existing LV grid. Check compatibility of grid '
'and generator datasets.')
# iterate over new (single unit or part of agg. unit) generators and create them
log_geno_cap = 0
for id, row in generators_lv_new.iterrows():
lv_geno_added_to_agg_geno = False
# new unit is part of agg. LA (mvlv_subst_id is different from existing
# ones in LV grids of non-agg. load areas)
if (row['mvlv_subst_id'] not in lv_grid_dict.keys() and
row['la_id'] and not isnan(row['la_id']) and
row['mvlv_subst_id'] and not isnan(row['mvlv_subst_id'])):
# check if new unit can be added to existing agg. generator
# (LA id, type and subtype match) -> update existing agg. generator.
# Normally, this case should not occur since `subtype` of new genos
# is set to a new value (e.g. 'solar')
for _, agg_row in g_mv_agg.iterrows():
if (agg_row['la_id'] == int(row['la_id']) and
agg_row['obj'].type == row['generation_type'] and
agg_row['obj'].subtype == row['generation_subtype']):
agg_row['obj'].nominal_capacity += row['electrical_capacity']
agg_row['obj'].id += '_{}'.format(str(id))
log_agg_geno_upd_count += 1
lv_geno_added_to_agg_geno = True
if not lv_geno_added_to_agg_geno:
la_id = int(row['la_id'])
if la_id not in agg_geno_new:
agg_geno_new[la_id] = {}
if row['voltage_level'] not in agg_geno_new[la_id]:
agg_geno_new[la_id][row['voltage_level']] = {}
if row['generation_type'] not in agg_geno_new[la_id][row['voltage_level']]:
agg_geno_new[la_id][row['voltage_level']][row['generation_type']] = {}
if row['generation_subtype'] not in \
agg_geno_new[la_id][row['voltage_level']][row['generation_type']]:
agg_geno_new[la_id][row['voltage_level']][row['generation_type']]\
.update({row['generation_subtype']: {'ids': [int(id)],
'capacity': row['electrical_capacity']
}
}
)
else:
agg_geno_new[la_id][row['voltage_level']][row['generation_type']] \
[row['generation_subtype']]['ids'].append(int(id))
agg_geno_new[la_id][row['voltage_level']][row['generation_type']] \
[row['generation_subtype']]['capacity'] += row['electrical_capacity']
# new generator is a single (non-aggregated) unit
else:
# check if geom is available
geom = _check_geom(id, row)
if row['generation_type'] in ['solar', 'wind']:
gen = GeneratorFluctuating(
id=id,
grid=None,
nominal_capacity=row['electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
weather_cell_id=row['w_id'],
geom=wkt_loads(geom) if geom else geom)
else:
gen = Generator(id=id,
grid=None,
nominal_capacity=row[
'electrical_capacity'],
type=row['generation_type'],
subtype=row['generation_subtype'],
v_level=int(row['voltage_level']),
geom=wkt_loads(geom) if geom else geom)
# TEMP: REMOVE MVLV SUBST ID FOR TESTING
#row['mvlv_subst_id'] = None
# check if MV-LV substation id exists. if not, allocate to
# random one
lv_grid = _check_mvlv_subst_id(
generator=gen,
mvlv_subst_id=row['mvlv_subst_id'],
lv_grid_dict=lv_grid_dict)
gen.grid = lv_grid
lv_grid.graph.add_node(gen, type='generator')
log_geno_count += 1
log_geno_cap += row['electrical_capacity']
# there are new agg. generators to be created
if agg_geno_new:
pfac_mv_gen = network.config['reactive_power_factor']['mv_gen']
# add aggregated generators
for la_id, val in agg_geno_new.items():
for v_level, val2 in val.items():
for type, val3 in val2.items():
for subtype, val4 in val3.items():
if type in ['solar', 'wind']:
gen = GeneratorFluctuating(
id='agg-' + str(la_id) + '-' + '_'.join([
str(_) for _ in val4['ids']]),
grid=network.mv_grid,
nominal_capacity=val4['capacity'],
type=type,
subtype=subtype,
v_level=4,
# ToDo: get correct w_id
weather_cell_id=row['w_id'],
geom=network.mv_grid.station.geom)
else:
gen = Generator(
id='agg-' + str(la_id) + '-' + '_'.join([
str(_) for _ in val4['ids']]),
nominal_capacity=val4['capacity'],
type=type,
subtype=subtype,
geom=network.mv_grid.station.geom,
grid=network.mv_grid,
v_level=4)
network.mv_grid.graph.add_node(
gen, type='generator_aggr')
# select cable type
line_type, line_count = select_cable(
network=network,
level='mv',
apparent_power=gen.nominal_capacity /
pfac_mv_gen)
# connect generator to MV station
line = Line(id='line_aggr_generator_la_' + str(la_id) + '_vlevel_{v_level}_'
'{subtype}'.format(
v_level=v_level,
subtype=subtype),
type=line_type,
kind='cable',
quantity=line_count,
length=1e-3,
grid=network.mv_grid)
network.mv_grid.graph.add_edge(network.mv_grid.station,
gen,
line=line,
type='line_aggr')
log_agg_geno_new_count += len(val4['ids'])
log_geno_cap += val4['capacity']
logger.debug('{} of {} new generators added ({} single units, {} to existing '
'agg. generators and {} units as new aggregated generators) '
'(total: {} kW).'
.format(str(log_geno_count +
log_agg_geno_new_count +
log_agg_geno_upd_count),
str(len(generators_lv_new)),
str(log_geno_count),
str(log_agg_geno_upd_count),
str(log_agg_geno_new_count),
str(round(log_geno_cap, 1))
)
)
def _check_geom(id, row):
"""Checks if a valid geom is available in dataset
If yes, this geom will be used.
If not:
* MV generators: use geom from EnergyMap.
* LV generators: set geom to None. It is re-set in
:func:`edisgo.data.import_data._check_mvlv_subst_id`
to MV-LV station's geom. EnergyMap's geom is not used
since it is more inaccurate than the station's geom.
Parameters
----------
id : :obj:`int`
Id of generator
row : :pandas:`pandas.Series<series>`
Generator dataset
Returns
-------
:shapely:`Shapely Point object<points>` or None
Geom of generator. None, if no geom is available.
"""
geom = None
# check if geom is available
if row['geom']:
geom = row['geom']
else:
# MV generators: set geom to EnergyMap's geom, if available
if int(row['voltage_level']) in [4,5]:
# check if original geom from Energy Map is available
if row['geom_em']:
geom = row['geom_em']
logger.debug('Generator {} has no geom entry, EnergyMap\'s geom entry will be used.'
.format(id)
)
return geom
def _check_mvlv_subst_id(generator, mvlv_subst_id, lv_grid_dict):
"""Checks if MV-LV substation id of single LV generator is missing or invalid.
If so, a random one from existing stations in LV grids will be assigned.
Parameters
----------
generator : :class:`~.grid.components.Generator`
LV generator
mvlv_subst_id : :obj:`int`
MV-LV substation id
lv_grid_dict : :obj:`dict`
Dict of existing LV grids
Format: {:obj:`int`: :class:`~.grid.grids.LVGrid`}
Returns
-------
:class:`~.grid.grids.LVGrid`
LV grid of generator
"""
if mvlv_subst_id and not isnan(mvlv_subst_id):
# assume that given LA exists
try:
# get LV grid
lv_grid = lv_grid_dict[mvlv_subst_id]
# if no geom, use geom of station
if not generator.geom:
generator.geom = lv_grid.station.geom
logger.debug('Generator {} has no geom entry, the station\'s geom will be used.'
.format(generator.id)
)
return lv_grid
# if LA/LVGD does not exist, choose random LVGD and move generator to station of LVGD
# this occurs due to exclusion of LA with peak load < 1kW
except KeyError:
lv_grid = random.choice(list(lv_grid_dict.values()))
generator.geom = lv_grid.station.geom
logger.warning('Generator {} cannot be assigned to '
'non-existent LV Grid and was '
'allocated to a random LV Grid ({}); '
'geom was set to the station\'s geom.'
.format(repr(generator),
repr(lv_grid)))
return lv_grid
else:
lv_grid = random.choice(list(lv_grid_dict.values()))
generator.geom = lv_grid.station.geom
logger.warning('Generator {} has no mvlv_subst_id and was '
'allocated to a random LV Grid ({}); '
'geom was set to the station\'s geom.'
.format(repr(generator),
repr(lv_grid)))
return lv_grid
def _validate_generation():
"""Validate generators in updated grids
The validation uses the cumulative capacity of all generators.
"""
# ToDo: Validate conv. genos too!
# set capacity difference threshold
cap_diff_threshold = 10 ** -4
capacity_imported = generators_res_mv['electrical_capacity'].sum() + \
generators_res_lv['electrical_capacity'].sum() #+ \
#generators_conv_mv['capacity'].sum()
capacity_grid = 0
# MV genos
for geno in network.mv_grid.generators:
capacity_grid += geno.nominal_capacity
# LV genos
for lv_grid in network.mv_grid.lv_grids:
for geno in lv_grid.generators:
capacity_grid += geno.nominal_capacity
logger.debug('Cumulative generator capacity (updated): {} kW'
.format(str(round(capacity_imported, 1)))
)
if abs(capacity_imported - capacity_grid) > cap_diff_threshold:
raise ValueError('Cumulative capacity of imported generators ({} kW) '
'differ from cumulative capacity of generators '
'in updated grid ({} kW) by {} kW.'
.format(str(round(capacity_imported, 1)),
str(round(capacity_grid, 1)),
str(round(capacity_imported - capacity_grid, 1))
)
)
else:
logger.debug('Cumulative capacity of imported generators validated.')
def _validate_sample_geno_location():
if all(generators_res_lv['geom'].notnull()) \
and all(generators_res_mv['geom'].notnull()) \
and not generators_res_lv['geom'].empty \
and not generators_res_mv['geom'].empty:
# get geom of 1 random MV and 1 random LV generator and transform
sample_mv_geno_geom_shp = transform(proj2equidistant(network),
wkt_loads(generators_res_mv['geom']
.dropna()
.sample(n=1)
.item())
)
sample_lv_geno_geom_shp = transform(proj2equidistant(network),
wkt_loads(generators_res_lv['geom']
.dropna()
.sample(n=1)
.item())
)
# get geom of MV grid district
mvgd_geom_shp = transform(proj2equidistant(network),
network.mv_grid.grid_district['geom']
)
# check if MVGD contains geno
if not (mvgd_geom_shp.contains(sample_mv_geno_geom_shp) and
mvgd_geom_shp.contains(sample_lv_geno_geom_shp)):
raise ValueError('At least one imported generator is not located '
'in the MV grid area. Check compatibility of '
'grid and generator datasets.')
# make DB session
conn = connection(section=network.config['db_connection']['section'])
Session = sessionmaker(bind=conn)
session = Session()
srid = int(network.config['geo']['srid'])
oedb_data_source = network.config['data_source']['oedb_data_source']
scenario = network.generator_scenario
if oedb_data_source == 'model_draft':
# load ORM names
orm_conv_generators_name = network.config['model_draft']['conv_generators_prefix'] + \
scenario + \
network.config['model_draft']['conv_generators_suffix']
orm_re_generators_name = network.config['model_draft']['re_generators_prefix'] + \
scenario + \
network.config['model_draft']['re_generators_suffix']
# import ORMs
orm_conv_generators = model_draft.__getattribute__(orm_conv_generators_name)
orm_re_generators = model_draft.__getattribute__(orm_re_generators_name)
# set dummy version condition (select all generators)
orm_conv_generators_version = 1 == 1
orm_re_generators_version = 1 == 1
elif oedb_data_source == 'versioned':
# load ORM names
orm_conv_generators_name = network.config['versioned']['conv_generators_prefix'] + \
scenario + \
network.config['versioned']['conv_generators_suffix']
orm_re_generators_name = network.config['versioned']['re_generators_prefix'] + \
scenario + \
network.config['versioned']['re_generators_suffix']
data_version = network.config['versioned']['version']
# import ORMs
orm_conv_generators = supply.__getattribute__(orm_conv_generators_name)
orm_re_generators = supply.__getattribute__(orm_re_generators_name)
# set version condition
orm_conv_generators_version = orm_conv_generators.columns.version == data_version
orm_re_generators_version = orm_re_generators.columns.version == data_version
# Create filter for generation technologies
# ToDo: This needs to be removed when all generators can be imported
# (all generators in a scenario should be imported)
types_condition = orm_re_generators.columns.generation_type.in_(
['solar', 'wind'])
# get conventional and renewable generators
#generators_conv_mv = _import_conv_generators()
generators_res_mv, generators_res_lv = _import_res_generators(
types_condition)
#generators_mv = generators_conv_mv.append(generators_res_mv)
_validate_sample_geno_location()
_update_grids(network=network,
#generators_mv=generators_mv,
generators_mv=generators_res_mv,
generators_lv=generators_res_lv)
_validate_generation()
connect_mv_generators(network=network)
connect_lv_generators(network=network) | 0.002985 |
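Step 4 above accumulates new aggregated generators in a hand-built nested dict keyed by load area, voltage level, type and subtype. A small sketch of the same grouping idea with `collections.defaultdict` — the rows are made up and this is not eDisGo's data model:

from collections import defaultdict

def nested_dict():
    return defaultdict(nested_dict)

agg = nested_dict()
sample_rows = [  # hypothetical generator rows
    {'la_id': 1, 'voltage_level': 6, 'generation_type': 'solar',
     'generation_subtype': 'roof', 'id': 10, 'electrical_capacity': 30.0},
    {'la_id': 1, 'voltage_level': 6, 'generation_type': 'solar',
     'generation_subtype': 'roof', 'id': 11, 'electrical_capacity': 12.5},
]
for row in sample_rows:
    bucket = agg[row['la_id']][row['voltage_level']][row['generation_type']].setdefault(
        row['generation_subtype'], {'ids': [], 'capacity': 0.0})
    bucket['ids'].append(row['id'])
    bucket['capacity'] += row['electrical_capacity']

print(agg[1][6]['solar']['roof'])  # {'ids': [10, 11], 'capacity': 42.5}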
def daemon_connection_init(self, s_link, set_wait_new_conf=False):
"""Initialize a connection with the daemon for the provided satellite link
Initialize the connection (HTTP client) to the daemon and get its running identifier.
Returns True if it succeeds; if any error occurs or the daemon is inactive,
it returns False.
Assume the daemon should be reachable because we are initializing the connection...
as such, force set the link reachable property
If set_wait_new_conf is set, the daemon is requested to wait a new configuration if
we get a running identifier. This is used by the arbiter when a new configuration
must be dispatched
NB: if the daemon is configured as passive, or if it is a daemon link that is
inactive then it returns False without trying a connection.
:param s_link: link of the daemon to connect to
:type s_link: SatelliteLink
:param set_wait_new_conf: if True, the daemon is put into the wait-new-configuration state
:type set_wait_new_conf: bool
:return: True if the connection is established, else False
"""
logger.debug("Daemon connection initialization: %s %s", s_link.type, s_link.name)
# If the link is not active, I do not try to initialize the connection, it is just useless ;)
if not s_link.active:
logger.warning("%s '%s' is not active, do not initialize its connection!",
s_link.type, s_link.name)
return False
# Create the daemon connection
s_link.create_connection()
# Get the connection running identifier - first client / server communication
logger.debug("[%s] Getting running identifier for '%s'", self.name, s_link.name)
# Assume the daemon should be alive and reachable
# because we are initializing the connection...
s_link.alive = True
s_link.reachable = True
got_a_running_id = None
for _ in range(0, s_link.max_check_attempts):
got_a_running_id = s_link.get_running_id()
if got_a_running_id:
s_link.last_connection = time.time()
if set_wait_new_conf:
s_link.wait_new_conf()
break
time.sleep(0.3)
return got_a_running_id | 0.006321 |
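The loop above is a bounded poll: ask the daemon for a running identifier up to `max_check_attempts` times, sleeping briefly between attempts. A generic standalone sketch of that pattern (names are illustrative, not Alignak's API):

import time

def poll_until(probe, attempts=3, delay=0.3):
    # Call probe() up to `attempts` times; return its first truthy result, else the last falsy one.
    result = None
    for _ in range(attempts):
        result = probe()
        if result:
            break
        time.sleep(delay)
    return result

print(poll_until(lambda: 'fake-running-id', attempts=5))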
def _get_daily_message(self, dt, algo, metrics_tracker):
"""
Get a perf message for the given datetime.
"""
perf_message = metrics_tracker.handle_market_close(
dt,
self.data_portal,
)
perf_message['daily_perf']['recorded_vars'] = algo.recorded_vars
return perf_message | 0.005747 |
def _connect_uncached(self):
''' activates the connection object '''
if not HAVE_PARAMIKO:
raise errors.AnsibleError("paramiko is not installed")
user = self.runner.remote_user
vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (user, self.port, self.host), host=self.host)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
allow_agent = True
if self.runner.remote_pass is not None:
allow_agent = False
try:
private_key = None
key_filename = None
if self.runner.private_key:
private_key = paramiko.RSAKey.from_private_key(StringIO.StringIO(self.runner.private_key))
if self.runner.private_key_file:
key_filename = os.path.expanduser(self.runner.private_key_file)
ssh.connect(self.host, username=user, allow_agent=allow_agent, look_for_keys=True,
key_filename=key_filename, pkey=private_key,
password=self.runner.remote_pass,
timeout=self.runner.timeout, port=self.port)
except Exception, e:
msg = str(e)
if "PID check failed" in msg:
raise errors.AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
elif "Private key file is encrypted" in msg:
msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
user, self.host, self.port, msg)
raise errors.AnsibleConnectionFailed(msg)
else:
raise errors.AnsibleConnectionFailed(msg)
return ssh | 0.008806 |
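For comparison, a minimal Python 3 paramiko connection doing roughly what `_connect_uncached` does; the host, user and key path are placeholders:

import os
import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
    client.connect('example.com', port=22, username='deploy',
                   key_filename=os.path.expanduser('~/.ssh/id_rsa'),  # or password='...'
                   allow_agent=True, look_for_keys=True, timeout=10)
    _, stdout, _ = client.exec_command('uname -a')
    print(stdout.read().decode())
finally:
    client.close()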
def workflow_aggregate(graph: BELGraph,
node: BaseEntity,
key: Optional[str] = None,
tag: Optional[str] = None,
default_score: Optional[float] = None,
runs: Optional[int] = None,
aggregator: Optional[Callable[[Iterable[float]], float]] = None,
) -> Optional[float]:
"""Get the average score over multiple runs.
This function is very simple, and can be copied to do more interesting statistics over the :class:`Runner`
instances. To iterate over the runners themselves, see :func:`workflow`
:param graph: A BEL graph
:param node: The BEL node that is the focus of this analysis
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:param tag: The key for the nodes' data dictionaries where the scores will be put. Defaults to 'score'
:param default_score: The initial score for all nodes. This number can go up or down.
:param runs: The number of times to run the heat diffusion workflow. Defaults to 100.
:param aggregator: A function that aggregates a list of scores. Defaults to :func:`numpy.average`.
Could also use: :func:`numpy.mean`, :func:`numpy.median`, :func:`numpy.min`, :func:`numpy.max`
:return: The average score for the target node
"""
runners = workflow(graph, node, key=key, tag=tag, default_score=default_score, runs=runs)
scores = [runner.get_final_score() for runner in runners]
if not scores:
log.warning('Unable to run the heat diffusion workflow for %s', node)
return
if aggregator is None:
return np.average(scores)
return aggregator(scores) | 0.005461 |
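As the docstring notes, any reducer over the per-run scores can be supplied via `aggregator`. A tiny illustration with plain numpy and made-up scores:

import numpy as np

scores = [0.4, 0.7, 0.1, 0.9]  # hypothetical per-run final scores
print(np.average(scores))      # the default behaviour
print(np.median(scores))       # e.g. workflow_aggregate(..., aggregator=np.median)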
def start_step(self, step_name):
""" Start a step. """
if self.finished is not None:
raise AlreadyFinished()
step_data = self._get_step(step_name)
if step_data is not None:
if 'stop' in step_data:
raise StepAlreadyFinished()
else:
raise StepAlreadyStarted()
steps = copy.deepcopy(self.steps)
steps.append({
"start": datetime.utcnow(),
"name": step_name
})
self._save(steps=steps) | 0.003724 |
def _parse_ports(self, ports):
"""Parse ports string into the list
:param str ports: string of port pairs separated by ``;``, each pair joined by ``::``
:rtype: list[tuple[str, str]]
"""
if not ports:
return []
return [tuple(port_pair.split("::")) for port_pair in ports.strip(";").split(";")] | 0.010453 |
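Given the split logic above, the expected format is `;`-separated pairs whose halves are joined by `::`; for example (values are illustrative):

ports = "8080::80;2222::22;"
parsed = [tuple(pair.split("::")) for pair in ports.strip(";").split(";")]
print(parsed)  # [('8080', '80'), ('2222', '22')]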
def get(self, *args, **kwargs):
"""Handle reading of the model
:param args:
:param kwargs:
"""
# Create the model and fetch its data
self.model = self.get_model(kwargs.get('id'))
result = yield self.model.fetch()
# If model is not found, return 404
if not result:
LOGGER.debug('Not found')
self.not_found()
return
# Stub to check for read permissions
if not self.has_read_permission():
LOGGER.debug('Permission denied')
self.permission_denied()
return
# Add the headers and return the content as JSON
self.add_headers()
self.finish(self.model_json()) | 0.002706 |
def _update_id(record, new_id):
"""
Update a record id to new_id, also modifying the ID in record.description
"""
old_id = record.id
record.id = new_id
# At least for FASTA, record ID starts the description
record.description = re.sub('^' + re.escape(old_id), new_id, record.description)
return record | 0.005988 |
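A quick hedged sketch with Biopython showing the same two steps inline (the record values are made up):

import re
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

record = SeqRecord(Seq("ACGT"), id="seq1", description="seq1 example read")
new_id, old_id = "contig1", record.id
record.id = new_id                      # same logic as _update_id above
record.description = re.sub('^' + re.escape(old_id), new_id, record.description)
print(record.id, record.description)    # contig1 contig1 example read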
def has_role(user, *roles, **kwargs):
"""
Judge whether the user belongs to one of the given roles; if so, return the role object,
otherwise return False. kwargs will be passed to role_func.
"""
Role = get_model('role')
if isinstance(user, (unicode, str)):
User = get_model('user')
user = User.get(User.c.username==user)
for role in roles:
if isinstance(role, (str, unicode)):
role = Role.get(Role.c.name==role)
if not role:
continue
name = role.name
func = __role_funcs__.get(name, None)
if func:
if isinstance(func, (unicode, str)):
func = import_attr(func)
assert callable(func)
para = kwargs.copy()
para['user'] = user
flag = call_func(func, para)
if flag:
return role
flag = role.users.has(user)
if flag:
return role
flag = role.usergroups_has_user(user)
if flag:
return role
return False | 0.007765 |
def set_handler(self, handler):
"""
Connect with a coroutine, which is scheduled when connection is made.
This function will create a task, and when connection is closed,
the task will be canceled.
:param handler:
:return: None
"""
if self._handler:
raise Exception('Handler was already set')
if handler:
self._handler = async_task(handler, loop=self._loop) | 0.004405 |
def authorizeClientRequest(self, request):
"""
Checks the authorization permissions of the bearer token passed, and determines whether the sender is permitted to make the request housed in the payload.
Usage:
request = The header object from pfioh, or the whole request from pman. This is parsed for the bearer token
"""
token = ''
if self.serverType == 'socket':
token = self.getSocketServerToken(request)
elif self.serverType == 'http':
token = self.getHttpServerToken(request)
if token != '' and token is not None:
if token in self.__tokens:
if self.__tokens[token] == 'active':
return True, ()
elif self.__tokens[token] == 'revoked':
# token has been revoked
return False, (401, self.errorMessages[401], "This token has been revoked!")
else:
# token is untrusted/invalid
return False, (401, self.errorMessages[401], "")
else:
# token is required, but none was provided
return False, (400, self.errorMessages[400], "Authentication required! Client did not provide authentication.") | 0.005556 |
def combine(line, left, intersect, right):
"""Zip borders between items in `line`.
e.g. ('l', '1', 'c', '2', 'c', '3', 'r')
:param iter line: List to iterate.
:param left: Left border.
:param intersect: Column separator.
:param right: Right border.
:return: Yields combined objects.
"""
# Yield left border.
if left:
yield left
# Yield items with intersect characters.
if intersect:
try:
for j, i in enumerate(line, start=-len(line) + 1):
yield i
if j:
yield intersect
except TypeError: # Generator.
try:
item = next(line)
except StopIteration: # Was empty all along.
pass
else:
while True:
yield item
try:
peek = next(line)
except StopIteration:
break
yield intersect
item = peek
else:
for i in line:
yield i
# Yield right border.
if right:
yield right | 0.000856 |
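A quick check of what the generator yields, assuming `combine` above is in scope and using throw-away border characters:

print(''.join(combine(['1', '2', '3'], '<', '|', '>')))  # <1|2|3>
print(''.join(combine(iter('ab'), '[', '-', ']')))       # [a-b] (generator branch)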
def statfs(self, path):
"Return a statvfs(3) structure, for stat and df and friends"
# from fuse.py source code:
#
# class c_statvfs(Structure):
# _fields_ = [
# ('f_bsize', c_ulong), # preferred size of file blocks, in bytes
# ('f_frsize', c_ulong), # fundamental size of file blocks, in bytes
# ('f_blocks', c_fsblkcnt_t), # total number of blocks in the filesystem
# ('f_bfree', c_fsblkcnt_t), # number of free blocks
# ('f_bavail', c_fsblkcnt_t), # free blocks avail to non-superuser
# ('f_files', c_fsfilcnt_t), # total file nodes in file system
# ('f_ffree', c_fsfilcnt_t), # free file nodes in fs
# ('f_favail', c_fsfilcnt_t)] #
#
# On Mac OS X f_bsize and f_frsize must be a power of 2
# (minimum 512).
_blocksize = 512
_usage = self.client.usage
_fs_size = self.client.capacity
if _fs_size == -1: # unlimited
# Since backend is supposed to be unlimited,
# always return a half-full filesystem, but at least 1 TB)
_fs_size = max(2 * _usage, 1024 ** 4)
_bfree = ( _fs_size - _usage ) // _blocksize
return {
'f_bsize': _blocksize,
'f_frsize': _blocksize,
'f_blocks': _fs_size // _blocksize,
'f_bfree': _bfree,
'f_bavail': _bfree,
# 'f_files': c_fsfilcnt_t,
# 'f_ffree': c_fsfilcnt_t,
# 'f_favail': c_fsfilcnt_t
} | 0.003906 |
def eq_central_moments(n_counter, k_counter, dmu_over_dt, species, propensities, stoichiometry_matrix, max_order):
r"""
Function used to calculate the terms required for use in equations giving the time dependence of central moments.
The function returns a list containing the sum of the following terms in equation 9,
for each of the :math:`[n_1, ..., n_d]` combinations in eq. 9 where ... is ... # FIXME
.. math::
\mathbf{ {n \choose k} } (-1)^{ \mathbf{n-k} }
[ \alpha \frac{d\beta}{dt} + \beta \frac{d\alpha}{dt} ]
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:param dmu_over_dt: du/dt in paper
:param species: species matrix: y_0, y_1,..., y_d
:param propensities: propensities matrix
:param stoichiometry_matrix: stoichiometry matrix
:return: central_moments matrix with `(len(n_counter)-1)` rows and one column per each :math:`[n_1, ... n_d]` combination
"""
central_moments = []
# Loops through required combinations of moments (n1,...,nd)
# (does not include 0th order central moment as this is 1,
# or 1st order central moment as this is 0
# copy dmu_mat matrix as a list of rows vectors (1/species)
dmu_mat = [sp.Matrix(l).T for l in dmu_over_dt.tolist()]
d_beta_over_dt_calculator = DBetaOverDtCalculator(propensities,n_counter,stoichiometry_matrix, species)
for n_iter in n_counter:
# skip zeroth moment
if n_iter.order == 0 or n_iter.order > max_order:
continue
n_vec = n_iter.n_vector
# Find all moments in k_counter that are lower than the current n_iter
k_lower = [k for k in k_counter if n_iter >= k]
taylor_exp_mat = []
for k_iter in k_lower:
k_vec = k_iter.n_vector
# (n k) binomial term in equation 9
n_choose_k = make_k_chose_e(k_vec, n_vec)
# (-1)^(n-k) term in equation 9
minus_one_pow_n_minus_k = product([sp.Integer(-1) ** (n - m) for (n,m)
in zip(n_vec, k_vec)])
# Calculate alpha, dalpha_over_dt terms in equation 9
alpha = product([s ** (n - k) for s, n, k in zip(species, n_vec, k_vec)])
# eq 10 {(n - k) mu_i^(-1)} corresponds to {(n - k)/s}; s is the symbol for the mean of a species
# multiply the ith row of dmu_mat by alpha and sum to get dalpha_over_dt
dalpha_over_dt = sympy_sum_list([((n - k) / s) * alpha * mu_row for s, n, k, mu_row
in zip(species, n_vec, k_vec, dmu_mat)])
# e_counter contains elements of k_counter lower than the current k_iter
e_counter = [k for k in k_counter if k_iter >= k and k.order > 0]
dbeta_over_dt = d_beta_over_dt_calculator.get(k_iter.n_vector, e_counter)
# Calculate beta, dbeta_over_dt terms in equation 9
if len(e_counter) == 0:
beta = 1
else:
beta = k_iter.symbol
taylor_exp_mat.append(n_choose_k * minus_one_pow_n_minus_k * (alpha * dbeta_over_dt + beta * dalpha_over_dt))
# Taylorexp is a matrix which has an entry equal to
# the `n_choose_k * minus_one_pow_n_minus_k * (AdB/dt + beta dA/dt)` term in equation 9 for each k1,..,kd
# These are summed over to give the Taylor Expansion for each n1,..,nd combination in equation 9
central_moments.append(sum_of_cols(sp.Matrix(taylor_exp_mat)))
return sp.Matrix(central_moments) | 0.006392 |
def _get_receivers(self, sender):
''' filter only receiver functions which correspond to the provided sender '''
key = _make_id(sender)
receivers = []
for (receiver_key, sender_key), receiver in self.receivers.items():
if sender_key == key:
receivers.append(receiver)
return receivers | 0.008523 |
def last(symbols=None, token='', version=''):
'''Last provides trade data for executions on IEX. It is a near real time, intraday API that provides IEX last sale price, size and time.
Last is ideal for developers that need a lightweight stock quote.
https://iexcloud.io/docs/api/#last
Args:
symbols (string or list); Ticker(s) to request
token (string); Access token
version (string); API version
Returns:
dict: result
'''
symbols = _strToList(symbols)
if symbols:
return _getJson('tops/last?symbols=' + ','.join(symbols) + '%2b', token, version)
return _getJson('tops/last', token, version) | 0.004559 |
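A hedged usage sketch of the wrapper above; the token is a placeholder and a live call to IEX Cloud is made:

quotes = last(['AAPL', 'SPY'], token='<YOUR_IEX_TOKEN>')  # last-sale data for two symbols
everything = last(token='<YOUR_IEX_TOKEN>')               # all symbols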
def _(s: Influence) -> bool:
""" Check if an Influence statement is grounded """
return is_grounded(s.subj) and is_grounded(s.obj) | 0.007246 |
def _find_home_or_away(self, row):
"""
Determine whether the player is on the home or away team.
Next to every player is their school's name. This name can be matched
with the previously parsed home team's name to determine if the player
is a member of the home or away team.
Parameters
----------
row : PyQuery object
A PyQuery object representing a single row in a boxscore table for
a single player.
Returns
-------
str
Returns a ``string`` constant denoting whether the team plays for
the home or away team.
"""
name = row('td[data-stat="team"]').text().upper()
if name == self.home_abbreviation.upper():
return HOME
else:
return AWAY | 0.002401 |
def delete_invalid_route(self):
"""
Delete any invalid routes for this interface. An invalid route is
a left over when an interface is changed to a different network.
:return: None
"""
try:
routing = self._engine.routing.get(self.interface_id)
for route in routing:
if route.invalid or route.to_delete:
route.delete()
except InterfaceNotFound: # Only VLAN identifiers, so no routing
pass | 0.007634 |
def onboarding_message(**payload):
"""Create and send an onboarding welcome message to new users. Save the
time stamp of this message so we can update this message in the future.
"""
# Get WebClient so you can communicate back to Slack.
web_client = payload["web_client"]
# Get the id of the Slack user associated with the incoming event
user_id = payload["data"]["user"]["id"]
# Open a DM with the new user.
response = web_client.im_open(user_id)
channel = response["channel"]["id"]
# Post the onboarding message.
start_onboarding(web_client, user_id, channel) | 0.001634 |
def update_alias(self):
"""Update lambda alias to point to $LATEST."""
LOG.info('Updating alias %s to point to $LATEST', self.env)
try:
self.lambda_client.update_alias(FunctionName=self.app_name, Name=self.env, FunctionVersion='$LATEST')
except boto3.exceptions.botocore.exceptions.ClientError as error:
LOG.debug('Update alias error: %s', error)
LOG.info("Alias update failed. Retrying...")
raise | 0.006276 |
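For context, the underlying boto3 call looks roughly like this outside the class; the function and alias names are placeholders:

import boto3
from botocore.exceptions import ClientError

lambda_client = boto3.client('lambda')
try:
    lambda_client.update_alias(FunctionName='my-app', Name='dev', FunctionVersion='$LATEST')
except ClientError as error:
    print('Alias update failed, a retry decorator would re-raise here:', error)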
def help_center_article_translations(self, article_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/translations#list-translations"
api_path = "/api/v2/help_center/articles/{article_id}/translations.json"
api_path = api_path.format(article_id=article_id)
return self.call(api_path, **kwargs) | 0.011561 |
def persist_booking(booking, user):
"""
Ties an in-progress booking from a session to a user when the user logs in.
If we don't do this, the booking will be lost, because on a login, the
old session will be deleted and a new one will be created. Since the
booking has a FK to the session, it would be deleted as well when the user
logs in.
We assume that a user can only have one booking that is in-progress.
Therefore we will delete any existing in-progress bookings of this user
before tying the one from the session to the user.
TODO: Find a more generic solution for this, as this assumes that there is
a status called inprogress and that a user can only have one such booking.
:param booking: The booking that should be tied to the user.
:user: The user the booking should be tied to.
"""
if booking is not None:
existing_bookings = Booking.objects.filter(
user=user, booking_status__slug='inprogress').exclude(
pk=booking.pk)
existing_bookings.delete()
booking.session = None
booking.user = user
booking.save() | 0.000873 |
def stalta_pick(stream, stalen, ltalen, trig_on, trig_off, freqmin=False,
freqmax=False, debug=0, show=False):
"""
Basic sta/lta picker, suggest using alternative in obspy.
Simple sta/lta (short-term average/long-term average) picker, using
obspy's :func:`obspy.signal.trigger.classic_sta_lta` routine to generate
the characteristic function.
Currently very basic quick wrapper, there are many other (better) options
in obspy in the :mod:`obspy.signal.trigger` module.
:type stream: obspy.core.stream.Stream
:param stream: The stream to pick on, can be any number of channels.
:type stalen: float
:param stalen: Length of the short-term average window in seconds.
:type ltalen: float
:param ltalen: Length of the long-term average window in seconds.
:type trig_on: float
:param trig_on: sta/lta ratio to trigger a detection/pick
:type trig_off: float
:param trig_off: sta/lta ratio to turn the trigger off - no further picks\
will be made after exceeding trig_on until trig_off is reached.
:type freqmin: float
:param freqmin: Low-cut frequency in Hz for bandpass filter
:type freqmax: float
:param freqmax: High-cut frequency in Hz for bandpass filter
:type debug: int
:param debug: Debug output level from 0-5.
:type show: bool
:param show: Show picks on waveform.
:returns: :class:`obspy.core.event.event.Event`
.. rubric:: Example
>>> from obspy import read
>>> from eqcorrscan.utils.picker import stalta_pick
>>> st = read()
>>> event = stalta_pick(st, stalen=0.2, ltalen=4, trig_on=10,
... trig_off=1, freqmin=3.0, freqmax=20.0)
>>> print(event.creation_info.author)
EQcorrscan
.. warning::
This function is not designed for accurate picking, rather it can give
a first idea of whether picks may be possible. Proceed with caution.
"""
event = Event()
event.origins.append(Origin())
event.creation_info = CreationInfo(author='EQcorrscan',
creation_time=UTCDateTime())
event.comments.append(Comment(text='stalta'))
picks = []
for tr in stream:
# We are going to assume, for now, that if the pick is made on the
# horizontal channel then it is an S, otherwise we will assume it is
# a P-phase: obviously a bad assumption...
if tr.stats.channel[-1] == 'Z':
phase = 'P'
else:
phase = 'S'
if freqmin and freqmax:
tr.detrend('simple')
tr.filter('bandpass', freqmin=freqmin, freqmax=freqmax,
corners=3, zerophase=True)
df = tr.stats.sampling_rate
cft = classic_sta_lta(tr.data, int(stalen * df), int(ltalen * df))
if debug > 3:
plot_trigger(tr, cft, trig_on, trig_off)
triggers = trigger_onset(cft, trig_on, trig_off)
for trigger in triggers:
on = tr.stats.starttime + (trigger[0] / df)
# off = tr.stats.starttime + (trigger[1] / df)
wav_id = WaveformStreamID(station_code=tr.stats.station,
channel_code=tr.stats.channel,
network_code=tr.stats.network)
p = Pick(waveform_id=wav_id, phase_hint=phase, time=on)
if debug > 2:
print('Pick made:')
print(p)
picks.append(p)
# QC picks
pick_stations = list(set([pick.waveform_id.station_code
for pick in picks]))
for pick_station in pick_stations:
station_picks = [pick for pick in picks if
pick.waveform_id.station_code == pick_station]
# If P-pick is after S-picks, remove it.
p_time = [pick.time for pick in station_picks
if pick.phase_hint == 'P']
s_time = [pick.time for pick in station_picks
if pick.phase_hint == 'S']
if p_time > s_time:
p_pick = [pick for pick in station_picks if pick.phase_hint == 'P']
for pick in p_pick:
print('P pick after S pick, removing P pick')
picks.remove(pick)
if show:
plotting.pretty_template_plot(stream, picks=picks, title='Autopicks',
size=(8, 9))
event.picks = picks
if len(event.picks) > 0:
event.origins[0].time = min([pick.time for pick in event.picks]) - 1
# event.origins[0].latitude = float('nan')
# event.origins[0].longitude = float('nan')
# Set arbitrary origin time
return event | 0.000214 |
def compilevcf(args):
"""
%prog compilevcf samples.csv
Compile vcf results into master spreadsheet.
"""
p = OptionParser(compilevcf.__doc__)
p.add_option("--db", default="hg38", help="Use these lobSTR db")
p.add_option("--nofilter", default=False, action="store_true",
help="Do not filter the variants")
p.set_home("lobstr")
p.set_cpus()
p.set_aws_opts(store="hli-mv-data-science/htang/str-data")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
samples, = args
workdir = opts.workdir
store = opts.output_path
cleanup = not opts.nocleanup
filtered = not opts.nofilter
dbs = opts.db.split(",")
cwd = os.getcwd()
mkdir(workdir)
os.chdir(workdir)
samples = op.join(cwd, samples)
stridsfile = "STR.ids"
if samples.endswith((".vcf", ".vcf.gz")):
vcffiles = [samples]
else:
vcffiles = [x.strip() for x in must_open(samples)]
if not op.exists(stridsfile):
ids = []
for db in dbs:
ids.extend(STRFile(opts.lobstr_home, db=db).ids)
uids = uniqify(ids)
logging.debug("Combined: {} Unique: {}".format(len(ids), len(uids)))
fw = open(stridsfile, "w")
print("\n".join(uids), file=fw)
fw.close()
run_args = [(x, filtered, cleanup, store) for x in vcffiles]
cpus = min(opts.cpus, len(run_args))
p = Pool(processes=cpus)
for res in p.map_async(run_compile, run_args).get():
continue | 0.00065 |
def get_level_str(self):
''' format level str '''
if self.is_relative:
level_str = str(self.level) + "%"
else:
level_str = self.level
return level_str | 0.009709 |
def flush(signal_names, exclude, wait):
"""Send pending signals over the message bus.
If a list of SIGNAL_NAMES is specified, flushes only those
signals. If no SIGNAL_NAMES are specified, flushes all signals.
"""
signalbus = current_app.extensions['signalbus']
signal_names = set(signal_names)
exclude = set(exclude)
models_to_flush = signalbus.get_signal_models()
if signal_names and exclude:
click.echo('Warning: Specified both SIGNAL_NAMES and exclude option.')
if signal_names:
wrong_signal_names = signal_names - {m.__name__ for m in models_to_flush}
models_to_flush = [m for m in models_to_flush if m.__name__ in signal_names]
else:
wrong_signal_names = exclude - {m.__name__ for m in models_to_flush}
for name in wrong_signal_names:
click.echo('Warning: A signal with name "{}" does not exist.'.format(name))
models_to_flush = [m for m in models_to_flush if m.__name__ not in exclude]
logger = logging.getLogger(__name__)
try:
if wait is not None:
signal_count = signalbus.flush(models_to_flush, wait=max(0.0, wait))
else:
signal_count = signalbus.flush(models_to_flush)
except Exception:
logger.exception('Caught error while sending pending signals.')
sys.exit(1)
if signal_count == 1:
logger.warning('%i signal has been successfully processed.', signal_count)
elif signal_count > 1:
logger.warning('%i signals have been successfully processed.', signal_count) | 0.004502 |
def make_preprocessing_fn(output_dir, features, keep_target):
"""Makes a preprocessing function.
Args:
output_dir: folder path that contains the vocab and stats files.
features: the features dict
keep_target: if False, the target transform is dropped from the output
Returns:
a function that takes a dict of input tensors
"""
def preprocessing_fn(inputs):
"""Preprocessing function.
Args:
inputs: dictionary of raw input tensors
Returns:
A dictionary of transformed tensors
"""
stats = json.loads(
file_io.read_file_to_string(
os.path.join(output_dir, STATS_FILE)).decode())
result = {}
for name, transform in six.iteritems(features):
transform_name = transform['transform']
source_column = transform['source_column']
if transform_name == TARGET_TRANSFORM:
if not keep_target:
continue
if file_io.file_exists(os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column)):
transform_name = 'one_hot'
else:
transform_name = 'identity'
if transform_name == 'identity':
result[name] = inputs[source_column]
elif transform_name == 'scale':
result[name] = _scale(
inputs[name],
min_x_value=stats['column_stats'][source_column]['min'],
max_x_value=stats['column_stats'][source_column]['max'],
output_min=transform.get('value', 1) * (-1),
output_max=transform.get('value', 1))
elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
vocab, ex_count = read_vocab_file(
os.path.join(output_dir, VOCAB_ANALYSIS_FILE % source_column))
if transform_name == MULTI_HOT_TRANSFORM:
separator = transform.get('separator', ' ')
tokens = tf.string_split(inputs[source_column], separator)
result[name] = _string_to_int(tokens, vocab)
else:
result[name] = _string_to_int(inputs[source_column], vocab)
elif transform_name == IMAGE_TRANSFORM:
make_image_to_vec_fn = _make_image_to_vec_tito(
name, checkpoint=transform.get('checkpoint', None))
result[name] = make_image_to_vec_fn(inputs[source_column])
else:
raise ValueError('unknown transform %s' % transform_name)
return result
return preprocessing_fn | 0.008696 |
def great_circle(**kwargs):
"""
Named arguments:
distance = distance to travel, or numpy array of distances
azimuth = angle, in DEGREES of HEADING from NORTH, or numpy array of azimuths
latitude = latitude, in DECIMAL DEGREES, or numpy array of latitudes
longitude = longitude, in DECIMAL DEGREES, or numpy array of longitudes
rmajor = radius of earth's major axis. default=6378137.0 (WGS84)
rminor = radius of earth's minor axis. default=6356752.3142 (WGS84)
Returns a dictionary with:
'latitude' in decimal degrees
'longitude' in decimal degrees
'reverse_azimuth' in decimal degrees
"""
distance = kwargs.pop('distance')
azimuth = np.radians(kwargs.pop('azimuth'))
latitude = np.radians(kwargs.pop('latitude'))
longitude = np.radians(kwargs.pop('longitude'))
rmajor = kwargs.pop('rmajor', 6378137.0)
rminor = kwargs.pop('rminor', 6356752.3142)
f = (rmajor - rminor) / rmajor
vector_pt = np.vectorize(vinc_pt)
lat_result, lon_result, angle_result = vector_pt(f, rmajor,
latitude,
longitude,
azimuth,
distance)
return {'latitude': np.degrees(lat_result),
'longitude': np.degrees(lon_result),
'reverse_azimuth': np.degrees(angle_result)} | 0.005195 |
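# Usage sketch for great_circle(), assuming the vinc_pt helper used above is
# importable from the same module; the coordinate values are illustrative only.
import numpy as np

point = great_circle(distance=100000.0,   # travel 100 km
                     azimuth=45.0,        # heading north-east, in degrees
                     latitude=52.0,
                     longitude=13.4)
print(point['latitude'], point['longitude'], point['reverse_azimuth'])

# Arrays of equal length are accepted as well, since vinc_pt is vectorized:
points = great_circle(distance=np.array([1e4, 2e4]),
                      azimuth=np.array([0.0, 90.0]),
                      latitude=np.array([52.0, 52.0]),
                      longitude=np.array([13.4, 13.4]))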
def spectrodir(self, filetype, **kwargs):
"""Returns :envvar:`SPECTRO_REDUX` or :envvar:`BOSS_SPECTRO_REDUX`
depending on the value of `run2d`.
Parameters
----------
filetype : str
File type parameter.
run2d : int or str
2D Reduction ID.
Returns
-------
spectrodir : str
Value of the appropriate environment variable.
"""
if str(kwargs['run2d']) in ('26', '103', '104'):
return os.environ['SPECTRO_REDUX']
else:
return os.environ['BOSS_SPECTRO_REDUX'] | 0.003284 |
def gen_random_float(minimum, maximum, decimals=2):
"""
    Given a float range, randomly generate and return a float within the closed interval.
    Limited by the precision of random.random, at most 15 decimal places are supported.
    :param:
        * minimum: (float) lower bound of the range
        * maximum: (float) upper bound of the range
        * decimals: (int) number of decimal places, 2 by default
    :return:
        * random_float: (float) the generated random float
    Example::
        print('--- gen_random_float demo ---')
        print(gen_random_float(1.0, 9.0))
        print(gen_random_float(1.0, 9.0, decimals=10))
        print(gen_random_float(1.0, 9.0, decimals=20))
        print('---')
    Output::
        --- gen_random_float demo ---
        6.08
        6.8187342239
        2.137902497554043
        ---
"""
if not (isinstance(minimum, float) and isinstance(maximum, float)):
        raise ValueError('params minimum and maximum should be float, but got minimum: {} maximum: {}'.
                         format(type(minimum), type(maximum)))
    if not isinstance(decimals, int):
        raise ValueError('param decimals should be an int, but got {}'.format(type(decimals)))
    # precision is limited to at most 15 decimal places (random.random limit)
    decimals = 15 if decimals > 15 else decimals
    # round() may drop trailing digits, so retry until the precision matches
while True:
random_float = random.uniform(minimum, maximum)
random_float = round(random_float, decimals)
if len(str(random_float).split('.')[-1]) == decimals:
return random_float | 0.002187 |
def reduce(self):
"""Removes results rows whose n-grams are contained in larger
n-grams."""
self._logger.info('Reducing the n-grams')
# This does not make use of any pandas functionality; it
# probably could, and if so ought to.
data = {}
labels = {}
# Derive a convenient data structure from the rows.
for row_index, row in self._matches.iterrows():
work = row[constants.WORK_FIELDNAME]
siglum = row[constants.SIGLUM_FIELDNAME]
labels[work] = row[constants.LABEL_FIELDNAME]
witness_data = data.setdefault((work, siglum), {})
witness_data[row[constants.NGRAM_FIELDNAME]] = {
'count': int(row[constants.COUNT_FIELDNAME]),
'size': int(row[constants.SIZE_FIELDNAME])}
for witness_data in data.values():
ngrams = list(witness_data.keys())
ngrams.sort(key=lambda ngram: witness_data[ngram]['size'],
reverse=True)
for ngram in ngrams:
if witness_data[ngram]['count'] > 0:
self._reduce_by_ngram(witness_data, ngram)
# Recreate rows from the modified data structure.
rows = []
for (work, siglum), witness_data in data.items():
for ngram, ngram_data in witness_data.items():
count = ngram_data['count']
if count > 0:
rows.append(
{constants.NGRAM_FIELDNAME: ngram,
constants.SIZE_FIELDNAME: ngram_data['size'],
constants.WORK_FIELDNAME: work,
constants.SIGLUM_FIELDNAME: siglum,
constants.COUNT_FIELDNAME: count,
constants.LABEL_FIELDNAME: labels[work]})
self._matches = pd.DataFrame(
rows, columns=constants.QUERY_FIELDNAMES) | 0.00103 |
def fromInputs(self, received):
"""
Convert some random strings received from a browser into structured
data, using a list of parameters.
@param received: a dict of lists of strings, i.e. the canonical Python
        form of a web form post.
@rtype: L{Deferred}
@return: A Deferred which will be called back with a dict mapping
parameter names to coerced parameter values.
"""
results = []
for parameter in self.parameters:
name = parameter.name.encode('ascii')
d = maybeDeferred(parameter.fromInputs, received)
d.addCallback(lambda value, name=name: (name, value))
results.append(d)
return gatherResults(results).addCallback(dict) | 0.002581 |
def _get_association_classes(self, namespace):
"""
        Return an iterator of association classes from the class repository,
        i.e. the classes that carry the Association qualifier.
        Does NOT copy, so these are the objects stored in the repository. User
        functions MUST NOT modify these classes.
        Returns: a generator where each yield returns a single
        association class
"""
class_repo = self._get_class_repo(namespace)
# associator_classes = []
for cl in six.itervalues(class_repo):
if 'Association' in cl.qualifiers:
yield cl
return | 0.00314 |
def extract_blocking(obj):
"""Extract index and watch from :class:`Blocking`
Parameters:
obj (Blocking): the blocking object
Returns:
tuple: index and watch
"""
if isinstance(obj, tuple):
try:
a, b = obj
        except ValueError:
            raise TypeError("Not a Blocking object")
else:
a, b = obj, None
return extract_attr(a, keys=["Index"]), b | 0.004878 |
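# Call-shape sketch for extract_blocking(): it accepts either a (blocking, watch)
# tuple or a bare blocking object. extract_attr() is defined elsewhere and is
# assumed here to pull the "Index" key from a dict-like object.
index, watch = extract_blocking(({"Index": 42}, "5s"))  # tuple form
index, watch = extract_blocking({"Index": 42})          # bare form -> watch is None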
def process_pair(self, key, value):
""" Process a (key, value) pair """
old_key = key
old_value = value
# Create key intelligently
keyparts = self.bspacere.split(key)
# print keyparts
strippable = False
lastpart = keyparts[-1]
if lastpart.find('\\ ') != -1:
keyparts[-1] = lastpart.replace('\\','')
        # If no escaped (backslash) space is found at the end, but a
        # trailing space is present, mark the key for stripping
elif lastpart and lastpart[-1] == ' ':
strippable = True
key = ''.join(keyparts)
if strippable:
key = key.strip()
old_key = old_key.strip()
old_value = self.unescape(old_value)
value = self.unescape(value)
# Patch from N B @ ActiveState
curlies = re.compile(r'\$?\{.+?\}')
found_variables = curlies.findall(value)
for found_variable in found_variables:
if found_variable.startswith('$'):
source_key = found_variable[2:-1]
else:
source_key = found_variable[1:-1]
if source_key in self._properties:
value = value.replace(found_variable, self._properties[source_key], 1)
self._properties[key] = value.strip()
# Check if an entry exists in pristine keys
if key in self._keymap:
old_key = self._keymap.get(key)
self._origprops[old_key] = old_value.strip()
else:
self._origprops[old_key] = old_value.strip()
# Store entry in keymap
self._keymap[key] = old_key
if key not in self._keyorder:
self._keyorder.append(key) | 0.00235 |
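# Self-contained sketch of the ${...} substitution step used in process_pair()
# above: the same regex finds ${name} (and $-prefixed) references so they can be
# replaced from an already-parsed properties dict.
import re

curlies = re.compile(r'\$?\{.+?\}')
props = {'base.dir': '/opt/app'}
value = '${base.dir}/logs'
for found_variable in curlies.findall(value):
    source_key = found_variable[2:-1] if found_variable.startswith('$') else found_variable[1:-1]
    if source_key in props:
        value = value.replace(found_variable, props[source_key], 1)
print(value)  # -> /opt/app/logs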
def _build(self, x, prev_state):
"""Connects the core to the graph.
Args:
x: Input `Tensor` of shape `(batch_size, input_size)`.
prev_state: Previous state. This could be a `Tensor`, or a tuple of
`Tensor`s.
Returns:
The tuple `(output, state)` for this core.
Raises:
ValueError: if the `Tensor` `x` does not have rank 2.
"""
x.get_shape().with_rank(2)
self._batch_size = x.get_shape().as_list()[0]
self._dtype = x.dtype
x_zeros = tf.concat(
[x, tf.zeros(
shape=(self._batch_size, 1), dtype=self._dtype)], 1)
x_ones = tf.concat(
[x, tf.ones(
shape=(self._batch_size, 1), dtype=self._dtype)], 1)
# Weights for the halting signal
halting_linear = basic.Linear(name="halting_linear", output_size=1)
body = functools.partial(
self._body, halting_linear=halting_linear, x_ones=x_ones)
cumul_halting_init = tf.zeros(shape=(self._batch_size, 1),
dtype=self._dtype)
iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
core_output_size = [x.value for x in self._core.output_size]
out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size),
dtype=self._dtype)
cumul_state_init = _nested_zeros_like(prev_state)
remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype)
(unused_final_x, final_out, unused_final_state, final_cumul_state,
unused_final_halting, final_iteration, final_remainder) = tf.while_loop(
self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init,
cumul_halting_init, iteration_init, remainder_init])
act_output = basic.Linear(
name="act_output_linear", output_size=self._output_size)(final_out)
return (act_output, (final_iteration, final_remainder)), final_cumul_state | 0.00104 |
def sp_msg(cmd, pipe=None, data=None):
"""Produces skypipe protocol multipart message"""
msg = [SP_HEADER, cmd]
if pipe is not None:
msg.append(pipe)
if data is not None:
msg.append(data)
return msg | 0.004274 |
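# sp_msg() only assembles the multipart frame list; pipe and data are appended
# when present. SP_HEADER is a module-level constant defined elsewhere.
print(sp_msg('HELLO'))                       # [SP_HEADER, 'HELLO']
print(sp_msg('DATA', pipe='p1', data='x'))   # [SP_HEADER, 'DATA', 'p1', 'x']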
def _check_negatives(numbers):
"Raise warning for negative numbers."
    # Materialize the negatives: a lazy filter object would be exhausted by the
    # truthiness check and leave nothing for the join below.
    negatives = [x for x in numbers if x is not None and x < 0]
    if negatives:
neg_values = ', '.join(map(str, negatives))
msg = 'Found negative value(s): {0!s}. '.format(neg_values)
msg += 'While not forbidden, the output will look unexpected.'
warnings.warn(msg) | 0.002653 |
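# Quick check of the warning path: a negative entry triggers a warning but does
# not raise, so rendering can continue.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _check_negatives([1, -2, None, 3])
print(caught[0].message)  # mentions the offending value -2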
def encode(url):
"""Encode URL."""
parts = extract(url)
return construct(URL(parts.scheme,
parts.username,
parts.password,
_idna_encode(parts.subdomain),
_idna_encode(parts.domain),
_idna_encode(parts.tld),
parts.port,
quote(parts.path.encode('utf-8')),
_encode_query(parts.query),
quote(parts.fragment.encode('utf-8')),
None)) | 0.001698 |
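# Usage sketch, assuming the surrounding module also provides extract(),
# construct() and URL() as used above; the exact output is indicative only.
print(encode(u'https://пример.испытание/путь?q=значение'))
# -> scheme/host are IDNA-encoded, path/query/fragment are percent-encoded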
def list_service_certificates(self, service_name):
'''
Lists all of the service certificates associated with the specified
hosted service.
service_name:
Name of the hosted service.
'''
_validate_not_none('service_name', service_name)
return self._perform_get(
'/' + self.subscription_id + '/services/hostedservices/' +
_str(service_name) + '/certificates',
Certificates) | 0.004211 |
def get_ladder_metadata(session, url):
"""Get ladder metadata."""
parsed = make_scrape_request(session, url)
tag = parsed.find('a', href=re.compile(LADDER_ID_REGEX))
return {
'id': int(tag['href'].split('/')[-1]),
'slug': url.split('/')[-1],
'url': url
} | 0.003356 |
def run_actions(self, actions):
"""
Runs the given lists of attached actions and instance actions on the client.
:param actions: Actions to apply.
:type actions: list[dockermap.map.action.ItemAction]
:return: Where the result is not ``None``, returns the output from the client. Note that this is a generator
and needs to be consumed in order for all actions to be performed.
:rtype: collections.Iterable[dict]
"""
policy = self._policy
for action in actions:
config_id = action.config_id
config_type = config_id.config_type
client_config = policy.clients[action.client_name]
client = client_config.get_client()
c_map = policy.container_maps[config_id.map_name]
if config_type == ItemType.CONTAINER:
config = c_map.get_existing(config_id.config_name)
item_name = policy.cname(config_id.map_name, config_id.config_name, config_id.instance_name)
elif config_type == ItemType.VOLUME:
a_parent_name = config_id.config_name if c_map.use_attached_parent_name else None
item_name = policy.aname(config_id.map_name, config_id.instance_name, parent_name=a_parent_name)
if client_config.features['volumes']:
config = c_map.get_existing_volume(config_id.config_name)
else:
config = c_map.get_existing(config_id.config_name)
elif config_type == ItemType.NETWORK:
config = c_map.get_existing_network(config_id.config_name)
item_name = policy.nname(config_id.map_name, config_id.config_name)
elif config_type == ItemType.IMAGE:
config = None
item_name = format_image_tag(config_id.config_name, config_id.instance_name)
else:
raise ValueError("Invalid configuration type.", config_id.config_type)
for action_type in action.action_types:
try:
a_method = self.action_methods[(config_type, action_type)]
except KeyError:
raise ActionTypeException(config_id, action_type)
action_config = ActionConfig(action.client_name, action.config_id, client_config, client,
c_map, config)
try:
res = a_method(action_config, item_name, **action.extra_data)
except Exception:
exc_info = sys.exc_info()
raise ActionException(exc_info, action.client_name, config_id, action_type)
if res is not None:
yield ActionOutput(action.client_name, config_id, action_type, res) | 0.004954 |
def InitAgeCheck(self):
"""make an interactive grid in which users can edit ages"""
self.panel = wx.Panel(self, style=wx.SIMPLE_BORDER)
text = """Step 6:
Fill in or correct any cells with information about ages.
The column for magic_method_codes can take multiple values in the form of a colon-delimited list.
You may use the drop-down menus to add as many values as needed in these columns.
(See Help button for details)
**Denotes controlled vocabulary """
label = wx.StaticText(self.panel, label=text)
self.items = self.er_magic_data.data_lists[self.er_magic_data.age_type][0]
self.grid_builder = grid_frame2.GridBuilder(self.er_magic_data, 'age',
self.er_magic_data.headers, self.panel, 'location')
self.age_grid = self.grid_builder.make_grid(incl_pmag=False)
self.age_grid.InitUI()
self.grid_builder.add_data_to_grid(self.age_grid, 'age', incl_pmag=False)
self.grid_builder.add_age_data_to_grid()
self.grid = self.age_grid
#
# make it impossible to edit the 1st and 3rd columns
for row in range(self.age_grid.GetNumberRows()):
for col in (0, 2):
self.age_grid.SetReadOnly(row, col, True)
# initialize all needed drop-down menus
self.drop_down_menu = drop_down_menus.Menus("age", self, self.age_grid, None)
# re-set first column name
self.age_grid.SetColLabelValue(0, 'er_site_name')
### Create Buttons ###
hbox_one = wx.BoxSizer(wx.HORIZONTAL)
self.helpButton = wx.Button(self.panel, label="Help")
self.Bind(wx.EVT_BUTTON, lambda event: self.on_helpButton(event, "ErMagicAgeHelp.html"), self.helpButton)
hbox_one.Add(self.helpButton)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
self.saveButton = wx.Button(self.panel, id=-1, label='Save')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_saveButton(event, self.age_grid), self.saveButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.continueButton = wx.Button(self.panel, id=-1, label='Save and continue')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_continueButton(event, self.age_grid, next_dia=None), self.continueButton)
self.backButton = wx.Button(self.panel, wx.ID_ANY, "&Back")
previous_dia = self.InitLocCheck
self.Bind(wx.EVT_BUTTON, lambda event: self.on_backButton(event, previous_dia), self.backButton)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
hboxok.Add(self.saveButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.cancelButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.continueButton, flag=wx.RIGHT, border=10)
hboxok.Add(self.backButton)
### Make Containers ###
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(label, flag=wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, border=20)#, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=20)
vbox.Add(hbox_one, flag=wx.BOTTOM, border=10)
vbox.Add(hboxok, flag=wx.BOTTOM, border=10)
vbox.Add(self.age_grid, flag=wx.TOP|wx.BOTTOM, border=10) # EXPAND ??
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.hbox_all.AddSpacer(20)
self.panel.SetSizer(self.hbox_all)
#if sys.platform in ['win32', 'win64']:
# self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Centre()
self.Show()
self.Hide()
self.Show() | 0.006674 |
def facts_refresh():
'''
    Reload the facts dictionary from the device. Usually only needed if
    the device configuration is changed by some other actor.
This function will also refresh the facts stored in the salt grains.
CLI Example:
.. code-block:: bash
salt 'device_name' junos.facts_refresh
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
conn.facts_refresh()
except Exception as exception:
ret['message'] = 'Execution failed due to "{0}"'.format(exception)
ret['out'] = False
return ret
ret['facts'] = __proxy__['junos.get_serialized_facts']()
try:
__salt__['saltutil.sync_grains']()
except Exception as exception:
log.error('Grains could not be updated due to "%s"', exception)
return ret | 0.001193 |
def read_file_bytes(input_file_path):
"""
Read the file at the given file path
and return its contents as a byte string,
or ``None`` if an error occurred.
:param string input_file_path: the file path
:rtype: bytes
"""
contents = None
try:
with io.open(input_file_path, "rb") as input_file:
contents = input_file.read()
except:
pass
return contents | 0.004762 |
def formfield(self, **kwargs):
"""
Returns a :class:`PlaceholderFormField` instance for this database Field.
"""
defaults = {
'label': capfirst(self.verbose_name),
'help_text': self.help_text,
'required': not self.blank,
}
defaults.update(kwargs)
return PlaceholderFormField(slot=self.slot, plugins=self._plugins, **defaults) | 0.009592 |
def resolve(self, geoid, id_only=False):
'''
Resolve a GeoZone given a GeoID.
        The validity is resolved from the given GeoID,
        i.e. it finds a zone that is valid at the GeoID's validity date,
        resolves the `latest` alias,
        or uses `latest` when no validity is given.
If `id_only` is True,
the result will be the resolved GeoID
instead of the resolved zone.
'''
level, code, validity = geoids.parse(geoid)
qs = self(level=level, code=code)
if id_only:
qs = qs.only('id')
if validity == 'latest':
result = qs.latest()
else:
result = qs.valid_at(validity).first()
return result.id if id_only and result else result | 0.002621 |
def load(self):
"""Load a file in text mode"""
self.meta.resolved_path = self.find_data(self.meta.path)
if not self.meta.resolved_path:
raise ImproperlyConfigured("Data file '{}' not found".format(self.meta.path))
print("Loading:", self.meta.path)
with open(self.meta.resolved_path, 'r') as fd:
return fd.read() | 0.007732 |
def hamming_distance(s1, s2):
"""Return the Hamming distance between equal-length sequences"""
# print(s1,s2)
if len(s1) != len(s2):
raise ValueError("Undefined for sequences of unequal length")
return sum(el1 != el2 for el1, el2 in zip(s1.upper(), s2.upper())) | 0.003509 |
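# Two quick checks of hamming_distance(); the comparison is case-insensitive
# because both sequences are upper-cased before zipping.
print(hamming_distance("karolin", "kathrin"))  # 3
print(hamming_distance("ABCD", "abcd"))        # 0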
def run(coro, loop=None):
"""
Convenient shortcut alias to ``loop.run_until_complete``.
Arguments:
coro (coroutine): coroutine object to schedule.
loop (asyncio.BaseEventLoop): optional event loop to use.
Defaults to: ``asyncio.get_event_loop()``.
Returns:
mixed: returned value by coroutine.
Usage::
async def mul_2(num):
return num * 2
paco.run(mul_2(4))
# => 8
"""
loop = loop or asyncio.get_event_loop()
return loop.run_until_complete(coro) | 0.001805 |
def w16_circuit() -> qf.Circuit:
"""
Return a circuit that prepares the the 16-qubit W state using\
sqrt(iswaps) and local gates, respecting linear topology
"""
gates = [
qf.X(7),
qf.ISWAP(7, 8) ** 0.5,
qf.S(8),
qf.Z(8),
qf.SWAP(7, 6),
qf.SWAP(6, 5),
qf.SWAP(5, 4),
qf.SWAP(8, 9),
qf.SWAP(9, 10),
qf.SWAP(10, 11),
qf.ISWAP(4, 3) ** 0.5,
qf.S(3),
qf.Z(3),
qf.ISWAP(11, 12) ** 0.5,
qf.S(12),
qf.Z(12),
qf.SWAP(3, 2),
qf.SWAP(4, 5),
qf.SWAP(11, 10),
qf.SWAP(12, 13),
qf.ISWAP(2, 1) ** 0.5,
qf.S(1),
qf.Z(1),
qf.ISWAP(5, 6) ** 0.5,
qf.S(6),
qf.Z(6),
qf.ISWAP(10, 9) ** 0.5,
qf.S(9),
qf.Z(9),
qf.ISWAP(13, 14) ** 0.5,
qf.S(14),
qf.Z(14),
qf.ISWAP(1, 0) ** 0.5,
qf.S(0),
qf.Z(0),
qf.ISWAP(2, 3) ** 0.5,
qf.S(3),
qf.Z(3),
qf.ISWAP(5, 4) ** 0.5,
qf.S(4),
qf.Z(4),
qf.ISWAP(6, 7) ** 0.5,
qf.S(7),
qf.Z(7),
qf.ISWAP(9, 8) ** 0.5,
qf.S(8),
qf.Z(8),
qf.ISWAP(10, 11) ** 0.5,
qf.S(11),
qf.Z(11),
qf.ISWAP(13, 12) ** 0.5,
qf.S(12),
qf.Z(12),
qf.ISWAP(14, 15) ** 0.5,
qf.S(15),
qf.Z(15),
]
circ = qf.Circuit(gates)
return circ | 0.000659 |
def latex(expr, cache=None, **settings):
r"""Return a LaTeX representation of the given object / expression
Args:
expr: Expression to print
cache (dict or None): dictionary to use for caching
show_hs_label (bool or str): Whether to a label for the Hilbert space
of `expr`. By default (``show_hs_label=True``), the label is shown
as a superscript. It can be shown as a subscript with
``show_hs_label='subscript'`` or suppressed entirely
(``show_hs_label=False``)
tex_op_macro (str): macro to use for formatting operator symbols.
Must accept 'name' as a format key.
tex_textop_macro (str): macro to use for formatting multi-letter
operator names.
        tex_sop_macro (str): macro to use for formatting super-operator symbols
tex_textsop_macro (str): macro to use for formatting multi-letter
super-operator names
tex_identity_sym (str): macro for the identity symbol
tex_use_braket (bool): If True, use macros from the
`braket package
<https://ctan.org/tex-archive/macros/latex/contrib/braket>`_. Note
that this will not automatically render in IPython Notebooks, but
it is recommended when generating latex for a document.
tex_frac_for_spin_labels (bool): Whether to use '\frac' when printing
basis state labels for spin Hilbert spaces
Examples:
>>> A = OperatorSymbol('A', hs=1); B = OperatorSymbol('B', hs=1)
>>> latex(A + B)
'\\hat{A}^{(1)} + \\hat{B}^{(1)}'
>>> latex(A + B, cache={A: 'A', B: 'B'})
'A + B'
>>> latex(A + B, show_hs_label='subscript')
'\\hat{A}_{(1)} + \\hat{B}_{(1)}'
>>> latex(A + B, show_hs_label=False)
'\\hat{A} + \\hat{B}'
>>> latex(LocalSigma(0, 1, hs=1))
'\\left\\lvert 0 \\middle\\rangle\\!\\middle\\langle 1 \\right\\rvert^{(1)}'
>>> latex(LocalSigma(0, 1, hs=1), sig_as_ketbra=False)
'\\hat{\\sigma}_{0,1}^{(1)}'
>>> latex(A + B, tex_op_macro=r'\Op{{{name}}}')
'\\Op{A}^{(1)} + \\Op{B}^{(1)}'
>>> CNOT = OperatorSymbol('CNOT', hs=1)
>>> latex(CNOT)
'\\text{CNOT}^{(1)}'
>>> latex(CNOT, tex_textop_macro=r'\Op{{{name}}}')
'\\Op{CNOT}^{(1)}'
>>> A = SuperOperatorSymbol('A', hs=1)
>>> latex(A)
'\\mathrm{A}^{(1)}'
>>> latex(A, tex_sop_macro=r'\SOp{{{name}}}')
'\\SOp{A}^{(1)}'
>>> Lindbladian = SuperOperatorSymbol('Lindbladian', hs=1)
>>> latex(Lindbladian)
'\\mathrm{Lindbladian}^{(1)}'
>>> latex(Lindbladian, tex_textsop_macro=r'\SOp{{{name}}}')
'\\SOp{Lindbladian}^{(1)}'
>>> latex(IdentityOperator)
'\\mathbb{1}'
>>> latex(IdentityOperator, tex_identity_sym=r'\identity')
'\\identity'
>>> latex(LocalSigma(0, 1, hs=1), tex_use_braket=True)
'\\Ket{0}\\!\\Bra{1}^{(1)}'
>>> spin = SpinSpace('s', spin=(1, 2))
>>> up = SpinBasisKet(1, 2, hs=spin)
>>> latex(up)
'\\left\\lvert +1/2 \\right\\rangle^{(s)}'
>>> latex(up, tex_frac_for_spin_labels=True)
'\\left\\lvert +\\frac{1}{2} \\right\\rangle^{(s)}'
Note that the accepted parameters and their default values may be changed
through :func:`init_printing` or :func:`configure_printing`
"""
try:
if cache is None and len(settings) == 0:
return latex.printer.doprint(expr)
else:
printer = latex._printer_cls(cache, settings)
return printer.doprint(expr)
except AttributeError:
# init_printing was not called. Setting up defaults
latex._printer_cls = QnetLatexPrinter
latex.printer = latex._printer_cls()
return latex(expr, cache, **settings) | 0.000513 |
def _construct_retry(method_config, retry_codes, retry_params, retry_names):
"""Helper for ``construct_settings()``.
Args:
method_config (dict): A dictionary representing a single ``methods``
entry of the standard API client config file. (See
``construct_settings()`` for information on this yaml.)
retry_codes (dict): A dictionary parsed from the ``retry_codes`` entry
of the standard API client config file. (See ``construct_settings()``
for information on this yaml.)
retry_params (dict): A dictionary parsed from the ``retry_params`` entry
of the standard API client config file. (See ``construct_settings()``
for information on this yaml.)
retry_names (dict): A dictionary mapping the string names used in the
standard API client config file to API response status codes.
Returns:
Optional[RetryOptions]: The retry options, if applicable.
"""
if method_config is None:
return None
codes = None
if retry_codes and 'retry_codes_name' in method_config:
codes_name = method_config['retry_codes_name']
if codes_name in retry_codes and retry_codes[codes_name]:
codes = [retry_names[name] for name in retry_codes[codes_name]]
else:
codes = []
backoff_settings = None
if retry_params and 'retry_params_name' in method_config:
params_name = method_config['retry_params_name']
if params_name and params_name in retry_params:
backoff_settings = gax.BackoffSettings(**retry_params[params_name])
return gax.RetryOptions(
backoff_settings=backoff_settings,
retry_codes=codes,
) | 0.000587 |
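# Sketch of the inputs _construct_retry() expects, mirroring the layout of a
# standard API client config file. The retry_params field names follow the
# usual BackoffSettings keys, and the status-code numbers in retry_names are
# illustrative assumptions, not taken from a real service config.
method_config = {'retry_codes_name': 'idempotent', 'retry_params_name': 'default'}
retry_codes = {'idempotent': ['DEADLINE_EXCEEDED', 'UNAVAILABLE']}
retry_params = {'default': {
    'initial_retry_delay_millis': 100,
    'retry_delay_multiplier': 1.3,
    'max_retry_delay_millis': 60000,
    'initial_rpc_timeout_millis': 20000,
    'rpc_timeout_multiplier': 1.0,
    'max_rpc_timeout_millis': 20000,
    'total_timeout_millis': 600000,
}}
retry_names = {'DEADLINE_EXCEEDED': 4, 'UNAVAILABLE': 14}
options = _construct_retry(method_config, retry_codes, retry_params, retry_names)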
def _write_ccr(self, f, g, level: int):
'''
Write a CCR to file "g" from file "f" with level "level".
Currently, only handles gzip compression.
Parameters:
f : file
Uncompressed file to read from
        g : file
            File to write the compressed data into
level : int
The level of the compression from 0 to 9
Returns: None
'''
f.seek(8)
data = f.read()
uSize = len(data)
section_type = CDF.CCR_
rfuA = 0
cData = gzip.compress(data, level)
block_size = CDF.CCR_BASE_SIZE64 + len(cData)
cprOffset = 0
ccr1 = bytearray(32)
#ccr1[0:4] = binascii.unhexlify(CDF.V3magicNUMBER_1)
#ccr1[4:8] = binascii.unhexlify(CDF.V3magicNUMBER_2c)
ccr1[0:8] = struct.pack('>q', block_size)
ccr1[8:12] = struct.pack('>i', section_type)
ccr1[12:20] = struct.pack('>q', cprOffset)
ccr1[20:28] = struct.pack('>q', uSize)
ccr1[28:32] = struct.pack('>i', rfuA)
g.seek(0, 2)
g.write(ccr1)
g.write(cData)
cprOffset = self._write_cpr(g, CDF.GZIP_COMPRESSION, level)
self._update_offset_value(g, 20, 8, cprOffset) | 0.003145 |
def call(symbol, **kwargs):
"""Calls/executes BGPS public API identified by given symbol and passes
given kwargs as param.
"""
LOG.info("API method %s called with args: %s", symbol, str(kwargs))
# TODO(PH, JK) improve the way api function modules are loaded
from . import all # noqa
if not is_call_registered(symbol):
message = 'Did not find any method registered by symbol %s' % symbol
raise MethodNotFound(message)
if not symbol.startswith('core') and not CORE_MANAGER.started:
raise CoreNotStarted(desc='CoreManager is not active.')
call = get_call(symbol)
try:
return call(**kwargs)
except BGPSException as r:
LOG.error(traceback.format_exc())
raise r
except Exception as e:
LOG.error(traceback.format_exc())
raise ApiException(desc=str(e)) | 0.001163 |
def close(self):
"""Cleanup client resources and disconnect from MongoDB.
On MongoDB >= 3.6, end all server sessions created by this client by
sending one or more endSessions commands.
Close all sockets in the connection pools and stop the monitor threads.
If this instance is used again it will be automatically re-opened and
the threads restarted.
.. versionchanged:: 3.6
End all server sessions created by this client.
"""
session_ids = self._topology.pop_all_sessions()
if session_ids:
self._end_sessions(session_ids)
# Stop the periodic task thread and then run _process_periodic_tasks
# to send pending killCursor requests before closing the topology.
self._kill_cursors_executor.close()
self._process_periodic_tasks()
self._topology.close() | 0.00224 |
def encode(self, payload):
"""
Returns an encoded token for the given payload dictionary.
"""
token = jwt.encode(payload, self.signing_key, algorithm=self.algorithm)
return token.decode('utf-8') | 0.008547 |
def layer_depth( lat, lon, layerID="LID-BOTTOM"):
"""Returns layer depth at lat / lon (degrees)
where lat/lon may be arrays (of equal size).
Depths are returned in metres.
"""
## Must wrap longitude from 0 to 360 ...
lon1 = np.array(lon)%360.0
lat1 = np.array(lat)
# ## Must wrap longitude from -180 to 180 ...
#
# lon1[np.where(lon1 > 180.0)] = 360.0 - lon1[np.where(lon1 > 180.0)]
#
data, err = _interpolator.interpolate( np.radians(lon1), np.radians(lat1),
_litho_data[l1_layer_decode[layerID], l1_data_decode["DEPTH"]], order=1 )
return data | 0.01252 |
def get_my_moderation(self, *args, **kwargs):
"""Return a get_content generator of subreddits.
The Subreddits generated are those where the session's user is a
moderator.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['my_mod_subreddits'], *args,
**kwargs) | 0.004367 |
def QueryService(svc_name):
"""Query service and get its config."""
hscm = win32service.OpenSCManager(None, None,
win32service.SC_MANAGER_ALL_ACCESS)
result = None
try:
hs = win32serviceutil.SmartOpenService(hscm, svc_name,
win32service.SERVICE_ALL_ACCESS)
result = win32service.QueryServiceConfig(hs)
win32service.CloseServiceHandle(hs)
finally:
win32service.CloseServiceHandle(hscm)
return result | 0.013834 |
def find_pad_index(self, array):
"""Find padding index.
Args:
array (list): integer list.
Returns:
idx: padding index.
Examples:
>>> array = [1, 2, 0]
>>> self.find_pad_index(array)
2
"""
try:
return list(array).index(self.pad_value)
except ValueError:
return len(array) | 0.004808 |
def get_series_names(self, column_indices = [], column_names = []):
'''Returns the series' names corresponding to column_indices and column_names.
"names" here are:
- strings for single-indexed dataframes; or
- tuples for multi-indexed dataframes.
If both parameters are empty then all column names are returned.
'''
n = []
if not column_indices and not column_names:
for k, v in sorted(self.series_names.iteritems()):
# Iterate by index to preserve document order
if v != self.reference_series:
n.append(k)
else:
s = set([self.series_names[x] for x in column_indices])
t = set([self.series_index[x] for x in column_names])
n = sorted(s.union(t))
assert(n)
return [self.series_names[x] for x in n] | 0.007743 |
def factorgraph_viz(d):
"""
Map the dictionary into factorgraph-viz format. See https://github.com/mbforbes/factorgraph-viz
:param d: The dictionary
:return: The formatted dictionary
"""
m = defaultdict(list)
for node in d['nodes']:
m['nodes'].append(dict(
id=node['id'],
type='rv'
))
for factor in d['factors']:
m['nodes'].append(dict(
id=factor['id'],
type='fac'
))
for source in factor['sources']:
m['links'].append(dict(
source=source,
target=factor['id']
))
if factor['sink']:
m['links'].append(dict(
source=factor['id'],
target=factor['sink']
))
return dict(m) | 0.00325 |
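# Sketch of the dictionary shape factorgraph_viz() consumes, inferred from the
# keys it reads ('nodes', 'factors', 'sources', 'sink'); the ids are made up.
d = {
    'nodes': [{'id': 'x1'}, {'id': 'x2'}],
    'factors': [{'id': 'f1', 'sources': ['x1'], 'sink': 'x2'}],
}
factorgraph_viz(d)
# {'nodes': [{'id': 'x1', 'type': 'rv'}, {'id': 'x2', 'type': 'rv'},
#            {'id': 'f1', 'type': 'fac'}],
#  'links': [{'source': 'x1', 'target': 'f1'}, {'source': 'f1', 'target': 'x2'}]}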
def _get_impact_info(vcf_reader):
"""Retrieve impact parsing information from INFO header.
"""
ImpactInfo = collections.namedtuple("ImpactInfo", "header, gclass, id")
KEY_2_CLASS = {
'CSQ': geneimpacts.VEP,
'ANN': geneimpacts.SnpEff,
'BCSQ': geneimpacts.BCFT}
for l in (x.strip() for x in _from_bytes(vcf_reader.raw_header).split("\n")):
if l.startswith("##INFO"):
            patt = re.compile(r'(\w+)=("[^"]+"|[^,]+)')
stub = l.split("=<")[1].rstrip(">")
d = dict(patt.findall(_from_bytes(stub)))
if d["ID"] in KEY_2_CLASS:
return ImpactInfo(_parse_impact_header(d), KEY_2_CLASS[d["ID"]], d["ID"]) | 0.007092 |
def embeddable(self, path, variant):
"""Is the asset embeddable ?"""
name, ext = os.path.splitext(path)
font = ext in FONT_EXTS
if not variant:
return False
if not (re.search(settings.EMBED_PATH, path.replace('\\', '/')) and self.storage.exists(path)):
return False
if ext not in EMBED_EXTS:
return False
if not (font or len(self.encoded_content(path)) < settings.EMBED_MAX_IMAGE_SIZE):
return False
return True | 0.007648 |
def image(cam):
"""Extract first image of input stream to jpg file.
Args:
cam: Input stream of raw rosbag messages.
Returns:
File instance for first image of input stream.
"""
# Set output stream title and pull first message
yield marv.set_header(title=cam.topic)
msg = yield marv.pull(cam)
if msg is None:
return
# Deserialize raw ros message
pytype = get_message_type(cam)
rosmsg = pytype()
rosmsg.deserialize(msg.data)
# Write image to jpeg and push it to output stream
name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:])
imgfile = yield marv.make_file(name)
img = imgmsg_to_cv2(rosmsg, "rgb8")
cv2.imwrite(imgfile.path, img, (cv2.IMWRITE_JPEG_QUALITY, 60))
yield marv.push(imgfile) | 0.001267 |
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
ApiCli.get_arguments(self)
self._alarm_id = self.args.alarm_id if self.args.alarm_id is not None else None | 0.013453 |
def init_i18n (loc=None):
"""Initialize i18n with the configured locale dir. The environment
variable LOCPATH can also specify a locale dir.
@return: None
"""
if 'LOCPATH' in os.environ:
locdir = os.environ['LOCPATH']
else:
locdir = os.path.join(get_install_data(), 'share', 'locale')
i18n.init(configdata.name.lower(), locdir, loc=loc)
# install translated log level names
import logging
logging.addLevelName(logging.CRITICAL, _('CRITICAL'))
logging.addLevelName(logging.ERROR, _('ERROR'))
logging.addLevelName(logging.WARN, _('WARN'))
logging.addLevelName(logging.WARNING, _('WARNING'))
logging.addLevelName(logging.INFO, _('INFO'))
logging.addLevelName(logging.DEBUG, _('DEBUG'))
logging.addLevelName(logging.NOTSET, _('NOTSET')) | 0.00246 |
def get_data(url, gallery_dir):
"""Persistent dictionary usage to retrieve the search indexes"""
# shelve keys need to be str in python 2
if sys.version_info[0] == 2 and isinstance(url, unicode):
url = url.encode('utf-8')
cached_file = os.path.join(gallery_dir, 'searchindex')
search_index = shelve.open(cached_file)
if url in search_index:
data = search_index[url]
else:
data = _get_data(url)
search_index[url] = data
search_index.close()
return data | 0.001916 |
def _on_connection_made(self):
"""
Gets called when the TCP connection to kik's servers is done and we are connected.
Now we might initiate a login request or an auth request.
"""
if self.username is not None and self.password is not None and self.kik_node is not None:
# we have all required credentials, we can authenticate
log.info("[+] Establishing authenticated connection using kik node '{}'...".format(self.kik_node))
message = login.EstablishAuthenticatedSessionRequest(self.kik_node, self.username, self.password, self.device_id_override)
self.initial_connection_payload = message.serialize()
else:
self.initial_connection_payload = '<k anon="">'.encode()
self.connection.send_raw_data(self.initial_connection_payload) | 0.007101 |
def forwards(self, orm):
"Write your forwards methods here."
for a in orm.Article.objects.all():
if a.updated:
a.last_updated = a.updated
a.save(force_update=True) | 0.008969 |
def isUrl(urlString):
"""
Attempts to return whether a given URL string is valid by checking
for the presence of the URL scheme and netloc using the urlparse
module, and then using a regex.
From http://stackoverflow.com/questions/7160737/
"""
parsed = urlparse.urlparse(urlString)
urlparseValid = parsed.netloc != '' and parsed.scheme != ''
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return regex.match(urlString) and urlparseValid | 0.001252 |
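# Quick checks of isUrl(); note that it returns None rather than False when the
# regex does not match, so wrap it in bool() for a strict boolean.
print(bool(isUrl('https://example.com/path?q=1')))  # True
print(bool(isUrl('not a url')))                     # False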
def whereless(self, fieldname, value):
"""
Returns a new DataTable with rows only where the value at
`fieldname` < `value`.
"""
return self.mask([elem < value for elem in self[fieldname]]) | 0.008772 |
def _filtering_result_checked(self, by_or):
'''Check if post passes all / at_least_one (by_or parameter) filter(s).
        Filters are evaluated on an only-if-necessary ("lazy") basis.'''
filters, results = it.imap(set, ( self.feed.filters.all(),
self.filtering_results.values_list('filter', flat=True) ))
# Check if conclusion can already be made, based on cached results.
if results.issubset(filters):
# If at least one failed/passed test is already there, and/or outcome is defined.
try: return self._filtering_result(by_or)
except IndexError: # inconclusive until results are consistent
if filters == results: return not by_or
# Consistency check / update.
if filters != results:
# Drop obsolete (removed, unbound from feed)
# filters' results (they WILL corrupt outcome).
self.filtering_results.filter(filter__in=results.difference(filters)).delete()
# One more try, now that results are only from feed filters' subset.
try: return self._filtering_result(by_or)
except IndexError: pass
# Check if any filter-results are not cached yet, create them (perform actual filtering).
# Note that independent filters applied first, since
# crossrefs should be more resource-hungry in general.
for filter_obj in sorted(filters.difference(results), key=op.attrgetter('base.crossref')):
filter_op = FilterResult(filter=filter_obj, post=self, result=filter_obj.handler(self))
filter_op.save()
if filter_op.result == by_or: return by_or # return as soon as first passed / failed
# Final result
try: return self._filtering_result(by_or)
except IndexError: return not by_or | 0.02934 |
def set_desired_state(self, state):
"""Update the desired state of a unit.
Args:
state (str): The desired state for the unit, must be one of ``_STATES``
Returns:
str: The updated state
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
ValueError: An invalid value for ``state`` was provided
"""
if state not in self._STATES:
raise ValueError(
'state must be one of: {0}'.format(
self._STATES
))
# update our internal structure
self._data['desiredState'] = state
# if we have a name, then we came from the server
# and we have a handle to an active client
# Then update our selves on the server
if self._is_live():
self._update('_data', self._client.set_unit_desired_state(self.name, self.desiredState))
# Return the state
return self._data['desiredState'] | 0.003922 |
def ParseOptions(cls, options, analysis_plugin):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
analysis_plugin (VirusTotalAnalysisPlugin): analysis plugin to configure.
Raises:
BadConfigObject: when the output module object is of the wrong type.
BadConfigOption: when a configuration parameter fails validation or
when unable to connect to VirusTotal.
"""
if not isinstance(analysis_plugin, virustotal.VirusTotalAnalysisPlugin):
raise errors.BadConfigObject(
'Analysis plugin is not an instance of VirusTotalAnalysisPlugin')
api_key = cls._ParseStringOption(options, 'virustotal_api_key')
if not api_key:
raise errors.BadConfigOption(
'VirusTotal API key not specified. Try again with '
'--virustotal-api-key.')
analysis_plugin.SetAPIKey(api_key)
enable_rate_limit = getattr(
options, 'virustotal_free_rate_limit', cls._DEFAULT_RATE_LIMIT)
if enable_rate_limit:
analysis_plugin.EnableFreeAPIKeyRateLimit()
lookup_hash = cls._ParseStringOption(
options, 'virustotal_hash', default_value=cls._DEFAULT_HASH)
analysis_plugin.SetLookupHash(lookup_hash)
if not analysis_plugin.TestConnection():
raise errors.BadConfigOption('Unable to connect to VirusTotal') | 0.003693 |
def vcsNodeState_originator_switch_info_switchIdentifier(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcsNodeState = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
originator_switch_info = ET.SubElement(vcsNodeState, "originator-switch-info")
switchIdentifier = ET.SubElement(originator_switch_info, "switchIdentifier")
switchIdentifier.text = kwargs.pop('switchIdentifier')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.008576 |
def installUpdate(self):
""" Install the newest version of Plex Media Server. """
        # We could extend this, but it is of limited use since it sometimes
        # requires user action through a GUI.
part = '/updater/apply'
release = self.check_for_update(force=True, download=True)
if release and release.version != self.version:
# figure out what method this is..
return self.query(part, method=self._session.put) | 0.004246 |
def parse_cookies(response, host):
"""
Sticks cookies to a response.
"""
cookie_pie = []
try:
for cookie in response.headers['set-cookie']:
cookie_jar = {}
name_val, *rest = cookie.split(';')
name, value = name_val.split('=', 1)
cookie_jar['name'] = name.strip()
cookie_jar['value'] = value
for item in rest:
try:
name, value = item.split('=')
if value.startswith('.'):
value = value[1:]
cookie_jar[name.lower().lstrip()] = value
except ValueError:
cookie_jar[item.lower().lstrip()] = True
cookie_pie.append(cookie_jar)
response.cookies = [Cookie(host, x) for x in cookie_pie]
except KeyError:
pass | 0.001155 |
def login_or_register(
client: GMatrixClient,
signer: Signer,
prev_user_id: str = None,
prev_access_token: str = None,
) -> User:
"""Login to a Raiden matrix server with password and displayname proof-of-keys
- Username is in the format: 0x<eth_address>(.<suffix>)?, where the suffix is not required,
but a deterministic (per-account) random 8-hex string to prevent DoS by other users registering
our address
- Password is the signature of the server hostname, verified by the server to prevent account
creation spam
- Displayname currently is the signature of the whole user_id (including homeserver), to be
verified by other peers. May include in the future other metadata such as protocol version
Params:
client: GMatrixClient instance configured with desired homeserver
signer: raiden.utils.signer.Signer instance for signing password and displayname
prev_user_id: (optional) previous persisted client.user_id. Must match signer's account
prev_access_token: (optional) previous persistend client.access_token for prev_user_id
Returns:
Own matrix_client.User
"""
server_url = client.api.base_url
server_name = urlparse(server_url).netloc
base_username = to_normalized_address(signer.address)
_match_user = re.match(
f'^@{re.escape(base_username)}.*:{re.escape(server_name)}$',
prev_user_id or '',
)
if _match_user: # same user as before
log.debug('Trying previous user login', user_id=prev_user_id)
client.set_access_token(user_id=prev_user_id, token=prev_access_token)
try:
client.api.get_devices()
except MatrixRequestError as ex:
log.debug(
'Couldn\'t use previous login credentials, discarding',
prev_user_id=prev_user_id,
_exception=ex,
)
else:
prev_sync_limit = client.set_sync_limit(0)
client._sync() # initial_sync
client.set_sync_limit(prev_sync_limit)
log.debug('Success. Valid previous credentials', user_id=prev_user_id)
return client.get_user(client.user_id)
elif prev_user_id:
log.debug(
'Different server or account, discarding',
prev_user_id=prev_user_id,
current_address=base_username,
current_server=server_name,
)
# password is signed server address
password = encode_hex(signer.sign(server_name.encode()))
rand = None
# try login and register on first 5 possible accounts
for i in range(JOIN_RETRIES):
username = base_username
if i:
if not rand:
rand = Random() # deterministic, random secret for username suffixes
# initialize rand for seed (which requires a signature) only if/when needed
rand.seed(int.from_bytes(signer.sign(b'seed')[-32:], 'big'))
username = f'{username}.{rand.randint(0, 0xffffffff):08x}'
try:
client.login(username, password, sync=False)
prev_sync_limit = client.set_sync_limit(0)
client._sync() # when logging, do initial_sync with limit=0
client.set_sync_limit(prev_sync_limit)
log.debug(
'Login',
homeserver=server_name,
server_url=server_url,
username=username,
)
break
except MatrixRequestError as ex:
if ex.code != 403:
raise
log.debug(
'Could not login. Trying register',
homeserver=server_name,
server_url=server_url,
username=username,
)
try:
client.register_with_password(username, password)
log.debug(
'Register',
homeserver=server_name,
server_url=server_url,
username=username,
)
break
except MatrixRequestError as ex:
if ex.code != 400:
raise
log.debug('Username taken. Continuing')
continue
else:
raise ValueError('Could not register or login!')
name = encode_hex(signer.sign(client.user_id.encode()))
user = client.get_user(client.user_id)
user.set_display_name(name)
return user | 0.002881 |
def opt_space(M_E, r=None, niter=50, tol=1e-6, print_out=False):
'''
Implementation of the OptSpace matrix completion algorithm.
An algorithm for Matrix Reconstruction from a partially revealed set.
    Sparse treatment of matrices is removed because of indexing problems in Python.
Args:
M_E: 2D numpy array; The partially revealed matrix.
Matrix with zeroes at the unrevealed indices.
r: The rank to be used for reconstruction. If left empty, the rank is guessed in the program.
niter: Maximum number of iterations.
tol: Stop iterations if norm((XSY' - M_E) * E, 'fro') / sqrt(|E|) < tol, where
E_{ij} = 1 if M_{ij} is revealed and zero otherwise,
|E| is the size of the revealed set.
Returns: The following
X: A M_E.shape[0]xr numpy array
S: An rxr numpy array
Y: A M_E.shape[1]xr numpy matrix such that M_hat = X*S*Y'
errs: A vector containing norm((XSY' - M_E) * E, 'fro') / sqrt(|E|) at each iteration.
'''
n, m = M_E.shape
# construct the revealed set
E = np.zeros(M_E.shape)
E[np.nonzero(M_E)] = 1
eps = np.count_nonzero(E) / np.sqrt(m * n)
if r is None:
print('Rank not specified. Trying to guess ...')
r = guess_rank(M_E)
print('Using Rank : %d' % r)
m0 = 10000
rho = 0
rescal_param = np.sqrt(np.count_nonzero(
E) * r / np.linalg.norm(M_E, ord='fro')**2)
M_E = M_E * rescal_param
if print_out:
print('Trimming ...')
M_Et = copy.deepcopy(M_E)
d = E.sum(0)
d_ = np.mean(d)
for col in range(m):
if E[:, col].sum() > 2 * d_:
            nonzero_ind_list = np.nonzero(E[:, col])[0]
            p = np.random.permutation(len(nonzero_ind_list))
            M_Et[nonzero_ind_list[p[int(np.ceil(2 * d_)):]], col] = 0
d = E.sum(1)
d_ = np.mean(d)
for row in range(n):
if E[row, :].sum() > 2 * d_:
            nonzero_ind_list = np.nonzero(E[row, :])[0]
            p = np.random.permutation(len(nonzero_ind_list))
            M_Et[row, nonzero_ind_list[p[int(np.ceil(2 * d_)):]]] = 0
if print_out:
print('Sparse SVD ...')
X0, S0, Y0 = svds_descending(M_Et, r)
del M_Et
# Initial Guess
X0 = X0 * np.sqrt(n)
Y0 = Y0 * np.sqrt(m)
S0 = S0 / eps
if print_out:
print('Iteration\tFit Error')
# Gradient Descent
X = copy.deepcopy(X0)
Y = copy.deepcopy(Y0)
S = getoptS(X, Y, M_E, E)
errs = [None] * (niter + 1)
errs[0] = np.linalg.norm(
(M_E - np.dot(np.dot(X, S), Y.T)) * E, ord='fro') / np.sqrt(np.count_nonzero(E))
if print_out:
print('0\t\t\t%e' % errs[0])
for i in range(niter):
# Compute the Gradient
W, Z = gradF_t(X, Y, S, M_E, E, m0, rho)
# Line search for the optimum jump length
t = getoptT(X, W, Y, Z, S, M_E, E, m0, rho)
X = X + t * W
Y = Y + t * Z
S = getoptS(X, Y, M_E, E)
# Compute the distortion
errs[i + 1] = np.linalg.norm((M_E - np.dot(np.dot(X, S), Y.T))
* E, ord='fro') / np.sqrt(np.count_nonzero(E))
if print_out:
print('%d\t\t\t%e' % (i + 1, errs[i + 1]))
if abs(errs[i + 1] - errs[i]) < tol:
break
S = S / rescal_param
return X, S, Y, errs | 0.001995 |
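# Demo sketch for opt_space(): build a rank-2 matrix, hide roughly half of the
# entries by zeroing them (zero marks "unrevealed" here) and reconstruct. The
# helper functions used above (guess_rank, svds_descending, getoptS, gradF_t,
# getoptT) are assumed to be importable alongside opt_space.
import numpy as np

rng = np.random.RandomState(0)
M = rng.randn(50, 2).dot(rng.randn(2, 40))     # rank-2 ground truth
mask = rng.rand(50, 40) < 0.5                  # reveal ~50% of the entries
M_E = M * mask
X, S, Y, errs = opt_space(M_E, r=2, niter=30, tol=1e-6)
M_hat = X.dot(S).dot(Y.T)
print(np.linalg.norm(M - M_hat) / np.linalg.norm(M))  # small relative error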
def load_with_scipy(file, data_name):
    """
Loads data from a netcdf file.
Parameters
----------
file : string or file-like
The name of the netcdf file to open.
data_name : string
The name of the data to extract from the netcdf file.
Returns
-------
data : ndarray
The desired data from the netcdf file as ndarray with nan for missing values.
"""
logger.debug('Loading data {} of netcdf file {} with scipy.io.'.format(data_name, file))
f = scipy.io.netcdf.netcdf_file(file, 'r')
data_netcdf = f.variables[data_name]
data = np.array(data_netcdf.data, copy = True)
data[data == data_netcdf.missing_value] = np.nan
f.close()
return data | 0.006676 |
def find_vulnerabilities(
cfg_list,
blackbox_mapping_file,
sources_and_sinks_file,
interactive=False,
nosec_lines=defaultdict(set)
):
"""Find vulnerabilities in a list of CFGs from a trigger_word_file.
Args:
cfg_list(list[CFG]): the list of CFGs to scan.
blackbox_mapping_file(str)
sources_and_sinks_file(str)
interactive(bool): determines if we ask the user about blackbox functions not in the mapping file.
Returns:
A list of vulnerabilities.
"""
vulnerabilities = list()
definitions = parse(sources_and_sinks_file)
with open(blackbox_mapping_file) as infile:
blackbox_mapping = json.load(infile)
for cfg in cfg_list:
find_vulnerabilities_in_cfg(
cfg,
definitions,
Lattice(cfg.nodes),
blackbox_mapping,
vulnerabilities,
interactive,
nosec_lines
)
if interactive:
with open(blackbox_mapping_file, 'w') as outfile:
json.dump(blackbox_mapping, outfile, indent=4)
return vulnerabilities | 0.001792 |
def decode_aes256(cipher, iv, data, encryption_key):
"""
Decrypt AES-256 bytes.
    Allowed ciphers are: 'ecb', 'cbc'.
    For 'ecb' the iv is not used and should be set to "".
"""
if cipher == 'cbc':
aes = AES.new(encryption_key, AES.MODE_CBC, iv)
elif cipher == 'ecb':
aes = AES.new(encryption_key, AES.MODE_ECB)
else:
raise ValueError('Unknown AES mode')
d = aes.decrypt(data)
# http://passingcuriosity.com/2009/aes-encryption-in-python-with-m2crypto/
unpad = lambda s: s[0:-ord(d[-1:])]
return unpad(d) | 0.003534 |
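# Round-trip sketch using PyCryptodome's AES with PKCS#7-style padding, so the
# unpad lambda inside decode_aes256() has a pad byte to strip.
from Crypto.Cipher import AES
import os

key = os.urandom(32)
iv = os.urandom(16)
plaintext = b'secret data'
pad_len = 16 - len(plaintext) % 16
padded = plaintext + bytes([pad_len]) * pad_len
ciphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(padded)
assert decode_aes256('cbc', iv, ciphertext, key) == plaintext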
def _bind_variables(self, instance, space):
"""
Bind related variables to the instance
"""
instance.api = self
if space:
instance.space = space
return instance | 0.009132 |
def load(self, read_tuple_name):
"""Load RNF values from a read tuple name.
Args:
read_tuple_name (str): Read tuple name which the values are taken from.
"""
self.prefix_width = 0
self.read_tuple_id_width = 0
self.genome_id_width = 0
self.chr_id_width = 0
self.coor_width = 0
parts = read_tuple_name.split("__")
self.prefix_width = len(parts[0])
self.read_tuple_id_width = len(parts[1])
segments = parts[2][1:-1].split("),(")
for segment in segments:
int_widths = list(map(len, segment.split(",")))
self.genome_id_width = max(self.genome_id_width, int_widths[0])
self.chr_id_width = max(self.chr_id_width, int_widths[1])
self.coor_width = max(self.coor_width, int_widths[2], int_widths[3]) | 0.008383 |
def list_permissions(self, group_name=None, resource=None,
url_prefix=None, auth=None, session=None, send_opts=None):
"""List the permission sets for the logged in user
Optionally filter by resource or group.
Args:
group_name (string): Name of group to filter on
resource (intern.resource.boss.BossResource): Identifies which data model object to filter on
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[dict]): List of dictionaries of permission sets
"""
filter_params = {}
if group_name:
filter_params["group"] = group_name
if resource:
filter_params.update(resource.get_dict_route())
req = self.get_permission_request('GET', 'application/json',
url_prefix, auth, query_params=filter_params)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code != 200:
msg = "Failed to get permission sets. "
if group_name:
msg = "{} Group: {}".format(msg, group_name)
if resource:
msg = "{} Resource: {}".format(msg, resource.name)
msg = '{}, got HTTP response: ({}) - {}'.format(msg, resp.status_code, resp.text)
raise HTTPError(msg, request=req, response=resp)
else:
return resp.json()["permission-sets"] | 0.004582 |
def mid_lvl_cmds_send(self, target, hCommand, uCommand, rCommand, force_mavlink1=False):
'''
Mid Level commands sent from the GS to the autopilot. These are only
sent when being operated in mid-level commands mode
from the ground.
target : The system setting the commands (uint8_t)
hCommand : Commanded Altitude in meters (float)
uCommand : Commanded Airspeed in m/s (float)
rCommand : Commanded Turnrate in rad/s (float)
'''
return self.send(self.mid_lvl_cmds_encode(target, hCommand, uCommand, rCommand), force_mavlink1=force_mavlink1) | 0.009103 |
def get_common(self, filename):
''' Process lists of common name words '''
        word_list = []
        # use a context manager so the file handle is closed promptly
        with open(filename) as words:
            for word in words:
                word_list.append(word.strip())
return word_list | 0.00823 |
def remainingDays(self, now=None):
"""
        Based on the value of the notAfter field, returns the number of
        days the certificate will still be valid. The date used for the
        comparison is the current local date, as returned by
        time.localtime(), unless another one is provided through the 'now'
        argument. 'now' can be given as either a time tuple or a string
representing the date. Accepted format for the string version
are:
- '%b %d %H:%M:%S %Y %Z' e.g. 'Jan 30 07:38:59 2008 GMT'
- '%m/%d/%y' e.g. '01/30/08' (less precise)
If the certificate is no more valid at the date considered, then,
a negative value is returned representing the number of days
since it has expired.
The number of days is returned as a float to deal with the unlikely
case of certificates that are still just valid.
"""
if now is None:
now = time.localtime()
elif type(now) is str:
try:
if '/' in now:
now = time.strptime(now, '%m/%d/%y')
else:
now = time.strptime(now, '%b %d %H:%M:%S %Y %Z')
except:
warning("Bad time string provided '%s'. Using current time" % now)
now = time.localtime()
now = time.mktime(now)
nft = time.mktime(self.notAfter)
diff = (nft - now)/(24.*3600)
return diff | 0.004676 |
def get_window_name(self, win_id):
"""
Get a window's name, if any.
"""
window = window_t(win_id)
name_ptr = ctypes.c_char_p()
name_len = ctypes.c_int(0)
name_type = ctypes.c_int(0)
_libxdo.xdo_get_window_name(
self._xdo, window, ctypes.byref(name_ptr),
ctypes.byref(name_len), ctypes.byref(name_type))
name = name_ptr.value
_libX11.XFree(name_ptr) # Free the string allocated by Xlib
return name | 0.003929 |
def permissions(self):
"""Return a set with all permissions granted to the user."""
perms = set()
for g in self.groups:
perms = perms | set(g.permissions)
return perms | 0.009479 |
def get_supported_languages(
self,
parent=None,
display_language_code=None,
model=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns a list of supported languages for translation.
Example:
>>> from google.cloud import translate_v3beta1
>>>
>>> client = translate_v3beta1.TranslationServiceClient()
>>>
>>> response = client.get_supported_languages()
Args:
parent (str): Optional. Used for making regionalized calls.
Format: projects/{project-id}/locations/{location-id}.
For global calls, use projects/{project-id}/locations/global.
If missing, the call is treated as a global call.
Only custom model within the same location-id can be used.
Otherwise 400 is returned.
display_language_code (str): Optional. The language to use to return localized, human readable names
of supported languages. If missing, default language is ENGLISH.
model (str): Optional. Get supported languages of this model.
The format depends on model type:
1. Custom models:
projects/{project-id}/locations/{location-id}/models/{model-id}.
2. General (built-in) models:
projects/{project-id}/locations/{location-id}/models/general/nmt
projects/{project-id}/locations/{location-id}/models/general/base
Returns languages supported by the specified model.
If missing, we get supported languages of Google general NMT model.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.translate_v3beta1.types.SupportedLanguages` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_supported_languages" not in self._inner_api_calls:
self._inner_api_calls[
"get_supported_languages"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_supported_languages,
default_retry=self._method_configs["GetSupportedLanguages"].retry,
default_timeout=self._method_configs["GetSupportedLanguages"].timeout,
client_info=self._client_info,
)
request = translation_service_pb2.GetSupportedLanguagesRequest(
parent=parent, display_language_code=display_language_code, model=model
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_supported_languages"](
request, retry=retry, timeout=timeout, metadata=metadata
) | 0.002716 |
def parse_args(self, args = None, namespace = None,
config_file_contents = None, env_vars = os.environ):
"""Supports all the same args as the ArgumentParser.parse_args(..),
as well as the following additional args.
Additional Args:
args: a list of args as in argparse, or a string (eg. "-x -y bla")
config_file_contents: String. Used for testing.
env_vars: Dictionary. Used for testing.
"""
args, argv = self.parse_known_args(args = args,
namespace = namespace,
config_file_contents = config_file_contents,
env_vars = env_vars)
if argv:
self.error('unrecognized arguments: %s' % ' '.join(argv))
return args | 0.028721 |
def badge(left_text: str, right_text: str, left_link: Optional[str] = None,
right_link: Optional[str] = None,
whole_link: Optional[str] = None, logo: Optional[str] = None,
left_color: str = '#555', right_color: str = '#007ec6',
measurer: Optional[text_measurer.TextMeasurer] = None,
embed_logo: bool = False) -> str:
"""Creates a github-style badge as an SVG image.
>>> badge(left_text='coverage', right_text='23%', right_color='red')
'<svg...</svg>'
>>> badge(left_text='build', right_text='green', right_color='green',
... whole_link="http://www.example.com/")
'<svg...</svg>'
Args:
left_text: The text that should appear on the left-hand-side of the
badge e.g. "coverage".
right_text: The text that should appear on the right-hand-side of the
badge e.g. "23%".
left_link: The URL that should be redirected to when the left-hand text
is selected.
right_link: The URL that should be redirected to when the right-hand
text is selected.
whole_link: The link that should be redirected to when the badge is
        selected. If set, then left_link and right_link may not be set.
logo: A url representing a logo that will be displayed inside the
badge. Can be a data URL e.g. "data:image/svg+xml;utf8,<svg..."
left_color: The color of the part of the badge containing the left-hand
        text. Can be any valid CSS color
(see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a
color name defined here:
https://github.com/badges/shields/blob/master/lib/colorscheme.json
right_color: The color of the part of the badge containing the
        right-hand text. Can be any valid CSS color
(see https://developer.mozilla.org/en-US/docs/Web/CSS/color) or a
color name defined here:
https://github.com/badges/shields/blob/master/lib/colorscheme.json
measurer: A text_measurer.TextMeasurer that can be used to measure the
width of left_text and right_text.
embed_logo: If True then embed the logo image directly in the badge.
        This can prevent an HTTP request, and some browsers will not render
        externally referenced images. When True, `logo` must be an HTTP/HTTPS
URI or a filesystem path. Also, the `badge` call may raise an
exception if the logo cannot be loaded, is not an image, etc.
"""
if measurer is None:
measurer = (
precalculated_text_measurer.PrecalculatedTextMeasurer
.default())
if (left_link or right_link) and whole_link:
raise ValueError(
            'whole_link may not be set with left_link or right_link')
template = _JINJA2_ENVIRONMENT.get_template('badge-template-full.svg')
if logo and embed_logo:
logo = _embed_image(logo)
svg = template.render(
left_text=left_text,
right_text=right_text,
left_text_width=measurer.text_width(left_text) / 10.0,
right_text_width=measurer.text_width(right_text) / 10.0,
left_link=left_link,
right_link=right_link,
whole_link=whole_link,
logo=logo,
left_color=_NAME_TO_COLOR.get(left_color, left_color),
right_color=_NAME_TO_COLOR.get(right_color, right_color),
)
xml = minidom.parseString(svg)
_remove_blanks(xml)
xml.normalize()
return xml.documentElement.toxml() | 0.000562 |
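A short usage sketch for the function above, assuming it is the badge() exported by the pybadges package; the colors and link come from the documented parameters, while the file name and URL are placeholders.

from pybadges import badge

svg = badge(left_text='coverage', right_text='87%', right_color='green',
            whole_link='https://example.com/coverage-report')

# Write the rendered SVG badge to disk.
with open('coverage.svg', 'w') as handle:
    handle.write(svg)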
def process_rpc(self, rpc):
"""Process input and output parts of `rpc`."""
p = "/nc:rpc/" + self.qname(rpc)
tmpl = self.xsl_template(p)
inp = rpc.search_one("input")
if inp is not None:
ct = self.xsl_calltemplate("rpc-input", tmpl)
self.xsl_withparam("nsid", rpc.i_module.i_modulename + ":", ct)
self.process_children(inp, p, 2)
outp = rpc.search_one("output")
if outp is not None:
self.process_children(outp, "/nc:rpc-reply", 1) | 0.003752 |
def PopItem(self):
"""Pops an item off the queue.
If no ZeroMQ socket has been created, one will be created the first
time this method is called.
Returns:
object: item from the queue.
Raises:
KeyboardInterrupt: if the process is sent a KeyboardInterrupt while
popping an item.
QueueEmpty: if the queue is empty, and no item could be popped within the
queue timeout.
RuntimeError: if closed or terminate event is missing.
zmq.error.ZMQError: if a ZeroMQ error occurs.
"""
if not self._zmq_socket:
self._CreateZMQSocket()
if not self._closed_event or not self._terminate_event:
raise RuntimeError('Missing closed or terminate event.')
logger.debug(
'Pop on {0:s} queue, port {1:d}'.format(self.name, self.port))
last_retry_timestamp = time.time() + self.timeout_seconds
while not self._closed_event.is_set() or not self._terminate_event.is_set():
try:
return self._ReceiveItemOnActivity(self._zmq_socket)
except errors.QueueEmpty:
if time.time() > last_retry_timestamp:
raise
except KeyboardInterrupt:
self.Close(abort=True)
raise | 0.006612 |
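The loop above retries empty receives until a deadline passes; a generic pyzmq sketch of the same pop-with-timeout pattern is shown below (the endpoint, timeout values, and pickle serialization are placeholder choices, not taken from the class above).

import time
import zmq

context = zmq.Context()
socket = context.socket(zmq.PULL)
socket.connect('tcp://127.0.0.1:5555')  # placeholder endpoint

def pop_item(timeout_seconds=5.0, poll_interval_ms=100):
    """Returns the next queued object, or raises TimeoutError if none arrives in time."""
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        # poll() returns a non-zero event mask when data is ready to read.
        if socket.poll(poll_interval_ms, zmq.POLLIN):
            return socket.recv_pyobj()
    raise TimeoutError('queue empty after {0:f} seconds'.format(timeout_seconds))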