content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def view_folio_contact(request, folio_id=None):
"""
View contact page within folio
"""
folio = get_object_or_404(Folio, pk=folio_id)
if not folio.is_published and folio.author_id != request.user:
return render(
request,
'showcase/folio_is_not_published.html'
)
author = get_object_or_404(
UserAccount,
pk=folio.author_id.id
)
message_form = SendAuthorMessageForm()
context = {
"user": request.user,
"folio": folio,
"author": author,
"form": message_form
}
return render(
request,
'showcase/view_folio_contact.html',
context=context) | 0269fea6322486912cdd462961fb847ffd8d038a | 3,658,000 |
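A minimal sketch of wiring this view into a Django URLconf, assuming the app is called `showcase` (the module path and URL pattern are illustrative, not taken from the source):

# urls.py (hypothetical wiring for view_folio_contact)
from django.urls import path
from showcase.views import view_folio_contact

urlpatterns = [
    path('folio/<int:folio_id>/contact/', view_folio_contact, name='view_folio_contact'),
]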
def faom03(t):
"""
Wrapper for ERFA function ``eraFaom03``.
Parameters
----------
t : double array
Returns
-------
c_retval : double array
Notes
-----
The ERFA documentation is below.
- - - - - - - - - -
e r a F a o m 0 3
- - - - - - - - - -
Fundamental argument, IERS Conventions (2003):
mean longitude of the Moon's ascending node.
Given:
t double TDB, Julian centuries since J2000.0 (Note 1)
Returned (function value):
double Omega, radians (Note 2)
Notes:
1) Though t is strictly TDB, it is usually more convenient to use
TT, which makes no significant difference.
2) The expression used is as adopted in IERS Conventions (2003) and
is from Simon et al. (1994).
References:
McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003),
IERS Technical Note No. 32, BKG (2004)
Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M.,
Francou, G., Laskar, J. 1994, Astron.Astrophys. 282, 663-683
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
c_retval = ufunc.faom03(t)
return c_retval | 3e3d1c7e650d6034ed0793e4f1bc8605e9e82e32 | 3,658,001 |
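A usage sketch, assuming the wrapper is importable as defined above; the input is TDB (or TT) Julian centuries since J2000.0:

jd_tt = 2458849.5                    # 2020-01-01 00:00 TT (illustrative value)
t = (jd_tt - 2451545.0) / 36525.0    # Julian centuries since J2000.0
omega = faom03(t)                    # mean longitude of the Moon's ascending node, radians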
from datetime import datetime, timedelta
import calendar
import pandas as pd
def get_dtindex(interval, begin, end=None):
"""Creates a pandas datetime index for a given interval.
Parameters
----------
interval : str or int
Interval of the datetime index. Integer values will be treated as days.
begin : datetime
Datetime index start date.
end : datetime, optional
Datetime index end date, defaults to current date.
Returns
-------
dtindex : pandas.tseries.index.DatetimeIndex
Datetime index.
"""
if end is None:
end = datetime.now()
if interval in ['dekad', 'dekadal', 'decadal', 'decade']:
dtindex = dekad_index(begin, end)
elif interval in ['daily', 'day', '1']:
dtindex = pd.date_range(begin, end, freq='D')
elif interval in ['weekly', 'week', '7']:
begin2 = begin - timedelta(begin.weekday()) + timedelta(6)
dtindex = pd.date_range(begin2, end, freq='7D')
elif interval in ['monthly', 'month']:
lday = calendar.monthrange(end.year, end.month)[1]
end = datetime(end.year, end.month, lday)
dtindex = pd.date_range(begin, end, freq='M')
if type(interval) is int:
dtindex = pd.date_range(begin, end, freq=str(str(interval) + 'D'))
return dtindex | 32f0992365b075fb8601276bd3680c7db43a677e | 3,658,002 |
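Example calls, assuming pandas is available (the 'dekad' branch additionally needs the external dekad_index helper):

from datetime import datetime

get_dtindex('monthly', datetime(2020, 1, 1), datetime(2020, 12, 31))  # month-end stamps for 2020
get_dtindex(10, datetime(2020, 1, 1), datetime(2020, 2, 1))           # every 10 days from 2020-01-01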
import numpy
def asanyarray(a, dtype=None, order=None):
"""Converts the input to an array, but passes ndarray subclasses through.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This includes scalars,
lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays.
dtype : dtype, optional
By default, the dtype is inferred from the input data.
order : {'C', 'F'}, optional
        Whether to use row-major (C-style) or column-major (Fortran-style) memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray or an ndarray subclass
Array interpretation of *a*. If *a* is a subclass of ndarray, it is returned
as-is and no copy is performed.
See Also
--------
asarray : Converts the input to an array.
Examples
--------
Convert a list into an array:
>>> import nlcpy as vp
>>> a = [1, 2]
>>> vp.asanyarray(a)
array([1, 2])
"""
if isinstance(a, ndarray):
if dtype is None and order is None:
return a
elif dtype is not None and order is None:
if a.dtype == numpy.dtype(dtype):
return a
elif dtype is None and order is not None:
order_char = internal._normalize_order(order)
order_char = chr(core._update_order_char(a, order_char))
if order_char == 'C' and a._c_contiguous:
return a
if order_char == 'F' and a._f_contiguous:
return a
else:
order_char = internal._normalize_order(order)
order_char = chr(core._update_order_char(a, order_char))
if a.dtype == numpy.dtype(dtype) and \
(order_char == 'C' and a._c_contiguous
or order_char == 'F' and a._f_contiguous):
return a
return core.array(a, dtype=dtype, order=order) | c079d114ab224c487a65929b7710450262c66733 | 3,658,003 |
import os
def envi_header(inputpath):
"""
    Convert an ENVI binary/header path to a header path, handling extensions
Args:
inputpath: path to envi binary file
Returns:
str: the header file associated with the input reference.
"""
    if os.path.splitext(inputpath)[-1] in ('.img', '.dat', '.raw'):
# headers could be at either filename.img.hdr or filename.hdr. Check both, return the one that exists if it
# does, if not return the latter (new file creation presumed).
hdrfile = os.path.splitext(inputpath)[0] + '.hdr'
if os.path.isfile(hdrfile):
return hdrfile
elif os.path.isfile(inputpath + '.hdr'):
return inputpath + '.hdr'
return hdrfile
elif os.path.splitext(inputpath)[-1] == '.hdr':
return inputpath
else:
return inputpath + '.hdr' | 45df7507017676648cd4fae955da26916bbf4738 | 3,658,004 |
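Expected behaviour for some hypothetical paths:

envi_header('/data/scene.img')  # -> '/data/scene.hdr' (or '/data/scene.img.hdr' if that file already exists)
envi_header('/data/scene.hdr')  # -> '/data/scene.hdr'
envi_header('/data/scene')      # -> '/data/scene.hdr'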
import base64
import struct
from datetime import datetime, timedelta
def parse_fernet_timestamp(ciphertext):
"""
Returns timestamp embedded in Fernet-encrypted ciphertext, converted to Python datetime object.
Decryption should be attempted before using this function, as that does cryptographically strong tests on the
validity of the ciphertext.
"""
try:
decoded = base64.urlsafe_b64decode(ciphertext)
# This is a value in Unix Epoch time
epoch_timestamp = struct.unpack('>Q', decoded[1:9])[0]
timestamp = datetime(1970, 1, 1) + timedelta(seconds=epoch_timestamp)
return timestamp
    except struct.error as e:
        raise ValueError(str(e)) | 216d314c84679cc5806d6a483f68bff485375b36 | 3,658,005 |
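A usage sketch with the cryptography package (assumed installed); a Fernet token stores its creation time in bytes 1-8 of the decoded payload, which is what this helper reads:

from cryptography.fernet import Fernet

key = Fernet.generate_key()
token = Fernet(key).encrypt(b"example payload")
print(parse_fernet_timestamp(token))  # datetime of token creation (UTC)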
def tagcloud(guids):
"""Get "tag cloud" for the search specified by guids
Same return format as taglist, impl is always False.
"""
guids = set(guids)
range = (0, 19 + len(guids))
tags = request.client.find_tags("EI", "", range=range, guids=guids, order="-post", flags="-datatag")
return [(tagfmt(t.name), t, False) for t in tags if t.guid not in guids] | fb94fab24040b3c38a68a2731d9b1bba0cccd3bc | 3,658,006 |
import errno
import os


def _ValidateContent(path, expected_content):
  """Helper to validate the given file's content."""
  assert os.path.isfile(path), 'File didn\'t exist: %r' % path
  name = os.path.basename(path)
  current_content = open(path).read()
  if current_content == expected_content:
    print('%s is good.' % name)
  else:
    try:
      open(path, 'w').write(expected_content)
      print('Updated %s.' % name)
    except IOError as e:
      if e.errno != errno.EACCES:
        raise
      print('%r needs to be updated but is not writable.' % path)
      return False
  return True | ddf6e3089f66d157f281655357753ff2b746d4a2 | 3,658,007 |
import os


def _test_image_path():
    """
    A 100 x 50 pixel GeoTIFF image, with 0 as NODATA value
    """
    # NOTE: the original imported `path` from sys, which is a list and cannot be
    # joined; resolving relative to this file's directory is an assumption.
    return os.path.join(os.path.dirname(__file__), "test.tiff") | b719a46fc7cdec952953502473000a7adb9b4625 | 3,658,008 |
from typing import Optional
from typing import Sequence
def api_ofrecord_image_decoder_random_crop(
input_blob: remote_blob_util.BlobDef,
blob_name: str,
color_space: str = "BGR",
num_attempts: int = 10,
seed: Optional[int] = None,
random_area: Sequence[float] = [0.08, 1.0],
random_aspect_ratio: Sequence[float] = [0.75, 1.333333],
name: str = "OFRecordImageDecoderRandomCrop",
) -> remote_blob_util.BlobDef:
"""This operator is an image decoder with random crop.
Args:
input_blob (BlobDef): The input Blob
blob_name (str): The name of the Blob
color_space (str, optional): The color space, such as "RGB", "BGR". Defaults to "BGR".
num_attempts (int, optional): The maximum number of random cropping attempts. Defaults to 10.
seed (Optional[int], optional): The random seed. Defaults to None.
random_area (Sequence[float], optional): The random cropping area. Defaults to [0.08, 1.0].
random_aspect_ratio (Sequence[float], optional): The random scaled ratio. Defaults to [0.75, 1.333333].
name (str, optional): The name for the operation. Defaults to "OFRecordImageDecoderRandomCrop".
Returns:
BlobDef: The random cropped Blob
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
@flow.global_function(type="predict")
def ofrecord_reader_job() -> Tuple[tp.Numpy, tp.Numpy]:
batch_size = 16
color_space = "RGB"
# our ofrecord file path is "./dataset/part-0"
ofrecord = flow.data.ofrecord_reader(
"./imgdataset",
batch_size=batch_size,
data_part_num=1,
part_name_suffix_length=-1,
part_name_prefix='part-',
random_shuffle=True,
shuffle_after_epoch=True,
)
image = flow.data.OFRecordImageDecoderRandomCrop(
ofrecord, "encoded", color_space=color_space
)
res_image, scale, new_size = flow.image.Resize(
image, target_size=(224, 224)
)
label = flow.data.OFRecordRawDecoder(
ofrecord, "class/label", shape=(1, ), dtype=flow.int32
)
return res_image, label
if __name__ == "__main__":
images, labels = ofrecord_reader_job()
# images.shape (16, 224, 224, 3)
"""
assert isinstance(name, str)
if seed is not None:
assert name is not None
module = flow.find_or_create_module(
name,
lambda: OFRecordImageDecoderRandomCropModule(
blob_name=blob_name,
color_space=color_space,
num_attempts=num_attempts,
random_seed=seed,
random_area=random_area,
random_aspect_ratio=random_aspect_ratio,
name=name,
),
)
return module(input_blob) | bcf8ad7deb97677e52b04e3204281a7ecc89c89c | 3,658,009 |
def student_add_information(adding_student_id, student_information):
"""
    Add detailed information for a student.
    :@param adding_student_id: int
    :@param student_information: dict or str
    :@return : execution status (True or False)
"""
if type(student_information) == dict:
adding_information = student_information
elif type(student_information) == str:
adding_information = {}
tmp_key = ''
tmp_adding_key = ''
tmp_value = ''
state = 'write_key'
for k in student_information:
            # determine which part (key or value) we are currently parsing
if k == ':':
tmp_value = ''
state = 'write_value'
continue
elif k == '\n':
tmp_adding_key = tmp_key
tmp_key = ''
state = 'write_key'
adding_information[tmp_adding_key] = tmp_value
continue
            # accumulate the character into the key or the value depending on state
if state == 'write_key':
tmp_key += k
elif state == 'write_value':
tmp_value += k
else:
return False, 2
times = 0
adding_info_list = [adding_student_id]
for i in adding_information.keys():
times += 1
adding_info_list.append(adding_information.get(i))
for j in range(0, 5-times):
adding_info_list.append(None)
adding_info_tuple = tuple(adding_info_list)
adding_info_final = [adding_info_tuple]
cur.executemany("insert into student_info values(%s,%s,%s,%s,%s,%s)", adding_info_final)
conn.commit() | 8a44177b90c1f3e10077313f6765a4699ded676b | 3,658,010 |
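A usage sketch; the string form expects 'key:value' pairs separated by newlines (a trailing newline is required for the last pair to be stored), and the function relies on module-level cur/conn database handles plus a six-column student_info table:

# dict form
student_add_information(20210001, {"name": "Alice", "age": 21})

# string form -- note the trailing newline after the last pair
student_add_information(20210002, "name:Bob\nage:22\n")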
from typing import Optional
from types import ModuleType
import os
import importlib.util
def import_file_as_module(filename: str, name: Optional[str] = None) -> ModuleType:
"""
NOTE(2020-11-09|domanchi): We're essentially executing arbitrary code here, so some thoughts
should be recorded as to the security of this feature. This should not add any additional
security risk, given the following assumptions hold true:
1. detect-secrets is not used in an environment that has privileged access (more
than the current user), OR
2. detect-secrets (when running in a privileged context) does not accept arbitrary
user input that feeds into this function (e.g. custom plugins).
The first assumption should be rather self-explanatory: if you are running detect-secrets
in a context that has the same permissions as you, you can import any code you want, since
this acts more of a utility function than a security flaw. If you're going to do it *anyway*,
let's just make your life easier.
The second assumption should also be pretty straight-forward: don't trust user input,
especially if it's going to be executed as that privileged user, unless you want a privilege
escalation vulnerability. detect-secrets is not going to do any sanitization of user input
for you.
"""
if not os.path.exists(filename):
raise FileNotFoundError
if not name:
# NOTE: After several trial and error attempts, I could not discern the importance
# of this field, in this context. Hence, I don't think it matters that much.
name = os.path.splitext(os.path.basename(filename))[0]
# Source: https://stackoverflow.com/a/67692/13340678
spec = importlib.util.spec_from_file_location(name, filename)
if not spec:
raise InvalidFile
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
module.__path__ = os.path.abspath(filename) # type: ignore
return module | 5aa9a54f8a8ed4a42bc49f806c2ac38d01b8ccfa | 3,658,011 |
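Usage sketch (the plugin path is hypothetical):

plugin = import_file_as_module('/tmp/my_custom_plugin.py')
print(plugin.__name__)  # 'my_custom_plugin'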
async def get_show_by_month_day(month: conint(ge=1, le=12), day: conint(ge=1, le=31)):
"""Retrieve a Show object, based on month and day, containing: Show
ID, date and basic information."""
try:
show = Show(database_connection=_database_connection)
shows = show.retrieve_by_month_day(month, day)
if not shows:
raise HTTPException(
status_code=404,
detail=f"Shows for month {month:02d} and {day:02d} not found",
)
else:
return {"shows": shows}
except ValueError:
raise HTTPException(
status_code=404,
detail=f"Shows for month {month:02d} and {day:02d} not found",
)
except ProgrammingError:
raise HTTPException(
status_code=500,
detail="Unable to retrieve show information from the database",
)
except DatabaseError:
raise HTTPException(
status_code=500,
detail="Database error occurred while retrieving "
"show information from the database",
) | e774f61254a3d7cdfc9a49ca1a9eea4f65853f55 | 3,658,012 |
import random
def generate_random_tag(length):
"""Generate a random alphanumeric tag of specified length.
Parameters
----------
length : int
The length of the tag, in characters
Returns
-------
str
An alphanumeric tag of specified length.
Notes
-----
The generated tag will not use possibly ambiguous characters from this set:
- '0' and '1'
- 'i' and 'I'
- 'l' and 'L'
- 'o' and 'O'
"""
characters_set = ('23456789'
+ 'abcdefghjkmnpqrstuvwxyz'
+ 'ABCDEFGHJKMNPQRSTUVWXYZ')
    return ''.join([characters_set[int(random.random() * len(characters_set))]
                    for _ in range(length)]) | b62a103663b69f0a27d8ba23134473dc01932409 | 3,658,013 |
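Example call (the printed value is illustrative, since output is random); a more idiomatic body would be ''.join(random.choices(characters_set, k=length)):

tag = generate_random_tag(8)
print(tag)  # e.g. 'x7Km2pQa'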
import scipy.io as io
def loadmat(filename, check_arrays=False, **kwargs):
"""
Big thanks to mergen on stackexchange for this:
http://stackoverflow.com/a/8832212
This function should be called instead of direct scipy.io.loadmat
as it cures the problem of not properly recovering python dictionaries
from mat files. It calls the function check keys to cure all entries
which are still mat-objects.
"""
kwargs["struct_as_record"] = False
kwargs["squeeze_me"] = True
data = io.loadmat(filename, **kwargs)
return _check_keys(data, check_arrays) | 3b054cbabc03b468ec0c80a4ed544b1c054ef223 | 3,658,014 |
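Usage sketch (depends on the external _check_keys helper from the same module; the filename is hypothetical):

data = loadmat('results.mat')
print(sorted(data.keys()))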
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output.outputs.values()
else:
raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) | 913c1b232f8ac6e66e9104c055c9d34726db1027 | 3,658,015 |
import logging
import numpy as np
def train_city_s1(city:str, pollutant= 'PM2.5', n_jobs=-2, default_meta=False,
search_wind_damp=False, choose_cat_hour=False, choose_cat_month=True,
add_weight=True, instr='MODIS', op_fire_zone=False, op_fire_twice=False, op_lag=True, search_tpot=False,
main_data_folder: str = '../data/',
model_folder='../models/', report_folder='../reports/'):
"""Training pipeline from process raw data, hyperparameter tune, and save model.
Args:
city: city name
pollutant(optional): pollutant name
n_jobs(optional): number of CPUs to use during optimization
default_meta(optional): if True, override meta setting with the default value
search_wind_damp(optional): if True, search in four options of the fire features.
add_weight(optional): if True, use non-uniform weight when fitting and evaluating the model.
instr(optional): choose hotspots detection instrument
choose_cat_hour(optional): if True, see if adding/not adding hour as catergorical variable is better
choose_cat_month(optional): if True, see if adding/not adding month as catergorical variable is better
        op_fire_twice(optional): if True, optimize fire data after optimizing lag
search_tpot(optional): If True, also search for other model using TPOT
main_data_folder(optional): main data folder for initializing Dataset object [default:'../data/]
model_folder(optional): model folder for initializing Dataset object [default:'../models/']
report_folder(optional): folder to save figure for initializing Dataset object [default:'../reports/']
Returns:
dataset: dataset object
model: model object
poll_meta(dict): parameter dictionary
"""
# start logging
set_logging(level=10)
logger = logging.getLogger(__name__)
# initialize a trainer object
trainer = Trainer(city=city, pollutant=pollutant, instr=instr)
trainer.n_jobs = n_jobs
if default_meta:
trainer.get_default_meta()
    if not add_weight:
trainer.dataset.add_weight = 0
#if 'x_cols_org' in trainer.poll_meta.keys():
# trainer.dataset.x_cols = trainer.dataset.x_cols_org = trainer.poll_meta['x_cols_org']
# look for the best rf model
trainer.op_rf(fire_dict=trainer.dataset.fire_dict)
# remove columns
trainer.op2_rm_cols()
logger.info(f'current columns {trainer.dataset.x_cols_org}')
# op fire
trainer.op_fire(x_cols=trainer.dataset.x_cols_org, search_wind_damp=search_wind_damp)
if op_fire_zone:
trainer.op_fire_zone(step=50)
if choose_cat_hour:
trainer.choose_cat_hour()
if choose_cat_month:
trainer.choose_cat_month()
if op_lag:
# see if adding lag improve things
if trainer.dataset.with_interact:
# use smaller lag range
trainer.op4_lag(lag_range=[1, 20])
else:
trainer.op4_lag()
else:
print('skip adding lag')
trainer.dataset.lag_dict = {'n_max': 1, 'step': 1, 'roll':True}
trainer.dataset.build_lag(
lag_range=np.arange(
1,
trainer.dataset.lag_dict['n_max'],
trainer.dataset.lag_dict['step']),
roll=trainer.dataset.lag_dict['roll'])
if op_fire_twice:
trainer.op_fire(x_cols=trainer.dataset.x_cols, with_lag=True, search_wind_damp=search_wind_damp)
# serach rf model again
trainer.op6_rf()
trainer.final_fit()
# save plot
trainer.save_feat_imp(with_interact=trainer.dataset.with_interact, filename=trainer.dataset.report_folder +f'{trainer.poll_name}_rf_fea_op2_nolag.png', title='rf feature of importance')
trainer.save_all()
if search_tpot:
trainer.search_tpot()
    # turn off logging
logging.shutdown()
return trainer.dataset, trainer.model, trainer | 43cd4ff89068feba1bca3c316e40b19f852c1da2 | 3,658,016 |
def constructCbsdGrantInfo(reg_request, grant_request, is_managing_sas=True):
"""Constructs a |CbsdGrantInfo| tuple from the given data."""
lat_cbsd = reg_request['installationParam']['latitude']
lon_cbsd = reg_request['installationParam']['longitude']
height_cbsd = reg_request['installationParam']['height']
height_type_cbsd = reg_request['installationParam']['heightType']
if height_type_cbsd == 'AMSL':
# TODO(sbdt): move the feature of AMSL support within the prop models.
altitude_cbsd = drive.terrain_driver.GetTerrainElevation(lat_cbsd, lon_cbsd)
height_cbsd = height_cbsd - altitude_cbsd
max_eirp, low_frequency, high_frequency = None, None, None
if grant_request is not None:
if 'requestedOperationParam' in grant_request:
max_eirp = grant_request['requestedOperationParam']['maxEirp']
low_frequency = grant_request['requestedOperationParam']['operationFrequencyRange']['lowFrequency']
high_frequency = grant_request['requestedOperationParam']['operationFrequencyRange']['highFrequency']
else:
max_eirp = grant_request['operationParam']['maxEirp']
low_frequency = grant_request['operationParam']['operationFrequencyRange']['lowFrequency']
high_frequency = grant_request['operationParam']['operationFrequencyRange']['highFrequency']
return CbsdGrantInfo(
# Get information from the registration
latitude=lat_cbsd,
longitude=lon_cbsd,
height_agl=height_cbsd,
indoor_deployment=reg_request['installationParam']['indoorDeployment'],
antenna_azimuth=reg_request['installationParam']['antennaAzimuth'],
antenna_gain=reg_request['installationParam']['antennaGain'],
antenna_beamwidth=reg_request['installationParam']['antennaBeamwidth'],
cbsd_category=reg_request['cbsdCategory'],
max_eirp=max_eirp,
iap_eirp={max_eirp}, # *****PATCHED*****
low_frequency=low_frequency,
high_frequency=high_frequency,
is_managed_grant=is_managing_sas) | aff6a37f7831a185a7c1a3168c42a692768ae4e9 | 3,658,017 |
def download_raw_pages_content(pages_count):
"""download habr pages by page count"""
return [fetch_raw_content(page) for page in range(1, pages_count + 1)] | 77e369a986ff09887a71d996226d147fef9a36ec | 3,658,018 |
def tseries2bpoframe(s: pd.Series, freq: str = "MS", prefix: str = "") -> pd.DataFrame:
"""
Aggregate timeseries with varying values to a dataframe with base, peak and offpeak
timeseries, grouped by provided time interval.
Parameters
----------
s : Series
Timeseries with hourly or quarterhourly frequency.
freq : {'MS' (month, default) 'QS' (quarter), 'AS' (year)}
Target frequency.
prefix : str, optional (default: '')
If specified, add this to the column names of the returned dataframe.
Returns
-------
DataFrame
Dataframe with base, peak and offpeak values (as columns). Index: downsampled
timestamps at provided frequency.
Notes
-----
Can only be used for values that are 'averagable' over a time period, like power [MW]
and price [Eur/MWh]. Not for e.g. energy [MWh], revenue [Eur], and duration [h].
In:
ts_left
2020-01-01 00:00:00+01:00 41.88
2020-01-01 01:00:00+01:00 38.60
2020-01-01 02:00:00+01:00 36.55
...
2020-12-31 21:00:00+01:00 52.44
2020-12-31 22:00:00+01:00 51.86
2020-12-31 23:00:00+01:00 52.26
Freq: H, Name: p, Length: 8784, dtype: float64
Out:
base peak offpeak
ts_left
2020-01-01 00:00:00+01:00 35.034906 42.530036 30.614701
2020-02-01 00:00:00+01:00 21.919009 33.295167 15.931557
... ... ...
2020-11-01 00:00:00+01:00 38.785706 49.110873 33.226004
2020-12-01 00:00:00+01:00 43.519745 57.872246 35.055449
12 rows × 3 columns
"""
if freq not in ("MS", "QS", "AS"):
raise ValueError(
f"Parameter ``freq`` must be one of 'MS', 'QS', 'AS'; got '{freq}'."
)
# Remove partial data
s = trim_frame(s, freq)
# Handle possible units.
sin, units = (s.pint.magnitude, s.pint.units) if hasattr(s, "pint") else (s, None)
# Do calculations. Use normal mean, because all rows have same duration.
sout = sin.resample(freq).apply(lambda s: tseries2singlebpo(s, prefix))
# Handle possible units.
if units is not None:
sout = sout.astype(nits.pintunit(units))
return sout.unstack() | 6b97bc3b8c925be68ba79e8a9abdc2795500df76 | 3,658,019 |
def calc_buffered_bounds(
format, bounds, meters_per_pixel_dim, layer_name, geometry_type,
buffer_cfg):
"""
Calculate the buffered bounds per format per layer based on config.
"""
if not buffer_cfg:
return bounds
format_buffer_cfg = buffer_cfg.get(format.extension)
if format_buffer_cfg is None:
return bounds
geometry_type = normalize_geometry_type(geometry_type)
per_layer_cfg = format_buffer_cfg.get('layer', {}).get(layer_name)
if per_layer_cfg is not None:
layer_geom_pixels = per_layer_cfg.get(geometry_type)
if layer_geom_pixels is not None:
assert isinstance(layer_geom_pixels, Number)
result = bounds_buffer(
bounds, meters_per_pixel_dim * layer_geom_pixels)
return result
by_geometry_pixels = format_buffer_cfg.get('geometry', {}).get(
geometry_type)
if by_geometry_pixels is not None:
assert isinstance(by_geometry_pixels, Number)
result = bounds_buffer(
bounds, meters_per_pixel_dim * by_geometry_pixels)
return result
return bounds | 5bbf9720525126e3dcd000329493c894c8249771 | 3,658,020 |
async def read_users_me(
current_user: models.User = Depends(security.get_current_active_user),
):
"""Get User data"""
return current_user | 4b2e37586a4e13074ec009f4cd7e64e7a357d539 | 3,658,021 |
import os
import numpy as np
def cal_energy_parameters_for_one_channel(sub_sample_label_dict, channel, importance=1):
"""
the loss comes equally from four sources: connected component (0D), boundary (1D), area (2D), and rim_enhance
e.g. a small region with long boundaries means it accounts for lots of 1D loss and little of 2D loss.
If border_outer=False, boundaries are inside lesions, and all connected regions account for the same 0D loss
If border_outer=True, boundaries are outside lesions, 0D loss are the same inside the same integrated connected
region determined by the outer boundaries.
0D, 1D, 2D loss are uniformly distributed into every pixels inside the integrated connected region;
rim_enhance is then added to the boundary pixels.
:param sub_sample_label_dict: The dict of representative training sample labels. This function calculate how to
balance the loss weights according to these training sample labels. Training sample labels should be numpy arrays
shaped: [length, width, channel], and when the channel is specified, it should be a binary image, with 1 means
positive, it can be a probability [0, 1]. When summing all channels, we get a 2D array of all ones.
:param channel: which channel we need to calculate? The weights are calculated channel-wise. The theoretical basis
is that, some TYPES lesions are big and folded; while some are small and smooth. When doing performance measure, we
don't care about this difference in the classes. Thus, different classes should account the same training loss.
Channel 0 is the probability mask for normal pixels.
:param importance: There may be a special class is extremely important. Increase the importance will increase the
proportion of training loss for this class.
:return: connect_energy_factor, rim_energy_factor, area_enhance, rim_enhance
0D 1D 2D
"""
sample_names_list = os.listdir(sub_sample_label_dict)
total_num_connected_areas = 0 # the number of connected areas in this sub set, counts for 0D loss
total_rim_length = 0 # the number of rim voxels, counts for 1D loss
total_lesion_area = 0 # the number of lesion voxels, counts for 2D loss
if not sub_sample_label_dict[-1] == '/':
sub_sample_label_dict = sub_sample_label_dict + '/'
for sample_name in sample_names_list:
sample = np.load(sub_sample_label_dict + sample_name) # sample should in [width, length, channel]
mask = sample[:, :, channel]
num_connected_areas, num_rim_voxels, num_lesion_voxels = calculate_balance_weights(mask, return_stat=True)
total_num_connected_areas += num_connected_areas
total_rim_length += num_rim_voxels
total_lesion_area += num_lesion_voxels
num_samples = len(sample_names_list)
num_loss_per_dimension = num_samples * importance
# each sample and each class is defaulted to have 4 units of losses: 3 units, 0D, 1D, 2D, which distributed
# uniformly on lesions; and one unit distributed uniformly on the rim pixels.
area_enhance = num_loss_per_dimension / total_lesion_area # thus, averagely each slice 1 units of 2D loss
rim_energy_factor = num_loss_per_dimension / total_rim_length # thus, averagely each slice 1 units of 1D loss
connect_energy_factor = num_loss_per_dimension / total_num_connected_areas # each slice 1 units of 0D loss
rim_enhance = num_loss_per_dimension / total_rim_length # averagely further add 1 units to enhance the rim pixels
return connect_energy_factor, rim_energy_factor, area_enhance, rim_enhance | 7d82fb2b2e232fee027971913475ff310b1b736e | 3,658,022 |
import logging
def get_logger(logfile):
"""Instantiate a simple logger.
"""
fmt = "%(levelname)s:%(filename)s:%(lineno)s:%(funcName)s: %(message)s"
#fmt = '%(levelname)s:%(filename)s:%(lineno)s:%(funcName)s:%(asctime)s: %(message)s']
datefmt = '%Y-%m-%dT%H:%M:%S'
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# logging to logfile
ch = logging.FileHandler(logfile, mode='w')
#ch.setLevel(logging.INFO)
ch.setFormatter( logging.Formatter(fmt, datefmt=datefmt) )
logger.addHandler(ch)
### log stdout
#ch = logging.StreamHandler()
#ch.setLevel(logging.DEBUG)
#ch.setFormatter( logging.Formatter(fmt, datefmt=datefmt) )
#logger.addHandler(ch)
#
#logger.write = lambda msg: logger.info(msg) if msg != '\n' else None
return logger | a3bd9387b745e3cb001a44c4f3c11488fef3c106 | 3,658,023 |
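Usage sketch:

logger = get_logger('/tmp/run.log')
logger.info('pipeline started')  # written to /tmp/run.log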
import logging
def __to_signed(val, bits):
"""
internal function to convert a unsigned integer to signed
of given bits length
"""
logging.debug(" in: value = %d", val)
mask = 0x00
for i in range(int(bits / 8)):
mask |= 0xff << (i * 8)
if val >= (1 << (bits - 1)):
val = -1 - (val ^ mask)
logging.debug(" out: value = %d", val)
return val | 618b02ad5a67c31a5942430ec64abcad59708ddd | 3,658,024 |
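Worked examples of the two's-complement conversion:

__to_signed(0x7F, 8)     # 127 (sign bit not set, value unchanged)
__to_signed(0xFF, 8)     # -1
__to_signed(0xFFFE, 16)  # -2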
from typing import Iterable
from typing import Tuple

import numpy as np
def compute_qp_objective(
configuration: Configuration, tasks: Iterable[Task], damping: float
) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute the Hessian matrix :math:`H` and linear vector :math:`c` of the
QP objective function:
.. math::
\\frac{1}{2} \\Delta q^T H \\Delta q + c^T q
The configuration displacement :math:`\\Delta q` is the output of inverse
kinematics (we divide it by :math:`\\Delta t` to get a commanded velocity).
Args:
configuration: Robot configuration to read kinematics from.
tasks: List of kinematic tasks to fulfill at (weighted) best.
damping: weight of Tikhonov (everywhere) regularization. Its unit is
`[cost]^2 / [tangent]` where `[tangent]` is "the" unit of robot
velocities. Improves numerical stability, but larger values slow
down all tasks.
Returns:
Pair :math:`(H, c)` of Hessian matrix and linear vector of the QP
objective.
"""
H = damping * configuration.tangent.eye
c = configuration.tangent.zeros
for task in tasks:
H_task, c_task = task.compute_qp_objective(configuration)
H += H_task
c += c_task
return (H, c) | 623997bbaf7ce92c39084fa44960593b55a0b3a0 | 3,658,025 |
def _is_existing_account(respondent_email):
"""
Checks if the respondent already exists against the email address provided
:param respondent_email: email of the respondent
:type respondent_email: str
:return: returns true if account already registered
:rtype: bool
"""
respondent = party_controller.get_respondent_by_email(respondent_email)
if not respondent:
return False
return True | 4cb0462f748d0b80dbb12f89364d279a3436b632 | 3,658,026 |
import socket
def basic_checks(server,port):
"""Perform basics checks on given host"""
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# 2 seconds timeout
sock.settimeout(2)
return sock.connect_ex((server,int(port))) == 0 | 4a31521089feb2c178bb5202fa818804dfe87142 | 3,658,027 |
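Usage sketch; the call returns True when a TCP connection to the port succeeds within the 2-second timeout. Note the socket is never closed, so wrapping it in try/finally with sock.close() would be cleaner.

if basic_checks('example.com', 80):
    print('port 80 reachable')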
import time
import tensorflow as tf
def test(ipu_estimator, args, x_test, y_test):
"""
Test the model on IPU by loading weights from the final checkpoint in the
given `args.model_dir`.
"""
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
dataset = dataset.prefetch(len(x_test)).cache()
dataset = dataset.batch(args.batch_size, drop_remainder=True)
return dataset
num_test_examples = len(x_test)
steps = num_test_examples // args.batch_size
# IPUEstimator requires no remainder; batches_per_step must divide steps
steps -= steps % args.batches_per_step
print(f"Evaluating on {steps * args.batch_size} examples")
# Set up profiling hook
hooks = []
if args.profile:
hooks.append(ProfilerHook(ipu_estimator.model_dir, name='eval'))
t0 = time.time()
metrics = ipu_estimator.evaluate(input_fn=input_fn,
steps=steps,
hooks=hooks)
t1 = time.time()
test_loss = metrics["loss"]
test_accuracy = metrics["accuracy"]
duration_seconds = t1 - t0
print("Test loss: {:g}".format(test_loss))
print("Test accuracy: {:.2f}%".format(100 * test_accuracy))
print(f"Took {duration_seconds:.2f} seconds to compile and run") | 083c2c830315ccf2602109a4a3e718cecd1b6760 | 3,658,028 |
import googleapiclient.discovery


def _get_service():
"""Gets service instance to start API searches.
Returns:
A Google API Service used to send requests.
"""
# Create the AI Platform service object.
# To authenticate set the environment variable
# GOOGLE_APPLICATION_CREDENTIALS=<path_to_service_account_file>
return googleapiclient.discovery.build('ml', 'v1') | 5d79698216626eff9618dc55b6b651a5da3f5187 | 3,658,029 |
import numpy as np


def giq(scores, targets, I, ordered, cumsum, penalties, randomized, allow_zero_sets):
"""
Generalized inverse quantile conformity score function.
E from equation (7) in Romano, Sesia, Candes. Find the minimum tau in [0, 1] such that the correct label enters.
"""
E = -np.ones((scores.shape[0],))
for i in range(scores.shape[0]):
E[i] = get_tau(
scores[i : i + 1, :],
targets[i].item(),
I[i : i + 1, :],
ordered[i : i + 1, :],
cumsum[i : i + 1, :],
penalties[0, :],
randomized=randomized,
allow_zero_sets=allow_zero_sets,
)
return E | 99a877053cf095622184cbbd9043b742c6ae076f | 3,658,030 |
def findUser(userId):
"""
:param userId:
:return: The user obj
Finds a particular user from a dataset.
"""
return user_collection.find_one({"user_id": userId}) | ffca934689c554993ca5d33005a32e4f9afe48cd | 3,658,031 |
import numpy as np


def sub_sample_map(data, aug_map, n_input, n_output, n_teach, buffer):
"""
Expands an augmentation map to produce indexes that will allow
targets values of previous outputs to be used as inputs
"""
n_io = n_input + n_output
n_req = n_io
teach_range = range(n_teach)
tf_map = []
for map_ in aug_map:
sample = data[map_["orig_sample_idx"]]
n = len(sample)
i = np.random.randint(n - n_io - n_teach - buffer)
j = i + n_req + n_teach + buffer
new_map_ = {"strt_idx": i, "end_idx": j, **map_}
tf_map.append(new_map_)
return tf_map | 05f88939ad2e293e3370f5585ad30f1d9d6256d1 | 3,658,032 |
def capture_flow(pkt_hist):
"""
Monitors the flow in the file.
:param pkt_hist: a list of raw eth packets
:return: 0 (No errors)
"""
closedby = []
global numFlows, flow_buffer, sent_buffer, ackd_buffer, received_buffer, retransmissions, end_ts, retransmissions_timeout, retransmissions_fast
# print "Starting capture"
cnt = 0
for ts, pkt in pkt_hist:
cnt+=1
# print "PACKET -----" + str(cnt)
tcp_packet = get_tcp_packet(pkt, ts)
# print "Seq Num :", str(tcp_packet.seqNum), "| Ack Num :", tcp_packet.ackNum, "| Packet Size :", tcp_packet.pSize, "| Payload Length :", tcp_packet.payloadLength
fState = getFlowStateForPacket(tcp_packet)
if(fState == 2):
#This means that the flow is in active state
# print "Packet belongs to Flow", str(getFlowID(tcp_packet)), "which is already in ACTIVE state."
pkt_id = add_packet(tcp_packet, cnt, ts)
if (tcp_packet._FIN == 1 and getTransDirection(tcp_packet) == 0):
updateFlowState(tcp_packet, 3)
closedby.append([getFlowID(tcp_packet), cnt, "SENDERCLOSE"])
# FIN ACKed by sender
if(tcp_packet._FIN == 1 and getTransDirection(tcp_packet) == 1):
updateFlowState(tcp_packet, 4)
closedby.append([getFlowID(tcp_packet), cnt, "RECVRCLOSE"])
# FIN ACKed by server
elif(fState == 3):
pkt_id = add_packet(tcp_packet, cnt, ts)
if (tcp_packet._FIN == 1 and getTransDirection(tcp_packet) == 1):
updateFlowState(tcp_packet, 5)
closedby.append([getFlowID(tcp_packet), cnt, "RECVRCLOSE"])
# Was in 3 state (finned by sender). Now also FIN ACKed by server
elif(fState == 4):
pkt_id = add_packet(tcp_packet, cnt, ts)
if (tcp_packet._FIN == 1 and getTransDirection(tcp_packet) == 0):
updateFlowState(tcp_packet, 5)
closedby.append([getFlowID(tcp_packet), cnt, "SENDERCLOSE"])
# Was in 4 state (finned by server). Now also FIN ACKed by sender
elif(fState == 5):
if(tcp_packet._ACK == 1):
# Just a stupid ack
add_packet(tcp_packet, cnt, ts)
end_ts[getFlowID(tcp_packet)] = ts
else:
print "Suspicious Packet."
print(closedby)
printFlowBuffer()
break
else:
if(tcp_packet._SYN == 1 and tcp_packet._ACK == 0):
# print "Flow initiated with timestamp", ts
fid = newFlow(tcp_packet)
# updateFlowState(fid, 0) NO NEED TO DO THAT, WHEN WE CREATE A NEW FLOW, ITS DEFAULT STATE IS 0
if(tcp_packet._SYN == 1 and tcp_packet._ACK == 1):
# print "Flow SYN/ACK Received"
updateFlowState(tcp_packet, 1)
winscale[getFlowID(tcp_packet)] = tcp_packet.winscale
if (tcp_packet._SYN == 0 and tcp_packet._ACK == 1):
                # TODO: IN THIS CASE WE NEED TO CHECK IF IT IS FOR NORMAL ACK OR HANDSHAKE ACK
updateFlowState(tcp_packet, 2)
updateFlowWinSize(tcp_packet)
pkt_id = add_packet(tcp_packet, cnt, ts)
# print "Sent Buffer Length : ", len(sent_buffer), " | Received Buffer Length : ", len(received_buffer), " | Acked Buffer Length : ", len(ackd_buffer)
# printFlowBuffer()
# print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n"
if(pkt_id == False):
print " >> No TCP Flow registered for this packet with timestamp", ts
break
# print_first_two_trans()
# print closedby
return 0 | 41a1628960aa4c95c2859fbbe10410fb49289d04 | 3,658,033 |
import time
import subprocess
import traceback
def run_customcheck_command(check):
"""Function that starts as a thread (future) to process a custom check command
Process a custom check command until a given timeout.
The result will be added to the cached_customchecks_check_data object.
process_customcheck_results() takes care of a may dying run_customcheck_command thread.
Parameters
----------
check
Object containing the specific check data (name, command, timeout)
"""
print_verbose('Start custom check "%s" with timeout %s at %s' % (str(check['name']), str(check['timeout']), str(round(time.time()))), False)
agent_log.info('Start custom check "%s" with timeout %s at %s' % (str(check['name']), str(check['timeout']), str(round(time.time()))))
cached_customchecks_check_data[check['name']]['running'] = "true"
cached_customchecks_check_data[check['name']]['command'] = check['command']
try:
p = subprocess.Popen(check['command'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
stdout, stderr = p.communicate(timeout=int(check['timeout']))
p.poll()
if stdout:
stdout = stdout.decode()
if stderr:
stderr = stderr.decode()
cached_customchecks_check_data[check['name']]['result'] = str(stdout)
cached_customchecks_check_data[check['name']]['error'] = None if str(stderr) == 'None' else str(stderr)
cached_customchecks_check_data[check['name']]['returncode'] = p.returncode
except subprocess.TimeoutExpired:
print_verbose('Custom check "%s" timed out' % (check['name']), False)
agent_log.error('Custom check "%s" timed out' % (check['name']))
p.kill() #not needed; just to be sure
cached_customchecks_check_data[check['name']]['result'] = None
cached_customchecks_check_data[check['name']]['error'] = 'Command timeout after ' + str(check['timeout']) + ' seconds'
cached_customchecks_check_data[check['name']]['returncode'] = 124
except:
print_verbose('An error occured while running the custom check "%s"!' % (check['name']), True)
agent_log.error('An error occured while running the custom check "%s"!' % (check['name']))
if stacktrace:
traceback.print_exc()
cached_customchecks_check_data[check['name']]['last_updated_timestamp'] = round(time.time())
cached_customchecks_check_data[check['name']]['last_updated'] = time.ctime()
del cached_customchecks_check_data[check['name']]['running']
return True | aa4846d08190c16e4a1edb49d369ba093f767b3c | 3,658,034 |
def rcGetBBModelEnum():
""" Get the BeagleBone model as member of the BBModel Enum. """
return BBModel(rcGetBBModel()) | 90cf6857f2754a1947d017a1a57b11790d534c05 | 3,658,035 |
def ordToString(ordList):
"""Use this function to convert ord values to strings."""
newStrList = []
cstr = ""
for cint in ordList:
cstr += chr(cint)
if cint == 44:
newStrList.append(cstr[:-1])
cstr = ""
return newStrList | 5a836f7fe34803744de90aa2608e3d99a081c7ff | 3,658,036 |
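Worked example; ord 44 (a comma) acts as the separator and is stripped from each collected string:

ordToString([104, 105, 44, 98, 121, 101, 44])  # ['hi', 'bye']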
def get_test_data_for_successful_build():
"""Returns a test data set of test suites and cases that passed.
"""
return _get_test_data(["PASSED", "PASSED", "PASSED"]) | 8969d33f887dcc7c7f7fb8148cbcfc7a4eb4d7c1 | 3,658,037 |
import re
import logging
def fromOldAdjacencyList(adjlist, group=False, saturateH=False):
"""
Convert a pre-June-2014 string adjacency list `adjlist` into a set of :class:`Atom` and
:class:`Bond` objects.
It can read both "old style" that existed for years, an the "intermediate style" that
existed for a few months in 2014, with the extra column of integers for lone pairs.
"""
atoms = []
atomdict = {}
bonds = {}
try:
adjlist = adjlist.strip()
lines = adjlist.splitlines()
if adjlist == '' or len(lines) == 0:
raise InvalidAdjacencyListError('Empty adjacency list.')
# Skip the first line if it contains a label
if len(lines[0].split()) == 1:
label = lines.pop(0)
if len(lines) == 0:
raise InvalidAdjacencyListError('No atoms specified in adjacency list.')
        mistake1 = re.compile(r'\{[^}]*\s+[^}]*\}')
atomicMultiplicities = {} # these are no longer stored on atoms, so we make a separate dictionary
# Iterate over the remaining lines, generating Atom or GroupAtom objects
for line in lines:
# Sometimes people put spaces after commas, which messes up the
# parse-by-whitespace. Examples include '{Cd, Ct}'.
if mistake1.search(line):
raise InvalidAdjacencyListError(
"Shouldn't have spaces inside braces: {0}".format(mistake1.search(line).group())
)
# Sometimes commas are used to delimit bonds in the bond list,
# so replace them just in case
line = line.replace('},{', '} {')
data = line.split()
# Skip if blank line
if len(data) == 0: continue
# First item is index for atom
# Sometimes these have a trailing period (as if in a numbered list),
# so remove it just in case
aid = int(data[0].strip('.'))
# If second item starts with '*', then atom is labeled
label = ''; index = 1
if data[1][0] == '*':
label = data[1]
index += 1
# Next is the element or functional group element
# A list can be specified with the {,} syntax
atomType = data[index]
if atomType[0] == '{':
atomType = atomType[1:-1].split(',')
else:
atomType = [atomType]
index += 1
# Next is the electron state
radicalElectrons = []; atomSpinMultiplicity = []
elecState = data[index].upper()
if elecState[0] == '{':
elecState = elecState[1:-1].split(',')
else:
elecState = [elecState]
for e in elecState:
if e == '0':
radicalElectrons.append(0); atomSpinMultiplicity.append(1)
elif e == '1':
radicalElectrons.append(1); atomSpinMultiplicity.append(2)
elif e == '2':
radicalElectrons.append(2); atomSpinMultiplicity.append(1)
radicalElectrons.append(2); atomSpinMultiplicity.append(3)
elif e == '2S':
radicalElectrons.append(2); atomSpinMultiplicity.append(1)
elif e == '2T':
radicalElectrons.append(2); atomSpinMultiplicity.append(3)
elif e == '3':
radicalElectrons.append(3); atomSpinMultiplicity.append(4)
elif e == '3D':
radicalElectrons.append(3); atomSpinMultiplicity.append(2)
elif e == '3Q':
radicalElectrons.append(3); atomSpinMultiplicity.append(4)
elif e == '4':
radicalElectrons.append(4); atomSpinMultiplicity.append(5)
elif e == '4S':
radicalElectrons.append(4); atomSpinMultiplicity.append(1)
elif e == '4T':
radicalElectrons.append(4); atomSpinMultiplicity.append(3)
elif e == '4V':
radicalElectrons.append(4); atomSpinMultiplicity.append(5)
elif e == 'X':
radicalElectrons.extend([0,1,2,2])
atomSpinMultiplicity.extend([1,2,1,3])
index += 1
# Next number defines the number of lone electron pairs (if provided)
lonePairsOfElectrons = -1
if len(data) > index:
lpState = data[index]
if lpState[0] == '{':
# this is the start of the chemical bonds - no lone pair info was provided
lonePairsOfElectrons = -1
else:
if lpState == '0':
lonePairsOfElectrons = 0
if lpState == '1':
lonePairsOfElectrons = 1
if lpState == '2':
lonePairsOfElectrons = 2
if lpState == '3':
lonePairsOfElectrons = 3
if lpState == '4':
lonePairsOfElectrons = 4
index += 1
else: # no bonds or lone pair info provided.
lonePairsOfElectrons = -1
# Create a new atom based on the above information
if group:
# charge currently not allowed
atom = GroupAtom(atomType=atomType,
radicalElectrons=sorted(set(radicalElectrons)),
charge=[0],
label=label,
lonePairs=(None if lonePairsOfElectrons==-1 else [lonePairsOfElectrons])
)
else:
atom = Atom(element=atomType[0],
radicalElectrons=radicalElectrons[0],
charge=0,
label=label,
lonePairs=lonePairsOfElectrons
)
atomicMultiplicities[atom] = atomSpinMultiplicity
# Add the atom to the list
atoms.append(atom)
atomdict[aid] = atom
# Process list of bonds
bonds[aid] = {}
for datum in data[index:]:
# Sometimes commas are used to delimit bonds in the bond list,
# so strip them just in case
datum = datum.strip(',')
aid2, comma, order = datum[1:-1].partition(',')
aid2 = int(aid2)
if aid == aid2:
raise InvalidAdjacencyListError('Attempted to create a bond between atom {0:d} and itself.'.format(aid))
if order[0] == '{':
order = order[1:-1].split(',')
else:
order = [order]
bonds[aid][aid2] = order
if group:
multiplicity = None
else:
multiplicity = 1
for atom in atoms:
multiplicity += max(atomicMultiplicities[atom]) - 1
# Check consistency using bonddict
for atom1 in bonds:
for atom2 in bonds[atom1]:
if atom2 not in bonds:
raise InvalidAdjacencyListError('Atom {0:d} not in bond dictionary.'.format(atom2))
elif atom1 not in bonds[atom2]:
raise InvalidAdjacencyListError('Found bond between {0:d} and {1:d}, but not the reverse.'.format(atom1, atom2))
elif bonds[atom1][atom2] != bonds[atom2][atom1]:
raise InvalidAdjacencyListError('Found bonds between {0:d} and {1:d}, but of different orders "{2}" and "{3}".'.format(atom1, atom2, bonds[atom1][atom2], bonds[atom2][atom1]))
# Convert bonddict to use Atom[group] and Bond[group] objects
        atomkeys = sorted(atomdict.keys())
        for aid1 in atomkeys:
            atomkeys2 = sorted(bonds[aid1].keys())
for aid2 in atomkeys2:
if aid1 < aid2:
atom1 = atomdict[aid1]
atom2 = atomdict[aid2]
order = bonds[aid1][aid2]
if group:
bond = GroupBond(atom1, atom2, order)
elif len(order) == 1:
bond = Bond(atom1, atom2, order[0])
else:
raise InvalidAdjacencyListError('Multiple bond orders specified for an atom in a Molecule.')
atom1.edges[atom2] = bond
atom2.edges[atom1] = bond
if saturateH and not group:
# Add explicit hydrogen atoms to complete structure if desired
valences = {'H': 1, 'C': 4, 'O': 2, 'N': 3, 'S': 2, 'Si': 4, 'Cl': 1, 'He': 0, 'Ne': 0, 'Ar': 0}
orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
newAtoms = []
for atom in atoms:
try:
valence = valences[atom.symbol]
except KeyError:
raise InvalidAdjacencyListError('Cannot add hydrogens to adjacency list: Unknown valence for atom "{0}".'.format(atom.symbol))
radical = atom.radicalElectrons
order = 0
for atom2, bond in atom.bonds.items():
order += orders[bond.order]
count = valence - radical - int(order)
for i in range(count):
a = Atom(element='H', radicalElectrons=0, charge=0, label='')
b = Bond(atom, a, 'S')
newAtoms.append(a)
atom.bonds[a] = b
a.bonds[atom] = b
atoms.extend(newAtoms)
# Calculate the number of lone pair electrons requiring molecule with all hydrogen atoms present
if not group and lonePairsOfElectrons == -1:
orders = {'S': 1, 'D': 2, 'T': 3, 'B': 1.5}
for atom in atoms:
radical = atom.radicalElectrons
order = 0
for atom2, bond in atom.bonds.items():
order += orders[bond.order]
lonePairs = (1 if atom.symbol == 'H' or atom.symbol == 'He' else 4) - order - radical
atom.setLonePairs(lonePairs)
atom.updateCharge()
elif not group:
for atom in atoms:
atom.updateCharge()
except InvalidAdjacencyListError:
logging.error("Troublesome adjacency list:\n" + adjlist)
raise
return atoms, multiplicity | 0c54ee172948437f9cb075c5880eb7eb25d2893f | 3,658,038 |
def read_arg_optional(
src, args, n_optional=-1, tolerance=0, mode=MODE_NON_MATH, skip_math=False):
"""Read next optional argument from buffer.
If the command has remaining optional arguments, look for:
a. A spacer. Skip the spacer if it exists.
b. A bracket delimiter. If the optional argument is bracket-delimited,
the contents of the bracket group are used as the argument.
:param Buffer src: a buffer of tokens
:param TexArgs args: existing arguments to extend
:param int n_optional: Number of optional arguments. If < 0, all valid
bracket groups will be captured.
:param int tolerance: error tolerance level (only supports 0 or 1)
:param str mode: math or not math mode
:return: number of remaining optional arguments
:rtype: int
"""
while n_optional != 0:
spacer = read_spacer(src)
if not (src.hasNext() and src.peek().category == TC.BracketBegin):
if spacer:
src.backward(1)
break
args.append(read_arg(src, next(src), tolerance=tolerance, mode=mode, skip_math=skip_math))
n_optional -= 1
return n_optional | 641fe9ab9a96b6e59e15b115abe843fa09a07659 | 3,658,039 |
def searcheduxapian_ajax_get_schlagwort(request, item_container):
""" moegliche Schlagworte """
schlagworte = get_schlagworte(request.GET['query'])
res = '<items>\n'
for schlagwort in schlagworte:
res += '<schlagwort>\n<name><![CDATA[%s]]></name>\n</schlagwort>\n' % schlagwort.name
res += '</items>\n'
return HttpResponse(res, mimetype="text/xml; charset=utf-8") | 5a248ced5006d49f2dc303c68957d07ba187c3d5 | 3,658,040 |
def expanded_X_y_sample_weights(X, y_proba, expand_factor=10,
sample_weight=None, shuffle=True,
random_state=None):
"""
scikit-learn can't optimize cross-entropy directly if target
probability values are not indicator vectors.
As a workaround this function expands the dataset according to
target probabilities. ``expand_factor=None`` means no dataset
expansion.
"""
rng = check_random_state(random_state)
if expand_factor:
if sample_weight is not None:
X, y, sample_weight = zip(*expand_dataset(X, y_proba,
factor=expand_factor,
random_state=rng,
extra_arrays=[
sample_weight
]))
else:
X, y = zip(*expand_dataset(X, y_proba,
factor=expand_factor,
random_state=rng))
else:
y = y_proba.argmax(axis=1)
if isinstance(X, (list, tuple)) and len(X) and issparse(X[0]):
X = vstack(X)
if shuffle:
if sample_weight is not None:
X, y, sample_weight = _shuffle(X, y, sample_weight,
random_state=rng)
else:
X, y = _shuffle(X, y, random_state=rng)
return X, y, sample_weight | 7398062d3eb75fa68c39e20415b944e58a20387e | 3,658,041 |
def refine_uniformly(dom, seg):
"""
Refine all edges of the given domain and segmentation.
:param dom: Domain to refine
:type dom: :class:`viennagrid.Domain`
:param seg: Segmentation of the domain to refine
:type seg: :class:`viennagrid.Segmentation`
:returns: A two-element tuple containing the output domain and segmentation after the refinement.
:raises: TypeError
"""
try:
config = dom.config
dom = dom._domain
except AttributeError:
raise TypeError('parameter at position 1 is not a valid domain')
try:
seg = seg._segmentation
except AttributeError:
raise TypeError('parameter at position 2 is not a valid domain')
refined_result = viennagrid.wrapper.refine_uniformly(dom, seg)
refined_domain = viennagrid.Domain(config)
refined_domain._domain = refined_result[0]
refined_segmentation = viennagrid.Segmentation(refined_domain)
refined_segmentation._segmentation = refined_result[1]
return (refined_domain, refined_segmentation) | 623b9fc2fa6c83133ca1e01714fecba7e70ab95e | 3,658,042 |
def rename_tuning(name, new_name):
"""rename tuning"""
session = tables.get_session()
if session is None:
return False, 'connect'
try:
tuning_table = TuningTable()
if not tuning_table.check_exist_by_name(TuningTable, name, session):
return False, 'tuning not exist'
if tuning_table.check_exist_by_name(TuningTable, new_name, session):
return False, 'duplicate'
tuning_table.update_tuning_name(name, new_name, session)
session.commit()
except SQLAlchemyError as err:
LOGGER.error('Rename tuning failed: %s', err)
return False, 'error'
finally:
session.close()
return True, '' | 1ea1498483fc9abe0bb5be7a7c892c6a171b5df9 | 3,658,043 |
import logging
async def get_event(token: str, event_id: str) -> dict:
"""Get event - return new if no event found."""
event = {"id": event_id, "name": "Nytt arrangement", "organiser": "Ikke valgt"}
if event_id != "":
logging.debug(f"get_event {event_id}")
event = await EventsAdapter().get_event(token, event_id)
return event | a071da872e9a2f670926da5b7a8d4644acaf237e | 3,658,044 |
import re
def _xfsdump_output(data):
"""
Parse CLI output of the xfsdump utility.
"""
out = {}
summary = []
summary_block = False
for line in [l.strip() for l in data.split("\n") if l.strip()]:
line = re.sub("^xfsdump: ", "", line)
if line.startswith("session id:"):
out["Session ID"] = line.split(" ")[-1]
elif line.startswith("session label:"):
out["Session label"] = re.sub("^session label: ", "", line)
elif line.startswith("media file size"):
out["Media size"] = re.sub(r"^media file size\s+", "", line)
elif line.startswith("dump complete:"):
out["Dump complete"] = re.sub(r"^dump complete:\s+", "", line)
elif line.startswith("Dump Status:"):
out["Status"] = re.sub(r"^Dump Status:\s+", "", line)
elif line.startswith("Dump Summary:"):
summary_block = True
continue
if line.startswith(" ") and summary_block:
summary.append(line.strip())
elif not line.startswith(" ") and summary_block:
summary_block = False
if summary:
out["Summary"] = " ".join(summary)
return out | dbc7fbf9dced99b83a7dc5917c473a1dee16d749 | 3,658,045 |
def get_current():
"""Return the currently running interpreter."""
id = _interpreters.get_current()
return Interpreter(id) | 0949280d364cc6f2935b9109c19c508ed06352b8 | 3,658,046 |
def csstext(text: str, cls: str, span: bool=False, header: bool=False) -> str:
"""
Custom build HTML text element.
"""
if span:
tag = 'span'
elif header:
tag = 'h1'
else:
tag = 'p'
return f'<{tag} class="{cls}">{str(text)}</{tag}>' | 0833fd9d83143e09b5c234e193a8e53ef653112b | 3,658,047 |
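Example outputs:

csstext('Hello', 'title', header=True)  # '<h1 class="title">Hello</h1>'
csstext('note', 'small', span=True)     # '<span class="small">note</span>'
csstext('body text', 'para')            # '<p class="para">body text</p>'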
def trans_exam_list_to_colum(example_list, headers=None):
"""
    Convert a list of examples into a column-oriented form, used to adapt the output of extra information.
    :param example_list: list of examples
    :param headers: attributes to extract, defaults to ("question", "answer", "yes_or_no")
    :return: {header1: [...], header2: [...], ...}
"""
if headers is None:
headers = ("question", "answer", "yes_or_no")
result = {}
for header in headers:
result[header] = []
for example in example_list:
for header in headers:
result[header].append(getattr(example, header, ""))
return result | ff5a2e5f6e27ce0a32717e55ba35dbd864a11dbb | 3,658,048 |
def member():
""" RESTful CRUD Controller """
return s3_rest_controller() | 2f14df1f9b97ee4777c2ce0740207c691aedb1c2 | 3,658,049 |
import datetime
def _now():
"""Get EST localized now datetime."""
return EST_TIMEZONE.localize(datetime.datetime.now()) | a7a62b5f5febdbacab0c0ac1e6ef0de843f09a11 | 3,658,050 |
import pandas as pd


def pydantic_model_to_pandas(pydantic_model_input) -> pd.DataFrame:
"""
Function that transforms <pydantic.BaseModel> child objects to
<pandas.DataFrame> objects
:param pydantic_model_input: Input validator for API
"""
return dict_to_pandas(pydantic_model_input.dict()) | 8397c39d7c760ad44565a7b89013d95c241413ed | 3,658,051 |
import numpy as np


def calculate_pair_energy(coordinates, i_particle, box_length, cutoff):
    """
    Calculate the interaction energy of a particle with its environment (all other particles in the system).
Parameters
----------
coordinates : list
The coordinates for all particles in the system
i_particle : int
The particle number for which to calculate the energy
cutoff : float
The simulation cutoff. Beyond this distance, interactions are not calculated.
Returns
-------
e_total : float
        The pairwise interaction energy of the i-th particle with all other particles in the system.
"""
e_total = 0.0
i_position = coordinates[i_particle]
distance_array = calculate_distance(coordinates, i_position, box_length)
# Just so we don't use it for calculation
distance_array[i_particle] = cutoff*2
less_than_cutoff = distance_array[distance_array < cutoff]
interaction_energies = calculate_LJ(less_than_cutoff)
e_total = np.sum(interaction_energies)
return e_total | 42150ff5282731b13e4ac512c08fd71566f0bdb4 | 3,658,052 |
import numpy as np
import pandas as pd


def simulation_activation(model, parcel_df, aerosols_panel):
""" Given the DataFrame output from a parcel model simulation, compute
activation kinetic limitation diagnostics.
Parameters
----------
model : ParcelModel
The ParcelModel
parcel_df : DataFrame used to generate the results to be analyzed
The DataFrame containing the parcel's thermodynamic trajectory
aerosols_panel : Panel
A Panel collection of DataFrames containing the aerosol size evolution
Returns
-------
act_stats : DataFrame
A DataFrame containing the activation statistics
"""
initial_row = parcel_df.iloc[0]
Smax_i, T_i = initial_row['S'], initial_row['T']
acts = {'eq': [], 'kn': [], 'alpha': [], 'phi': []}
initial_aerosols = model.aerosols
N_all_modes = np.sum([aer.total_N for aer in initial_aerosols])
N_fracs = {aer.species: aer.total_N/N_all_modes for aer in initial_aerosols}
for i in range(len(parcel_df)):
row_par = parcel_df.iloc[i]
rows_aer = {key: aerosols_panel[key].iloc[i] for key in aerosols_panel}
# Update thermo
T_i = row_par['T']
if row_par['S'] > Smax_i:
Smax_i = row_par['S']
eq_tot, kn_tot, alpha_tot, phi_tot = 0., 0., 0., 0.
for aerosol in initial_aerosols:
N_frac = N_fracs[aerosol.species]
rs = rows_aer[aerosol.species]
eq, kn, alpha, phi = binned_activation(Smax_i, T_i, rs, aerosol)
eq_tot += eq*N_frac
kn_tot += kn*N_frac
alpha_tot += alpha*N_frac
phi_tot += phi*N_frac
acts['kn'].append(kn_tot)
acts['eq'].append(eq_tot)
acts['alpha'].append(alpha_tot)
acts['phi'].append(phi_tot)
acts_total = pd.DataFrame(acts, index=parcel_df.index)
return acts_total | 41461da13062177124ca4ebedc801ff5d574fbb8 | 3,658,053 |
import os
import sys
import logging
def create(options, args):
"""
Instantiate and return a Blueprint object from either standard input or by
reverse-engineering the system.
"""
try:
with context_managers.mkdtemp():
if not os.isatty(sys.stdin.fileno()):
try:
b = blueprint.Blueprint.load(sys.stdin, args[0])
except ValueError:
logging.error(
'standard input contains invalid blueprint JSON')
sys.exit(1)
else:
b = blueprint.Blueprint.create(args[0])
if options.subtrahend:
logging.info('subtracting {0}'.format(options.subtrahend))
b_s = blueprint.Blueprint.checkout(options.subtrahend)
b = b - b_s
b.commit(options.message or '')
return b
except blueprint.NameError:
logging.error('invalid blueprint name')
sys.exit(1) | 556ac20f6b10c58011a99f217f8e4410faac6ee9 | 3,658,054 |
def create_config(
case=None, Exp='Dummy', Type='Tor',
Lim=None, Bump_posextent=[np.pi/4., np.pi/4],
R=None, r=None, elong=None, Dshape=None,
divlow=None, divup=None, nP=None,
returnas=None, strict=None,
SavePath='./', path=_path_testcases,
):
""" Create easily a tofu.geom.Config object
In tofu, a Config (short for geometrical configuration) refers to the 3D
geometry of a fusion device.
It includes, at least, a simple 2D polygon describing the first wall of the
fusion chamber, and can also include other structural elements (tiles,
limiters...) that can be non-axisymmetric.
To create a simple Config, provide either the name of a reference test
case, of a set of geometrical parameters (major radius, elongation...).
This is just a tool for fast testing, if you want to create a custom
config, use directly tofu.geom.Config and provide the parameters you want.
Parameters
----------
case : str
The name of a reference test case, if provided, this arguments is
sufficient, the others are ignored
Exp : str
The name of the experiment
Type : str
The type of configuration (toroidal 'Tor' or linear 'Lin')
Lim_Bump: list
The angular (poloidal) limits, in the cross-section of the extension of
the outer bumper
R : float
The major radius of the center of the cross-section
r : float
The minor radius of the cross-section
elong: float
An elongation parameter (in [-1;1])
Dshape: float
A parameter specifying the D-shape of the cross-section (in [-1;1])
divlow: bool
A flag specifying whether to include a lower divertor-like shape
divup: bool
A flag specifying whether to include an upper divertor-like shape
nP: int
Number of points used to describe the cross-section polygon
out: str
FLag indicating whether to return:
- 'dict' : the polygons as a dictionary of np.ndarrays
- 'object': the configuration as a tofu.geom.Config instance
returnas: object / dict
Flag indicating whether to return the config as:
- object: a Config instance
- dict: a dict of Struct instances
strict: bool
Flag indicating whether to raise an error if a Struct cannot be loaded
Otherwise only raises a warning
path: str
Absolute path where to find the test case data
SavePath: str
The default path used for saving Struct and Config objects returned by
the routine.
Return
------
conf: tofu.geom.Config / dict
Depending on the value of parameter out, either:
- the tofu.geom.Config object created
- a dictionary of the polygons and their pos/extent (if any)
"""
lp = [R, r, elong, Dshape, divlow, divup, nP]
lpstr = '[R, r, elong, Dshape, divlow, divup, nP]'
lc = [case is not None,
any([pp is not None for pp in lp])]
if np.sum(lc) > 1:
msg = ("Please provide either:\n"
+ "\t- case: the name of a pre-defined config\n"
+ "\t- geometrical parameters {}\n\n".format(lpstr))
raise Exception(msg)
elif not any(lc):
msg = get_available_config(verb=False, returnas=str)
raise Exception(msg)
# Get config, either from known case or geometrical parameterization
if case is not None:
conf = _create_config_testcase(
config=case,
path=path,
returnas=returnas,
strict=strict,
)
else:
poly, pbump, pbaffle = _compute_VesPoly(R=R, r=r,
elong=elong, Dshape=Dshape,
divlow=divlow, divup=divup,
nP=nP)
if returnas == 'dict':
conf = {'Ves':{'Poly':poly},
'Baffle':{'Poly':pbaffle},
'Bumper':{'Poly':pbump,
'pos':Bump_posextent[0],
'extent':Bump_posextent[1]}}
else:
ves = _core.Ves(Poly=poly, Type=Type, Lim=Lim, Exp=Exp, Name='Ves',
SavePath=SavePath)
baf = _core.PFC(Poly=pbaffle, Type=Type, Lim=Lim,
Exp=Exp, Name='Baffle', color='b', SavePath=SavePath)
bump = _core.PFC(Poly=pbump, Type=Type,
pos=Bump_posextent[0], extent=Bump_posextent[1],
Exp=Exp, Name='Bumper', color='g', SavePath=SavePath)
conf = _core.Config(Name='Dummy', Exp=Exp, lStruct=[ves,baf,bump],
SavePath=SavePath)
return conf | e9f855ff614cd511f730afd34c34e1d610b06a43 | 3,658,055 |
def is_project_description(description):
"""Validates the specified project description.
A valid description is simply a non-empty string.
Args:
description (str): A project description to validate.
Returns:
<bool, str|None>: A pair containing the value True if the specified description
is valid, False otherwise; and an error message in case the description is invalid.
"""
try:
return (False, "A project description must be a non-empty string.") if is_empty_string(description) else (True, None)
except TypeError:
return (False, "The 'description' argument must be a string.") | ef831f2ddeede75bb1dbd0730dccacba3e379c2b | 3,658,056 |
import json
def remove_friend():
"""
    Removes an existing friendship between the current user and the given friend.
"""
data = json.loads(request.data)
friend_id = data['id']
user = interface.get_user_by_id(get_jwt_identity())
friend = interface.get_user_by_id(friend_id)
interface.remove_friendship(user, friend)
return '', 200 | 0d5e2c390d5da7ff1869d907bbe85bcda80a9513 | 3,658,057 |
import functools
import logging
def ensure_configured(func):
"""Modify a function to call ``basicConfig`` first if no handlers exist."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if len(logging.root.handlers) == 0:
basicConfig()
return func(*args, **kwargs)
return wrapper | 2c04afd53ab9c7341fc4913485a8a1f7f7e7e1b3 | 3,658,058 |
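# Illustrative usage sketch for ensure_configured; assumes a module-level basicConfig()
# wrapper exists, as the decorator body implies.
@ensure_configured
def warn(message):
    logging.getLogger(__name__).warning(message)
# warn("disk almost full")  # would call basicConfig() first only if no handlers are set yet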
from typing import Dict
from typing import Any
from typing import Optional
from typing import Type
def get_loss(dataset_properties: Dict[str, Any], name: Optional[str] = None) -> Type[Loss]:
"""
Utility function to get losses for the given dataset properties.
If name is mentioned, checks if the loss is compatible with
the dataset properties and returns the specific loss
Args:
dataset_properties (Dict[str, Any]): Dictionary containing
properties of the dataset. Must contain task_type and
output_type as strings.
name (Optional[str]): name of the specific loss
Returns:
Type[torch.nn.modules.loss._Loss]
"""
assert 'task_type' in dataset_properties, \
"Expected dataset_properties to have task_type got {}".format(dataset_properties.keys())
assert 'output_type' in dataset_properties, \
"Expected dataset_properties to have output_type got {}".format(dataset_properties.keys())
task = STRING_TO_TASK_TYPES[dataset_properties['task_type']]
output_type = STRING_TO_OUTPUT_TYPES[dataset_properties['output_type']]
supported_losses = get_supported_losses(task, output_type)
if name is not None:
if name not in supported_losses.keys():
raise ValueError("Invalid name entered for task {}, and output type {} currently supported losses"
" for task include {}".format(dataset_properties['task_type'],
dataset_properties['output_type'],
list(supported_losses.keys())))
else:
loss = supported_losses[name]
else:
loss = get_default(task)
return loss | a9f75d6e2c35a0b9472e3fdc046f72eb1188e48d | 3,658,059 |
def ten_to_base(value : int, base):
"""Converts a given decimal value into the specified base.
:param value: The number to convert
:param base: The base to convert the specified number to
:return: The converted value in the specified base
"""
# Check if the base is 10, return the value
if base == 10:
return value
# Keep track of the remainders, which will be the new digits in the specified base
remainders = []
# Divide the value by the base until the number is 0
while value != 0:
remainders.append(value % base)
value //= base
# Reverse the order of the remainders and turn each digit
# into the proper value from the BASES string
remainders.reverse()
for i in range(len(remainders)):
remainders[i] = BASES[remainders[i]]
return "".join(remainders) | 2f5ba92af48fe2ce19dbcb6001afadfde2514373 | 3,658,060 |
def get_style(selector, name):
"""
Returns the resolved CSS style for the given property name.
:param selector:
:param name:
"""
if not get_instance():
raise Exception("You need to start a browser first with open_browser()")
return get_style_g(get_instance(), selector, name) | 903e4abc09dc196d0d1dbbbb3c58869e3c0beb78 | 3,658,061 |
import inspect
from functools import wraps
from inspect import unwrap
def argmod(*args):
"""
Decorator that intercepts and modifies function arguments.
Args:
from_param (str|list): A parameter or list of possible parameters that
should be modified using `modifier_func`. Passing a list of
possible parameters is useful when a function's parameter names
have changed, but you still want to support the old parameter
names.
to_param (str): Optional. If given, to_param will be used as the
parameter name for the modified argument. If not given, to_param
will default to the last parameter given in `from_param`.
modifier_func (callable): The function used to modify the `from_param`.
Returns:
function: A function that modifies the given `from_param` before the
function is called.
"""
from_param = listify(args[0])
to_param = from_param[-1] if len(args) < 3 else args[1]
modifier_func = args[-1]
def _decorator(func):
try:
argspec = inspect.getfullargspec(unwrap(func))
except AttributeError:
argspec = inspect.getargspec(unwrap(func))
if to_param not in argspec.args:
return func
arg_index = argspec.args.index(to_param)
@wraps(func)
def _modifier(*args, **kwargs):
kwarg = False
for arg in from_param:
if arg in kwargs:
kwarg = arg
break
if kwarg:
kwargs[to_param] = modifier_func(kwargs.pop(kwarg))
elif arg_index < len(args):
args = list(args)
args[arg_index] = modifier_func(args[arg_index])
return func(*args, **kwargs)
return _modifier
return _decorator | 5824d20568a3913be59941df0e4f657f05f08cc0 | 3,658,062 |
import sys
import boto3
import botocore
def group_update(group_id, group_min, group_max, desired):
"""
Test with invalid input
>>> group_update('foo', 2, 1, 4)
{}
"""
if group_min > group_max or desired < group_min or desired > group_max:
return {}
try:
client = boto3.client('autoscaling')
response = client.update_auto_scaling_group(
AutoScalingGroupName=group_id,
MinSize=group_min,
MaxSize=group_max,
DesiredCapacity=desired)
except botocore.exceptions.ClientError:
print "Autoscaling client error: update_auto_scaling_group"
sys.exit(127)
return response | 77eef10b7db604a3aa2e32bdd0d226fb44cf07ab | 3,658,063 |
def remove_bookmark(request, id):
"""
This view deletes a bookmark.
If requested via ajax it also returns the add bookmark form to replace the
drop bookmark form.
"""
bookmark = get_object_or_404(Bookmark, id=id, user=request.user)
if request.method == "POST":
bookmark.delete()
if not is_xhr(request):
messages.success(request, "Bookmark removed")
if request.POST.get("next"):
return HttpResponseRedirect(request.POST.get("next"))
return HttpResponse("Deleted")
return render(
request,
"admin_tools/menu/add_bookmark_form.html",
context={
"url": request.POST.get("next"),
"title": "**title**", # replaced on the javascript side
},
)
return render(
request,
"admin_tools/menu/delete_confirm.html",
context={"bookmark": bookmark, "title": "Delete Bookmark"},
) | 9c8442d5a313e7babf71b9f9a4c41452c65c5aab | 3,658,064 |
import numpy as np
def parse_propa(blob):
"""Creates new blob entries for the given blob keys"""
if "track_in" in blob.keys():
muon = blob["track_in"]
blob["Muon"] = Table(
{
"id": np.array(muon)[:, 0].astype(int),
"pos_x": np.array(muon)[:, 1],
"pos_y": np.array(muon)[:, 2],
"pos_z": np.array(muon)[:, 3],
"dir_x": np.array(muon)[:, 4],
"dir_y": np.array(muon)[:, 5],
"dir_z": np.array(muon)[:, 6],
"energy": np.array(muon)[:, 7],
"time": np.array(muon)[:, 8],
"particle_id": np.array(muon)[:, 9].astype(int),
"is_charm": np.array(muon)[:, 10].astype(int),
"mother_pid": np.array(muon)[:, 11].astype(int),
"grandmother_pid": np.array(muon)[:, 11].astype(int),
},
h5loc="muon",
)
blob["MuonMultiplicity"] = Table(
{"muon_multiplicity": len(np.array(muon)[:, 6])}, h5loc="muon_multiplicity"
)
if "neutrino" in blob.keys():
nu = blob["neutrino"]
blob["Neutrino"] = Table(
{
"id": np.array(nu)[:, 0].astype(int),
"pos_x": np.array(nu)[:, 1],
"pos_y": np.array(nu)[:, 2],
"pos_z": np.array(nu)[:, 3],
"dir_x": np.array(nu)[:, 4],
"dir_y": np.array(nu)[:, 5],
"dir_z": np.array(nu)[:, 6],
"energy": np.array(nu)[:, 7],
"time": np.array(nu)[:, 8],
"particle_id": np.array(nu)[:, 9].astype(int),
"is_charm": np.array(nu)[:, 10].astype(int),
"mother_pid": np.array(nu)[:, 11].astype(int),
"grandmother_pid": np.array(nu)[:, 11].astype(int),
},
h5loc="nu",
)
blob["NeutrinoMultiplicity"] = Table(
{
"total": len(np.array(nu)[:, 6]),
"nue": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == 12]),
"anue": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == -12]),
"numu": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == 14]),
"anumu": len(np.array(nu)[:, 6][np.array(nu)[:, 9] == -14]),
},
h5loc="nu_multiplicity",
)
if ("track_in" or "neutrino") in blob.keys():
blob["Weights"] = Table(
{
"w1": blob["weights"][0][0],
"w2": blob["weights"][0][1],
"w3": blob["weights"][0][2],
},
h5loc="weights",
)
if "track_primary" in blob.keys():
primary = blob["track_primary"]
blob["Primary"] = Table(
{
"id": np.array(primary)[:, 0].astype(int),
"pos_x": np.array(primary)[:, 1],
"pos_y": np.array(primary)[:, 2],
"pos_z": np.array(primary)[:, 3],
"dir_x": np.array(primary)[:, 4],
"dir_y": np.array(primary)[:, 5],
"dir_z": np.array(primary)[:, 6],
"energy": np.array(primary)[:, 7],
"time": np.array(primary)[:, 8],
"particle_id": np.array(primary)[:, 9].astype(int),
},
h5loc="primary",
)
return blob | ae7993d6e51287b6a88d125f63ef7d6edd001cf1 | 3,658,065 |
def parseParams(opt):
"""Parse a set of name=value parameters in the input value.
Return list of (name,value) pairs.
Raise ValueError if a parameter is badly formatted.
"""
params = []
for nameval in opt:
try:
name, val = nameval.split("=")
except ValueError:
raise ValueError("Bad name=value format for '%s'" % nameval)
params.append((name, val))
return params | b932f74c8e5502ebdd7a8749c2de4b30921d518b | 3,658,066 |
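# Illustrative usage sketch for parseParams, e.g. on values collected from a CLI option.
params = parseParams(["host=localhost", "port=8080"])
# params == [("host", "localhost"), ("port", "8080")]
# parseParams(["missing-equals"]) would raise ValueError("Bad name=value format for 'missing-equals'")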
from ._sparse_array import SparseArray
def asnumpy(a, dtype=None, order=None):
"""Returns a dense numpy array from an arbitrary source array.
Args:
a: Arbitrary object that can be converted to :class:`numpy.ndarray`.
order ({'C', 'F', 'A'}): The desired memory layout of the output
array. When ``order`` is 'A', it uses 'F' if ``a`` is
fortran-contiguous and 'C' otherwise.
Returns:
numpy.ndarray: Converted array on the host memory.
"""
if isinstance(a, SparseArray):
a = a.todense()
return np.array(a, dtype=dtype, copy=False, order=order) | 54bea22ab6fe8327b3a4df93ac9e4447b4d65fec | 3,658,067 |
import pandas as pd
def get_next_cpi_date():
"""
Get next CPI release date
"""
df = pd.read_html(r"https://www.bls.gov/schedule/news_release/cpi.htm")[0][:-1]
df["Release Date"] = pd.to_datetime(df["Release Date"], errors='coerce')
df = df[df["Release Date"] >= current_date].iloc[0]
df['Release Date'] = df['Release Date'].strftime('%Y-%m-%d')
return df | e23b9bea0996ac442115163729ffeed1407e52b5 | 3,658,068 |
from typing import Tuple
from datetime import datetime, timedelta
def date_arithmetic() -> Tuple[datetime, datetime, int]:
""" This function is used to calculate
what is the date after 3 days is given
and the differences between two given dates """
date1: str = "Feb 27, 2020"
date_2020: datetime = datetime.strptime(
date1, "%b %d, %Y") + timedelta(3)
date2: str = "Feb 27, 2019"
date_2019: datetime = datetime.strptime(
date2, "%b %d, %Y") + timedelta(3)
date3: str = "Feb 1, 2019"
date4: str = "Sep 30, 2019"
days_passed: int = datetime.strptime(
date3, "%b %d, %Y") - datetime.strptime(date4, "%b %d, %Y")
three_days_after_02272020: datetime = date_2020.strftime("%b %d, %Y")
three_days_after_02272019: datetime = date_2019.strftime("%b %d, %Y")
days_passed_02012019_09302019: int = abs(days_passed.days)
return three_days_after_02272020, three_days_after_02272019, days_passed_02012019_09302019 | 1e2d4681578ccab11612771589a46f22246071eb | 3,658,069 |
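# Illustrative usage sketch for date_arithmetic; the dates are hard-coded in the function,
# so the call takes no arguments.
after_2020, after_2019, days_between = date_arithmetic()
# after_2020 == "Mar 01, 2020" (2020 is a leap year)
# after_2019 == "Mar 02, 2019"
# days_between == 241 (days from Feb 1, 2019 to Sep 30, 2019)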
from nltk.corpus import stopwords
def get_words_from_line_list(text):
"""
Applies Translations and returns the list of words from the text document
"""
text = text.translate(translation_table)
word_list = [x for x in text.split() if x not in set(stopwords.words('english'))]
return word_list | aaa2a1476e887aa6a7d477d67528f838d6f229b9 | 3,658,070 |
def _get_name(dist):
"""Attempts to get a distribution's short name, excluding the name scope."""
return getattr(dist, 'parameters', {}).get('name', dist.name) | fd57e523c1a84a36f9ed56236e4b8db1e887575c | 3,658,071 |
import numpy as np
def compute_mean_std(all_X):
"""Return an approximate mean and std for every feature"""
concatenated = np.concatenate(all_X, axis=0).astype(np.float64)
mean = np.mean(concatenated, axis=0)
std = np.std(concatenated, axis=0)
std[std == 0] = 1
return mean, std | b102a045705efdab8d9783e04da192ec30e167f7 | 3,658,072 |
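# Illustrative usage sketch for compute_mean_std; all_X is a list of per-sample feature
# matrices that share the same number of columns.
all_X = [np.array([[1.0, 2.0], [3.0, 4.0]]), np.array([[5.0, 6.0]])]
mean, std = compute_mean_std(all_X)
# mean == array([3., 4.]); zero standard deviations are replaced by 1 to avoid division errors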
def GenerateConfig(context):
"""Generates configuration."""
key_ring = {
'name': 'keyRing',
'type': 'gcp-types/cloudkms-v1:projects.locations.keyRings',
'properties': {
'parent': 'projects/' + context.env['project'] + '/locations/' + context.properties['region'],
'keyRingId': context.env['deployment'] + '-key-ring'
}
}
crypto_key = {
'name': 'cryptoKey',
'type': 'gcp-types/cloudkms-v1:projects.locations.keyRings.cryptoKeys',
'properties': {
'parent': '$(ref.keyRing.name)',
'cryptoKeyId': context.env['deployment'] + '-crypto-key',
'purpose': 'ENCRYPT_DECRYPT'
}
}
resources = [key_ring, crypto_key]
outputs = [{
'name': 'primaryVersion',
'value': '$(ref.cryptoKey.primary)'
}]
return { 'resources': resources, 'outputs': outputs } | 257b7217c1a08bba46866aff0b7faa1a03fe7fdc | 3,658,073 |
from itertools import groupby
from operator import itemgetter
def get_valid_collapsed_products(products, limit):
    """Wraps collapse_products while respecting a limit,
    to ensure that incomplete products are not collapsed.
    """
next_min_scanid = get_next_min_scanid(products, limit)
collapsed_products = []
for scanid, scan in groupby(products, itemgetter('ScanID')):
if scanid == next_min_scanid:
continue
collapsed_products.extend(collapse_products(list(scan)))
return collapsed_products, next_min_scanid | df3ffa503855a020c7c011aa58cba20243e19be4 | 3,658,074 |
import pandas as pd
def get_imd():
"""Fetches data about LA IMD status.
The "local concentration" measure is used -
this gives higher weight to particularly deprived areas
Source: http://www.gov.uk/government/statistics/english-indices-of-deprivation-2019
"""
imd = pd.read_csv(
PROJECT_DIR / "inputs/data/societal-wellbeing_imd2019_indicesbyla.csv",
usecols=[1, 2],
skiprows=7,
)
return imd | 4e2495dda505bde8dd8ccf62234730a7227ffa97 | 3,658,075 |
import cv2
def read_bgr(file):
    """Read the specified file as a BGR image.
    # Args:
        file: image file name.
    # Returns:
        The image on success, None on failure.
    """
return cv2.imread(file, cv2.IMREAD_COLOR) | ee96842899ffefa0508218d0a4b721f2ae5a7efb | 3,658,076 |
def _remove_none_from_post_data_additional_rules_list(json):
"""
removes hidden field value from json field "additional_rules" list,
which is there to ensure field exists for editing purposes
:param json: this is data that is going to be posted
"""
data = json
additional_rules = json.get("additional_rules", None)
if additional_rules and "None" in additional_rules:
new_additional_rules = []
for rule in additional_rules:
if rule != "None":
new_additional_rules.append(rule)
data["additional_rules"] = new_additional_rules
return data | c82aa568f82ba4abcb8f4e6f9c770969277d078f | 3,658,077 |
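# Illustrative usage sketch; the payload below is a hypothetical POST body containing the
# hidden "None" placeholder that the form adds so the field always exists.
post_data = {"name": "policy-1", "additional_rules": ["None", "rule-a", "rule-b"]}
cleaned = _remove_none_from_post_data_additional_rules_list(post_data)
# cleaned["additional_rules"] == ["rule-a", "rule-b"]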
import traceback
def add_email(request, pk):
"""
This Endpoint will add the email id into
the person contact details.
It expects personId in URL param.
"""
try:
request_data = request.data
email = request_data.get("email")
person = Person.objects.filter(id=pk).last()
if email:
Email.objects.create(
email=email,
person_id=person.id
)
serializer = PersonDetailSerializer(person)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
    except Exception:
print(traceback.format_exc())
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR) | 5669b442e7ef4fb3e5a22053c368e6a4b68cfeef | 3,658,078 |
import numpy as np
from typing import Any
def coords_extracter():
"""Exctract coords to send command to robot.
To be executed inside of xarm_hand_control module."""
SKIPPED_COMMANDS = 5
COEFF = 22
current = [0]
def coords_to_command(data: Any):
current[0] += 1
if current[0] < SKIPPED_COMMANDS:
return
current[0] = 0
if np.linalg.norm(data[0:2], 2) < 0.05:
return
x = data[0] * COEFF / 1000
z = data[1] * COEFF / 1000
# speed = np.linalg.norm(data, ord=2) * COEFF * 50
# speed = int(speed)
# # speed = np.log(speed) * COEFF
# mvacc = speed * 10
speed = 500
mvacc = speed * 10
command = Command(
x=x,
y=0.0,
z=z,
speed=speed,
acc=mvacc,
is_radian=True,
is_cartesian=True,
is_relative=True,
)
# print(command)
send_command(command)
return coords_to_command | 930cef91e517751da3c3fb441543ab736be6aa23 | 3,658,079 |
def NO_MERGE(writer, segments):
"""This policy does not merge any existing segments.
"""
return segments | 0742365f30d59cb219ac60483b867180bd910ba8 | 3,658,080 |
import gzip
import shutil
def save_features():
"""
Writes extracted feature vectors into a binary or text file, per args.
:return: none
"""
extractor = args.extractor
features = []
if extractor == 'multi':
features = extract_multi()
elif extractor == 'single':
features = extract_single()
# print("Output shape: ", features.shape) # comment out if you don't care to know output shape
extension = str(args.ext)
compress = args.compressed
out_path = str(args.out_path)
# TODO: get rid of boilerplate code
outfile = "" + out_path
out_full = outfile + "." + extension
if extension == "hdf5":
# (Recommended, default) save to .hdf5
f = h5py.File("" + out_path + ".hdf5", "w")
f.create_dataset(name=str(args.out_path), data=features)
if compress:
            with open(out_full, 'rb') as f_in:
outfile_gz = out_full + ".gz"
with gzip.open(outfile_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
elif extension == "npy": # god please don't actually do this
# Save to .npy binary (numpy) - incompressible (as of now)
np.save(file=outfile, allow_pickle=True, arr=features)
if compress:
            with open(out_full, 'rb') as f_in:
outfile_gz = out_full + ".gz"
with gzip.open(outfile_gz, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
elif extension == "csv":
# Save to .csv (or, .csv.gz if args.compressed==True)
# This option is natively compressible.
if compress:
extension += ".gz"
outfile = "" + out_path + "." + extension
# TODO: This needs to return a string, no explicit save
np.savetxt(fname=outfile, X=features, fmt='%1.5f')
return features
# TODO: (distant future) npz for the optional list of concat. 1d arrays | 65fd172aa0ff0283111c9e8148eb33a1160255d3 | 3,658,081 |
def build_ntwk(p, s_params):
"""
Construct a network object from the model and
simulation params.
"""
np.random.seed(s_params['RNG_SEED'])
# set membrane properties
n = p['N_PC'] + p['N_INH']
t_m = cc(
[np.repeat(p['T_M_PC'], p['N_PC']), np.repeat(p['T_M_INH'], p['N_INH'])])
e_l = cc(
[np.repeat(p['E_L_PC'], p['N_PC']), np.repeat(p['E_L_INH'], p['N_INH'])])
v_th = cc(
[np.repeat(p['V_TH_PC'], p['N_PC']), np.repeat(p['V_TH_INH'], p['N_INH'])])
v_r = cc(
[np.repeat(p['V_R_PC'], p['N_PC']), np.repeat(p['V_R_INH'], p['N_INH'])])
t_rp = cc(
[np.repeat(p['T_R_PC'], p['N_PC']), np.repeat(p['T_R_INH'], p['N_INH'])])
# set latent nrn positions
lb = [-s_params['BOX_W']/2, -s_params['BOX_H']/2]
ub = [s_params['BOX_W']/2, s_params['BOX_H']/2]
# sample evenly spaced place fields
## E cells
pfxs_e, pfys_e = cxn.apx_lattice(lb, ub, p['N_PC'], randomize=True)
## I cells
pfxs_i, pfys_i = cxn.apx_lattice(lb, ub, p['N_INH'], randomize=True)
## join E & I place fields
pfxs = cc([pfxs_e, pfxs_i])
pfys = cc([pfys_e, pfys_i])
# make upstream ws
if p['W_PC_PL'] > 0:
w_pc_pl_flat = np.random.lognormal(
*lognormal_mu_sig(p['W_PC_PL'], p['S_PC_PL']), p['N_PC'])
else:
w_pc_pl_flat = np.zeros(p['N_PC'])
if p['W_PC_G'] > 0:
w_pc_g_flat = np.random.lognormal(
*lognormal_mu_sig(p['W_PC_G'], p['S_PC_G']), p['N_PC'])
else:
w_pc_g_flat = np.zeros(p['N_PC'])
ws_up_temp = {
'E': {
('PC', 'PL'): np.diag(w_pc_pl_flat),
('PC', 'G'): np.diag(w_pc_g_flat),
},
}
targs_up = cc([np.repeat('PC', p['N_PC']), np.repeat('INH', p['N_INH'])])
srcs_up = cc([np.repeat('PL', p['N_PC']), np.repeat('G', p['N_PC'])])
ws_up = join_w(targs_up, srcs_up, ws_up_temp)
# make rcr ws
w_pc_pc = cxn.make_w_pc_pc(pfxs[:p['N_PC']], pfys[:p['N_PC']], p)
w_inh_pc = cxn.make_w_inh_pc(
pfxs_inh=pfxs[-p['N_INH']:],
pfys_inh=pfys[-p['N_INH']:],
pfxs_pc=pfxs[:p['N_PC']],
pfys_pc=pfys[:p['N_PC']],
p=p)
w_pc_inh = cxn.make_w_pc_inh(
pfxs_pc=pfxs[:p['N_PC']],
pfys_pc=pfys[:p['N_PC']],
pfxs_inh=pfxs[-p['N_INH']:],
pfys_inh=pfys[-p['N_INH']:],
p=p)
ws_rcr_temp = {
'E': {
('PC', 'PC'): w_pc_pc,
('INH', 'PC'): w_inh_pc,
},
'I': {
('PC', 'INH'): w_pc_inh,
},
}
targs_rcr = cc([np.repeat('PC', p['N_PC']), np.repeat('INH', p['N_INH'])])
ws_rcr = join_w(targs_rcr, targs_rcr, ws_rcr_temp)
# make ntwk
ntwk = LIFNtwk(
t_m=t_m,
e_l=e_l,
v_th=v_th,
v_r=v_r,
t_r=t_rp,
es_syn={'E': p['E_E'], 'I': p['E_I']},
ts_syn={'E': p['T_E'], 'I': p['T_I']},
ws_up=ws_up,
ws_rcr=ws_rcr)
ntwk.pfxs = pfxs
ntwk.pfys = pfys
ntwk.types_up = srcs_up
ntwk.types_rcr = targs_rcr
ntwk.n_pc = p['N_PC']
ntwk.n_inh = p['N_INH']
ntwk.n_g = p['N_PC']
ntwk.n_inp = p['N_PC']
ntwk.n_rcr = p['N_PC'] + p['N_INH']
ntwk.n_up = 2 * p['N_PC']
ntwk.types_up_slc = {
'PL': slice(0, p['N_PC']),
'G': slice(p['N_PC'], 2*p['N_PC'])
}
ntwk.types_rcr_slc = {
'PC': slice(0, p['N_PC']),
'INH': slice(p['N_PC'], p['N_PC'] + p['N_INH'])
}
return ntwk | 88a5b5c73edf015d9b2b4137db874252f63a3571 | 3,658,082 |
def createAaronWorld():
"""
Create an empty world as an example to build future projects from.
"""
# Set up a barebones project
project = makeBasicProject()
# Create sprite sheet for the player sprite
player_sprite_sheet = addSpriteSheet(project, "actor_animated.png", "actor_animated", "actor_animated")
project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]
# add a sprite we can use for the rocks
a_rock_sprite = addSpriteSheet(project, "rock.png", "rock", "static")
a_dog_sprite = addSpriteSheet(project, "dog.png", "dog", "static")
# Add a background image
default_bkg = makeBackground("placeholder.png", "placeholder")
project.backgrounds.append(default_bkg)
a_scene = makeScene(f"Scene", default_bkg)
project.scenes.append(a_scene)
actor = makeActor(a_rock_sprite, 9, 8)
a_scene['actors'].append(actor)
dog_actor = makeActor(a_dog_sprite, 5, 5)
dog_script = []
element = makeElement()
element["command"] = "EVENT_ACTOR_EMOTE"
element["args"] = {
"actorId": "player",
"emoteId": "1"
}
dog_script.append(element)
element = makeElement()
element["command"] = "EVENT_END"
dog_script.append(element)
dog_actor["script"] = dog_script
a_scene['actors'].append(dog_actor)
# Add some music
project.music.append(makeMusic("template", "template.mod"))
# Set the starting scene
project.settings["startSceneId"] = project.scenes[0]["id"]
return project | 7326aee61ee4977ccc422955fbd33c6a51b13e37 | 3,658,083 |
def builtin_ljustify(s, w, p):
"""Left-justify a string to a given width with a given padding character."""
sv = s.convert(BStr()).value
pv = p.convert(BStr()).value
return BStr(sv.ljust(w.value, pv)) | dda28d65d1916a7e01aa36e7b90ee5ba98329c58 | 3,658,084 |
import os
def package_files(directory):
"""package_files
recursive method which will lets you set the
package_data parameter in the setup call.
"""
paths = []
for (path, _, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths | e043de9a9e8ed9092f933df00b167b092ba6abaa | 3,658,085 |
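# Illustrative usage sketch for package_files; "mypackage/data" is a placeholder directory.
extra_files = package_files("mypackage/data")
# setup(..., packages=["mypackage"], package_data={"": extra_files})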
def get_effective_router(appname):
"""Returns a private copy of the effective router for the specified application"""
if not routers or appname not in routers:
return None
return Storage(routers[appname]) | dd0e3ccc8d05864b5a324541129845e5f82c2669 | 3,658,086 |
def is_activated(user_id):
"""Checks if a user has activated their account. Returns True or false"""
cur = getDb().cursor()
cur.execute('SELECT inactive FROM users where user_id=%s', (user_id,))
inactive = cur.fetchone()[0]
cur.close()
    return False if inactive == 1 else True | 704a5c3462be3612e5cd44057ee082d612ae8aa9 | 3,658,087
import base64
import json
def _encode(dictionary):
"""Encodes any arbitrary dictionary into a pagination token.
Args:
dictionary: (dict) Dictionary to basee64-encode
Returns:
(string) encoded page token representing a page of items
"""
# Strip ugly base64 padding.
    return base64.urlsafe_b64encode(json.dumps(dictionary).encode('utf-8')).decode('ascii').rstrip('=') | e9a490e659a3a0e6d546fd2ab4dd89a5f6a748af | 3,658,088
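# Illustrative usage sketch for _encode; the cursor fields are arbitrary examples.
token = _encode({"offset": 50, "page_size": 25})
# token is a URL-safe string with the trailing base64 '=' padding stripped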
from typing import List
from pathlib import Path
import tempfile
import gc
import os
def remove_duplicates_sharded(
files: List[Path],
outputs: List[Path],
hashes_dir: FilesOrDir,
field: str,
group_hashes: int = 1,
tmp_dir: Path = None,
min_len: int = 0,
):
"""Remove duplicates in several passes, when all hashes don't fit in RAM.
Note: The current implementation is not doing a 'perfect' deduplication.
If a hash appear exactly once in each shard of hashes it won't be detected
as a duplicate. This can be fixed if hashes are fully dedup beforehand.
"""
assert len(files) == len(outputs)
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
assert len(hashes_files) > 0, f"no hashes files found in: {hashes_dir}"
if len(hashes_files) <= group_hashes:
log(f"All hashes can be done in one pass, using DuplicatesRemover on {files}")
rm_dups = DuplicatesRemover(field, hashes_files)
rm_dups._prepare()
run_par(
(jsonql.run_pipes, (rm_dups,), dict(file=f, output=o))
for f, o in zip(files, outputs)
)
return
log(f"Starting deduplicate_sharded on {files}.")
tmp_directory = tempfile.TemporaryDirectory(dir=str(tmp_dir) if tmp_dir else None)
def tmp_files(i):
return [
Path(tmp_directory.name) / (f.name.split(".")[0] + f".{i}.bin")
for f in files
]
last = tmp_files(0)
run_par((_dump_sentence_hashes, (f, tmp, field), {}) for f, tmp in zip(files, last))
if isinstance(hashes_dir, list):
hashes_files = hashes_dir
else:
hashes_files = sorted(
h for h in Path(hashes_dir).iterdir() if h.suffix == ".bin"
)
for i, group in enumerate(jsonql.grouper(hashes_files, group_hashes)):
hashes = FlatHashSet()
for h in group:
hashes.load(h)
log(f"Loaded {h}, up to {len(hashes)} hashes ({mem_footprint_gb()}GB)")
intermediates = tmp_files(i + 1)
# Remove hashes in parallel. Since modern OS have "copy-on-write" and
# `hashes` is read-only, we will only have one version of it in RAM.
run_par(
(_remove_duplicate_hashes, (hashes, f, tmp), {})
for f, tmp in zip(last, intermediates)
)
# Force hashes to be freed, before we start allocating a new one.
del hashes
gc.collect()
for tmp in last:
os.remove(tmp)
last = intermediates
def finalize(source, dedup_hashes, min_len):
n_chars, n_chars_kept = 0, 0
with open(dedup_hashes, "rb") as hashes:
for doc in jsonql.read_jsons(source):
content = doc.get(field)
if not content or len(content) < min_len:
continue
sentences = content.split("\n")
doc_hashes = np.fromfile(hashes, dtype=HASH_TYPE, count=len(sentences))
chars, kept_chars = finalize_doc(doc, field, doc_hashes)
n_chars += chars
n_chars_kept += kept_chars
yield doc
selectivity = n_chars_kept / n_chars if n_chars else 0
log(f"Kept {n_chars_kept} chars out of {n_chars} ({selectivity:.1%}).")
dedup_hashes = last
run_par(
[
(
jsonql.run_pipe,
(finalize,),
dict(kwargs=dict(dedup_hashes=h, min_len=min_len), file=f, output=o),
)
for h, f, o in zip(dedup_hashes, files, outputs)
]
)
tmp_directory.cleanup() | 6ab2477cd3c3fb513af2fbb4808c0279212b1cd2 | 3,658,089 |
def selectPlate(plates, jdRange, normalise=False, scope='all'):
"""From a list of simulated plates, returns the optimal one."""
# Gets the JD range for the following night
nextNightJDrange = _getNextNightRange(jdRange)
# First we exclude plates without new exposures
plates = [plate for plate in plates if plate._after['nNewExposures'] > 0]
# Sorts plates by inverse plate completion.
plates = sorted(plates, reverse=True, key=lambda plate: plate.getPlateCompletion()
if plate.getPlateCompletion() <= 1 else 1. / plate.getPlateCompletion())
if len(plates) == 0:
return None
# If we are scheduling only plugged plates, we rather plug a new plate
# unless we can observe a plugged plate at least for a whole set.
availableTime = (jdRange[1] - jdRange[0]) * 24.
completionIncrease = np.array(
[plate._after['completion'] - plate._before['completion'] for plate in plates])
# minSchedulingTime ensures that if the remaining time < length of a set,
# we still use the plugged plates, if any.
if scope == 'plugged':
if (availableTime > minSchedulingTime and np.all(completionIncrease == 0)):
return None
else:
# If no plate has been observed for a whole set, we try to use first
# plates that are already plugged.
if np.all(completionIncrease == 0):
pluggedPlates = [plate for plate in plates if plate.isPlugged]
if len(pluggedPlates) > 0:
plates = pluggedPlates
# If plugger, tries to select only plates at APO
if scope == 'plugged':
platesAtAPO = [plate for plate in plates if plate.getLocation() == 'APO']
if len(platesAtAPO) > 0:
plates = platesAtAPO
# Now tries to select only plates that have been marked.
markedPlates = [
plate for plate in plates if 'Accepted' in [status.label for status in plate.statuses]
]
if len(markedPlates) > 0:
plates = markedPlates
# We check if any of the plate is complete after the simulation.
# If so, we return the one with fewer new exposures.
completePlates = [plate for plate in plates
if plate._after['completion'] > plate.completion_factor]
nNewExposures = [plate._after['nNewExposures'] for plate in completePlates]
if len(completePlates) > 0:
return completePlates[np.argmin(nNewExposures)]
# We record the real completion before and after. We will normalise the
# other completions based on our scheduling logic.
for plate in plates:
plate._before['realCompletion'] = plate._before['completion']
plate._before['realCompletion+'] = plate._before['completion+']
plate._after['realCompletion'] = plate._after['completion']
plate._after['realCompletion+'] = plate._after['completion+']
# If normalise=True, we divide the several completion values by the
# length of the observing window for the plate, normalised by the length
# of the minimum plate window. The effect of this is that plates with short
# observing windows get comparatively larger completions and, thus, have
# higher chance of being selected. This is good for plugged plates, as it
# tries to schedule first plates with short windows even if other plates
# could be completed at the time.
# We also increase the completion of plates for which we have patched sets,
# while we penalise those with incomplete sets. With this logic, we hope
# that plates are observed when their incomplete sets can be patched.
if normalise:
_normaliseWindowLength(plates, jdRange, factor=1.0, apply=True)
# We also normalise using the following night, if possible.
if nextNightJDrange is not None:
_normaliseWindowLength(plates, nextNightJDrange, factor=nextNightFactor, apply=True)
# Now we normalise plate completion using a metric that gives higher
# priority to plates for which we have patched incomplete sets.
patchedSetFactor = []
for plate in plates:
nSetsFactor = 0
for ss in plate.sets:
if not ss.isMock:
nNewExps = 0
for exp in ss.totoroExposures:
if hasattr(exp, '_tmp') and exp._tmp:
nNewExps += 1
setComplete = ss.getStatus()[0] in ['Good', 'Excellent']
if setComplete and nNewExps == 0:
pass
else:
if nNewExps > 0:
nSetsFactor += 2 * nNewExps
if setComplete:
nSetsFactor *= 2
else:
nSetsFactor -= 1
patchedSetFactor.append(1. + patchSetFactor * nSetsFactor)
_completionFactor(plates, patchedSetFactor)
# We add the priority into the mix
platePriorities = np.array([plate.priority for plate in plates]) - 5.
_completionFactor(plates, 1 + platePriorityFactor * platePriorities)
ancillaryPriorities = []
for plate in plates:
if hasattr(plate, 'ancillary_weight'):
ancillaryPriorities.append(plate.ancillary_weight)
else:
ancillaryPriorities.append(1)
_completionFactor(plates, np.array(ancillaryPriorities))
# Selects the plates that have the largest increase in completion
completionIncrease = [plate._after['completion'] - plate._before['completion']
for plate in plates if plate.completion_factor <= 1.]
if len(completionIncrease) == 0:
for plate in plates:
if plate.completion_factor > 1:
completionIncrease.append(
plate._after['completion'] - plate._before['completion'])
completionIncrease = np.array(completionIncrease)
plates = np.array(plates)
maxCompletionIncrease = np.max(completionIncrease)
plates = plates[np.where(completionIncrease == maxCompletionIncrease)]
if len(plates) == 1:
return plates[0]
# If maxCompletionIncrease is 0, it means that no plate has been
# observed for at least a set. In this case, if possible, we want to use
# a plate that already has signal.
if maxCompletionIncrease == 0:
platesWithSignal = [plate for plate in plates if plate._before['completion+'] > 0]
if len(platesWithSignal) > 0:
plates = platesWithSignal
# If several plates have maximum completion increase, use the incomplete
# sets to break the tie.
completionIncreasePlus = np.array(
[plate._after['completion+'] - plate._before['completion+'] for plate in plates])
return plates[np.argmax(completionIncreasePlus)] | f48207a9be002e6b2295d4100e02ffaddde779e8 | 3,658,090 |
import mne
import numpy as np
from typing import Union
def get_events(
raw: mne.io.BaseRaw,
event_picks: Union[str, list[str], list[tuple[str, str]]],
) -> tuple[np.ndarray, dict]:
"""Get events from given Raw instance and event id."""
if isinstance(event_picks, str):
event_picks = [event_picks]
events = None
for event_pick in event_picks:
if isinstance(event_pick, str):
event_id = {event_pick: 1}
else:
event_id = {event_pick[0]: 1, event_pick[1]: -1}
try:
events, _ = mne.events_from_annotations(
raw=raw,
event_id=event_id,
verbose=True,
)
return events, event_id
except ValueError as error:
print(error)
_, event_id_found = mne.events_from_annotations(
raw=raw,
verbose=False,
)
raise ValueError(
f"None of the given `event_picks´ found: {event_picks}."
f"Possible events: {*event_id_found.keys(),}"
) | d1b5c961160848607a40cfcb5b2ba8a47625ab21 | 3,658,091 |
def transplant(root, u, v):
"""
    Note: the root must be returned here, otherwise the change cannot take effect for the caller.
"""
if u.parent == None:
root = v
elif u.parent.left == u:
u.parent.left = v
else:
u.parent.right = v
if v:
v.parent = u.parent
return root | cadf0433399e428596d1d0d4ab200e4d79285d21 | 3,658,092 |
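# Illustrative usage sketch for transplant; the Node class is a minimal assumed stand-in
# for whatever BST node type the surrounding code uses.
class Node:
    def __init__(self, key):
        self.key = key
        self.left = None
        self.right = None
        self.parent = None
root = Node(10)
child = Node(5)
root.left, child.parent = child, root
leaf = Node(7)
child.right, leaf.parent = leaf, child
# Replace the subtree rooted at `child` with the subtree rooted at `leaf`.
root = transplant(root, child, leaf)
# root.left is now `leaf`, and leaf.parent is `root`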
def is_head_moderator():
"""
Returns true if invoking author is a Head Moderator (role).
"""
async def predicate(ctx: Context):
if not any(config.HEAD_MOD_ROLE in role.id for role in ctx.author.roles):
raise NotStaff("The command `{}` can only be used by a Head Moderator.".format(ctx.invoked_with))
return True
return commands.check(predicate) | 22f80251190d914d38052e67ca8fe47279eab833 | 3,658,093 |
def compute_adj_matrices(type, normalize=True):
"""
Computes adjacency matrices 'n', 'd' or 's' used in GCRAM.
"""
# Get channel names
raw = mne.io.read_raw_edf('dataset/physionet.org/files/eegmmidb/1.0.0/S001/S001R01.edf', preload=True, verbose=False).to_data_frame()
ch_names = raw.columns[2:]
n_channels = 64
# Compute channel position distances using electrode positions. Required for computing 'd' and 's' adjacency matrices
ch_pos_1010 = get_sensor_pos(ch_names)
ch_pos_1010_names = []
ch_pos_1010_dist = []
for name, value in ch_pos_1010.items():
ch_pos_1010_names.append(name)
ch_pos_1010_dist.append(value)
ch_pos_1010_dist = np.array(ch_pos_1010_dist)
# Compute adjacency matrices
if type=='n':
A = n_graph()
elif type=='d':
A = d_graph(n_channels, ch_pos_1010_dist)
elif type=='s':
A = s_graph(n_channels, ch_pos_1010_dist)
# Normalize adjacency matrices
if normalize:
A = normalize_adj(A)
A = np.array(A, dtype=np.float32)
return A | cecf0c94c5d1efe6c6210949736377d5d7d454c4 | 3,658,094 |
from typing import Optional
def build_census_chart(
*, alt, census_floor_df: pd.DataFrame, max_y_axis: Optional[int] = None, use_log_scale: bool = False
) -> Chart:
"""
This builds the "Admitted Patients" census chart, projecting total number of patients in the hospital over time.
Args:
alt: Reference to Altair package.
census_floor_df: Pandas data frame containing three columns: "census_hospitalized", "census_icu", and
"census_ventilated".
max_y_axis: Optional maximum value for the Y axis of the chart.
use_log_scale: Set to true to use a logarithmic scale on the Y axis. Default is linear scale.
Returns: The newly created chart.
"""
adjusted_census_floor_df = __adjust_data_for_log_scale(census_floor_df) if use_log_scale else census_floor_df
y_scale = __build_y_scale(alt, max_y_axis, use_log_scale)
x = dict(shorthand="date:T", title=i18n.t("charts-date"), axis=alt.Axis(format=(DATE_FORMAT)))
y = dict(shorthand="value:Q", title=i18n.t("charts-census"), scale=y_scale)
color = "key:N"
tooltip = ["date:T", alt.Tooltip("value:Q", format=".0f", title="Census"), "key:N"]
# TODO fix the fold to allow any number of dispositions
points = (
alt.Chart()
.transform_fold(fold=[i18n.t("census_hospitalized"), i18n.t("census_icu"), i18n.t("census_ventilated")])
.encode(x=alt.X(**x), y=alt.Y(**y), color=color, tooltip=tooltip)
.mark_line(point=True)
.encode(
x=alt.X(**x),
y=alt.Y(**y),
color=color,
tooltip=tooltip,
)
)
bar = (
alt.Chart()
.encode(x=alt.X(**x))
.transform_filter(alt.datum.day == 0)
.mark_rule(color="black", opacity=0.35, size=2)
)
census_floor_df_renamed = adjusted_census_floor_df.rename({
"census_hospitalized": i18n.t("census_hospitalized"),
"census_icu": i18n.t("census_icu"),
"census_ventilated": i18n.t("census_ventilated")
}, axis=1)
return (
alt.layer(points, bar, data=census_floor_df_renamed)
.configure_legend(orient="bottom")
.interactive()
) | 315329e593e116dbf63fb02772bf9394722feb90 | 3,658,095 |
def hasNonAsciiCharacters(sText):
"""
Returns True is specified string has non-ASCII characters, False if ASCII only.
"""
sTmp = unicode(sText, errors='ignore') if isinstance(sText, str) else sText;
return not all(ord(ch) < 128 for ch in sTmp); | c1627d1a0a26e7c4d4e04c84085197ba44b5e640 | 3,658,096 |
import numpy as np
import cv2
def draw_matches(image_1, image_1_keypoints, image_2, image_2_keypoints, matches):
""" Draws the matches between the image_1 and image_2.
(Credit: GT CP2017 course provided source)
Params:
image_1: The first image (can be color or grayscale).
image_1_keypoints: The image_1 keypoints.
image_2: The image to search in (can be color or grayscale)
image_2_keypoints: The image_2 keypoints.
Returns:
output: Image with a line drawn between matched keypoints.
"""
# Compute number of channels.
num_channels = 1
if len(image_1.shape) == 3:
num_channels = image_1.shape[2]
# Separation between images.
margin = 10
# Create an array that will fit both images (with a margin of 10 to
# separate the two images)
joined_image = np.zeros((max(image_1.shape[0], image_2.shape[0]),
image_1.shape[1] + image_2.shape[1] + margin,
3))
if num_channels == 1:
for channel_idx in range(3):
joined_image[:image_1.shape[0],
:image_1.shape[1],
channel_idx] = image_1
joined_image[:image_2.shape[0],
image_1.shape[1] + margin:,
channel_idx] = image_2
else:
joined_image[:image_1.shape[0], :image_1.shape[1]] = image_1
joined_image[:image_2.shape[0], image_1.shape[1] + margin:] = image_2
for match in matches:
image_1_point = (int(image_1_keypoints[match.queryIdx].pt[0]),
int(image_1_keypoints[match.queryIdx].pt[1]))
image_2_point = (int(image_2_keypoints[match.trainIdx].pt[0] +
image_1.shape[1] + margin),
int(image_2_keypoints[match.trainIdx].pt[1]))
        rgb = (np.random.rand(3) * 255).astype(int).tolist()
cv2.circle(joined_image, image_1_point, 5, rgb, thickness=-1)
cv2.circle(joined_image, image_2_point, 5, rgb, thickness=-1)
cv2.line(joined_image, image_1_point, image_2_point, rgb, thickness=3)
return joined_image | d2984de4c542fca7dda3863ad934e9eddc14a375 | 3,658,097 |
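# Illustrative usage sketch for draw_matches; "left.png" / "right.png" are placeholder
# file names, and ORB + brute-force Hamming matching is one possible way to get matches.
img1 = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("right.png", cv2.IMREAD_GRAYSCALE)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)[:20]
annotated = draw_matches(img1, kp1, img2, kp2, matches)
cv2.imwrite("matches.png", annotated)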
def copy_ttl_in():
"""
COPY_TTL_IN Action
"""
return _action("COPY_TTL_IN") | a01acb2645e033ad658435e9ca0323aec10b720c | 3,658,098 |
import torch
from typing import Optional
def neuron_weight(
layer: str,
weight: torch.Tensor,
x: Optional[int] = None,
y: Optional[int] = None,
batch: Optional[int] = None,
) -> Objective:
"""Linearly weighted channel activation at one location as objective
:param layer: Name of the layer
:type layer: str
:param weight: A torch.Tensor of same length as the number of channels
:type weight: torch.Tensor
:param x: x-position, defaults to None
:type x: Optional[int], optional
:param y: y-position, defaults to None
:type y: Optional[int], optional
:param batch: which position at the batch dimension of the image tensor this objective is applied to, defaults to None
:type batch: Optional[int], optional
:return: Objective to optimize input for a linearly weighted channel activation at one location
:rtype: Objective
"""
@handle_batch(batch)
def inner(model):
layer_t = model(layer)
layer_t = _extract_act_pos(layer_t, x, y)
if weight is None:
return -layer_t.mean()
else:
return -(layer_t.squeeze() * weight).mean()
return inner | 646966613249a02468e00b91157fd43459d246bb | 3,658,099 |