| content (stringlengths 35 to 762k) | sha1 (stringlengths 40) | id (int64 0 to 3.66M) |
|---|---|---|
def avg_pds_from_events(
times,
gti,
segment_size,
dt,
norm="frac",
use_common_mean=True,
silent=False,
fluxes=None,
errors=None,
):
"""Calculate the average periodogram from a list of event times or a light curve.
If the input is a light curve, the time array needs to be uniformly sampled
inside GTIs (it can have gaps outside), and the fluxes need to be passed
through the ``fluxes`` array.
    Otherwise, times are interpreted as photon arrival times.
Parameters
----------
times : float `np.array`
Array of times
gti : [[gti00, gti01], [gti10, gti11], ...]
good time intervals
segment_size : float
length of segments
dt : float
Time resolution of the light curves used to produce periodograms
Other Parameters
----------------
norm : str, default "frac"
The normalization of the periodogram. "abs" is absolute rms, "frac" is
fractional rms, "leahy" is Leahy+83 normalization, and "none" is the
unnormalized periodogram
use_common_mean : bool, default True
The mean of the light curve can be estimated in each interval, or on
the full light curve. This gives different results (Alston+2013).
Here we assume the mean is calculated on the full light curve, but
the user can set ``use_common_mean`` to False to calculate it on a
per-segment basis.
silent : bool, default False
Silence the progress bars
fluxes : float `np.array`, default None
Array of counts per bin or fluxes
errors : float `np.array`, default None
Array of errors on the fluxes above
Returns
-------
freq : `np.array`
The periodogram frequencies
power : `np.array`
The normalized periodogram powers
n_bin : int
the number of bins in the light curves used in each segment
n_ave : int
the number of averaged periodograms
mean : float
the mean flux
"""
if segment_size is None:
segment_size = gti.max() - gti.min()
n_bin = np.rint(segment_size / dt).astype(int)
dt = segment_size / n_bin
flux_iterable = get_flux_iterable_from_segments(
times, gti, segment_size, n_bin, fluxes=fluxes, errors=errors
)
cross = avg_pds_from_iterable(
flux_iterable, dt, norm=norm, use_common_mean=use_common_mean, silent=silent
)
if cross is not None:
cross.meta["gti"] = gti
return cross | 904f43b36380e07e115d23c6677b61bca155d898 | 5,400 |
import random
def check_random_state(seed):
"""
Turn seed into a random.Random instance
If seed is None, return the Random singleton used by random.
If seed is an int, return a new Random instance seeded with seed.
If seed is already a Random instance, return it.
Otherwise raise ValueError.
"""
# Code slightly adjusted from scikit-learn utils/validation.py
if seed is None or isinstance(seed, int):
rng = random.Random(seed)
elif isinstance(seed, random.Random):
rng = seed
else:
raise ValueError(
"### error: '{}' cannot be used to seed random.Random instance.".format(
seed
)
)
return rng | 347481de01f4a3bba59bc9a2c484c10d4857e1e2 | 5,401 |
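# A minimal usage sketch (assumption: check_random_state above is in scope).
# Integer seeds give reproducible generators; an existing Random is passed through.
import random

rng_a = check_random_state(42)
rng_b = check_random_state(42)
assert rng_a.random() == rng_b.random()          # same seed, same sequence
existing = random.Random(7)
assert check_random_state(existing) is existing  # returned unchanged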
def chunk(seq, size, groupByList=True):
"""Returns list of lists/tuples broken up by size input"""
func = tuple
if groupByList:
func = list
return [func(seq[i:i + size]) for i in range(0, len(seq), size)] | e7cece99822a01476b46351cebc1345793485cbd | 5,402 |
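# Usage sketch for chunk(), assuming the function above is importable.
print(chunk(list(range(7)), 3))               # [[0, 1, 2], [3, 4, 5], [6]]
print(chunk("abcdef", 2, groupByList=False))  # [('a', 'b'), ('c', 'd'), ('e', 'f')]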
def registerNamespace(namespace, prefix):
"""
Register a namespace in libxmp.exempi
@namespace : the namespace to register
@prefix : the prefix to use with this namespace
"""
try:
registered_prefix = libxmp.exempi.namespace_prefix(namespace)
# The namespace already exists, return actual prefix.
return registered_prefix
except libxmp.XMPError:
# This namespace does not exist, that's cool
pass
try:
libxmp.exempi.prefix_namespace_uri(prefix)
# Prefix is already used, but not by us.
raise NameError("Prefix is already used")
except libxmp.XMPError:
# This prefix is not used yet, that's cool
pass
return libxmp.exempi.register_namespace(namespace, prefix)[:-1] | 6e7dbed515651f252222a283dc1cda08941fa4c5 | 5,403 |
def definition():
"""
Lists the parent-child relationships through the curriculum structure.
"""
sql = """
--Course to session
SELECT c.course_id as parent_id,
CASE WHEN cc.course_id IS NULL THEN 0 ELSE 1 END as linked,
cs.course_session_id as child_id, 'course' as parent,
cs.description + ' ' + cast(cs.session as char(1)) as description,
-1 as ratio,0 as changed
FROM c_course c
LEFT OUTER JOIN c_course_session cs on cs.curriculum_id = c.curriculum_id
LEFT OUTER JOIN c_course_config cc on c.course_id = cc.course_id
AND cc.course_session_id = cs.course_session_id
UNION ALL
--session to group
SELECT a.course_session_id as parent_id,
CASE WHEN c.course_session_id IS NULL THEN 0 ELSE 1 END as linked,
b.cgroup_id as child_id, 'course_session' as parent,
b.description,
-1 as ratio, 0 as changed
FROM c_course_session a
LEFT OUTER JOIN c_cgroup b ON a.curriculum_id = b.curriculum_id
LEFT OUTER JOIN c_course_session_config c on a.course_session_id = c.course_session_id
AND b.cgroup_id = c.cgroup_id
UNION ALL
--CGroup to component
SELECT a.cgroup_id as parent_id,
CASE WHEN c.component_id IS NULL THEN 0 ELSE 1 END as linked,
b.component_id as child_id, 'cgroup' as parent, b.description,
ISNULL(c.ratio, 1) as ratio, 0 as changed
FROM c_cgroup a
LEFT OUTER JOIN c_component b ON a.curriculum_id = b.curriculum_id
LEFT OUTER JOIN c_cgroup_config c on a.cgroup_id = c.cgroup_id
AND b.component_id = c.component_id
"""
return sql | e8dc6a720dcd5f62854ce95e708a88b43859e2cc | 5,404 |
def create_user(strategy, details, backend, user=None, *args, **kwargs):
"""Aggressively attempt to register and sign in new user"""
if user:
return None
request = strategy.request
settings = request.settings
email = details.get("email")
username = kwargs.get("clean_username")
if not email or not username:
return None
try:
validate_email(email)
validate_new_registration(request, {"email": email, "username": username})
except ValidationError:
return None
activation_kwargs = {}
if settings.account_activation == "admin":
activation_kwargs = {"requires_activation": User.ACTIVATION_ADMIN}
new_user = User.objects.create_user(
username, email, joined_from_ip=request.user_ip, **activation_kwargs
)
setup_new_user(settings, new_user)
send_welcome_email(request, new_user)
return {"user": new_user, "is_new": True} | afdff23d6ca578ef652872ba11bcfe57264b0a9b | 5,405 |
def prendreTresorPlateau(plateau,lig,col,numTresor):
"""
    takes the treasure numTresor that is on the card at lig,col of the board
    returns True if the operation went well (the treasure really was on
    the card)
    parameters: plateau: the board considered
        lig: the row where the card is located
        col: the column where the card is located
        numTresor: the number of the treasure to take from the card
    result: a boolean indicating whether the treasure was indeed on the considered card
"""
if getTresor(getVal(plateau,lig,col))==numTresor:
prendreTresor(getVal(plateau,lig,col))
return True
else:
return False | 5fa94fb875e34068f4e391c66952fe4cc4248ddf | 5,406 |
import os
import logging
import yaml
def load_styles(style_yaml) -> dict:
""" Load point style dictionary """
default_style = {"icon_image": DEFAULT_ICON_IMAGE,
"icon_color": DEFAULT_ICON_COLOR,
"icon_scale": DEFAULT_ICON_SCALE,
"text_scale": DEFAULT_TEXT_SCALE,
}
styles = {'Default': default_style}
if style_yaml is None:
return styles
if not os.path.isfile(style_yaml):
logging.error('Invalid style file location %s', style_yaml)
return styles
with open(style_yaml, 'r') as stream:
        new_styles = yaml.safe_load(stream)  # safe_load: recent PyYAML requires an explicit loader
for style in new_styles:
if style in styles:
logging.warning('Style %s already exists', style)
continue
else:
styles[style] = dict() # Create new style
for attr in ['icon_image', 'icon_color', 'icon_scale', 'text_scale']:
if attr in new_styles[style]:
attr_val = new_styles[style][attr]
else:
attr_val = default_style[attr]
styles[style][attr] = attr_val
return styles | 940a540c5ee7bad266d5a51bfafee2a9d2197128 | 5,407 |
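# Hedged sketch: write a small style file and load it. The DEFAULT_* constants
# referenced by load_styles are assumed to be defined elsewhere in the module.
with open("styles_example.yaml", "w") as fh:
    fh.write("Red:\n  icon_color: '#ff0000'\n  icon_scale: 1.2\n")
styles = load_styles("styles_example.yaml")
print(styles["Red"]["icon_color"])  # '#ff0000'; unspecified attributes fall back to the defaults
print(sorted(styles))               # ['Default', 'Red']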
import os
def everything_deployed(project, chain, web3, accounts, deploy_address) -> dict:
"""Deploy our token plan."""
yaml_filename = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..","crowdsales", "allocated-token-sale-acceptance-test.yml"))
deployment_name = "testrpc"
chain_data = load_crowdsale_definitions(yaml_filename, deployment_name)
runtime_data, statistics, contracts = _deploy_contracts(project, chain, web3, yaml_filename, chain_data, deploy_address)
return contracts | f486f63838cef1d4da6f5dfdacad28a1a0f560fe | 5,408 |
def task_get_all(context, filters=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc', admin_as_user=False):
"""
Get all tasks that match zero or more filters.
:param filters: dict of filter keys and values.
:param marker: task id after which to start page
:param limit: maximum number of tasks to return
:param sort_key: task attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted (asc, desc)
:param admin_as_user: For backwards compatibility. If true, then return to
an admin the equivalent set of tasks which it would see
if it were a regular user
:returns: tasks set
"""
filters = filters or {}
session = get_session()
query = session.query(models.Task)
if not (context.is_admin or admin_as_user) and context.owner is not None:
query = query.filter(models.Task.owner == context.owner)
_task_soft_delete(context, session=session)
showing_deleted = False
if 'deleted' in filters:
deleted_filter = filters.pop('deleted')
query = query.filter_by(deleted=deleted_filter)
showing_deleted = deleted_filter
for (k, v) in filters.items():
if v is not None:
key = k
if hasattr(models.Task, key):
query = query.filter(getattr(models.Task, key) == v)
marker_task = None
if marker is not None:
marker_task = _task_get(context, marker,
force_show_deleted=showing_deleted)
sort_keys = ['created_at', 'id']
if sort_key not in sort_keys:
sort_keys.insert(0, sort_key)
query = _paginate_query(query, models.Task, limit,
sort_keys,
marker=marker_task,
sort_dir=sort_dir)
task_refs = query.all()
tasks = []
for task_ref in task_refs:
tasks.append(_task_format(task_ref, task_info_ref=None))
return tasks | 4d03e8f7ae15411c2cb597aeb06e25cd40c4f033 | 5,409 |
def updateAppMonConf(AppID, requestModel):
"""Update an Application Monitoring Configuration for a given application
Args:
AppID (str): This is the application ID of the Web App you are interested in.
requestModel: This is the data you wish to update and you need to put it in this
format:
{
"enabled": true,
"scanUrl": "https://mywebapp.com/directory"
}
explanation:
{
enabled (boolean): Enable Application Monitoring ,
scanUrl (string): Scan Url
}
Returns:
dict: Dictionary with the following layout
{
"success": true,
"errors": [
"string"
]
}
In the case of a return code 204, the update will take place but you will not
get the above layout, instead you will get a custom layout like this:
{'Response_Text': u'', 'Status_code': 204}
"""
url = "https://api.ams.fortify.com/api/v3/applications/{applicationId}/application-monitoring/configuration".format(applicationId=AppID)
req = fodRequest()
r = req.put(url, params=requestModel)
return r
#*******************************************************Applications************************************************************** | 0b61eae04b79702b2722d0c0bc5dafe48dcdf21f | 5,410 |
def plot_morphology(morphology,
order=MORPHOLOGY_ORDER,
colors=MORPHOLOGY_COLORS,
metastases=None,
metastasis_color=METASTASIS_COLOR,
ax=None,
bg_color='#f6f6f6',
**kwargs):
"""Plots morphology matrix as 2D heatmap.
Plots a morphology matrix (typically obtained from the parse_morphology
function) as a 2D heatmap. Matrix is expected to correspond with the
three categories returned by parse_morphology (ILC, Spindle cell
and Squamous).
Parameters
----------
morphology : pd.DataFrame
Boolean matrix of samples-by-morphologies.
    order :
        Order in which the morphology categories are drawn.
    colors :
        Colors used for the morphology categories.
metastases : pd.DataFrame
Optional dataframe (single column) indicating which samples have
a metastasis. Used to draw metastases as an extra row in the heatmap.
    metastasis_color :
        Color used for the metastasis row.
ax : matplotlib.Axis
Axis to use for plotting.
    bg_color : str
        Background color for cells without a morphology call.
**kwargs
Any kwargs are passed to seaborns heatmap function.
Returns
-------
matplotlib.Axis
Axis that was used for plotting.
"""
if ax is None:
_, ax = plt.subplots()
# Add metastasis data if given.
if metastases is not None:
morphology = pd.concat([morphology, metastases], axis=1)
order = list(order) + [metastases.columns[0]]
colors = list(colors) + [metastasis_color]
# Sort by rows/columns.
morphology = morphology[list(order)]
morphology = sort_matrix(morphology, sort_columns=False)
# Convert to numeric matrix (for heatmap).
num_matrix = pd.DataFrame(
{
col: morphology[col].astype(int) * (i + 1)
for i, col in enumerate(morphology)
},
columns=morphology.columns)
# Draw heatmap.
cmap = ListedColormap([bg_color] + list(colors))
sns.heatmap(
num_matrix.T,
ax=ax,
cbar=False,
cmap=cmap,
vmin=0,
vmax=len(colors),
**kwargs)
ax.set_xticks([])
ax.set_xlim(0, num_matrix.shape[0])
ax.set_title('Tumor morphology')
ax.set_xlabel('Samples ({})'.format(morphology.shape[0]))
# Add counts to labels.
ax.set_yticklabels(
['{} ({})'.format(k, v) for k, v in morphology.sum().items()][::-1],
rotation=0)
return ax | 62048ced8ede14e9aa505c6e45dd5b196d12297d | 5,411 |
import os
def scores_summary(CurDataDir, steps = 300, population_size = 40, regenerate=False):
"""Obsolete for the one above! better and more automatic"""
ScoreEvolveTable = np.full((steps, population_size,), np.NAN)
ImagefnTable = [[""] * population_size for i in range(steps)]
fncatalog = os.listdir(CurDataDir)
if "scores_summary_table.npz" in fncatalog and (not regenerate):
# if the summary table exist, just read from it!
with np.load(os.path.join(CurDataDir, "scores_summary_table.npz")) as data:
ScoreEvolveTable = data['ScoreEvolveTable']
ImagefnTable = data['ImagefnTable']
return ScoreEvolveTable, ImagefnTable
startnum = 0
for stepi in range(startnum, steps):
try:
with np.load(os.path.join(CurDataDir, "scores_end_block{0:03}.npz".format(stepi))) as data:
score_tmp = data['scores']
image_ids = data['image_ids']
ScoreEvolveTable[stepi, :len(score_tmp)] = score_tmp
if stepi==0:
image_fns = image_ids
else:
image_fns = []
for imgid in image_ids:
fn_tmp_list = [fn for fn in fncatalog if (imgid in fn) and ('.npy' in fn)]
                        assert len(fn_tmp_list) == 1, "Code file not found or wrong Code file number"
image_fns.append(fn_tmp_list[0])
ImagefnTable[stepi][0:len(score_tmp)] = image_fns
# FIXME: 1st generation natural stimuli is not in the directory! so it's not possible to get the file name there. Here just put the codeid
except FileNotFoundError:
if stepi == 0:
startnum += 1
steps += 1
continue
else:
print("maximum steps is %d." % stepi)
ScoreEvolveTable = ScoreEvolveTable[0:stepi, :]
ImagefnTable = ImagefnTable[0:stepi]
steps = stepi
break
ImagefnTable = np.asarray(ImagefnTable)
    np.savez(os.path.join(CurDataDir, "scores_summary_table.npz"),
             ScoreEvolveTable=ScoreEvolveTable, ImagefnTable=ImagefnTable)
return ScoreEvolveTable, ImagefnTable | a0e31a9647dee013aa5d042cc41f3793a097e1c2 | 5,412 |
import requests
from datetime import datetime as dt
def post(name,url,message,params=None):
"""Wrap a post in some basic error reporting"""
start = dt.now()
s = requests.session()
if params is None:
response = s.post(url,json=message)
else:
response = s.post(url,json=message,params=params)
end = dt.now()
if not response.status_code == 200:
print(name, 'error:',response.status_code)
print(response.json())
return response.json()
print(f'{name} returned in {end-start}s')
m = response.json()
if 'message' in m:
if 'results' in m['message']:
print(f'Num Results: {len(m["message"]["results"])}')
print_errors(m)
return m | 9180424171cdf4cb7bf16a938d7207a99af0987f | 5,413 |
def get_lin_reg_results(est, true, zero_tol=0):
"""
Parameters
----------
est: an Estimator
A covariance estimator.
true: array-like, shape (n_features, n_features)
zero_tol: float
Output
------
out: dict with keys 'utri' and 'graph'
"""
est_coef = get_coef(est)[0]
est_adj = fill_hollow_sym(est_coef)
    true_adj = fill_hollow_sym(true)  # adjacency of the true coefficients, not the estimate
coef_results = compare_vecs(est=est_coef, truth=true,
zero_tol=zero_tol)
graph_results = compare_adj_mats(est=est_adj, truth=true_adj,
zero_tol=zero_tol)
results = merge_dicts(coef_results, graph_results, allow_key_overlap=False)
return results | 0f963c135d0bd74a70714ef47ed6f2b0191df846 | 5,414 |
import requests
import logging
def download_file(requested_url: str) -> str:
"""Download a file from github repository"""
url = f"https://github.com/{requested_url.replace('blob', 'raw')}"
resp = requests.get(url)
logging.info(F"Requested URL: {requested_url}")
if resp.status_code != 200:
logging.info(f"Can not download {url}")
raise NotebookDownloadException("Can not download the file. Please, check the URL")
return resp.text | f96d68843f6291aa3497a6e7a5b1e30e2ea4005e | 5,415 |
import warnings
def readBody(response):
"""
Get the body of an L{IResponse} and return it as a byte string.
This is a helper function for clients that don't want to incrementally
receive the body of an HTTP response.
@param response: The HTTP response for which the body will be read.
@type response: L{IResponse} provider
@return: A L{Deferred} which will fire with the body of the response.
Cancelling it will close the connection to the server immediately.
"""
def cancel(deferred):
"""
Cancel a L{readBody} call, close the connection to the HTTP server
immediately, if it is still open.
@param deferred: The cancelled L{defer.Deferred}.
"""
abort = getAbort()
if abort is not None:
abort()
d = defer.Deferred(cancel)
protocol = _ReadBodyProtocol(response.code, response.phrase, d)
def getAbort():
return getattr(protocol.transport, 'abortConnection', None)
response.deliverBody(protocol)
if protocol.transport is not None and getAbort() is None:
warnings.warn(
'Using readBody with a transport that does not have an '
'abortConnection method',
category=DeprecationWarning,
stacklevel=2)
return d | bbc693fca1536a3699b0e088941d9577de94d8dd | 5,416 |
def is_valid_mac(address):
"""Verify the format of a MAC address."""
class mac_dialect(netaddr.mac_eui48):
word_fmt = '%.02x'
word_sep = ':'
try:
na = netaddr.EUI(address, dialect=mac_dialect)
except Exception:
return False
return str(na) == address.lower() | f8bb59a986773307f803dd52154ec03eaddb8597 | 5,417 |
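# Usage sketch (assumes netaddr is installed and is_valid_mac above is in scope).
print(is_valid_mac("aa:bb:cc:dd:ee:ff"))  # True
print(is_valid_mac("AA:BB:CC:DD:EE:FF"))  # True  (comparison is done against address.lower())
print(is_valid_mac("aa:bb:cc:dd:ee:gg"))  # False ('gg' is not hex, netaddr raises and False is returned)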
def build_state_abstraction(similar_states, mdp, tol=0.1):
"""
"""
bools = similar_states + np.eye(similar_states.shape[0]) < tol # approximate abstraction
if bools.sum() == 0:
raise ValueError('No abstraction')
mapping, parts = partitions(bools)
idx = list(set(np.array([p[0] for p in parts]))) # pick a representative set of states. one from each partition
f = construct_abstraction_fn(mapping, idx, mdp.S, len(idx))
# print('Abstracting from {} states to {} states'.format(mdp.S, len(parts)))
# print('idx', idx)
# print('mapping', mapping)
# print('parts', parts)
# mapping, parts = fix_mapping(mapping)
# print(f)
# print(f.shape, abs_mdp.S)
abs_mdp = abstract_the_mdp(mdp, idx)
# want a way to do this stuff in numpy!?
# should calculate the error of the abstraction?! check it is related to tol!?
return idx, abs_mdp, f | d4d9354507172ee92ea11c915de0376f0c873878 | 5,418 |
def diagram(source, rstrip=True):
"""High level API to generate ASCII diagram.
This function is equivalent to:
.. code-block:: python
Diagram(source).renders()
:param source: The ADia source code.
:type source: str or file-like
    :param rstrip: If ``True``, the trailing whitespace at the end of each
line will be removed.
:type rstrip: bool, optional, default: True
:return: ASCII diagram.
:rtype: str
"""
return Diagram(source).renders(rstrip) | 2a386b49052a7f4dd31eb4f40dec15d774d86b94 | 5,419 |
def eng_to_kong(eng_word: str)-> list[str]:
"""
    Translate the given English word into Korean with matching pronunciation,
    following the English Loanword Orthography.
For example, "hello" will be translated into 헐로.
# Panics
    When given an English word that it cannot translate, `eng_to_kong` will raise a KeyError.
Example
```python
import konglog
def main():
word = "shrimp"
print(konglog.eng_to_kong(word))
```
"""
# Parse word into phonemes string for passing to Prolog Query.
prolog_arg_aras = "]"
for phe in cmudict.dict()[eng_word.lower().strip()][0]:
if phe[-1] == '0' or phe[-1] == '1' or phe[-1] == '2':
phe = phe[:-1]
prolog_arg_aras = "," + phe.lower() + prolog_arg_aras
prolog_arg_aras = "[" + prolog_arg_aras[1:]
# Execute Prolog query
with PrologMQI() as mqi:
with mqi.create_thread() as prolog_thread:
assert(prolog_thread.query("consult(\"ipa.pl\")"))
prolog_res = prolog_thread.query(f"ipa_to_kr(X,{prolog_arg_aras})")
# Parse results
jamo_lists = []
try:
for jls in prolog_res:
temp = jls['X']
temp.reverse()
jamo_lists.append(temp)
except TypeError:
raise KeyError
jamo_all = []
for jamo_list in jamo_lists:
temp_jamo_all = [""]
for jamos in jamo_list:
if isinstance(jamos, str):
for i in range(len(temp_jamo_all)):
temp_jamo_all[i] += jamos
else:
temp = []
for jamo in jamos:
for s in temp_jamo_all:
temp.append(s + jamo)
temp_jamo_all = temp
jamo_all.extend(temp_jamo_all)
# Combine jamos into Konglish word
jamo_all.sort(key = lambda x : len(x))
for jamos in jamo_all:
try:
return join_jamos(jamos, False)
except ValueError:
continue | 0b0d55fdacdea1493d73de85c21dc9c086352b99 | 5,420 |
import logging
import os
def visualize_demux(base_dir, data_artifact):
"""
:param base_dir: Main working directory filepath
:param data_artifact: QIIME2 data artifact object
:return: QIIME2 demux visualization object
"""
logging.info('Visualizing demux...')
# Path setup
export_path = os.path.join(base_dir, 'demux_summary.qzv')
# Prepare and save demux summary visualization
demux_viz = demux.visualizers.summarize(data=data_artifact)
demux_viz.visualization.save(export_path)
logging.info('Saved {}'.format(export_path))
return demux_viz | 80d4e4d1eba50bdae09838e625628d9d860341f9 | 5,421 |
def socfaker_dns_answers():
"""
A list of DNS answers during a DNS request
Returns:
list: A random list (count) of random DNS answers during a DNS request
"""
if validate_request(request):
return jsonify(str(socfaker.dns.answers)) | c1f641e1a0e977363067937487a6455800e6a25c | 5,422 |
import os
def create_app():
"""Create and configure and instance of the
Flask Application"""
app = Flask(__name__)
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('HEROKU_POSTGRESQL_COPPER_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['ENV'] = config('ENV')
DB.init_app(app)
@app.route('/')
def root():
return render_template('base.html', title='Home', users=User.query.all())
@app.route('/update')
def update():
update_all_users()
return render_template('base.html', title='Update all users!', users=User.query.all())
@app.route('/user', methods=['POST'])
@app.route('/user/<name>', methods=['GET'])
def user(name=None, message=''):
name = name or request.values['user_name']
try:
if request.method == 'POST':
add_or_update_user(name)
message = "User {} successfully added".format(name)
tweets = User.query.filter(User.name == name).one().tweets
except Exception as e:
message = "Error adding {}: {}".format(name, e)
return message
else:
return render_template('user.html', title=name,
tweets=tweets, message=message)
@app.route('/compare', methods=['POST'])
def predict(message=''):
user1, user2 = sorted((request.values['user1'],
request.values['user2']))
if user1 == user2:
message = 'Cannot compare a user the same user in both fields!'
else:
comparison = predict_user(user1, user2,
request.values['tweet_text'])
user1_name = comparison.user1_name
user2_name = comparison.user2_name
user1_prob = comparison.user1_prob
user2_prob = comparison.user2_prob
prediction = comparison.predicted_user
message = '"{}" is more likely to be said by {} than {}'.format(
request.values['tweet_text'],
user1_name if prediction else user2_name,
user2_name if prediction else user1_name)
return render_template('prediction.html', title='Prediction',
message=message,
user1_name=user1_name, user1_prob=user1_prob,
user2_name=user2_name, user2_prob=user2_prob
)
@app.route("/reset")
def reset():
DB.drop_all()
DB.create_all()
add_users()
return render_template('base.html', title='Reset database!')
return app | a5a2a0022d3f204410db3669910e48e1cd518457 | 5,423 |
def bilin(x, y, data, datax, datay): # --DC
""" x, y ARE COORDS OF INTEREST
data IS 2x2 ARRAY CONTAINING NEARBY DATA
datax, datay CONTAINS x & y COORDS OF NEARBY DATA"""
lavg = ( (y - datay[0]) * data[1,0] + (datay[1] - y) * data[0,0] ) / (datay[1] - datay[0])
ravg = ( (y - datay[0]) * data[1,1] + (datay[1] - y) * data[0,1] ) / (datay[1] - datay[0])
return ( (x - datax[0]) * ravg + (datax[1] - x) * lavg ) / (datax[1] - datax[0]) | 59a740f65c7187a08cdc09cef8aa100b01c652cf | 5,424 |
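# Worked example for bilin(), assuming the function above is in scope.
# data rows index y (datay[0], datay[1]); columns index x (datax[0], datax[1]).
import numpy as np

data = np.array([[10.0, 20.0],   # values at y = datay[0]
                 [30.0, 40.0]])  # values at y = datay[1]
datax = [1.0, 2.0]
datay = [2.0, 3.0]
print(bilin(1.5, 2.5, data, datax, datay))  # 25.0, the average of the four corner values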
from numpy import array  # the stdlib `array` module is not callable; a NumPy array is the likely intent here
def slice_data(xdata, ydata, x_range):
"""
crops or slices the data in xdata,ydata in the range x_range on the x axis
"""
data = zip(xdata, ydata)
sliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]
    return array(list(zip(*sliced_data)))
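# Usage sketch for slice_data(), keeping only the points with 2 <= x <= 4.
xdata = [1, 2, 3, 4, 5]
ydata = [10, 20, 30, 40, 50]
print(slice_data(xdata, ydata, (2, 4)))  # [[ 2  3  4]
                                         #  [20 30 40]]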
def best_low_rank(A, rank):
"""
Finding the best low rank approximation by SVD
"""
u, s, vh = np.linalg.svd(A)
s = np.sqrt(s[:rank])
return u[:, range(rank)] @ np.diag(s), np.diag(s) @ vh[range(rank)] | 5df8197fc0113bfa74a36803445a6a300766880f | 5,426 |
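# Usage sketch: best rank-2 approximation of a 5x5 matrix via the SVD helper above.
import numpy as np

A = np.random.rand(5, 5)
L, R = best_low_rank(A, rank=2)
print(L.shape, R.shape)           # (5, 2) (2, 5)
print(np.linalg.norm(A - L @ R))  # reconstruction error of the rank-2 approximation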
def get_bangumi(uid: int, type_: str = "bangumi", limit: int = 114514, callback=None, verify: utils.Verify = None):
"""
自动循环获取追番/追剧列表
:param callback: 回调函数
:param uid:
:param type_:
:param limit:
:param verify:
:return:
"""
if verify is None:
verify = utils.Verify()
bangumi = []
page = 1
count = 0
while count < limit:
data = get_bangumi_raw(uid=uid, pn=page, type_=type_, verify=verify)
if len(data["list"]) == 0:
break
bangumi += data["list"]
if callable(callback):
callback(data["list"])
count += len(data["list"])
page += 1
return bangumi[:limit] | b5a2a553a3bf13ded7eb30c53ce536b4a2b9e043 | 5,427 |
def local_cases_range(start_date='2020-06-01',end_date='2020-07-01',areaname='Hartlepool'):
"""calculate new cases in a time range"""
try:
q=DailyCases.objects.filter(areaname=areaname,specimenDate=start_date)[0]
start_total=q.totalLabConfirmedCases
q=DailyCases.objects.filter(areaname=areaname,specimenDate=end_date)[0]
end_total=q.totalLabConfirmedCases
return end_total-start_total
except Exception as e:
log.info(e)
return None | 747f03f2ef9925f7dc252798bb7f3844cd31d2c0 | 5,428 |
def ecal_phisym_flattables(process, produce_by_run : bool=False):
"""
Add the NanoAOD flat table producers.
This functions adjust also the output columns.
Should be called once nMisCalib has been set in the EcalPhiSymRecHitProducer
"""
process.load('Calibration.EcalCalibAlgos.EcalPhiSymFlatTableProducers_cfi')
nmis = process.EcalPhiSymRecHitProducerRun.nMisCalib.value()
for imis in range(1, nmis+1):
# get the naming and indexing right.
if imis<nmis/2+1:
var_name = 'sumEt_m'+str(abs(int(imis-(nmis/2)-1)))
var = Var(f'sumEt({imis})', float, doc='ECAL PhiSym rechits: '+str(imis-(nmis/2)-1)+'*miscalib et', precision=23)
else:
var_name = 'sumEt_p'+str(int(imis-(nmis/2)))
var = Var(f'sumEt({imis})', float, doc='ECAL PhiSym rechits: '+str(imis-(nmis/2))+'*miscalib et', precision=23)
if produce_by_run:
setattr(process.ecalPhiSymRecHitRunTableEB.variables, var_name, var)
setattr(process.ecalPhiSymRecHitRunTableEE.variables, var_name, var)
flattable_sequence = cms.Sequence( process.ecalPhiSymRecHitRunTableEB +
process.ecalPhiSymRecHitRunTableEE +
process.ecalPhiSymInfoRunTable )
else:
setattr(process.ecalPhiSymRecHitLumiTableEB.variables, var_name, var)
setattr(process.ecalPhiSymRecHitLumiTableEE.variables, var_name, var)
flattable_sequence = cms.Sequence( process.ecalPhiSymRecHitLumiTableEB +
process.ecalPhiSymRecHitLumiTableEE +
process.ecalPhiSymInfoLumiTable
)
return flattable_sequence | f6f48ab2e5b1df6a3e58c5e3130be56861eb1384 | 5,429 |
def nx_add_prefix(graph, prefix):
"""
Rename graph to obtain disjoint node labels
"""
assert isinstance(graph, nx.DiGraph)
if prefix is None:
return graph
def label(x):
if isinstance(x, str):
name = prefix + x
else:
name = prefix + repr(x)
return name
return nx.relabel_nodes(graph, label) | c8f05052c613adc17423637186867b70db31e70d | 5,430 |
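# Usage sketch: prefix node labels so graphs can be composed without collisions.
import networkx as nx

g = nx.DiGraph([(1, 2), (2, 3)])
print(list(nx_add_prefix(g, "a_").nodes))  # ['a_1', 'a_2', 'a_3']
print(list(nx_add_prefix(g, None).nodes))  # [1, 2, 3] (returned unchanged)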
def infected_asymptomatic_20x80():
"""
Real Name: b'Infected asymptomatic 20x80'
Original Eqn: b'Infected asymptomatic 20+Infected asymptomatic 80'
Units: b'person'
Limits: (None, None)
Type: component
b''
"""
return infected_asymptomatic_20() + infected_asymptomatic_80() | fd9cba446672bf8ebcc5fe2fc7f2f961129bdb71 | 5,431 |
import json
import os
def make_forecast_json(data: str):
"""Generate custom forecast file in JSON format
:param data: JSON string containing weather.gov data
"""
try:
if data:
# Current conditions
conditions_now: str = data["properties"]["periods"][0]
# Determine where in the data object tomorrow's forecast will come from
if is_day(data, 0):
conditions_tomorrow: str = data["properties"]["periods"][2]
else:
conditions_tomorrow: str = data["properties"]["periods"][1]
# Currently
current_temperature: str = conditions_now["temperature"]
current_wind: str = conditions_now["windSpeed"]
current_advice: str = advice(current_temperature)
# Tomorrow
tomorrow_temperature: str = conditions_tomorrow["temperature"]
tomorrow_wind: str = conditions_tomorrow["windSpeed"]
tomorrow_advice: str = advice(tomorrow_temperature)
weather_forecast: dict = {
"current_conditions": f"It's currently {current_temperature}°f. {current_advice}. Wind speeds are {current_wind}.",
"tomorrow_conditions": f"Tomorrow will be {tomorrow_temperature}°f. {tomorrow_advice}. Wind speeds will be {tomorrow_wind}."
}
# Convert forecast dict to JSON
forecast_json = json.dumps(weather_forecast)
# Save JSON file
output_directory = os.path.sep.join(["json_output", "noaa"])
forecast_file = open(
os.path.sep.join(
[output_directory, "forecast.json"]), "w+"
)
forecast_file.write(forecast_json)
forecast_file.close()
return print("Forecast creation complete.")
except Exception as e:
print(e) | 9b65f30c3b26db8ffa1818db073723eef57fe7f0 | 5,432 |
def biosample_table_data():
"""Return a dictionary containing the expected values of the BioSample Table"""
columns = [
"id",
"BioSample_id",
"BioSampleAccession",
"BioSampleAccessionSecondary",
"BioSampleBioProjectAccession",
"BioSampleSRAAccession",
"BioSampleOrganism",
"BioSampleStrain",
"BioSampleSubmissionDate",
"BioSampleComment",
]
metadata = [
"1",
"12991206",
"SAMN12991206",
"",
"",
"SRS5502739",
"TestOrganism1",
"TestStrain1",
"2019-10-08T07:15:03.950",
"",
]
table_dict = {}
# Populate the dict with data
for i in range(0, len(columns)):
key = columns[i]
value = metadata[i]
table_dict[key] = value
return table_dict | 65e5d5bb5416a8f113100562fba8f2e6fd66796a | 5,433 |
def grid_adapter3D(
out_dim=(100.0, 100.0),
in_dim=(50.0, 50.0),
z_dim=-10.0,
out_res=(10.0, 10.0, 10.0),
in_res=(5.0, 5.0, 5.0),
out_pos=(0.0, 0.0),
in_pos=(25.0, 25.0),
z_pos=0.0,
in_mat=0,
out_mat=0,
fill=False,
):
"""
Generate a grid adapter.
3D adapter from an outer grid resolution
to an inner grid resolution with gmsh.
Parameters
----------
out_dim : list of 2 float
xy-Dimension of the outer block
in_dim : list of 2 float
xy-Dimension of the inner block
z_dim : float
z-Dimension of the whole block
out_res : list of 3 float
Grid resolution of the outer block
in_res : list of 3 float
Grid resolution of the inner block
out_pos : list of 2 float
xy-Position of the origin of the outer block
in_pos : list of 2 float
xy-Position of the origin of the inner block
    z_pos : float
        z-Position of the origin of the whole block
in_mat : integer
Material-ID of the inner block
out_mat : integer
Material-ID of the outer block
fill : bool, optional
State if the inner block should be filled with a rectangular mesh.
Default: False.
Returns
-------
result : dictionary
Result contains one '#FEM_MSH' block of the OGS mesh file
with the following information (sorted by keys):
mesh_data : dict
dictionary containing information about
- AXISYMMETRY (bool)
- CROSS_SECTION (bool)
- PCS_TYPE (str)
- GEO_TYPE (str)
- GEO_NAME (str)
- LAYER (int)
nodes : ndarray
Array with all node postions
elements : dict
contains nodelists for elements sorted by element types
material_id : dict
contains material ids for each element sorted by element types
element_id : dict
contains element ids for each element sorted by element types
"""
out = gmsh(
gmsh_grid_adapt3D(
out_dim, in_dim, z_dim, out_res, in_res, out_pos, in_pos, z_pos
),
import_dim=3,
)
out["material_id"] = gen_std_mat_id(out["elements"], out_mat)
if fill:
element_no = [
int(in_dim[0] / in_res[0]),
int(in_dim[1] / in_res[1]),
int(abs(z_dim) / in_res[2]),
]
mesh_in = rectangular(
dim=3,
mesh_origin=in_pos + (z_pos + min(z_dim, 0.0),),
element_no=element_no,
element_size=in_res,
)
mesh_in["material_id"] = gen_std_mat_id(mesh_in["elements"], in_mat)
dec = int(np.ceil(-np.log10(min(min(in_res), min(out_res)))) + 2.0) * 2
out = combine(mesh_in, out, dec)
return out | 9c14a4f9b27ec14cdc550f81fd861207a5674616 | 5,434 |
from typing import Sequence
from typing import Tuple
from typing import Mapping
from typing import Any
from functools import reduce
from typing import cast
def concat_dtypes(ds: Sequence[np.dtype]) -> np.dtype:
"""Concat structured datatypes."""
def _concat(
acc: Tuple[Mapping[Any, Any], int], a: np.dtype
) -> Tuple[DTYPE_FIELDS_T, int]:
acc_fields, acc_itemsize = acc
fields = dtype_fields(a).throw()
intersection = set(acc_fields).intersection(set(fields))
if intersection != set():
raise ValueError(f'dtypes have overlapping fields: {intersection}')
return (
{
**acc_fields,
**{k: (d[0], d[1] + acc_itemsize) for k, d in fields.items()}
},
acc_itemsize + a.itemsize
)
# dtype.fields() doesn't match dtype constructor despite being compatible
return np.dtype(reduce(_concat, ds, (cast(DTYPE_FIELDS_T, {}), 0))[0]) | bef7d8ebe30f41297adbf8f1d8de9b93f646c8f4 | 5,435 |
def mutation(param_space, config, mutation_rate, list=False):
"""
Mutates given configuration.
:param param_space: space.Space(), will give us information about parameters
:param configs: list of configurations.
:param mutation_rate: integer for how many parameters to mutate
:param list: boolean whether returning one or more alternative configs
:return: list of dicts, list of mutated configurations
"""
parameter_object_list = param_space.get_input_parameters_objects()
rd_config = dict()
for name, obj in parameter_object_list.items():
x = obj.randomly_select()
single_valued_param = False
param_type = param_space.get_type(name)
if param_type == 'real' or param_type == 'integer':
if obj.get_max() == obj.get_min():
single_valued_param = True
else:
if obj.get_size() == 1:
single_valued_param = True
mutation_attempts = 0
while x == config[name] and single_valued_param == False:
x = obj.randomly_select()
mutation_attempts += 1
if mutation_attempts > 1000000:
break
rd_config[name] = x
parameter_names_list = param_space.get_input_parameters()
nbr_params = len(parameter_names_list)
configs = []
n_configs = nbr_params if list else 1
for _ in range(n_configs):
indices = rd.permutation(nbr_params)[:mutation_rate]
for idx in indices:
mutation_param = parameter_names_list[idx]
# Should I do something if they are the same?
temp = config.copy()
temp[mutation_param] = rd_config[mutation_param]
configs.append(temp)
return configs | 38427cfee226589d72117f102d2befdbe8ebbcc0 | 5,436 |
from typing import Union
from typing import Callable
from typing import Optional
from typing import Tuple
def uncertainty_batch_sampling(classifier: Union[BaseLearner, BaseCommittee],
X: Union[np.ndarray, sp.csr_matrix],
n_instances: int = 20,
metric: Union[str, Callable] = 'euclidean',
n_jobs: Optional[int] = None,
**uncertainty_measure_kwargs
) -> Tuple[np.ndarray, Union[np.ndarray, sp.csr_matrix]]:
"""
Batch sampling query strategy. Selects the least sure instances for labelling.
This strategy differs from :func:`~modAL.uncertainty.uncertainty_sampling` because, although it is supported,
traditional active learning query strategies suffer from sub-optimal record selection when passing
`n_instances` > 1. This sampling strategy extends the interactive uncertainty query sampling by allowing for
batch-mode uncertainty query sampling. Furthermore, it also enforces a ranking -- that is, which records among the
batch are most important for labeling?
Refer to Cardoso et al.'s "Ranked batch-mode active learning":
https://www.sciencedirect.com/science/article/pii/S0020025516313949
Args:
classifier: One of modAL's supported active learning models.
X: Set of records to be considered for our active learning model.
n_instances: Number of records to return for labeling from `X`.
metric: This parameter is passed to :func:`~sklearn.metrics.pairwise.pairwise_distances`
n_jobs: If not set, :func:`~sklearn.metrics.pairwise.pairwise_distances_argmin_min` is used for calculation of
distances between samples. Otherwise it is passed to :func:`~sklearn.metrics.pairwise.pairwise_distances`.
**uncertainty_measure_kwargs: Keyword arguments to be passed for the :meth:`predict_proba` of the classifier.
Returns:
Indices of the instances from `X` chosen to be labelled; records from `X` chosen to be labelled.
"""
uncertainty = classifier_uncertainty(classifier, X, **uncertainty_measure_kwargs)
query_indices = ranked_batch(classifier, unlabeled=X, uncertainty_scores=uncertainty,
n_instances=n_instances, metric=metric, n_jobs=n_jobs)
return query_indices, X[query_indices] | eb95ad79f4326d89c94a42aa727e2e3c338e021e | 5,437 |
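# Hedged sketch following the ranked-batch pattern; assumes modAL and scikit-learn
# are installed and that uncertainty_batch_sampling above is importable.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from modAL.models import ActiveLearner

X_train, y_train = np.random.rand(20, 4), np.random.randint(0, 2, 20)
X_pool = np.random.rand(100, 4)
learner = ActiveLearner(estimator=RandomForestClassifier(), X_training=X_train, y_training=y_train)
query_idx, query_instances = uncertainty_batch_sampling(learner, X_pool, n_instances=5)
print(query_idx)  # indices of the 5 records ranked most useful to label next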
import re
def only_digits(raw, force_int=False):
"""Strips all not digit characters from string.
Args:
raw (str or unicode): source string.
Kwargs:
force_int (boolean): not to seek for dot, seek only for int value.
Returns:
int or float: in dependence of "raw" argument content.
None: if raw is None, empty or not contains digits.
"""
if isinstance(raw, (unicode, str)) and len(raw):
if not force_int and re.search(r'\d\.\d', raw):
try:
return float(u''.join(u'{0}'.format(one) for one in raw
if one.isdigit() or one == one.__class__(u'.')))
except (TypeError, ValueError):
return None
else:
try:
return int(u''.join(u'{0}'.format(one) for one in raw
if one.isdigit()))
except (TypeError, ValueError):
return None
elif isinstance(raw, (float, int)):
return raw
else:
return None | 413763588b067f335f7401fb914f1d6f3f8972fa | 5,438 |
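# Expected-behaviour sketch for only_digits (Python 2 only, since it checks the `unicode` builtin):
# only_digits(u"price: 12.5 EUR")                  -> 12.5
# only_digits(u"price: 12.5 EUR", force_int=True)  -> 125   (digits only, dot ignored)
# only_digits(u"no digits here")                   -> None
# only_digits(3.14)                                -> 3.14  (numeric input passes through)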
def namedtuple(typename, field_names, verbose=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessable by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
for name in (typename,) + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_'):
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(t):
'Return a new dict which maps field names to their values'
return {%(dicttxt)s} \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
if verbose:
print template
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example).
if hasattr(_sys, '_getframe'):
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
return result | ce2e4b2f6fe0243a8ac5c418d10c6352c95ea302 | 5,439 |
from typing import Optional
from typing import Sequence
def get_users(compartment_id: Optional[str] = None,
external_identifier: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetUsersFilterArgs']]] = None,
identity_provider_id: Optional[str] = None,
name: Optional[str] = None,
state: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUsersResult:
"""
This data source provides the list of Users in Oracle Cloud Infrastructure Identity service.
Lists the users in your tenancy. You must specify your tenancy's OCID as the value for the
compartment ID (remember that the tenancy is simply the root compartment).
See [Where to Get the Tenancy's OCID and User's OCID](https://docs.cloud.oracle.com/iaas/Content/API/Concepts/apisigningkey.htm#five).
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_users = oci.identity.get_users(compartment_id=var["tenancy_ocid"],
external_identifier=var["user_external_identifier"],
identity_provider_id=oci_identity_identity_provider["test_identity_provider"]["id"],
name=var["user_name"],
state=var["user_state"])
```
:param str compartment_id: The OCID of the compartment (remember that the tenancy is simply the root compartment).
:param str external_identifier: The id of a user in the identity provider.
:param str identity_provider_id: The id of the identity provider.
:param str name: A filter to only return resources that match the given name exactly.
:param str state: A filter to only return resources that match the given lifecycle state. The state value is case-insensitive.
"""
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['externalIdentifier'] = external_identifier
__args__['filters'] = filters
__args__['identityProviderId'] = identity_provider_id
__args__['name'] = name
__args__['state'] = state
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:identity/getUsers:getUsers', __args__, opts=opts, typ=GetUsersResult).value
return AwaitableGetUsersResult(
compartment_id=__ret__.compartment_id,
external_identifier=__ret__.external_identifier,
filters=__ret__.filters,
id=__ret__.id,
identity_provider_id=__ret__.identity_provider_id,
name=__ret__.name,
state=__ret__.state,
users=__ret__.users) | 4d404eb1069c829bb757f3870efc548583998434 | 5,440 |
def se_resnet20(num_classes: int = 10,
in_channels: int = 3
) -> ResNet:
""" SEResNet by Hu+18
"""
return resnet(num_classes, 20, in_channels, block=partial(SEBasicBlock, reduction=16)) | da1d1327d5e5d1b55d3b4cc9d42dbf381ece029f | 5,441 |
def parallel_threaded(function):
"""
A decorator for running a function within a parallel thread
"""
def decorator(*args, **kwargs):
t = ParallelThread(target=function,
args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
return decorator | 9f4936b0ab7de3d550b404043d6b0e37dbb3a066 | 5,442 |
def Upsample(x, size):
"""
Wrapper Around the Upsample Call
"""
return nn.functional.interpolate(x, size=size, mode="bilinear", align_corners=True) | dfaabb3999047589b2d755a2cc631b1389d172b1 | 5,443 |
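# Usage sketch for the Upsample wrapper above (requires PyTorch).
import torch

x = torch.randn(1, 3, 16, 16)
y = Upsample(x, size=(32, 32))
print(y.shape)  # torch.Size([1, 3, 32, 32])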
def get_user_playlists(spotipy_obj, username):
"""Gets and returns all Spotify playlists owned by the username specified.
Parameters:
spotipy_obj: Spotipy object
username: Spotify username
Returns:
List of dictionaries, each dictionary a Spotify playlist object.
"""
# Grab all user playlists, including private ones
initial_playlists = spotipy_obj.user_playlists(username)
final_playlists = []
while initial_playlists:
for playlist in initial_playlists["items"]:
if playlist["owner"]["id"] == username:
final_playlists.append(playlist)
if initial_playlists["next"]:
initial_playlists = spotipy_obj.next(initial_playlists)
else:
initial_playlists = None
return final_playlists | 90c06e0ddd91a7a84f4d905dd9334f9b4c27f890 | 5,444 |
import zipfile
from . import mocap
import os
def osu_run1(data_set="osu_run1", sample_every=4):
"""Ohio State University's Run1 motion capture data set."""
path = os.path.join(access.DATAPATH, data_set)
if not access.data_available(data_set):
access.download_data(data_set)
zip = zipfile.ZipFile(os.path.join(access.DATAPATH, data_set, "run1TXT.ZIP"), "r")
for name in zip.namelist():
zip.extract(name, path)
Y, connect = mocap.load_text_data("Aug210106", path)
Y = Y[0:-1:sample_every, :]
return access.data_details_return({"Y": Y, "connect": connect}, data_set) | db4687276c69c9bfdcbc30e7c6f46a26fa0f4c19 | 5,445 |
import os
def package_data(pkg, root_list):
"""
Generic function to find package_data for `pkg` under `root`.
"""
data = []
for root in root_list:
for dirname, _, files in os.walk(os.path.join(pkg, root)):
for fname in files:
data.append(os.path.relpath(os.path.join(dirname, fname), pkg))
return {pkg: data} | 6ffe84ef045e460a7bc2613e14e1670a9f6a48e2 | 5,446 |
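# Typical use in a setup.py; the package name and data folders below are hypothetical.
from setuptools import setup

setup(
    name="mypkg",
    packages=["mypkg"],
    package_data=package_data("mypkg", ["data", "templates"]),
)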
def create(dataset, target, features=None, l2_penalty=1e-2, l1_penalty=0.0,
solver='auto', feature_rescaling=True,
convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
step_size = _DEFAULT_SOLVER_OPTIONS['step_size'],
lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
validation_set = "auto",
verbose=True):
"""
Create a :class:`~turicreate.linear_regression.LinearRegression` to
predict a scalar target variable as a linear function of one or more
features. In addition to standard numeric and categorical types, features
can also be extracted automatically from list- or dictionary-type SFrame
columns.
The linear regression module can be used for ridge regression, Lasso, and
elastic net regression (see References for more detail on these methods). By
default, this model has an l2 regularization weight of 0.01.
Parameters
----------
dataset : SFrame
The dataset to use for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
l2_penalty : float, optional
Weight on the l2-regularizer of the model. The larger this weight, the
more the model coefficients shrink toward 0. This introduces bias into
the model but decreases variance, potentially leading to better
predictions. The default value is 0.01; setting this parameter to 0
corresponds to unregularized linear regression. See the ridge
regression reference for more detail.
l1_penalty : float, optional
Weight on l1 regularization of the model. Like the l2 penalty, the
higher the l1 penalty, the more the estimated coefficients shrink toward
0. The l1 penalty, however, completely zeros out sufficiently small
coefficients, automatically indicating features that are not useful for
the model. The default weight of 0 prevents any features from being
discarded. See the LASSO regression reference for more detail.
solver : string, optional
Solver to use for training the model. See the references for more detail
on each solver.
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *newton*: Newton-Raphson
- *lbfgs*: limited memory BFGS
- *fista*: accelerated gradient descent
The model is trained using a carefully engineered collection of methods
that are automatically picked based on the input data. The ``newton``
method works best for datasets with plenty of examples and few features
(long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for
wide datasets (i.e datasets with many coefficients). ``fista`` is the
default solver for l1-regularized linear regression. The solvers are
all automatically tuned and the default options should function well.
See the solver options guide for setting additional parameters for each
of the solvers.
See the user guide for additional details on how the solver is chosen.
feature_rescaling : boolean, optional
Feature rescaling is an important pre-processing step that ensures that
all features are on the same scale. An l2-norm rescaling is performed
to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that are
used to represent them. The coefficients are returned in original scale
of the problem. This process is particularly useful when features
vary widely in their ranges.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
convergence_threshold : float, optional
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
lbfgs_memory_level : int, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
step_size : float, optional (fista only)
The starting step size to use for the ``fista`` and ``gd`` solvers. The
default is set to 1.0, this is an aggressive setting. If the first
iteration takes a considerable amount of time, reducing this parameter
may speed up model training.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : LinearRegression
A trained model of type
:class:`~turicreate.linear_regression.LinearRegression`.
See Also
--------
LinearRegression, turicreate.boosted_trees_regression.BoostedTreesRegression, turicreate.regression.create
Notes
-----
- Categorical variables are encoded by creating dummy variables. For a
variable with :math:`K` categories, the encoding creates :math:`K-1` dummy
variables, while the first category encountered in the data is used as the
baseline.
- For prediction and evaluation of linear regression models with sparse
dictionary inputs, new keys/columns that were not seen during training
are silently ignored.
- Any 'None' values in the data will result in an error being thrown.
- A constant term is automatically added for the model intercept. This term
is not regularized.
- Standard errors on coefficients are only available when `solver=newton`
or when the default `auto` solver option chooses the newton method and if
the number of examples in the training data is more than the number of
coefficients. If standard errors cannot be estimated, a column of `None`
values are returned.
References
----------
- Hoerl, A.E. and Kennard, R.W. (1970) `Ridge regression: Biased Estimation
for Nonorthogonal Problems
<http://amstat.tandfonline.com/doi/abs/10.1080/00401706.1970.10488634>`_.
Technometrics 12(1) pp.55-67
    - Tibshirani, R. (1996) `Regression Shrinkage and Selection via the Lasso
      <http://www.jstor.org/discover/10.2307/2346178?uid=3739256&uid=2&uid=4&sid=21104169934983>`_.
      Journal of the Royal Statistical Society. Series B
(Methodological) 58(1) pp.267-288.
- Zhu, C., et al. (1997) `Algorithm 778: L-BFGS-B: Fortran subroutines for
large-scale bound-constrained optimization
<https://dl.acm.org/citation.cfm?id=279236>`_. ACM Transactions on
Mathematical Software 23(4) pp.550-560.
- Barzilai, J. and Borwein, J. `Two-Point Step Size Gradient Methods
<http://imajna.oxfordjournals.org/content/8/1/141.short>`_. IMA Journal of
Numerical Analysis 8(1) pp.141-148.
- Beck, A. and Teboulle, M. (2009) `A Fast Iterative Shrinkage-Thresholding
Algorithm for Linear Inverse Problems
<http://epubs.siam.org/doi/abs/10.1137/080716542>`_. SIAM Journal on
Imaging Sciences 2(1) pp.183-202.
- Zhang, T. (2004) `Solving large scale linear prediction problems using
stochastic gradient descent algorithms
<https://dl.acm.org/citation.cfm?id=1015332>`_. ICML '04: Proceedings of
the twenty-first international conference on Machine learning p.116.
Examples
--------
Given an :class:`~turicreate.SFrame` ``sf`` with a list of columns
[``feature_1`` ... ``feature_K``] denoting features and a target column
``target``, we can create a
:class:`~turicreate.linear_regression.LinearRegression` as follows:
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> model = turicreate.linear_regression.create(data, target='price',
... features=['bath', 'bedroom', 'size'])
For ridge regression, we can set the ``l2_penalty`` parameter higher (the
default is 0.01). For Lasso regression, we set the l1_penalty higher, and
for elastic net, we set both to be higher.
.. sourcecode:: python
# Ridge regression
>>> model_ridge = turicreate.linear_regression.create(data, 'price', l2_penalty=0.1)
# Lasso
>>> model_lasso = turicreate.linear_regression.create(data, 'price', l2_penalty=0.,
l1_penalty=1.0)
# Elastic net regression
>>> model_enet = turicreate.linear_regression.create(data, 'price', l2_penalty=0.5,
l1_penalty=0.5)
"""
# Regression model names.
model_name = "regression_linear_regression"
solver = solver.lower()
model = _sl.create(dataset, target, model_name, features=features,
validation_set = validation_set,
solver = solver, verbose = verbose,
l2_penalty=l2_penalty, l1_penalty = l1_penalty,
feature_rescaling = feature_rescaling,
convergence_threshold = convergence_threshold,
step_size = step_size,
lbfgs_memory_level = lbfgs_memory_level,
max_iterations = max_iterations)
return LinearRegression(model.__proxy__) | 3988ac27163873a8feff9fd34a5e8fe87e923487 | 5,447 |
def argument(*name_or_flags, **kwargs):
"""Convenience function to properly format arguments to pass to the
subcommand decorator.
"""
args = list()
for arg in name_or_flags:
args.append(arg)
return args, kwargs | 0cae66e8b23211affc97fd8857f17b48a73cf286 | 5,448 |
def get_logger():
"""
Provides the stem logger.
:returns: **logging.Logger** for stem
"""
return LOGGER | 8189cae16a244f0237f641e613783a484be5cf38 | 5,449 |
def get_graphql_type_for_model(model):
"""
Return the GraphQL type class for the given model.
"""
app_name, model_name = model._meta.label.split('.')
# Object types for Django's auth models are in the users app
if app_name == 'auth':
app_name = 'users'
class_name = f'{app_name}.graphql.types.{model_name}Type'
try:
return dynamic_import(class_name)
except AttributeError:
raise GraphQLTypeNotFound(f"Could not find GraphQL type for {app_name}.{model_name}") | d9f2b4093c290260db864cedd6b06958651bf713 | 5,450 |
from pathlib import Path
import numpy as np
from skimage.io import imread
from skimage.transform import resize
from sklearn.utils import Bunch
def load_image_files(container_path, dimension=(64, 64)):
"""
Load image files with categories as subfolder names
which performs like scikit-learn sample dataset
"""
image_dir = Path(container_path)
folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]
categories = [fo.name for fo in folders]
descr = "A image classification dataset"
images = []
flat_data = []
target = []
for i, direc in enumerate(folders):
for file in direc.iterdir():
img = imread(file)
img_resized = resize(img, dimension, anti_aliasing=True, mode='reflect')
flat_data.append(img_resized.flatten())
images.append(img_resized)
target.append(i)
flat_data = np.array(flat_data)
target = np.array(target)
images = np.array(images)
print('done')
return Bunch(data=flat_data,
target=target,
target_names=categories,
images=images,
DESCR=descr) | 1c92309c7f8f0b99db841fed21901d37e143f41c | 5,451 |
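A minimal usage sketch for the loader above, assuming a hypothetical images/ directory with one subfolder per class; the path and classifier settings are illustrative and not from the original source.
from sklearn import svm
from sklearn.model_selection import train_test_split
# "images/" is a placeholder path laid out as <root>/<class_name>/*.jpg
image_dataset = load_image_files("images/")
X_train, X_test, y_train, y_test = train_test_split(
    image_dataset.data, image_dataset.target, test_size=0.3, random_state=42)
clf = svm.SVC(gamma=0.001)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))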
from datetime import datetime
import calendar
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
def create_calendar(year=None, month=None):
"""
Create an inline keyboard with the provided year and month
:param int year: Year to use in the calendar,
if None the current year is used.
:param int month: Month to use in the calendar,
if None the current month is used.
:return: Returns the InlineKeyboardMarkup object with the calendar.
"""
    now = datetime.now()
if year is None:
year = now.year
if month is None:
month = now.month
data_ignore = create_callback_data("IGNORE", year, month, 0)
keyboard = []
# First row - Month and Year
row = []
row.append(InlineKeyboardButton(
calendar.month_name[month]+" "+str(year), callback_data=data_ignore)
)
keyboard.append(row)
# Second row - Week Days
row = []
for day in ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]:
row.append(InlineKeyboardButton(day, callback_data=data_ignore))
keyboard.append(row)
my_calendar = calendar.monthcalendar(year, month)
for week in my_calendar:
row = []
for day in week:
if day == 0:
row.append(InlineKeyboardButton(
" ", callback_data=data_ignore)
)
else:
row.append(InlineKeyboardButton(
str(day),
callback_data=create_callback_data(
"DAY",
year,
month,
day
))
)
keyboard.append(row)
# Last row - Buttons
row = []
row.append(InlineKeyboardButton(
"<", callback_data=create_callback_data(
"PREV-MONTH",
year,
month,
day
))
)
row.append(InlineKeyboardButton(
" ", callback_data=data_ignore)
)
row.append(InlineKeyboardButton(
">", callback_data=create_callback_data(
"NEXT-MONTH",
year,
month,
day
))
)
keyboard.append(row)
return InlineKeyboardMarkup(keyboard) | 232dd093b08c53f099b942d4497aef920002f5d4 | 5,452 |
def format_allowed_section(allowed):
"""Format each section of the allowed list"""
if allowed.count(":") == 0:
protocol = allowed
ports = []
elif allowed.count(":") == 1:
protocol, ports = allowed.split(":")
else:
return []
if ports.count(","):
ports = ports.split(",")
elif ports:
ports = [ports]
return_val = {"IPProtocol": protocol}
if ports:
return_val["ports"] = ports
return return_val | 0c07feec16562826a1f38a11b1d57782adf09b4d | 5,453 |
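Illustrative calls for the parser above; the firewall strings are made-up examples of the "protocol:port,port" format it expects.
print(format_allowed_section("tcp:80,443"))  # {'IPProtocol': 'tcp', 'ports': ['80', '443']}
print(format_allowed_section("udp:53"))      # {'IPProtocol': 'udp', 'ports': ['53']}
print(format_allowed_section("icmp"))        # {'IPProtocol': 'icmp'}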
import os
from flask import request
def upload_image():
"""
form-data:
image: a jpeg picture
:return: a file pathname, assigned by backend.
"""
if 'image' not in request.files:
return '', 400
f = request.files['image']
if f.filename == '':
return '', 400
if not _allowed_file(f.filename):
return '', 400
# filename = secure_filename(f.filename)
filename = hash_filename(f)
pathname = os.path.join(app.config['UPLOAD_FOLDER'], filename)
f.save(pathname)
return pathname | f74f90a3acce30e550e92d3cec51039450db7401 | 5,454 |
import numpy as np
import astropy.coordinates
import astropy.units as u
def get_airmass(when, ra, dec):
"""Return the airmass of (ra,dec) at the specified observing time.
Uses :func:`cos_zenith_to_airmass`.
Parameters
----------
when : astropy.time.Time
Observation time, which specifies the local zenith.
ra : astropy.units.Quantity
Target RA angle(s)
dec : astropy.units.Quantity
Target DEC angle(s)
Returns
-------
array or float
Value of the airmass for each input (ra,dec).
"""
target = astropy.coordinates.ICRS(ra=ra, dec=dec)
zenith = get_observer(when, alt=90 * u.deg, az=0 * u.deg
).transform_to(astropy.coordinates.ICRS)
# Calculate zenith angle in degrees.
zenith_angle = target.separation(zenith)
# Convert to airmass.
return cos_zenith_to_airmass(np.cos(zenith_angle)) | 2d2b25963cc5814c8189b117734963feda762d88 | 5,455 |
def get_metadata(class_):
"""Returns a list of MetaDataTuple structures.
"""
return list(get_metadata_iterator(class_)) | 95bc083464431cd8bc3c273989680732f711c5c1 | 5,456 |
def get_register(regname):
"""
    Get register value. An exception will be raised if the expression cannot be parsed;
    this function intentionally does not catch it.
@param regname: expected register
@return register value
"""
t = gdb.lookup_type("unsigned long")
reg = gdb.parse_and_eval(regname)
    return int(reg.cast(t)) | 43d077b59dc0b1cb8a6233538a2a1291216c1ec4 | 5,457
import re
import pandas as pd
def create_queries(project_id, ticket_number, pids_project_id, pids_dataset_id,
pids_table):
"""
Creates sandbox and truncate queries to run for EHR deactivated retraction
:param project_id: bq name of project
:param ticket_number: Jira ticket number to identify and title sandbox table
:param pids_project_id: deactivated ehr pids table in bq's project_id
:param pids_dataset_id: deactivated ehr pids table in bq's dataset_id
:param pids_table: deactivated pids table in bq's table name
:return: list of queries to run
"""
queries_list = []
dataset_list = set()
final_date_column_df = pd.DataFrame()
# Hit bq and receive df of deactivated ehr pids and deactivated date
client = get_client(project_id)
deactivated_ehr_pids_df = client.query(
DEACTIVATED_PIDS_QUERY.render(project=pids_project_id,
dataset=pids_dataset_id,
table=pids_table)).to_dataframe()
date_columns_df = get_date_info_for_pids_tables(project_id, client)
LOGGER.info(
"Dataframe creation complete. DF to be used for creation of retraction queries."
)
for date_row in date_columns_df.itertuples(index=False):
# Filter to only include tables containing deactivated pids with the earliest deactivated date
LOGGER.info(
f'Checking table: {date_row.project_id}.{date_row.dataset_id}.{date_row.table}'
)
if check_pid_exist(date_row, client, pids_project_id, pids_dataset_id,
pids_table):
dataset_list.add(date_row.dataset_id)
row = {
'project_id': date_row.project_id,
'dataset_id': date_row.dataset_id,
'table': date_row.table,
'date_column': date_row.date_column,
'start_date_column': date_row.start_date_column,
'end_date_column': date_row.end_date_column
}
final_date_column_df = final_date_column_df.append(
row, ignore_index=True)
LOGGER.info(
"Looping through the deactivated PIDS df to create queries based on the retractions needed per PID table"
)
for ehr_row in deactivated_ehr_pids_df.itertuples(index=False):
LOGGER.info(f'Creating retraction queries for PID: {ehr_row.person_id}')
for date_row in final_date_column_df.itertuples(index=False):
# Determine if dataset is deid to correctly pull pid or research_id and check if ID exists in dataset or if
# already retracted
if re.match(DEID_REGEX, date_row.dataset_id):
pid = get_research_id(date_row.project_id, date_row.dataset_id,
ehr_row.person_id, client)
else:
pid = ehr_row.person_id
# Get or create sandbox dataset
sandbox_dataset = check_and_create_sandbox_dataset(
date_row.project_id, date_row.dataset_id)
# Create queries based on type of date field
LOGGER.info(
f'Creating Query to retract {pid} from {date_row.dataset_id}.{date_row.table}'
)
if pd.isnull(date_row.date_column):
sandbox_query = SANDBOX_QUERY_END_DATE.render(
project=date_row.project_id,
sandbox_dataset=sandbox_dataset,
dataset=date_row.dataset_id,
table=date_row.table,
pid=pid,
deactivated_pids_project=pids_project_id,
deactivated_pids_dataset=pids_dataset_id,
deactivated_pids_table=pids_table,
end_date_column=date_row.end_date_column,
start_date_column=date_row.start_date_column)
clean_query = CLEAN_QUERY_END_DATE.render(
project=date_row.project_id,
dataset=date_row.dataset_id,
table=date_row.table,
pid=pid,
deactivated_pids_project=pids_project_id,
deactivated_pids_dataset=pids_dataset_id,
deactivated_pids_table=pids_table,
end_date_column=date_row.end_date_column,
start_date_column=date_row.start_date_column)
else:
sandbox_query = SANDBOX_QUERY_DATE.render(
project=date_row.project_id,
sandbox_dataset=sandbox_dataset,
dataset=date_row.dataset_id,
table=date_row.table,
pid=pid,
deactivated_pids_project=pids_project_id,
deactivated_pids_dataset=pids_dataset_id,
deactivated_pids_table=pids_table,
date_column=date_row.date_column)
clean_query = CLEAN_QUERY_DATE.render(
project=date_row.project_id,
dataset=date_row.dataset_id,
table=date_row.table,
pid=pid,
deactivated_pids_project=pids_project_id,
deactivated_pids_dataset=pids_dataset_id,
deactivated_pids_table=pids_table,
date_column=date_row.date_column)
queries_list.append({
clean_consts.QUERY:
sandbox_query,
clean_consts.DESTINATION:
date_row.project_id + '.' + sandbox_dataset + '.' +
(ticket_number + '_' + date_row.table),
clean_consts.DESTINATION_DATASET:
date_row.dataset_id,
clean_consts.DESTINATION_TABLE:
date_row.table,
clean_consts.DISPOSITION:
bq_consts.WRITE_APPEND,
'type':
'sandbox'
})
queries_list.append({
clean_consts.QUERY:
clean_query,
clean_consts.DESTINATION:
date_row.project_id + '.' + date_row.dataset_id + '.' +
date_row.table,
clean_consts.DESTINATION_DATASET:
date_row.dataset_id,
clean_consts.DESTINATION_TABLE:
date_row.table,
clean_consts.DISPOSITION:
bq_consts.WRITE_TRUNCATE,
'type':
'retraction'
})
LOGGER.info(
f"Query list complete, retracting ehr deactivated PIDS from the following datasets: "
f"{dataset_list}")
return queries_list | 2940468b76ccd4d16dfb1bbddf440be635eaaf8d | 5,458 |
import numpy as np
from scipy.signal import medfilt
def load_and_estimate(file, arguments, denoise=medfilt, data=None):
"""Loads mean+std images and evaluates noise. Required for parallelization."""
# Pipeline for µCT data
if data is not None:
# Evaluate noise on data
noises = np.zeros(len(metrics))
for m in range(len(metrics)):
noise = estimate_noise(data, metrics[m], kernel_size=kernel_size, denoise_method=denoise)
noises[m] = noise
return np.array(noises)
# Pipeline for images
# Get images
path = arguments.image_path
# Load images
image_surf, image_deep, image_calc = load_vois_h5(path, file)
# Auto crop
if arguments.auto_crop:
image_deep, cropped = auto_corner_crop(image_deep)
image_calc, cropped = auto_corner_crop(image_calc)
# Evaluate noise on mean+std images
noises_surf, noises_deep, noises_calc = np.zeros(len(metrics)), np.zeros(len(metrics)), np.zeros(len(metrics))
for m in range(len(metrics)):
noise_surf = estimate_noise(image_surf, metrics[m], kernel_size=kernel_size, denoise_method=denoise)
noise_deep = estimate_noise(image_deep, metrics[m], kernel_size=kernel_size, denoise_method=denoise)
noise_calc = estimate_noise(image_calc, metrics[m], kernel_size=kernel_size, denoise_method=denoise)
noises_surf[m] = noise_surf
noises_deep[m] = noise_deep
noises_calc[m] = noise_calc
return np.array((noises_surf, noises_deep, noises_calc)) | 63b53eb5441dd9a2e9f4b558005b640109fea220 | 5,459 |
import torch
from transformers import BertModel
def calc_self_attn(
bert_model: BertModel, protein: dict, device="cuda:0", **kwargs
):
"""Calculate self-attention matrices given Bert model for one protein.
Args:
bert_model: a BertModel instance
protein: a dict object from LM-GVP formatted data (json record).
device: device to do the computation
Returns:
torch.tensor of shape: [n_maps, seqlen, seqlen]
"""
bert_model = bert_model.to(device)
bert_model.eval()
with torch.no_grad():
self_attn_mats = bert_model(
protein["input_ids"].unsqueeze(0).to(device),
attention_mask=protein["attention_mask"].unsqueeze(0).to(device),
output_attentions=True,
).attentions
# gather self-attention map from all layers together
n_layers = len(self_attn_mats)
batch_size, n_heads, seqlen, _ = self_attn_mats[0].size()
self_attn_mats = torch.stack(self_attn_mats, dim=1).view(
batch_size, n_layers * n_heads, seqlen, seqlen
)
# remove [CLS] and [SEP]
self_attn_mats = self_attn_mats[..., 1:-1, 1:-1]
if self_attn_mats.size()[0] == 1:
self_attn_mats = self_attn_mats.squeeze(0)
self_attn_mats = self_attn_mats.detach().cpu()
return self_attn_mats | 4d076cc232207c9c446c4f9f52f1156af2afabf2 | 5,460 |
def compute_median_survival_time(times, surv_function):
"""
Computes a median survival time estimate by looking for where the survival
function crosses 1/2.
Parameters
----------
times : 1D numpy array
Sorted list of unique times (in ascending order).
surv_function : 1D numpy array
A survival function evaluated at each of time in `times`, in the same
order.
Returns
-------
output : float
Median survival time estimate.
"""
t_left = times[0]
t_right = times[-1]
if surv_function[-1] > 1/2:
# survival function never crosses 1/2; just output this last time point
return t_right
for t, s in zip(times, surv_function):
if s >= 0.5:
t_left = t
for t, s in zip(reversed(times), reversed(surv_function)):
if s <= 0.5:
t_right = t
return (t_left + t_right) / 2. | 22103bc705acb791c0937a403aa9c34e9145e1c2 | 5,461 |
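A small usage sketch for the estimator above; the toy survival curve is made up, and plain lists work in place of numpy arrays here.
times = [1.0, 2.0, 3.0, 4.0, 5.0]
surv = [0.9, 0.7, 0.5, 0.3, 0.1]
print(compute_median_survival_time(times, surv))  # 3.0 (curve crosses 1/2 at t=3)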
import numpy as np
def TDMAsolver_no_vec(coeffs):
"""
TDMA solver, a b c d can be NumPy array type or Python list type.
refer to http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
and to http://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)
"""
a = coeffs[1:, 0]
b = coeffs[:, 1]
c = coeffs[:-1, 2]
d = coeffs[:, 3]
nf = len(d) # number of equations
ac, bc, cc, dc = map(np.array, (a, b, c, d)) # copy arrays
for it in range(1, nf):
mc = ac[it-1]/bc[it-1]
bc[it] = bc[it] - mc*cc[it-1]
dc[it] = dc[it] - mc*dc[it-1]
xc = bc
xc[-1] = dc[-1]/bc[-1]
    for il in range(nf-2, -1, -1):
xc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]
return xc | cdec1baa7ce0fe2baef71631b0ba678a0f7559dc | 5,462 |
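A quick sanity check of the solver above against a dense solve; the 3x3 system is arbitrary, and the columns of coeffs are [sub-diagonal a, diagonal b, super-diagonal c, rhs d] with unused corner entries set to 0.
coeffs = np.array([
    [0., 2., 1., 3.],
    [1., 2., 1., 4.],
    [1., 2., 0., 3.],
])
A = np.array([[2., 1., 0.],
              [1., 2., 1.],
              [0., 1., 2.]])
print(TDMAsolver_no_vec(coeffs))         # [1. 1. 1.]
print(np.linalg.solve(A, coeffs[:, 3]))  # [1. 1. 1.]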
def aggregate_ant(data, sub_num, response_type="full"):
"""
Aggregate data from the ANT task.
Calculates various summary statistics for the ANT task for a given subject.
Parameters
----------
data : dataframe
Pandas dataframe containing a single subjects trial data for the task.
sub_num : str
Subject number to which the data file belongs.
response_type : {'full', 'correct', 'incorrect'}, optional
Should the summary data be calculated using all trials? Only correct
trials? Or only incorrect trials? This is not supported in all tasks.
Returns
-------
stats : list
List containing the calculated data for the subject.
"""
# Calculate times following errors and correct responses
df = data
follow_error_rt = df.loc[df.correct.shift() == 0, "RT"].mean()
follow_correct_rt = df.loc[df.correct.shift() == 1, "RT"].mean()
if response_type == "correct":
df = data[data["correct"] == 1]
elif response_type == "incorrect":
df = data[data["correct"] == 0]
elif response_type == "full":
df = data
# Aggregated descriptives
## congruency conditions
grouped_congruency = df.groupby("congruency")
neutral_rt = grouped_congruency.mean().get_value("neutral", "RT")
congruent_rt = grouped_congruency.mean().get_value("congruent", "RT")
incongruent_rt = grouped_congruency.mean().get_value("incongruent", "RT")
neutral_rtsd = grouped_congruency.std().get_value("neutral", "RT")
congruent_rtsd = grouped_congruency.std().get_value("congruent", "RT")
incongruent_rtsd = grouped_congruency.std().get_value("incongruent", "RT")
neutral_rtcov = neutral_rtsd / neutral_rt
congruent_rtcov = congruent_rtsd / congruent_rt
incongruent_rtcov = incongruent_rtsd / incongruent_rt
neutral_correct = grouped_congruency.sum().get_value("neutral", "correct")
congruent_correct = grouped_congruency.sum().get_value("congruent", "correct")
incongruent_correct = grouped_congruency.sum().get_value("incongruent", "correct")
## cue conditions
grouped_cue = df.groupby("cue")
nocue_rt = grouped_cue.mean().get_value("nocue", "RT")
center_rt = grouped_cue.mean().get_value("center", "RT")
spatial_rt = grouped_cue.mean().get_value("spatial", "RT")
double_rt = grouped_cue.mean().get_value("double", "RT")
nocue_rtsd = grouped_cue.std().get_value("nocue", "RT")
center_rtsd = grouped_cue.std().get_value("center", "RT")
spatial_rtsd = grouped_cue.std().get_value("spatial", "RT")
double_rtsd = grouped_cue.std().get_value("double", "RT")
nocue_rtcov = nocue_rtsd / nocue_rt
center_rtcov = center_rtsd / center_rt
spatial_rtcov = spatial_rtsd / spatial_rt
double_rtcov = double_rtsd / double_rt
nocue_correct = grouped_cue.sum().get_value("nocue", "correct")
center_correct = grouped_cue.sum().get_value("center", "correct")
spatial_correct = grouped_cue.sum().get_value("spatial", "correct")
double_correct = grouped_cue.sum().get_value("double", "correct")
# OLS regression
conflict_intercept, conflict_slope = congruent_rt, incongruent_rt - congruent_rt
conflict_slope_norm = conflict_slope / congruent_rt
alerting_intercept, alerting_slope = double_rt, nocue_rt - double_rt
alerting_slope_norm = alerting_slope / double_rt
orienting_intercept, orienting_slope = spatial_rt, center_rt - spatial_rt
orienting_slope_norm = orienting_slope / spatial_rt
return [
sub_num,
follow_error_rt,
follow_correct_rt,
neutral_rt,
congruent_rt,
incongruent_rt,
neutral_rtsd,
congruent_rtsd,
incongruent_rtsd,
neutral_rtcov,
congruent_rtcov,
incongruent_rtcov,
neutral_correct,
congruent_correct,
incongruent_correct,
nocue_rt,
center_rt,
spatial_rt,
double_rt,
nocue_rtsd,
center_rtsd,
spatial_rtsd,
double_rtsd,
nocue_rtcov,
center_rtcov,
spatial_rtcov,
double_rtcov,
nocue_correct,
center_correct,
spatial_correct,
double_correct,
conflict_intercept,
conflict_slope,
conflict_slope_norm,
alerting_intercept,
alerting_slope,
alerting_slope_norm,
orienting_intercept,
orienting_slope,
orienting_slope_norm,
] | be01651d450560a5c36bc6240025fe59352d6347 | 5,463 |
def parse_search_after(params):
"""Validate search_after and return it as a list of [score, ID]."""
search_pair = params.get("search_after")
sort = params.get("sort")
if not search_pair or not sort:
return
if '_' not in search_pair or len(search_pair.split("_")) != 2:
return
_score, _id = search_pair.split("_")
_sort = sort.split("_")[0]
if _sort not in ["relevance", "created"]:
log.error("{} is not a supported sort value.".format(_sort))
return
if _sort == "relevance":
score = test_float(_score)
if score is None:
log.error("Search_after relevance score is not a float.")
return
elif _sort == "created":
if not str(_score).isdigit():
log.error("Search_after date score is not an integer.")
return
score = int(_score)
return [score, _id] | f44228d4f5b47129218d122adcb29e41a81c5a1f | 5,464 |
import torch
import torch.nn.functional as F
def compute_cosine_distance(
features, others=None, cuda=False,
):
"""Computes cosine distance.
Args:
        features (torch.Tensor): 2-D feature matrix.
        others (torch.Tensor, optional): 2-D feature matrix. If None, pairwise
            distances between the rows of ``features`` are returned.
Returns:
torch.Tensor: distance matrix.
"""
if others is None:
if cuda:
features = features.cuda()
features = F.normalize(features, p=2, dim=1)
dist_m = 1 - torch.mm(features, features.t())
else:
if cuda:
features = features.cuda()
others = others.cuda()
features = F.normalize(features, p=2, dim=1)
others = F.normalize(others, p=2, dim=1)
dist_m = 1 - torch.mm(features, others.t())
return dist_m.cpu().numpy() | 702fe068f99efc1b8dda7f03d361dcceb62c7426 | 5,465 |
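A usage sketch with random feature matrices; the shapes and the choice of CPU tensors are arbitrary and only for illustration.
feats = torch.randn(4, 128)
gallery = torch.randn(6, 128)
dist = compute_cosine_distance(feats, gallery)
print(dist.shape)  # (4, 6) numpy array of cosine distances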
from typing import Any
def convert_dict_keys_case(obj: Any, case_style: str = CaseStyle.CAMEL):
"""
This function recursively changes the case of all the keys in the obj
argument
"""
case_style = process_case_style(case_style=case_style)
if isinstance(obj, (tuple, list)):
return type(obj)(
[convert_dict_keys_case(item, case_style) for item in obj]
)
elif isinstance(obj, dict):
return {
convert_string_case(key, case_style): convert_dict_keys_case(
value, case_style
)
for key, value in obj.items()
if key
}
else:
return obj | 2b09a7d8ace030ba2543f9bdb74e7201e07243e1 | 5,466 |
def chain_exception(new_exc, old_exc):
"""Set the __cause__ attribute on *new_exc* for explicit exception
chaining. Returns the inplace modified *new_exc*.
"""
if DEVELOPER_MODE:
new_exc.__cause__ = old_exc
return new_exc | ce19e735a26fb03d170f74d6590fb256cd70d70a | 5,467 |
import numpy as np
import tensorflow as tf
def make_input_fn(x_out, prob_choice):
"""Use py_func to yield elements from the given generator."""
inp = {"inputs": np.array(x_out).astype(np.int32),
"problem_choice": prob_choice}
flattened = tf.contrib.framework.nest.flatten(inp)
types = [t.dtype for t in flattened]
shapes = [[None] * len(t.shape) for t in flattened]
first_ex_list = [inp]
def py_func():
if first_ex_list:
example = first_ex_list.pop()
else:
example = inp
return tf.contrib.framework.nest.flatten(example)
def input_fn():
flat_example = tf.py_func(py_func, [], types)
_ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
example = tf.contrib.framework.nest.pack_sequence_as(inp, flat_example)
return example
return input_fn | 1e5e8717cb348a9114dd15ddce50cb33af50b75c | 5,468 |
import re
def url_validate(url):
"""
URL验证
用于登录传递URL
"""
regex = r'^\?next=((/\w+)*)'
if isinstance(url, str) and re.match(regex, url):
return url.split('?next=')[-1]
return '/' | 7a5aa5866018d1bf16c0f4ede527a770da760e17 | 5,469 |
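Illustrative calls for the validator above; the example URLs are hypothetical.
print(url_validate('?next=/dashboard'))      # '/dashboard'
print(url_validate('?next=/users/profile'))  # '/users/profile'
print(url_validate('javascript:alert(1)'))   # '/' (falls back to the site root)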
from typing import Tuple
import numpy as np
def mie_harmonics(x: np.ndarray, L: int) -> Tuple[np.ndarray, np.ndarray]:
"""Calculates the spherical harmonics of the mie field.
The harmonics are calculated up to order L using the iterative method.
Parameters
----------
x : ndarray
The cosine of the angle defined by the line passing through origo parallel
to the propagation direction and the evaluation point, with the corner at origo.
L : int
        The order up to which to evaluate the harmonics; the L:th order is the
        highest one included.
Returns
-------
ndarray, ndarray
Tuple of ndarray of shape (L, *x.shape)
"""
PI = np.zeros((L, *x.shape))
TAU = np.zeros((L, *x.shape))
PI[0, :] = 1
PI[1, :] = 3 * x
TAU[0, :] = x
TAU[1, :] = 6 * x * x - 3
for i in range(3, L + 1):
PI[i - 1] = (2 * i - 1) / (i - 1) * x * PI[i - 2] - i / (i - 1) * PI[i - 3]
TAU[i - 1] = i * x * PI[i - 1] - (i + 1) * PI[i - 2]
return PI, TAU | 3998b065737db276142a3a25ee30c866ab52fbbd | 5,470 |
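A usage sketch for the angular functions above; the scattering angles and order are chosen arbitrarily.
theta = np.linspace(0.0, np.pi, 5)
PI, TAU = mie_harmonics(np.cos(theta), L=4)
print(PI.shape, TAU.shape)  # (4, 5) (4, 5)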
from os.path import exists, join as pathjoin
def is_version_dir(vdir):
    """Check whether the given directory contains an esky app version.
    Currently it only needs to contain the "esky-files/bootstrap-manifest.txt" file.
"""
if exists(pathjoin(vdir,ESKY_CONTROL_DIR,"bootstrap-manifest.txt")):
return True
return False | 931d46c96523bd63d1087cb612a73d98d6338ae2 | 5,471 |
def session_decrypt_raw(encrypted_message, destination_key):
"""
Decrypts the message from a random session key, encrypted with the
destination key.
Superior alternative when the destination key is slow (ex RSA).
"""
block_size = destination_key.block_size
encrypted_session_key = encrypted_message[:block_size]
message = encrypted_message[block_size:]
session_key = AesKey(destination_key.decrypt_raw(encrypted_session_key))
return session_key.decrypt_raw(message) | 7b13ea5d689e050aba5f5a4c6de9c0ca5346bb76 | 5,472 |
def gef_pybytes(x):
"""Returns an immutable bytes list from the string given as input."""
return bytes(str(x), encoding="utf-8") | 8e8cff61e035ac2ef9f6a2cf462a545a05c0ede8 | 5,473 |
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
def plot_2d_morphing_basis(
morpher,
xlabel=r"$\theta_0$",
ylabel=r"$\theta_1$",
xrange=(-1.0, 1.0),
yrange=(-1.0, 1.0),
crange=(1.0, 100.0),
resolution=100,
):
"""
Visualizes a morphing basis and morphing errors for problems with a two-dimensional parameter space.
Parameters
----------
morpher : PhysicsMorpher
PhysicsMorpher instance with defined basis.
xlabel : str, optional
Label for the x axis. Default value: r'$\theta_0$'.
ylabel : str, optional
Label for the y axis. Default value: r'$\theta_1$'.
xrange : tuple of float, optional
Range `(min, max)` for the x axis. Default value: (-1., 1.).
yrange : tuple of float, optional
Range `(min, max)` for the y axis. Default value: (-1., 1.).
crange : tuple of float, optional
Range `(min, max)` for the color map. Default value: (1., 100.).
resolution : int, optional
Number of points per axis for the rendering of the squared morphing weights. Default value: 100.
Returns
-------
figure : Figure
Plot as Matplotlib Figure instance.
"""
basis = morpher.basis
assert basis is not None, "No basis defined"
assert basis.shape[1] == 2, "Only 2d problems can be plotted with this function"
xi, yi = (np.linspace(xrange[0], xrange[1], resolution), np.linspace(yrange[0], yrange[1], resolution))
xx, yy = np.meshgrid(xi, yi)
xx, yy = xx.reshape((-1, 1)), yy.reshape((-1, 1))
theta_test = np.hstack([xx, yy])
squared_weights = []
for theta in theta_test:
wi = morpher.calculate_morphing_weights(theta, None)
squared_weights.append(np.sum(wi * wi) ** 0.5)
squared_weights = np.array(squared_weights).reshape((resolution, resolution))
fig = plt.figure(figsize=(6.5, 5))
ax = plt.gca()
pcm = ax.pcolormesh(
xi, yi, squared_weights, norm=matplotlib.colors.LogNorm(vmin=crange[0], vmax=crange[1]), cmap="viridis_r"
)
cbar = fig.colorbar(pcm, ax=ax, extend="both")
plt.scatter(basis[:, 0], basis[:, 1], s=50.0, c="black")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
cbar.set_label(r"$\sqrt{\sum w_i^2}$")
plt.xlim(xrange[0], xrange[1])
plt.ylim(yrange[0], yrange[1])
plt.tight_layout()
return fig | a99fc0a710d42557ec35be646171a25ce640c01e | 5,474 |
import numpy
def single_lut_conversion(lookup_table):
"""
This constructs the function to convert data using a single lookup table.
Parameters
----------
lookup_table : numpy.ndarray
Returns
-------
callable
"""
_validate_lookup(lookup_table)
def converter(data):
if not isinstance(data, numpy.ndarray):
raise ValueError('requires a numpy.ndarray, got {}'.format(type(data)))
if data.dtype.name not in ['uint8', 'uint16']:
raise ValueError('requires a numpy.ndarray of uint8 or uint16 dtype, '
'got {}'.format(data.dtype.name))
if len(data.shape) == 3 and data.shape[2] != 1:
raise ValueError('Requires a three-dimensional numpy.ndarray, '
'with single band in the last dimension. Got shape {}'.format(data.shape))
return lookup_table[data[:, :, 0]]
return converter | 6d01aa69053d964933bf330d8cf2340ea3f13eba | 5,475 |
import numpy as np
from pixell import enmap
from orphics import maps, stats
def signal_average(cov,bin_edges=None,bin_width=40,kind=3,lmin=None,dlspace=True,return_bins=False,**kwargs):
"""
dcov = cov * ellfact
bin dcov in annuli
interpolate back on to ell
cov = dcov / ellfact
where ellfact = ell**2 if dlspace else 1
"""
modlmap = cov.modlmap()
assert np.all(np.isfinite(cov))
dcov = cov*modlmap**2. if dlspace else cov.copy()
if lmin is None:
minell = maps.minimum_ell(dcov.shape,dcov.wcs)
else:
minell = modlmap[modlmap<=lmin].max()
if bin_edges is None: bin_edges = np.append([2],np.arange(minell,modlmap.max(),bin_width))
binner = stats.bin2D(modlmap,bin_edges)
cents,c1d = binner.bin(dcov)
outcov = enmap.enmap(maps.interp(cents,c1d,kind=kind,fill_value=c1d[-1],**kwargs)(modlmap),dcov.wcs)
with np.errstate(invalid='ignore'): outcov = outcov / modlmap**2. if dlspace else outcov
outcov[modlmap<2] = 0
assert np.all(np.isfinite(outcov))
if return_bins: return cents,c1d,outcov
else: return outcov | 3075cdef1a7063c295a60c06b368bd337870c883 | 5,476 |
from collections import deque
def find_tree_diameter(g):
    """
    Computes the diameter of a tree using the standard two-pass traversal:
    first find the node farthest from an arbitrary root (node 1), then find the
    node farthest from that node; that second distance is the diameter.
    :param g: adjacency list of the tree, keyed by 1-indexed node
    :return: diameter of the tree (number of edges on the longest path)
"""
# First finding the arbitary node that is maximum distance from root
# DFS - First time
q = deque()
q.append((1,0))
arbitrary_node = None
visited = set()
curr_max_length = 0
while q:
node, length = q.pop()
visited.add(node)
if length > curr_max_length:
curr_max_length = length
arbitrary_node = node
for nei in g[node]:
if nei not in visited:
q.append((nei, length + 1))
# Now keep this arbitary node as root, and find the node that is the maximum depth to it
# That is the diameter of the tree
# DFS second time
q2 = deque()
q2.append((arbitrary_node, 0))
diameter_of_tree = 0
visited2 = set()
while q2:
node, length = q2.pop()
visited2.add(node)
if length >= diameter_of_tree:
diameter_of_tree = length
for nei in g[node]:
if nei not in visited2:
q2.append((nei, length + 1))
return diameter_of_tree | 393e6f9b95316c005a3a056bdd291047f96853ec | 5,477 |
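A usage sketch on a small path graph 1-2-3-4-5; the adjacency list is 1-indexed, as the function assumes when it starts its traversal at node 1.
g = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3, 5], 5: [4]}
print(find_tree_diameter(g))  # 4 (edges on the longest path, 1 -> 5)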
def edge_list_to_adjacency(edges):
"""
Create adjacency dictionary based on a list of edges
:param edges: edges to create adjacency for
:type edges: :py:list
:rtype: :py:dict
"""
adjacency = dict([(n, []) for n in edge_list_to_nodes(edges)])
for edge in edges:
adjacency[edge[0]].append(edge[1])
return adjacency | c756baf0cb1182ab79df0846afd97296a0b42679 | 5,478 |
def __renormalized_likelihood_above_threshold_lnlikelihood(data, thr=__thr, alpha=models.__alpha, beta=models.__beta, num_mc=models.__num_mc, **kwargs):
"""
only include data that is above thr, treats them all as signals, and renormalizes the likelihood so that it only covers "detectable data"
"""
truth = data[:,0]>=thr
if np.any(truth):
norm = 1-np.exp(models.signalData_lncdf(thr, alpha=alpha, beta=beta, num_mc=num_mc)) ### normalization of likelihood for data above threshold
return np.sum(models.signalData_lnpdf(data[truth][:,0], alpha=alpha, beta=beta, num_mc=num_mc) - np.log(norm))
else:
return 0 | 47d8d75ef136eb809fa62f77226734ad5201d49e | 5,479 |
def _build_tags(model_uri, model_python_version=None, user_tags=None):
"""
:param model_uri: URI to the MLflow model.
:param model_python_version: The version of Python that was used to train the model, if
the model was trained in Python.
:param user_tags: A collection of user-specified tags to append to the set of default tags.
"""
tags = dict(user_tags) if user_tags is not None else {}
tags["model_uri"] = model_uri
if model_python_version is not None:
tags["python_version"] = model_python_version
return tags | 8807967b3e9d89dbb7a24542d2709bc9293992df | 5,480 |
def test_token(current_user: usermodels.User = Depends(get_current_user)):
"""
Test access token
"""
return current_user | b74580436bba2d02c14a0840fcc0a139e637abd2 | 5,481 |
def student_list_prof_request():
"""Return a JSON containing adding instructor requests, or raise 401 if not authorized."""
role_student = Role.query.filter_by(name='student').first()
if current_user.is_authenticated and has_membership(current_user.id, role_student):
list_approved = request.args.get('approved', type=int) or 0
list_pending = request.args.get('pending', type=int) or 0
list_declined = request.args.get('declined', type=int) or 0
prof_requests = []
if list_approved:
prof_requests.extend(AddProfRequest.query.filter_by(
user_id=current_user.id,
approved=ApprovalType.APPROVED,
).all())
if list_pending:
prof_requests.extend(AddProfRequest.query.filter_by(
user_id=current_user.id,
approved=ApprovalType.PENDING,
).all())
if list_declined:
prof_requests.extend(AddProfRequest.query.filter_by(
user_id=current_user.id,
approved=ApprovalType.DECLINED,
).all())
ret = []
for prof_request in prof_requests:
ret.append({
'id': prof_request.id,
'name': prof_request.name,
'department_id': prof_request.department.id,
'course_id': prof_request.course.id,
'term_id': prof_request.term.id,
'approved': prof_request.approved.value,
})
return jsonify(ret)
else:
abort(401) | 488a5b342cdb8a83ff94f0c234f5a0996c0c0203 | 5,482 |
def set_complete(request, id):
"""
    Marks a ticket as complete.
:param request:
:param id:
"""
ticket = Tickets.objects.get(pk=id)
ticket.complete = 1
ticket.save()
return redirect('/ticket/id=%s' % id) | a37e24751e6899c1cc9f413c6d0a356825b1c79f | 5,483 |
import pathlib
def parse_version_from_path(path):
"""Get version parts from a path name."""
path = pathlib.Path(path).absolute()
version = path.name
try:
parts = version.split("_")
ret = {}
ret["major"] = try_int(parts[0])
ret["minor"] = try_int(parts[1])
ret["protocol"] = try_int(parts[2])
ret["build"] = try_int(parts[3])
ret["string"] = version.replace("_", ".")
ret["file"] = version
except Exception:
error = "Bad API version in '{p}', must look like: '7_2_314_3181'"
error = error.format(p=path)
raise Exception(error)
return ret | 56abb214d6c3b6033a77b10c4d1a1d836ce0f8bd | 5,484 |
from typing import Dict
from typing import Any
from typing import Sequence
import math
import itertools
from collections import defaultdict
def assign_partitions_to_actors(
ip_to_parts: Dict[int, Any],
actor_rank_ips: Dict[int, str]) -> Dict[int, Sequence[Any]]:
"""Assign partitions from a distributed dataframe to actors.
This function collects distributed partitions and evenly distributes
them to actors, trying to minimize data transfer by respecting
co-locality.
This function currently does _not_ take partition sizes into account
for distributing data. It assumes that all partitions have (more or less)
the same length.
Instead, partitions are evenly distributed. E.g. for 8 partitions and 3
actors, each actor gets assigned 2 or 3 partitions. Which partitions are
assigned depends on the data locality.
The algorithm is as follows: For any number of data partitions, get the
Ray object references to the shards and the IP addresses where they
currently live.
Calculate the minimum and maximum amount of partitions per actor. These
numbers should differ by at most 1. Also calculate how many actors will
get more partitions assigned than the other actors.
First, each actor gets assigned up to ``max_parts_per_actor`` co-located
partitions. Only up to ``num_actors_with_max_parts`` actors get the
maximum number of partitions, the rest try to fill the minimum.
The rest of the partitions (all of which cannot be assigned to a
co-located actor) are assigned to actors until there are none left.
"""
num_partitions = sum(len(parts) for parts in ip_to_parts.values())
num_actors = len(actor_rank_ips)
min_parts_per_actor = max(0, math.floor(num_partitions / num_actors))
max_parts_per_actor = max(1, math.ceil(num_partitions / num_actors))
num_actors_with_max_parts = num_partitions % num_actors
# This is our result dict that maps actor objects to a list of partitions
actor_to_partitions = defaultdict(list)
# First we loop through the actors and assign them partitions from their
# own IPs. Do this until each actor has `min_parts_per_actor` partitions
partition_assigned = True
while partition_assigned:
partition_assigned = False
# Loop through each actor once, assigning
for rank, actor_ip in actor_rank_ips.items():
num_parts_left_on_ip = len(ip_to_parts[actor_ip])
num_actor_parts = len(actor_to_partitions[rank])
if num_parts_left_on_ip > 0 and \
num_actor_parts < max_parts_per_actor:
if num_actor_parts >= min_parts_per_actor:
# Only allow up to `num_actors_with_max_parts actors to
# have the maximum number of partitions assigned.
if num_actors_with_max_parts <= 0:
continue
num_actors_with_max_parts -= 1
actor_to_partitions[rank].append(ip_to_parts[actor_ip].pop(0))
partition_assigned = True
# The rest of the partitions, no matter where they are located, could not
# be assigned to co-located actors. Thus, we assign them
# to actors who still need partitions.
rest_parts = list(itertools.chain(*ip_to_parts.values()))
partition_assigned = True
while len(rest_parts) > 0 and partition_assigned:
partition_assigned = False
for rank in actor_rank_ips:
num_actor_parts = len(actor_to_partitions[rank])
if num_actor_parts < max_parts_per_actor:
if num_actor_parts >= min_parts_per_actor:
if num_actors_with_max_parts <= 0:
continue
num_actors_with_max_parts -= 1
actor_to_partitions[rank].append(rest_parts.pop(0))
partition_assigned = True
if len(rest_parts) <= 0:
break
if len(rest_parts) != 0:
raise RuntimeError(
"There are still partitions left to assign, but no actor "
"has capacity for more. This is probably a bug. Please go "
"to https://github.com/ray-project/xgboost_ray to report it.")
return actor_to_partitions | 884b2b05f965c8e3517f9e49019b0bfbf06f0298 | 5,485 |
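A usage sketch with plain strings standing in for the partition object references; the IPs and rank mapping are made up, and in real use the values would be Ray object refs grouped by node IP.
ip_to_parts = {"10.0.0.1": ["p0", "p1", "p2"], "10.0.0.2": ["p3", "p4"]}
actor_rank_ips = {0: "10.0.0.1", 1: "10.0.0.2"}
print(dict(assign_partitions_to_actors(ip_to_parts, actor_rank_ips)))
# {0: ['p0', 'p1', 'p2'], 1: ['p3', 'p4']} -- each actor keeps its co-located parts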
from typing import Tuple
def unsorted_array(arr: list) -> Tuple[list, int, Tuple[int, int]]:
"""
Time Complexity: O(n)
"""
start, end = 0, len(arr) - 1
while start < end and arr[start] < arr[start + 1]:
start += 1
while start < end and arr[end] > arr[end - 1]:
end -= 1
for el in arr[start : end + 1]:
# another way of implementing this part would be to find the min and
# max of the subarray and keep on decrementing start/incrementing end
while el < arr[start]:
start -= 1
while el > arr[end]:
end += 1
if start + 1 < end - 1:
return arr[start + 1 : end], end - start - 1, (start + 1, end - 1)
return [], 0, (-1, -1) | c3370a3e76009ef26ae3e1086e773463c312c6bb | 5,486 |
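An illustrative call: sorting the returned middle slice [6, 4, 8, 5] in place is enough to sort the whole list.
print(unsorted_array([1, 3, 6, 4, 8, 5, 9, 10]))  # ([6, 4, 8, 5], 4, (2, 5))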
def get_tol_values(places):
# type: (float) -> list
"""List of tolerances to test
Returns:
list[tuple[float, float]] -- [(abs_tol, rel_tol)]
"""
abs_tol = 1.1 / pow(10, places)
return [(None, None), (abs_tol, None)] | 5af82438abbc0889374d62181ca7f0b7ee3c0fbe | 5,487 |
def index(atom: Atom) -> int:
"""Index within the parent molecule (int).
"""
return atom.GetIdx() | 64d385588f683a048dfa9d54ea25d85c14f04cb7 | 5,488 |
def specification_config() -> GeneratorConfig:
"""A spec cache of r4"""
return load_config("python_pydantic") | ca4936fac7499cf986fb7ae201a07b77d6b7d917 | 5,489 |
def _wait_for_stack_ready(stack_name, region, proxy_config):
"""
    Verify if the stack has reached a terminal (*_COMPLETE or *_FAILED) state.
:param stack_name: Stack to query for
:param region: AWS region
:param proxy_config: Proxy configuration
    :return: true if the stack is in one of the terminal statuses listed below
"""
log.info("Waiting for stack %s to be ready", stack_name)
cfn_client = boto3.client("cloudformation", region_name=region, config=proxy_config)
stacks = cfn_client.describe_stacks(StackName=stack_name)
stack_status = stacks["Stacks"][0]["StackStatus"]
log.info("Stack %s is in status: %s", stack_name, stack_status)
return stack_status in [
"CREATE_COMPLETE",
"UPDATE_COMPLETE",
"UPDATE_ROLLBACK_COMPLETE",
"CREATE_FAILED",
"UPDATE_FAILED",
] | b9ed5cd161b2baef23da40bc0b67b8b7dfc2f2ea | 5,490 |
from typing import Optional
def create_order_number_sequence(
shop_id: ShopID, prefix: str, *, value: Optional[int] = None
) -> OrderNumberSequence:
"""Create an order number sequence."""
sequence = DbOrderNumberSequence(shop_id, prefix, value=value)
db.session.add(sequence)
try:
db.session.commit()
except IntegrityError as exc:
db.session.rollback()
raise OrderNumberSequenceCreationFailed(
f'Could not create order number sequence with prefix "{prefix}"'
) from exc
return _db_entity_to_order_number_sequence(sequence) | 49484a1145e0d2c0dde9fdf2935428b5f68cd190 | 5,491 |
import os
import shutil
import csv
def move_wheel_files(name, req, wheeldir, user=False, home=None):
"""Install a wheel"""
scheme = distutils_scheme(name, user=user, home=home)
if scheme['purelib'] != scheme['platlib']:
# XXX check *.dist-info/WHEEL to deal with this obscurity
        raise NotImplementedError("purelib != platlib")
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
location = dest = scheme['platlib']
installed = {}
changed = set()
def normpath(src, p):
return make_path_relative(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, location)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None):
if not os.path.exists(dest): # common for the 'include' path
os.makedirs(dest)
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base
and s.endswith('.dist-info')
# is self.req.project_name case preserving?
and s.lower().startswith(req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
if not os.path.exists(destsubdir):
os.makedirs(destsubdir)
for f in files:
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
shutil.move(srcfile, destfile)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, dest, True)
assert info_dir, "%s .dist-info directory not found" % req
for datadir in data_dirs:
fixer = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer)
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record) | 8e466a16a965f5955ff49cdbee1027b3e3d3074b | 5,492 |
import os
import numpy as np
from astropy.io import fits
from astropy.table import Table
def read_hdulist (fits_file, get_data=True, get_header=False,
ext_name_indices=None, dtype=None, columns=None,
memmap=True):
"""Function to read the data (if [get_data] is True) and/or header (if
[get_header] is True) of the input [fits_file]. The fits file can
be an image or binary table, and can be compressed (with the
compressions that astropy.io can handle, such as .gz and .fz
files). If [ext_name_indices] is defined, which can be an integer,
a string matching the extension's keyword EXTNAME or a list or
numpy array of integers, those extensions are retrieved.
"""
if os.path.exists(fits_file):
fits_file_read = fits_file
else:
# if fits_file does not exist, look for compressed versions or
# files without the .fz or .gz extension
if os.path.exists('{}.fz'.format(fits_file)):
fits_file_read = '{}.fz'.format(fits_file)
elif os.path.exists(fits_file.replace('.fz','')):
fits_file_read = fits_file.replace('.fz','')
elif os.path.exists('{}.gz'.format(fits_file)):
fits_file_read = '{}.gz'.format(fits_file)
elif os.path.exists(fits_file.replace('.gz','')):
fits_file_read = fits_file.replace('.gz','')
else:
raise FileNotFoundError ('file not found: {}'.format(fits_file))
# open fits file into hdulist
with fits.open(fits_file_read, memmap=memmap) as hdulist:
n_exts = len(hdulist)
# if [ext_name_indices] is a range, or list or numpy ndarray
# of integers, loop over these extensions and concatenate the
# data into one astropy Table; it is assumed the extension
# formats are identical to one another - this is used to read
# specific extensions from e.g. the calibration catalog.
if type(ext_name_indices) in [list, range, np.ndarray]:
for i_ext, ext in enumerate(ext_name_indices):
# get header from first extension as they should be
# all identical, except for NAXIS2 (nrows)
if get_header and i_ext==0:
header = hdulist[ext].header
if get_data:
# read extension
data_temp = hdulist[ext].data
# convert to table, as otherwise concatenation of
# extensions below using [stack_arrays] is slow
data_temp = Table(data_temp)
# could also read fits extension into Table directly,
# but this is about twice as slow as the 2 steps above
#data_temp = Table.read(fits_file_read, hdu=ext)
if i_ext==0:
data = data_temp
else:
#data = stack_arrays((data, data_temp),asrecarray=True,
# usemask=False)
# following does not work if data is a fitsrec
# array and the array contains boolean fields, as
# these are incorrectly converted; therefore the
# conversion to a Table above
data = np.concatenate([data, data_temp])
else:
# otherwise read the extension defined by [ext_name_indices]
# or simply the last extension
if type(ext_name_indices) in [int, str]:
ext = ext_name_indices
else:
ext = n_exts-1
if get_data:
data = hdulist[ext].data
# convert to [dtype] if it is defined
if dtype is not None:
data = data.astype(dtype, copy=False)
if get_header:
header = hdulist[ext].header
if columns is not None:
# only return defined columns
return [data[col] for col in columns if col in data.dtype.names]
else:
# return data and header depending on whether [get_data]
# and [get_header] are defined or not
if get_data:
if get_header:
return data, header
else:
return data
else:
if get_header:
return header
else:
return | 54188e63edb3432035fe851bd81d9e478138b728 | 5,493 |
import numpy as np
from scipy.fftpack import dct
def calcMFCC(signal, sample_rate=16000, win_length=0.025, win_step=0.01,
filters_num=26, NFFT=512, low_freq=0, high_freq=None, pre_emphasis_coeff=0.97,
cep_lifter=22, append_energy=True, append_delta=False):
"""Calculate MFCC Features.
Arguments:
signal: 1-D numpy array.
sample_rate: Sampling rate. Defaulted to 16KHz.
win_length: Window length. Defaulted to 0.025, which is 25ms/frame.
win_step: Interval between the start points of adjacent frames.
Defaulted to 0.01, which is 10ms.
filters_num: Numbers of filters. Defaulted to 26.
NFFT: Size of FFT. Defaulted to 512.
low_freq: Lowest frequency.
high_freq: Highest frequency.
pre_emphasis_coeff: Coefficient for pre-emphasis. Pre-emphasis increase
the energy of signal at higher frequency. Defaulted to 0.97.
cep_lifter: Numbers of lifter for cepstral. Defaulted to 22.
append_energy: Whether to append energy. Defaulted to True.
append_delta: Whether to append delta to feature. Defaulted to False.
Returns:
2-D numpy array with shape (NUMFRAMES, features). Each frame containing filters_num of features.
"""
(feat, energy) = _fbank(signal, sample_rate, win_length, win_step, filters_num, NFFT,
low_freq, high_freq, pre_emphasis_coeff)
feat = np.log(feat)
feat = dct(feat, type=2, axis=1, norm='ortho')
feat = _lifter(feat, cep_lifter)
if append_energy:
feat[:, 0] = np.log(energy)
if append_delta:
feat_delta = _delta(feat)
feat_delta_delta = _delta(feat_delta)
feat = np.concatenate((feat, feat_delta, feat_delta_delta), axis=1)
return feat | b10429bec859af3f5e6302e7f2f185bf64178922 | 5,494 |
def get_user(domain_id=None, enabled=None, idp_id=None, name=None, password_expires_at=None, protocol_id=None, region=None, unique_id=None):
"""
Use this data source to get the ID of an OpenStack user.
"""
__args__ = dict()
__args__['domainId'] = domain_id
__args__['enabled'] = enabled
__args__['idpId'] = idp_id
__args__['name'] = name
__args__['passwordExpiresAt'] = password_expires_at
__args__['protocolId'] = protocol_id
__args__['region'] = region
__args__['uniqueId'] = unique_id
__ret__ = pulumi.runtime.invoke('openstack:identity/getUser:getUser', __args__)
return GetUserResult(
default_project_id=__ret__.get('defaultProjectId'),
domain_id=__ret__.get('domainId'),
region=__ret__.get('region'),
id=__ret__.get('id')) | e7f6a874816673ebab1fe2f5e807def02fe232d1 | 5,495 |
def use(workflow_id, version, client=None):
"""
Use like ``import``: load the proxy object of a published `Workflow` version.
Parameters
----------
workflow_id: str
ID of the `Workflow` to retrieve
version: str
Version of the workflow to retrive
client: `.workflows.client.Client`, optional
Allows you to use a specific client instance with non-default
auth and parameters
Returns
-------
obj: Proxytype
Proxy object of the `Workflow` version.
Example
-------
>>> import descarteslabs.workflows as wf
>>> @wf.publish("[email protected]:ndvi", "0.0.1") # doctest: +SKIP
... def ndvi(img: wf.Image) -> wf.Image:
... nir, red = img.unpack_bands("nir red")
... return (nir - red) / (nir + red)
>>> same_function = wf.use("[email protected]:ndvi", "0.0.1") # doctest: +SKIP
>>> same_function # doctest: +SKIP
<descarteslabs.workflows.types.function.function.Function[Image, {}, Image] object at 0x...>
>>> img = wf.Image.from_id("sentinel-2:L1C:2019-05-04_13SDV_99_S2B_v1")
>>> same_function(img).compute(geoctx) # geoctx is an arbitrary geocontext for 'img' # doctest: +SKIP
ImageResult:
...
"""
return VersionedGraft.get(workflow_id, version, client=client).object | d8f34e521af161e8840e11c8c888fd959e8584c9 | 5,496 |
def logout():
"""
Route for logout page.
"""
logout_user()
return redirect(url_for('index')) | 4e59dd9a6b59639e24053be072f4ade4fb23d922 | 5,497 |
def ipv4_size_check(ipv4_long):
"""size chek ipv4 decimal
Args:
ipv4_long (int): ipv4 decimal
Returns:
        bool: True if the value is a valid IPv4 decimal, False otherwise
"""
if type(ipv4_long) is not int:
return False
elif 0 <= ipv4_long <= 4294967295:
return True
else:
return False | 97c5d5c7472fb81e280f91275b5a88b032ee7927 | 5,498 |
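Illustrative calls for the range check above.
print(ipv4_size_check(3232235777))  # True  (192.168.1.1 as a decimal)
print(ipv4_size_check(4294967296))  # False (one above the IPv4 maximum)
print(ipv4_size_check("10"))        # False (not an int)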
import numpy as np
def generate_gate_hadamard_mat() -> np.ndarray:
    """Return the Hilbert-Schmidt representation matrix for a Hadamard (H) gate with respect to the orthonormal Hermitian matrix basis with the normalized identity matrix as the 0th element.
The result is a 4 times 4 real matrix.
Parameters
----------
Returns
----------
np.ndarray
The real Hilbert-Schmidt representation matrix for the gate.
"""
l = [[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0]]
mat = np.array(l, dtype=np.float64)
return mat | 8dd36adb0cef79cbc758267fb7adf371f7b698b0 | 5,499 |
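A quick check of the matrix above: the Hadamard gate is an involution, so its Hilbert-Schmidt representation squares to the identity.
mat = generate_gate_hadamard_mat()
print(np.allclose(mat @ mat, np.eye(4)))  # True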