content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def uncapitalize(string: str):
"""De-capitalize first character of string
E.g. 'How is Michael doing?' -> 'how is Michael doing?'
"""
if len(string):
return string[0].lower() + string[1:]
return "" | 1a294f171d16d7a4c41fb0546feca3c03b7ae37a | 7,300 |
import sqlite3
import os
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(os.path.join(config['database']))
rv.row_factory = sqlite3.Row
return rv | 1a3f3e6385500758b9b0cef8af2b15210697e3b5 | 7,301 |
def _sc_weights_trad(M, M_c, V, N, N0, custom_donor_pool, best_w_pen, verbose=0):
""" Traditional matrix solving. Requires making NxN0 matrices.
"""
#Potentially could be decomposed to not build NxN0 matrix, but the RidgeSolution works fine for that.
sc_weights = np.full((N,N0), 0.)
weight_log_inc = max(int(N/100), 1)
for i in range(N):
if ((i % weight_log_inc) == 0 and verbose>0):
print_progress(i+1, N)
if verbose > 1:
print_memory_snapshot(extra_str="Loop " + str(i))
allowed = custom_donor_pool[i,:]
sc_weights[i,allowed] = _weights(V, M[i,:], M_c[allowed,:], best_w_pen)
if ((N-1) % weight_log_inc) != 0 and verbose > 0:
print_progress(N, N)
return sc_weights | 00b9ae93b52453281693660fa6de685cd784127e | 7,302 |
import os
import argparse
def getargs():
"""Parse command line arguments"""
desc = (
"Analyze and query strace log given the strace log in CSV format "
"(STRACE_CSV). See 'strace2csv.py' for converting strace "
"log to the csv format expected by this tool."
)
epil = "Example: ./%s strace.csv summary" % os.path.basename(__file__)
parser = argparse.ArgumentParser(
description=desc, epilog=epil, formatter_class=_SmartFormatter
)
helpstr = "path to strace log in csv format (output from strace2csv.py)"
parser.add_argument("STRACE_CSV", nargs=1, help=helpstr)
helpstr = "R|specify output details, one of the following strings:"
parser.add_argument("COMMAND", nargs=1, help=helpstr + _command_help())
helpstr = "set the verbose level between 0-3 (defaults to --verbose=1)"
parser.add_argument("--verbose", help=helpstr, type=int, default=1)
return parser.parse_args() | 3d5b3132434ad0d74a633ee8bc5e231674571d7f | 7,303 |
from typing import Callable
from typing import Awaitable
def generate_on_message(
test_client: "DiscordTestClient", broker_id: int
) -> Callable[[discord.Message], Awaitable[None]]:
"""
Whenever a message comes in, we want our test client to:
1. Filter the message so we are only getting the ones we want.
2. Store received messages so we can inspect them during tests.
3. Wait to receive a certain number of messages, then set an event communicating
that the expected number of messages has been received and we can continue.
"""
async def on_message(message: discord.Message) -> None:
# Toss out any messages not on our expected channels, otherwise we may receive
# messages from other devs running tests concurrently
if message.channel.id not in test_client.channel_id_whitelist:
return
# Print the message for our test logs. We're only going to use the primary
# client to print so we don't double-print each message.
if test_client.is_primary:
print(
f"message received"
f"\nfrom: {test_client.user.display_name}"
f"\nby: {message.author.display_name}"
f"\nchannel: {message.channel.name}"
f"\n{message.content}\n\n"
)
if message.author.id != broker_id:
return
test_client.messages_received.append(message)
if test_client.test_expected_count_received == 0:
raise IOError("Received an unexpected message")
if (
len(test_client.messages_received)
>= test_client.test_expected_count_received
and not test_client.event_messages_received.is_set()
):
test_client.event_messages_received.set()
return on_message | 2de510a3195f60056ad13c8fb1b9c71fe480b5ab | 7,304 |
import unittest
def test_suite():
"""
Construct a TestSuite instance for all test cases.
"""
suite = unittest.TestSuite()
for dt, format, expectation in TEST_CASES:
suite.addTest(create_testcase(dt, format, expectation))
return suite | 791e6942d213c53a44f433495e13a76abfc1f936 | 7,305 |
def calcScipionScore(modes):
"""Calculate the score from hybrid electron microscopy normal mode analysis (HEMNMA)
[CS14]_ as implemented in the Scipion continuousflex plugin [MH20]_. This score
prioritises modes as a function of mode number and collectivity order.
.. [CS14] Sorzano COS, de la Rosa-Trevín JM, Tama F, Jonić S.
Hybrid Electron Microscopy Normal Mode Analysis graphical interface and protocol.
*J Struct Biol* **2014** 188:134-41.
.. [MH20] Harastani M, Sorzano COS, Jonić S.
Hybrid Electron Microscopy Normal Mode Analysis with Scipion.
*Protein Sci* **2020** 29:223-236.
:arg modes: mode(s) or vector(s)
:type modes: :class:`.Mode`, :class:`.Vector`, :class:`.ModeSet`, :class:`.NMA`
"""
n_modes = modes.numModes()
if n_modes > 1:
collectivityList = list(calcCollectivity(modes))
else:
collectivityList = [calcCollectivity(modes)]
idxSorted = [i[0] for i in sorted(enumerate(collectivityList),
key=lambda x: x[1],
reverse=True)]
score = np.zeros(n_modes)
modeNum = list(range(n_modes))
for i in range(n_modes):
score[idxSorted[i]] = idxSorted[i] + modeNum[i] + 2
score = score / (2.0 * n_modes)
return score | e1c786bb90d6a7bc0367338f5e5bd6d10ec35366 | 7,306 |
def google_base(request):
""" view for Google Base Product feed template; returns XML response """
products = Product.active.all()
template = get_template("marketing/google_base.xml")
xml = template.render(Context(locals()))
return HttpResponse(xml, mimetype="text/xml") | a850e4c16f55486c872d0d581a25802d7de3c56e | 7,307 |
def get_agivenn_df(run_list, run_list_sep, **kwargs):
"""DF of mean amplitudes conditiontioned on differnet n values."""
n_simulate = kwargs.pop('n_simulate')
adfam_t = kwargs.pop('adfam_t', None)
adaptive = kwargs.pop('adaptive')
n_list = kwargs.pop('n_list', [1, 2, 3])
comb_vals, comb_val_resamps, sep_vals, sep_val_resamps = (
comb_sep_eval_resamp(
run_list, run_list_sep, get_a_n_mean_given_n, n_simulate,
adaptive=adaptive, n_list=n_list, adfam_t=adfam_t))
col_names = [r'$\mathrm{{E}}[a_{}|N={}]$'.format(n, n) for n in n_list]
return get_sep_comb_df(
comb_vals, comb_val_resamps, sep_vals, sep_val_resamps,
col_names) | fe8fa42a3bc2e78ec1d5c5a7d47151d56789f5a5 | 7,308 |
def friendship_request_list_rejected(request, template_name='friendship/friend/requests_list.html'):
""" View rejected friendship requests """
# friendship_requests = Friend.objects.rejected_requests(request.user)
friendship_requests = FriendshipRequest.objects.filter(rejected__isnull=False)
return render(request, template_name, {'requests': friendship_requests}) | 2457de6e01bd4fee96d499a481a3c5a2cd0d1782 | 7,309 |
def cycle_ctgo(object_type, related_type, related_ids):
""" indirect relationships between Cycles and Objects mapped to CycleTask """
if object_type == "Cycle":
join_by_source_id = db.session.query(CycleTask.cycle_id) \
.join(Relationship, CycleTask.id == Relationship.source_id) \
.filter(
Relationship.source_type == "CycleTaskGroupObjectTask",
Relationship.destination_type == related_type,
Relationship.destination_id.in_(related_ids))
join_by_destination_id = db.session.query(CycleTask.cycle_id) \
.join(Relationship, CycleTask.id == Relationship.destination_id) \
.filter(
Relationship.destination_type == "CycleTaskGroupObjectTask",
Relationship.source_type == related_type,
Relationship.source_id.in_(related_ids))
return join_by_source_id.union(join_by_destination_id)
else:
join_by_source_id = db.session.query(Relationship.destination_id) \
.join(CycleTask, CycleTask.id == Relationship.source_id) \
.filter(
CycleTask.cycle_id.in_(related_ids),
Relationship.source_type == "CycleTaskGroupObjectTask",
Relationship.destination_type == object_type)
join_by_destination_id = db.session.query(Relationship.source_id) \
.join(CycleTask, CycleTask.id == Relationship.destination_id) \
.filter(
CycleTask.cycle_id.in_(related_ids),
Relationship.destination_type == "CycleTaskGroupObjectTask",
Relationship.source_type == object_type)
return join_by_source_id.union(join_by_destination_id) | 25de449672ef9ced358a53762156f3cbeaabd432 | 7,310 |
def Min(axis=-1, keepdims=False):
"""Returns a layer that applies min along one tensor axis.
Args:
axis: Axis along which values are grouped for computing minimum.
keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
axis; else, remove that axis.
"""
return Fn('Min', lambda x: jnp.min(x, axis, keepdims=keepdims)) | 09c83217b48f16782530c1954f3e4f0127c06e69 | 7,311 |
def sampling(args):
"""Reparameterization trick by sampling fr an isotropic unit Gaussian.
# Arguments
args (tensor): mean and log of variance of Q(z|X)
# Returns
z (tensor): sampled latent vector
"""
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean=0 and std=1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon | 06e8ca16f1139e12242ee043aa680d5ce7c43c10 | 7,312 |
def get_expression_arg_names(expression, strip_dots=True):
"""
Parse expression and return set of all argument names. For arguments
with attribute-like syntax (e.g. materials), if `strip_dots` is
True, only base argument names are returned.
"""
args = ','.join(aux.args for aux in parse_definition(expression))
args = [arg.strip() for arg in args.split(',')]
if strip_dots:
for ii, arg in enumerate(args[:]):
aux = arg.split('.')
if len(aux) == 2:
args[ii] = aux[0]
return set(args) | 6f96395af45b008e5e8b0336c320813a760add49 | 7,313 |
from pathlib import Path
def ORDER_CTIME(path: Path) -> int:
"""パスのソート用関数です。作成日時でソートを行います。
"""
return path.stat().st_ctime_ns | 435571222b26e0c83904305784d6c8868b5bf497 | 7,314 |
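# Hedged usage sketch for ORDER_CTIME above: sort the entries of a directory by
# creation time. The "." directory is a hypothetical example.
example_paths = sorted(Path(".").iterdir(), key=ORDER_CTIME)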
def format_location(data, year):
""" Format any spatial data. Does nothing yet.
Parameters
----------
data : pd.DataFrame
Data before location formatting.
Returns
-------
data : pd.DataFrame
Data with location formatting.
"""
# No other spatial data yet; only the region column is mapped.
# data["MSOA"] = "no_location"
# data["location"] = "no_location"
data["region"] = data["region"].astype(str).map(region_dict)
return data | 11d5d7b88f3143b38dc57248937b5e19e22e44c8 | 7,315 |
from typing import Union
from typing import List
def deploy_ingestion_service(
featureset: Union[FeatureSet, str],
source: DataSource = None,
targets: List[DataTargetBase] = None,
name: str = None,
run_config: RunConfig = None,
):
"""Start real-time ingestion service using nuclio function
Deploy a real-time function implementing feature ingestion pipeline
the source maps to Nuclio event triggers (http, kafka, v3io stream, etc.)
example::
source = HTTPSource()
func = mlrun.code_to_function("ingest", kind="serving").apply(mount_v3io())
config = RunConfig(function=func)
fs.deploy_ingestion_service(my_set, source, run_config=config)
:param featureset: feature set object or uri
:param source: data source object describing the online or offline source
:param targets: list of data target objects
:param name: name for the job/function
:param run_config: service runtime configuration (function object/uri, resources, etc..)
"""
if isinstance(featureset, str):
featureset = get_feature_set_by_uri(featureset)
run_config = run_config.copy() if run_config else RunConfig()
source, run_config.parameters = set_task_params(
featureset, source, targets, run_config.parameters
)
name = name or f"{featureset.metadata.name}_ingest"
if not run_config.function:
function_ref = featureset.spec.function.copy()
if function_ref.is_empty():
function_ref = FunctionReference(name=name, kind=RuntimeKinds.serving)
function_ref.kind = function_ref.kind or RuntimeKinds.serving
if not function_ref.url:
function_ref.code = function_ref.code or ""
run_config.function = function_ref
function = run_config.to_function(
RuntimeKinds.serving, mlrun.mlconf.feature_store.default_job_image
)
function.metadata.project = featureset.metadata.project
function.metadata.name = function.metadata.name or name
# todo: add trigger (from source object)
function.spec.graph = featureset.spec.graph
function.spec.parameters = run_config.parameters
function.spec.graph_initializer = (
"mlrun.feature_store.ingestion.featureset_initializer"
)
function.verbose = True
if run_config.local:
return function.to_mock_server(namespace=get_caller_globals())
return function.deploy() | b1fb46c3900d70bf300a8acdf8045cf308be37ad | 7,316 |
def create_app(service: Service):
"""Start a small webserver with the Service."""
app = FastAPI()
@app.post("/query")
def query(params: Params):
"""The main query endpoint."""
return service.query(**params.query, n_neighbors=params.n_neighbors)
return app | 3d2f01960d3def11f45bbbbf653511d6f5362881 | 7,317 |
def establecer_dominio(func_dist: Expr) -> dict:
"""Establece el dominio a partir de una FD.
Parameters
----------
func_dist
Probability distribution
Returns
-------
dict
Domain
"""
equations = func_dist.atoms(Eq)
orders = func_dist.atoms(Rel) - equations
dom = {var: EmptySet for var in func_dist.atoms(Symbol)}
for order in orders:
if len(order.atoms(Symbol)) > 1:
continue
var, = order.atoms(Symbol)
val = solveset(order, var, Integers)
dom[var] = dom[var] & val if dom[var] else val
for equation in equations:
var, = equation.atoms(Symbol)
val = solveset(equation, var)
dom[var] = dom[var] | val
return dom | 8ab1b4bc6518cb8baa300bd9f1d38ffff3dfbcf7 | 7,318 |
def random_init(n, max_norm):
"""Computes a random initial configuration of n 2D-vectors such that they all
are inside of a circle of radius max_norm
Parameters
----------
n : int
Number of vectors
max_norm : float or int
Radius of the circle or maximum possible distance from the origin of
coordinates that the vectors can have.
Returns
-------
numpy.ndarray
(n, 2) matrix of vectors
"""
X = np.zeros((n, 2))
angles = np.random.rand(n) * 2 * np.pi
norms = np.random.rand(n) * max_norm
for i, angle, norm in zip(range(n), angles, norms):
X[i] = np.array([np.cos(angle), np.sin(angle)]) * norm
return X | 5533e43572c47d8c8cd2d6765bb383382987015b | 7,319 |
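# Hedged usage sketch for random_init above (assumes numpy is imported as np, as the
# snippet requires): every generated vector lies inside the circle of radius max_norm.
X = random_init(100, max_norm=5.0)
assert X.shape == (100, 2)
assert (np.linalg.norm(X, axis=1) <= 5.0).all()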
from typing import Dict
from typing import Tuple
def calc_cells(serial: int) -> Dict[Tuple[int, int], int]:
"""Calculate the power for all cells
and store them in a dict to retrieve them faster later
"""
r = {}
for i in range(300):
for j in range(300):
r.update({(i, j): calc_power((i, j), serial)})
return r | 70684827b5c3ef1ec31d419f3012356a9bde1e6c | 7,320 |
import os
def check_img(img, input_dir):
""" Checks whether the img complies with API`s restrictions.
Parameters
----------
img : str
Image name.
input_dir : str
Path to the dir with the image to check.
Returns
-------
Error message if image does not comply with API`s
restrictions. Otherwise, returns "correct".
"""
img_format = img[img.find("."):].lower()
if img_format not in ALLOWED_FORMATS:
return f"{img},[Error] Unsupported format {img_format}\n"
if os.path.getsize(os.path.join(input_dir, img)) >= IMG_SIZE_LIMIT:
return f"{img},[Error] Size is larger than {IMG_SIZE_LIMIT}B\n"
img_cv2 = cv2.imread(os.path.join(input_dir, img))
img_height, img_width, _ = img_cv2.shape
if (not MAX_IMG_DIM > img_height > MIN_IMG_DIM or
not MAX_IMG_DIM > img_width > MIN_IMG_DIM):
return f"{img},[Error] Img dim must be in between " \
f"{MIN_IMG_DIM}-{MAX_IMG_DIM}\n"
return "correct" | 1052554ac784c52c88830f07ba738c2feb6a79c1 | 7,321 |
from typing import Union
import pathlib
from typing import List
import glob
from pathlib import Path
def child_files_recursive(root: Union[str, pathlib.Path], ext: str) -> List[str]:
"""
Get all files with a specific extension nested under a root directory.
Parameters
----------
root : pathlib.Path or str
root directory
ext : str
file extension
Returns
-------
List[str]
"""
if not is_string_like(root) and not isinstance(root, pathlib.Path):
raise TypeError(f'root is not string-like or a Path: {type(root)}')
return list(glob.iglob(str(Path(root).joinpath('**/*' + ext)), recursive=True)) | c16288b417d36d6d414c799c78fd59df976ca400 | 7,322 |
def ensure_dict(value):
"""Convert None to empty dict."""
if value is None:
return {}
return value | 191b1a469e66750171648e715501690b2814b8b2 | 7,323 |
import random
def mutSet(individual):
"""Mutation that pops or add an element."""
if random.random() < 0.5:
if len(individual) > 0: # We cannot pop from an empty set
individual.remove(random.choice(sorted(tuple(individual))))
else:
individual.add(random.randrange(param.NBR_ITEMS))
return individual, | f9919da7f6612e3f317dbe854eda05a71d106632 | 7,324 |
def validate_tweet(tweet: str) -> bool:
"""It validates a tweet.
Args:
tweet (str): The text to tweet.
Raises:
ValueError: Raises if tweet length is more than 280 unicode characters.
Returns:
bool: True if validation holds.
"""
str_len = len(tweet)  # number of unicode characters
if str_len > 280:
raise ValueError(f"tweet is more than 280 unicode characters\n {tweet}")
else:
return True | 41c7ef1967cba5bb75ea8bce7ffa9b7d636ef80e | 7,325 |
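# Hedged usage sketch for validate_tweet above: a short tweet passes, an over-length
# one raises ValueError.
assert validate_tweet("hello world") is True
try:
    validate_tweet("x" * 281)
except ValueError:
    pass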
def train_and_eval(model, model_dir, train_input_fn, eval_input_fn,
steps_per_epoch, epochs, eval_steps):
"""Train and evaluate."""
train_dataset = train_input_fn()
eval_dataset = eval_input_fn()
callbacks = get_callbacks(model, model_dir)
history = model.fit(
x=train_dataset,
validation_data=eval_dataset,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_steps=eval_steps,
callbacks=callbacks)
tf.get_logger().info(history)
return model | 54a30f82ab3da4b60534a2775c2217de057ba93c | 7,326 |
import requests
from lxml import html
def open_website(url):
""" Open website and return a class ready to work on """
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
}
page = requests.get(url, headers=headers)
source = html.fromstring(page.content)
return source | 081ed99692ff9763cb19208fdc7f3e7e08e03e8d | 7,327 |
import sys
def train():
"""Trains the model"""
if not request.is_json:
return jsonify(error='Request must be json'), 400
try:
frame = data_uri_to_cv2_img(request.json['frame'])
except: # pylint: disable=bare-except
e_type, value, _ = sys.exc_info()
print(e_type)
print(value)
return jsonify(error='Could not decode frame'), 400
model_id = request.json['model_id']
coordinates = request.json['coord_x'], request.json['coord_y']
if model_id not in frames:
frames[model_id] = list()
frames[model_id].append((frame, coordinates))
if len(frames[model_id]) >= N_TRAINING_SAMPLES:
models[model_id] = GazeTrackingModel(frames[model_id])
remaining_frames = N_TRAINING_SAMPLES - len(frames[model_id])
return jsonify(remaining=remaining_frames) | ce42f5751623dbf2da9544eab7ad98ea3cc67d51 | 7,328 |
def euclidean3d(v1, v2):
"""Faster implementation of euclidean distance for the 3D case."""
if not (len(v1) == 3 and len(v2) == 3):
print("Vectors are not in 3D space. Returning None.")
return None
return np.sqrt((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2 + (v1[2] - v2[2]) ** 2) | 89facc15567a7ed0138dee09ef1824ba40bb58a8 | 7,329 |
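# Hedged usage sketch for euclidean3d above.
import numpy as np
print(euclidean3d([0.0, 0.0, 0.0], [1.0, 2.0, 2.0]))  # 3.0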
def blast_seqs(seqs,
blast_constructor,
blast_db=None,
blast_mat_root=None,
params={},
add_seq_names=True,
out_filename=None,
WorkingDir=None,
SuppressStderr=None,
SuppressStdout=None,
input_handler=None,
HALT_EXEC=False
):
"""Blast list of sequences.
seqs: either file name or list of sequence objects or list of strings or
single multiline string containing sequences.
WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules
for data are as follows. If it's s list, treat as lines, unless
add_seq_names is true (in which case treat as list of seqs). If it's a
string, test whether it has newlines. If it doesn't have newlines, assume
it's a filename. If it does have newlines, it can't be a filename, so
assume it's a multiline string containing sequences.
If you want to skip the detection and force a specific type of input
handler, use input_handler='your_favorite_handler'.
add_seq_names: boolean. if True, sequence names are inserted in the list
of sequences. if False, it assumes seqs is a list of lines of some
proper format that the program can handle
"""
# set num keep
if blast_db:
params["-d"] = blast_db
if out_filename:
params["-o"] = out_filename
ih = input_handler or guess_input_handler(seqs, add_seq_names)
blast_app = blast_constructor(
params=params,
blast_mat_root=blast_mat_root,
InputHandler=ih,
WorkingDir=WorkingDir,
SuppressStderr=SuppressStderr,
SuppressStdout=SuppressStdout,
HALT_EXEC=HALT_EXEC)
return blast_app(seqs) | ce22f90fe3092a2c792478d7a4818d67fd13a753 | 7,330 |
def merge_dicts(dicts, handle_duplicate=None):
"""Merge a list of dictionaries.
Invoke handle_duplicate(key, val1, val2) when two dicts maps the
same key to different values val1 and val2, maybe logging the
duplication.
"""
if not dicts:
return {}
if len(dicts) == 1:
return dicts[0]
if handle_duplicate is None:
return {key: val for dict_ in dicts for key, val in dict_.items()}
result = {}
for dict_ in dicts:
for key, val in dict_.items():
if key in result and val != result[key]:
handle_duplicate(key, result[key], val)
continue
result[key] = val
return result | 44c06ab30bb76920ff08b5978a6aa271abd3e449 | 7,331 |
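# Hedged usage sketch for merge_dicts above: log conflicting keys while keeping the
# first value seen (the helper name warn_duplicate is illustrative only).
def warn_duplicate(key, val1, val2):
    print(f"duplicate key {key!r}: keeping {val1!r}, ignoring {val2!r}")

merged = merge_dicts([{"a": 1, "b": 2}, {"b": 3, "c": 4}], handle_duplicate=warn_duplicate)
# merged == {"a": 1, "b": 2, "c": 4}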
from datetime import datetime, timezone
def _timestamp(line: str) -> Timestamp:
"""Returns the report timestamp from the first line"""
start = line.find("GUIDANCE") + 11
text = line[start : start + 16].strip()
timestamp = datetime.strptime(text, r"%m/%d/%Y %H%M")
return Timestamp(text, timestamp.replace(tzinfo=timezone.utc)) | 7e3083c6dec766fe681e82555daa59ba7f5166b5 | 7,332 |
def start_qpsworkers(languages, worker_hosts):
"""Starts QPS workers as background jobs."""
if not worker_hosts:
# run two workers locally (for each language)
workers=[(None, 10000), (None, 10010)]
elif len(worker_hosts) == 1:
# run two workers on the remote host (for each language)
workers=[(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
else:
# run one worker per each remote host (for each language)
workers=[(worker_host, 10000) for worker_host in worker_hosts]
return [create_qpsworker_job(language,
shortname= 'qps_worker_%s_%s' % (language,
worker_idx),
port=worker[1] + language.worker_port_offset(),
remote_host=worker[0])
for language in languages
for worker_idx, worker in enumerate(workers)] | 3b53693a292027fd82d808b6d609fb3276f1bc2a | 7,333 |
import os
def is_source_ext(filename):
"""
Tells if filename (filepath) is a source file. For our purposes "sources"
are any files that can #include and can be included.
"""
_, ext = os.path.splitext(filename)
return ext in [".h", ".hh", ".hpp", ".inc", ".c", ".cc", ".cxx", ".cpp", ".f", ".F"] | b388768191e3efbfe2ddf3925a3ebf9f3a6693ac | 7,334 |
def MCE(conf, pred, true, bin_size = 0.1):
"""
Maximal Calibration Error
Args:
conf (numpy.ndarray): list of confidences
pred (numpy.ndarray): list of predictions
true (numpy.ndarray): list of true labels
bin_size: (float): size of one bin (0,1) # TODO should convert to number of bins?
Returns:
mce: maximum calibration error
"""
upper_bounds = np.arange(bin_size, 1+bin_size, bin_size)
cal_errors = []
for conf_thresh in upper_bounds:
acc, avg_conf, _ = compute_acc_bin(conf_thresh-bin_size, conf_thresh, conf, pred, true)
cal_errors.append(np.abs(acc-avg_conf))
return max(cal_errors) | fb2390daeb8aed2f700994723884971198b613e8 | 7,335 |
def validate_frame_range(shots, start_time, end_time, sequence_time=False):
"""
Verify if the given frame range is overlapping existing shots timeline
range. If it is overlapping any shot tail, it redefine the start frame at
the end of it. If it is overlapping any shot head, it will push back all
shots (and animation) behind the range to ensure the space is free to
insert new shot.
:param list[str] shots: Maya shot node names.
:param int start_time:
:param int end_time:
:param bool sequence_time:
Operate on Camera Sequencer's timeline instead of Maya timeline.
:rtype: tuple[int, int]
:return: Free range.
"""
start_attribute = "sequenceStartFrame" if sequence_time else "startFrame"
end_attribute = "sequenceEndFrame" if sequence_time else "endFrame"
length = end_time - start_time
# Offset start_time to ensure it is not overlapping any shot tail.
for shot in shots:
shot_start = cmds.getAttr(shot + "." + start_attribute)
shot_end = cmds.getAttr(shot + "." + end_attribute)
# Ensure the time is not in the middle of a shot.
if shot_start <= start_time <= shot_end:
start_time = shot_end + 1
break
# Detect overlapping shots from heads.
end_time = start_time + length
overlapping_shots = filter_shots_from_range(
shots=shots,
start_frame=start_time,
end_frame=end_time,
sequence_time=sequence_time)
if not overlapping_shots:
return start_time, end_time
# Push back overlapping shots.
offset = max(
end_time - cmds.getAttr(shot + "." + start_attribute) + 1
for shot in overlapping_shots)
if sequence_time:
# Operating on the camera sequencer timeline don't need to adapt
# animation.
shift_shots_in_sequencer(shots, offset, after=end_time - offset)
return start_time, end_time
shift_shots(shots, offset, after=end_time - offset)
curves = cmds.ls(type=ANIMATION_CURVES_TYPES)
if curves:
hold_animation_curves(curves, end_time - offset, offset)
return start_time, end_time | d287ad393c80b899cfba3bac92ea9717918aed4a | 7,336 |
def sparse_add(sv1, sv2):
"""dict, dict -> dict
Returns a new dictionary that is the sum of the other two.
>>> sparse_add(sv1, sv2)
{0: 5, 1: 6, 2: 9}
"""
newdict = {}
keys = set(sv1.keys()) | set(sv2.keys())
for key in keys:
x = sv1.get(key, 0) + sv2.get(key, 0)
newdict[key] = x
return (newdict) | ced3420a585084a246ad25f7686fb388f2c05542 | 7,337 |
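# Hedged usage sketch for sparse_add above, mirroring its doctest.
sv1 = {0: 5, 1: 2}
sv2 = {1: 4, 2: 9}
print(sparse_add(sv1, sv2))  # {0: 5, 1: 6, 2: 9}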
def return_flagger(video_ID):
"""
In GET request
- Returns the username of the user that flagged the video with the corresponding video ID from the FLAGS table.
"""
if request.method == 'GET':
return str(db.get_flagger(video_ID)) | c8ca346b60ffa322847b5444c39dcbc43c66701a | 7,338 |
def get_all_hits():
"""Retrieves all hits.
"""
hits = [ i for i in get_connection().get_all_hits()]
pn = 1
total_pages = 1
while pn < total_pages:
pn = pn + 1
print "Request hits page %i" % pn
temp_hits = get_connection().get_all_hits(page_number=pn)
hits.extend(temp_hits)
return hits | 23f4da652d9e89dd0401ac4d8ccf2aa4f2660a5e | 7,339 |
import socket
def create_socket(
host: str = "", port: int = 14443, anidb_server: str = "", anidb_port: int = 0
) -> socket.socket:
"""Create a socket to be use to communicate with the server.
This function is called internally, so you only have to call it if you want to change the default parameters.
:param host: local host to bind the socket to, defaults to "" (which I think is any. Read the docs.)
:type host: str, optional
:param port: local port to bind the socket to, defaults to 14443
:type port: int, optional
:param anidb_server: aniDB server name, defaults to environment ANIDB_SERVER
:type anidb_server: str, optional
:param anidb_port: anidb port, default to environment ANIDB_PORT
:type anidb_port: int, optional
:return: The created socket.
:rtype: socket.socket
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
anidb_server = value_or_error("ANIDB_SERVER", anidb_server)
anidb_port = value_or_error("ANIDB_PORT", anidb_port)
s.connect((anidb_server, anidb_port))
logger.info(
f"Created socket on UDP %s:%d => %s:%d", host, port, anidb_server, anidb_port
)
global _conn
_conn = s
return s | 6b8cc3aa19af4582dbdf96d781d8ae64399165cf | 7,340 |
def aten_eq(mapper, graph, node):
""" 构造判断数值是否相等的PaddleLayer。
TorchScript示例:
%125 : bool = aten::eq(%124, %123)
参数含义:
%125 (bool): 对比后结果。
%124 (-): 需对比的输入1。
%123 (-): 需对比的输入2。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Process input 0, i.e. %124
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["x"] = inputs_name[0]
x_value = list(node.inputs())[0]
x_type = x_value.type()
# Process input 1, i.e. %123
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
layer_inputs["y"] = inputs_name[1]
y_value = list(node.inputs())[1]
y_type = y_value.type()
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.eq", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
return current_inputs, current_outputs | c08fc3120130e949cbba4e147888fabca38c89e3 | 7,341 |
from typing import Tuple
def _create_simple_tf1_conv_model(
use_variable_for_filter=False) -> Tuple[core.Tensor, core.Tensor]:
"""Creates a basic convolution model.
This is intended to be used for TF1 (graph mode) tests.
Args:
use_variable_for_filter: Setting this to `True` makes the filter for the
conv operation a `tf.Variable`.
Returns:
in_placeholder: Input tensor placeholder.
output_tensor: The resulting tensor of the convolution operation.
"""
in_placeholder = array_ops.placeholder(dtypes.float32, shape=[1, 3, 4, 3])
filters = random_ops.random_uniform(shape=(2, 3, 3, 2), minval=-1., maxval=1.)
if use_variable_for_filter:
filters = variables.Variable(filters)
output_tensor = nn_ops.conv2d(
in_placeholder,
filters,
strides=[1, 1, 2, 1],
dilations=[1, 1, 1, 1],
padding='SAME',
data_format='NHWC')
return in_placeholder, output_tensor | 21deebe2de004554a5bdc6559ecf6319947f8109 | 7,342 |
def plot_diversity_bootstrapped(diversity_df):
"""Plots the result of bootstrapped diversity"""
div_lines = (
alt.Chart()
.mark_line()
.encode(
x="year:O",
y=alt.Y("mean(score)", scale=alt.Scale(zero=False)),
color="parametre_set",
)
)
div_bands = (
alt.Chart()
.mark_errorband(extent="ci")
.encode(
x="year:O",
y=alt.Y("score", scale=alt.Scale(zero=False)),
color="parametre_set",
)
)
out = alt.layer(
div_lines, div_bands, data=diversity_df, height=150, width=400
).facet(row="diversity_metric", column="test")
return out | 8b91d1d6d1f7384dcbea6c398a9bde8dfb4aae39 | 7,343 |
def escape(s):
"""
Returns the given string with ampersands, quotes and carets encoded.
>>> escape('<b>oh hai</b>')
'&lt;b&gt;oh hai&lt;/b&gt;'
>>> escape("Quote's Test")
'Quote&#39;s Test'
"""
mapping = (
('&', '&amp;'),
('<', '&lt;'),
('>', '&gt;'),
('"', '&quot;'),
("'", '&#39;'),
)
for tup in mapping:
s = s.replace(tup[0], tup[1])
return s | 2b4971c4e87e613cad457dde6d62806d299cdbcd | 7,344 |
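# Hedged usage sketch for escape above, mirroring the (repaired) doctest output.
print(escape('<b>oh hai</b>'))  # &lt;b&gt;oh hai&lt;/b&gt;
print(escape("Quote's Test"))   # Quote&#39;s Test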
def _get_db_columns_for_model(model):
"""
Return list of columns names for passed model.
"""
return [field.column for field in model._meta._fields()] | 181999f28ca659bf296bcb4dda7ac29ddfe61071 | 7,345 |
def get_UV(filename):
"""
Input: filename (including path)
Output: (wave_leftedges, wav_rightedges, surface radiance) in units of (nm, nm, photons/cm2/sec/nm)
"""
wav_leftedges, wav_rightedges, wav, toa_intensity, surface_flux, SS,surface_intensity, surface_intensity_diffuse, surface_intensity_direct=np.genfromtxt(filename, skip_header=1, skip_footer=0, usecols=(0, 1, 2,3,4,5,6,7,8), unpack=True)
surface_intensity_photons=surface_intensity*(wav/(hc))
return wav_leftedges, wav_rightedges, surface_intensity_photons | cd514ee29ba3ac17cdfcb95c53d4b5df4f4cad80 | 7,346 |
import json
def load_chunks(chunk_file_location, chunk_ids):
"""Load patch paths from specified chunks in chunk file
Parameters
----------
chunk_file_location : str
Path to the chunk file (JSON)
chunk_ids : list of int
The IDs of chunks to retrieve patch paths from
Returns
-------
list of str
Patch paths from the chunks
"""
patch_paths = []
with open(chunk_file_location) as f:
data = json.load(f)
chunks = data['chunks']
for chunk in data['chunks']:
if chunk['id'] in chunk_ids:
patch_paths.extend([[x,chunk['id']] for x in chunk['imgs']])
if len(patch_paths) == 0:
raise ValueError(
f"chunks {tuple(chunk_ids)} not found in {chunk_file_location}")
return patch_paths | c01ec6076141356ae6f3a1dc40add28638739359 | 7,347 |
import argparse
def set_arguments() -> argparse.Namespace:
"""Setting the arguments to run from CMD
:return: arguments
"""
# Adding main description
parser = argparse.ArgumentParser(
description=f'{m.__description__}',
epilog=f'{m.__copyright__}\n | Versioon: {m.__version__}')
# Postional arguments
parser.add_argument('Excel', help='Exceli täielik asukoht, näiteks "./docs/examples/sisend_test.xlsx"')
parser.add_argument('Kinnistu', help='kinnistu numbrite veeru nimi, näiteks "Kinnistu reg.osa"')
parser.add_argument('Out', help='väljundi nimi, salvestatakse samasse kausta run.py-ga, näiteks kinnistu_tulemused')
# Optional arguments
parser.add_argument('-i', '--intermediate', help='Vahetulemused. (Default: %(default)s).', action='store_true')
parser.add_argument('-l', '--logi', help='Logimiseks. (Default: %(default)s).', action='store_true')
args = parser.parse_args() # collection
return args | 26d0d8efdb4bc13874c6ba8bb0cb7976ad000002 | 7,348 |
def get_model_input(batch, input_id=None):
"""
Get model input from batch
batch: batch of model input samples
"""
if isinstance(batch, dict) or isinstance(batch, list):
assert input_id is not None
return batch[input_id]
else:
return batch | 1b12ee86257bfbd5ab23404251bed39c0021f461 | 7,349 |
def issym(b3):
"""test if a list has equal number of positive
and negative values; zeros belong to both. """
npos = 0; nneg = 0
for item in b3:
if (item >= 0):
npos +=1
if (item <= 0):
nneg +=1
if (npos==nneg):
return True
else:
return False | e8cc57eec5bc9ef7f552ad32bd6518daa2882a3e | 7,350 |
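# Hedged usage sketch for issym above: zeros count toward both tallies.
assert issym([1, -1, 0]) is True
assert issym([1, 2, -3]) is False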
def predictOneVsAll(all_theta, X):
"""will return a vector of predictions
for each example in the matrix X. Note that X contains the examples in
rows. all_theta is a matrix where the i-th row is a trained logistic
regression theta vector for the i-th class. You should set p to a vector
of values from 1..K (e.g., p = [1 3 1 2] predicts classes 1, 3, 1, 2
for 4 examples)
"""
m = X.shape[0]
# You need to return the following variables correctly
p = np.zeros((m, 1))
# probs = np.zeros((all_theta.shape[0], X.shape[0]))
# ====================== YOUR CODE HERE ======================
# Instructions: Complete the following code to make predictions using
# your learned logistic regression parameters (one-vs-all).
# You should set p to a vector of predictions (from 1 to
# num_labels).
#
# Hint: This code can be done all vectorized using the max function.
# In particular, the np.argmax function can return the index of the max
# element, for more information see 'numpy.argmax' on the numpy website.
# If your examples are in rows, then, you can use
# np.argmax(probs, axis=1) to obtain the max for each row.
#
p = np.argmax(sigmoid(np.dot(all_theta, X.T)), axis=0) + 1
# for i in range(all_theta.shape[0]):
# probs[i,:] = sigmoid(X @ all_theta[i,:])
# p = (np.argmax(probs, axis=0) + 1)
# =========================================================================
return p | 32bf9d386ef84debe75781a52335588da360b269 | 7,351 |
from typing import Tuple
from typing import List
def lecture_produit(ligne : str) -> Tuple[str, int, float]:
"""Précondition : la ligne de texte décrit une commande de produit.
Renvoie la commande produit (nom, quantité, prix unitaire).
"""
lmots : List[str] = decoupage_mots(ligne)
nom_produit : str = lmots[0]
quantite : int = int(lmots[1])
prix_unitaire : float = float(lmots[2])
return (nom_produit, quantite, prix_unitaire) | 355721522b9711f9bc206f4ab63af6121ef9b3d0 | 7,352 |
def affinity_matrix(test_specs):
"""Generate a random user/item affinity matrix. By increasing the likehood of 0 elements we simulate
a typical recommending situation where the input matrix is highly sparse.
Args:
users (int): number of users (rows).
items (int): number of items (columns).
ratings (int): rating scale, e.g. 5 meaning rates are from 1 to 5.
spars: probability of obtaining zero. This roughly corresponds to the sparseness.
of the generated matrix. If spars = 0 then the affinity matrix is dense.
Returns:
np.array: sparse user/affinity matrix of integers.
"""
np.random.seed(test_specs["seed"])
# uniform probability for the 5 ratings
s = [(1 - test_specs["spars"]) / test_specs["ratings"]] * test_specs["ratings"]
s.append(test_specs["spars"])
P = s[::-1]
# generates the user/item affinity matrix. Ratings are from 1 to 5, with 0s denoting unrated items
X = np.random.choice(
test_specs["ratings"] + 1, (test_specs["users"], test_specs["items"]), p=P
)
Xtr, Xtst = numpy_stratified_split(
X, ratio=test_specs["ratio"], seed=test_specs["seed"]
)
return (Xtr, Xtst) | 8e033d230cbc8c21d6058bcada14aca9fb1d7e68 | 7,353 |
def get_wm_desktop(window):
"""
Get the desktop index of the window.
:param window: A window identifier.
:return: The window's virtual desktop index.
:rtype: util.PropertyCookieSingle (CARDINAL/32)
"""
return util.PropertyCookieSingle(util.get_property(window,
'_NET_WM_DESKTOP')) | 050fe4a97a69317ba875aa59b3bf4144b9e2f83c | 7,354 |
def get_parents(tech_id, model_config):
"""
Returns the full inheritance tree from which ``tech`` descends,
ending with its base technology group.
To get the base technology group,
use ``get_parents(...)[-1]``.
Parameters
----------
tech : str
model_config : AttrDict
"""
tech = model_config.techs[tech_id].essentials.parent
parents = [tech]
while True:
tech = model_config.tech_groups[tech].essentials.parent
if tech is None:
break # We have reached the top of the chain
parents.append(tech)
return parents | 7220a57b770232e335001a0dab74ca2d8197ddfa | 7,355 |
from chaoslib.activity import run_activity
def load_dynamic_configuration(
config: Configuration, secrets: Secrets = None
) -> Configuration:
"""
This is for loading a dynamic configuration if exists.
The dynamic config is a regular activity (probe) in the configuration
section. If there's a use-case for setting a configuration dynamically
right before the experiment is starting. It executes the probe,
and then the return value of this probe will be the config you wish to set.
The dictionary needs to have a key named `type` and as a value `probe`,
alongside the rest of the probe props.
(No need for the `tolerance` key).
For example:
```json
"some_dynamic_config": {
"name": "some config probe",
"type": "probe",
"provider": {
"type": "python",
"module": "src.probes",
"func": "config_probe",
"arguments": {
"arg1":"arg1"
}
}
}
```
`some_dynamic_config` will be set with the return value
of the function config_probe.
Side Note: the probe type can be the same as a regular probe can be,
python, process or http. The config argument contains all the
configurations of the experiment including the raw config_probe
configuration that can be dynamically injected.
The configurations contain as well all the env vars after they are set in
`load_configuration`.
The `secrets` argument contains all the secrets of the experiment.
For `process` probes, the stdout value (stripped of endlines)
is stored into the configuration.
For `http` probes, the `body` value is stored.
For `python` probes, the output of the function will be stored.
We do not stop on errors but log a debug message and do not include the
key into the result dictionary.
"""
# we delay this so that the configuration module can be imported leanly
# from elsewhere
conf = {}
secrets = secrets or {}
had_errors = False
logger.debug("Loading dynamic configuration...")
for (key, value) in config.items():
if not (isinstance(value, dict) and value.get("type") == "probe"):
conf[key] = config.get(key, value)
continue
# we have a dynamic config
name = value.get("name")
provider_type = value["provider"]["type"]
value["provider"]["secrets"] = deepcopy(secrets)
try:
output = run_activity(value, conf, secrets)
except Exception:
had_errors = True
logger.debug(f"Failed to load configuration '{name}'", exc_info=True)
continue
if provider_type == "python":
conf[key] = output
elif provider_type == "process":
if output["status"] != 0:
had_errors = True
logger.debug(
f"Failed to load configuration dynamically "
f"from probe '{name}': {output['stderr']}"
)
else:
conf[key] = output.get("stdout", "").strip()
elif provider_type == "http":
conf[key] = output.get("body")
if had_errors:
logger.warning(
"Some of the dynamic configuration failed to be loaded."
"Please review the log file for understanding what happened."
)
return conf | aaeac23c0700c00c5b7946e010299fe5b2e74d82 | 7,356 |
import sys
import copy
import io
def main(argv=None):
"""Main function: Parse, process, print"""
if argv is None:
argv = sys.argv
args = parse_args(argv)
if not args:
exit(1)
original_tags = copy.deepcopy(tags.load(args["config"]))
with io.open(args["src"], "r", encoding="utf-8", errors="ignore") as fin:
lines = fin.readlines()
transacs_orig = qifile.parse_lines(lines, options=args)
try:
transacs = process_transactions(transacs_orig, options=args)
except EOFError: # exit on Ctrl + D: restore original tags
tags.save(args["config"], original_tags)
return 1
res = qifile.dump_to_buffer(transacs + transacs_orig[len(transacs) :])
if not args.get("dry-run"):
with io.open(args["dest"], "w", encoding="utf-8") as dest:
dest.write(res)
if args["batch"] or args["dry-run"]:
print("\n" + res)
return 0 if len(transacs) == len(transacs_orig) else 1 | 0aae5e3ab5d92de2b2fd2306645696733e81de38 | 7,357 |
import hashlib
import urllib
def profile_avatar(user, size=200):
"""Return a URL to the user's avatar."""
try: # This is mostly for tests.
profile = user.profile
except (Profile.DoesNotExist, AttributeError):
avatar = settings.STATIC_URL + settings.DEFAULT_AVATAR
profile = None
else:
if profile.is_fxa_migrated:
avatar = profile.fxa_avatar
elif profile.avatar:
avatar = profile.avatar.url
else:
avatar = settings.STATIC_URL + settings.DEFAULT_AVATAR
if avatar.startswith("//"):
avatar = "https:%s" % avatar
if user and hasattr(user, "email"):
email_hash = hashlib.md5(force_bytes(user.email.lower())).hexdigest()
else:
email_hash = "00000000000000000000000000000000"
url = "https://secure.gravatar.com/avatar/%s?s=%s" % (email_hash, size)
# If the url doesn't start with http (local dev), don't pass it to
# to gravatar because it can't use it.
if avatar.startswith("https") and profile and profile.is_fxa_migrated:
url = avatar
elif avatar.startswith("http"):
url = url + "&d=%s" % urllib.parse.quote(avatar)
return url | a810af7f7abb4a5436a2deed8c4e1069aa5d504c | 7,358 |
import sys
def szz_reverse_blame(ss_path, sha_to_blame_on, buggy_line_num, buggy_file_path_in_ss, buggy_SHA):
"""Reverse-blames `buggy_line_num` (added in `buggy_SHA`) onto `sha_to_blame_on`."""
ss_repo = Repo(ss_path)
ss_name = pathLeaf(ss_path)
try:
# If buggy_SHA equals sha_to_blame_on, then git-blame-reverse fails.
# In our code buggy_SHA and sha_to_blame_on are never equal, but just to be safe...
if sha_to_blame_on != buggy_SHA:
curr_blame_info = ss_repo.git.blame('--reverse', '-w', '-n', '-f', '--abbrev=40', \
'-L' + buggy_line_num + ',+1', \
'--', buggy_file_path_in_ss, \
buggy_SHA + '..' + sha_to_blame_on,
stdout_as_string = False)
curr_buggy_line_num = curr_blame_info.split('(')[0].split()[-1]
curr_buggy_file_path_in_ss = ' '.join(curr_blame_info.split('(')[0].split()[1:-1])
return [ss_name, curr_buggy_file_path_in_ss, sha_to_blame_on, curr_buggy_line_num]
else:
return [ss_name, buggy_file_path_in_ss, sha_to_blame_on, buggy_line_num]
except Exception as e:
sys.stderr.write("\nError in reverse-blame! Continuing with next line_num...\n" + str(e))
return None | abfefc8b3a02b4d3e63d151ab7213321b8b43d62 | 7,359 |
import tqdm
def union(graphs, use_tqdm: bool = False):
"""Take the union over a collection of graphs into a new graph.
Assumes iterator is longer than 2, but not infinite.
:param iter[BELGraph] graphs: An iterator over BEL graphs. Can't be infinite.
:param use_tqdm: Should a progress bar be displayed?
:return: A merged graph
:rtype: BELGraph
Example usage:
>>> import pybel
>>> g = pybel.from_bel_script('...')
>>> h = pybel.from_bel_script('...')
>>> k = pybel.from_bel_script('...')
>>> merged = union([g, h, k])
"""
it = iter(graphs)
if use_tqdm:
it = tqdm(it, desc='taking union')
try:
target = next(it)
except StopIteration as e:
raise ValueError('no graphs given') from e
try:
graph = next(it)
except StopIteration:
return target
else:
target = target.copy()
left_full_join(target, graph)
for graph in it:
left_full_join(target, graph)
return target | 8ea9bae0386c497a5fe31c8bd44099ee450b2b2a | 7,360 |
def get_month_n_days_from_cumulative(monthly_cumulative_days):
"""
Transform consecutive number of days in monthly data to actual number of days.
EnergyPlus monthly results report a cumulative (consecutive) number of days for each month.
Raw data reports table as 31, 59..., this function calculates and returns
actual number of days for each month 31, 28...
"""
old_num = monthly_cumulative_days.pop(0)
m_actual_days = [old_num]
for num in monthly_cumulative_days:
new_num = num - old_num
m_actual_days.append(new_num)
old_num += new_num
return m_actual_days | 5ede033023d357a60ba5eb7e9926325d24b986e8 | 7,361 |
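# Hedged usage sketch for get_month_n_days_from_cumulative above: cumulative day
# counts for Jan-Mar become per-month counts. Note the input list is mutated by pop().
print(get_month_n_days_from_cumulative([31, 59, 90]))  # [31, 28, 31]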
def get_text(name):
"""Returns some text"""
return "Hello " + name | bff30de2184c84f6ed1c4c1831ed9fd782f479c9 | 7,362 |
import re
def apply_template(assets):
"""
Processes the template.
Used for overwrite ``docutils.writers._html_base.Writer.apply_template``
method.
``apply_template(<assets>)``
``assets`` (dictionary)
Assets to add at the template, see ``ntdocutils.writer.Writer.assets``.
returns
function - Template processor.
Example
=======
.. code:: python
apply_template({
"before_styles": '<link rel="stylesheet" href="styles.css" />',
"scripts": '<script src="script.js"></script>'
'<script src="other_script.js"></script>'
})
"""
def apply_template(self):
template_file = open(self.document.settings.template, "rb")
template = str(template_file.read(), "utf-8")
template_file.close()
# Escape ``%`` that don't are special fields
pattern = r"%(?!\((" + "|".join(self.visitor_attributes) + r")\)s)"
template = re.subn(pattern, "%%", template)[0]
subs = self.interpolation_dict()
return template.format(**assets) % subs
return apply_template | 51042e25f701935d668d91a923155813ce60b381 | 7,363 |
def harvest(post):
"""
Filter the post data for just the funding allocation formset data.
"""
data = {k: post[k] for k in post if k.startswith("fundingallocation")}
return data | 67f400caf87f2accab30cb3c519e7014792c84d7 | 7,364 |
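# Hedged usage sketch for harvest above: any mapping with string keys works as post data.
post = {"fundingallocation-0-amount": "100", "csrfmiddlewaretoken": "abc"}
print(harvest(post))  # {'fundingallocation-0-amount': '100'}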
import os
from sys import flags
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'calendar-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
#print('Storing credentials to ' + credential_path)
return credentials | b786fef63c51380b543f04aa6c7fd91b3ede03d3 | 7,365 |
def model2(x, input_size, output_size):
"""! Fully connected model [InSize]x800x[OutSize]
Implementation of a [InSize]x800x[OutSize] fully connected model.
Parameters
----------
@param x : placeholder for input data
@param input_size : size of input data
@param output_size : size of output data
Returns
-------
@retval logits : output
@retval logits_dup : a copy of output
@retval w_list : trainable parameters
@retval w_list_dup : a copy of trainable parameters
"""
#==================================================================================================================
## model definition
mu = 0
sigma = 0.2
weights = {
'wfc': tf.Variable(tf.truncated_normal(shape=(input_size,800), mean = mu, stddev = sigma, seed = 1)),
'out': tf.Variable(tf.truncated_normal(shape=(800,output_size), mean = mu, stddev = sigma, seed = 1))
}
biases = {
'bfc': tf.Variable(tf.zeros(800)),
'out': tf.Variable(tf.zeros(output_size))
}
# Flatten input.
c_flat = flatten(x)
# Layer 1: Fully Connected. Input = input_size. Output = 800.
# Activation.
fc = fc_relu(c_flat, weights['wfc'], biases['bfc'])
# Layer 2: Fully Connected. Input = 800. Output = output_size.
logits = tf.add(tf.matmul(fc, weights['out']), biases['out'])
w_list = []
for w,b in zip(weights, biases):
w_list.append(weights[w])
w_list.append(biases[b])
#==================================================================================================================
## duplicate the model used in ProxSVRG
weights_dup = {
'wfc': tf.Variable(tf.truncated_normal(shape=(input_size,800), mean = mu, stddev = sigma, seed = 1)),
'out': tf.Variable(tf.truncated_normal(shape=(800,output_size), mean = mu, stddev = sigma, seed = 1))
}
biases_dup = {
'bfc': tf.Variable(tf.zeros(800)),
'out': tf.Variable(tf.zeros(output_size))
}
# Flatten input.
c_flat_dup = flatten(x)
# Layer 1: Fully Connected. Input = input_size. Output = 800.
# Activation.
fc_dup = fc_relu(c_flat_dup, weights_dup['wfc'], biases_dup['bfc'])
# Layer 2: Fully Connected. Input = 800. Output = output_size.
logits_dup = tf.add(tf.matmul(fc_dup, weights_dup['out']), biases_dup['out'])
w_list_dup = []
for w,b in zip(weights_dup, biases_dup):
w_list_dup.append(weights_dup[w])
w_list_dup.append(biases_dup[b])
return logits, logits_dup, w_list, w_list_dup | 74a7f9129865e1d2b6cbfe767c7f218d53ee50e1 | 7,366 |
def cut_bin_depths(
dataset: xr.Dataset,
depth_range: tp.Union[int, float, list] = None
) -> xr.Dataset:
"""
Return dataset with cut bin depths if the depth_range are not outside the depth span.
Parameters
----------
dataset :
depth_range :
min or (min, max) to be included in the dataset.
Bin depths outside this range will be cut.
Returns
-------
dataset with depths cut.
"""
if depth_range:
if not isinstance(depth_range, (list, tuple)):
if depth_range > dataset.depth.max():
l.log(
"depth_range value is greater than the maximum bin depth. Depth slicing aborded."
)
else:
dataset = dataset.sel(depth=slice(depth_range, None))
l.log(f"Bin of depth inferior to {depth_range} m were cut.")
elif len(depth_range) == 2:
if dataset.depth[0] > dataset.depth[-1]:
depth_range.reverse()
if depth_range[0] > dataset.depth.max() or depth_range[1] < dataset.depth.min():
l.log(
"depth_range values are outside the actual depth range. Depth slicing aborted."
)
else:
dataset = dataset.sel(depth=slice(*depth_range))
l.log(
f"Bin of depth inferior to {depth_range[0]} m and superior to {depth_range[1]} m were cut."
)
else:
l.log(
f"depth_range expects a maximum of 2 values but {len(depth_range)} were given. Depth slicing aborted."
)
return dataset | ab4561711d118dc620100bec5e159dc4b7a29f92 | 7,367 |
def create_edgelist(file, df):
"""
creates an edgelist based on genre info
"""
# load edges from the (sub)genres themselves
df1 = (pd
.read_csv(file,
dtype='str'))
# get edges from the book descriptions df
df2 = (df[['title',
'subclass']]
.rename(columns={'title':'Edge_From',
'subclass':'Edge_To'})
.sort_values(by='Edge_To'))
# combine the two dfs
df3 = (df1
.append(df2,
ignore_index=True))
# consistently assign categories
df4 = (df3
.stack()
.astype('category')
.unstack())
# make the categorical values explicit for later convenience
for name in df4.columns:
df4['N' + name] = (df4[name]
.cat
.codes)
return df4 | 9cfba48eca977e8b2e3217078bd6d112c465ea23 | 7,368 |
def CodeRange(code1, code2):
"""
CodeRange(code1, code2) is an RE which matches any character
with a code |c| in the range |code1| <= |c| < |code2|.
"""
if code1 <= nl_code < code2:
return Alt(RawCodeRange(code1, nl_code),
RawNewline,
RawCodeRange(nl_code + 1, code2))
else:
return RawCodeRange(code1, code2) | c63213c63d96361451e441cf6923015238dae8f8 | 7,369 |
def sort_by_date(data):
"""
The sort_by_date function sorts the lists by their datetime
object
:param data: the list of lists containing parsed UA data
:return: the sorted date list of lists
"""
# Supply the reverse option to sort by descending order
return [x[0:6:4] for x in sorted(data, key=itemgetter(4),
reverse=True)] | f8d18b80404edcf141a56f47938ea09531d30df7 | 7,370 |
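# Hedged usage sketch for sort_by_date above (assumes `from operator import itemgetter`
# is in scope, as the function requires). Rows are hypothetical parsed UA entries; the
# result keeps columns 0 and 4 of each row, newest first.
from datetime import datetime
rows = [
    ["Safari", "-", "-", "-", datetime(2020, 1, 1), "osx"],
    ["Chrome", "-", "-", "-", datetime(2021, 6, 1), "win"],
]
print(sort_by_date(rows))  # [['Chrome', datetime(2021, 6, 1, 0, 0)], ['Safari', ...]]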
def get_menu_option():
"""
Function to display menu options and asking the user to choose one.
"""
print("1. View their next 5 fixtures...")
print("2. View their last 5 fixtures...")
print("3. View their entire current season...")
print("4. View their position in the table...")
print("5. View the club roster...")
print("6. View season statistics...")
print("7. View team information...")
print("8. Sign up to your club's weekly newsletter...")
print("9. Calculate odds on next game...")
print()
return input("CHOOSE AN OPTION BELOW BY ENTERING THE MENU NUMBER OR ENTER 'DONE' ONCE YOU ARE FINISHED: ") | 69e71555d9896d0c462b2e7b542ec87aea9213eb | 7,371 |
def pdf(mu_no):
""" the probability distribution function which the number of fibers per MU should follow """
return pdf_unscaled(mu_no) / scaling_factor_pdf | 2d6fd461d12da6b00bbf20de7a7be7d61112014c | 7,372 |
import requests
def get_weather_by_key(key):
"""
Returns weather information for a given database key
Args:
key (string) -- database key for weather information
Returns:
None or Dict
"""
url = "%s/weather/%s.json" % (settings.FIREBASE_URL, key)
r = requests.get(url)
if r.status_code != 200:
return None
return r.json() | 8ab3bfa6b5924b726fef9a9c0b8bd9d47cf9dfc8 | 7,373 |
import warnings
def source_receiver_midpoints(survey, **kwargs):
"""
Calculate source receiver midpoints.
Input:
:param SimPEG.electromagnetics.static.resistivity.Survey survey: DC survey object
Output:
:return numpy.ndarray midx: midpoints x location
:return numpy.ndarray midz: midpoints z location
"""
if not isinstance(survey, dc.Survey):
raise ValueError("Input must be of type {}".format(dc.Survey))
if len(kwargs) > 0:
warnings.warn(
"The keyword arguments of this function have been deprecated."
" All of the necessary information is now in the DC survey class",
DeprecationWarning,
)
# Pre-allocate
midxy = []
midz = []
for ii, source in enumerate(survey.source_list):
tx_locs = source.location
if isinstance(tx_locs, list):
Cmid = (tx_locs[0][:-1] + tx_locs[1][:-1]) / 2
zsrc = (tx_locs[0][-1] + tx_locs[1][-1]) / 2
tx_sep = np.linalg.norm((tx_locs[0][:-1] - tx_locs[1][:-1]))
else:
Cmid = tx_locs[:-1]
zsrc = tx_locs[-1]
Pmids = []
for receiver in source.receiver_list:
rx_locs = receiver.locations
if isinstance(rx_locs, list):
Pmid = (rx_locs[0][:, :-1] + rx_locs[1][:, :-1]) / 2
else:
Pmid = rx_locs[:, :-1]
Pmids.append(Pmid)
Pmid = np.vstack(Pmids)
midxy.append((Cmid + Pmid) / 2)
diffs = np.linalg.norm((Cmid - Pmid), axis=1)
if np.allclose(diffs, 0.0): # likely a wenner type survey.
midz = zsrc - tx_sep / 2 * np.ones_like(diffs)
else:
midz.append(zsrc - diffs / 2)
return np.vstack(midxy), np.hstack(midz) | adec937949a4293d35c7a57aad7125f2e1113794 | 7,374 |
import copy
def fix_source_scale(
transformer, output_std: float = 1, n_samples: int = 1000, use_copy: bool = True,
) -> float:
""" Adjust the scale for a data source to fix the output variance of a
transformer.
The transformer's data source must have a `scale` parameter.
Parameters
----------
transformer
Transformer whose output variance is optimized. This should behave like
`Arma`: it needs to have a `transform` method that can be called like
`transformer.transform(U=source)`; and it needs an attribute called
`default_source`.
output_std
Value to which to fix the transformer's output standard deviation.
n_samples
Number of samples to generate for each optimization iteration.
use_copy
If true, a deep copy of the data source is made for the optimization, so
that the source's random generator is unaffected by this procedure.
Returns the final value for the scale.
"""
output_var = output_std ** 2
source = transformer.default_source
if use_copy:
source_copy = copy.deepcopy(source)
else:
source_copy = source
def objective(scale: float):
source_copy.scale = np.abs(scale)
samples = transformer.transform(n_samples, X=source_copy)
return np.var(samples) / output_var - 1
soln = optimize.root_scalar(
objective, x0=np.sqrt(output_var / 2), x1=np.sqrt(2 * output_var), maxiter=100,
)
source.scale = np.abs(soln.root)
return source.scale | bf2dc0690732ce7677a484afee75fa7701b3d0e8 | 7,375 |
def samplePinDuringCapture(f, pin, clock):
"""\
Configure Arduino to enable sampling of a particular light sensor or audio
signal input pin. Only enabled pins are read when capture() is subsequently called.
:param f: file handle for the serial connection to the Arduino Due
:param pin: The pin to enable.
:param clock: a :class:`dvbcss.clock` clock object
Values for the pin parameter:
* 0 enables reading of light sensor 0 (on Arduino analogue pin 0).
* 1 enables reading of audio input 0 (on Arduino analogue pin 1).
* 2 enables reading of light sensor 1 (on Arduino analogue pin 2).
* 3 enables reading of audio input 1 (on Arduino analogue pin 3).
:returns: (t1,t2,t3,t4) measuring the specified clock object and arduino clock, as per :func`writeCmdAndTimeRoundTrip`
See :func:`writeCmdAndTimeRoundTrip` for details of the meaning of the returned round-trip timing data
"""
CMD = CMDS_ENABLE_PIN[pin]
return writeCmdAndTimeRoundTrip(f, clock, CMD) | 246dc76eb07b9240439befdffdc3f31376647a64 | 7,376 |
def year_filter(year = None):
"""
Determine whether the input year range is a single value or not.
Parameters
----------
year :
The input (start, end) year pair
Returns
-------
boolean
True if the input year range covers a single year, False otherwise
"""
if year[0] == year[1]:
single_year = True
else:
single_year = False
return single_year | 35868a72196015c20517179dc89cd65b5601e969 | 7,377 |
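# A minimal usage sketch for year_filter (illustrative only): the argument is
# assumed to be a (start, end) year pair, as implied by the indexing above.
assert year_filter((2020, 2020)) is True
assert year_filter((2018, 2021)) is False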
def distance(p1, p2):
"""
Return the Euclidean distance between two QPointF objects.
Computes the 2D distance via the Pythagorean theorem, using the QPointF
difference and the QVector2D.length() member function.
"""
if not (isinstance(p1, QPointF) and isinstance(p2, QPointF)):
raise ValueError('distance: p1 and p2 must both be of type QPointF')
return toVector(p2 - p1).length() | 2e8b2d8fcbb05b24798c8507bef8a32b8b9468f3 | 7,378 |
import math
import numpy
def make_primarybeammap(gps, delays, frequency, model, extension='png',
plottype='beamsky', figsize=14, directory=None, resolution=1000, zenithnorm=True,
b_add_sources=False):
"""
"""
print("Output beam file resolution = %d , output directory = %s" % (resolution, directory))
# (az_grid, za_grid) = beam_tools.makeAZZA(resolution,'ZEA') #Get grids in radians
(az_grid, za_grid, n_total, dOMEGA) = beam_tools.makeAZZA_dOMEGA(resolution, 'ZEA') # TEST SIN vs. ZEA
az_grid = az_grid * 180 / math.pi
za_grid = za_grid * 180 / math.pi
# az_grid+=180.0
alt_grid = 90 - (za_grid)
obstime = su.time2tai(gps)
# first go from altitude to zenith angle
theta = (90 - alt_grid) * math.pi / 180
phi = az_grid * math.pi / 180
beams = {}
# this is the response for XX and YY
if model == 'analytic' or model == '2014':
# Handles theta and phi as floats, 1D, or 2D arrays (and probably higher dimensions)
beams['XX'], beams['YY'] = primary_beam.MWA_Tile_analytic(theta, phi,
freq=frequency, delays=delays,
zenithnorm=zenithnorm, power=True)
elif model == 'avg_EE' or model == 'advanced' or model == '2015' or model == 'AEE':
beams['XX'], beams['YY'] = primary_beam.MWA_Tile_advanced(theta, phi,
freq=frequency, delays=delays,
power=True)
elif model == 'full_EE' or model == '2016' or model == 'FEE' or model == 'Full_EE':
# model_ver = '02'
# h5filepath = 'MWA_embedded_element_pattern_V' + model_ver + '.h5'
beams['XX'], beams['YY'] = primary_beam.MWA_Tile_full_EE(theta, phi,
freq=frequency, delays=delays,
zenithnorm=zenithnorm, power=True)
# elif model == 'full_EE_AAVS05':
# # h5filepath='/Users/230255E/Temp/_1508_Aug/embedded_element/h5/AAVS05_embedded_element_02_rev0.h5'
# # h5filepath = 'AAVS05_embedded_element_02_rev0.h5'
# beams['XX'], beams['YY'] = primary_beam.MWA_Tile_full_EE(theta, phi,
# freq=frequency, delays=delays,
# zenithnorm=zenithnorm, power=True)
pols = ['XX', 'YY']
# Get Haslam and interpolate onto grid
my_map = get_Haslam(frequency)
mask = numpy.isnan(za_grid)
za_grid[numpy.isnan(za_grid)] = 90.0 # Replace nans as they break the interpolation
sky_grid = map_sky(my_map['skymap'], my_map['RA'], my_map['dec'], gps, az_grid, za_grid)
sky_grid[mask] = numpy.nan # Remask beyond the horizon
# test:
# delays1 = numpy.array([[6, 6, 6, 6,
# 4, 4, 4, 4,
# 2, 2, 2, 2,
# 0, 0, 0, 0],
# [6, 6, 6, 6,
# 4, 4, 4, 4,
# 2, 2, 2, 2,
# 0, 0, 0, 0]],
# dtype=numpy.float32)
# za_delays = {'0': delays1 * 0, '14': delays1, '28': delays1 * 2}
# tile = mwa_tile.get_AA_Cached()
# za_delay = '0'
# (ax0, ay0) = tile.getArrayFactor(az_grid, za_grid, frequency, za_delays[za_delay])
# val = numpy.abs(ax0)
# val_max = numpy.nanmax(val)
# print "VALUE : %.8f %.8f %.8f" % (frequency, val_max[0], val[resolution / 2, resolution / 2])
beamsky_sum_XX = 0
beam_sum_XX = 0
Tant_XX = 0
beam_dOMEGA_sum_XX = 0
beamsky_sum_YY = 0
beam_sum_YY = 0
Tant_YY = 0
beam_dOMEGA_sum_YY = 0
for pol in pols:
# Get gridded sky
print('frequency=%.2f , polarisation=%s' % (frequency, pol))
beam = beams[pol]
beamsky = beam * sky_grid
beam_dOMEGA = beam * dOMEGA
print('sum(beam)', numpy.nansum(beam))
print('sum(beamsky)', numpy.nansum(beamsky))
beamsky_sum = numpy.nansum(beamsky)
beam_sum = numpy.nansum(beam)
beam_dOMEGA_sum = numpy.nansum(beam_dOMEGA)
Tant = numpy.nansum(beamsky) / numpy.nansum(beam)
print('Tant=sum(beamsky)/sum(beam)=', Tant)
if pol == 'XX':
beamsky_sum_XX = beamsky_sum
beam_sum_XX = beam_sum
Tant_XX = Tant
beam_dOMEGA_sum_XX = beam_dOMEGA_sum
if pol == 'YY':
beamsky_sum_YY = beamsky_sum
beam_sum_YY = beam_sum
Tant_YY = Tant
beam_dOMEGA_sum_YY = beam_dOMEGA_sum
filename = '%s_%.2fMHz_%s_%s' % (gps, frequency / 1.0e6, pol, model)
fstring = "%.2f" % (frequency / 1.0e6)
if plottype == 'all':
plottypes = ['beam', 'sky', 'beamsky', 'beamsky_scaled']
else:
plottypes = [plottype]
for pt in plottypes:
if pt == 'beamsky':
textlabel = 'Beam x sky %s (LST %.2f hr), %s MHz, %s-pol, Tant=%.1f K' % (gps,
get_LST(gps),
fstring,
pol,
Tant)
plot_beamsky(beamsky, frequency, textlabel, filename, extension,
obstime=obstime, figsize=figsize, directory=directory)
elif pt == 'beamsky_scaled':
textlabel = 'Beam x sky (scaled) %s (LST %.2f hr), %s MHz, %s-pol, Tant=%.1f K (max T=%.1f K)' % (gps,
get_LST(gps),
fstring,
pol,
Tant,
float(numpy.nanmax(beamsky)))
plot_beamsky(beamsky, frequency, textlabel, filename + '_scaled', extension,
obstime=obstime, figsize=figsize, vmax=numpy.nanmax(beamsky) * 0.4, directory=directory)
elif pt == 'beam':
textlabel = 'Beam for %s, %s MHz, %s-pol' % (gps, fstring, pol)
plot_beamsky(beam, frequency, textlabel, filename + '_beam', extension,
obstime=obstime, figsize=figsize, cbar_label='', directory=directory,
b_add_sources=b_add_sources,
az_grid=az_grid, za_grid=za_grid)
elif pt == 'sky':
textlabel = 'Sky for %s (LST %.2f hr), %s MHz, %s-pol' % (gps, get_LST(gps), fstring, pol)
plot_beamsky(sky_grid, frequency, textlabel, filename + '_sky', extension,
obstime=obstime, figsize=figsize, directory=directory, b_add_sources=b_add_sources,
az_grid=az_grid, za_grid=za_grid)
return (beamsky_sum_XX,
beam_sum_XX,
Tant_XX,
beam_dOMEGA_sum_XX,
beamsky_sum_YY,
beam_sum_YY,
Tant_YY,
beam_dOMEGA_sum_YY) | 371a29412b52b27c69193bed1e945eeed6a988d7 | 7,379 |
import json
def view_page(request, content_id=None):
"""Displays the content in a more detailed way"""
if request.method == "GET":
if content_id:
if content_id.isdigit():
try:
# Get the contents details
content_data = Content.objects.get(pk=int(content_id))
content_data.fire = int(content_data.contents_history.all().aggregate(Avg("vote"))["vote__avg"] * 10) if content_data.contents_history.all().aggregate(Avg("vote"))["vote__avg"] else 0
try:
# Get all the available comments of this particular content
comment_data = content_data.content_comments.all()
if comment_data:
# Convert Data to JSON list
comment_list = json.loads(comment_data[0].comment)
content_comments = []
for a in comment_list:
try:
user = User.objects.get(pk=a["user_id"])
content_comments.append({
"id": a["id"],
"content_id": a["content_id"],
"profile_picture": (user.profile.profile_picture.url).replace("&export=download", "") if user.profile.profile_picture.url else "/static/teeker/assets/default_img/avatar/avataaars.png",
"username": user.username,
"user_id": user.pk,
"comment": a["comment"],
"date": a["date"]
})
except User.DoesNotExist:
print("Broken Comment...")
else:
content_comments = []
except json.JSONDecodeError:
content_comments = []
# Check if the content isn't suspended
if content_data.suspended and not request.user.is_staff:
content_data = {
"title": "CONTENT UNAVAILABLE"
}
# Check if the user is logged in
if request.user.is_authenticated:
# Check if the content is in the logged in user's recommended list
try:
if int(content_id) in json.loads(request.user.profile.recommended):
content_data.recommended = True
else:
content_data.recommended = False
except json.JSONDecodeError:
content_data.recommended = False
else:
content_data.recommended = False
except Content.DoesNotExist:
content_data = {
"title": "CONTENT UNAVAILABLE"
}
else:
content_data = {
"title": "CONTENT UNAVAILABLE"
}
else:
content_data = {
"title": "CONTENT UNAVAILABLE"
}
html_content = {
"content_data": content_data,
"content_comments": content_comments
}
return render(request, "teeker/site_templates/view.html", html_content) | 11908d7d0f75377485022ab87a93e7f27b2b626d | 7,380 |
def run_feat_model(fsf_file):
""" runs FSL's feat_model which uses the fsf file to generate
files necessary to run film_gls to fit design matrix to timeseries"""
clean_fsf = fsf_file.strip('.fsf')
cmd = 'feat_model %s'%(clean_fsf)
out = CommandLine(cmd).run()
if not out.runtime.returncode == 0:
return None, out.runtime.stderr
mat = fsf_file.replace('.fsf', '.mat')
return mat, cmd | 4b033ff1aceb60cdf0c39ebfbefc0841dc4df507 | 7,381 |
def exportDSV(input, delimiter = ',', textQualifier = '"', quoteall = 0, newline = '\n'):
"""
PROTOTYPE:
exportDSV(input, delimiter = ',', textQualifier = '\"', quoteall = 0)
DESCRIPTION:
Exports to DSV (delimiter-separated values) format.
ARGUMENTS:
- input is list of lists of data (as returned by importDSV)
- delimiter is character used to delimit columns
- textQualifier is character used to delimit ambiguous data
- quoteall is boolean specifying whether to quote all data or only data
that requires it
RETURNS:
data as string
"""
if not delimiter or type(delimiter) != type(''): raise InvalidDelimiter
if not textQualifier or type(textQualifier) != type(''): raise InvalidTextQualifier
# double-up all text qualifiers in data (i.e. can't becomes can''t)
data = map(lambda i, q = textQualifier:
map(lambda j, q = q: str(j).replace(q, q * 2), i),
input)
if quoteall: # quote every data value
data = map(lambda i, q = textQualifier:
map(lambda j, q = q: q + j + q, i),
data)
else: # quote only the values that contain qualifiers, delimiters or newlines
data = map(lambda i, q = textQualifier, d = delimiter:
map(lambda j, q = q, d = d: ((j.find(q) != -1 or j.find(d) != -1
or j.find('\n') != -1)
and (q + j + q)) or j, i), data)
# assemble each line with delimiters
data = [delimiter.join(line) for line in data]
# assemble all lines together, separated by newlines
data = newline.join(data)
return data | 28075667459e872ec0713efb834a9c3aa1dc620e | 7,382 |
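# A minimal usage sketch for exportDSV (illustrative only; it assumes the
# InvalidDelimiter / InvalidTextQualifier exceptions referenced above are
# defined elsewhere in this module, and they are never raised for valid input):
rows = [["name", "notes"], ["Alice", 'says "hi", often']]
# Values containing the delimiter or qualifier are quoted and qualifiers doubled:
#   name,notes
#   Alice,"says ""hi"", often"
print(exportDSV(rows))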
import pandas as pd
from sklearn.model_selection import train_test_split
def DatasetSplit(X, y):
#Creating the test set and validation set.
# separating the target
""" To create the validation set, we need to make sure that the distribution of each class is similar
in both training and validation sets. stratify = y (which is the class or tags of each frame) keeps
the similar distribution of classes in both the training as well as the validation set."""
# creating the training and validation set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2, stratify = y)
# creating dummies of target variable for train and validation set
y_train = pd.get_dummies(y_train)
y_test = pd.get_dummies(y_test)
return X_train, X_test, y_train, y_test | 15c9d6acf51f6535bbd4396be83752b98c6a1fa0 | 7,383 |
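# A minimal usage sketch for DatasetSplit (illustrative only): X_demo stands in
# for an array of frame features and y_demo for the per-frame class labels.
import numpy as np
X_demo = np.random.rand(10, 8)  # 10 frames, 8 features each
y_demo = np.array([0, 1] * 5)  # two balanced classes
X_tr, X_te, y_tr, y_te = DatasetSplit(X_demo, y_demo)
print(X_tr.shape, X_te.shape)  # (8, 8) (2, 8)
print(list(y_te.columns))  # one-hot label columns: [0, 1]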
def parse_children(root):
"""
:param root: root tags of .xml file
"""
attrib_list = set()
for child in root:
text = child.text
if text:
text = text.strip(' \n\t\r')
attrib_list = attrib_list | get_words_with_point(text)
attrib_list = attrib_list | parse_children(child)
for attribute_name, attribute_value in child.attrib.items():
if '.' in attribute_value:
attrib_list.add(attribute_value)
"""
returns list of attribute_value
"""
return attrib_list | 5062b39775bdb1b788fa7b324de108367421f743 | 7,384 |
import pandas as pd
def load_data(ETF):
"""
Function to load the ETF data from a file, remove NaN values and set the Date column as index.
...
Attributes
----------
ETF : filepath
"""
data = pd.read_csv(ETF, usecols=[0,4], parse_dates=[0], header=0)
data.dropna(subset = ['Close', 'Date'], inplace=True)
data_close = pd.DataFrame(data['Close'])
data_close.index = pd.to_datetime(data['Date'])
return data_close | 84b4c20c7d74c7e028e62b0147662e9a54311148 | 7,385 |
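# A minimal usage sketch for load_data; "SPY.csv" is a hypothetical ETF price
# file with Date in column 0 and Close in column 4, as the reader above expects.
closes = load_data("SPY.csv")
print(closes.head())  # closing prices indexed by Date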
def preprocess_LLIL_GOTO(bv, llil_instruction):
""" Replaces integer addresses of llil instructions with hex addresses of assembly """
func = get_function_at(bv, llil_instruction.address)
# We have to use the lifted IL since the LLIL ignores comparisons and tests
lifted_instruction = list(
[k for k in find_lifted_il(func, llil_instruction.address) if k.operation == LowLevelILOperation.LLIL_GOTO]
)[0]
lifted_il = func.lifted_il
llil_instruction.dest = hex(lifted_il[lifted_instruction.dest].address).replace("L", "")
return llil_instruction | 656b6088816779395a84d32fe77e28866618b9ff | 7,386 |
from celery.result import AsyncResult
from sanic.response import json
async def get_limited_f_result(request, task_id):
"""
This endpoint accepts the task_id and returns the result if ready.
"""
task_result = AsyncResult(task_id)
result = {
"task_id": task_id,
"task_status": task_result.status,
"task_result": task_result.result
}
return json(result) | e459d1963e2de829802927e3265b41bbe4da6bfe | 7,387 |
def process_addr():
"""Process the bridge IP address/hostname."""
server_addr = request.form.get('server_addr')
session['server_addr'] = server_addr
try:
leap_response = get_ca_cert(server_addr)
session['leap_version'] = leap_response['Body'] \
['PingResponse']['LEAPVersion']
except ConnectionRefusedError:
flash("A connection to %s could not be established. Please check "
"the IP address and try again." % server_addr, 'danger')
return redirect(url_for('wizard')) | 109d3e0652caa5e06fe00702f43640304c30323d | 7,388 |
import requests
import json
import datetime
import time
def get_bkk_list(request):
"""板块课(通识选修课)"""
myconfig = Config.objects.all().first()
year = (myconfig.nChoose)[0:4]
term = (myconfig.nChoose)[4:]
if term == "1":
term = "3"
elif term == "2":
term = "12"
if myconfig.apichange:
data = {
'xh':request.POST.get("xh"),
'pswd':request.POST.get("pswd"),
'bkk':request.POST.get("bkk")
}
res = requests.post(url=myconfig.otherapi+"/choose/bkk",data=data)
return HttpResponse(json.dumps(json.loads(res.text), ensure_ascii=False),
content_type="application/json,charset=utf-8")
if myconfig.maintenance:
return HttpResponse(json.dumps({'err': 'The academic affairs system is under maintenance; please wait for it to return to normal!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if request.method == 'POST':
if request.POST:
xh = request.POST.get("xh")
pswd = request.POST.get("pswd")
bkk = request.POST.get("bkk")
else:
return HttpResponse(json.dumps({'err': 'Please submit valid POST data'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
if not Students.objects.filter(studentId=int(xh)):
content = ('【%s】[%s] accessed block courses without logging in' % (datetime.datetime.now().strftime('%H:%M:%S'), xh))
writeLog(content)
return HttpResponse(json.dumps({'err': 'Not logged in yet, please log in again!'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
stu = Students.objects.get(studentId=int(xh))
try:
bkk = "1" if bkk=="2" else "2"
startTime = time.time()
print('【%s】viewed block courses' % stu.name)
JSESSIONID = str(stu.JSESSIONID)
route = str(stu.route)
cookies_dict = {
'JSESSIONID': JSESSIONID,
'route': route
}
cookies = requests.utils.cookiejar_from_dict(cookies_dict)
person = Xuanke(base_url=base_url, cookies=cookies, year=year, term=term)
bkk_list = person.get_bkk_list(bkk)
endTime = time.time()
spendTime = endTime - startTime
if spendTime > 30:
ServerChan = config["ServerChan"]
text = "板块课超时"
if ServerChan == "none":
return HttpResponse(json.dumps({'err':'板块课超时'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
else:
requests.get(ServerChan + 'text=' + text)
return HttpResponse(json.dumps({'err':'板块课超时'}, ensure_ascii=False),
content_type="application/json,charset=utf-8")
content = ('【%s】[%s] accessed block courses, took %.2fs' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name, spendTime))
writeLog(content)
return HttpResponse(json.dumps(bkk_list, ensure_ascii=False), content_type="application/json,charset=utf-8")
except Exception as e:
print(e)
content = ('【%s】[%s] error while accessing block courses' % (datetime.datetime.now().strftime('%H:%M:%S'), stu.name))
writeLog(content)
if myconfig.isKaptcha:
return get_kaptcha(xh)
else:
sta = update_cookies(request)
person = Xuanke(base_url=base_url, cookies=sta, year=year, term=term)
bkk_list = person.get_bkk_list(bkk)
return HttpResponse(json.dumps(bkk_list, ensure_ascii=False), content_type="application/json,charset=utf-8")
else:
return HttpResponse(json.dumps({'err': 'Please use POST and submit valid data'}, ensure_ascii=False),
content_type="application/json,charset=utf-8") | 03010646ce1e83f644f9bf44b8dcb4e5b8355e52 | 7,389 |
import calendar
def mkmonth(year, month, dates, groups):
"""Make an array of data for the year and month given.
"""
cal = calendar.monthcalendar(int(year), month)
for row in cal:
for index in range(len(row)):
day = row[index]
if day == 0:
row[index] = None
else:
date = '%04d-%02d-%02d' % (year, month, day)
items = dates.get(date, ())
grp = 0
len_items = len(items)
if len_items > 0:
while grp < len(groups):
grp += 1
if len_items <= groups[grp - 1]:
break
row[index] = [day, grp, items, date]
while len(cal) < 6:
cal.append([None] * 7)
return dict(name=calendar.month_name[month], weeks=cal,
startdate='%04d-%02d' % (year, month))
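# A minimal usage sketch for mkmonth (illustrative only): dates maps ISO day
# strings to item lists, and groups gives the item-count threshold per group.
march = mkmonth(2021, 3, {"2021-03-15": ["event a", "event b"]}, [1, 5])
print(march["name"])  # March
print(march["startdate"])  # 2021-03
# Each day cell in march["weeks"] is [day, group, items, iso-date]; padding days are None.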
def is_extension(step_str):
"""Return true if step_str is an extension or Any.
Args:
step_str: the string to evaluate
Returns:
True if step_str is an extension
Raises:
ValueError: if step_str is not a valid step.
"""
if not is_valid_step(step_str):
raise ValueError('Not a valid step in a path: "' + step_str + '"')
return step_str[0] == "(" | a3b30e238b3b8c42b645d18ae370dea501d1f389 | 7,391 |
def diff_list(first, second):
"""
Get difference of lists.
"""
second = set(second)
return [item for item in first if item not in second] | 19975990b5a05433266b3258cd541cca54ab83ac | 7,392 |
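# A minimal usage sketch for diff_list: order and duplicates of `first` are preserved.
print(diff_list([1, 2, 3, 2, 4], [2, 4]))  # [1, 3]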
from typing import Optional
def validate_dissolution_statement_type(filing_json, legal_type) -> Optional[list]:
"""Validate dissolution statement type of the filing."""
msg = []
dissolution_stmt_type_path = '/filing/dissolution/dissolutionStatementType'
dissolution_stmt_type = get_str(filing_json, dissolution_stmt_type_path)
if legal_type == Business.LegalTypes.COOP.value:
if not dissolution_stmt_type:
msg.append({'error': _('Dissolution statement type must be provided.'),
'path': dissolution_stmt_type_path})
return msg
if not DissolutionStatementTypes.has_value(dissolution_stmt_type):
msg.append({'error': _('Invalid Dissolution statement type.'),
'path': dissolution_stmt_type_path})
return msg
return None | 868c9f0d6b229c303462a4dee7df16f27cd58898 | 7,393 |
import argparse
def parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(
usage='%(prog)s [options] <target path> <image> [image] ...')
parser.add_argument(
'-c', '--captions',
dest='captions',
action='store_true',
default=False,
help='read image captions from text files ("<IMAGE_NAME>.txt")')
parser.add_argument(
'--no-resize',
dest='no_resize',
action='store_true',
default=False,
help='do not resize images, just copy them')
parser.add_argument(
'-s', '--size',
dest='max_image_size',
type=parse_dimension_arg,
default=ARGS_DEFAULT_MAX_IMAGE_SIZE,
help='set maximum image size [default: {}]'
.format(ARGS_DEFAULT_MAX_IMAGE_SIZE))
parser.add_argument(
'-t', '--thumbnail-size',
dest='max_thumbnail_size',
type=parse_dimension_arg,
default=ARGS_DEFAULT_MAX_THUMBNAIL_SIZE,
help='set maximum thumbnail size [default: {}]'
.format(ARGS_DEFAULT_MAX_THUMBNAIL_SIZE))
parser.add_argument(
'--title',
dest='title',
help='set gallery title on the website')
parser.add_argument(
'--lightbox',
dest='lightbox',
action='store_true',
default=False,
help='Enable lightbox effect on the website. This disables sub-page creation per picture.')
parser.add_argument(
'--html-only',
dest='html_only',
action='store_true',
default=False,
help='Only generate HTML. Do not process images. This is useful if already processed once but need to re-create html pages.')
parser.add_argument(
'--no-optimize-image',
dest='no_optimize_image',
action='store_true',
default=False,
help='Do not optimize images (skip size reduction and metadata removal).')
# First positional argument.
parser.add_argument('destination_path')
# Remaining positional arguments (at least one), as a list.
parser.add_argument('full_image_filenames', nargs='+')
return parser.parse_args() | 87b2d65edb10647eb2b292819cc411f2d82cd2a0 | 7,394 |
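# Hypothetical command-line invocations for the parser above (illustrative only;
# "./gallery.py" is a placeholder script name, and the exact dimension syntax
# depends on parse_dimension_arg, which is defined elsewhere in this module):
#   ./gallery.py --title "Holidays 2021" --lightbox out/gallery photos/a.jpg photos/b.jpg
#   ./gallery.py -c -s <MAX_IMAGE_SIZE> -t <MAX_THUMBNAIL_SIZE> out/gallery photos/a.jpg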
from typing import List
def weave(left: List[int], right: List[int]) -> List[List[int]]:
""" Gives all possible combinations of left and right
keeping the original order on left and right """
if not left or not right:
return [left] if left else [right]
left_result: List[List[int]] = weave_helper(left, right)
right_result: List[List[int]] = weave_helper(right, left)
return left_result + right_result | 9a9717e43337802e6cef87a37b7d8d01493ebc8a | 7,395 |
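# A behaviour sketch for weave (illustrative only; it assumes weave_helper,
# defined elsewhere, implements the usual "take the head of one list and
# recurse on the rest" step of the weaving algorithm):
#   weave([1, 2], [3, 4]) should contain every interleaving that keeps
#   1 before 2 and 3 before 4, e.g.
#   [1, 2, 3, 4], [1, 3, 2, 4], [1, 3, 4, 2], [3, 1, 2, 4], [3, 1, 4, 2], [3, 4, 1, 2]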
import torch
def compute_rel_attn_value(p_attn, rel_mat, emb, ignore_zero=True):
"""
Compute a part of *attention weight application* and *query-value product*
in generalized RPE.
(See eq. (10) - (11) in the MuseBERT paper.)
Specifically,
- We use distributive law on eq. (11). The function computes the
second term:
$ sum_j (alpha_{ij} * sum_a Emb_a^K(r_{ij}^a)) $
Here,
- b for batch size, h for n_head, vs for vocabulary size.
- dtype is torch.float unless specified.
:param p_attn: (b, h, L_q, L_k)
:param rel_mat: (b, Lq, Lk)
:param emb: (h, vs, d)
:param ignore_zero: bool. Whether to exclude the first vocab.
:return: (b, h, Lq, d)
"""
vs = emb.size(-2)
# bool_relmat: (b, Lq, vs - 1, Lk), dtype: torch.float
bool_relmat = compute_bool_rel_mat(rel_mat, vs, ignore_zero=ignore_zero)
# p_attn: -> (b, d, Lq, 1, 1, Lk)
# bool_relmat: -> (b, 1, L_q, vs - 1, L_k, 1)
# acmlt_p_attn: (b, d, Lq, vs - 1, 1, 1) -> (b, d, Lq, vs - 1)
acmlt_p_attn = \
torch.matmul(p_attn.unsqueeze(-2).unsqueeze(-2),
bool_relmat.unsqueeze(1).unsqueeze(-1)
).squeeze(-1).squeeze(-1)
# acc_p_attn: -> (b, h, Lq, 1, vs - 1)
# emb: -> (1, h, 1, vs, d)
# rel_scores: (b, h, Lq, 1, d) -> (b, h, Lq, d)
start_ind = 1 if ignore_zero else 0
rel_values = \
torch.matmul(acmlt_p_attn.unsqueeze(-2),
emb[:, start_ind:].unsqueeze(0).unsqueeze(-3)
).squeeze(-2)
return rel_values | a39ca9d5933bc334648994fc5211355a496f8126 | 7,396 |
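# A shape sketch for compute_rel_attn_value (illustrative only; it assumes
# compute_bool_rel_mat from the same module returns a (b, Lq, vs - 1, Lk)
# one-hot-style indicator of rel_mat when ignore_zero is True):
#   p_attn  : (b, h, Lq, Lk) = (2, 4, 16, 16)   attention weights per head
#   rel_mat : (b, Lq, Lk)    = (2, 16, 16)      integer relation ids in [0, vs)
#   emb     : (h, vs, d)     = (4, 8, 32)       per-head relation embeddings
#   compute_rel_attn_value(p_attn, rel_mat, emb).shape == (2, 4, 16, 32)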
import sys
import os
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options.
"""
# Defaults
option = None
fh = sys.stdin # file handle
if not len(args):
# Reading from pipe with default option
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif len(args) == 1:
# One of two options: option & Pipe OR file & default option
if args[0].startswith('-'):
option = args[0][1:]
if sys.stdin.isatty(): # ensure the PDB data is streamed in
emsg = 'ERROR!! No data to process!\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
if not os.path.isfile(args[0]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
elif len(args) == 2:
# Two options: option & File
if not args[0].startswith('-'):
emsg = 'ERROR! First argument is not an option: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
if not os.path.isfile(args[1]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[1]))
sys.stderr.write(__doc__)
sys.exit(1)
option = args[0][1:]
fh = open(args[1], 'r')
else: # Whatever ...
sys.stderr.write(__doc__)
sys.exit(1)
# Validate option
if option is not None and option != 'multi':
emsg = 'ERROR!! You provided an invalid option: \'{}\'\n'
sys.stderr.write(emsg.format(option))
sys.stderr.write(__doc__)
sys.exit(1)
return (option, fh) | 23d2ca78c7bad056c686ac9d82c5800220a3f351 | 7,397 |
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal | 7dda66bccb5fcecbe55bd0f3ecb64171748947a6 | 7,398 |
def lend(request):
"""
Lend view.
It receives the data from the lend form, process and validates it,
and reloads the page if everything is OK
Args:
- request (HttpRequest): the request
Returns:
"""
logged_user = get_logged_user(request)
if logged_user is not None and logged_user.user_role == UserRole.LENDER:
d = dict(request.POST)
d['lender_input'] = logged_user.id
errors = Loan.objects.basic_validator(d)
if len(errors) > 0:
for key, value in errors.items():
messages.error(request, value)
else:
borrower = request.POST.get('borrower_input', 0)
amount = request.POST.get('amount_input', 0)
new_loan = Loan.objects.create(
borrower=User.objects.get(id=borrower),
lender=logged_user,
amount=int(amount)
)
messages.info(request, 'Loan executed successfully')
return redirect('lender', id=logged_user.id)
else:
request.session.clear()
return redirect('/') | 59fdf04eafc1772b8ef880a1340af57739a71d25 | 7,399 |