content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def drop_table(name, con):
"""
Drop a table from the database.
Parameters
----------
name : string, name of SQL table
con : sqlalchemy.engine.Engine or sqlite3.Connection
Returns
-------
True
Examples
--------
>>> import pandas as pd
>>> from sqlalchemy import create_engine
>>> from tidyframe import drop_table
>>>
>>> engine = create_engine("sqlite:///raw_table.db")
>>> df = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 2}])
>>> df.to_sql("raw_table", engine)
>>> drop_table("raw_table", engine)
True
"""
table = load_table_schema(name, con)
table.drop()
return True | c86ad4e71c24bdfaba924171a21e74096ab8c11e | 10,956 |
def class_info_interface(**class_name):
"""
Set Class_Name, Class_Index, and DNN Model
\nclass_name (kwargs) : Input Class Name with list type,
if want to set class number, add tuple parameters
like 'class_info_interface(class_name = [list], class_number = [list])'
\nclass_number : Default the number of class_name
"""
global window
window = Tk()
window.title("Auto Labeling Input Class Name")
global entry_num
global entry_name
global entry_model
# 1. DNN Model Interface
ttk.Label(window, text = "DNN Model : ").grid(row = 0,
column = 0,
padx = 10,
pady = 10)
entry_model = ttk.Entry(window)
entry_model.grid(row = 0,
column = 1,
padx = 10,
pady = 10)
# 2. Class name Interface
ttk.Label(window, text = "Class name : ").grid(row = 1,
column = 0,
padx = 10,
pady = 10)
entry_name = ttk.Entry(window)
entry_name.grid(row = 1,
column = 1,
padx = 10,
pady = 10)
# 3. Class number Interface
ttk.Label(window, text = "Class number : ").grid(row = 2,
column = 0,
padx = 10,
pady = 10)
entry_num = ttk.Entry(window)
entry_num.grid(row = 2,
column = 1,
padx = 10,
pady = 10)
ttk.Button(window, text="OK", command=get_class_info).grid(row = 2,
column = 2,
padx = 10,
pady = 10)
# 4. User Name Guide Interface
if not class_name:  # no class names supplied
ttk.Label(window, text = "Username \n\n" +
"professor_seo \n" +
"jaeseok \n" +
"hun \n").grid(row = 3,
column = 1,
padx = 10,
pady = 10)
ttk.Label(window, text = "| Class Name\n\n" +
"| 0\n| 1\n| 2\n").grid(row = 3,
column = 2,
padx = 10,
pady = 10)
elif len(class_name) != 0:  # keyword arguments supplied
class_name_list = list()
for key, value in class_name.items():
print(key, value)
class_name_list.append(value)
# Class Name [0]
name_value = str()
index_value = str()
for i, name in enumerate(class_name_list[0]):
name_value = name_value + name + ' \n'
index_value = index_value + '| ' + str(i) + ' \n'
ttk.Label(window, text = "Username \n\n" +
name_value).grid(row = 3,
column = 1,
padx = 10,
pady = 10)
# Class Index [1]
if len(class_name) == 2:
index_value = str()
for index in class_name_list[1]:
index_value = index_value + '|' + \
str(index) + ' \n'
ttk.Label(window, text = "| Class Name\n\n" +
index_value).grid(row = 3,
column = 2,
padx = 10,
pady = 10)
print("list")
else:
raise ValueError("Not Supported value. See function docstring")
window.mainloop()
return user_name, user_num, dnn_model | a9da1515192cf67bfe326ab90ff7c12a32106304 | 10,957 |
def uint8(value):
"""
Create an SPL ``uint8`` value.
Returns:
Expression: Expression representing the value.
"""
return streamsx.spl.op.Expression('UINT8', int(value)) | 7e8562b4ec82bbb932c92a9af4cfd06224b6596d | 10,958 |
import re
def print_table(log_results,
platform_width = 0,
build_failures_width = 0,
test_failures_width = 0,
successful_width = 0,
space_char = " ",
list_separator = DEFAULT_LIST_SEPARATOR):
"""Print out a table in the requested format (text or markdown)."""
# Print table header
output_lines = list()
headers = [
re.sub(r'\b \b', space_char, PLATFORM_HEADER.ljust(platform_width)),
re.sub(r'\b \b', space_char,BUILD_FAILURES_HEADER.ljust(build_failures_width)),
re.sub(r'\b \b', space_char,TEST_FAILURES_HEADER.ljust(test_failures_width))
] + (
[re.sub(r'\b \b', space_char,SUCCESSFUL_TESTS_HEADER.ljust(successful_width))]
if FLAGS.include_successful else []
)
# Print header line.
output_lines.append(("|" + " %s |" * len(headers)) % tuple(headers))
# Print a |-------|-------|---------| line.
output_lines.append(("|" + "-%s-|" * len(headers)) %
tuple([ re.sub("[^|]","-", header) for header in headers ]))
# Iterate through platforms and print out table lines.
for platform in sorted(log_results.keys()):
if log_results[platform]["build_failures"] or log_results[platform]["test_failures"] or FLAGS.include_successful:
columns = [
re.sub(r'\b \b', space_char, platform.ljust(platform_width)),
format_result(log_results[platform]["build_failures"], justify=build_failures_width, list_separator=list_separator),
format_result(log_results[platform]["test_failures"], justify=test_failures_width, list_separator=list_separator),
] + (
[format_result(log_results[platform]["successful"], justify=successful_width, list_separator=list_separator)]
if FLAGS.include_successful else []
)
output_lines.append(("|" + " %s |" * len(headers)) % tuple(columns))
return output_lines | e12ada2d86f3dcecef6292b5c052094599abda4b | 10,959 |
def checkLengthSmaller(op, graph, frm, to):
"""
Confirm the resulting video has fewer frames than the source.
:param op:
:param graph:
:param frm:
:param to:
:return:
@type op: Operation
@type graph: ImageGraph
@type frm: str
@type to: str
"""
edge = graph.get_edge(frm, to)
durationChangeTuple = getValue(edge, 'metadatadiff.video.nb_frames')
if durationChangeTuple is None or \
(durationChangeTuple[0] == 'change' and int(durationChangeTuple[1]) < int(durationChangeTuple[2])):
return (Severity.ERROR,"Length of video is not shorter") | a2101371e4f8af0ebaab1ece5d7cc31f4a277aca | 10,962 |
import logging
def enable_log(fmt='[%(asctime)s] [%(process)5s] %(levelname)s %(module)s %(name)s %(message)s',
enable_color=True, filename=None):
"""
Clears all log handlers, and adds color handler and/or file handlers
:param fmt: logging format string
:param enable_color: True to enable
:param filename: log file location
:return: True once the handlers have been configured
"""
lgr = logging.getLogger()
lgr.handlers.clear()
# if there's no special requirements for logging
# we still want the formatting.
if not enable_color and (filename is None or filename == ''):
loghandler = logging.StreamHandler()
logfmt = logging.Formatter(fmt)
loghandler.setFormatter(logfmt)
lgr.addHandler(loghandler)
return True
if enable_color:
loghandler = logging.StreamHandler()
logfmt = ColorLogFormatter(fmt)
loghandler.setFormatter(logfmt)
lgr.addHandler(loghandler)
if filename is not None and filename != '':
logfilename = abspath(filename)
fhandler = logging.FileHandler(logfilename)
logfmt = logging.Formatter(fmt)
fhandler.setFormatter(logfmt)
lgr.addHandler(fhandler)
return True | 3e018012e7cff555d86e93396485c9644dfb32ae | 10,963 |
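A minimal usage sketch for the row above (not part of the dataset row). It only exercises the plain-handler branch, since ColorLogFormatter and abspath are assumed to be defined elsewhere in the original module:
import logging

# Plain stream handler only (enable_color=False, no filename), so no
# dependence on the module-level ColorLogFormatter or abspath helpers.
enable_log(enable_color=False)
logging.getLogger(__name__).info("logging is configured")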
def build_con_and_ds(dataset: str):
"""
Builds a test connector and test datasource for testing with an API key.
Keep this function if you ever want to run the tests without skipping them
due to missing Bearer tokens.
How to use:
Replace the build_ds function with this one in the test_aircall file.
Be sure to also replace the endpoints inside the aircall connector file.
"""
con = AircallConnector(name='mah_test', bearer_auth_id='abc123efg')
ds = AircallDataSource(name='mah_ds', domain='test_domain', dataset=dataset, limit=1,)
return con, ds | 7fa19e15e0a38c22f575d6509e3156a874b8ea60 | 10,964 |
def is_palindrome_recursive(text, left=None, right=None):
"""time complexity: O(1) because you are checking which conditional will run, which does not involve any loops
text = str
left = int
right = int"""
if len(text) == 0:
return True
given = get_letters(text)
if left is None and right is None:
left = 0
right = len(str) - 1
if given[left] != given[right]:
return False
elif left >= right:
return True
else:
return is_palindrome_recursive(given, left+1, right-1) | d7bf4ab6e7f43d6418cde3485f94dc2d83e40180 | 10,966 |
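A rough usage sketch for the row above; get_letters is not included in the snippet, so a hypothetical stand-in that keeps only lower-cased alphabetic characters is assumed here:
def get_letters(text):
    # hypothetical helper: keep alphabetic characters only, lower-cased
    return ''.join(ch.lower() for ch in text if ch.isalpha())

print(is_palindrome_recursive("Race car"))   # True
print(is_palindrome_recursive("Race cars"))  # False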
import operator
import numpy
def flip(m, axis=None):
"""Reverses the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default, axis=None, will flip over
all of the axes of the input array. If axis is negative it counts from the
last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
Returns
-------
out : ndarray
A view of m with the entries of axis reversed. Since a view is returned, this
operation is done in constant time.
Note
----
flip(m, 0) is equivalent to flipud(m).
flip(m, 1) is equivalent to fliplr(m).
flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all positions.
flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at position 0 and
position 1.
See Also
--------
flipud : Flips array in the up/down direction.
fliplr : Flips array in the left/right direction.
Examples
--------
>>> import nlcpy as vp
>>> A = vp.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
<BLANKLINE>
[[4, 5],
[6, 7]]])
>>> vp.flip(A, 0)
array([[[4, 5],
[6, 7]],
<BLANKLINE>
[[0, 1],
[2, 3]]])
>>> vp.flip(A, 1)
array([[[2, 3],
[0, 1]],
<BLANKLINE>
[[6, 7],
[4, 5]]])
>>> vp.flip(A)
array([[[7, 6],
[5, 4]],
<BLANKLINE>
[[3, 2],
[1, 0]]])
>>> vp.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
<BLANKLINE>
[[1, 0],
[3, 2]]])
>>> A = vp.random.randn(3, 4, 5)
>>> vp.all(vp.flip(A, 2) == A[:, :, ::-1, ...])
array(True)
"""
m = nlcpy.asanyarray(m)
if axis is None:
indexer = (slice(None, None, -1),) * m.ndim
else:
if type(axis) is nlcpy.ndarray:
axis = axis.get()
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
_axis = []
for ax in axis:
if type(ax) is nlcpy.ndarray:
ax = ax.get()
if type(ax) is numpy.ndarray:
if ax.size > 1:
raise TypeError(
'only size-1 arrays can be converted to Python scalars')
else:
ax = ax.item()
_axis.append(ax + m.ndim if ax < 0 else ax)
axis = _axis
if len(axis) != len(set(axis)):
raise ValueError('repeated axis')
indexer = [slice(None) for i in range(m.ndim)]
for ax in axis:
if ax >= m.ndim or ax < 0:
raise AxisError(
'axis {0} is out of bounds for array of dimension {1}'
.format(ax, m.ndim))
indexer[ax] = slice(None, None, -1)
indexer = tuple(indexer)
return m[indexer] | 495b75a548d94bc9dbc9827678b08282efb104d8 | 10,967 |
def radius_of_gyration(pos):
"""
Radius of gyration of a group of positions.
Does not account for periodic boundaries.
"""
com = np.mean(pos, axis = 0)
delta = pos - com
rgv = np.sqrt(np.sum(delta**2, axis = 0) / len(pos))
return np.linalg.norm(rgv) | a12450cf63768bf9a238bef11a3360ce49e3092f | 10,968 |
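A small numpy check of the row above (numpy is assumed to be imported as np in the enclosing module): the four corners of a 2x2 square centred on the origin have a radius of gyration of sqrt(2).
import numpy as np

pos = np.array([[ 1.0,  1.0, 0.0],
                [ 1.0, -1.0, 0.0],
                [-1.0,  1.0, 0.0],
                [-1.0, -1.0, 0.0]])
print(radius_of_gyration(pos))  # ~1.4142 (sqrt(2))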
def get_metadata_for_list(commit_range, git_dir=None, count=None,
series=None, allow_overwrite=False):
"""Reads out patch series metadata from the commits
This does a 'git log' on the relevant commits and pulls out the tags we
are interested in.
Args:
commit_range (str): Range of commits to count (e.g. 'HEAD..base')
git_dir (str): Path to git repository (None to use default)
count (int): Number of commits to list, or None for no limit
series (Series): Object to add information into. By default a new series
is started.
allow_overwrite (bool): Allow tags to overwrite an existing tag
Returns:
Series: Object containing information about the commits.
"""
if not series:
series = Series()
series.allow_overwrite = allow_overwrite
stdout = get_list(commit_range, git_dir, count)
pst = PatchStream(series, is_log=True)
for line in stdout.splitlines():
pst.process_line(line)
pst.finalise()
return series | 0134a836f28bf97e5196c63f80f5b07d372cc5d4 | 10,969 |
def side_seperator(lsep,rsep):
"""
Configure a custom side-lined formatter.
A side-lined formatter is:
`[DATE] SEP "L_SEP" EVENT "R_SEP" LOG`
`loggy.side_seperator(lsep="||",rsep="||") # Default vals`
"""
fmt['ls']=lsep
fmt['rs']=rsep
return fmt | 803519e93cef7342e9f951090823fc536f37839f | 10,970 |
from collections import OrderedDict
def make_sequential(layer_configs, input):
"""Makes sequential layers automatically.
Arguments:
layer_configs: An OrderedDict that contains the configurations of a
sequence of layers. The key is the layer_name while the value is a dict
contains hyper-parameters needed to instantiate the corresponding
layer. The key of the inner dict is the name of the hyper-parameter and
the value is the value of the corresponding hyper-parameter. Note that
the key "layer_type" indicates the type of the layer.
input: A tensor that mimics the batch input of the model. The first dim
is the batch size. All other dims should be exactly the same as the
real input shape in the later training.
Returns:
A sequence of layers organized by nn.Sequential.
"""
layers = OrderedDict()
for layer_name in layer_configs:
arguments = deepcopy(layer_configs[layer_name])
layer_type = arguments.pop("layer_type")
input_shape = [int(j) for j in input.data.size()]
arguments["input_shape"] = input_shape
layers.update({layer_name: make_layer(layer_type, **arguments)})
input = layers[layer_name](input)
return nn.Sequential(layers) | 662c8787115c7d6d3e499a89aa7e8c301b9e5e4b | 10,972 |
import random
import numpy as np
import scipy.special as sps
def Pol_Dyn_ExploreWithNUTS(resultsList,totalSimDays=1000,numDaysRemain=1000,\
totalBudget=1000,numBudgetRemain=1000,policyParamList=[0],startDay=0):
"""
Grab intermediate and end node distributions via NUTS. Identify intermediate node
sample variances. Pick an intermediate node, weighed towards picking those
with higher sample variances. Pick an outlet from this intermediate node's
column in the transition matrix A, again by a weighting (where 0% nodes
have a non-zero probability of being selected). [log((p/1-p) + eps)?]
policyParamList = [number days to plan for, sensitivity, specificity, M,
Madapt, delta]
(Only enter the number of days to plan for in the main simulation code,
as the other parameters will be pulled from the respective input areas)
"""
#Initialize our output, a list with the above mentioned outputs
sampleSchedule = []
# How many days to plan for?
numDaysToSched = min(policyParamList[0],numDaysRemain)
usedBudgetSoFar = 0
firstTestDay = totalSimDays - numDaysRemain
if numDaysRemain == totalSimDays: # Our initial schedule should just be a distributed exploration
currNode = resultsList[0][0]
for currDay in range(numDaysToSched):
numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\
min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day
for testInd in range(numToTest): # Iterate through our end nodes
if currNode > resultsList[len(resultsList)-1][0]:
currNode = resultsList[0][0]
sampleSchedule.append([firstTestDay+currDay,currNode])
currNode += 1
else:
sampleSchedule.append([firstTestDay+currDay,currNode])
currNode += 1
usedBudgetSoFar += 1
else: # Generate NUTS sample using current results and use it to generate a new schedule
ydata = []
nSamp = []
for rw in resultsList:
ydata.append(rw[2])
nSamp.append(rw[1])
A = simHelpers.GenerateTransitionMatrix(resultsList)
sens, spec, M, Madapt, delta = policyParamList[1:]
NUTSsamples = simEst.GenerateNUTSsamples(ydata,nSamp,A,sens,spec,M,Madapt,delta)
# Store sample variances for intermediate nodes
NUTSintVars = []
for intNode in range(A.shape[1]):
currVar = np.var(sps.expit(NUTSsamples[:,intNode]))
NUTSintVars.append(currVar)
# Normalize sum of all variances to 1
NUTSintVars = NUTSintVars/np.sum(NUTSintVars)
# Now pick from these samples to generate projections
for currDay in range(numDaysToSched):
numToTest = int(np.floor((numBudgetRemain-usedBudgetSoFar) / (numDaysRemain-currDay))) +\
min((numBudgetRemain-usedBudgetSoFar) % (numDaysRemain-currDay),1) # How many samples to conduct in the next day
for testInd in range(numToTest):
# Pick an intermediate node to "target", with more emphasis on higher sample variances
rUnif = random.uniform(0,1)
for intInd in range(A.shape[1]):
if rUnif < np.sum(NUTSintVars[0:(intInd+1)]):
targIntInd = intInd
break
# Go through the same process with the column of A
# pertaining to this target intermediate node
AtargCol = [row[targIntInd] for row in A]
# Add a small epsilon, for 0 values, and normalize
AtargCol = np.add(AtargCol,1e-3)
AtargCol = AtargCol/np.sum(AtargCol)
rUnif = random.uniform(0,1)
for intEnd in range(A.shape[0]):
if rUnif < np.sum(AtargCol[0:(intEnd+1)]):
currInd = intEnd
break
currNode = resultsList[currInd][0]
sampleSchedule.append([firstTestDay+currDay,currNode])
usedBudgetSoFar += 1
# Need to sort this list before passing it through
sampleSchedule.sort(key=lambda x: x[0])
return sampleSchedule | a96638a7f2816a42069cfa714822e728ee7e325f | 10,973 |
import numpy
def calc_cos_t(hb_ratio, d, theta_s_i, theta_v_i, relative_azimuth):
"""Calculate t cossine.
Args:
hb_ratio (int): h/b.
d (numpy array): d.
theta_s_i (numpy array): theta_s_i.
theta_v_i (numpy array): theta_v_i.
relative_azimuth (numpy array): relative_azimuth.
Returns:
cos_t : numpy.array.
"""
return hb_ratio * numpy.sqrt(d*d + numpy.power(numpy.tan(theta_s_i)*numpy.tan(theta_v_i)*numpy.sin(relative_azimuth), 2)) / (sec(theta_s_i) + sec(theta_v_i)) | 5ac37f2aa8994b75bb0c71d9f54616ff041a5ff6 | 10,974 |
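A usage sketch for the row above; the sec helper is not shown in the snippet, so a secant (1/cos) implementation is assumed here, and the input arrays are arbitrary illustrative values:
import numpy

def sec(x):
    # assumed helper matching the usage above: secant = 1 / cos(x)
    return 1.0 / numpy.cos(x)

theta_s = numpy.array([0.3])  # solar zenith (radians)
theta_v = numpy.array([0.5])  # view zenith (radians)
phi = numpy.array([1.0])      # relative azimuth (radians)
d = numpy.array([0.4])        # illustrative distance term
print(calc_cos_t(2.0, d, theta_s, theta_v, phi))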
from typing import Callable
def guild_only() -> Callable:
"""A decorator that limits the usage of a slash command to guild contexts.
The command won't be able to be used in private message channels.
Example
---------
.. code-block:: python3
from discord import guild_only
@bot.slash_command()
@guild_only()
async def test(ctx):
await ctx.respond('You\'re in a guild.')
"""
def inner(command: Callable):
if isinstance(command, ApplicationCommand):
command.guild_only = True
else:
command.__guild_only__ = True
return command
return inner | d8ca993dd0ea71791458edd3c3bcec0551262552 | 10,975 |
import re
def truncate(text, words=25):
"""Remove tags and truncate text to the specified number of words."""
return " ".join(re.sub("(?s)<.*?>", " ", text).split()[:words]) | 18d994a52dc5549aabb7cc8f33d5755be5392208 | 10,976 |
from datetime import datetime
from google.cloud import bigquery
def _run_query_create_log(query, client, destination_table=None):
"""
Runs a BigQuery query job
:param query: Query to run as a string
:param client: BigQuery client object
:param destination_table: Optional destination table reference; if None, a
timestamped table in the cmap-big-table.cmap_query dataset is used
:return: QueryJob object
"""
# Job config
job_config = bigquery.QueryJobConfig()
if destination_table is not None:
job_config.destination = destination_table
else:
timestamp_name = datetime.now().strftime("query_%Y%m%d%H%M%S")
project = "cmap-big-table"
dataset = "cmap_query"
dest_tbl = ".".join([project, dataset, timestamp_name])
job_config.destination = dest_tbl
job_config.create_disposition = "CREATE_IF_NEEDED"
return client.query(query, job_config=job_config) | 265361c150f654bc8826cda096d85b4ae2911317 | 10,977 |
def read_disparity_gt(filename: str) -> np.ndarray:
"""
reads the disparity files used for training/testing.
:param filename: name of the file.
:return: data points.
"""
points = []
with open(filename, 'r') as file:
for line in file:
line = line.split(' ')
frame = int(line[0])
x_rgb = int(line[1])
y = int(line[2])
x_ir = int(line[3])
points.append([frame, x_rgb, y, x_ir])
return np.array(points, dtype=np.int32) | bad5ad6698d58e5173709cf866fb027367daa8b1 | 10,978 |
def purchase_index(request):
"""displays users purchase history"""
login_id = request.user.id
context = {'histories': Purchase_history.objects.all().filter(acc_id=login_id).order_by('-date')} # get users purchase history
return render(request, 'profile/histories/purchase_history.html', context) | d353e839ff08adfeebaa28a708d36df4d21a7ea8 | 10,979 |
from numpy import array, pi
from numpy.linalg import solve
def solve_EEC(self):
"""Compute the parameters dict for the equivalent electrical circuit
cf "Advanced Electrical Drives, analysis, modeling, control"
Rik de doncker, Duco W.J. Pulle, Andre Veltman, Springer edition
<--- --->
-----R-----wsLqIq---- -----R-----wsLdId----
| | | |
| | | BEMF
| | | |
---------Id---------- ---------Iq----------
---> --->
Ud Uq
Parameters
----------
self : EEC_PMSM
an EEC_PMSM object
Return
------
out_dict : dict
Dict containing all magnetic quantities that have been calculated in EEC
"""
felec = self.freq0
ws = 2 * pi * felec
out_dict = dict()
if "Ud" in self.parameters: # Voltage driven
# Prepare linear system
XR = array(
[
[self.parameters["R20"], -ws * self.parameters["Lq"]],
[ws * self.parameters["Ld"], self.parameters["R20"]],
]
)
XE = array([0, ws * self.parameters["phi"]])
XU = array([self.parameters["Ud"], self.parameters["Uq"]])
# Solve system
XI = solve(XR, XU - XE)
out_dict["Id"] = XI[0]
out_dict["Iq"] = XI[1]
out_dict["Ud"] = self.parameters["Ud"]
out_dict["Uq"] = self.parameters["Uq"]
else: # Current Driven
Ud = (
self.parameters["R20"] * self.parameters["Id"]
- ws * self.parameters["Phiq"]
)
Uq = (
self.parameters["R20"] * self.parameters["Iq"]
+ ws * self.parameters["Phid"]
)
out_dict["Ud"] = Ud
out_dict["Uq"] = Uq
out_dict["Id"] = self.parameters["Id"]
out_dict["Iq"] = self.parameters["Iq"]
return out_dict | ad862028447acd038e76ba95960b089985bffe9b | 10,980 |
def is_active(seat):
"""Return True if seat is empty. If occupied return False. """
active = seat_map.get(seat, ".")
return True if active == "#" else False | 098c4ccf9d4e9bbadb853d77a100eabd4e5142bf | 10,981 |
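A minimal sketch for the row above; seat_map is assumed to be a module-level dict mapping coordinates to seat characters ('#' occupied, 'L' empty, '.' floor):
seat_map = {(0, 0): "#", (0, 1): "L"}

print(is_active((0, 0)))  # True  -> occupied seat
print(is_active((0, 1)))  # False -> empty seat
print(is_active((5, 5)))  # False -> missing key defaults to floor "."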
from typing import List
import numpy as np
from tqdm import tqdm
def calibrate_intensity_to_powder(peak_intensity: dict, powder_peak_intensity: dict,
powder_peak_label: List[str], image_numbers: List[int], powder_start: int = 1):
"""Calibrate peak intensity values to intensity measurements taken from a 'random' powder sample."""
corrected_peak_intensity = dict()
first_iteration = True
for image_number in tqdm(image_numbers):
corrected_peak_intensity[image_number] = dict()
for label in powder_peak_label:
powder_average = np.average(powder_peak_intensity[powder_start][label])
powder_error = np.std(powder_peak_intensity[powder_start][label], ddof=1)
corrected_peak_intensity[image_number][label] = []
corrected_peak_intensity[image_number][label] = peak_intensity[image_number][label] / powder_average
if first_iteration:
print(f"Normalised {label} intensities by a value of {powder_average} +/- {powder_error} from average powder intensity.")
else:
continue
first_iteration = False
return corrected_peak_intensity | 8019eec6c63152ee25bacc9dcf8fa723407f8107 | 10,982 |
import json
def examine(path):
""" Look for forbidden tasks in a job-output.json file path """
data = json.load(open(path))
to_fix = False
for playbook in data:
if playbook['trusted']:
continue
for play in playbook['plays']:
for task in play['tasks']:
for hostname, host in task['hosts'].items():
if hostname != 'localhost':
continue
if host['action'] in ['command', 'shell']:
print("Found disallowed task:")
print(" Playbook: %s" % playbook['playbook'])
print(" Role: %s" % task.get('role', {}).get('name'))
print(" Task: %s" % task.get('task', {}).get('name'))
to_fix = True
return to_fix | e441fc58bbfc4547bbdff451d6d06ba952e5a1ba | 10,983 |
def config_ask(default_message = True,
config_args = config_variables):
"""Formats user command line input for configuration details"""
if default_message is True:
    print("Enter configuration parameters for the following variables... ")
else:
    print(default_message)
config_dictionary = dict()
for v in config_args:
    config_dictionary.update({v: input("{}: ".format(v))})
return config_dictionary | 277d26ae67baf14ee6b16547bb72c029ab0bc610 | 10,985 |
def build_A(N):
"""
Build A based on the defined problem.
Args:
N -- (int) as defined above
Returns:
NumPy ndarray - A
"""
A = np.hstack( (np.eye(N), np.negative(np.eye(N))) )
A = np.vstack( (A, np.negative(np.hstack( (np.eye(N), np.eye(N)) ))) )
A = np.vstack( (A, np.hstack( (np.ones(N), np.zeros(N)) )) )
return A | eecb541e44cc177e594f38d9a7c1930f2d4f0c40 | 10,987 |
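A quick shape check of the row above (np assumed to be numpy): for N = 2 the result stacks [I, -I], then [-I, -I], then a final row of N ones followed by N zeros.
import numpy as np

A = build_A(2)
print(A.shape)  # (5, 4): 2 rows of [I, -I], 2 rows of [-I, -I], 1 row [1, 1, 0, 0]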
def gms_change_est2(T_cont, T_pert, q_cont, precip, level, lat,
lev_sfc=925., gamma=1.):
"""
Gross moist stability change estimate.
Near surface MSE difference between ITCZ and local latitude, neglecting
geopotential term and applying a thermodynamic scaling for the moisture
term, and multiplying the ITCZ terms by cos(lat) and a fixed fraction gamma
to account for deviation of upper level MSE from the near surface ITCZ
value.
"""
# ITCZ defined as latitude with maximum zonal mean precip.
itcz_ind = np.argmax(precip.mean(axis=-1))
# Near-surface temperature change at the chosen pressure level (lev_sfc)
T_pert = np.squeeze(T_pert[np.where(level == lev_sfc)].mean(axis=-1))
T_cont = np.squeeze(T_cont[np.where(level == lev_sfc)].mean(axis=-1))
dT = T_pert - T_cont
dT_itcz = T_pert[itcz_ind] - T_cont[itcz_ind]
q_cont = np.squeeze(q_cont[np.where(level == lev_sfc)].mean(axis=-1))
# GMS change estimate: near-surface MSE difference between the ITCZ and the local latitude
alpha = 0.07
return (np.cos(np.deg2rad(lat))**2*gamma*
(c_p + L_v*alpha*q_cont[itcz_ind])*dT_itcz -
(c_p + L_v*alpha*q_cont)*dT)/c_p | 991721a2dae52269dec276fa384d568b1d58672f | 10,988 |
def solid_polygon_info_(base_sides, printed=False):
"""Get information about a solid polygon from its side count."""
# Example: A rectangular solid (Each base has four sides) is made up of
# 12 edges, 8 vertices, 6 faces, and 12 triangles.
edges = base_sides * 3
vertices = base_sides * 2
faces = base_sides + 2
triangles = (base_sides - 2) * 2 + vertices
if printed:
print(f"Edges: {edges}\nVertices: {vertices}\nFaces: {faces}\nTriangles: {triangles}")
else:
return {"edges": edges,
"vertices": vertices,
"faces": faces,
"triangles": triangles} | a16bae9b82fd7a89332d5403359c2aa1eddf6cb4 | 10,989 |
def load_prism_theme():
"""Loads a PrismJS theme from settings."""
theme = get_theme()
if theme:
script = (
f"""<link href="{PRISM_PREFIX}{PRISM_VERSION}/themes/prism-{theme}"""
""".min.css" rel="stylesheet">"""
)
return mark_safe(script)
return "" | 565e9fdb7b201bf6c34b3b2d198aa18f22070145 | 10,991 |
def get_root_name(depth):
""" Returns the Rootname. """
return Alphabet.get_null_character() * depth | 1514bcd0ef9c6a2a4051772d8eeee34f3f7197a7 | 10,992 |
import hashlib
def md5(fname):
"""
Calculate the MD5 hash of the file given as input.
Returns the hash value of the input file.
"""
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest() | 0c238810f1682f86e8a31982135c37017df4d6fd | 10,993 |
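A small usage sketch of the row above (writes a throwaway file purely for illustration):
with open("example.txt", "w") as f:
    f.write("hello world\n")

print(md5("example.txt"))  # 32-character hex digest of the file contents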
def date2num(date_axis, units, calendar):
"""
A wrapper around ``netCDF4.date2num`` able to handle "years since" and "months since" units.
If time units are not "years since" or "months since" calls usual ``netcdftime.date2num``.
:param numpy.array date_axis: The date axis following units
:param str units: The proper time units
:param str calendar: The NetCDF calendar attribute
:returns: The corresponding numerical time axis
:rtype: *array*
"""
# date_axis is the date time axis incremented following units (i.e., by years, months, etc).
if not units.split(' ')[0] in ['years', 'months']:
# If units are not 'years' or 'months since', call usual netcdftime.date2num:
return nc.date2num(date_axis, units=units, calendar=calendar)
else:
# Return to time reference with 'days since'
units_as_days = 'days ' + ' '.join(units.split(' ')[1:])
# Convert date axis as number of days since time reference
days_axis = nc.date2num(date_axis, units=units_as_days, calendar=calendar)
# Convert the time reference 'units_as_days' as datetime object
start_date = nc.num2date(0.0, units=units_as_days, calendar=calendar)
# Create years axis from input date axis
years = np.array([date.year for date in np.atleast_1d(np.array(date_axis))])
if units.split(' ')[0] == 'years':
# If units are 'years since'
# Define the number of maximum and minimum years to build a date axis covering
# the whole 'num_axis' period
max_years = np.max(years - start_date.year + 1)
min_years = np.min(years - start_date.year - 1)
# Create a date axis with one year that spans the entire period by year
years_axis = np.array([add_year(start_date, yid)
for yid in np.arange(min_years, max_years + 2)])
# Convert years axis as number of days since time reference
cdftime = netcdftime.utime(units_as_days, calendar=calendar)
years_axis_as_days = cdftime.date2num(years_axis)
# Find closest index for years_axis_as_days in days_axis
closest_index = np.searchsorted(years_axis_as_days, days_axis)
# Compute the difference between closest value of year axis and start date, in number of days
num = days_axis - years_axis_as_days[closest_index]
# Number of days of the corresponding closest year
den = np.diff(years_axis_as_days)[closest_index]
return min_years + closest_index + num / den
elif units.split(' ')[0] == 'months':
# If units are 'months since'
# Define the number of maximum and minimum months to build a date axis covering
# the whole 'num_axis' period
max_months = np.max(12 * (years - start_date.year + 12))
min_months = np.min(12 * (years - start_date.year - 12))
# Create a date axis with one month that spans the entire period by month
months_axis = np.array([add_month(start_date, mid)
for mid in np.arange(min_months, max_months)])
# Convert months axis as number of days since time reference
cdftime = netcdftime.utime(units_as_days, calendar=calendar)
months_axis_as_days = cdftime.date2num(months_axis)
# Find closest index for months_axis_as_days in days_axis
closest_index = np.searchsorted(months_axis_as_days, days_axis)
# Compute the difference between closest value of months axis and start date, in number of days
num = days_axis - months_axis_as_days[closest_index]
# Number of days of the corresponding closest month
den = np.diff(months_axis_as_days)[closest_index]
return min_months + closest_index + num / den | b435697098c58d1045f7e31eefb23cac201bfe0c | 10,994 |
import gettext
def _(txt):
""" Custom gettext translation function that uses the CurlyTx domain """
t = gettext.dgettext("CurlyTx", txt)
if t == txt:
#print "[CurlyTx] fallback to default translation for", txt
t = gettext.gettext(txt)
return t | 839c36184eabde641a40d7b7ad55d4695574dafb | 10,995 |
import html
def output_node(ctx, difference, path, indentstr, indentnum):
"""Returns a tuple (parent, continuation) where
- parent is a PartialString representing the body of the node, including
its comments, visuals, unified_diff and headers for its children - but
not the bodies of the children
- continuation is either None or (only in html-dir mode) a function which
when called with a single integer arg, the maximum size to print, will
print any remaining "split" pages for unified_diff up to the given size.
"""
indent = tuple(indentstr * (indentnum + x) for x in range(3))
t, cont = PartialString.cont()
comments = u""
if difference.comments:
comments = u'{0[1]}<div class="comment">\n{1}{0[1]}</div>\n'.format(
indent,
"".join(
u"{0[2]}{1}<br/>\n".format(indent, html.escape(x))
for x in difference.comments
),
)
visuals = u""
for visual in difference.visuals:
visuals += output_visual(visual, path, indentstr, indentnum + 1)
udiff = u""
ud_cont = None
if difference.unified_diff:
ud_cont = HTMLSideBySidePresenter().output_unified_diff(
ctx, difference.unified_diff, difference.has_internal_linenos
)
udiff = next(ud_cont)
if isinstance(udiff, PartialString):
ud_cont = ud_cont.send
udiff = udiff.pformatl(PartialString.of(ud_cont))
else:
for _ in ud_cont:
pass # exhaust the iterator, avoids GeneratorExit
ud_cont = None
# PartialString for this node
body = PartialString.numl(u"{0}{1}{2}{-1}", 3, cont).pformatl(
comments, visuals, udiff
)
if len(path) == 1:
# root node, frame it
body = output_node_frame(difference, path, indentstr, indentnum, body)
t = cont(t, body)
# Add holes for child nodes
for d in difference.details:
child = output_node_frame(
d, path + [d], indentstr, indentnum + 1, PartialString.of(d)
)
child = PartialString.numl(
u"""{0[1]}<div class="difference">
{1}{0[1]}</div>
{-1}""",
2,
cont,
).pformatl(indent, child)
t = cont(t, child)
# there might be extra holes for the unified diff continuation
assert len(t.holes) >= len(difference.details) + 1
return cont(t, u""), ud_cont | dbe4c5f806457d4308954fb9e13bf01419b4e1a1 | 10,996 |
def split_tree_into_feature_groups(tree: TreeObsForRailEnv.Node, max_tree_depth: int) -> (
np.ndarray, np.ndarray, np.ndarray):
"""
This function splits the tree into three different arrays of values
"""
data, distance, agent_data = _split_node_into_feature_groups(tree)
for direction in TreeObsForRailEnv.tree_explored_actions_char:
sub_data, sub_distance, sub_agent_data = _split_subtree_into_feature_groups(tree.childs[direction], 1,
max_tree_depth)
data = np.concatenate((data, sub_data))
distance = np.concatenate((distance, sub_distance))
agent_data = np.concatenate((agent_data, sub_agent_data))
return data, distance, agent_data | 87352b0d500d178b32d4697ae49736133c7fd6a1 | 10,997 |
def _generate_training_batch(ground_truth_data, representation_function,
batch_size, num_points, random_state):
"""Sample a set of training samples based on a batch of ground-truth data.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
batch_size: Number of points to be used to compute the training_sample.
num_points: Number of points to be sampled for training set.
random_state: Numpy random state used for randomness.
Returns:
points: (num_points, dim_representation)-sized numpy array with training set
features.
labels: (num_points)-sized numpy array with training set labels.
"""
points = None # Dimensionality depends on the representation function.
labels = np.zeros(num_points, dtype=np.int64)
for i in range(num_points):
labels[i], feature_vector = _generate_training_sample(
ground_truth_data, representation_function, batch_size, random_state)
if points is None:
points = np.zeros((num_points, feature_vector.shape[0]))
points[i, :] = feature_vector
return points, labels | 944ed5845385089063f0e1558a9a9aedb4aa6d26 | 10,998 |
def get_mnist_loaders(data_dir, b_sz, shuffle=True):
"""Helper function that deserializes MNIST data
and returns the relevant data loaders.
params:
data_dir: string - root directory where the data will be saved
b_sz: integer - the batch size
shuffle: boolean - whether to shuffle the training set or not
"""
train_loader = DataLoader(
MNIST(data_dir, train=True, transform=ToTensor(), download=True),
shuffle=shuffle, batch_size=b_sz)
test_loader = DataLoader(
MNIST(data_dir, train=False, transform=ToTensor(), download=True),
shuffle=False, batch_size=b_sz)
return train_loader, test_loader | 7149dbe78ceb321c0afea52c20ae927ce154a8f6 | 10,999 |
def atomic_coordinates_as_json(pk):
"""Get atomic coordinates from database."""
subset = models.Subset.objects.get(pk=pk)
vectors = models.NumericalValue.objects.filter(
datapoint__subset=subset).filter(
datapoint__symbols__isnull=True).order_by(
'datapoint_id', 'counter')
data = {'vectors':
[[x.formatted('.10g') for x in vectors[:3]],
[x.formatted('.10g') for x in vectors[3:6]],
[x.formatted('.10g') for x in vectors[6:9]]]}
# Here counter=1 filters out the first six entries
symbols = models.Symbol.objects.filter(
datapoint__subset=subset).filter(counter=1).order_by(
'datapoint_id').values_list('value', flat=True)
coords = models.NumericalValue.objects.filter(
datapoint__subset=subset).filter(
datapoint__symbols__counter=1).select_related('error').order_by(
'counter', 'datapoint_id')
tmp = models.Symbol.objects.filter(
datapoint__subset=subset).annotate(
num=models.models.Count('datapoint__symbols')).filter(
num=2).first()
if tmp:
data['coord-type'] = tmp.value
data['coordinates'] = []
N = int(len(coords)/3)
for symbol, coord_x, coord_y, coord_z in zip(
symbols, coords[:N], coords[N:2*N], coords[2*N:3*N]):
data['coordinates'].append((symbol,
coord_x.formatted('.9g'),
coord_y.formatted('.9g'),
coord_z.formatted('.9g')))
return data | 515854e789a15e845b0dbcd754e17bedfc0bcf69 | 11,000 |
def additional_bases():
""""Manually added bases that cannot be retrieved from the REST API"""
return [
{
"facility_name": "Koltyr Northern Warpgate",
"facility_id": 400014,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
"facility_name": "Koltyr Eastern Warpgate",
"facility_id": 400015,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
"facility_name": "Koltyr Southern Warpgate",
"facility_id": 400016,
"facility_type_id": 7,
"facility_type": "Warpgate"
},
{
"facility_name": "Zorja",
"facility_id": 400017,
"facility_type_id": 2,
"facility_type": "Amp Station"
},
{
"facility_name": "Xander",
"facility_id": 400018,
"facility_type_id": 3,
"facility_type": "Bio Lab"
},
{
"facility_name": "Svarog",
"facility_id": 400019,
"facility_type_id": 4,
"facility_type": "Tech Plant"
},
{
"facility_name": "Koltyr Tech Plant Outpost",
"facility_id": 400020,
"facility_type_id": 5,
"facility_type": "Large Outpost"
},
{
"facility_name": "Koltyr Biolab Outpost",
"facility_id": 400021,
"facility_type_id": 5,
"facility_type": "Large Outpost"
},
{
"facility_name": "Koltyr Amp Station Outpost",
"facility_id": 400022,
"facility_type_id": 5,
"facility_type": "Large Outpost"
}
] | e2a5ad97ca1b424466f5ebe340466eaf9f627e7e | 11,001 |
def get_all_label_values(dataset_info):
"""Retrieves possible values for modeled labels from a `Seq2LabelDatasetInfo`.
Args:
dataset_info: a `Seq2LabelDatasetInfo` message.
Returns:
A dictionary mapping each label name to a tuple of its permissible values.
"""
return {
label_info.name: tuple(label_info.values)
for label_info in dataset_info.labels
} | 929db286b3f7ee8917618e9f46feabdff630d3b2 | 11,002 |
def load_input(file: str) -> ArrayLike:
"""Load the puzzle input and duplicate 5 times in each direction,
adding 1 to the array for each copy.
"""
input = puzzle_1.load_input(file)
input_1x5 = np.copy(input)
for _ in range(4):
input = np.clip(np.mod(input + 1, 10), a_min=1, a_max=None)
input_1x5 = np.concatenate([input_1x5, input], axis=1)
input_5x5 = np.copy(input_1x5)
for _ in range(4):
input_1x5 = np.clip(np.mod(input_1x5 + 1, 10), a_min=1, a_max=None)
input_5x5 = np.concatenate([input_5x5, input_1x5], axis=0)
return input_5x5 | 91b2cd7854a793ebbbfee2400eddb22304fc18bd | 11,003 |
def _get_xvals(end, dx):
"""Returns a integer numpy array of x-values incrementing by "dx"
and ending with "end".
Args:
end (int)
dx (int)
"""
arange = np.arange(0, end-1+dx, dx, dtype=int)
xvals = arange[1:]
return xvals | 24a4d7b7c470abb881700a1775008d16c35c1fc3 | 11,004 |
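Example values for the row above (np assumed to be numpy):
import numpy as np

print(_get_xvals(10, 2))  # [ 2  4  6  8 10]
print(_get_xvals(9, 3))   # [3 6 9]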
import torch
def top_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
"""
# batch support!
if top_k > 0:
values, _ = torch.topk(logits, top_k)
min_values = values[:, -1].unsqueeze(1).repeat(1, logits.shape[-1])
logits = torch.where(logits < min_values,
torch.ones_like(logits, dtype=logits.dtype) * -float('Inf'),
logits)
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
sorted_logits = sorted_logits.masked_fill_(sorted_indices_to_remove, filter_value)
logits = torch.zeros_like(logits).scatter(1, sorted_indices, sorted_logits)
return logits | 5cbbd9959a80e72364f098fe031e5e3c78485826 | 11,005 |
def get_reference_shift(self, seqID):
"""Get a ``reference_shift`` attached to a particular ``seqID``.
If none was provided, it will return **1** as default.
:param str seqID: |seqID_param|.
:type shift: Union[:class:`int`, :class:`list`]
:raises:
:TypeError: |indf_error|.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: df = parse_rosetta_file("../rstoolbox/tests/data/input_ssebig.minisilent.gz",
...: {'sequence': 'C', 'structure': 'C'})
...: df.add_reference_structure('C', df.iloc[0].get_structure('C'))
...: df.add_reference_shift('C', 3)
...: df.get_reference_shift('C')
"""
if not isinstance(self, (pd.DataFrame, pd.Series)):
raise TypeError("Data container has to be a DataFrame/Series or a derived class.")
if self._subtyp != "sequence_frame" and (seqID not in self.get_available_structures() and
seqID not in self.get_available_sequences()):
raise KeyError("Data container does not have data for structure {}".format(seqID))
if seqID in self._reference:
return self._reference[seqID]["sft"]
else:
return 1 | 4a8f9fe683c9cf0085754ca2ebb9132bbae427ea | 11,006 |
def load_and_resolve_feature_metadata(eval_saved_model_path: Text,
graph: tf.Graph):
"""Get feature data (feature columns, feature) from EvalSavedModel metadata.
Like load_feature_metadata, but additionally resolves the Tensors in the given
graph.
Args:
eval_saved_model_path: Path to EvalSavedModel, for the purposes of loading
the feature_metadata file.
graph: tf.Graph to resolve the Tensors in.
Returns:
Same as load_feature_metadata, except associated_tensors and features
contain the Tensors resolved in the graph instead of TensorInfos.
"""
result = load_feature_metadata(eval_saved_model_path=eval_saved_model_path)
# Resolve Tensors in graph
result['associated_tensors'] = [
tf.compat.v1.saved_model.get_tensor_from_tensor_info(tensor_info, graph)
for tensor_info in result['associated_tensors']
]
result['features'] = {
k: tf.compat.v1.saved_model.get_tensor_from_tensor_info(v, graph)
for k, v in result['features'].items()
}
return result | 3377d66c962ccccab7b62abf563f88032a8a7b14 | 11,008 |
def greater_than_or_eq(quant1, quant2):
"""Binary function to call the operator"""
return quant1 >= quant2 | 920c28da125b567bc32a149aec6aaade3645ef87 | 11,009 |
def pr_define_role(pe_id,
role=None,
role_type=None,
entity_type=None,
sub_type=None):
"""
Back-end method to define a new affiliates-role for a person entity
@param pe_id: the person entity ID
@param role: the role name
@param role_type: the role type (from pr_role_types), default 9
@param entity_type: limit selection in CRUD forms to this entity type
@param sub_type: limit selection in CRUD forms to this entity sub-type
@return: the role ID
"""
if not pe_id:
return None
s3db = current.s3db
if role_type not in s3db.pr_role_types:
role_type = 9 # Other
data = {"pe_id": pe_id,
"role": role,
"role_type": role_type,
"entity_type": entity_type,
"sub_type": sub_type}
rtable = s3db.pr_role
if role:
query = (rtable.pe_id == pe_id) & \
(rtable.role == role)
duplicate = current.db(query).select(rtable.id,
rtable.role_type,
limitby=(0, 1)).first()
else:
duplicate = None
if duplicate:
if duplicate.role_type != role_type:
# Clear paths if this changes the role type
if str(role_type) != str(OU):
data["path"] = None
s3db.pr_role_rebuild_path(duplicate.id, clear=True)
duplicate.update_record(**data)
record_id = duplicate.id
else:
record_id = rtable.insert(**data)
return record_id | 3f09ac9eca47347b51069a20b7b08b2192e2d452 | 11,010 |
def inherently_superior(df):
"""
Find rows in a dataframe with all values 'inherently superior',
meaning that all values for certain metrics are as high or higher
than for all other rows.
Parameters
----------
df : DataFrame
Pandas dataframe containing the columns to be compared. The columns
should be in a format in which higher values are superior.
Returns
-------
DataFrame with index of best values and values compared.
"""
# Copy dataframe to prevent altering the columns.
df_copy = df.copy()
# Reset index to reference location of values. Also, convert to numpy.
df_copy.reset_index(inplace=True)
arr = df_copy.values
# Repeat and tile the array for comparison. Given indices [1, 2], arr1 is
# in format [1, 1, 2, 2], and arr2 is in format [1, 2, 1, 2].
arr1 = np.repeat(arr, arr.shape[0], axis=0)
arr2 = np.tile(arr, (arr.shape[0], 1))
# Check if any values are greater than for other rows.
any_arr = np.all(arr1[:, 1:] >= arr2[:, 1:], axis=1)
# Adjust array so that all points at which a row is being compared to itself
# are labeled as superior.
same_idx = np.array(range(0, len(any_arr), arr.shape[0])) + np.array(range(arr.shape[0]))
any_arr[same_idx] = 1
# Concatenate arr1 and array with superior labels.
arr1_any = np.concatenate([arr1, any_arr.reshape(-1, 1)], axis=1)
# Split data at unique indices. Used to check if greater than all other rows.
splits = np.array(np.split(arr1_any, np.unique(arr1[:, 0], return_index=True)[1][1:]))
perc_sup = np.mean(splits[:, :, -1], axis=1)
idx = np.all(splits[:, :, -1], axis=1)
# Choose superior data idx and create dataframe.
columns = df_copy.columns.tolist() + ['perc_sup', 'fully_sup']
data = np.concatenate([arr, perc_sup.reshape(-1, 1), idx.reshape(-1, 1)], axis=1)
arr_df = pd.DataFrame(data, columns=columns)
arr_df.drop('index', axis=1, inplace=True)
arr_df['fully_sup'] = arr_df['fully_sup'].astype(bool)
return arr_df | 02dd6db624efd4f1daa4c0ef4f126c6c60c0376e | 11,011 |
def LineColourArray():
"""Line colour options array"""
Colour = [
'Black',
'dimgrey',
'darkgrey',
'silver',
'lightgrey',
'maroon',
'darkred',
'firebrick',
'red',
'orangered',
'darkorange',
'orange',
'saddlebrown',
'darkgoldenrod',
'goldenrod',
'gold',
'darkolivegreen',
'olivedrab',
'olive',
'y',
'darkkhaki',
'khaki',
'darkgreen',
'Green',
'limegreen',
'lime',
'mediumspringgreen',
'palegreen',
'greenyellow',
'midnightblue',
'navy',
'darkblue',
'mediumblue',
'blue',
'slateblue',
'indigo',
'purple',
'darkmagenta',
'darkorchid',
'mediumorchid',
'orchid',
'plum',
'crimson',
'deeppink',
'magenta',
'hotpink',
'pink' ]
return Colour | 94f91d17c6e539983ab38ca7fdadd211e6268bfb | 11,012 |
from typing import Any
def parse_ccu_sys_var(data: dict[str, Any]) -> tuple[str, Any]:
"""Helper to parse type of system variables of CCU."""
# pylint: disable=no-else-return
if data[ATTR_TYPE] == ATTR_HM_LOGIC:
return data[ATTR_NAME], data[ATTR_VALUE] == "true"
if data[ATTR_TYPE] == ATTR_HM_ALARM:
return data[ATTR_NAME], data[ATTR_VALUE] == "true"
elif data[ATTR_TYPE] == ATTR_HM_NUMBER:
return data[ATTR_NAME], float(data[ATTR_VALUE])
elif data[ATTR_TYPE] == ATTR_HM_LIST:
return data[ATTR_NAME], int(data[ATTR_VALUE])
return data[ATTR_NAME], data[ATTR_VALUE] | 8b77dbbaa93739457a2e92aad79ac5b6bd3a6af0 | 11,014 |
def one_time_log_fixture(request, workspace) -> Single_Use_Log:
"""
Pytest Fixture for setting up a single use log file
At test conclusion, runs the cleanup to delete the single use text file
:return: Single_Use_Log class
"""
log_class = Single_Use_Log(workspace)
request.addfinalizer(log_class.cleanup)
return log_class | 73332892ece76ee90c15d84294b70d935e8a2f4c | 11,015 |
import json
def details(request, path):
"""
Returns detailed information on the entity at path.
:param path: Path to the entity (namespaceName/.../.../.../)
:return: JSON Struct: {property1: value, property2: value, ...}
"""
item = CACHE.get(ENTITIES_DETAIL_CACHE_KEY)
# ENTITIES_DETAIL : {"namespaceName": {"name":"", "description": "", "stream":{}, "artifact":"", "dataset":"",
# "application":""}, {}...} Each part in path.split('/') matches the key name in ENTITIES_DETAIL
# The detailed information of entity at path stores in the last dict
for k in path.strip('/').split('/'):
item = item[k]
item["privileges"] = _get_privileges_for_path(request.user, path)
return HttpResponse(json.dumps(item), content_type='application/json') | b460dc76f18f35b48509a1b2d8daa104bc89fbb5 | 11,016 |
def ca_get_container_capability_set(slot, h_container):
"""
Get the container capabilities of the given slot.
:param int slot: target slot number
:param int h_container: target container handle
:return: result code, {id: val} dict of capabilities (None if command failed)
"""
slot_id = CK_SLOT_ID(slot)
cont_id = CK_ULONG(h_container)
cap_ids = AutoCArray()
cap_vals = AutoCArray()
@refresh_c_arrays(1)
def _get_container_caps():
"""Closer for retries to work w/ properties"""
return CA_GetContainerCapabilitySet(
slot_id, cont_id, cap_ids.array, cap_ids.size, cap_vals.array, cap_vals.size
)
ret = _get_container_caps()
return ret, dict(list(zip(cap_ids, cap_vals))) | cf97db8f201d0c5fce12902b92abdc3a819ac394 | 11,017 |
def load_pyfunc(model_file):
"""
Loads a Keras model as a PyFunc from the passed-in persisted Keras model file.
:param model_file: Path to Keras model file.
:return: PyFunc model.
"""
return _KerasModelWrapper(_load_model(model_file)) | eb21f47a55f35bf3707ba7c5cb56e72948d24866 | 11,018 |
def business_days(start, stop):
"""
Return business days between two datetimes (inclusive).
"""
return dt_business_days(start.date(), stop.date()) | 1fa8c38e6cceca448bc988cd0c1eb24a27508a78 | 11,019 |
def empty_nzb_document():
""" Creates xmldoc XML document for a NZB file. """
# http://stackoverflow.com/questions/1980380/how-to-render-a-doctype-with-pythons-xml-dom-minidom
imp = minidom.getDOMImplementation()
dt = imp.createDocumentType("nzb", "-//newzBin//DTD NZB 1.1//EN",
"http://www.newzbin.com/DTD/nzb/nzb-1.1.dtd")
doc = imp.createDocument("http://www.newzbin.com/DTD/2003/nzb", "nzb", dt)
# http://stackoverflow.com/questions/2306149/how-to-write-xml-elements-with-namespaces-in-python
doc.documentElement.setAttribute('xmlns',
'http://www.newzbin.com/DTD/2003/nzb')
return doc | 7cd8aa73f201b4f432aa6adaed18d133ec08fa48 | 11,020 |
def get_output_directory(create_statistics=None, undersample=None, oversample=None):
"""
Determines the output directory given the balance of the dataset as well as columns.
Parameters
----------
create_statistics: bool
Whether the std, min and max columns have been created
undersample: bool
Whether the data has been undersampled
oversample: bool
Whether the data has been oversampled
Returns
-------
Output directory
"""
if create_statistics is None:
create_statistics = AppConfig.create_statistics
if undersample is None:
undersample = AppConfig.balance_data
if oversample is None:
oversample = AppConfig.oversample
stat = 'st' if create_statistics else 'ns'
bal = 'us' if undersample else 'ub'
bal = 'os' if oversample else bal
return f'./output/{stat}_{bal}/' | c10859e1eba4afb61d967e56be8a8206f5202618 | 11,021 |
def removePrefixes(word, prefixes):
"""
Attempts to remove the given prefixes from the given word.
Args:
word (string): Word to remove prefixes from.
prefixes (collections.Iterable or string): Prefixes to remove from given word.
Returns:
(string): Word with prefixes removed.
"""
if isinstance(prefixes, str):
return word.split(prefixes)[-1]
for prefix in prefixes:
word = word.split(prefix)[-1]
return word | 6932e5605b11eee004a350c7f9be831d8bb7ca9d | 11,022 |
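Example behaviour of the row above; note the split-based approach removes everything up to the last occurrence of each prefix, not only a leading match:
print(removePrefixes("unhappy", "un"))         # "happy"
print(removePrefixes("reundo", ["re", "un"]))  # "do"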
def isSol(res):
"""
Check if the string is of the form a^i b^j c^k (with i, j, k >= 1)
"""
if not res or res[0] != 'a' or res[-1] != 'c':
return False
l = 0
r = len(res)-1
while res[l] == "a":
l+=1
while res[r] == "c":
r-=1
if r-l+1 <= 0:
return False
for x in res[l:r+1]:
if x != 'b':
return False
return True | 14030e52a588dc13029602e81a5f2068707bca17 | 11,023 |
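A few example checks of the row above:
print(isSol("aabbc"))    # True  -> a^2 b^2 c^1
print(isSol("aabcacc"))  # False -> stray characters between the b's
print(isSol("ab"))       # False -> must end with 'c'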
import pandas
def _h1_to_dataframe(h1: Histogram1D) -> pandas.DataFrame:
"""Convert histogram to pandas DataFrame."""
return pandas.DataFrame(
{"frequency": h1.frequencies, "error": h1.errors},
index=binning_to_index(h1.binning, name=h1.name),
) | 28aa8cc36abd21a17e0a30f4bde2bb996753864b | 11,024 |
def wgt_area_sum(data, lat_wgt, lon_wgt):
"""wgt_area_sum() performas weighted area addition over a geographical area.
data: data of which last 2 dimensions are lat and lon. Strictly needs to be a masked array
lat_wgt: weights over latitude of area (usually cos(lat * pi/180))
lon_wgt: weights over longitude of area (usually 1)
Returns a NumPy masked array with two fewer dimensions.
The mask is False if no mask was supplied with the input data;
otherwise the mask is derived from the input data."""
# Get data shape
shp = data.shape
ndims = data.ndim
if(isinstance(lat_wgt, float)):
lat_wgt = [lat_wgt] * shp[ndims - 2]
if(isinstance(lon_wgt, float)):
lon_wgt = [lon_wgt] * shp[ndims - 1]
lat_wgt = np.array(lat_wgt).reshape(len(lat_wgt), 1)
lon_wgt = np.array(lon_wgt)
# Make grid of lon_wgt, lat_wgt with lat and lon coordinates (last 2 axis of data)
wy = np.broadcast_to(lon_wgt, data.shape[ndims - 2:ndims])
wx = np.broadcast_to(lat_wgt, data.shape[ndims - 2:ndims])
# Mask the array
# Get 2D mask from the array
ds = data[0]
for el in shp[1:ndims-2]:
ds = ds[0]
if(isinstance(ds, np.ma.masked_array)):
msk = ds.mask
else:
msk = False
wy = np.ma.masked_array(wy, msk)
wx = np.ma.masked_array(wx, msk)
data_wgt = data * wy * wx
sm_wgt = data_wgt.sum(axis = (ndims - 2, ndims - 1))
# sm_wgt = sm_wgt/np.sum(wy * wx)
return sm_wgt | 725f7f199e634cf56afb846ebff2a0917a92c685 | 11,025 |
def load(filename):
"""Load the labels and scores for Hits at K evaluation.
Loads labels and model predictions from files of the format:
Query \t Example \t Label \t Score
:param filename: Filename to load.
:return: list_of_list_of_labels, list_of_list_of_scores
"""
result_labels = []
result_scores = []
current_block_name = ""
current_block_scores = []
current_block_labels = []
with open(filename,'r') as fin:
for line in fin:
splt = line.strip().split("\t")
block_name = splt[0]
block_example = splt[1]
example_label = int(splt[2])
example_score = float(splt[3])
if block_name != current_block_name and current_block_name != "":
result_labels.append(current_block_labels)
result_scores.append(current_block_scores)
current_block_labels = []
current_block_scores = []
current_block_labels.append(example_label)
current_block_scores.append(example_score)
current_block_name = block_name
result_labels.append(current_block_labels)
result_scores.append(current_block_scores)
return result_labels,result_scores | 8d9570d794ebf09eb393342f926a5536dd0c1a75 | 11,027 |
def expanding_sum(a, axis = 0, data = None, state = None):
"""
equivalent to pandas a.expanding().sum().
- works with np.arrays
- handles nan without forward filling.
- supports state parameters
:Parameters:
------------
a : array, pd.Series, pd.DataFrame or list/dict of these
timeseries
axis : int, optional
0/1/-1. The default is 0.
data: None
unused at the moment. Allow code such as func(live, **func_(history)) to work
state: dict, optional
state parameters used to instantiate the internal calculations, based on history prior to 'a' provided.
:Example: agreement with pandas
--------------------------------
>>> from pyg import *; import pandas as pd; import numpy as np
>>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999))
>>> panda = a.expanding().sum(); ts = expanding_sum(a)
>>> assert eq(ts,panda)
:Example: nan handling
----------------------
Unlike pandas, timeseries does not forward fill the nans.
>>> a[a<0.1] = np.nan
>>> panda = a.expanding().sum(); ts = expanding_sum(a)
>>> pd.concat([panda,ts], axis=1)
>>> 0 1
>>> 1993-09-23 NaN NaN
>>> 1993-09-24 NaN NaN
>>> 1993-09-25 0.645944 0.645944
>>> 1993-09-26 2.816321 2.816321
>>> 1993-09-27 2.816321 NaN
>>> ... ...
>>> 2021-02-03 3976.911348 3976.911348
>>> 2021-02-04 3976.911348 NaN
>>> 2021-02-05 3976.911348 NaN
>>> 2021-02-06 3976.911348 NaN
>>> 2021-02-07 3976.911348 NaN
:Example: state management
--------------------------
One can split the calculation and run old and new data separately.
>>> old = a.iloc[:5000]
>>> new = a.iloc[5000:]
>>> ts = expanding_sum(a)
>>> old_ts = expanding_sum_(old)
>>> new_ts = expanding_sum(new, **old_ts)
>>> assert eq(new_ts, ts.iloc[5000:])
:Example: dict/list inputs
---------------------------
>>> assert eq(expanding_sum(dict(x = a, y = a**2)), dict(x = expanding_sum(a), y = expanding_sum(a**2)))
>>> assert eq(expanding_sum([a,a**2]), [expanding_sum(a), expanding_sum(a**2)])
"""
state = state or {}
return first_(_expanding_sum(a, axis = axis, **state)) | ec3fb41784f7ce5ef268ec8e7d8fe8e65f222157 | 11,028 |
def accuracy(output, target, top_k=(1,)):
"""Calculate classification accuracy between output and target.
:param output: output of classification network
:type output: pytorch tensor
:param target: ground truth from dataset
:type target: pytorch tensor
:param top_k: top k values of the metric; each k is an integer
:type top_k: tuple of integers
:return: results of top k
:rtype: list
"""
max_k = max(top_k)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in top_k:
        correct_k = correct[:k].reshape(-1).float().sum(0)  # reshape: the slice may be non-contiguous
res.append(correct_k.mul_(100.0 / batch_size))
return res | 68b7c48e5bd832a637e7a06353c48ffa09b449cd | 11,029 |
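# Hedged usage sketch for accuracy() above (assumes PyTorch is installed; the
# logits and labels are made-up illustration data, not from the original).
import torch

logits = torch.tensor([[0.1, 0.7, 0.2],   # top-1 prediction: class 1
                       [0.5, 0.1, 0.4],   # top-1 prediction: class 0
                       [0.2, 0.3, 0.5]])  # top-1 prediction: class 2
labels = torch.tensor([1, 2, 2])
top1, top2 = accuracy(logits, labels, top_k=(1, 2))
# top1 -> tensor(~66.67): two of the three top-1 predictions match the labels
# top2 -> tensor(100.0): every label appears among the two highest logits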
def sum_digits(number):
"""
Write a function named sum_digits which takes a number as input and
returns the sum of the absolute value of each of the number's decimal digits.
"""
return sum(int(n) for n in str(number) if n.isdigit()) | b6d8083a78d67a268316716174723f47d84b2287 | 11,032 |
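# Minimal usage sketch: non-digit characters such as a minus sign or a decimal
# point are skipped by str.isdigit(), so only the decimal digits are summed.
print(sum_digits(-142))  # 7
print(sum_digits(10.5))  # 6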
import numpy
def label(input, structure=None, output=None):
"""Labels features in an array.
Args:
input (cupy.ndarray): The input array.
structure (array_like or None): A structuring element that defines
            feature connections. ``structure`` must be centrosymmetric. If
None, structure is automatically generated with a squared
connectivity equal to one.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
Returns:
label (cupy.ndarray): An integer array where each unique feature in
            ``input`` has a unique label in the array.
num_features (int): Number of features found.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`scipy.ndimage.label`
"""
if not isinstance(input, cupy.ndarray):
raise TypeError('input must be cupy.ndarray')
if input.dtype.char in 'FD':
raise TypeError('Complex type not supported')
if structure is None:
structure = _generate_binary_structure(input.ndim, 1)
elif isinstance(structure, cupy.ndarray):
structure = cupy.asnumpy(structure)
structure = numpy.array(structure, dtype=bool)
if structure.ndim != input.ndim:
raise RuntimeError('structure and input must have equal rank')
for i in structure.shape:
if i != 3:
raise ValueError('structure dimensions must be equal to 3')
if isinstance(output, cupy.ndarray):
if output.shape != input.shape:
raise ValueError("output shape not correct")
caller_provided_output = True
else:
caller_provided_output = False
if output is None:
output = cupy.empty(input.shape, numpy.int32)
else:
output = cupy.empty(input.shape, output)
if input.size == 0:
# empty
maxlabel = 0
elif input.ndim == 0:
# 0-dim array
maxlabel = 0 if input.item() == 0 else 1
output[...] = maxlabel
else:
if output.dtype != numpy.int32:
y = cupy.empty(input.shape, numpy.int32)
else:
y = output
maxlabel = _label(input, structure, y)
if output.dtype != numpy.int32:
output[...] = y[...]
if caller_provided_output:
return maxlabel
else:
return output, maxlabel | fe3e4b7ee30f7dc1ae0541133f7db3d02c7d3157 | 11,033 |
import functools
def get_experiment_fn(nnObj, data_dir, num_gpus, variable_strategy,
                      use_distortion_for_training=True):
"""Returns an Experiment function.
Experiments perform training on several workers in parallel,
in other words experiments know how to invoke train and eval in a sensible
fashion for distributed training. Arguments passed directly to this
function are not tunable, all other arguments should be passed within
tf.HParams, passed to the enclosed function.
Args:
data_dir: str. Location of the data for input_fns.
num_gpus: int. Number of GPUs on each worker.
variable_strategy: String. CPU to use CPU as the parameter server
and GPU to use the GPUs as the parameter server.
use_distortion_for_training: bool. See cifar10.Cifar10DataSet.
Returns:
A function (tf.estimator.RunConfig, tf.contrib.training.HParams) ->
tf.contrib.learn.Experiment.
Suitable for use by tf.contrib.learn.learn_runner, which will run various
methods on Experiment (train, evaluate) based on information
about the current runner in `run_config`.
"""
def _experiment_fn(run_config, hparams):
"""Returns an Experiment."""
# Create estimator.
train_input_fn = functools.partial(
cifar_main.input_fn,
data_dir,
subset='train',
num_shards=num_gpus,
batch_size=hparams.train_batch_size,
use_distortion_for_training=use_distortion_for_training)
eval_input_fn = functools.partial(
cifar_main.input_fn,
data_dir,
subset='eval',
batch_size=hparams.eval_batch_size,
num_shards=num_gpus)
num_eval_examples = cifar10.Cifar10DataSet.num_examples_per_epoch('eval')
if num_eval_examples % hparams.eval_batch_size != 0:
raise ValueError(
'validation set size must be multiple of eval_batch_size')
train_steps = hparams.train_steps
eval_steps = num_eval_examples // hparams.eval_batch_size
classifier = tf.estimator.Estimator(
model_fn=cifar_main.get_model_fn(nnObj,num_gpus, variable_strategy,
run_config.num_worker_replicas or 1),
config=run_config,
params=hparams)
vail_accuracy=[]
for loop in range(20):
classifier.train(train_input_fn,steps=train_steps)
vail_accuracy.append(classifier.evaluate(eval_input_fn,steps=eval_steps))
print("finished iter:"+str((loop+1)*train_steps))
print("accuracy:")
print(vail_accuracy)
# Create experiment.
return tf.contrib.learn.Experiment(
classifier,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=train_steps,
eval_steps=eval_steps)
return _experiment_fn | 07ddb4ebac493826127464f76fd79ea17e7bf474 | 11,034 |
def calc_psnr(tar_img, ref_img):
""" Compute the peak signal to noise ratio (PSNR) for an image.
Parameters
----------
tar_img : sitk
Test image.
ref_img : sitk
Ground-truth image.
Returns
-------
psnr : float
The PSNR metric.
References
----------
.. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
"""
tar_vol = tar_img
ref_vol = ref_img
ref_vol, tar_vol = _as_floats(ref_vol, tar_vol)
err = calc_mse(ref_img, tar_img)
return 10 * np.log10((256 ** 2) / err) | 61097170fb439b85583cd8aac8002c70d02c094b | 11,035 |
from typing import Callable
from typing import Dict
from typing import Any
import functools
def glacier_wrap(
f: Callable[..., None],
enum_map: Dict[str, Dict[str, Any]],
) -> Callable[..., None]:
"""
Return the new function which is click-compatible
(has no enum signature arguments) from the arbitrary glacier compatible
function
"""
    # Implement the argument conversion logic
@functools.wraps(f)
def wrapped(*args: Any, **kwargs: Any) -> None:
# convert args and kwargs
converted_kwargs = {}
for name, value in kwargs.items():
if name in enum_map:
converted_kwargs[name] = enum_map[name][value]
else:
converted_kwargs[name] = value
return f(*args, **converted_kwargs)
return wrapped | 01f3a90179bb0dba29ffb0b2fa9d91be15e0ee7e | 11,037 |
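# Hedged usage sketch: Color, greet() and the enum_map contents are made-up
# illustration names, not part of the original glacier module.
from enum import Enum

class Color(Enum):
    RED = "red"
    BLUE = "blue"

def greet(name: str, color: Color) -> None:
    print(name, color)

wrapped = glacier_wrap(greet, enum_map={"color": {"red": Color.RED, "blue": Color.BLUE}})
wrapped(name="alice", color="red")  # prints: alice Color.RED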
def _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker):
"""Returns a device list given a cluster spec."""
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
devices = []
for task_type in ("chief", "worker"):
for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):
if num_gpus_per_worker == 0:
devices.append("/job:%s/task:%d" % (task_type, task_id))
else:
devices.extend([
"/job:%s/task:%d/device:GPU:%i" % (task_type, task_id, gpu_id)
for gpu_id in range(num_gpus_per_worker)
])
return devices | 3032a28f80dbed1fd870e4fc2ea06d724fc529ce | 11,038 |
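# Hedged usage sketch (assumes TensorFlow's multi_worker_util accepts a plain
# dict as the cluster spec, as the call to normalize_cluster_spec suggests).
cluster = {"chief": ["host0:2222"], "worker": ["host1:2222", "host2:2222"]}
devices = _cluster_spec_to_device_list(cluster, num_gpus_per_worker=1)
# ['/job:chief/task:0/device:GPU:0',
#  '/job:worker/task:0/device:GPU:0',
#  '/job:worker/task:1/device:GPU:0']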
def group_by_time(df, col, by='day', fun='max', args=(), kwargs={}, index='categories'):
""" See <https://pandas.pydata.org/pandas-docs/stable/api.html#groupby>_ for the set of `fun` parameters
available. Examples are: 'count', 'max', 'min', 'median', etc
.. Tip:: Since Access inherits from TimeIntervalTable, the underlaying data format
is a `pandas.DataFrame`, not a `pandas.Series`. Consequently, only the groupby
functions of a generic GroupBy or DataFrameGroupBy are valid. Functions of SeriesGroupBy
are not allowed.
"""
if col == 'index':
t = df.index
else:
t = df.loc[:, col].dt
if by.lower() in ['y', 'year']:
group = df.groupby([t.year])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year']
elif by.lower() in ['m', 'month']:
group = df.groupby([t.year, t.month])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month']
elif by.lower() in ['d', 'day']:
group = df.groupby([t.year, t.month, t.day])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month', 'day']
elif by.lower() in ['h', 'hour']:
group = df.groupby([t.year, t.month, t.day, t.hour])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month', 'day', 'hour']
    elif by.lower() in ['min', 'minute']:  # 'm' is already matched by 'month' above
group = df.groupby([t.year, t.month, t.day, t.hour, t.minute])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month', 'day', 'hour', 'min']
elif by.lower() in ['s', 'sec', 'second']:
group = df.groupby([t.year, t.month, t.day, t.hour, t.minute, t.second])
group = getattr(group, fun)(*args, **kwargs)
group.index.names = ['year', 'month', 'day', 'hour', 'min', 'sec']
else:
        raise KeyError('Grouping can be by "year", "month", "day", "hour", "min" and "sec" only')
# Choose index
if index == 'categories':
return group
elif index == 'times':
group.index = pd.DatetimeIndex([pd.Timestamp(*i) for i, _ in group.iterrows()])
return group
else:
        raise KeyError('Argument "index={}" is not valid. Options are "categories" or "times"'.format(index)) | 6695d285b52757ee7dfd32ad5943aa433504322f | 11,039
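# Hedged usage sketch for group_by_time() (assumes pandas/numpy; the hourly
# temperature frame is made-up illustration data): collapse hourly readings
# into one daily maximum per day.
import numpy as np
import pandas as pd

df = pd.DataFrame({"temp": np.arange(48.0)},
                  index=pd.date_range("2021-01-01", periods=48, freq="H"))
daily_max = group_by_time(df, col="index", by="day", fun="max", index="times")
# daily_max is indexed by 2021-01-01 and 2021-01-02 and holds 23.0 and 47.0.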
def param_rischDE(fa, fd, G, DE):
"""
Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).
Given a derivation D in k(t), f in k(t), and G
= [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and
a matrix A with m + r columns and entries in Const(k) such that
Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y
in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
Elements of k(t) are tuples (a, d) with a and d in k[t].
"""
m = len(G)
q, (fa, fd) = weak_normalizer(fa, fd, DE)
# Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi)
# correspond to solutions y = z/q of the original equation.
gamma = q
G = [(q*ga).cancel(gd, include=True) for ga, gd in G]
a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE)
# Solutions q in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond
# to solutions z = q/hn of the weakly normalized equation.
gamma *= hn
A, B, G, hs = prde_special_denom(a, ba, bd, G, DE)
# Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond
# to solutions q = p/hs of the previous equation.
gamma *= hs
g = A.gcd(B)
a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for
gia, gid in G]
# a*Dp + b*p = Sum(ci*gi) may have a polynomial solution
# only if the sum is in k[t].
q, M = prde_linear_constraints(a, b, g, DE)
# q = [q1, ..., qm] where qi in k[t] is the polynomial component
# of the partial fraction expansion of gi.
# M is a matrix with m columns and entries in k.
# Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k,
# is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the sum is equal to Sum(fi*qi).
M, _ = constant_system(M, zeros(M.rows, 1), DE)
# M is a matrix with m columns and entries in Const(k).
# Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k)
# if and only if M*Matrix([c1, ..., cm]) == 0,
# in which case the sum is Sum(ci*qi).
## Reduce number of constants at this point
V = M.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u).
# Sum(ci*gi) is in k[t] if and only is ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case,
# Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj)
# where rj = Sum(aji*qi) (j = 1, ..., u) in k[t].
if not V: # No non-trivial solution
return [], eye(m)
Mq = Matrix([q]) # A single row.
r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions
# y = p/gamma of the initial equation with ci = Sum(dj*aji).
try:
# We try n=5. At least for prde_spde, it will always
# terminate no matter what n is.
n = bound_degree(a, b, r, DE, parametric=True)
except NotImplementedError:
# A temporary bound is set. Eventually, it will be removed.
# the currently added test case takes large time
# even with n=5, and much longer with large n's.
n = 5
h, B = param_poly_rischDE(a, b, r, n, DE)
# h = [h1, ..., hv] in k[t]^v and and B is a matrix with u + v
# columns and entries in Const(k) such that
# a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation for ci = Sum(dj*aji)
# (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma.
## Build combined relation matrix with m + u + v columns.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(h)))
A = A.col_join(zeros(B.rows, m).row_join(B))
## Eliminate d1, ..., du.
W = A.nullspace()
# W = [w1, ..., wt] where each wl is a column matrix with
# entries blk (k = 1, ..., m + u + v) in Const(k).
# The vectors (bl1, ..., blm) generate the space of those
# constant families (c1, ..., cm) for which a solution of
# the equation Dy + f*y == Sum(ci*Gi) exists. They generate
# the space and form a basis except possibly when Dy + f*y == 0
# is solvable in k(t}. The corresponding solutions are
# y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u.
v = len(h)
M = Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's.
N = M.nullspace()
# N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column
# vectors generating the space of linear relations between
# c1, ..., cm, e1, ..., ev.
C = Matrix([ni[:] for ni in N]) # rows n1, ..., ns.
return [hk.cancel(gamma, include=True) for hk in h], C | afb910a9590195fa637be9c64382419c1c79a885 | 11,041 |
import torch
def huber_loss(x, delta=1.):
""" Standard Huber loss of parameter delta
https://en.wikipedia.org/wiki/Huber_loss
returns 0.5 * x^2 if |a| <= \delta
\delta * (|a| - 0.5 * \delta) o.w.
"""
if torch.abs(x) <= delta:
return 0.5 * (x ** 2)
else:
return delta * (torch.abs(x) - 0.5 * delta) | b3493eb9d4e38fa36f92db80dc52a47c32caf3c9 | 11,043 |
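# A vectorized sketch (an assumption, not part of the original): torch.where
# picks the quadratic or linear branch element-wise, so it also accepts
# tensors, where the scalar `if` above would raise on multi-element input.
import torch

def huber_loss_vectorized(x, delta=1.0):
    abs_x = torch.abs(x)
    quadratic = 0.5 * x ** 2
    linear = delta * (abs_x - 0.5 * delta)
    return torch.where(abs_x <= delta, quadratic, linear)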
def licenses_mapper(license, licenses, package): # NOQA
"""
Update package licensing and return package based on the `license` and
`licenses` values found in a package.
Licensing data structure has evolved over time and is a tad messy.
https://docs.npmjs.com/files/package.json#license
license(s) is either:
- a string with:
- an SPDX id or expression { "license" : "(ISC OR GPL-3.0)" }
- some license name or id
- "SEE LICENSE IN <filename>"
- (Deprecated) an array or a list of arrays of type, url.
- "license": "UNLICENSED" means this is proprietary
"""
declared_license = get_declared_licenses(license) or []
declared_license.extend(get_declared_licenses(licenses) or [])
package.declared_license = declared_license
return package | 5568c323b342cc09d966ddef3455381abdca1ccc | 11,044 |
def send_command(target, data):
"""sends a nudge api command"""
url = urljoin(settings.NUDGE_REMOTE_ADDRESS, target)
req = urllib2.Request(url, urllib.urlencode(data))
try:
return urllib2.urlopen(req)
    except urllib2.HTTPError as e:
raise CommandException(
'An exception occurred while contacting %s: %s' %
(url, e), e) | fc6967f84568b755db7f132f5fc511ef9687369f | 11,045 |
def logistic_log_partial_ij(x_i, y_i, beta, j):
"""i is index of point and j is index of derivative"""
return (y_i - logistic(dot(x_i, beta))) * x_i[j] | a24f704bc3178c6f2d8b37ad075f1beea3666964 | 11,046 |
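# Hedged sketch of the helpers this one-liner relies on: logistic() and dot()
# live elsewhere in the original module, so standard stand-ins are assumed.
import math

def logistic(z):
    return 1.0 / (1.0 + math.exp(-z))

def dot(u, v):
    return sum(a * b for a, b in zip(u, v))

# d/d(beta[1]) of the log-likelihood for one observation: (y - sigma(x.beta)) * x[1]
print(logistic_log_partial_ij([1.0, 2.0], 1, [0.1, 0.3], 1))  # ~0.664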
def expected_win(theirs, mine):
"""Compute the expected win rate of my strategy given theirs"""
assert abs(theirs.r + theirs.p + theirs.s - 1) < 0.001
assert abs(mine.r + mine.p + mine.s - 1) < 0.001
wins = theirs.r * mine.p + theirs.p * mine.s + theirs.s * mine.r
losses = theirs.r * mine.s + theirs.p * mine.r + theirs.s * mine.p
return wins - losses | 92de2010287e0c027cb18c3dd01d95353e4653c4 | 11,047 |
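# Hedged usage sketch: the strategy type is not shown in the original, so a
# namedtuple with r/p/s (rock/paper/scissors probability) fields is assumed.
from collections import namedtuple

Strategy = namedtuple("Strategy", ["r", "p", "s"])
theirs = Strategy(r=0.5, p=0.3, s=0.2)  # opponent leans towards rock
mine = Strategy(r=0.0, p=1.0, s=0.0)    # always play paper
print(expected_win(theirs, mine))       # 0.5 - 0.2 = 0.3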
def get_first_where(data, compare):
"""
Gets first dictionary in list that fit to compare-dictionary.
:param data: List with dictionarys
:param compare: Dictionary with keys for comparison {'key';'expected value'}
:return: list with dictionarys that fit to compare
"""
l = get_all_where(data, compare)
if len(l) < 1:
raise Exception('Data not found! (' + str(compare) + ')')
return l[0] | fc961d7154aa265efd101a658f668ad2025c121f | 11,048 |
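# Hedged usage sketch (assumes get_all_where() from the same module keeps the
# dicts whose entries match every key/value pair in `compare`).
rows = [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}]
print(get_first_where(rows, {"name": "b"}))  # {'id': 2, 'name': 'b'}
# A compare dict that matches nothing raises Exception('Data not found! ...').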
import numpy
def parse_megam_weights(s, features_count, explicit=True):
"""
Given the stdout output generated by ``megam`` when training a
model, return a ``numpy`` array containing the corresponding weight
vector. This function does not currently handle bias features.
"""
if numpy is None:
raise ValueError("This function requires that numpy be installed")
assert explicit, "non-explicit not supported yet"
lines = s.strip().split("\n")
weights = numpy.zeros(features_count, "d")
for line in lines:
if line.strip():
fid, weight = line.split()
weights[int(fid)] = float(weight)
return weights | db172935fe7af892b420d515391565ccc2b44c55 | 11,049 |
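# Minimal usage sketch: the function reads one "<feature id> <weight>" pair per
# line and scatters the weights into a zero-initialised vector.
s = "0 0.25\n2 -1.5\n"
w = parse_megam_weights(s, features_count=4)
# w -> array([ 0.25,  0.  , -1.5 ,  0.  ])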
from typing import Counter
def project_statistics(contributions):
"""Returns a dictionary containing statistics about all projects."""
projects = {}
for contribution in contributions:
# Don't count unreviewed contributions
if contribution["status"] == "unreviewed":
continue
project = contribution["repository"]
utopian_vote = contribution["utopian_vote"]
# Set default in case category doesn't exist
projects.setdefault(
project, {
"project": project,
"average_score": [],
"average_without_0": [],
"voted": 0,
"not_voted": 0,
"unvoted": 0,
"task-requests": 0,
"moderators": [],
"average_payout": [],
"total_payout": 0,
"utopian_total": []
}
)
# Check if contribution was voted on or unvoted
if contribution["status"] == "unvoted":
projects[project]["unvoted"] += 1
projects[project]["not_voted"] += 1
elif contribution["voted_on"]:
projects[project]["voted"] += 1
else:
projects[project]["not_voted"] += 1
# If contribution was a task request count this
if "task" in contribution["category"]:
projects[project]["task-requests"] += 1
# Add moderator and score
projects[project]["moderators"].append(contribution["moderator"])
projects[project]["average_score"].append(contribution["score"])
projects[project]["total_payout"] += contribution["total_payout"]
projects[project]["utopian_total"].append(utopian_vote)
if contribution["score"] > 0:
projects[project]["average_without_0"].append(
contribution["score"])
project_list = []
for project, value in projects.items():
# Set new keys and append value to list
value["reviewed"] = value["voted"] + value["not_voted"]
value["average_score"] = average(value["average_score"])
value["average_without_0"] = average(value["average_without_0"])
value["average_payout"] = value["total_payout"] / value["reviewed"]
value["moderators"] = Counter(value["moderators"]).most_common()
value["pct_voted"] = percentage(value["reviewed"], value["voted"])
# Add Utopian.io's vote statistics
value["utopian_total"] = [vote for vote in value["utopian_total"]
if vote != 0]
value["average_utopian_vote"] = average(value["utopian_total"])
value["utopian_total"] = sum(value["utopian_total"])
project_list.append(value)
return {"projects": project_list} | 91c27b504fc974b26f4e76b8a3f78e3665a21efa | 11,050 |
def exportSDFVisual(visualobj, linkobj, visualdata, indentation, modelname):
"""Simple wrapper for visual data of links.
The visual object is required to determine the position (pose) of the
object.
If relative poses are used the data found in visualdata (key pose) is used.
Otherwise the pose of the visual object will be combined with all
collected links up to the rootobject (see
phobos.utils.editing.getCombinedTransform).
Args:
        visualobj: visual object used to determine the pose
        linkobj: link object the visual belongs to
        visualdata: data as provided by dictionary (should contain name,
            geometry)
        indentation: indentation at current level
        modelname: the name of the model (required for geometry)
Returns:
: str -- writable xml line
"""
tagger = xmlTagger(initial=indentation)
tagger.descend('visual', attribs={'name': visualdata['name']})
# OPT: tagger.attrib('cast_shadows', ...)
# OPT: tagger.attrib('laser_retro', ...)
# OPT: tagger.attrib('transparency', ...)
# OPT: tagger.descend('meta')
# OPT: tagger.attrib('layer', ...)
# tagger.ascend()
# OPT: tagger.write(exportSDFFrame(..., tagger.get_indent()))
# Pose data of the visual is transformed by link --> use local matrix
matrix = visualobj.matrix_local
posedata = {
'rawmatrix': matrix,
'matrix': [list(vector) for vector in list(matrix)],
'translation': list(matrix.to_translation()),
'rotation_euler': list(matrix.to_euler()),
'rotation_quaternion': list(matrix.to_quaternion()),
}
# overwrite absolute position of the visual object
tagger.write(exportSDFPose(posedata, tagger.get_indent()))
# write material data if available
if 'material' in visualdata:
tagger.write(exportSDFMaterial(visualdata['material'], tagger.get_indent()))
tagger.write(exportSDFGeometry(visualdata['geometry'], tagger.get_indent(), modelname))
tagger.ascend()
return "".join(tagger.get_output()) | f556a1eb1cef42adfde28c481a3443f149219518 | 11,051 |
import resource
def register_module():
"""Callback for module registration. Sets up URL routes."""
global custom_module # pylint: disable=global-statement
permissions = [
roles.Permission(EDIT_STUDENT_GROUPS_PERMISSION,
messages.EDIT_STUDENT_GROUPS_PERMISSION_DESCRIPTION),
]
def permissions_callback(unused_application_context):
return permissions
def notify_module_enabled():
"""Callback at module-enable time, just after module registration.
Responsible for registering module's callbacks and other items with
core and other modules.
"""
model_caching.CacheFactory.build(
MODULE_NAME_AS_IDENTIFIER, MODULE_NAME + " Caching",
messages.ENABLE_GROUP_CACHING,
max_size_bytes=(
StudentGroupAvailabilityRestHandler.MAX_NUM_MEMBERS * 1024 * 4),
ttl_sec=60 * 60, dao_class=StudentGroupDAO)
# Tell permissioning system about permission for this module.
roles.Roles.register_permissions(custom_module, permissions_callback)
# Navigation sub-tab for showing list of student groups, and
# associated role-level permission.
dashboard.DashboardHandler.add_sub_nav_mapping(
'settings', MODULE_NAME_AS_IDENTIFIER, 'Student Groups',
action=StudentGroupListHandler.ACTION,
contents=StudentGroupListHandler.render_groups_view)
dashboard.DashboardHandler.map_get_action_to_permission(
StudentGroupListHandler.ACTION, custom_module,
EDIT_STUDENT_GROUPS_PERMISSION)
# Register action for add/edit/delete of student group.
dashboard.DashboardHandler.add_custom_get_action(
StudentGroupRestHandler.ACTION,
handler=StudentGroupRestHandler.edit_student_group,
in_action=StudentGroupListHandler.ACTION)
dashboard.DashboardHandler.map_get_action_to_permission(
StudentGroupRestHandler.ACTION, custom_module,
EDIT_STUDENT_GROUPS_PERMISSION)
# Override existing action for availability. For UX convenience,
# we want to have the same page modify overall course availability
# as well as per-group availability.
dashboard.DashboardHandler.add_custom_get_action(
availability.AvailabilityRESTHandler.ACTION,
StudentGroupAvailabilityRestHandler.get_form, overwrite=True)
# Register a callback to add the user's student group ID (if any) to
# recorded events.
models.EventEntity.EVENT_LISTENERS.append(
_add_student_group_to_event)
# Register a component with the student-aggregator data pump source
# so that student-aggregate records get marked with the group ID
# for that student.
student_aggregate.StudentAggregateComponentRegistry.register_component(
AddToStudentAggregate)
# Register a callback with models.models.StudentProfileDAO to let us
# know when a student registers. This allows us to move the
# Definitive Truth about group membership to the Student record.
models.StudentProfileDAO.STUDENT_CREATION_HOOKS.append(
StudentGroupMembership.user_added_callback)
# Register a callback with Course so that when anyone asks for the
# student-facing list of units and lessons we can modify them as
# appropriate.
courses.Course.COURSE_ELEMENT_STUDENT_VIEW_HOOKS.append(
modify_unit_and_lesson_attributes)
# Register a callback with Course so that when the environment is
# fetched, we can submit overwrite items.
courses.Course.COURSE_ENV_POST_COPY_HOOKS.append(
modify_course_environment)
# Register student group as a generically handle-able translatable
# resource.
resource.Registry.register(ResourceHandlerStudentGroup)
# Register student group as a translatable item; the title and
# description can appear on student profile pages.
i18n_dashboard.TranslatableResourceRegistry.register(
TranslatableResourceStudentGroups)
# Register a section on the student profile to add the current
# student's group - if any.
utils.StudentProfileHandler.EXTRA_PROFILE_SECTION_PROVIDERS.append(
_add_student_group_to_profile)
# Register with gradebook to add student group as a filterable
# item.
gradebook.RawAnswersDataSource.FILTERS.append(StudentGroupFilter)
# Register with generator feeding gradebook to add some handling to
# the map and reduce steps so we can generate our filter-able data
# column in the generator's output.
gradebook.RawAnswersGenerator.register_hook(
MODULE_NAME,
_add_student_group_to_map_result,
_add_student_group_to_kwargs)
# Add our types to the set of DB tables for download/upload of course.
courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT.add(StudentGroupEntity)
courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT.add(
StudentGroupMembership)
custom_module = custom_modules.Module(
MODULE_NAME, 'Define and manage groups of students.',
global_routes=[
(EmailToObfuscatedUserIdCleanup.URL,
EmailToObfuscatedUserIdCleanup),
], namespaced_routes=[
(StudentGroupRestHandler.URL,
StudentGroupRestHandler),
(StudentGroupAvailabilityRestHandler.URL,
StudentGroupAvailabilityRestHandler)
],
notify_module_enabled=notify_module_enabled)
return custom_module | 82e8d57c2b0f73ae21b460da61ce047b4a25ebe3 | 11,052 |
import scipy.stats
def KL_distance(image1, image2):
"""
    Given two images, calculate the KL divergence between the two.
    2d arrays are not supported by scipy.stats.entropy, so we flatten both
    arrays and compare each pixel in image1 to the corresponding pixel in
    image2.
return scipy.stats.entropy(image1.ravel(), image2.ravel()) | 6419c2f6456365e027fc7eff6f4b171e5eb4fc5f | 11,055 |
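# Minimal usage sketch: scipy.stats.entropy normalises each flattened image to
# a probability distribution, so pixel values only need to be non-negative.
import numpy as np

img_a = np.array([[1.0, 2.0], [3.0, 4.0]])
img_b = np.ones((2, 2))
print(KL_distance(img_a, img_b))  # ~0.106 nats; 0.0 for identical images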
def stop_all_bots():
"""
    This function handles the REST API call to stop polling for all bots
    which have ever started polling.
:return:
"""
bots_stopped = procedures.stop_all() # Stop all bots.
    botapi_logger.info('Successfully stopped {count} bots from polling in '
                       'stop_all api call.'.format(count=len(bots_stopped)))
    if len(bots_stopped) > 0:
return jsonify({
"result": "success",
"message": "Successfully stopped {count} previously running "
"bots.".format(count=len(bots_stopped)),
"ids": [bot_id for bot_id in bots_stopped]
}), 200
else:
        return internal_server_error(
            message="No previously running bots to stop.") | 7e0bdaa0ae631e631cfbc56966311e59fc510d52 | 11,056
def load_word_embedding_dict(embedding, embedding_path, normalize_digits=True):
"""
load word embeddings from file
:param embedding:
:param embedding_path:
    :return: embedding dict, embedding dimension, caseless
"""
print "loading embedding: %s from %s" % (embedding, embedding_path)
if embedding == 'word2vec':
# loading word2vec
word2vec = Word2Vec.load_word2vec_format(embedding_path, binary=True)
embedd_dim = word2vec.vector_size
return word2vec, embedd_dim, False
elif embedding == 'glove':
# loading GloVe
embedd_dim = -1
embedd_dict = dict()
with gzip.open(embedding_path, 'r') as file:
for line in file:
line = line.strip()
line = line.decode('utf-8')
if len(line) == 0:
continue
tokens = line.split()
if len(tokens) <101:
continue
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
else:
assert (embedd_dim + 1 == len(tokens))
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
embedd[:] = tokens[1:]
word = data_utils.DIGIT_RE.sub(b"0", tokens[0]) if normalize_digits else tokens[0]
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, True
elif embedding == 'senna':
# loading Senna
embedd_dim = -1
embedd_dict = dict()
with gzip.open(embedding_path, 'r') as file:
for line in file:
line = line.strip()
line = line.decode('utf-8')
if len(line) == 0:
continue
tokens = line.split()
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
else:
assert (embedd_dim + 1 == len(tokens))
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
embedd[:] = tokens[1:]
word = data_utils.DIGIT_RE.sub(b"0", tokens[0]) if normalize_digits else tokens[0]
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, True
elif embedding == 'sskip':
embedd_dim = -1
embedd_dict = dict()
with gzip.open(embedding_path, 'r') as file:
# skip the first line
file.readline()
for line in file:
line = line.strip()
line = line.decode('utf-8')
if len(line) == 0:
continue
tokens = line.split()
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
start = len(tokens) - embedd_dim
word = ' '.join(tokens[0:start])
embedd[:] = tokens[start:]
word = data_utils.DIGIT_RE.sub(b"0", word) if normalize_digits else word
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, True
elif embedding == 'polyglot':
words, embeddings = pickle.load(open(embedding_path, 'rb'))
_, embedd_dim = embeddings.shape
embedd_dict = dict()
for i, word in enumerate(words):
embedd = np.empty([1, embedd_dim], dtype=theano.config.floatX)
embedd[:] = embeddings[i, :]
word = data_utils.DIGIT_RE.sub(b"0", word) if normalize_digits else word
embedd_dict[word] = embedd
return embedd_dict, embedd_dim, False
else:
raise ValueError("embedding should choose from [word2vec, senna]") | 98cda8061aa49c708bc6986a6ab036e8941967f6 | 11,057 |
def random_exponential(shape=(40,60), a0=100, dtype=float) :
"""Returns numpy array of requested shape and type filled with exponential distribution for width a0.
"""
a = a0*np.random.standard_exponential(size=shape)
return a.astype(dtype) | 29d3e438145d4495191868c956942b9626b76918 | 11,059 |
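# Minimal usage sketch: a 40x60 float32 grid of exponentially distributed
# values with scale a0=50 (so the sample mean is roughly 50).
import numpy as np

img = random_exponential(shape=(40, 60), a0=50, dtype=np.float32)
# img.shape == (40, 60), img.dtype == np.float32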
import json
def get_mpi_components_from_files(fileList, threads=False):
"""
Given a list of files to read input data from, gets a percentage of time
spent in MPI, and a breakdown of that time in MPI
"""
percentDict = dict()
timeDict = dict()
for filename in fileList:
filename = filename.strip()
try:
# Open the file for reading
with open(filename, "r") as infile:
# Read the json
jsonDict = json.load(infile)
runtime = get_runtime(jsonDict)
numprocs = get_num_threads(jsonDict) if threads else get_num_processes(jsonDict)
# Read the overview data and get the percentage of overall time spent in mpi
subDict = get_overview_data(jsonDict)
mpiPercent = get_dict_field_val(subDict, ["mpi", "percent"]) #mpiTime = (percent / 100.) * runtime
# Now get the sub-percentage of the mpi time
mpiEntry = get_dict_field_val(jsonDict, ["data", "mpi"])
# Get all of the percentages (as a percentage of total time)
mpiSubPercent = [float(get_dict_field_val(mpiEntry, [field])) * mpiPercent / 100. for field in mpiSubPercentages]
mpiSubTime = [runtime * subpercent / 100. for subpercent in mpiSubPercent]
percentDict[numprocs] = mpiSubPercent
timeDict[numprocs] = mpiSubTime
except IOError:
print("File " + filename + " does not exist. Skipping.")
pass
return percentDict, timeDict | 34549198676b823cf9e02ec927cb1e5fc30de2b8 | 11,060 |
import urllib
def get_character_url(name):
"""Gets a character's tibia.com URL"""
return url_character + urllib.parse.quote(name.encode('iso-8859-1')) | 62dc27528b7b9b303367551b8cba0a02204d0eb6 | 11,061 |
def parse_input(lines):
"""Parse the input document, which contains validity rules for the various
ticket fields, a representation of my ticket, and representations of a
number of other observed tickets.
Return a tuple of (rules, ticket, nearby_tickets)
"""
section = parse_sections(lines)
rules = parse_rules(section[0])
my_ticket = parse_ticket(section[1][1])
tickets = [parse_ticket(line) for line in section[2][1:]]
return (rules, my_ticket, tickets) | cccf2a9b47768428b2004caab1b3cab15a369a68 | 11,062 |
from dateutil import tz
def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
return data | a35c8b1d1fb7f38fc23439bbe5b9778062fc6aa7 | 11,063 |
from typing import Optional
def maximum(
left_node: NodeInput,
right_node: NodeInput,
auto_broadcast: str = "NUMPY",
name: Optional[str] = None,
) -> Node:
"""Return node which applies the maximum operation to input nodes elementwise."""
return _get_node_factory_opset1().create(
"Maximum", [left_node, right_node], {"auto_broadcast": auto_broadcast.upper()}
) | 9ca2ac093059a9c7c2a1b310635c551d1982b1bb | 11,064 |
def create_template_error():
"""
    Creates a template for generating errors
"""
return {'response': False} | f15c27cc980cf1bda6b82353d01bbe7871fdbff1 | 11,065 |
def e_qest(model, m):
"""
Calculation of photocounting statistics estimation from
photon-number statistics estimation
Parameters
----------
model : InvPBaseModel
m : int
Photocount number.
"""
return quicksum(model.T[m, n] * model.PEST[n]
for n in model.PSET) | b4b5f9fb4ba1c142af3d91d170fdb90ae960dd0e | 11,067 |
def load_input(fname):
"""Read in the data, return as a list."""
data = [""]
with open(fname, "r") as f:
for line in f.readlines():
if line.strip("\n"):
data[-1] += line.strip("\n") + " "
else:
data[-1] = data[-1].strip(" ")
data.append("")
    data[-1] = data[-1].strip(" ")
return data | f83021dd416e3a959996a16bb8d0a0e7352a471f | 11,068 |
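# Hedged usage sketch ("records.txt" is a made-up file name): blank lines split
# records, and the lines of each record are joined with single spaces.
with open("records.txt", "w") as f:
    f.write("abc\ndef\n\nxyz\n")
print(load_input("records.txt"))  # ['abc def', 'xyz']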
import json
def parse_repo_layout_from_json(file_):
"""Parse the repo layout from a JSON file.
Args:
file_ (File): The source file.
Returns:
RepoLayout
Raises:
InvalidConfigFileError: The configuration file is invalid.
"""
def encode_dict(data):
new_data = {}
for key, value in data.items():
# Waf Node API requires String objects
if not isinstance(key, str):
new_data[key.encode('utf-8')] = [i.encode('utf-8')
for i in value]
else:
new_data[key] = value
return new_data
try:
loaded_dict = json.load(file_, object_hook=encode_dict)
except ValueError as e:
raise blderror.InvalidConfigFileError('Invalid .bdelayoutconfig: %s' %
e.message)
repo_layout = repolayout.RepoLayout()
for key in loaded_dict:
if key in repo_layout.__dict__:
setattr(repo_layout, key, loaded_dict[key])
else:
logutil.warn('Invalid field in .bdelayoutconfig: %s.' %
key)
return repo_layout | db1b7843c26ecc6796233e0cc193b41336fecf2d | 11,069 |
def SizeArray(input_matrix):
"""
Return the size of an array
"""
nrows=input_matrix.shape[0]
ncolumns=input_matrix.shape[1]
return nrows,ncolumns | 3ac45e126c1fea5a70d9d7b35e967896c5d3be0b | 11,070 |
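# Minimal usage sketch: the helper just unpacks the first two entries of
# numpy's .shape, so it expects a 2-D input_matrix.
import numpy as np

nrows, ncolumns = SizeArray(np.zeros((3, 5)))
# nrows == 3, ncolumns == 5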
def show_fun_elem_state_machine(fun_elem_str, xml_state_list, xml_transition_list,
xml_fun_elem_list):
"""Creates lists with desired objects for <functional_element> state, send them to
plantuml_adapter.py then returns url_diagram"""
new_fun_elem_list = set()
main_fun_elem = check_get_object(fun_elem_str, **{'xml_fun_elem_list': xml_fun_elem_list})
if not main_fun_elem:
return None
if not main_fun_elem.allocated_state_list:
print(f"No state allocated to {main_fun_elem.name} (no display)")
return None
new_fun_elem_list.add(main_fun_elem)
new_state_list = {s for s in xml_state_list if s.id in main_fun_elem.allocated_state_list}
new_transition_list = get_transitions(new_state_list, xml_transition_list)
_, url_diagram = plantuml_adapter.get_state_machine_diagram(new_state_list,
new_transition_list,
xml_fun_elem_list)
print("State Machine Diagram for " + fun_elem_str + " generated")
return url_diagram | 3d8b1426e791bcc40c9850723da9bf350bea361f | 11,071 |
def get_bank_account_rows(*args, **kwargs):
"""
    Gets the list
:param args:
:param kwargs:
:return:
"""
return db_instance.get_rows(BankAccount, *args, **kwargs) | 0599b2bbae3b7bb044789db6c18f47604c3c9171 | 11,072 |
def pybo_mod(tokens, tag_codes=[]):
"""extract text/pos tuples from Token objects"""
txt_tags = []
for token in tokens:
tags = []
tags.append(token.text)
# Select and order the tags
for tag_code in tag_codes:
tags.append(get_tag(token, tag_code))
txt_tags.append(tags)
return txt_tags | e96bb6a4774a0e983f2288536921e98207aeaa4b | 11,074 |
def acf(
da: xr.DataArray, *, lag: int = 1, group: str | Grouper = "time.season"
) -> xr.DataArray:
"""Autocorrelation function.
Autocorrelation with a lag over a time resolution and averaged over all years.
Parameters
----------
da : xr.DataArray
Variable on which to calculate the diagnostic.
lag: int
Lag.
group : {'time.season', 'time.month'}
Grouping of the output.
E.g. If 'time.month', the autocorrelation is calculated over each month separately for all years.
Then, the autocorrelation for all Jan/Feb/... is averaged over all years, giving 12 outputs for each grid point.
Returns
-------
xr.DataArray
lag-{lag} autocorrelation of the variable over a {group.prop} and averaged over all years.
See Also
--------
statsmodels.tsa.stattools.acf
References
----------
Alavoine M., and Grenier P. (under review) The distinct problems of physical inconsistency and of multivariate bias potentially involved in the statistical adjustment of climate simulations. International Journal of Climatology, submitted on September 19th 2021. (Preprint: https://doi.org/10.31223/X5C34C)
Examples
--------
>>> from xclim.testing import open_dataset
>>> pr = open_dataset(path_to_pr_file).pr
>>> acf(da=pr, lag=3, group="time.season")
"""
attrs = da.attrs
def acf_last(x, nlags):
# noqa: D403
"""statsmodels acf calculates acf for lag 0 to nlags, this return only the last one."""
# As we resample + group, timeseries are quite short and fft=False seems more performant
out_last = stattools.acf(x, nlags=nlags, fft=False)
return out_last[-1]
@map_groups(out=[Grouper.PROP], main_only=True)
def _acf(ds, *, dim, lag, freq):
out = xr.apply_ufunc(
acf_last,
ds.dat.resample({dim: freq}),
input_core_dims=[[dim]],
vectorize=True,
kwargs={"nlags": lag},
)
out = out.mean("__resample_dim__")
return out.rename("out").to_dataset()
out = _acf(da.rename("dat").to_dataset(), group=group, lag=lag, freq=group.freq).out
out.attrs.update(attrs)
out.attrs["long_name"] = f"lag-{lag} autocorrelation"
out.attrs["units"] = ""
out.name = "acf"
return out | 630eb27574edb40f363f41656a23801f11cefb1c | 11,075 |