content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def timed_zip_map_agent(func, in_streams, out_stream,
call_streams=None, name=None):
"""
Parameters
----------
in_streams: list of Stream
The list of input streams of the agent.
Each input stream is timed, i.e. the elements
are pairs (timestamp, value)
out_stream: Stream
The single output stream of the agent.
The output_stream is also timed.
call_streams: list of Stream
The list of call_streams. A new value in any stream in this
list causes a state transition of this agent.
name: Str
Name of the agent created by this function.
Returns
-------
Agent.
The agent created by this function.
Notes
-----
Each stream in in_streams must be a stream of tuples or lists
or NumPy arrays where element[0] is a time and where time is
a total order. Each stream in in_streams must be strictly
monotonically increasing in time.
out_stream merges the in_streams in order of time. An element
of out_stream is a list where element[0] is a time T and
element[1] is a list consisting of all elements of in_streams
that have time T.
"""
# Check types of arguments
check_list_of_streams_type(list_of_streams=in_streams,
agent_name=name, parameter_name='in_streams')
check_stream_type(name, 'out_stream', out_stream)
check_list_of_streams_type(list_of_streams=call_streams,
agent_name=name, parameter_name='call_streams')
num_in_streams = len(in_streams)
indices = range(num_in_streams)
# The transition function for this agent.
def transition(in_lists, state):
# Check the types of in_lists
check_in_lists_type(name, in_lists, num_in_streams)
# input_lists is the list of lists that this agent can operate on
# in this transition.
input_lists = [in_list.list[in_list.start:in_list.stop]
for in_list in in_lists]
# pointers is a list where pointers[i] is a pointer into the i-th
# input list
pointers = [0 for i in indices]
# stops is a list where pointers[i] must not exceed stops[i].
stops = [len(input_lists[i]) for i in indices]
# output_list is the single output list for this agent.
output_list = []
while all(pointers[i] < stops[i] for i in indices):
# slice is a list with one element per input stream.
# slice[i] is the value pointed to by pointers[i].
slice = [input_lists[i][pointers[i]] for i in indices]
# slice[i][0] is the time field for slice[i].
# earliest_time is the earliest time pointed to by pointers.
earliest_time = min(slice[i][0] for i in indices)
# slice[i][1:] is the list of fields other than the time
# field for slice[i].
# next_output_value is a list with one element for
# each input stream.
# next_output_value[i] is None if the time
# for slice[i] is later than the earliest time. If the time
# for slice[i] is the earliest time, then next_output_value[i]
# is the list of all the non-time fields.
next_output_value = [slice[i][1] if slice[i][0] == earliest_time
else None for i in indices]
# increment pointers for those indexes where the time was the
# earliest time.
pointers = [pointers[i]+1 if slice[i][0] == earliest_time
else pointers[i] for i in indices]
# Make next_output a list consisting of a time: the earliest time
# followed by a sequence of lists, one for each input stream.
# Each list in this sequence consists of the non-time fields.
next_output = [earliest_time]
next_output.append(next_output_value)
next_output = func(next_output)
# output_list has an element for each time in the input list.
output_list.append(next_output)
# Return: (1) output_lists, the list of outputs, one per
# output stream. This agent has a single output stream
# and so output_lists = [output_list]
# (2) the new state; the state is irrelevant for this
# agent because all it does is merge streams.
# (3) the new starting pointer into this stream for
# this agent. Since this agent has read
# pointers[i] number of elements in the i-th input
# stream, move the starting pointer for the i-th input
# stream forward by pointers[i].
return [output_list], state, [in_lists[i].start+pointers[i] for i in indices]
# Finished transition
# Create agent
state = None
# Create agent with the following parameters:
# 1. list of input streams.
# 2. list of output streams. This agent has a single output stream and so
# out_streams is [out_stream].
# 3. transition function
# 4. new state (irrelevant for this agent), so state is None
# 5. list of calling streams
# 6. Agent name
return Agent(in_streams, [out_stream], transition, state, call_streams, name) | 0c23ff84ed72ee25b04bf118d57203c7831702e8 | 3,600 |
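To make the merge semantics concrete, here is a small self-contained sketch that replays only the time-alignment loop on plain Python lists. It does not use the Stream/Agent classes from the surrounding library, and the timestamps and values are invented for illustration.

```python
def merge_timed_lists(input_lists):
    """Mimic the transition loop: emit [t, [value_or_None, ...]] per earliest time."""
    pointers = [0] * len(input_lists)
    output = []
    while all(p < len(lst) for p, lst in zip(pointers, input_lists)):
        heads = [lst[p] for p, lst in zip(pointers, input_lists)]
        earliest = min(h[0] for h in heads)
        values = [h[1] if h[0] == earliest else None for h in heads]
        pointers = [p + 1 if h[0] == earliest else p for p, h in zip(pointers, heads)]
        output.append([earliest, values])
    return output

# Stream 0 has timestamps 1 and 3; stream 1 has timestamps 1 and 2.
print(merge_timed_lists([[(1, 'a'), (3, 'b')], [(1, 'x'), (2, 'y')]]))
# [[1, ['a', 'x']], [2, [None, 'y']]]
```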
def get_repository_ids_requiring_prior_install( trans, tsr_ids, repository_dependencies ):
"""
Inspect the received repository_dependencies and determine if the encoded id of each required repository is in the received tsr_ids. If so,
then determine whether that required repository should be installed prior to its dependent repository. Return a list of encoded repository
ids, each of which is contained in the received list of tsr_ids, and whose associated repositories must be installed prior to the dependent
repository associated with the received repository_dependencies.
"""
prior_install_ids = []
if repository_dependencies:
for key, rd_tups in repository_dependencies.items():
if key in [ 'description', 'root_key' ]:
continue
for rd_tup in rd_tups:
tool_shed, name, owner, changeset_revision, prior_installation_required = suc.parse_repository_dependency_tuple( rd_tup )
if asbool( prior_installation_required ):
repository = suc.get_repository_for_dependency_relationship( trans.app, tool_shed, name, owner, changeset_revision )
if repository:
encoded_repository_id = trans.security.encode_id( repository.id )
if encoded_repository_id in tsr_ids:
prior_install_ids.append( encoded_repository_id )
return prior_install_ids | 035e7f358b8af7d4915b255b988f1fabc56605c1 | 3,601 |
from typing import Iterable
from typing import Tuple
from typing import List
def get_words_and_spaces(
words: Iterable[str], text: str
) -> Tuple[List[str], List[bool]]:
"""Given a list of words and a text, reconstruct the original tokens and
return a list of words and spaces that can be used to create a Doc. This
can help recover destructive tokenization that didn't preserve any
whitespace information.
words (Iterable[str]): The words.
text (str): The original text.
RETURNS (Tuple[List[str], List[bool]]): The words and spaces.
"""
if "".join("".join(words).split()) != "".join(text.split()):
raise ValueError(Errors.E194.format(text=text, words=words))
text_words = []
text_spaces = []
text_pos = 0
# normalize words to remove all whitespace tokens
norm_words = [word for word in words if not word.isspace()]
# align words with text
for word in norm_words:
try:
word_start = text[text_pos:].index(word)
except ValueError:
raise ValueError(Errors.E194.format(text=text, words=words)) from None
if word_start > 0:
text_words.append(text[text_pos : text_pos + word_start])
text_spaces.append(False)
text_pos += word_start
text_words.append(word)
text_spaces.append(False)
text_pos += len(word)
if text_pos < len(text) and text[text_pos] == " ":
text_spaces[-1] = True
text_pos += 1
if text_pos < len(text):
text_words.append(text[text_pos:])
text_spaces.append(False)
return (text_words, text_spaces) | 205c48435018a99433fce298a6035de902774810 | 3,602 |
from typing import List
import ast
def parse_names(source: str) -> List['Name']:
"""Parse names from source."""
tree = ast.parse(source)
visitor = ImportTrackerVisitor()
visitor.visit(tree)
return sum([split_access(a) for a in visitor.accessed], []) | 42f622052bc8acd5cdec187cd16695cec3b86154 | 3,603 |
def get_supported_language_variant(lang_code, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing found.
If `strict` is False (the default), the function will look for an alternative
country-specific variant when the language code being checked is not found.
lru_cache should have a maxsize to prevent from memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
except KeyError:
pass
generic_lang_code = lang_code.split('-')[0]
possible_lang_codes.append(generic_lang_code)
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + '-'):
return supported_code
raise LookupError(lang_code) | 6e99dc7d280ea28c3240f76a70b57234b9da98d3 | 3,604 |
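The lookup order (exact code, declared fallbacks, generic language, then a country-specific sibling) can be replayed with a standalone sketch. This is not Django's implementation: the supported list is hard-coded, the fallback table is a made-up stand-in for LANG_INFO, and the check_for_language step is omitted.

```python
SUPPORTED = ['en', 'fr', 'pt-br']
FALLBACKS = {'zh-my': ['zh-hans']}  # hypothetical fallback table

def pick_variant(lang_code, strict=False):
    candidates = [lang_code] + FALLBACKS.get(lang_code, [])
    generic = lang_code.split('-')[0]
    candidates.append(generic)
    for code in candidates:
        if code in SUPPORTED:
            return code
    if not strict:
        # fall back to any country-specific variant of the generic language
        for supported in SUPPORTED:
            if supported.startswith(generic + '-'):
                return supported
    raise LookupError(lang_code)

print(pick_variant('fr-ca'))  # 'fr'    (generic variant)
print(pick_variant('pt'))     # 'pt-br' (country-specific sibling, non-strict only)
```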
import numpy as np
def mean_square_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate MSE loss
Parameters
----------
y_true: ndarray of shape (n_samples, )
True response values
y_pred: ndarray of shape (n_samples, )
Predicted response values
Returns
-------
MSE of given predictions
"""
return (1 / y_true.shape[0]) * (np.sum((y_true - y_pred) ** 2)) | a9dbbd2264cba04618531024ce7eaae0e7c76b8d | 3,605 |
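A quick sanity check, with numpy imported as np:

```python
import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.0, 2.0, 5.0])
# Squared errors are 0, 0 and 4, so the mean is 4/3.
print(mean_square_error(y_true, y_pred))  # 1.3333...
```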
import sys
import os
import pickle
def gmail_auth(logfile, mode):
"""Handles Gmail authorization via Gmail API."""
creds = None
# the file .token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time
pickled_token = "../conf/.token.pickle"
if mode == "dev":
pickled_token = "../conf/.dev.token.pickle"
elif mode != "norm":
sys.stderr.write("Error: Call to gmail_auth with unknown mode: '" +
mode + "'...")
exit(1)
if os.path.exists(pickled_token):
with open(pickled_token, "rb") as token:
creds = pickle.load(token)
# if there are no (valid) credentials available, let the user log in
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
req_ret = None
try:
req_ret = func_timeout(LIMIT, Request)
except FunctionTimedOut:
log(logfile, "Request was not completed within " + str(LIMIT) +
" seconds.")
exit(1)
except:
log(logfile, "Something unexpected happened when trying to " +
"refresh Google credentials.")
exit(1)
creds.refresh(req_ret)
else:
flow = InstalledAppFlow.from_client_secrets_file("../conf/" +
"credentials.json",
SCOPES)
creds = None
try:
creds = func_timeout(LIMIT, flow.run_local_server)
except FunctionTimedOut:
log(logfile, "Authorization was not completed within " +
str(LIMIT) + " seconds.")
exit(1)
except:
log(logfile, "Something unexpected happened when trying to " +
"load Google credentials.")
exit(1)
# save the credentials for the next run
with open(pickled_token, "wb") as token:
pickle.dump(creds, token)
return creds | 223ea8c1b9e80505f8b0aa58a9fdebb8b411d262 | 3,606 |
def graduation_threshold(session):
"""get graduation threshold
url : "/user/graduation-threshold"
Args:
session ([requests.session]): must be login webap!
Returns:
[requests.models.Response]: requests response
other error will return False
"""
# post it, it will return Aength.kuas.edu.tw cookie
Aength_login = session.post('https://webap.nkust.edu.tw/nkust/fnc.jsp',
data={'fncid': 'AG635'})
# get post data
try:
root = etree.HTML(Aength_login.text)
term_form_xpath = root.xpath('//input[@type="hidden"]')
term_form = {i.values()[1]: i.values()[-1] for i in term_form_xpath}
except:
return False
# final post
query_url = 'http://Aength.kuas.edu.tw/AUPersonQ.aspx'
res = session.post(url=query_url, data=term_form)
return res | 18a1e3f1389995ee1c41fe49d16f047a3e4d8bf8 | 3,607 |
def q_conjugate(q):
"""
quaternion conjugate
"""
w, x, y, z = q
return (w, -x, -y, -z) | bb7e28d0318702d7d67616ba2f7dc0e922e27c72 | 3,608 |
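Usage with a quaternion stored as a (w, x, y, z) tuple; the scalar part is kept and the vector part is negated:

```python
q = (1.0, 2.0, 3.0, 4.0)
print(q_conjugate(q))  # (1.0, -2.0, -3.0, -4.0)
```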
def row_r(row, boxsize):
"""Cell labels in 'row' of Sudoku puzzle of dimension 'boxsize'."""
nr = n_rows(boxsize)
return range(nr * (row - 1) + 1, nr * row + 1) | b69e3995475b9ab62d9684c79d0d2473273487c7 | 3,609 |
from typing import Sequence
def get_set_from_word(permutation: Sequence[int], digit: Digit) -> set[int]:
"""
Returns a digit set from a given digit word,
based on the current permutation.
i.e. if:
permutation = [6, 5, 4, 3, 2, 1, 0]
digit = 'abcd'
then output = {6, 5, 4, 3}
"""
return {permutation[ord(char) - ord("a")] for char in digit} | 06058d96e94398f4a26613aefc8b5eeb92dec3e5 | 3,610 |
def get_avg_sentiment(sentiment):
"""
Compiles and returns the average sentiment
of all titles and bodies of our query
"""
average = {}
for coin in sentiment:
# sum up all compound readings from each title & body associated with the
# coin we detected in keywords
average[coin] = sum([item['compound'] for item in sentiment[coin]])
# get the mean compound sentiment if it's not 0
if average[coin] != 0:
average[coin] = average[coin] / len(sentiment[coin])
return average | 6a79c3d4f28e18a33290ea86a912389a5b48b0f3 | 3,611 |
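For example, with VADER-style compound scores grouped per coin (the coin names and scores below are made up):

```python
sentiment = {
    'BTC': [{'compound': 0.5}, {'compound': 0.25}],
    'ETH': [{'compound': 0.0}, {'compound': 0.0}],
}
print(get_avg_sentiment(sentiment))
# {'BTC': 0.375, 'ETH': 0.0}  -- ETH stays 0.0 because its compound sum is 0
```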
from urllib.parse import urlparse
def is_valid(url):
"""
Checks whether `url` is a valid URL.
"""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme) | 80e981d4556b0de79a68994666ac56d8dbe9bdd5 | 3,612 |
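Note that a bare domain without a scheme fails the check, because urlparse then leaves netloc empty:

```python
print(is_valid("https://example.com/page"))  # True
print(is_valid("example.com"))               # False (no scheme, empty netloc)
print(is_valid("not a url"))                 # False
```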
def data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_total_size_put(uuid, tapi_common_capacity_value=None): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_total_size_put
creates or updates tapi.common.CapacityValue # noqa: E501
:param uuid: Id of connectivity-service
:type uuid: str
:param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated
:type tapi_common_capacity_value: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!' | b03bf60af4b98099a7b07278e72c72cf8b247823 | 3,613 |
def statistics_power_law_alpha(A_in):
"""
Compute the power law coefficient of the degree distribution of the input graph
Parameters
----------
A_in: sparse matrix or np.array
The input adjacency matrix.
Returns
-------
Power law coefficient
"""
degrees = A_in.sum(axis=0)
return powerlaw.Fit(degrees, xmin=max(np.min(degrees), 1)).power_law.alpha | 72f1ead0fa1e42752154ef1567ea8c10d407019d | 3,614 |
import os
def does_file_exist(path):
""" Checks if the given file is in the local filesystem.
Args:
path: A str, the path to the file.
Returns:
True on success, False otherwise.
"""
return os.path.isfile(path) | 38768ca739cf8a6f482bfb5d35cef397c70227c1 | 3,615 |
def common_gnuplot_settings():
""" common gnuplot settings. """
g_plot = Gnuplot.Gnuplot(persist=1)
# The following line is for rigor only. It seems to be assumed for .csv files
g_plot('set datafile separator \",\"')
g_plot('set ytics nomirror')
g_plot('set xtics nomirror')
g_plot('set xtics font ", 10"')
g_plot('set ytics font ", 10"')
g_plot('set tics out scale 1.0')
g_plot('set grid')
g_plot('set key out horiz')
g_plot('set key bot center')
g_plot('set key samplen 2 spacing .8 font ", 9"')
g_plot('set term png size 1200, 600')
g_plot('set title font ", 11"')
g_plot('set ylabel font ", 10"')
g_plot('set xlabel font ", 10"')
g_plot('set xlabel offset 0, 0.5')
g_plot('set xlabel "Elapsed Time (Seconds)"')
return(g_plot) | 0a8149c2fce1d7738b4c85bfb2eb82d32fa3c540 | 3,616 |
def video_feed_cam1():
"""Video streaming route. Put this in the src attribute of an img tag."""
cam = Camera(0)
return Response(gen(cam), mimetype='multipart/x-mixed-replace; boundary=frame') | ca11f40bb603dc45d2709b46719fc11bde526c55 | 3,617 |
def listDatasets(objects = dir()):
"""
Utility function to identify currently loaded datasets.
Function should be called with default parameters,
ie as 'listDatasets()'
"""
datasetlist = []
for item in objects:
try:
if eval(item + '.' + 'has_key("DATA")') == True:
datasetlist.append(item)
except AttributeError:
pass
return datasetlist | 061d7c9287c6166b3e7d55449be49db30400ce56 | 3,618 |
def _(node: FromReference, ctx: AnnotateContext) -> BoxType:
"""Check that the parent node had {node.name} as a valid reference. Raises
an error if not, else copy over the set of references.
"""
t = box_type(node.over)
ft = t.row.fields.get(node.name, None)
if not isinstance(ft, RowType):
raise ErrReference(
ErrType.INVALID_TABLE_REF, name=node.name, path=ctx.get_path(node.over)
)
return BoxType(node.name, ft) | f53c4f47d2027ae0a7fe59fb52f3fa48f463dda3 | 3,619 |
from typing import Mapping
from typing import Dict
def invert(d: Mapping):
"""
Invert a mapping's keys and values.
:param d:
:return:
"""
r: Dict = {}
for k, v in d.items():
r[v] = of(r[v], k) if v in r else k
return r | 65a49d107b4277a97035becb7d8be3cc1098544f | 3,620 |
def data_dir() -> str:
"""The directory where result data is written to"""
return '/tmp/bingads/' | 7ace22372ad0043eb6492e028687e31e78d8a85f | 3,621 |
def intensity_weighted_dispersion(data, x0=0.0, dx=1.0, rms=None,
threshold=None, mask_path=None, axis=0):
"""
Returns the intensity weighted velocity dispersion (second moment).
"""
# Calculate the intensity weighted velocity first.
m1 = intensity_weighted_velocity(data=data, x0=x0, dx=dx, rms=rms,
threshold=threshold, mask_path=mask_path,
axis=axis)[0]
# Rearrange the data to what we need.
mask = _read_mask_path(mask_path=mask_path, data=data)
data = np.moveaxis(data, axis, 0)
mask = np.moveaxis(mask, axis, 0)
mask = _threshold_mask(data=data, mask=mask, rms=rms, threshold=threshold)
npix = np.sum(mask, axis=0)
weights = get_intensity_weights(data, mask)
npix_mask = np.where(npix > 1, 1, np.nan)
vpix = dx * np.arange(data.shape[0]) + x0
vpix = vpix[:, None, None] * np.ones(data.shape)
# Intensity weighted dispersion.
m1 = m1[None, :, :] * np.ones(data.shape)
m2 = np.sum(weights * (vpix - m1)**2, axis=0) / np.sum(weights, axis=0)
m2 = np.sqrt(m2)
if rms is None:
return m2 * npix_mask, None
# Calculate the uncertainties.
dm2 = ((vpix - m1)**2 - m2**2) * rms / np.sum(weights, axis=0)
dm2 = np.sqrt(np.sum(dm2**2, axis=0)) / 2. / m2
return m2 * npix_mask, dm2 * npix_mask | dd3539ac2f48a1e9a6ceacc8262dc3a8e3646205 | 3,622 |
import requests
def vrtnws_api_request(service, path, params=None):
"""Sends a request to the VRTNWS API endpoint"""
url = BASE_URL_VRTNWS_API.format(service, path)
try:
res = requests.get(url, params)
try:
return res.json()
except ValueError:
return None
except requests.RequestException as ex:
print("VRTNWS API request '{}' failed:".format(url), ex)
return None | 9dad4e372348ff699762a5eaa42c9c1e7700e18e | 3,623 |
from typing import Type
def test_coreapi_schema(sdk_client_fs: ADCMClient, tested_class: Type[BaseAPIObject]):
"""Test coreapi schema"""
def _get_params(link):
result = {}
for field in link.fields:
result[field.name] = True
return result
schema_obj = sdk_client_fs._api.schema
with allure.step(f'Get {tested_class.__name__} schema objects'):
for path in tested_class.PATH:
assert path in schema_obj.data
schema_obj = schema_obj[path]
params = _get_params(schema_obj.links['list'])
with allure.step(f'Check if filters are acceptable for coreapi {tested_class.__name__}'):
for _filter in tested_class.FILTERS:
expect(
_filter in params,
f"Filter {_filter} should be acceptable for coreapi in class {tested_class.__name__}",
)
assert_expectations() | 8c205a2055ede6941b549112ef0893c37367ad71 | 3,624 |
import numpy as N
def augment_tensor(matrix, ndim=None):
"""
Increase the dimensionality of a tensor,
splicing it into an identity matrix of a higher
dimension. Useful for generalizing
transformation matrices.
"""
s = matrix.shape
if ndim is None:
ndim = s[0]+1
arr = N.identity(ndim)
arr[:s[0],:s[1]] = matrix
return arr | 89d6ea36d016f8648cdc62852e55351a965eae02 | 3,625 |
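For example, embedding a 2-D rotation matrix into a 3x3 homogeneous transform (N is the numpy alias used by the function):

```python
import numpy as N

rot2d = N.array([[0.0, -1.0],
                 [1.0,  0.0]])
print(augment_tensor(rot2d))
# [[ 0. -1.  0.]
#  [ 1.  0.  0.]
#  [ 0.  0.  1.]]
```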
def ping_redis() -> bool:
"""Call ping on Redis."""
try:
return REDIS.ping()
except (redis.exceptions.ConnectionError, redis.exceptions.ResponseError):
LOGGER.warning('Redis Ping unsuccessful')
return False | 0584109627470629141fccadc87075bfbb82e753 | 3,626 |
def calculate_pool_reward(height: uint32) -> uint64:
"""
Returns the pool reward at a certain block height. The pool earns 7/8 of the reward in each block. If the farmer
is solo farming, they act as the pool, and therefore earn the entire block reward.
These halving events will not be hit at the exact times due to fluctuations in difficulty. They will likely
come early, if the network space and VDF rates increase continuously.
We start off at 2,199,023,255,552, which is 2^41 (about 2.2 heather), and halve in year 2, then halve again in year 4, then
halve again in year 8, etc. After 5 halvings we drop to zero, but don't panic, that's year 64
right shift >> to half...
"""
if height == 0:
return uint64(int((7 / 8) * (_base_reward << 16)))
elif height < 1 * _blocks_per_year:
return uint64(int((7 / 8) * _base_reward))
elif height < 3 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 1)))
elif height < 7 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 2)))
elif height < 15 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 3)))
elif height < 31 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 4)))
elif height < 63 * _blocks_per_year:
return uint64(int((7 / 8) * (_base_reward >> 5)))
else:
return uint64(0) | 9493c9b422eb58d429b586d3ea19da4e537a0d71 | 3,627 |
import sys
def get_argument(index, default=''):
"""
Get the shell argument at the given index, or use the default value.
"""
if len(sys.argv) <= index:
return default
return sys.argv[index] | c2c8d78b608745428a1d6b4d97b5081e1f0961e7 | 3,628 |
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
username = request.form.get("username").strip()
password = request.form.get("password")
# Ensure username was submitted
if not username:
return apology("must provide username", 403)
# Ensure password was submitted
elif not password:
return apology("must provide password", 403)
username = request.form.get("username")
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username", username=username)
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], password):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html") | dcb37d57fc30d399c397c472d619b157575556ec | 3,629 |
def get_string_property(device_t, property):
""" Search the given device for the specified string property
@param device_t Device to search
@param property String to search for.
@return Python string containing the value, or None if not found.
"""
key = cf.CFStringCreateWithCString(
kCFAllocatorDefault,
property.encode("mac_roman"),
kCFStringEncodingMacRoman
)
CFContainer = iokit.IORegistryEntryCreateCFProperty(
device_t,
key,
kCFAllocatorDefault,
0
);
output = None
if CFContainer:
output = cf.CFStringGetCStringPtr(CFContainer, 0)
return output | fb08c31cd0bdbc3198b23a1c7d37d15d932a158f | 3,630 |
from typing import Callable
from typing import Any
def gamma_from_delta(
fn: Callable[..., Tensor], *, create_graph: bool = False, **params: Any
) -> Tensor:
"""Computes and returns gamma of a derivative from the formula of delta.
Note:
The keyword argument ``**params`` should contain at least one of
the following combinations:
- ``spot``
- ``moneyness`` and ``strike``
- ``log_moneyness`` and ``strike``
Args:
fn (callable): Function to calculate delta.
create_graph (bool, default=False): If ``True``,
graph of the derivative will be constructed,
allowing to compute higher order derivative products.
**params: Parameters passed to ``fn``.
Returns:
torch.Tensor
"""
return delta(pricer=fn, create_graph=create_graph, **params) | 508cb5df3cb19c5406ad190dbb0562140eab097a | 3,631 |
import re
def clean_filename(string: str) -> str:
"""
Clean illegal characters from the filename to avoid errors when saving to the filesystem.
:param string:
:return:
"""
string = string.replace(':', '_').replace('/', '_').replace('\x00', '_')
string = re.sub('[\n\\\*><?\"|\t]', '', string)
return string.strip() | 805023382e30c0d0113715cdf6c7bcbc8b383066 | 3,632 |
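For example, the colon and slash are replaced with underscores, while the asterisk, question mark, and newline are stripped:

```python
print(clean_filename('report: 2021/05*final?\n'))  # -> report_ 2021_05final
```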
def homework(request, id_class):
"""
View for listing the specified class' assignments
"""
cl = Classes.objects.get(pk=id_class)
assm = Assignments.objects.all().filter(a_class=cl)
return render_to_response("assignments.html", {"assignments": assm, "class": cl}, context_instance=RequestContext(request)) | be828d4519b73cdbd6f8f34b0aa10eda1299c0f0 | 3,633 |
import os
import importlib
def build_model(master_config):
"""
Imports the proper model class and builds model
"""
available_models = os.listdir("lib/classes/model_classes")
available_models = [i.replace(".py", "") for i in available_models]
model_type = master_config.Core_Config.Model_Config.model_type
model_class = None
if model_type in available_models:
model_class_module = importlib.import_module("lib.classes.model_classes." + model_type)
model_class = getattr(model_class_module, model_type)
else:
print("Error: model type not available. Check lib/classes/model_classes/ for available models", flush=True)
return False
model = model_class(master_config)
if master_config.Core_Config.Reload_Config.reload:
reload_path = master_config.Core_Config.Reload_Config.reload_path
if master_config.Core_Config.Reload_Config.by_name:
model.load_weights(reload_path + "model_and_config/final_model_weights.h5", by_name=True)
else:
model.load_weights(reload_path + "model_and_config/final_model_weights.h5")
return model | 3089df4094e211a0e5a7fe521bc08b2ca5ff23b0 | 3,634 |
def get_digits_from_right_to_left(number):
"""Return digits of an integer excluding the sign."""
number = abs(number)
if number < 10:
return (number, )
lst = list()
while number:
number, digit = divmod(number, 10)
lst.insert(0, digit)
return tuple(lst) | 6b5626ad42313534d207c75d2713d0c9dc97507c | 3,635 |
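The digits are extracted right-to-left but inserted at the front of the list, so the returned tuple reads most significant digit first:

```python
print(get_digits_from_right_to_left(-1234))  # (1, 2, 3, 4)
print(get_digits_from_right_to_left(7))      # (7,)
print(get_digits_from_right_to_left(0))      # (0,)
```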
def make_diamond(block):
"""
Return a block after rotating counterclockwise 45° to form a diamond
"""
result = []
upper = upper_triangle(block)
upper = [i.rjust(size-1) for i in upper]
upper_form = []
upper_length = len(upper)
for i in range(upper_length):
upper_form.append(diag_line(upper))
upper = upper_triangle(upper)
upper = [k.rjust(size-1-i-1) for k in upper]
upper_form = [' '.join(i) for i in upper_form]
upper_form = upper_form[::-1]
diag = diag_line(block)
diag = ' '.join(diag)
lower = lower_triangle(block)
lower = [i.ljust(size-1) for i in lower]
lower_form = []
lower_length = len(lower)
for i in range(lower_length):
lower_form.append(diag_line(lower))
lower = lower_triangle(lower)
lower = [k.ljust(size-1-i-1) for k in lower]
lower_form = [' '.join(i) for i in lower_form]
max_length = len(diag)
upper_form = [i.center(max_length) for i in upper_form]
lower_form = [i.center(max_length) for i in lower_form]
result += upper_form
result.append(diag)
result += lower_form
return result | e8e2afbdd34465a03e9db53169bf5c6bec1a375a | 3,636 |
def do_sitelist(parser, token):
"""
Allows a template-level call to get a list of all the active sites.
"""
return SitelistNode() | 208288989469a57e141f64be37d595fe8a1f84d6 | 3,637 |
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result | 6408fe02facd709a0d449cb87a2d963a0d92a007 | 3,638 |
def build_null_stop_time_series(feed, date_label='20010101', freq='5Min',
*, split_directions=False):
"""
Return a stop time series with the same index and hierarchical columns
as output by :func:`compute_stop_time_series_base`,
but fill it full of null values.
"""
start = date_label
end = pd.to_datetime(date_label + ' 23:59:00')
rng = pd.date_range(start, end, freq=freq)
inds = [
'num_trips',
]
sids = feed.stops.stop_id
if split_directions:
product = [inds, sids, [0, 1]]
names = ['indicator', 'stop_id', 'direction_id']
else:
product = [inds, sids]
names = ['indicator', 'stop_id']
cols = pd.MultiIndex.from_product(product, names=names)
return pd.DataFrame([], index=rng, columns=cols).sort_index(
axis=1, sort_remaining=True) | da638448f7f5b88d6e23e487f2ecdbc0e72a6607 | 3,639 |
from scipy import signal
def yulewalk(order, F, M):
"""Recursive filter design using a least-squares method.
[B,A] = YULEWALK(N,F,M) finds the N-th order recursive filter
coefficients B and A such that the filter:
B(z) b(1) + b(2)z^-1 + .... + b(n)z^-(n-1)
---- = -------------------------------------
A(z) 1 + a(1)z^-1 + .... + a(n)z^-(n-1)
matches the magnitude frequency response given by vectors F and M.
The YULEWALK function performs a least squares fit in the time domain. The
denominator coefficients {a(1),...,a(NA)} are computed by the so called
"modified Yule Walker" equations, using NR correlation coefficients
computed by inverse Fourier transformation of the specified frequency
response H.
The numerator is computed by a four step procedure. First, a numerator
polynomial corresponding to an additive decomposition of the power
frequency response is computed. Next, the complete frequency response
corresponding to the numerator and denominator polynomials is evaluated.
Then a spectral factorization technique is used to obtain the impulse
response of the filter. Finally, the numerator polynomial is obtained by a
least squares fit to this impulse response. For a more detailed explanation
of the algorithm see [1]_.
Parameters
----------
order : int
Filter order.
F : array
Normalised frequency breakpoints for the filter. The frequencies in F
must be between 0.0 and 1.0, with 1.0 corresponding to half the sample
rate. They must be in increasing order and start with 0.0 and end with
1.0.
M : array
Magnitude breakpoints for the filter such that PLOT(F,M) would show a
plot of the desired frequency response.
References
----------
.. [1] B. Friedlander and B. Porat, "The Modified Yule-Walker Method of
ARMA Spectral Estimation," IEEE Transactions on Aerospace Electronic
Systems, Vol. AES-20, No. 2, pp. 158-173, March 1984.
Examples
--------
Design an 8th-order lowpass filter and overplot the desired
frequency response with the actual frequency response:
>>> f = [0, .6, .6, 1] # Frequency breakpoints
>>> m = [1, 1, 0, 0] # Magnitude breakpoints
>>> [b, a] = yulewalk(8, f, m) # Filter design using a least-squares method
"""
F = np.asarray(F)
M = np.asarray(M)
npt = 512
lap = np.fix(npt / 25).astype(int)
mf = F.size
npt = npt + 1 # For [dc 1 2 ... nyquist].
Ht = np.array(np.zeros((1, npt)))
nint = mf - 1
df = np.diff(F)
nb = 0
Ht[0][0] = M[0]
for i in range(nint):
if df[i] == 0:
nb = nb - int(lap / 2)
ne = nb + lap
else:
ne = int(np.fix(F[i + 1] * npt)) - 1
j = np.arange(nb, ne + 1)
if ne == nb:
inc = 0
else:
inc = (j - nb) / (ne - nb)
Ht[0][nb:ne + 1] = np.array(inc * M[i + 1] + (1 - inc) * M[i])
nb = ne + 1
Ht = np.concatenate((Ht, Ht[0][-2:0:-1]), axis=None)
n = Ht.size
n2 = np.fix((n + 1) / 2)
nb = order
nr = 4 * order
nt = np.arange(0, nr)
# compute correlation function of magnitude squared response
R = np.real(np.fft.ifft(Ht * Ht))
R = R[0:nr] * (0.54 + 0.46 * np.cos(np.pi * nt / (nr - 1))) # pick NR correlations # noqa
# Form window to be used in extracting the right "wing" of two-sided
# covariance sequence
Rwindow = np.concatenate(
(1 / 2, np.ones((1, int(n2 - 1))), np.zeros((1, int(n - n2)))),
axis=None)
A = polystab(denf(R, order)) # compute denominator
# compute additive decomposition
Qh = numf(np.concatenate((R[0] / 2, R[1:nr]), axis=None), A, order)
# compute impulse response
_, Ss = 2 * np.real(signal.freqz(Qh, A, worN=n, whole=True))
hh = np.fft.ifft(
np.exp(np.fft.fft(Rwindow * np.fft.ifft(np.log(Ss, dtype=np.complex))))
)
B = np.real(numf(hh[0:nr], A, nb))
return B, A | d3e0f709d303c7432854d4975c858b5968245084 | 3,640 |
async def get_user_from_event(event):
""" Get the user from argument or replied message. """
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
user_obj = await tbot.get_entity(previous_message.sender_id)
else:
user = event.pattern_match.group(1)
if user.isnumeric():
user = int(user)
if not user:
await event.reply("Pass the user's username, id or reply!")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await tbot.get_entity(user_id)
return user_obj
try:
user_obj = await tbot.get_entity(user)
except (TypeError, ValueError) as err:
await event.reply(str(err))
return None
return user_obj | 600fc6d1e73f4637f51479d2e2ebabaa93723b34 | 3,641 |
import json
def parse_contest_list(json_file):
"""Parse a list of Contests from a JSON file.
Note:
Template for Contest format in JSON in contest_template.json
"""
with open(json_file, 'r') as json_data:
data = json.load(json_data)
contests = []
for contest in data:
contest_ballots = data[contest]['contest_ballots']
tally = data[contest]['tally']
num_winners = data[contest]['num_winners']
reported_winners = data[contest]['reported_winners']
contest_type = ContestType[data[contest]['contest_type']]
contests.append(Contest(contest_ballots, tally, num_winners, reported_winners, contest_type))
return contests | 637da8b03fe975aa2183d78eaa3704d57d66680d | 3,642 |
def get_image_blob(roidb, mode):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
if mode == 'train' or mode == 'val':
with open(roidb['image'], 'rb') as f:
data = f.read()
data = np.frombuffer(data, dtype='uint8')
img = cv2.imdecode(data, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
gt_boxes = roidb['boxes']
gt_label = roidb['gt_classes']
# resize
if mode == 'train':
img, im_scale = _resize(img, target_size=800, max_size=1333)
need_gt_boxes = gt_boxes.copy()
need_gt_boxes[:, :4] *= im_scale
img, need_gt_boxes, need_gt_label = _rotation(
img, need_gt_boxes, gt_label, prob=1.0, gt_margin=1.4)
else:
img, im_scale = _resize(img, target_size=1000, max_size=1778)
need_gt_boxes = gt_boxes
need_gt_label = gt_label
img = img.astype(np.float32, copy=False)
img = img / 255.0
mean = np.array(cfg.pixel_means)[np.newaxis, np.newaxis, :]
std = np.array(cfg.pixel_std)[np.newaxis, np.newaxis, :]
img -= mean
img /= std
img = img.transpose((2, 0, 1))
return img, im_scale, need_gt_boxes, need_gt_label | 35fdea333b8245294c16907e8c26c16164fb4906 | 3,643 |
def rx_weight_fn(edge):
"""A function for returning the weight from the common vertex."""
return float(edge["weight"]) | 4c405ffeae306a3920a6e624c748fb00cc1ee8ac | 3,644 |
def image_inputs(images_and_videos, data_dir, text_tmp_images):
"""Generates a list of input arguments for ffmpeg with the given images."""
include_cmd = []
# adds images as video starting on overlay time and finishing on overlay end
img_formats = ['gif', 'jpg', 'jpeg', 'png']
for ovl in images_and_videos:
filename = ovl['image']
# checks if overlay is image or video
is_img = False
for img_fmt in img_formats:
is_img = filename.lower().endswith(img_fmt)
if is_img:
break
# treats image overlay
if is_img:
duration = str(float(ovl['end_time']) - float(ovl['start_time']))
is_gif = filename.lower().endswith('.gif')
has_fade = (float(ovl.get('fade_in_duration', 0)) +
float(ovl.get('fade_out_duration', 0))) > 0
# A GIF with no fade is treated as an animated GIF should.
# It works even if it is not animated.
# An animated GIF cannot have fade in or out effects.
if is_gif and not has_fade:
include_args = ['-ignore_loop', '0']
else:
include_args = ['-f', 'image2', '-loop', '1']
include_args += ['-itsoffset', str(ovl['start_time']), '-t', duration]
# GIFs should have a special input decoder for FFMPEG.
if is_gif:
include_args += ['-c:v', 'gif']
include_args += ['-i']
include_cmd += include_args + ['%s/assets/%s' % (data_dir,
filename)]
# treats video overlays
else:
duration = str(float(ovl['end_time']) - float(ovl['start_time']))
include_args = ['-itsoffset', str(ovl['start_time']), '-t', duration]
include_args += ['-i']
include_cmd += include_args + ['%s/assets/%s' % (data_dir,
filename)]
# adds texts as video starting and finishing on their overlay timing
for img2 in text_tmp_images:
duration = str(float(img2['end_time']) - float(img2['start_time']))
include_args = ['-f', 'image2', '-loop', '1']
include_args += ['-itsoffset', str(img2['start_time']), '-t', duration]
include_args += ['-i']
include_cmd += include_args + [str(img2['path'])]
return include_cmd | b210687d00edc802cbf362e4394b61e0c0989095 | 3,645 |
def generate_graph(data_sets: pd.DataFrame, data_source: str, data_state: str, toggle_new_case: bool, year: int) -> tuple[px.line, px.bar]:
"""Takes in the inputs and returns a graph object. The inputs are the source, data, location and year.
The graph is a prediction of the sentiment from the comments as a function of time. Another trace of cases can be displayed as well.
We can also have graphs directly comparing # of cases with sentiment by having cases on the x and its sentiment on that day on the y.
Depending on the input, a graph is generated that takes into account source, state (how much the model is trained), show cases (toggle on/off), location, and year.
The user can choose which type of graph to generate.
Returns a line graph and a bar chart.
"""
main_graph = px.line(
data_sets[data_source],
x="Date",
y="New Cases",
)
if toggle_new_case:
main_graph.add_trace(
go.Line(
x=data_sets[data_source].loc[:, 'Date'],
y=data_sets[data_source].loc[:, 'New Cases']
)
)
stat_data_sets = pd.DataFrame(
index=["Max", "Min", "Mean"],
data={
"Cases": [
data_sets[data_source].loc[:, "New Cases"].max(),
data_sets[data_source].loc[:, "New Cases"].min(),
data_sets[data_source].loc[:, "New Cases"].mean(),
]
},
)
stats_graph = px.bar(
stat_data_sets,
x=["Max", "Min", "Mean"],
y="Cases",
)
return main_graph, stats_graph | 437e34419e187ba7ae86bd50c57844a5e55a4bf7 | 3,646 |
def f_not_null(seq):
"""过滤非空值"""
seq = filter(lambda x: x not in (None, '', {}, [], ()), seq)
return seq | a88eab0a03ef5c1db3ceb4445bb0d84a54157875 | 3,647 |
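Note that the result is a lazy filter object rather than a list, and that falsy-but-meaningful values such as 0 are kept because the membership test only matches None, '', {}, [], and ():

```python
result = f_not_null([0, '', None, 'abc', {}, 5, ()])
print(list(result))  # [0, 'abc', 5]
```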
def flickr(name, args, options, content, lineno,
contentOffset, blockText, state, stateMachine):
""" Restructured text extension for inserting flickr embedded slideshows """
if len(content) == 0:
return
string_vars = {
'flickid': content[0],
'width': 400,
'height': 300,
'extra': ''
}
extra_args = content[1:] # Because content[0] is ID
extra_args = [ea.strip().split("=") for ea in extra_args] # key=value
extra_args = [ea for ea in extra_args if len(ea) == 2] # drop bad lines
extra_args = dict(extra_args)
if 'width' in extra_args:
string_vars['width'] = extra_args.pop('width')
if 'height' in extra_args:
string_vars['height'] = extra_args.pop('height')
if extra_args:
params = [PARAM % (key, extra_args[key]) for key in extra_args]
string_vars['extra'] = "".join(params)
return [nodes.raw('', CODE % (string_vars), format='html')] | 9b41c558dd5f5ef7be1aff1e9567f1c5d5b26f31 | 3,648 |
def serialize(key):
"""
Return serialized version of key name
"""
s = current_app.config['SERIALIZER']
return s.dumps(key) | dba2202e00960420252c00120333b142d3a8f216 | 3,649 |
def calc_disordered_regions(limits, seq):
"""
Returns the sequence of disordered regions given a string of
starts and ends of the regions and the sequence.
Example
-------
limits = 1_5;8_10
seq = AHSEDQNAAANTH...
This will return `AHSED_AAA`
"""
seq = seq.replace(' ', '')
regions = [tuple(region.split('_')) for region
in limits.split(';')]
return '_'.join([seq[int(i)-1:int(j)] for i,j in regions]) | 2c9a487a776a742470deb98e6f471b04b23a0ff7 | 3,650 |
import random
def person_attribute_string_factory(sqla):
"""Create a fake person attribute that is enumerated."""
create_multiple_attributes(sqla, 5, 1)
people = sqla.query(Person).all()
if not people:
create_multiple_people(sqla, random.randint(3, 6))
people = sqla.query(Person).all()
current_person = random.choice(people)
nonenumerated_values = sqla.query(Attribute).all()
if not nonenumerated_values:
create_multiple_nonenumerated_values(sqla, random.randint(3, 6))
nonenumerated_values = sqla.query(Attribute).all()
current_nonenumerated_value = random.choice(nonenumerated_values)
person_attribute = {
'personId': current_person.id,
'attributeId': current_nonenumerated_value.id,
'stringValue': rl_fake().first_name()
}
return person_attribute | b2c3f632c0671b41044e143c5aab32abf928a362 | 3,651 |
def forward_pass(log_a, log_b, logprob_s0):
"""Computing the forward pass of Baum-Welch Algorithm.
By employing log-exp-sum trick, values are computed in log space, including
the output. Notation is adopted from https://arxiv.org/abs/1910.09588.
`log_a` is the likelihood of discrete states, `log p(s[t] | s[t-1], x[t-1])`,
`log_b` is the likelihood of observations, `log p(x[t], z[t] | s[t])`,
and `logprob_s0` is the likelihood of initial discrete states, `log p(s[0])`.
Forward pass calculates the filtering likelihood of `log p(s_t | x_1:t)`.
Args:
log_a: a float `Tensor` of size [batch, num_steps, num_categ, num_categ]
stores time dependent transition matrices, `log p(s[t] | s[t-1], x[t-1])`.
`A[i, j]` is the transition probability from `s[t-1]=j` to `s[t]=i`.
log_b: a float `Tensor` of size [batch, num_steps, num_categ] stores time
dependent emission matrices, 'log p(x[t](, z[t])| s[t])`.
logprob_s0: a float `Tensor` of size [num_categ], initial discrete states
probability, `log p(s[0])`.
Returns:
forward_pass: a float 3D `Tensor` of size [batch, num_steps, num_categ]
stores the forward pass probability of `log p(s_t | x_1:t)`, which is
normalized.
normalizer: a float 2D `Tensor` of size [batch, num_steps] stores the
normalizing probability, `log p(x_t | x_1:t-1)`.
"""
num_steps = log_a.get_shape().with_rank_at_least(3).dims[1].value
tas = [tf.TensorArray(tf.float32, num_steps, name=n)
for n in ["forward_prob", "normalizer"]]
# The function will return normalized forward probability and
# normalizing constant as a list, [forward_logprob, normalizer].
init_updates = utils.normalize_logprob(
logprob_s0[tf.newaxis, :] + log_b[:, 0, :], axis=-1)
tas = utils.write_updates_to_tas(tas, 0, init_updates)
prev_prob = init_updates[0]
init_state = (1,
prev_prob,
tas)
def _cond(t, *unused_args):
return t < num_steps
def _steps(t, prev_prob, fwd_tas):
"""One step forward in iterations."""
bi_t = log_b[:, t, :] # log p(x[t+1] | s[t+1])
aij_t = log_a[:, t, :, :] # log p(s[t+1] | s[t], x[t])
current_updates = tf.math.reduce_logsumexp(
bi_t[:, :, tf.newaxis] + aij_t + prev_prob[:, tf.newaxis, :],
axis=-1)
current_updates = utils.normalize_logprob(current_updates, axis=-1)
prev_prob = current_updates[0]
fwd_tas = utils.write_updates_to_tas(fwd_tas, t, current_updates)
return (t+1, prev_prob, fwd_tas)
_, _, tas_final = tf.while_loop(
_cond,
_steps,
init_state
)
# transpose to [batch, step, state]
forward_prob = tf.transpose(tas_final[0].stack(), [1, 0, 2])
normalizer = tf.transpose(tf.squeeze(tas_final[1].stack(), axis=[-1]), [1, 0])
return forward_prob, normalizer | e3c6cc193ea01bd308c821de31c43ab42c9fea69 | 3,652 |
def TonnetzToString(Tonnetz):
"""TonnetzToString: List -> String."""
TonnetzString = getKeyByValue(dictOfTonnetze, Tonnetz)
return TonnetzString | db878b4e0b857d08171653d53802fb41e6ca46a4 | 3,653 |
def get_mc_calibration_coeffs(tel_id):
"""
Get the calibration coefficients from the MC data file to the
data. This is a hack (until we have a real data structure for the
calibrated data), it should move into `ctapipe.io.hessio_event_source`.
returns
-------
(peds,gains) : arrays of the pedestal and pe/dc ratios.
"""
peds = pyhessio.get_pedestal(tel_id)[0]
gains = pyhessio.get_calibration(tel_id)[0]
return peds, gains | 0bd202f608ff062426d8cdce32677c2c2f2583de | 3,654 |
from math import exp, pi, sqrt
def bryc(K):
"""
Approximates the cumulative normal distribution function using the
uniform approximation proposed by Bryc (2002).
Absolute error is less than 1.9e-5.
:param K: any value from negative infinity to positive infinity
:return: approximation of the cumulative normal distribution integral
"""
X = abs(K)
cnd = 1.-(X*X + 5.575192*X + 12.77436324) * exp(-X*X/2.)/(sqrt(2.*pi)*pow(X, 3) + 14.38718147*pow(X, 2) + 31.53531977*X + 2*12.77436324)
if K < 0:
cnd = 1. - cnd
return cnd | e2feb6fa7f806294cef60bb5afdc4e70c95447f8 | 3,655 |
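Quick check against known values of the standard normal CDF (Φ(0) = 0.5, Φ(1.96) ≈ 0.975):

```python
print(bryc(0))      # 0.5
print(bryc(1.96))   # ~0.97501, within the stated 1.9e-5 bound of 0.9750021...
print(bryc(-1.96))  # ~0.02499
```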
def grid_square_neighbors_1d_from(shape_slim):
"""
From a (y,x) grid of coordinates, determine the 8 neighbors of every coordinate on the grid which has 8
neighboring (y,x) coordinates.
Neighbor indexes use the 1D index of the pixel on the masked grid counting from the top-left right and down.
For example:
x x x x x x x x x x
x x x x x x x x x x This is an example mask.Mask2D, where:
x x x x x x x x x x
x x x 0 1 2 3 x x x x = `True` (Pixel is masked and excluded from the grid)
x x x 4 5 6 7 x x x o = `False` (Pixel is not masked and included in the grid)
x x x 8 9 10 11 x x x
x x x x x x x x x x
x x x x x x x x x x
x x x x x x x x x x
x x x x x x x x x x
On the grid above, the grid cells in 1D indexes 5 and 6 have 8 neighboring pixels and their entries in the
grid_neighbors_1d array will be:
grid_neighbors_1d[0,:] = [0, 1, 2, 4, 6, 8, 9, 10]
grid_neighbors_1d[1,:] = [1, 2, 3, 5, 7, 9, 10, 11]
The other pixels will be included in the grid_neighbors_1d array, but correspond to `False` entries in
grid_has_neighbors and be omitted from calculations that use the neighbor array.
Parameters
----------
shape_slim : np.ndarray
The irregular 1D grid of (y,x) coordinates over which a square uniform grid is overlaid.
pixel_scales : (float, float)
The pixel scale of the uniform grid that laid over the irregular grid of (y,x) coordinates.
"""
shape_of_edge = int(np.sqrt(shape_slim))
has_neighbors = np.full(shape=shape_slim, fill_value=False)
neighbors_1d = np.full(shape=(shape_slim, 8), fill_value=-1.0)
index = 0
for y in range(shape_of_edge):
for x in range(shape_of_edge):
if y > 0 and x > 0 and y < shape_of_edge - 1 and x < shape_of_edge - 1:
neighbors_1d[index, 0] = index - shape_of_edge - 1
neighbors_1d[index, 1] = index - shape_of_edge
neighbors_1d[index, 2] = index - shape_of_edge + 1
neighbors_1d[index, 3] = index - 1
neighbors_1d[index, 4] = index + 1
neighbors_1d[index, 5] = index + shape_of_edge - 1
neighbors_1d[index, 6] = index + shape_of_edge
neighbors_1d[index, 7] = index + shape_of_edge + 1
has_neighbors[index] = True
index += 1
return neighbors_1d, has_neighbors | 72c0009915b397005c9b9afb52dfb2fa20c1c99c | 3,656 |
def get_total_entries(df, pdbid, cdr):
"""
Get the total number of entries of the particular CDR and PDBID in the database
:param df: dataframe.DataFrame
:rtype: int
"""
return len(get_all_entries(df, pdbid, cdr)) | fec351a6d23fd73e082d3b5c066fa9d367629dea | 3,657 |
def _gf2mulxinvmod(a,m):
"""
Computes ``a * x^(-1) mod m``.
*NOTE*: Does *not* check whether `a` is smaller in degree than `m`.
Parameters
----------
a, m : integer
Polynomial coefficient bit vectors.
Polynomial `a` should be smaller degree than `m`.
Returns
-------
c : integer
Polynomial coefficient bit vector of ``c = a * x^(-1) mod m``.
"""
c = (a ^ ((a&1)*m)) >> 1
return c | e60e99cd7ebfd3df795cdad5d712f1278b7b9a0f | 3,658 |
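A small worked example in GF(2^3) with modulus m = x^3 + x + 1 (0b1011). Since x^(-1) ≡ x^2 + 1 (mod m), dividing 1 by x gives 0b101, and dividing x by x gives 1:

```python
m = 0b1011  # x^3 + x + 1
print(bin(_gf2mulxinvmod(0b001, m)))  # 0b101 (1 * x^-1 = x^2 + 1)
print(bin(_gf2mulxinvmod(0b010, m)))  # 0b1   (x * x^-1 = 1)
```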
def find_cuda_family_config(repository_ctx, script_path, cuda_libraries):
"""Returns CUDA config dictionary from running find_cuda_config.py"""
python_bin = repository_ctx.which("python3")
exec_result = execute(repository_ctx, [python_bin, script_path] + cuda_libraries)
if exec_result.return_code:
errmsg = err_out(exec_result)
auto_configure_fail("Failed to run find_cuda_config.py: {}".format(errmsg))
# Parse the dict from stdout.
return dict([tuple(x.split(": ")) for x in exec_result.stdout.splitlines()]) | e71c14528946c8815bef2355cbcc797bbc03bb39 | 3,659 |
def _prediction_feature_weights(booster, dmatrix, n_targets,
feature_names, xgb_feature_names):
""" For each target, return score and numpy array with feature weights
on this prediction, following an idea from
http://blog.datadive.net/interpreting-random-forests/
"""
# XGBClassifier does not have pred_leaf argument, so use booster
leaf_ids, = booster.predict(dmatrix, pred_leaf=True)
xgb_feature_names = {f: i for i, f in enumerate(xgb_feature_names)}
tree_dumps = booster.get_dump(with_stats=True)
assert len(tree_dumps) == len(leaf_ids)
target_feature_weights = partial(
_target_feature_weights,
feature_names=feature_names, xgb_feature_names=xgb_feature_names)
if n_targets > 1:
# For multiclass, XGBoost stores dumps and leaf_ids in a 1d array,
# so we need to split them.
scores_weights = [
target_feature_weights(
leaf_ids[target_idx::n_targets],
tree_dumps[target_idx::n_targets],
) for target_idx in range(n_targets)]
else:
scores_weights = [target_feature_weights(leaf_ids, tree_dumps)]
return scores_weights | 54814b1d0d2ce0ca66e7bdd5c5933477c5c8c169 | 3,660 |
from typing import Sequence
from typing import Collection
from typing import List
def group_by_repo(repository_full_name_column_name: str,
repos: Sequence[Collection[str]],
df: pd.DataFrame,
) -> List[np.ndarray]:
"""Group items by the value of their "repository_full_name" column."""
if df.empty:
return [np.array([], dtype=int)] * len(repos)
df_repos = df[repository_full_name_column_name].values.astype("S")
repos = [
np.array(repo_group if not isinstance(repo_group, set) else list(repo_group), dtype="S")
for repo_group in repos
]
unique_repos, imap = np.unique(np.concatenate(repos), return_inverse=True)
if len(unique_repos) <= len(repos):
matches = np.array([df_repos == repo for repo in unique_repos])
pos = 0
result = []
for repo_group in repos:
step = len(repo_group)
cols = imap[pos:pos + step]
group = np.flatnonzero(np.sum(matches[cols], axis=0, dtype=bool))
pos += step
result.append(group)
else:
result = [
np.flatnonzero(np.in1d(df_repos, repo_group))
for repo_group in repos
]
return result | 8e10e32d1a1bb8b31e25000dc63be0b3cd1645d0 | 3,661 |
def _row_or_col_is_header(s_count, v_count):
"""
Utility function for subdivide
Heuristic for whether a row/col is a header or not.
"""
if s_count == 1 and v_count == 1:
return False
else:
return (s_count + 1) / (v_count + s_count + 1) >= 2. / 3. | 525b235fe7027524658f75426b6dbc9c8e334232 | 3,662 |
def values_hash(array, step=0):
"""
Return consistent hash of array values
:param array array: (n,) array with or without structure
:param uint64 step: optional step number to modify hash values
:returns: (n,) uint64 array
"""
cls, cast_dtype, view_dtype = _get_optimal_cast(array)
array = cls._cast(array, cast_dtype, view_dtype)
return cls._hash(array, UINT64(step)) | 7d2fedf0ca244797dd33e4f344dc81726b26efb6 | 3,663 |
import urllib
async def get_molecule_image(optimization_id: str):
"""Render the molecule associated with a particular bespoke optimization to an
SVG file."""
task = _get_task(optimization_id=optimization_id)
svg_content = smiles_to_image(urllib.parse.unquote(task.input_schema.smiles))
svg_response = Response(svg_content, media_type="image/svg+xml")
return svg_response | 6e3b178f18cef7d8a7ed4c75b75dfb0acd346fbe | 3,664 |
import torch
def total_variation_divergence(logits, targets, reduction='mean'):
"""
Loss.
:param logits: predicted logits
:type logits: torch.autograd.Variable
:param targets: target distributions
:type targets: torch.autograd.Variable
:param reduction: reduction type
:type reduction: str
:return: error
:rtype: torch.autograd.Variable
"""
assert len(list(logits.size())) == len(list(targets.size()))
assert logits.size()[0] == targets.size()[0]
assert logits.size()[1] == targets.size()[1]
assert logits.size()[1] > 1
divergences = torch.sum(
torch.abs(torch.nn.functional.softmax(logits, dim=1) - targets), dim=1)
if reduction == 'mean':
return torch.mean(divergences)
elif reduction == 'sum':
return torch.sum(divergences)
else:
return divergences | 75f848e71e6fc78c60341e3fb46a2ff7d4531cbc | 3,665 |
def initialize_parameters(n_in, n_out, ini_type='plain'):
"""
Helper function to initialize some form of random weights and Zero biases
Args:
n_in: size of input layer
n_out: size of output/number of neurons
ini_type: set initialization type for weights
Returns:
params: a dictionary containing W and b
"""
params = dict() # initialize empty dictionary of neural net parameters W and b
if ini_type == 'plain':
params['W'] = np.random.randn(n_out, n_in) *0.01 # set weights 'W' to small random gaussian
elif ini_type == 'xavier':
params['W'] = np.random.randn(n_out, n_in) / (np.sqrt(n_in)) # set variance of W to 1/n
elif ini_type == 'he':
# Good when ReLU used in hidden layers
# Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification
# Kaiming He et al. (https://arxiv.org/abs/1502.01852)
# http: // cs231n.github.io / neural - networks - 2 / # init
params['W'] = np.random.randn(n_out, n_in) * np.sqrt(2/n_in) # set variance of W to 2/n
params['b'] = np.zeros((n_out, 1)) # set bias 'b' to zeros
return params | 1350d086c12dc40792a2f84a3a5edf5e683f9e95 | 3,666 |
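For a layer with 4 inputs and 3 neurons, every scheme returns a 3x4 weight matrix and a 3x1 zero bias; only the scale of W differs. A quick shape check (numpy imported as np, as the function assumes):

```python
import numpy as np

np.random.seed(0)
params = initialize_parameters(n_in=4, n_out=3, ini_type='he')
print(params['W'].shape)  # (3, 4)
print(params['b'].shape)  # (3, 1)
print(params['b'].sum())  # 0.0 -- biases start at zero
```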
def logcdf(samples, data, prior_bounds, weights=None, direction=DEFAULT_CUMULATIVE_INTEGRAL_DIRECTION, num_proc=DEFAULT_NUM_PROC):
"""estimates the log(cdf) at all points in samples based on data and integration in "direction".
Does this directly by estimating the CDF from the weighted samples WITHOUT building a KDE"""
### this should be relatively quick (just an ordered summation), so we do it once
data, cweights = stats.samples2cdf(data, weights=weights)
if direction=='increasing':
pass ### we already integrate up from the lower values to higher values
elif direction=='decreasing':
cweights = 1. - cweights ### reverse the order of the integral
else:
raise ValueError('direction=%s not understood!'%direction)
logcdfs = np.empty(len(samples), dtype=float)
if num_proc==1: ### do everything on this one core
logcdfs[:] = _logcdf_worker(samples, data, cweights, prior_bounds)
else: ### parallelize
# partition work amongst the requested number of cores
Nsamp = len(samples)
sets = _define_sets(Nsamp, num_proc)
# set up and launch processes.
procs = []
for truth in sets:
conn1, conn2 = mp.Pipe()
proc = mp.Process(target=_logcdf_worker, args=(samples[truth], data, cweights, prior_bounds), kwargs={'conn':conn2})
proc.start()
procs.append((proc, conn1))
conn2.close()
# read in results from process
for truth, (proci, conni) in zip(sets, procs):
proci.join() ### should clean up child...
logcdfs[truth] = conni.recv()
return logcdfs | 5b57b964a57cae59425e73b089a3d1d6f7fbf95d | 3,667 |
import requests
import json
def post_gist(description, files):
"""Post a gist of the analysis"""
username, password = get_auth()
sess = requests.Session()
sess.auth = (username, password)
params = {
'description': description,
'files': files,
'public': False,
}
headers = {
'Content-Type': 'application/json',
'Accept': '*/*',
'User-Agent': 'stolaf-cs-toolkit/v1',
}
req = sess.post('https://api.github.com/gists',
headers=headers,
data=json.dumps(params))
result = req.json()
return result.get('html_url', '"' + result.get('message', 'Error') + '"') | a348203a08455099f6eefb660a234796d22380ae | 3,668 |
def compute_exposure_params(reference, tone_mapper="aces", t_max=0.85, t_min=0.85):
"""
Computes start and stop exposure for HDR-FLIP based on given tone mapper and reference image.
Refer to the Visualizing Errors in Rendered High Dynamic Range Images
paper for details about the formulas
:param reference: float tensor (with CxHxW layout) containing reference image (nonnegative values)
:param tone_mapper: (optional) string describing the tone mapper assumed by HDR-FLIP
:param t_max: (optional) float describing the t value used to find the start exposure
:param t_min: (optional) float describing the t value used to find the stop exposure
:return: two floats describing start and stop exposure, respectively, to use for HDR-FLIP
"""
if tone_mapper == "reinhard":
k0 = 0
k1 = 1
k2 = 0
k3 = 0
k4 = 1
k5 = 1
x_max = t_max * k5 / (k1 - t_max * k4)
x_min = t_min * k5 / (k1 - t_min * k4)
elif tone_mapper == "hable":
# Source: https://64.github.io/tonemapping/
A = 0.15
B = 0.50
C = 0.10
D = 0.20
E = 0.02
F = 0.30
k0 = A * F - A * E
k1 = C * B * F - B * E
k2 = 0
k3 = A * F
k4 = B * F
k5 = D * F * F
W = 11.2
nom = k0 * np.power(W, 2) + k1 * W + k2
denom = k3 * np.power(W, 2) + k4 * W + k5
white_scale = denom / nom # = 1 / (nom / denom)
# Include white scale and exposure bias in rational polynomial coefficients
k0 = 4 * k0 * white_scale
k1 = 2 * k1 * white_scale
k2 = k2 * white_scale
k3 = 4 * k3
k4 = 2 * k4
#k5 = k5 # k5 is not changed
c0 = (k1 - k4 * t_max) / (k0 - k3 * t_max)
c1 = (k2 - k5 * t_max) / (k0 - k3 * t_max)
x_max = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
c0 = (k1 - k4 * t_min) / (k0 - k3 * t_min)
c1 = (k2 - k5 * t_min) / (k0 - k3 * t_min)
x_min = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
else: #tone_mapper == "aces":
# Source: ACES approximation: https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/
# Include pre-exposure cancelation in constants
k0 = 0.6 * 0.6 * 2.51
k1 = 0.6 * 0.03
k2 = 0
k3 = 0.6 * 0.6 * 2.43
k4 = 0.6 * 0.59
k5 = 0.14
c0 = (k1 - k4 * t_max) / (k0 - k3 * t_max)
c1 = (k2 - k5 * t_max) / (k0 - k3 * t_max)
x_max = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
c0 = (k1 - k4 * t_min) / (k0 - k3 * t_min)
c1 = (k2 - k5 * t_min) / (k0 - k3 * t_min)
x_min = - 0.5 * c0 + np.sqrt(((0.5 * c0) ** 2) - c1)
# Convert reference to luminance
lum_coeff_r = 0.2126
lum_coeff_g = 0.7152
lum_coeff_b = 0.0722
Y_reference = reference[0:1, :, :] * lum_coeff_r + reference[1:2, :, :] * lum_coeff_g + reference[2:3, :, :] * lum_coeff_b
# Compute start exposure
Y_hi = np.amax(Y_reference)
if Y_hi == 0:
return 0, 0
start_exposure = np.log2(x_max / Y_hi)
# Compute stop exposure
Y_lo = np.percentile(Y_reference, 50)
stop_exposure = np.log2(x_min / Y_lo)
return start_exposure, stop_exposure | 330265585be9a27f38f20cef55d6a2c588819d35 | 3,669 |
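# Hedged usage sketch for compute_exposure_params above (not from the original source):
# the random CxHxW array stands in for a real nonnegative HDR reference image.
import numpy as np

rng = np.random.default_rng(0)
reference = rng.uniform(0.0, 4.0, size=(3, 64, 64)).astype(np.float32)
start_exposure, stop_exposure = compute_exposure_params(reference, tone_mapper="aces")
print("start exposure %.3f, stop exposure %.3f" % (start_exposure, stop_exposure))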
import os
def gen_filenames(only_new=False):
"""Returns a list of filenames referenced in sys.modules and translation
files.
"""
global _cached_modules, _cached_filenames
module_values = set(module_white_list())
if _cached_modules == module_values:
# No changes in module list, short-circuit the function
if only_new:
return []
else:
return _cached_filenames
new_modules = module_values - _cached_modules
new_filenames = [filename.__file__ for filename in new_modules
if hasattr(filename, '__file__')]
if only_new:
filelist = new_filenames
else:
filelist = _cached_filenames + new_filenames + _error_files
filenames = []
for filename in filelist:
if not filename:
continue
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
if os.path.exists(filename):
filenames.append(filename)
_cached_modules = _cached_modules.union(new_modules)
_cached_filenames += new_filenames
return filenames | 7f526930729d830cd10f4b100f2dbea76025b926 | 3,670 |
from typing import Literal
from typing import Any
def scores_generic_graph(
num_vertices: int,
edges: NpArrayEdges,
weights: NpArrayEdgesFloat,
cond: Literal["or", "both", "out", "in"] = "or",
is_directed: bool = False,
) -> NpArrayEdgesFloat:
"""
Args:
num_vertices: int
            number of vertices
edges: np.array
edges
weights: np.array
edge weights
        cond: str
            "out", "in", "both", "or"
        is_directed: bool
            whether the graph should be treated as directed
Returns:
np.array:
**alphas** edge scores
"""
w_adj, adj = construct_sp_matrices(
weights, edges, num_vertices, is_directed=is_directed
)
def calc_degree(adj: Any, i: int) -> NpArrayEdgesFloat:
return np.asarray(adj.sum(axis=i)).flatten().astype(np.float64)
iin = edges[:, 1]
iout = edges[:, 0]
wdegree_out = calc_degree(w_adj, 0)[iout]
degree_out = calc_degree(adj, 0)[iout]
wdegree_in = calc_degree(w_adj, 1)[iin]
degree_in = calc_degree(adj, 1)[iin]
if cond == "out":
alphas = stick_break_scores(wdegree_out, degree_out, edges, weights)
elif cond == "in":
alphas = stick_break_scores(wdegree_in, degree_in, edges, weights)
else:
alphas_out = stick_break_scores(wdegree_out, degree_out, edges, weights)
alphas_in = stick_break_scores(wdegree_in, degree_in, edges, weights)
if cond == "both":
alphas = np.maximum(alphas_out, alphas_in)
elif cond == "or":
alphas = np.minimum(alphas_out, alphas_in)
return alphas | 6f3b4b969663ff48be7b0a4cf3571800dd0d15e8 | 3,671 |
def handle_storage_class(vol):
"""
    vol: dict (sent from the frontend)
    If the frontend sent the special values `{none}` or `{empty}` then the
    backend will need to set the corresponding storage_class value that the
    Python client expects.
"""
if "class" not in vol:
return None
if vol["class"] == "{none}":
return ""
if vol["class"] == "{empty}":
return None
else:
return vol["class"] | a2747b717c6b83bb1128f1d5e9d7696dd8deda19 | 3,672 |
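# Hedged usage sketch for handle_storage_class above (not from the original source):
# the sentinel strings map onto the storage_class values the downstream Python client expects.
assert handle_storage_class({}) is None                        # key absent -> leave unset
assert handle_storage_class({"class": "{none}"}) == ""         # explicit "no storage class"
assert handle_storage_class({"class": "{empty}"}) is None      # let the cluster pick a default
assert handle_storage_class({"class": "fast-ssd"}) == "fast-ssd"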
import numpy as np
def spherical_to_cartesian(radius, theta, phi):
""" Convert from spherical coordinates to cartesian.
    Parameters
    ----------
    radius: float
        radial coordinate
    theta: float
        polar angle, measured from the z-axis
    phi: float
        azimuthal angle
Returns
-------
list: cartesian vector
"""
cartesian = [radius * np.sin(theta) * np.cos(phi), radius * np.sin(theta) * np.sin(phi), radius * np.cos(theta)]
return cartesian | bc76aa608171243f3afc1fbdbaca90931b1e3d17 | 3,673 |
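# Hedged usage sketch for spherical_to_cartesian above (not from the original source):
# with theta measured from the +z axis, (radius=1, theta=pi/2, phi=0) lands on the +x axis.
import numpy as np

x, y, z = spherical_to_cartesian(1.0, np.pi / 2, 0.0)
print(np.allclose([x, y, z], [1.0, 0.0, 0.0]))  # True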
import torch
import torch.nn.functional as F
def compute_mrcnn_bbox_loss(mrcnn_target_deltas, mrcnn_pred_deltas, target_class_ids):
"""
:param mrcnn_target_deltas: (n_sampled_rois, (dy, dx, (dz), log(dh), log(dw), (log(dh)))
:param mrcnn_pred_deltas: (n_sampled_rois, n_classes, (dy, dx, (dz), log(dh), log(dw), (log(dh)))
:param target_class_ids: (n_sampled_rois)
:return: loss: torch 1D tensor.
"""
if 0 not in torch.nonzero(target_class_ids > 0).size():
positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]
positive_roi_class_ids = target_class_ids[positive_roi_ix].long()
target_bbox = mrcnn_target_deltas[positive_roi_ix, :].detach()
pred_bbox = mrcnn_pred_deltas[positive_roi_ix, positive_roi_class_ids, :]
loss = F.smooth_l1_loss(pred_bbox, target_bbox)
else:
loss = torch.FloatTensor([0]).cuda()
return loss | b6f62a3255f21ce26cd69b6e53a778dfc23a7b86 | 3,674 |
import torch
def _sharpness(prediction):
"""TODO: Implement for discrete inputs as entropy."""
_, chol_std = prediction
scale = torch.diagonal(chol_std, dim1=-1, dim2=-2)
return scale.square().mean() | a914c43011d98a183c83fbb6024da7fbb1f87887 | 3,675 |
def dummy_sgs(dummies, sym, n):
"""
Return the strong generators for dummy indices
Parameters
==========
dummies : list of dummy indices
`dummies[2k], dummies[2k+1]` are paired indices
sym : symmetry under interchange of contracted dummies::
* None no symmetry
* 0 commuting
* 1 anticommuting
n : number of indices
in base form the dummy indices are always in consecutive positions
Examples
========
>>> from sympy.combinatorics.tensor_can import dummy_sgs
>>> dummy_sgs(range(2, 8), 0, 8)
[[0, 1, 3, 2, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 5, 4, 6, 7, 8, 9],
[0, 1, 2, 3, 4, 5, 7, 6, 8, 9], [0, 1, 4, 5, 2, 3, 6, 7, 8, 9],
[0, 1, 2, 3, 6, 7, 4, 5, 8, 9]]
"""
if len(dummies) > n:
raise ValueError("List too large")
res = []
# exchange of contravariant and covariant indices
if sym is not None:
for j in dummies[::2]:
a = list(range(n + 2))
if sym == 1:
a[n] = n + 1
a[n + 1] = n
a[j], a[j + 1] = a[j + 1], a[j]
res.append(a)
# rename dummy indices
for j in dummies[:-3:2]:
a = list(range(n + 2))
a[j:j + 4] = a[j + 2], a[j + 3], a[j], a[j + 1]
res.append(a)
return res | 774203b62a0335f9bea176a1228673b2466324e3 | 3,676 |
import time
from functools import wraps
def mc_tracing(func):
"""
    This decorator is used below and logs certain statistics about the formula evaluation.
    It measures execution time and how many nodes are found that satisfy each subformula.
"""
@wraps(func)
def wrapper(*args):
formula = args[1]
start = time.time()
logger.debug(f"{func.__name__} for formula {str(formula)}")
retval = func(*args)
logger.debug(f"{func.__name__} found {len(retval)} nodes (in {time.time() - start} seconds) for formula {str(formula)}")
# assert isinstance(retval, (set, list, nx.classes.reportviews.NodeView)), f"{func.__name__} did not return a set, list or nx.NodeView, but a {type(retval)}"
return retval
return wrapper | c3a318de80a7d0c29e2a5b2dd0f4873d3242f1d9 | 3,677 |
def get_uniform_comparator(comparator):
""" convert comparator alias to uniform name
"""
if comparator in ["eq", "equals", "==", "is"]:
return "equals"
elif comparator in ["lt", "less_than"]:
return "less_than"
elif comparator in ["le", "less_than_or_equals"]:
return "less_than_or_equals"
elif comparator in ["gt", "greater_than"]:
return "greater_than"
elif comparator in ["ge", "greater_than_or_equals"]:
return "greater_than_or_equals"
elif comparator in ["ne", "not_equals"]:
return "not_equals"
elif comparator in ["str_eq", "string_equals"]:
return "string_equals"
elif comparator in ["len_eq", "length_equals", "count_eq"]:
return "length_equals"
elif comparator in ["len_gt", "count_gt", "length_greater_than", "count_greater_than"]:
return "length_greater_than"
elif comparator in ["len_ge", "count_ge", "length_greater_than_or_equals", \
"count_greater_than_or_equals"]:
return "length_greater_than_or_equals"
elif comparator in ["len_lt", "count_lt", "length_less_than", "count_less_than"]:
return "length_less_than"
elif comparator in ["len_le", "count_le", "length_less_than_or_equals", \
"count_less_than_or_equals"]:
return "length_less_than_or_equals"
else:
return comparator | 20c24ba35dea92d916d9dd1006d110db277e0816 | 3,678 |
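# Hedged usage sketch for get_uniform_comparator above (not from the original source):
# aliases collapse to one canonical name, and unknown strings pass through unchanged.
assert get_uniform_comparator("==") == "equals"
assert get_uniform_comparator("len_gt") == "length_greater_than"
assert get_uniform_comparator("regex_match") == "regex_match"  # unrecognized, returned as-is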
def inorder_traversal(root):
"""Function to traverse a binary tree inorder
Args:
root (Node): The root of a binary tree
Returns:
(list): List containing all the values of the tree from an inorder search
"""
res = []
if root:
res = inorder_traversal(root.left)
res.append(root.data)
res = res + inorder_traversal(root.right)
return res | f6d5141cbe9f39da609bd515133b367975e56688 | 3,679 |
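# Hedged usage sketch for inorder_traversal above (not from the original source): the real
# project's Node class is not shown, so this minimal stand-in only assumes .data/.left/.right.
class Node:
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

root = Node(2, left=Node(1), right=Node(3))
print(inorder_traversal(root))  # [1, 2, 3]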
def get_extractor_metadata(clowder_md, extractor_name, extractor_version=None):
"""Crawl Clowder metadata object for particular extractor metadata and return if found.
If extractor_version specified, returned metadata must match."""
for sub_metadata in clowder_md:
if 'agent' in sub_metadata:
agent_data = sub_metadata['agent']
if 'name' in agent_data and agent_data['name'].find(extractor_name) > -1:
if not extractor_version:
return sub_metadata['content']
else:
# TODO: Eventually check this in preferred way
if 'extractor_version' in sub_metadata['content']:
existing_ver = str(sub_metadata['content']['extractor_version'])
if existing_ver == extractor_version:
return sub_metadata['content']
return None | 15a5e41003211dbd2e52a467ccd221102ebc28c1 | 3,680 |
def intensity_scale(X_f, X_o, name, thrs, scales=None, wavelet="Haar"):
"""
Compute an intensity-scale verification score.
Parameters
----------
X_f: array_like
Array of shape (m, n) containing the forecast field.
X_o: array_like
Array of shape (m, n) containing the verification observation field.
name: string
A string indicating the name of the spatial verification score
to be used:
+------------+--------------------------------------------------------+
| Name | Description |
+============+========================================================+
| FSS | Fractions skill score |
+------------+--------------------------------------------------------+
| BMSE | Binary mean squared error |
+------------+--------------------------------------------------------+
thrs: float or array_like
Scalar or 1-D array of intensity thresholds for which to compute the
verification.
scales: float or array_like, optional
Scalar or 1-D array of spatial scales in pixels,
required if ``name="FSS"``.
wavelet: str, optional
The name of the wavelet function to use in the BMSE.
Defaults to the Haar wavelet, as described in Casati et al. 2004.
See the documentation of PyWavelets for a list of available options.
Returns
-------
out: array_like
The two-dimensional array containing the intensity-scale skill scores
for each spatial scale and intensity threshold.
References
----------
:cite:`CRS2004`, :cite:`RL2008`, :cite:`EWWM2013`
See also
--------
pysteps.verification.spatialscores.binary_mse,
pysteps.verification.spatialscores.fss
"""
intscale = intensity_scale_init(name, thrs, scales, wavelet)
intensity_scale_accum(intscale, X_f, X_o)
return intensity_scale_compute(intscale) | 1f38d30378a9ec2dff7babd4edb52fceb8e23dab | 3,681 |
def make_count(bits, default_count=50):
"""
Return items count from URL bits if last bit is positive integer.
>>> make_count(['Emacs'])
50
>>> make_count(['20'])
20
>>> make_count(['бред', '15'])
15
"""
count = default_count
if len(bits) > 0:
        last_bit = bits[-1]
if last_bit.isdigit():
count = int(last_bit)
return count | 8e7dc356ba7c0787b4b44ee8bba17568e27d1619 | 3,682 |
def synthesize_ntf_minmax(order=32, osr=32, H_inf=1.5, f0=0, zf=False,
**options):
"""
Alias of :func:`ntf_fir_minmax`
.. deprecated:: 0.11.0
Function is now available from the :mod:`NTFdesign` module with
name :func:`ntf_fir_minmax`
"""
warn("Function superseded by ntf_fir_minmax in "
"NTFdesign module", PyDsmDeprecationWarning)
return ntf_fir_minmax(order, osr, H_inf, f0, zf, **options) | 6c6752584a4f9760218456b640187e442f2442aa | 3,683 |
def r2f(value):
"""
converts temperature in R(degrees Rankine) to F(degrees Fahrenheit)
:param value: temperature in R(degrees Rankine)
:return: temperature in F(degrees Fahrenheit)
"""
return const.convert_temperature(value, 'R', 'F') | 31e08dd0f3194912e5e306a5a2a5a4c9a98ef723 | 3,684 |
import os
def get_currently_playing_track():
"""Returns currently playing track as a file
No request params.
"""
try:
pt, _, _ = Track.get_currently_playing()
path = pt.track.path
return send_file( os.path.join( '..', path ) )
except DoesNotExist:
return error_response( 'Nije moguće dohvatiti trenutno svirani zapis: Trenutno se ne emitira ništa.', 404 )
except IndexError:
return error_response( 'Nije moguće dohvatiti trenutno svirani zapis: Lista za reprodukciju je završila prije vremena.', 404 )
except:
return error_response( 'Nije moguće dohvatiti trenutno svirani zapis.', 404 ) | ae1ec9123f70e1650813bae1397fbf2500287356 | 3,685 |
def get_statistics_percentiles(d_min, stats):
"""
For a given set of statistics, determine their percentile ranking compared
to other crystal structures at similar resolution.
"""
if (d_min is None):
return dict([ (s, None) for s in stats.keys() ])
try :
db = load_db()
except Exception as e :
return {}
d_min_mvd = flex.double([ float(x) for x in db['high_resolution'] ])
sel_perm = flex.sort_permutation(d_min_mvd)
d_min_mvd = d_min_mvd.select(sel_perm)
def find_value_in_list(values, value):
i = 0
j = len(values) - 1
while (i != j):
k = i + (j - i) // 2
if (value and value <= values[k]):
j = k
else :
i = k + 1
return i
index = find_value_in_list(d_min_mvd, d_min)
sel_around = flex.bool(d_min_mvd.size(), False)
index_tmp = index
while (index_tmp > 0):
d_min_other = d_min_mvd[index_tmp]
if (d_min_other < d_min - 0.1):
break
sel_around[index_tmp] = True
index_tmp -= 1
index_tmp = index
while (index_tmp < d_min_mvd.size()):
d_min_other = d_min_mvd[index_tmp]
if (d_min_other > d_min + 0.1):
break
sel_around[index_tmp] = True
index_tmp += 1
#print "%d structures around %g" % (sel_around.count(True), d_min)
percentiles = {}
for stat_name in stats.keys():
stat = stats[stat_name]
if (not stat_name in db):
percentiles[stat_name] = None
continue
values = db[stat_name].select(sel_perm).select(sel_around)
fvalues = flex.double()
for value in values :
try :
fvalues.append(float(value))
except ValueError :
pass
assert fvalues.size() != 0
fvalues_sorted = fvalues.select(flex.sort_permutation(fvalues))
stat_index = find_value_in_list(fvalues_sorted, stat)
# FIXME I think for some of these statistics we need to reverse this -
# i.e. if higher statistics are better
stat_perc = 100 * (1 - (stat_index / fvalues.size()))
percentiles[stat_name] = stat_perc
#print stat_name, stat_index, fvalues.size(), stat_perc
#flex.histogram(fvalues, n_slots=10).show(prefix=" ")
return percentiles | 1c38bec6ee0c3f3acbaaf0064fdbbfa6d166150e | 3,686 |
import re
def normalize_number(value: str, number_format: str) -> str:
"""
Transform a string that essentially represents a number to the corresponding number with the given number format.
Return a string that includes the transformed number. If the given number format does not match any supported one, return the given string.
:param value: the string
:param number_format: number format with which the value is normalized
:return: the normalized string
"""
if number_format == 'COMMA_POINT' or number_format == 'Comma Point':
nor_str = re.sub(pattern=',', repl='', string=value)
elif number_format == 'POINT_COMMA' or number_format == 'Point Comma':
nor_str = re.sub(pattern=',', repl='.', string=re.sub(pattern='\.', repl='', string=value))
elif number_format == 'SPACE_POINT' or number_format == 'Space Point':
nor_str = re.sub(pattern='\s', repl='', string=value)
elif number_format == 'SPACE_COMMA' or number_format == 'Space Comma':
nor_str = re.sub(pattern=',', repl='.', string=re.sub(pattern='\s', repl='', string=value))
elif number_format == 'NONE_COMMA' or number_format == 'None Comma':
nor_str = re.sub(pattern=',', repl='.', string=value)
else:
nor_str = value
return nor_str | c22bff28fc6ef6f424d0e9b8b0358b327cd153c5 | 3,687 |
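# Hedged usage sketch for normalize_number above (not from the original source): the same
# quantity written under different locale conventions normalizes to a plain decimal string.
assert normalize_number("1,234.56", "COMMA_POINT") == "1234.56"
assert normalize_number("1.234,56", "POINT_COMMA") == "1234.56"
assert normalize_number("1 234,56", "SPACE_COMMA") == "1234.56"
assert normalize_number("1234,56", "NONE_COMMA") == "1234.56"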
import os
import subprocess
def get_returning_users(returning_count):
"""
Returns a list of returning users
:return:
"""
# Read the exclusion file
    if os.path.exists(stats_dir + 'exclusion.lst'):
        with open(stats_dir + 'exclusion.lst', 'r') as exclusion_file:
            exclusion_list = [u.strip() for u in exclusion_file.readlines()]
    else:
        exclusion_list = []
# Read the user database
cmd_out = subprocess.getstatusoutput("sqlite3 " + data_dir + "jupyterhub.sqlite \"select name from users order by last_activity\"")[1]
all_users = cmd_out.split('\n')
# Exclude members of the lab
users_minus_exclusions = [user for user in all_users if user not in exclusion_list]
return users_minus_exclusions[:returning_count] | 021edab459a033a726f2c0af9872c3e3dfea0a72 | 3,688 |
import matplotlib.pyplot as plt
def benchmark(Algorithm_, Network_, test):
"""
Benchmarks the Algorithm on a given class of Networks. Samples variable network size, and plots results.
@param Algorithm_: a subclass of Synchronous_Algorithm, the algorithm to test.
@param Network_: a subclass of Network, the network on which to benchmark the algorithm.
@param test: a function that may throw an assertion error
"""
def averages(x,y):
"""
Groups x's with the same value, averages corresponding y values.
@param x: A sorted list of x values
@param y: A list of corresponding y values
@return: (x grouped by value, corresponding mean y values)
Example:
averages([1,1,2,2,2,3], [5,6,3,5,1,8]) --> ([1, 2, 3], [5.5, 3.0, 8.0])
"""
new_x = [x[0]]
new_y = []
cur_x = new_x[0]
cur_ys = []
for x_i, y_i in zip(x,y):
if x_i == cur_x:
cur_ys.append(y_i)
else:
new_y.append( sum(cur_ys)/float(len(cur_ys) ) )
new_x.append( x_i )
cur_ys = [y_i]
cur_x = x_i
new_y.append( sum(cur_ys)/float(len(cur_ys) ) )
return new_x, new_y
def plot(x, y, title):
"""Plots the points (x[i],y[i]) for all i, fig."""
fig, ax = plt.subplots()
x_ave,y_ave = averages(x,y)
ax.scatter(x, y, label="data", color='b')
ax.scatter(x_ave, y_ave, label="means", color='r')
ax.set_xlim( xmin=0 )
ax.set_ylim( ymin=0 )
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_title(title)
ax.set_xlabel(Network_.__name__ +' size')
data = sample(Algorithm_, Network_, test)
if data is None: return
size, comm, time = data
if issubclass(Algorithm_, Synchronous_Algorithm):
plot(size, time, Algorithm_.__name__ + ' Time Complexity')
plot(size, comm, Algorithm_.__name__ + ' Communication Complexity') | ee8d0d2bd9c9bc11eb8db07c09bfd6dc22e61ace | 3,689 |
import trimesh as trm
def scale(obj, scale_ratio):
"""
:param obj: trimesh or file path
:param scale_ratio: float, scale all axis equally
:return:
author: weiwei
date: 20201116
"""
if isinstance(obj, trm.Trimesh):
tmpmesh = obj.copy()
tmpmesh.apply_scale(scale_ratio)
return tmpmesh
elif isinstance(obj, str):
originalmesh = trm.load(obj)
tmpmesh = originalmesh.copy()
tmpmesh.apply_scale(scale_ratio)
return tmpmesh | bdc84d04e9fd7d9009a60e2c47e59322526e3248 | 3,690 |
import collections
def _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name,
allow_python_preds):
"""Verifies input arguments for the case function.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a
callable which returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for the case operation.
allow_python_preds: if true, pred_fn_pairs may contain Python bools in
addition to boolean Tensors
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
Returns:
a tuple <list of scalar bool tensors, list of callables>.
"""
del name
if not isinstance(pred_fn_pairs, (list, tuple, dict)):
raise TypeError('fns must be a list, tuple, or dict')
if isinstance(pred_fn_pairs, collections.OrderedDict):
pred_fn_pairs = pred_fn_pairs.items()
elif isinstance(pred_fn_pairs, dict):
# No name to sort on in eager mode. Use dictionary traversal order,
# which is nondeterministic in versions of Python < 3.6
if not exclusive:
raise ValueError('Unordered dictionaries are not supported for the '
'`pred_fn_pairs` argument when `exclusive=False` and '
'eager mode is enabled.')
pred_fn_pairs = list(pred_fn_pairs.items())
for pred_fn_pair in pred_fn_pairs:
if not isinstance(pred_fn_pair, tuple) or len(pred_fn_pair) != 2:
raise TypeError('Each entry in pred_fn_pairs must be a 2-tuple')
pred, fn = pred_fn_pair
if ops.is_tensor(pred):
if pred.dtype != dtype.bool:
raise TypeError('pred must be Tensor of type bool: %s' % pred.name)
elif not allow_python_preds:
raise TypeError('pred must be a Tensor, got: %s' % pred)
elif not isinstance(pred, bool):
raise TypeError('pred must be a Tensor or bool, got: %s' % pred)
if not callable(fn):
raise TypeError('fn for pred %s must be callable.' % pred.name)
predicates, actions = zip(*pred_fn_pairs)
return predicates, actions | 6a6b16561600ce24ef69964ff33a309d664bb53f | 3,691 |
def get_policy(policy_name: str) -> Policy:
"""Returns a mixed precision policy parsed from a string."""
# Loose grammar supporting:
# - "c=f16" (params full, compute+output in f16),
# - "p=f16,c=f16" (params, compute and output in f16).
# - "p=f16,c=bf16" (params in f16, compute in bf16, output in bf16)
# For values that are not specified params defaults to f32, compute follows
# params and output follows compute (e.g. 'c=f16' -> 'p=f32,c=f16,o=f16').
param_dtype = jnp.float32
compute_dtype = output_dtype = None
if "=" in policy_name:
for part in policy_name.split(","):
key, value = part.split("=", 2)
value = parse_dtype(value)
if key == "p" or key == "params":
param_dtype = value
elif key == "c" or key == "compute":
compute_dtype = value
elif key == "o" or key == "output":
output_dtype = value
else:
raise ValueError(f"Unknown key '{key}' in '{policy_name}' should be "
"'params', 'compute' or 'output'.")
if compute_dtype is None:
compute_dtype = param_dtype
if output_dtype is None:
output_dtype = compute_dtype
else:
# Assume policy name is a dtype (e.g. 'f32' or 'half') that all components
# of the policy should contain.
param_dtype = compute_dtype = output_dtype = parse_dtype(policy_name)
return Policy(param_dtype=param_dtype, compute_dtype=compute_dtype,
output_dtype=output_dtype) | 2aac684706f001b537bdb103abfc63ffc79eb4c5 | 3,692 |
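# Hedged usage sketch for get_policy above (not from the original source); it assumes the
# surrounding module's Policy and parse_dtype helpers are available. Unspecified components
# fall back as described: params -> float32, compute -> params, output -> compute.
policy = get_policy("c=f16")         # params=float32, compute=float16, output=float16
policy = get_policy("p=f16,c=bf16")  # params=float16, compute=bfloat16, output=bfloat16
policy = get_policy("half")          # a bare dtype name applies to all three components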
def reset():
"""Reset password page. User launch this page via the link in
the find password email."""
if g.user:
return redirect('/')
token = request.values.get('token')
if not token:
flash(_('Token is missing.'), 'error')
return redirect('/')
user = verify_auth_token(token, expires=1)
if not user:
flash(_('Invalid or expired token.'), 'error')
return redirect(url_for('.find'))
form = ResetForm()
if form.validate_on_submit():
user.change_password(form.password.data).save()
login_user(user)
flash(_('Your password is updated.'), 'info')
return redirect(url_for('.setting'))
return render_template('account/reset.html', form=form, token=token) | fd6e2c356bb664b87d1d2ad9dcd2305a05d541ec | 3,693 |
from pathlib import Path
def usort_file(path: Path, dry_run: bool = False, diff: bool = False) -> Result:
"""
Sorts one file, optionally writing the result back.
Returns: a Result object.
Note: Not intended to be run concurrently, as the timings are stored in a
global.
"""
result = Result(path)
result.timings = []
with save_timings(result.timings):
try:
config = Config.find(path)
src_contents = path.read_bytes()
dst_contents, encoding = usort_bytes(src_contents, config, path)
if src_contents != dst_contents:
result.changed = True
if diff:
result.diff = unified_diff(
src_contents.decode(encoding),
dst_contents.decode(encoding),
path.as_posix(),
)
if not dry_run:
path.write_bytes(dst_contents)
result.written = True
except Exception as e:
result.error = e
return result | b8455cc81f25890ecb88e809aab7d016ee1604d2 | 3,694 |
import datetime
import dateutil.tz
def datetimeobj_a__d_b_Y_H_M_S_z(value):
"""Convert timestamp string to a datetime object.
Timestamps strings like 'Tue, 18 Jun 2013 22:00:00 +1000' are able to be
converted by this function.
Args:
value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
"""
a, d, b, Y, t, z = value.split()
H, M, S = t.split(":")
return datetime.datetime(
int(Y), _months[b.lower()], int(d), int(H), int(M), int(S),
tzinfo=dateutil.tz.tzoffset(None, _offset(z))
) | 2d3761d842a6f21ea646f6ac539d7ca4d78e20e9 | 3,695 |
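# Hedged alternative (not from the original source): for this RFC 2822-style timestamp the
# standard library gives the same result, which is useful for sanity-checking the parser above.
from email.utils import parsedate_to_datetime

dt = parsedate_to_datetime("Tue, 18 Jun 2013 22:00:00 +1000")
print(dt.isoformat())  # 2013-06-18T22:00:00+10:00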
import tensorflow as tf
def fn_sigmoid_proxy_func(threshold, preds, labels, temperature=1.):
"""Approximation of False rejection rate using Sigmoid."""
return tf.reduce_sum(
tf.multiply(tf.sigmoid(-1. * temperature * (preds - threshold)), labels)) | 84a248f4883ab383e2afc6556a335f33d114a9ae | 3,696 |
from typing import Optional
from typing import Sequence
def get_images(filters: Optional[Sequence[pulumi.InputType['GetImagesFilterArgs']]] = None,
sorts: Optional[Sequence[pulumi.InputType['GetImagesSortArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetImagesResult:
"""
Get information on images for use in other resources (e.g. creating a Droplet
based on a snapshot), with the ability to filter and sort the results. If no filters are specified,
all images will be returned.
This data source is useful if the image in question is not managed by the provider or you need to utilize any
of the image's data.
Note: You can use the `getImage` data source to obtain metadata
about a single image if you already know the `slug`, unique `name`, or `id` to retrieve.
:param Sequence[pulumi.InputType['GetImagesFilterArgs']] filters: Filter the results.
The `filter` block is documented below.
:param Sequence[pulumi.InputType['GetImagesSortArgs']] sorts: Sort the results.
The `sort` block is documented below.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['sorts'] = sorts
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getImages:getImages', __args__, opts=opts, typ=GetImagesResult).value
return AwaitableGetImagesResult(
filters=__ret__.filters,
id=__ret__.id,
images=__ret__.images,
sorts=__ret__.sorts) | 0e56efb6f7735f3b15f2241e7bfa43e28863f866 | 3,697 |
def polevl(x, coef):
"""Taken from http://numba.pydata.org/numba-doc/0.12.2/examples.html"""
N = len(coef)
ans = coef[0]
i = 1
while i < N:
ans = ans * x + coef[i]
i += 1
return ans | 2c7c0f5b329ab5ea28d123112ec065dd0c292c12 | 3,698 |
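# Hedged usage sketch for polevl above (not from the original source): coefficients are given
# highest order first, so [2, -3, 1] evaluates 2*x**2 - 3*x + 1 by Horner's rule.
assert polevl(4.0, [2.0, -3.0, 1.0]) == 21.0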
import os
def is_regular_file(path):
"""Check whether 'path' is a regular file, especially not a symlink."""
return os.path.isfile(path) and not os.path.islink(path) | 28ad19350d1a11b62e858aa8408cb29dc4d4c126 | 3,699 |