content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
import re
def check_playlist_url(playlist_url):
"""Check if a playlist URL is well-formated.
Parameters
----------
playlist_url : str
URL to a YouTube playlist.
Returns
-------
str
If the URL is well-formated, return the playlist ID. Else return `None`.
"""
match = re.match(
r"https?://www\.youtube\.com/playlist\?list=(.+)",
playlist_url.strip()
)
if match is None:
raise ValueError("Incorrect URL: %s" % playlist_url)
return match.group(1) | b14808e3dc25fcb7f91e9b66ec5f31ae869c6ae5 | 16,500 |
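A minimal usage sketch of check_playlist_url (added for illustration; the playlist ID below is made up):
print(check_playlist_url("https://www.youtube.com/playlist?list=PL1234567890"))  # -> 'PL1234567890'
# check_playlist_url("https://example.com/watch?v=xyz") raises ValueError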
import multiprocessing
import asyncio
def test_PipeJsonRpcSendAsync_2(method, params, result, notification):
"""
Test of basic functionality. Here we don't test for timeout case (it raises an exception).
"""
value_nonlocal = None
def method_handler1():
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return 5
def method_handler2(value=2):
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return value + 10
def method_handler3(*, value=3):
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return value + 15
class SomeClass:
def method_handler4(self, *, value=4):
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return value + 15
some_class = SomeClass()
conn1, conn2 = multiprocessing.Pipe()
pc = PipeJsonRpcReceive(conn=conn2, name="comm-server")
pc.add_method(method_handler1) # No name is specified, default name is "method_handler1"
pc.add_method(method_handler1, "method1")
pc.add_method(method_handler2, "method2")
pc.add_method(method_handler3, "method3")
pc.add_method(some_class.method_handler4, "method4")
pc.start()
async def send_messages():
nonlocal value_nonlocal
p_send = PipeJsonRpcSendAsync(conn=conn1, name="comm-client")
p_send.start()
for n in range(3):
value_nonlocal = None
response = await p_send.send_msg(method, params, notification=notification)
if not notification:
assert response == result, f"Result does not match the expected: {response}"
assert value_nonlocal == "function_was_called", "Non-local variable has incorrect value"
elif response is not None:
assert False, "Response was received for notification."
p_send.stop()
asyncio.run(send_messages())
pc.stop() | 3311c1be5013eae986b5c8e358ef08eafff6420c | 16,501 |
def f(t, T):
"""
returns -1, 0, or 1 based on relationship between t and T
throws IndexError
"""
if 0 < t < T / 2:
return 1
elif t == T / 2:
return 0
elif T / 2 < t < T:
return -1
raise IndexError("Out of function domain") | f2365094d41d2a151322ad640dcf4b290dd1de79 | 16,502 |
import http
import json
def auth0_token():
"""
Token for Auth0 API
"""
auth = settings["auth0"]
conn = http.client.HTTPSConnection(auth['domain'])
payload = json.dumps({
"client_id": auth['client'],
"client_secret": auth['client-secret'],
"audience": f"https://{auth['domain']}/api/v2/",
"grant_type": "client_credentials",
})
headers = {'content-type': "application/json"}
conn.request("POST", "/oauth/token", payload, headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))["access_token"] | ba27798d4af79999f1c37d5b356a54cd427a0681 | 16,503 |
def get_ami(region, instance_type):
"""Returns the appropriate AMI to use for a given region + instance type
HVM is always used except for instance types which cannot use it. Based
on matrix here:
http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
.. note::
:func:`populate_ami_ids` must be called first to populate the available
AMI's.
"""
if not _POPULATED:
raise KeyError('populate_ami_ids must be called first')
instances = AWS_AMI_IDS[region]
inst_type = "hvm"
if instance_type[:2] in ["m1", "m2", "c1", "t1"]:
inst_type = "paravirtual"
if inst_type not in instances:
msg = "Could not find instance type %r in %s for region %s"
raise KeyError(msg % (inst_type, list(instances.keys()), region))
return instances[inst_type].id | 7ea60dbda0dabb05d9f7509ddb4c567560d681eb | 16,504 |
def convert_config(cfg):
""" Convert some configuration values to different values
Args:
cfg (dict): dict of sub-dicts, each sub-dict containing configuration
keys and values pertinent to a process or algorithm
Returns:
dict: configuration dict with some items converted to different objects
Raises:
KeyError: raise KeyError if configuration file is not specified
correctly
"""
# Parse dataset:
cfg = _parse_dataset_config(cfg)
# Parse YATSM:
cfg = _parse_YATSM_config(cfg)
return cfg | f84ae8f5b90f364eb378b48071c7ea4a99370076 | 16,505 |
def _signals_exist(names):
""" Return true if all of the given signals exist in this version of flask.
"""
return all(getattr(signals, n, False) for n in names) | 3f62e5a6309d792843947c3aa6f5ad687b6debf5 | 16,506 |
def login():
"""Route for logging the user in."""
try:
if request.method == 'POST':
return do_the_login()
if session.get('logged_in'):
return redirect(url_for('home'))
return render_template('login.html')
except Exception as e:
abort(500, {'message': str(e)}) | 1f610bcfd450de5de576eef797ed9d26f726ec72 | 16,507 |
import os
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
try:
return [f[:-3] for f in os.listdir(command_dir)
if not f.startswith('_') and f.endswith('.py')]
except OSError:
return [] | 3dc0418a4b1674b7db34fe1893f66e55d44ea5bc | 16,508 |
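A usage sketch (added for illustration; the directory layout below is hypothetical):
# given myapp/management/commands/ containing createusers.py, sync_data.py and _helpers.py,
# find_commands('myapp/management') returns ['createusers', 'sync_data'] (in directory-listing order)
# and skips the underscore-prefixed module.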
from typing import Dict
from typing import List
def __screen_info_to_dict():
"""
筛查
:return:
"""
screen_dict: Dict[str, List[str]] = {}  # author name -> list of commit messages
for info in git.get_all_commit_info():
if info.name not in authors:
continue
if not info.check_today_time():
continue
if info.name in screen_dict:
screen_dict[info.name].append(info.msg)
else:
screen_dict[info.name] = [info.msg]
return screen_dict | 6fb5519ab746b8918f18ec0c0a1769b5dca3558c | 16,509 |
from typing import Type
from typing import Optional
def bind_prop_arr(
prop_name: str,
elem_type: Type[Variable],
doc: Optional[str] = None,
doc_add_type=True,
) -> property:
"""Convenience wrapper around bind_prop for array properties
:meta private:
"""
if doc is None:
doc = f"Wrapper around `variables['{prop_name}']` of type `VariableArray[{elem_type.__name__}]`."
if doc_add_type:
doc = f"MutableSequence[{_get_python_prop_type(elem_type)}]: {doc}"
return bind_prop(
prop_name,
VariableArray,
lambda: VariableArray(elem_type),
doc=doc,
doc_add_type=False,
objtype=True,
) | f8e610011f096013c3976ee22f43eb58472c0513 | 16,510 |
from typing import Dict
from typing import Any
import json
def generate_profile_yaml_file(destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]:
"""
Each destination requires different settings to connect to. This step generates the adequate profiles.yml
as described here: https://docs.getdbt.com/reference/profiles.yml
"""
config_generator = TransformConfig()
profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json")
# Adapt credential file to look like destination config.json
if destination_type.value == DestinationType.BIGQUERY.value:
profiles_config["credentials_json"] = json.dumps(profiles_config)
profiles_config["dataset_id"] = target_schema
else:
profiles_config["schema"] = target_schema
profiles_yaml = config_generator.transform(destination_type, profiles_config)
config_generator.write_yaml_config(test_root_dir, profiles_yaml)
return profiles_config | f66b8df469b00d8aa11e6aba7d6efb5c2af4e21f | 16,511 |
def get_foreign_trips(db_connection):
"""
Gets the time series data for all Foreign visitors from the database
Args:
db_connection (Psycopg.connection): The database connection
Returns:
Pandas.DataFrame: The time series data for each unique Foreign visitor.
It has the columns cust_id, date, date_diff, calls,
calls_in_florence, calls_near_airport
"""
counts = get_daily_call_counts(db_connection,
'optourism.foreigners_timeseries_daily')
return get_trips(counts) | bd5f0d308681286c615d60b0bf2fcb88bfd2a806 | 16,512 |
from typing import List
def get_diagonal_sums(square: Square) -> List[int]:
""" Returns a list of the sum of each diagonal. """
topleft = 0
bottomleft = 0
for i, row in enumerate(square.rows):
topleft += row[i]
for i, col in enumerate(square.columns):
bottomleft += col[i]
return [topleft, bottomleft] | 37a5e167b3170feece19b963c21eae1359df14ec | 16,513 |
import base64
def int_to_base64(i: int) -> str:
""" Returns a 12 char length representation of i in base64 """
return base64.b64encode(i.to_bytes(8, 'big')).decode('ascii') | 5bd7bb032926a8f429d766632c2ef2af9ee01edc | 16,514 |
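A quick check (added for illustration) of the helper, which encodes 8 bytes into exactly 12 base64 characters:
print(int_to_base64(1))           # 'AAAAAAAAAAE='
print(len(int_to_base64(2**63)))  # 12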
def payment_provider(provider_base_config):
"""When it doesn't matter if request is contained within provider the fixture can still be used"""
return TurkuPaymentProviderV3(config=provider_base_config) | d6439a5ef097350682a2e17ccc41aeba1310a78a | 16,515 |
import re
def gradle_extract_data(build_gradle):
"""
Extract the project name and dependencies from a build.gradle file.
:param Path build_gradle: The path of the build.gradle file
:rtype: dict
"""
# Content for dependencies
content_build_gradle = extract_content(build_gradle)
match = re.search(r'apply plugin: ("|\')org.ros2.tools.gradle\1', content_build_gradle)
if not match:
raise RuntimeError("Gradle plugin missing, please add the following to build.gradle: \"apply plugin: 'org.ros2.tools.gradle'\"")
return extract_data(build_gradle) | dd802b8fedb682493a1978ae6cd60be9706580ff | 16,516 |
from typing import Sequence
import torch
import torch.nn.functional as F
def stack_batch_img(
img_tensors: Sequence[torch.Tensor], divisible: int = 0, pad_value: float = 0
) -> torch.Tensor:
"""
Args
:param img_tensors (Sequence[torch.Tensor]):
:param divisible (int):
:param pad_value (float): value to pad
:return: torch.Tensor.
"""
assert len(img_tensors) > 0
assert isinstance(img_tensors, (tuple, list))
assert divisible >= 0
img_heights = []
img_widths = []
for img in img_tensors:
assert img.shape[:-2] == img_tensors[0].shape[:-2]
img_heights.append(img.shape[-2])
img_widths.append(img.shape[-1])
max_h, max_w = max(img_heights), max(img_widths)
if divisible > 0:
max_h = (max_h + divisible - 1) // divisible * divisible
max_w = (max_w + divisible - 1) // divisible * divisible
batch_imgs = []
for img in img_tensors:
padding_size = [0, max_w - img.shape[-1], 0, max_h - img.shape[-2]]
batch_imgs.append(F.pad(img, padding_size, value=pad_value))
return torch.stack(batch_imgs, dim=0).contiguous() | 9952965a89688d742a3342804062cb8051f47f54 | 16,517 |
import numpy as np
from evo.core import lie_algebra as lie
from evo.core import trajectory
def convert_rel_traj_to_abs_traj(traj):
""" Converts a relative pose trajectory to an absolute-pose trajectory.
The incoming trajectory is processed element-wise. The pose at each
timestamp is composed with the absolute pose from the previous timestamp.
Args:
traj: A PoseTrajectory3D object with timestamps as indices containing, at a minimum,
columns representing the xyz position and wxyz quaternion-rotation at each
timestamp, corresponding to the pose between previous and current timestamps.
Returns:
A PoseTrajectory3D object with xyz position and wxyz quaternion fields for the
absolute pose trajectory corresponding to the relative one given in `traj`.
"""
new_poses = [lie.se3()] # origin at identity
for i in range(0, len(traj.timestamps)):
abs_pose = np.dot(new_poses[-1], traj.poses_se3[i])
new_poses.append(abs_pose)
return trajectory.PoseTrajectory3D(timestamps=traj.timestamps[1:], poses_se3=new_poses) | 57e4972f5bc4ea67bf62b88ea87fc5df8dda0d7c | 16,518 |
def remove(handle):
"""The remove action allows users to remove a roommate."""
user_id = session['user']
roommate = model.roommate.get_roommate(user_id, handle)
# Check if roommate exists
if not roommate:
return abort(404)
if request.method == 'POST':
model.roommate.delete_roommate(roommate.id)
return redirect(url_for('roommate.overview'))
return render_template('/roommate/remove.jinja', roommate=roommate) | b1a279989d3cb463d54c8559352f2ae67f198b40 | 16,519 |
def maxsubarray(list):
"""
Find a maximum subarray following this idea:
Knowing a maximum subarray of list[0..j]
find a maximum subarray of list[0..j+1] which is either
(I) the maximum subarray of list[0..j]
(II) or is a maximum subarray list[i..j+1] for some 0 <= i <= j
We can determine (II) in constant time by keeping a max
subarray ending at the current j.
This is done in the first if of the loop, where the max
subarray ending at j is max(previousSumUntilJ + array[j], array[j])
This works because if array[j] + sum so far is less than array[j]
then the sum of the subarray so far is negative (and less than array[j]
in case it is also negative) so it has a bad impact on the
subarray until J sum and we can safely discard it and start anew
from array[j]
Complexity (n = length of list)
Time complexity: O(n)
Space complexity: O(1)
"""
if len(list) == 0:
return (-1, -1, 0)
# keep the max sum of subarray ending in position j
maxSumJ = list[0]
# keep the starting index of the maxSumJ
maxSumJStart = 0
# keep the sum of the maximum subarray found so far
maxSum = list[0]
# keep the starting index of the current max subarray found
maxStart = 0
# keep the ending index of the current max subarray found
maxEnd = 0
for j in range(1, len(list)):
if maxSumJ + list[j] >= list[j]:
maxSumJ = maxSumJ + list[j]
else:
maxSumJ = list[j]
maxSumJStart = j
if maxSum < maxSumJ:
maxSum = maxSumJ
maxStart = maxSumJStart
maxEnd = j
return (maxSum, maxStart, maxEnd) | a991ca09c0594b0d47eb4dd8be44d093d593cd36 | 16,520 |
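A worked example (added for illustration) on the classic Kadane test array:
print(maxsubarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # -> (6, 3, 6): the slice [4, -1, 2, 1] sums to 6
print(maxsubarray([]))  # -> (-1, -1, 0)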
def get_merged_threadlocal(bound_logger: BindableLogger) -> Context:
"""
Return a copy of the current thread-local context merged with the context
from *bound_logger*.
.. versionadded:: 21.2.0
"""
ctx = _get_context().copy()
ctx.update(structlog.get_context(bound_logger))
return ctx | 03c2689fd71542c7c007512fb4c2bf76a841a7bc | 16,521 |
def sort_cipher_suites(cipher_suites, ordering):
"""Sorts the given list of CipherSuite instances in a specific order."""
if ordering == 'asc':
return cipher_suites.order_by('name')
elif ordering == 'desc':
return cipher_suites.order_by('-name')
else:
return cipher_suites | 5a554ba1e2e4d82f53f29c5a1c2f4d311f538889 | 16,522 |
def make_1D_distributions(lims, n_points, all_shifts, all_errs, norm=None, max_shifts=None, seed=None):
"""
Generate 1D distributions of chemical shifts from arrays of shifts and errors of each distribution
Inputs: - lims Limits of the distributions
- n_points Number of points in the distributions
- all_shifts Array of shifts for each distribution
- all_errs Array of predicted error for each distribution
- norm Distribution normalization to apply
None: no normalization
"max": top of each distribution set to 1
- max_shifts Maximum number of shifts to consider when constructing the distribution
- seed Seed for the random selection of shifts
Outputs: - x Array of shielding values to plot the distributions against
- ys List of distributions
"""
# Construct the array of shielding values
x = np.linspace(lims[0], lims[1], n_points)
# Generate the distributions
ys = []
for i, (sh, er) in enumerate(zip(all_shifts, all_errs)):
print(" Constructing distribution {}/{}...".format(i+1, len(all_shifts)))
ys.append(make_1D_distribution(x, sh, er, norm=norm, max_shifts=max_shifts, seed=seed))
print(" Distribution constructed!\n")
return x, ys | 87c48b80dc395b4423b88fcbb3307dd53655333e | 16,523 |
def fill_column_values(df, icol=0):
"""
Fills empty values in the targeted column with the value above it.
Parameters
----------
df: pandas.DataFrame
icol: int
Returns
-------
pandas.DataFrame
"""
v = df.iloc[:,icol].fillna('').values.tolist()
vnew = fill_gaps(v)
dfnew = df.copy() # type: pd.DataFrame
dfnew.iloc[:,icol] = vnew
return dfnew | 158939f6436a4c9b5a13a18567ee6061e71df51c | 16,524 |
import torch
def reward(static, tour_indices):
"""
Euclidean distance between all cities / nodes given by tour_indices
"""
# Convert the indices back into a tour
idx = tour_indices.unsqueeze(1).expand(-1, static.size(1), -1)
tour = torch.gather(static.data, 2, idx).permute(0, 2, 1)
# Ensure we're always returning to the depot - note the extra concat
# won't add any extra loss, as the euclidean distance between consecutive
# points is 0
start = static.data[:, :, 0].unsqueeze(1)
y = torch.cat((start, tour, start), dim=1)
# Euclidean distance between each consecutive point
tour_len = torch.sqrt(torch.sum(torch.pow(y[:, :-1] - y[:, 1:], 2), dim=2))
return tour_len.sum(1) | f7197bcfb3699cafa4df3c1430b4f9ee1bf53242 | 16,525 |
def valid_review_queue_name(request):
"""
Given a name for a queue, validates the correctness for our review system
:param request:
:return:
"""
queue = request.matchdict.get('queue')
if queue in all_queues:
request.validated['queue'] = queue
return True
else:
_tn = Translator(get_language_from_cookie(request))
add_error(request, 'Invalid queue', _tn.get(_.internalError))
return False | fc6ef2fb728b18ce84669736f0e4ec1f020ea2bf | 16,526 |
import subprocess
def is_valid_cluster_dir(path):
"""Checks whether a given path is a valid postgres cluster
Args:
pg_ctl_exe - str, path to pg_ctl executable
path - str, path to directory
Returns:
bool, whether or not a directory is a valid postgres cluster
"""
pg_controldata_exe = which('pg_controldata')
cmd = '"{pg_controldata}" "{path}"'.format(
pg_controldata=pg_controldata_exe, path=path)
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err = proc.communicate()
if 'No such file or directory' in err.decode('utf-8'):
return False
else:
return True | f34a9569af6a8e83d3f1f7b37e0c266f24c86cca | 16,527 |
def get_best_straight(possible_straights, hand):
""" get list of indices of hands that make the strongest straight
if no one makes a straight, return empty list
:param possible_straights: ({tuple(str): int})
map tuple of connecting cards --> best straight value they make
:param hand: (set(str)) set of strings
:return: (int) top value in the straight, or 0 if no straight
"""
highest_straight_value = 0 # e.g. 14 for broadway, 5 for the wheel
hand_values = set(
ranks_to_sorted_values(
ranks=[r for r, _ in hand], aces_high=True, aces_low=True
)
)
for connecting_values, max_value in possible_straights.items():
connecting_cards = set(connecting_values) & hand_values
if len(connecting_cards) == 2:
# we've made a straight!
if max_value > highest_straight_value:
highest_straight_value = max_value
return highest_straight_value | f2a470ef3033cac27cb406702daead42d59683aa | 16,528 |
from django.shortcuts import render_to_response
from django.template import RequestContext
def stats(request):
"""
Display statistics for the web site
"""
views = list(View.objects.all().only('internal_url', 'browser'))
urls = {}
mob_vs_desk = { 'desktop': 0, 'mobile': 0 }
for view in views:
if is_mobile(view.browser):
mob_vs_desk['mobile'] += 1
else:
mob_vs_desk['desktop'] += 1
if view.internal_url not in urls:
urls[view.internal_url] = 0
urls[view.internal_url] += 1
stats = []
count = 0
for url in urls:
stats.append({'url': url, 'count': urls[url]})
count += urls[url]
stats = sorted(stats, key=lambda k: k['count'], reverse=True)
return render_to_response('admin/appview/view/display_stats.html',
RequestContext(request, { 'stats' : stats,
'total' : count,
'views': mob_vs_desk
}
)
) | 3b63250e6ce3c9ddd09ec8d19c9961b22bfab62a | 16,529 |
def build_argparser():
"""
Builds argument parser.
:return argparse.ArgumentParser
"""
banner = "%(prog)s - generate a static file representation of a PEP data repository."
additional_description = "\n..."
parser = _VersionInHelpParser(
description=banner,
epilog=additional_description)
parser.add_argument(
"-V", "--version",
action="version",
version="%(prog)s {v}".format(v=__version__))
parser.add_argument(
"-d", "--data", required=False,
default=PEPHUB_URL,
help="URL/Path to PEP storage tree.")
parser.add_argument(
"-o", "--out", required=False,
default="./out",
help="Outpath for generated PEP tree.")
parser.add_argument(
"-p", "--path", required=False,
help="Path to serve the file server at."
)
# parser for serve command
subparsers = parser.add_subparsers(
help="Functions",
dest="serve"
)
serve_parser = subparsers.add_parser("serve", help="Serve a directory using pythons built-in http library")
serve_parser.set_defaults(
func=serve_directory
)
serve_parser.add_argument(
"-f", "--files", required=False,
help="Files to serve.",
default="./out"
)
return parser | f33679c82a1499db83caf3473b0e5403ebfa52fe | 16,530 |
def abc19():
"""Solution to exercise C-1.19.
Demonstrate how to use Python’s list comprehension syntax to produce
the list [ a , b , c , ..., z ], but without having to type all 26 such
characters literally.
"""
a_idx = 97
return [chr(a_idx + x) for x in range(26)] | c9bb948ad57ddbc138dfbc0c481fabb45de620ba | 16,531 |
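A quick check (added for illustration):
print(abc19()[:3], abc19()[-1])  # -> ['a', 'b', 'c'] z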
import os
def setForceFieldVersion(version):
"""
Set the forcefield parameters using a version number (string) and return
a handle to the object holding them
current versions are:
'4.2': emulating AutoDock4.2
'default': future AutoDock5
Parameters <- setForceFieldVersion(version)
"""
_parameters.clearAll()
if version=='default':
_parameters.loadDefaults()
elif version=='4.2':
_parameters.loadFromDatFile(os.path.join(ADFRcc.__path__[0], 'Data',
'AD42PARAM.DAT'))
else:
raise ValueError("ERROR: unknown forcefield version %s"%version)
return _parameters | a5349bda04f919c6858c4653400f98fe57092a9e | 16,532 |
def filter_words(data: TD_Data_Dictionary):
"""This function removes all instances of Key.ctrl from the list of keys and
any repeats because of Press and Realese events"""
# NOTE: We may just want to remove all instances of Key.ctrl from the list and anything that follows that
keys = data.get_letters()
return keys | fb34e1758c83af0e30b5ae807a3f852ab7e3be29 | 16,533 |
from http.client import HTTPSConnection
from ssl import SSLContext
from typing import Dict
def check_url_secure(
docker_ip: str,
public_port: int,
*,
auth_header: Dict[str, str],
ssl_context: SSLContext,
) -> bool:
"""
Secure form of lovely/pytest/docker/compose.py::check_url() that checks whether the secure docker registry service is
operational.
Args:
docker_ip: IP address on which the service is exposed.
public_port: Port on which the service is exposed.
auth_header: HTTP basic authentication header to use when connecting to the service.
ssl_context:
SSL context referencing the trusted root CA certificates to use when negotiating the TLS connection.
Returns:
(bool) True when the service is operational, False otherwise.
"""
try:
https_connection = HTTPSConnection(
context=ssl_context, host=docker_ip, port=public_port
)
https_connection.request("HEAD", "/v2/", headers=auth_header)
return https_connection.getresponse().status < 500
except Exception: # pylint: disable=broad-except
return False | ebdc8f4d175f3be70000022424382f71d9fd73b5 | 16,534 |
def ResNet101(pretrained=False, use_ssld=False, **kwargs):
"""
ResNet101
Args:
pretrained: bool=False or str. If `True` load pretrained parameters, `False` otherwise.
If str, means the path of the pretrained model.
use_ssld: bool=False. Whether using distillation pretrained model when pretrained=True.
Returns:
model: nn.Layer. Specific `ResNet101` model depends on args.
"""
model = ResNet(config=NET_CONFIG["101"], version="vb", **kwargs)
_load_pretrained(pretrained, model, MODEL_URLS["ResNet101"], use_ssld)
return model | 0277c59f9b60d5c6127fb1021eb71b10691bd0f8 | 16,535 |
import vim
def LineTextInCurrentBuffer( line_number ):
""" Returns the text on the 1-indexed line (NOT 0-indexed) """
return vim.current.buffer[ line_number - 1 ] | 8c3b51a48e25e8955a00d89619da9e191612861a | 16,536 |
def imported_instrumentor(library):
"""
Convert a library name to that of the correlated auto-instrumentor
in the libraries package.
"""
instrumentor_lib = "signalfx_tracing.libraries.{}_".format(library)
return get_module(instrumentor_lib) | db26277b23f989d8d5323c7c6bde0905b1e2f5ef | 16,537 |
import time
import calendar
def parse_cache_entry_into_seconds(isoTime):
"""
Returns the number of seconds from the UNIX epoch.
See :py:attr:`synapseclient.utils.ISO_FORMAT` for the parameter's expected format.
"""
# Note: The `strptime()` method is not thread-safe (http://bugs.python.org/issue7980)
strptimeLock.acquire()
cacheTime = time.strptime(isoTime, utils.ISO_FORMAT)
strptimeLock.release()
return calendar.timegm(cacheTime) | 7492b828b331d54f3bf90056d9010e2dbbd86d06 | 16,538 |
import datetime
def parse_runtime(log_file):
""" Parse the job run-time from a log-file
"""
with open(log_file, 'r') as f:
for line in f:
l0 = line.rstrip("\n")
break
l1 = tail(log_file, 1)[0].rstrip("\n")
l0 = l0.split()[:2]
l1 = l1.split()[:2]
try:
y0, m0, d0 = list(map(int, l0[0].split('-')))
h0, min0, s0 = list(map(float, l0[1][:-1].split(':')))
except ValueError as e:
print(log_file)
print(l0)
raise e
try:
y1, m1, d1 = list(map(int, l1[0].split('-')))
h1, min1, s1 = list(map(float, l1[1][:-1].split(':')))
except ValueError as e:
print(log_file)
print(l1)
raise e
date0 = datetime.datetime(y0, m0, d0, int(h0), int(min0), int(s0))
date1 = datetime.datetime(y1, m1, d1, int(h1), int(min1), int(s1))
diff = (date1 - date0).total_seconds()
return diff | 75a5a80409918779173eb1e80d6b3f95abf242cb | 16,539 |
def calculateEMA(coin_pair, period, unit):
"""
Returns the Exponential Moving Average for a coin pair
"""
closing_prices = getClosingPrices(coin_pair, period, unit)
previous_EMA = calculateSMA(coin_pair, period, unit)
constant = 2 / (period + 1)
current_EMA = (closing_prices[-1] * constant) + (previous_EMA * (1 - constant))
return current_EMA | ec884f89c2e8e64ada4384767251d6722c7b63c8 | 16,540 |
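A purely arithmetic illustration of the smoothing step above (numbers chosen for demonstration):
# period = 3  ->  smoothing constant = 2 / (3 + 1) = 0.5
# previous_EMA = 10, latest close = 16  ->  current_EMA = 16 * 0.5 + 10 * 0.5 = 13.0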
import numpy as np
def euler_method(r0, N):
"""
euler_method function description:
This method computes the vector r(t)'s using Euler's method.
Args:
r0 - the initial r-value
N - the number of steps in each period
"""
delta_t = (2*np.pi)/N # delta t
r = np.zeros((5*N, 2)) # 5Nx2 array
r[0] = r0 # initial r-value
J = np.array(([0,1],[-1,0])) # antisymmetric matrix (meaning its transpose equals its negative)
for i in range(1, 5*N):
r[i] = r[i-1] + delta_t*(J@(r[i-1])) # euler's method
return r | 6ac3deae5cdb5ce84fa19433b55de80bf04ddf47 | 16,541 |
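A usage sketch (added for illustration): starting on the unit circle, the scheme traces roughly circular motion; explicit Euler is slightly unstable for this system, so the radius drifts outward over the five periods.
r = euler_method(np.array([1.0, 0.0]), 200)
print(r.shape)                 # (1000, 2)
print(np.linalg.norm(r[-1]))   # a little above 1.0 because of the outward drift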
def main_menu(found_exists):
"""prints main menu and asks for user input
returns task that is chosen by user input"""
show_main_menu(found_exists)
inp = input(">> ")
if inp == "1":
return "update"
elif inp == "2":
return "show_all"
elif inp == "3":
return "show_waypoints"
elif inp == "4":
return "map-menu"
elif inp == "5":
return "show_one"
elif inp == "6":
return "search"
elif inp == "7" and found_exists:
return "show_founds"
elif inp == "8" and found_exists:
return "exit"
elif inp == "7" and not found_exists:
return "exit"
else:
print("Ungueltige Eingabe!") | 61d0bda6a1ddf8bf70a79ff6e7488601d781c5fc | 16,542 |
def fromPsl(psl, qCdsRange=None, inclUnaln=False, projectCds=False, contained=False):
"""generate a PairAlign from a PSL. cdsRange is None or a tuple. In
inclUnaln is True, then include Block objects for unaligned regions"""
qCds = _getCds(qCdsRange, psl.qStrand, psl.qSize)
qSeq = _mkPslSeq(psl.qName, psl.qStart, psl.qEnd, psl.qSize, psl.qStrand, qCds)
tSeq = _mkPslSeq(psl.tName, psl.tStart, psl.tEnd, psl.tSize, psl.tStrand)
aln = PairAlign(qSeq, tSeq)
prevBlk = None
for i in range(psl.blockCount):
prevBlk = _addPslBlk(psl.blocks[i], aln, prevBlk, inclUnaln)
if projectCds and (aln.qSeq.cds is not None):
aln.projectCdsToTarget(contained)
return aln | f1da225d53f36abf5d10589077de934f13c1ca2a | 16,543 |
def drawMatrix(ax, mat, **kwargs):
"""Draw a view to a matrix into the axe."
TODO
----
* pg.core.BlockMatrix
Parameters
----------
ax : mpl axis instance, optional
Axis instance where the matrix will be plotted.
mat: obj
obj can be so far:
* pg.core.*Matrix
* scipy.sparce
Returns
-------
ax:
"""
mat = pg.utils.sparseMatrix2coo(mat)
ax.spy(mat)
return ax | 3ad4988b90909d803c5e730a50c0e09bd9c554ce | 16,544 |
from typing import Optional
def get_graph(identifier: str, *, rows: Optional[int] = None) -> pybel.BELGraph:
"""Get the graph surrounding a given GO term and its descendants."""
graph = pybel.BELGraph()
enrich_graph(graph, identifier, rows=rows)
return graph | fc004ebd3cdfa70edd01b611987dfd48306ceb80 | 16,545 |
import logging
def parse_custom_variant(self, cfg):
"""Parse custom variant definition from a users input returning a variant dict
an example of user defined variant configuration
1) integrated: cpu=2 ram=4 max_nics=6 chassis=sr-1 slot=A card=cpm-1 slot=1 mda/1=me6-100gb-qsfp28
2) distributed: cp: cpu=2 ram=4 chassis=ixr-e slot=A card=cpm-ixr-e ___ lc: cpu=2 ram=4 max_nics=34 chassis=ixr-e slot=1 card=imm24-sfp++8-sfp28+2-qsfp28 mda/1=m24-sfp++8-sfp28+2-qsfp28
"""
def _parse(cfg, obj, is_cp=False):
timos_line = []
chassis = None
xiom = None
for elem in cfg.split():
# skip cp: lc: markers
if elem in ["cp:", "lc:"]:
continue
if "cpu=" in elem:
obj["cpu"] = int(elem.split("=")[1])
continue
if "ram=" in elem:
obj["min_ram"] = int(elem.split("=")[1])
continue
if not is_cp and "max_nics=" in elem:
obj["max_nics"] = int(elem.split("=")[1])
continue
# JvB for SR-xs check supported MDA type and determine #nics
if "chassis=" in elem:
chassis = elem.split("=")[1].lower()
elif chassis in SRS_CHASSIS: # SR-1s through SR-14s
if "xiom/" in elem: # /x1 or /x2
if is_cp:
raise ValueError(f"Cannot configure XIOM for control plane VM: {elem}")
xiom = elem.split("=")[1]
elif "mda/" in elem:
if is_cp:
raise ValueError(f"Cannot configure MDA for control plane VM: {elem}")
mda = elem.split("=")[1]
supported = SR_S_MDA_VARIANTS_IOM_S if xiom else SR_S_MDA_VARIANTS_NO_XIOM
if mda not in supported:
raise ValueError(f"Unsupported {chassis} MDA type for XIOM {xiom}: {mda}")
obj["max_nics"] = supported[mda]
timos_line.append(elem)
obj["timos_line"] = " ".join(timos_line)
# set default cpu and ram
if "cpu" not in obj:
obj["cpu"] = 2
if "min_ram" not in obj:
obj["min_ram"] = 4
return obj
# init variant object that gets returned
variant = {
"max_nics": 40
} # some default value for num nics if it is not provided in user cfg
# parsing distributed custom variant
if "___" in cfg:
variant["deployment_model"] = "distributed"
for hw_part in cfg.split("___"):
if "cp:" in hw_part:
logging.debug( f"Parsing cp:{hw_part}" )
variant["cp"] = _parse(hw_part.strip(),obj={},is_cp=True)
elif "lc:" in hw_part:
logging.debug( f"Parsing lc:{hw_part}" )
variant["lc"] = _parse(hw_part.strip(),obj={})
if "max_nics" in variant["lc"]:
variant["max_nics"] = int(variant["lc"]["max_nics"])
variant["lc"].pop("max_nics")
else:
raise ValueError(f"Missing 'cp:' or 'lc:' in distributed config string: {cfg}")
else:
# parsing integrated mode config
variant["deployment_model"] = "integrated"
variant = _parse(cfg.strip(),obj=variant)
return variant | f79874b6512a3874f2c88f7473c34dcf40b5fc65 | 16,546 |
def cube_recenter_via_speckles(cube_sci, cube_ref=None, alignment_iter=5,
gammaval=1, min_spat_freq=0.5, max_spat_freq=3,
fwhm=4, debug=False, recenter_median=False,
fit_type='gaus', negative=True, crop=True,
subframesize=21, mask=None, imlib='vip-fft',
interpolation='lanczos4', border_mode='reflect',
plot=True, full_output=False):
""" Registers frames based on the median speckle pattern. Optionally centers
based on the position of the vortex null in the median frame. Images are
filtered to isolate speckle spatial frequencies.
Parameters
----------
cube_sci : numpy ndarray
Science cube.
cube_ref : numpy ndarray
Reference cube (e.g. for NIRC2 data in RDI mode).
alignment_iter : int, optional
Number of alignment iterations (recomputes median after each iteration).
gammaval : int, optional
Applies a gamma correction to emphasize speckles (useful for faint
stars).
min_spat_freq : float, optional
Spatial frequency for low pass filter.
max_spat_freq : float, optional
Spatial frequency for high pass filter.
fwhm : float, optional
Full width at half maximum.
debug : bool, optional
Outputs extra info.
recenter_median : bool, optional
Recenter the frames at each iteration based on a 2d fit.
fit_type : str, optional
If recenter_median is True, this is the model to which the image is
fitted to for recentering. 'gaus' works well for NIRC2_AGPM data.
'ann' works better for NACO+AGPM data.
negative : bool, optional
If True, uses a negative gaussian fit to determine the center of the
median frame.
crop: bool, optional
Whether to calculate the recentering on a cropped version of the cube
that is speckle-dominated (recommended).
subframesize : int, optional
Sub-frame window size used. Should cover the region where speckles are
the dominant noise source.
mask: 2D np.ndarray, optional
Binary mask indicating where the cross-correlation should be calculated
in the images. If provided, should be the same size as array frames.
imlib : str, optional
Image processing library to use.
interpolation : str, optional
Interpolation method to use.
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
plot : bool, optional
If True, the shifts are plotted.
full_output: bool, optional
Whether to return more variables, useful for debugging.
Returns
-------
if full_output is False, returns:
cube_reg_sci: Registered science cube (numpy 3d ndarray)
If cube_ref is not None, also returns:
cube_reg_ref: Ref. cube registered to science frames (np 3d ndarray)
If full_output is True, returns in addition to the above:
cube_sci_lpf: Low+high-pass filtered science cube (np 3d ndarray)
cube_stret: Cube with stretched values used for cross-corr (np 3d ndarray)
cum_x_shifts_sci: Vector of x shifts for science frames (np 1d array)
cum_y_shifts_sci: Vector of y shifts for science frames (np 1d array)
And if cube_ref is not None, also returns:
cum_x_shifts_ref: Vector of x shifts for ref. frames.
cum_y_shifts_ref: Vector of y shifts for ref. frames.
"""
n, y, x = cube_sci.shape
check_array(cube_sci, dim=3)
if recenter_median and fit_type not in {'gaus','ann'}:
raise TypeError("fit type not recognized. Should be 'ann' or 'gaus'")
if crop and not subframesize < y/2.:
raise ValueError('`Subframesize` is too large')
if cube_ref is not None:
ref_star = True
nref = cube_ref.shape[0]
else:
ref_star = False
if crop:
cube_sci_subframe = cube_crop_frames(cube_sci, subframesize,
verbose=False)
if ref_star:
cube_ref_subframe = cube_crop_frames(cube_ref, subframesize,
verbose=False)
else:
subframesize = cube_sci.shape[-1]
cube_sci_subframe = cube_sci.copy()
if ref_star:
cube_ref_subframe = cube_ref.copy()
ceny, cenx = frame_center(cube_sci_subframe[0])
print('Sub frame shape: {}'.format(cube_sci_subframe.shape))
print('Center pixel: ({}, {})'.format(ceny, cenx))
# Filtering cubes. Will be used for alignment purposes
cube_sci_lpf = cube_sci_subframe.copy()
if ref_star:
cube_ref_lpf = cube_ref_subframe.copy()
cube_sci_lpf = cube_sci_lpf + np.abs(np.min(cube_sci_lpf))
if ref_star:
cube_ref_lpf = cube_ref_lpf + np.abs(np.min(cube_ref_lpf))
median_size = int(fwhm * max_spat_freq)
# Remove spatial frequencies <0.5 lam/D and >3lam/D to isolate speckles
cube_sci_hpf = cube_filter_highpass(cube_sci_lpf, 'median-subt',
median_size=median_size, verbose=False)
if min_spat_freq>0:
cube_sci_lpf = cube_filter_lowpass(cube_sci_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm,
verbose=False)
else:
cube_sci_lpf = cube_sci_hpf
if ref_star:
cube_ref_hpf = cube_filter_highpass(cube_ref_lpf, 'median-subt',
median_size=median_size,
verbose=False)
if min_spat_freq>0:
cube_ref_lpf = cube_filter_lowpass(cube_ref_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm,
verbose=False)
else:
cube_ref_lpf = cube_ref_hpf
if ref_star:
alignment_cube = np.zeros((1 + n + nref, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
alignment_cube[(n + 1):(n + 2 + nref), :, :] = cube_ref_lpf
else:
alignment_cube = np.zeros((1 + n, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
n_frames = alignment_cube.shape[0] # 1+n or 1+n+nref
cum_y_shifts = 0
cum_x_shifts = 0
for i in range(alignment_iter):
alignment_cube[0] = np.median(alignment_cube[1:(n + 1)], axis=0)
if recenter_median:
# Recenter the median frame using a 2d fit
if fit_type == 'gaus':
crop_sz = int(fwhm)
else:
crop_sz = int(6*fwhm)
if not crop_sz%2:
crop_sz+=1
sub_image, y1, x1 = get_square(alignment_cube[0], size=crop_sz,
y=ceny, x=cenx, position=True)
if fit_type == 'gaus':
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dgaussian(sub_image, crop=False,
threshold=False, sigfactor=1,
debug=debug, full_output=False)
elif fit_type == 'ann':
y_i, x_i, rad = _fit_2dannulus(sub_image, fwhm=fwhm, crop=False,
hole_rad=0.5, sampl_cen=0.1,
sampl_rad=0.2, ann_width=0.5,
unc_in=2.)
yshift = ceny - (y1 + y_i)
xshift = cenx - (x1 + x_i)
alignment_cube[0] = frame_shift(alignment_cube[0, :, :], yshift,
xshift, imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
# center the cube with stretched values
cube_stret = np.log10((np.abs(alignment_cube) + 1) ** gammaval)
if mask is not None and crop:
mask_tmp = frame_crop(mask, subframesize)
else:
mask_tmp = mask
res = cube_recenter_dft_upsampling(cube_stret, (ceny, cenx), fwhm=fwhm,
subi_size=None, full_output=True,
verbose=False, plot=False,
mask=mask_tmp, imlib=imlib,
interpolation=interpolation)
_, y_shift, x_shift = res
sqsum_shifts = np.sum(np.sqrt(y_shift ** 2 + x_shift ** 2))
print('Square sum of shift vecs: ' + str(sqsum_shifts))
for j in range(1, n_frames):
alignment_cube[j] = frame_shift(alignment_cube[j], y_shift[j],
x_shift[j], imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
cum_y_shifts += y_shift
cum_x_shifts += x_shift
cube_reg_sci = cube_sci.copy()
cum_y_shifts_sci = cum_y_shifts[1:(n + 1)]
cum_x_shifts_sci = cum_x_shifts[1:(n + 1)]
for i in range(n):
cube_reg_sci[i] = frame_shift(cube_sci[i], cum_y_shifts_sci[i],
cum_x_shifts_sci[i], imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(cum_x_shifts_sci, 'o-', label='Shifts in x', alpha=0.5)
plt.plot(cum_y_shifts_sci, 'o-', label='Shifts in y', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n))
la = 'Histogram'
_ = plt.hist(cum_x_shifts_sci, bins=b, alpha=0.5, label=la+' shifts X')
_ = plt.hist(cum_y_shifts_sci, bins=b, alpha=0.5, label=la+' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if ref_star:
cube_reg_ref = cube_ref.copy()
cum_y_shifts_ref = cum_y_shifts[(n + 1):]
cum_x_shifts_ref = cum_x_shifts[(n + 1):]
for i in range(nref):
cube_reg_ref[i] = frame_shift(cube_ref[i], cum_y_shifts_ref[i],
cum_x_shifts_ref[i], imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
if ref_star:
if full_output:
return (cube_reg_sci, cube_reg_ref, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci, cum_x_shifts_ref,
cum_y_shifts_ref)
else:
return (cube_reg_sci, cube_reg_ref)
else:
if full_output:
return (cube_reg_sci, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci)
else:
return cube_reg_sci | aaf113bc0c701993c492fbfeb855aeb4c0b73c8d | 16,547 |
def root_histogram_shape(root_hist, use_matrix_indexing=True):
"""
Return a tuple corresponding to the shape of the histogram.
If use_matrix_indexing is true, the tuple is in 'reversed' zyx
order. Matrix-order is the layout used in the internal buffer
of the root histogram - keep True if reshaping the array).
"""
dim = root_hist.GetDimension()
shape = np.array([root_hist.GetNbinsZ(),
root_hist.GetNbinsY(),
root_hist.GetNbinsX()][3 - dim:]) + 2
if not use_matrix_indexing:
shape = reversed(shape)
return tuple(shape) | 8df83a84f0a3b12bab248949042cd2df5df6f53e | 16,548 |
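A usage sketch (added for illustration; assumes PyROOT is available and a 2D histogram with 10 x-bins and 20 y-bins):
# h = ROOT.TH2F("h", "h", 10, 0, 1, 20, 0, 1)
# root_histogram_shape(h)                              -> (22, 12)   # under/overflow bins included, matrix (y, x) order
# root_histogram_shape(h, use_matrix_indexing=False)   -> (12, 22)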
import io
import json
import sys
def json_file_to_dict(fname):
""" Read a JSON file and return its Python representation, transforming all
the strings from Unicode to ASCII. The order of keys in the JSON file is
preserved.
Positional arguments:
fname - the name of the file to parse
"""
try:
with io.open(fname, encoding='ascii',
errors='ignore') as file_obj:
return json.load(
file_obj, object_pairs_hook=_ordered_dict_collapse_dups
)
except (ValueError, IOError) as e:
sys.stderr.write("Error parsing '%s': %s\n" % (fname, e))
raise | c2542597d0785ed495ae217f1300df941156763a | 16,549 |
from typing import Union
def get_weather_sensor_by(
weather_sensor_type_name: str, latitude: float = 0, longitude: float = 0
) -> Union[WeatherSensor, ResponseTuple]:
"""
Search a weather sensor by type and location.
Can create a weather sensor if needed (depends on API mode)
and then inform the requesting user which one to use.
"""
# Look for the WeatherSensor object
weather_sensor = (
WeatherSensor.query.filter(
WeatherSensor.weather_sensor_type_name == weather_sensor_type_name
)
.filter(WeatherSensor.latitude == latitude)
.filter(WeatherSensor.longitude == longitude)
.one_or_none()
)
if weather_sensor is None:
create_sensor_if_unknown = False
if current_app.config.get("FLEXMEASURES_MODE", "") == "play":
create_sensor_if_unknown = True
# either create a new weather sensor and post to that
if create_sensor_if_unknown:
current_app.logger.info("CREATING NEW WEATHER SENSOR...")
weather_sensor = WeatherSensor(
name="Weather sensor for %s at latitude %s and longitude %s"
% (weather_sensor_type_name, latitude, longitude),
weather_sensor_type_name=weather_sensor_type_name,
latitude=latitude,
longitude=longitude,
)
db.session.add(weather_sensor)
db.session.flush() # flush so that we can reference the new object in the current db session
# or query and return the nearest sensor and let the requesting user post to that one
else:
nearest_weather_sensor = WeatherSensor.query.order_by(
WeatherSensor.great_circle_distance(
latitude=latitude, longitude=longitude
).asc()
).first()
if nearest_weather_sensor is not None:
return unrecognized_sensor(
nearest_weather_sensor.latitude,
nearest_weather_sensor.longitude,
)
else:
return unrecognized_sensor()
return weather_sensor | b4feb0a75709d1bf27378df6d90420c74e36646c | 16,550 |
import six
def _npy_loads(data):
"""
Deserializes npy-formatted bytes into a numpy array
"""
logger.info("Inside _npy_loads fn")
stream = six.BytesIO(data)
return np.load(stream,allow_pickle=True) | 5e9ee0a0d41403af0a8e1ed41f6d15a677d82c44 | 16,551 |
import dateutil.parser
def parse_string(string):
"""Parse the string to a datetime object.
:param str string: The string to parse
:rtype: `datetime.datetime`
:raises: :exc:`InvalidDateFormat` when date format is invalid
"""
try:
# Try to parse string as a date
value = dateutil.parser.parse(string)
except (OverflowError, TypeError, ValueError):
raise InvalidDateFormat("Invalid date format %r" % (string, ))
return value | 6db2edad31f1febced496c92bfb2d7d76761850a | 16,552 |
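A usage sketch (added for illustration):
print(parse_string("2021-03-01T12:30:00"))  # -> 2021-03-01 12:30:00
# parse_string("not a date") raises InvalidDateFormat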
def get_elfs_oriented(atoms, density, basis, mode, view = serial_view()):
"""
Outdated, use get_elfs() with "mode='elf'/'nn'" instead.
Like get_elfs, but returns real, oriented elfs
mode = {'elf': Use the ElF algorithm to orient fingerprint,
'nn': Use nearest neighbor algorithm}
"""
return get_elfs(atoms, density, basis, view, orient_mode = mode) | 36b5abe66e9054ab49a25eca753d4a61148a1b1c | 16,553 |
import os
import json
def _get_corpora_json_contents(corpora_file):
"""
Get the contents of corpora.json, or an empty dict
"""
exists = os.path.isfile(corpora_file)
if not exists:
print("Corpora file not found at {}!".format(corpora_file))
return dict()
with open(corpora_file, "r") as fo:
return json.loads(fo.read()) | d2a0dcf8bbbca573121237190b96c8d372d027b4 | 16,554 |
def error_logger(param=None):
"""
Function to get an error logger, object of Logger class.
@param param : Custom parameter that can be passed to the logger.
@return: custom logger
"""
logger = Logger('ERROR_LOGGER', param)
return logger.get_logger() | ca6449c2e63ebdccbd7bd3993dc1d11375e66e29 | 16,555 |
import multiprocessing
import os
def _run_make_examples(pipeline_args):
"""Runs the make_examples job."""
def get_region_paths(regions):
return list(filter(_is_valid_gcs_path, regions or []))
def get_region_literals(regions):
return [
region for region in regions or [] if not _is_valid_gcs_path(region)
]
def get_extra_args():
"""Optional arguments that are specific to make_examples binary."""
extra_args = []
if pipeline_args.gvcf_outfile:
extra_args.extend(
['--gvcf', '"${GVCF}"/gvcf_output.tfrecord@"${SHARDS}".gz'])
if pipeline_args.gvcf_gq_binsize:
extra_args.extend(
['--gvcf_gq_binsize',
str(pipeline_args.gvcf_gq_binsize)])
if pipeline_args.regions:
num_localized_region_paths = len(get_region_paths(pipeline_args.regions))
localized_region_paths = list(map('"${{INPUT_REGIONS_{0}}}"'.format,
range(num_localized_region_paths)))
region_literals = get_region_literals(pipeline_args.regions)
extra_args.extend([
'--regions',
'\'%s\'' % ' '.join(region_literals + localized_region_paths)
])
if pipeline_args.sample_name:
extra_args.extend(['--sample_name', pipeline_args.sample_name])
if pipeline_args.hts_block_size:
extra_args.extend(['--hts_block_size', str(pipeline_args.hts_block_size)])
return extra_args
if pipeline_args.gcsfuse:
command = _MAKE_EXAMPLES_COMMAND_WITH_GCSFUSE.format(
EXTRA_ARGS=' '.join(get_extra_args()))
else:
command = _MAKE_EXAMPLES_COMMAND_NO_GCSFUSE.format(
EXTRA_ARGS=' '.join(get_extra_args()))
machine_type = 'custom-{0}-{1}'.format(
pipeline_args.make_examples_cores_per_worker,
pipeline_args.make_examples_ram_per_worker_gb * 1024)
num_workers = min(pipeline_args.make_examples_workers, pipeline_args.shards)
shards_per_worker = pipeline_args.shards / num_workers
threads = multiprocessing.Pool(num_workers)
results = []
for i in range(num_workers):
outputs = [
'EXAMPLES=' + _get_staging_examples_folder_to_write(pipeline_args, i) +
'/*'
]
if pipeline_args.gvcf_outfile:
outputs.extend(['GVCF=' + _get_staging_gvcf_folder(pipeline_args) + '/*'])
inputs = [
'INPUT_BAI=' + pipeline_args.bai,
'INPUT_REF=' + pipeline_args.ref,
'INPUT_REF_FAI=' + pipeline_args.ref_fai,
] + [
'INPUT_REGIONS_%s=%s' % (k, region_path)
for k, region_path in enumerate(
get_region_paths(pipeline_args.regions))
]
if pipeline_args.ref_gzi:
inputs.extend([pipeline_args.ref_gzi])
env_args = [
'--set', 'SHARDS=' + str(pipeline_args.shards), '--set',
'SHARD_START_INDEX=' + str(int(i * shards_per_worker)), '--set',
'SHARD_END_INDEX=' + str(int((i + 1) * shards_per_worker - 1))
]
if pipeline_args.gcsfuse:
env_args.extend([
'--set', 'GCS_BUCKET=' + _get_gcs_bucket(pipeline_args.bam), '--set',
'BAM=' + _get_gcs_relative_path(pipeline_args.bam)
])
else:
inputs.extend(['INPUT_BAM=' + pipeline_args.bam])
job_name = pipeline_args.job_name_prefix + _MAKE_EXAMPLES_JOB_NAME
output_path = os.path.join(pipeline_args.logging, _MAKE_EXAMPLES_JOB_NAME,
str(i))
run_args = _get_base_job_args(pipeline_args) + env_args + [
'--name', job_name, '--vm-labels', 'dv-job-name=' + job_name, '--image',
pipeline_args.docker_image, '--output', output_path, '--inputs',
','.join(inputs), '--outputs', ','.join(outputs), '--machine-type',
machine_type, '--disk-size',
str(pipeline_args.make_examples_disk_per_worker_gb), '--command',
command
]
results.append(threads.apply_async(_run_job, [run_args, output_path]))
_wait_for_results(threads, results) | f4481ba095242c3b6ef2b465c96a32082bf5b912 | 16,556 |
import numpy as np
def get_iou(mask, label):
"""
:param mask: predicted mask with 0 for background and 1 for object
:param label: label
:return: iou
"""
# mask = mask.numpy()
# label = labels.numpy()
size = mask.shape
mask = mask.flatten()
label = label.flatten()
m = mask + label
i = len(np.argwhere(m == 2))
u = len(np.argwhere(m != 0))
if u == 0:
u = size[0] * size[1]
iou = float(i) / u
if i == 0 and u == 0:
iou = 1
return iou | 9322d0184a3e28bdd1d5bf3214b7fbe8936d6a21 | 16,557 |
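A small worked example (added for illustration) with 2x2 masks:
mask = np.array([[1, 1], [0, 0]])
label = np.array([[1, 0], [0, 0]])
print(get_iou(mask, label))  # intersection = 1, union = 2  ->  0.5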
from typing import List
from typing import Set
from typing import Any
def mean_jaccard_distance(sets: List[Set[Any]]) -> float:
"""
Compute the mean Jaccard distance for sets A_1, \dots A_n:
d = \frac{1}{n} \sum_{i=1}^{n-1} \sum_{j=i+1}^n (1 - J(A_i, A_j))
where J(A, B) is the Jaccard index between sets A and B and 1-J(A, B)
is the Jaccard distance.
"""
n = len(sets)
assert n > 0
if n == 1:
return 0
else:
d = 0.0
for i in range(n - 1):
for j in range(i + 1, n):
d += 1 - jaccard_index(sets[i], sets[j])
d /= n * (n - 1) / 2
return d | efbfce8092e2e3a9b5b076c46a636dfa17e2d266 | 16,558 |
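An illustrative check; the jaccard_index helper is not shown in this snippet, so the standard definition is assumed here:
def jaccard_index(a, b):
    return len(a & b) / len(a | b)
print(mean_jaccard_distance([{1, 2, 3}, {2, 3, 4}, {3, 4, 5}]))  # (0.5 + 0.8 + 0.5) / 3, approximately 0.6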
import numpy as np
def nx_find_connected(graph, start_set, end_set, cutoff=np.inf):
"""Return the nodes in end_set connected to start_set."""
reachable = []
for end in end_set:
if nx_is_reachable(graph, end, start_set):
reachable.append(end)
if len(reachable) >= cutoff:
break
return reachable | a3feb8a172bb610fa4416c6f4a4c0558540d2190 | 16,559 |
def svn_client_proplist(*args):
"""
svn_client_proplist(char target, svn_opt_revision_t revision, svn_boolean_t recurse,
svn_client_ctx_t ctx, apr_pool_t pool) -> svn_error_t
"""
return _client.svn_client_proplist(*args) | 1cc82161292df7b9ba284397a0dcd55da9d0d7c1 | 16,560 |
def dev_transform(signal, input_path='../data/', is_denoised=True):
"""
normalization function that transforms each feature based on the
scaling of the training set. This transformation should be done on
the test set (developmental set), or any new input for a trained neural
network. Due to the existence of a denoising step in the normal_seq function,
this transformation can not reproduce the exact same of initial sequences,
instead it transforms to the scale of denoised version of training set.
Parameters
----------
signal : numpy array or pandas dataframe
in the shape of (n_samples, n_features)
input_path : str, default='../data/'
is_denoised : boolean
it specifies the state if original sequence is denoised by a threshold,
if it's set to False it means that user used q=None in normal_seq function.
Returns
-------
transformed : numpy array
a normalised sequence or features
"""
transformed = []
if isinstance(signal, pd.DataFrame):
signal = signal.to_numpy(copy=True)
elif isinstance(signal, list):
signal = np.array(signal)
scales = pd.read_csv(input_path + 'min_max_inputs.csv')
max_element = scales.to_numpy(copy=True)[1, 1:]
min_element = scales.to_numpy(copy=True)[0, 1:]
if signal.ndim == 1:
if is_denoised is True:
signal[signal > max_element] = max_element
transformed.append((signal-min_element)/(
max_element-min_element))
else:
for i in range(signal.shape[1]):
if is_denoised is True:
signal[signal[:, i] > max_element[i]] = max_element[i]
transformed.append((signal[:, i]-min_element[i])/(
max_element[i]-min_element[i]))
transformed = np.array(transformed).T # transpose for correspondence
return transformed | ce6dfe780bb724ae8036502d2b1d1828fce675dc | 16,561 |
def moveTo(self, parent):
"""Move this element to new parent, as last child"""
self.getParent().removeChild(self)
parent.addChild(self)
return self | 40caa9681346db9a6cfb5c95fdb761a9f98e607a | 16,562 |
from datetime import date, datetime
def coerce_to_end_of_day_datetime(value):
"""
gets the end of day datetime equivalent of given date object.
if the value is not a date, it returns the same input.
:param date value: value to be coerced.
:rtype: datetime | object
"""
if not isinstance(value, datetime) and isinstance(value, date):
return end_of_day(value)
return value | 374e7decf543e5fb40fb7714d4472cf4cfa48cb1 | 16,563 |
def greybody(nu, temperature, beta, A=1.0, logscale=0.0,
units='cgs', frequency_units='Hz', kappa0=4.0, nu0=3000e9,
normalize=max):
"""
Same as modified blackbody... not sure why I have it at all, though the
normalization constants are different.
"""
h,k,c = unitdict[units]['h'],unitdict[units]['k'],unitdict[units]['c']
modification = (1. - exp(-(nu/nu0)**beta))
I = blackbody(nu,temperature,units=units,frequency_units=frequency_units,normalize=normalize)*modification
if normalize and hasattr(I,'__len__'):
if len(I) > 1:
return I/normalize(I) * 10.**logscale
else:
return I * 10.**logscale
else:
return I * 10.**logscale | 89cca39acf5659e8ab7b403c5747b19c119d0e51 | 16,564 |
import numpy as np
from copy import copy
def GCLarsen_v0(WF, WS, WD, TI,
pars=[0.435449861, 0.797853685, -0.124807893, 0.136821858, 15.6298, 1.0]):
"""Computes the WindFarm flow and Power using GCLarsen
[Larsen, 2009, A simple Stationary...]
Inputs
----------
WF: WindFarm
Windfarm instance
WS: list
Rotor averaged Undisturbed wind speed [m/s] for each WT
WD: float
Rotor averaged Undisturbed wind direction [deg] for each WT
Meteorological axis. North = 0 [deg], clockwise.
TI: float
Rotor averaged turbulence intensity [-] for each WT
Returns
-------
P_WT: ndarray
Power production of the wind turbines (nWT,1) [W]
U_WT: ndarray
Wind speed at hub height (nWT,1) [m/s]
Ct: ndarray
Thrust coefficients for each wind turbine (nWT,1) [-]
"""
Dist, nDownstream, id0 = WF.turbineDistance(np.mean(WD))
zg = WF.vectWTtoWT[2,:,:]
# Initialize arrays to NaN
Ct = np.nan * np.ones([WF.nWT])
U_WT = copy(WS)
P_WT = np.nan * np.ones([WF.nWT])
# Initialize first upstream turbine
Ct[id0[0]] = WF.WT[id0[0]].get_CT(WS[id0[0]])
P_WT[id0[0]] = WF.WT[id0[0]].get_P(WS[id0[0]])
U_WT[id0[0]] = WS[id0[0]]
for i in range(1, WF.nWT):
cWT = id0[i] # Current wind turbine (wake operating)
cR = WF.WT[cWT].R
LocalDU = np.zeros([WF.nWT, 1])
for j in range(i-1, -1, -1):
# Loop on the upstream turbines (uWT) of the cWT
uWT = id0[j]
uWS = U_WT[uWT] # Wind speed at wind turbine uWT
uR = WF.WT[uWT].R
uCT = Ct[uWT]
if np.isnan(uCT):
uCT = WF.WT[uWT].get_CT(uWS)
# WT2WT vector in wake coordinates
Dist, _,_ = WF.turbineDistance(WD[uWT])
x = Dist[0, uWT, cWT]
y = Dist[1, uWT, cWT]
z = zg[uWT, cWT]
r = np.sqrt(y**2.+z**2.)
# Calculate the wake width of uWT at the position of cWT
Rw = get_Rw(x, uR, TI[uWT], uCT, pars)[0]
if (r <= Rw+cR or uWS > 0):
LocalDU[uWT] = uWS*get_dUeq(x,y,z,cR,uR,uCT,TI[uWT],pars)
# Wake superposition
DU = LocalDU.sum()
U_WT[cWT] = U_WT[cWT] + DU
if U_WT[cWT] > WF.WT[cWT].u_cutin:
Ct[cWT] = WF.WT[cWT].get_CT(U_WT[cWT])
P_WT[cWT] = WF.WT[cWT].get_P(U_WT[cWT])
else:
Ct[cWT] = WF.WT[cWT].CT_idle
P_WT[cWT] = 0.0
return (P_WT,U_WT,Ct) | a075074b0cee9b36fdf3411804ff4eff2f5fe63b | 16,565 |
def guess_table_address(*args):
"""
guess_table_address(insn) -> ea_t
Guess the jump table address (ibm pc specific)
@param insn (C++: const insn_t &)
"""
return _ida_ua.guess_table_address(*args) | 073773e33b5cf4c59f3a3c892d5a53320c2c1f4b | 16,566 |
import logging
import os
import xlwt
def xmind_to_excel_file(xmind_file):
"""Convert XMind file to a excel csv file"""
xmind_file = get_absolute_path(xmind_file)
logging.info('Start converting XMind file(%s) to excel file...', xmind_file)
testcases = get_xmind_testcase_list(xmind_file)
fileheader = ["所属模块", "用例标题", "前置条件", "步骤", "预期", "关键词", "优先级", "用例类型", "适用阶段"]
wbk = xlwt.Workbook()
sheet1 = wbk.add_sheet('测试用例', cell_overwrite_ok=False)
# enable word wrap and top vertical alignment in cells
style1 = xlwt.easyxf('align: wrap on, vert top')
sheet1.col(0).width = 256*30
sheet1.col(1).width = 256*40
sheet1.col(2).width = 256*30
sheet1.col(3).width = 256*40
sheet1.col(4).width = 256*40
# write the test-case header row
for i in range(0, len(fileheader)):
sheet1.write(0, i, fileheader[i])
# write test cases starting from the second row
case_index = 1
for testcase in testcases:
# row = gen_a_testcase_row(testcase)
row = gen_a_testcase_row(testcase)
# print("row_list >> ", row_list)
for i in range(0,len(row)):
sheet1.write(case_index, i, row[i], style1)
case_index = case_index + 1
excel_file = xmind_file[:-5] + 'xls'
if os.path.exists(excel_file):
logging.info('The excel file already exists, return it directly: %s', excel_file)
return excel_file
if excel_file:
wbk.save(excel_file)
logging.info('Converted XMind file(%s) to Excel file(%s) successfully!', xmind_file, excel_file)
return excel_file | 779f540b56043ee60ea0ef28a89cea49d7cda51c | 16,567 |
def get_elbs(account, region):
""" Get elastic load balancers """
elb_data = []
aws_accounts = AwsAccounts()
if not account:
session = boto3.session.Session(region_name=region)
for account_rec in aws_accounts.all():
elb_data.extend(
query_elbs_for_account(account_rec, region, session))
elif account.isdigit() and len(account) == 12:
session = boto3.session.Session()
aws_account = aws_accounts.with_number(account)
if aws_account:
elb_data.append(
query_elbs_for_account(
aws_account, region, session))
else:
return dict(Message="Account not found"), 404
# print(elb_data)
return dict(LoadBalancers=elb_data), 200 | 32b059c7929b0adae3df7b8393fd062f5a281cc3 | 16,568 |
import io
import os
def nyul_normalize(img_dir, mask_dir=None, output_dir=None, standard_hist=None, write_to_disk=True):
"""
Use Nyul and Udupa method ([1,2]) to normalize the intensities of a set of MR images
Args:
img_dir (str): directory containing MR images
mask_dir (str): directory containing masks for MR images
output_dir (str): directory to save images if you do not want them saved in
same directory as data_dir
standard_hist (str): path to output or use standard histogram landmarks
write_to_disk (bool): write the normalized data to disk or nah
Returns:
normalized (np.ndarray): last normalized image from img_dir
References:
[1] N. Laszlo G and J. K. Udupa, “On Standardizing the MR Image
Intensity Scale,” Magn. Reson. Med., vol. 42, pp. 1072–1081,
1999.
[2] M. Shah, Y. Xiao, N. Subbanna, S. Francis, D. L. Arnold,
D. L. Collins, and T. Arbel, “Evaluating intensity
normalization on MRIs of human brain with multiple sclerosis,”
Med. Image Anal., vol. 15, no. 2, pp. 267–282, 2011.
"""
input_files = io.glob_nii(img_dir)
if output_dir is None:
out_fns = [None] * len(input_files)
else:
out_fns = []
for fn in input_files:
_, base, ext = io.split_filename(fn)
out_fns.append(os.path.join(output_dir, base + '_hm' + ext))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
mask_files = [None] * len(input_files) if mask_dir is None else io.glob_nii(mask_dir)
if standard_hist is None:
logger.info('Learning standard scale for the set of images')
standard_scale, percs = train(input_files, mask_files)
elif not os.path.isfile(standard_hist):
logger.info('Learning standard scale for the set of images')
standard_scale, percs = train(input_files, mask_files)
np.save(standard_hist, np.vstack((standard_scale, percs)))
else:
logger.info('Loading standard scale ({}) for the set of images'.format(standard_hist))
standard_scale, percs = np.load(standard_hist)
normalized = None
for i, (img_fn, mask_fn, out_fn) in enumerate(zip(input_files, mask_files, out_fns)):
_, base, _ = io.split_filename(img_fn)
logger.info('Transforming image {} to standard scale ({:d}/{:d})'.format(base, i + 1, len(input_files)))
img = io.open_nii(img_fn)
mask = io.open_nii(mask_fn) if mask_fn is not None else None
normalized = do_hist_norm(img, percs, standard_scale, mask)
if write_to_disk:
io.save_nii(normalized, out_fn, is_nii=True)
return normalized | 9774725919ed43735060047855ad67e55e227936 | 16,569 |
import numpy as np
def likelihood_params(ll_mode, mode, behav_tuple, num_induc, inner_dims, inv_link, tbin, jitter,
J, cutoff, neurons, mapping_net, C):
"""
Create the likelihood object.
"""
if mode is not None:
kernel_tuples_, ind_list = kernel_used(mode, behav_tuple, num_induc, inner_dims)
if ll_mode =='hZIP':
inv_link_hetero = 'sigmoid'
elif ll_mode =='hCMP':
inv_link_hetero = 'identity'
elif ll_mode =='hNB':
inv_link_hetero = 'softplus'
else:
inv_link_hetero = None
if inv_link_hetero is not None:
mean_func = np.zeros((inner_dims))
kt, ind_list = kernel_used(mode, behav_tuple, num_induc, inner_dims)
gp_lvms = GP_params(ind_list, kt, num_induc, neurons, inv_link, jitter, mean_func, None,
learn_mean=True)
else:
gp_lvms = None
inv_link_hetero = None
if ll_mode == 'IBP':
likelihood = mdl.likelihoods.Bernoulli(tbin, inner_dims, inv_link)
elif ll_mode == 'IP':
likelihood = mdl.likelihoods.Poisson(tbin, inner_dims, inv_link)
elif ll_mode == 'ZIP' or ll_mode =='hZIP':
alpha = .1*np.ones(inner_dims)
likelihood = mdl.likelihoods.ZI_Poisson(tbin, inner_dims, inv_link, alpha, dispersion_mapping=gp_lvms)
#inv_link_hetero = lambda x: torch.sigmoid(x)/tbin
elif ll_mode == 'NB' or ll_mode =='hNB':
r_inv = 10.*np.ones(inner_dims)
likelihood = mdl.likelihoods.Negative_binomial(tbin, inner_dims, inv_link, r_inv, dispersion_mapping=gp_lvms)
elif ll_mode == 'CMP' or ll_mode =='hCMP':
log_nu = np.zeros(inner_dims)
likelihood = mdl.likelihoods.COM_Poisson(tbin, inner_dims, inv_link, log_nu, J=J, dispersion_mapping=gp_lvms)
elif ll_mode == 'IG': # renewal process
shape = np.ones(inner_dims)
likelihood = mdl.likelihoods.Gamma(tbin, inner_dims, inv_link, shape, allow_duplicate=False)
elif ll_mode == 'IIG': # renewal process
mu_t = np.ones(inner_dims)
likelihood = mdl.likelihoods.invGaussian(tbin, inner_dims, inv_link, mu_t, allow_duplicate=False)
elif ll_mode == 'LN': # renewal process
sigma_t = np.ones(inner_dims)
likelihood = mdl.likelihoods.logNormal(tbin, inner_dims, inv_link, sigma_t, allow_duplicate=False)
elif ll_mode == 'U':
likelihood = mdl.likelihoods.Universal(inner_dims//C, C, inv_link, cutoff, mapping_net)
else:
raise NotImplementedError
return likelihood | 2e817c4fdfdd9a65d138f61166ef8fbb3154460b | 16,570 |
def is_num_idx(k):
"""This key corresponds to """
return k.endswith("_x") and (k.startswith("tap_x") or k.startswith("sig")) | bd4ed2c9c4a24ae423ec6c738d99b31ace6ec267 | 16,571 |
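# A small usage sketch for is_num_idx above (illustrative key names only):
print(is_num_idx("tap_x0_x"))   # True  -- tap x-value column
print(is_num_idx("sig_foo_x"))  # True  -- signal x-value column
print(is_num_idx("tap_x0_y"))   # False -- not an x column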
import numpy as np
def convert_to_boolarr(int_arr, cluster_id):
"""
:param int_arr: array of integers which relate to no, one or multiple clusters
cluster_id: 0=Pleiades, 1=Meingast 1, 2=Hyades, 3=Alpha Per, 4=Coma Ber
"""
return np.array((np.floor(int_arr/2**cluster_id) % 2), dtype=bool) | c769ca07ea32a9e0ab0d230cd3574e5b71434de4 | 16,572 |
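# Usage sketch for convert_to_boolarr: membership is encoded bitwise, so 3 (0b11) belongs to
# both cluster 0 (Pleiades) and cluster 1 (Meingast 1).
labels = np.array([0, 1, 2, 3, 5])
print(convert_to_boolarr(labels, cluster_id=0))  # [False  True False  True  True]
print(convert_to_boolarr(labels, cluster_id=1))  # [False False  True  True False]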
def serialize(root):
#
"""Serialization is the process of converting a data structure or object
into a sequence of bits so that it can be stored in a file or memory buffer,
or transmitted across a network connection link to be reconstructed later in
the same or another computer environment.
Design an algorithm to serialize and deserialize a binary tree. There is no
restriction on how your serialization/deserialization algorithm should work.
You just need to ensure that a binary tree can be serialized to a string and
this string can be deserialized to the original tree structure.
Input:
1
/ \
2 3
/ \
4 5
          1
        /   \
       2     3
      / \   / \
     4   5 6   7
          1
        /   \
      10     11
     /  \   /  \
   100  101 110  111
Output: [1,2,3,null,null,4,5]
0 = 0
1 = 2**0 + 1
2 = 2**0 + 2
3 = 2**1 + 1
4 = 2**1 + 2
5 = 2**2 + 1
6 = 2**2 + 2**1
7 = 2**2 + 2**1 + 2**0
"""
queue = [(root, "1")]
indices = {}
max_location = 0
while queue:
node, location = queue.pop(0)
current_location = int(location, 2)
max_location = max(max_location, current_location)
indices[int(location, 2)] = node.val
if node.left:
queue.append((node.left, location + "0"))
if node.right:
queue.append((node.right, location + "1"))
result = [None] * (max_location + 1)
for k, v in indices.items():
result[k] = v
return result[1:] | a2bec43b384302d5218e8c62c83bc069be3bcbd3 | 16,573 |
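# A minimal usage sketch for serialize; TreeNode is a hypothetical stand-in for whatever node
# class the caller uses (only .val, .left and .right attributes are required).
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

# Tree:  1
#       / \
#      2   3
#         / \
#        4   5
root = TreeNode(1, TreeNode(2), TreeNode(3, TreeNode(4), TreeNode(5)))
print(serialize(root))  # [1, 2, 3, None, None, 4, 5]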
def ensure_daemon(f):
"""A decorator for running an integration test with and without the daemon enabled."""
def wrapper(self, *args, **kwargs):
for enable_daemon in [False, True]:
enable_daemon_str = str(enable_daemon)
env = {
"HERMETIC_ENV": "PANTS_PANTSD,PANTS_SUBPROCESSDIR",
"PANTS_PANTSD": enable_daemon_str,
}
with environment_as(**env):
try:
f(self, *args, **kwargs)
except Exception:
print(f"Test failed with enable-pantsd={enable_daemon}:")
if not enable_daemon:
print(
"Skipping run with pantsd=true because it already "
"failed with pantsd=false."
)
raise
finally:
kill_daemon()
return wrapper | d9005c48d489b8b5da1f9687b78d1f455aaf3d62 | 16,574 |
from adaptivefiltering.pdal import execute_pdal_pipeline
from adaptivefiltering.pdal import PDALInMemoryDataSet
import json
def reproject_dataset(dataset, out_srs, in_srs=None):
"""Standalone function to reproject a given dataset with the option of forcing an input reference system
    :param dataset:
        The dataset to reproject.
    :type dataset: adaptivefiltering.DataSet
    :param out_srs:
The desired output format in WKT.
:type out_srs: str
:param in_srs:
The input format in WKT from which to convert. The default is the dataset's current reference system.
:type in_srs: str
:return:
A reprojected dataset
:rtype: adaptivefiltering.DataSet
"""
dataset = PDALInMemoryDataSet.convert(dataset)
if in_srs is None:
in_srs = dataset.spatial_reference
config = {
"type": "filters.reprojection",
"in_srs": in_srs,
"out_srs": out_srs,
}
pipeline = execute_pdal_pipeline(dataset=dataset, config=config)
spatial_reference = json.loads(pipeline.metadata)["metadata"][
"filters.reprojection"
]["comp_spatialreference"]
return PDALInMemoryDataSet(
pipeline=pipeline,
spatial_reference=spatial_reference,
) | 0380442a837f89bbf06d0d1b5e9917e7309876ad | 16,575 |
def conditional(condition, decorator):
""" Decorator for a conditionally applied decorator.
Example:
@conditional(get_config('use_cache'), ormcache)
def fn():
pass
"""
if condition:
return decorator
else:
return lambda fn: fn | 7c17ad3aaacffd0008ec1cf66871ea6755f7869a | 16,576 |
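# Usage sketch for conditional: log_calls and DEBUG are illustrative names only; the decorator
# is applied to the function just when the condition is truthy.
def log_calls(fn):
    def wrapper(*args, **kwargs):
        print("calling", fn.__name__)
        return fn(*args, **kwargs)
    return wrapper

DEBUG = False

@conditional(DEBUG, log_calls)
def add(a, b):
    return a + b

print(add(1, 2))  # 3, with no "calling add" line because DEBUG is False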
import statistics
def variance(data, mu=None):
"""Compute variance over a list."""
if mu is None:
mu = statistics.mean(data)
return sum([(x - mu) ** 2 for x in data]) / len(data) | 92f89d35c2ae5abf742b10ba838a381d6f74e92c | 16,577 |
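# Usage sketch: this is the population variance (divides by len(data), not len(data) - 1).
data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(variance(data))          # 4.0
print(variance(data, mu=5.0))  # 4.0, with the mean supplied explicitly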
from reportlab.lib.units import inch
from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer, Table
# h3, h4, p (paragraph styles), Template and formatted_page are expected from the surrounding module.
def make_note(outfile, headers, paragraphs, **kw):
"""Builds a pdf file named outfile based on headers and
paragraphs, formatted according to parameters in kw.
:param outfile: outfile name
:param headers: <OrderedDict> of headers
:param paragraphs: <OrderedDict> of paragraphs
:param kw: keyword arguments for formatting
"""
story = [Paragraph(x, headers[x]) for x in headers.keys()]
for headline, paragraph in paragraphs.items():
story.append(Paragraph(headline, paragraph.get("style", h3)))
        if "tpl" not in paragraph:
for sub_headline, sub_paragraph in paragraph.items():
story.append(Paragraph(sub_headline, paragraph.get("style", h4)))
story.append(Paragraph(sub_paragraph.get("tpl").render(**kw), p))
else:
if isinstance(paragraph.get("tpl"), Template):
story.append(Paragraph(paragraph.get("tpl").render(**kw), p))
elif isinstance(paragraph.get("tpl"), Table):
story.append(Spacer(1, 0.2 * inch))
story.append(paragraph.get("tpl"))
story.append(Spacer(1, 0.2 * inch))
else:
pass
doc = SimpleDocTemplate(outfile)
doc.build(story, onFirstPage=formatted_page, onLaterPages=formatted_page)
return doc | d9bc331167649210cf18e76bcff4099817c28458 | 16,578 |
import stat as stat_module
from os import stat
def output_file_exists(filename):
"""Check if a file exists and its size is > 0"""
if not file_exists(filename):
return False
st = stat(filename)
if st[stat_module.ST_SIZE] == 0:
return False
return True | ad2f3a7451feefd32fe98da7fc3bfca9852b080c | 16,579 |
def IMF_N(m,a=.241367,b=.241367,c=.497056):
"""
returns number of stars with mass m
"""
# a,b,c = (.241367,.241367,.497056)
# a=b=c=1/3.6631098624
if .1 <= m <= .3:
res = c*( m**(-1.2) )
elif .3 < m <= 1.:
res = b*( m**(-1.8) )
elif 1. < m <= 100.:
# res = a*( m**(-1.3)-100**(-1.3) )/1.3
res = a*( m**(-2.3) )
else:
res = 0
return res | 4d120af2840a793468335cddd867f6d29940d415 | 16,580 |
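# Usage sketch for IMF_N: evaluate it on a few masses and check the mass-weighted integral,
# which the default constants appear to normalise to roughly 1.
import numpy as np
print([IMF_N(m) for m in (0.2, 0.5, 2.0, 10.0)])
m_grid = np.linspace(0.1, 100.0, 20000)
print(np.trapz([m * IMF_N(m) for m in m_grid], m_grid))  # ~1.0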
def features_disable(partial_name, partial_name_field, force, **kwargs):
"""Disable a feature"""
mode = "disable"
params = {"mode": "force"} if force else None
feature = _okta_get("features", partial_name,
selector=_selector_field_find(partial_name_field, partial_name))
feature_id = feature["id"]
rv = okta_manager.call_okta(f"/features/{feature_id}/{mode}",
REST.post, params=params)
return rv | 5477a43ad2f849669a6a209abfc835f0f4ee453a | 16,581 |
import boto3
def _get_images():
"""Get the official AWS public AMIs created by Flambe
that have tag 'Creator: [email protected]'
ATTENTION: why not just search the tags? We need to make sure
the AMIs we pick were created by the Flambe team. Because of tags
values not being unique, anyone can create a public AMI with
'Creator: [email protected]' as a tag. If we pick that AMI, then
we could potentially be creating instances with unknown AMIs,
causing potential security issues.
By filtering by our acount id (which can be public), then we can
make sure that all AMIs that are being scanned were created
by Flambe team.
"""
client = boto3.client('ec2')
return client.describe_images(Owners=[const.AWS_FLAMBE_ACCOUNT],
Filters=[{'Name': 'tag:Creator', 'Values': ['[email protected]']}]) | 975596ff9eb1c9c0864cadb41edc2b1a4d009790 | 16,582 |
import numpy as np
from statsmodels.tsa.ar_model import AR
def ar_coefficient(x, param):
"""
This feature calculator fits the unconditional maximum likelihood
of an autoregressive AR(k) process.
The k parameter is the maximum lag of the process
.. math::
X_{t}=\\varphi_0 +\\sum _{{i=1}}^{k}\\varphi_{i}X_{{t-i}}+\\varepsilon_{t}
For the configurations from param which should contain the maxlag "k" and such an AR process is calculated. Then
the coefficients :math:`\\varphi_{i}` whose index :math:`i` contained from "coeff" are returned.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"coeff": x, "k": y} with x,y int
:type param: list
:return x: the different feature values
:return type: pandas.Series
"""
calculated_ar_params = {}
x_as_list = list(x)
calculated_AR = AR(x_as_list)
res = {}
k = param["k"]
p = param["coeff"]
column_name = "k_{}__coeff_{}".format(k, p)
if k not in calculated_ar_params:
try:
calculated_ar_params[k] = calculated_AR.fit(maxlag=k, solver="mle").params
except (np.linalg.LinAlgError, ValueError):
calculated_ar_params[k] = [np.NaN]*k
mod = calculated_ar_params[k]
if p <= k:
try:
res[column_name] = mod[p]
except IndexError:
res[column_name] = 0
else:
res[column_name] = np.NaN
return [value for key, value in res.items()][0] | a7a7171a44055d23457fd622d7e893f839f17bcf | 16,583 |
import numpy as np
# sounddevice is optional; this guard mirrors the `sounddevice_available` check inside the function.
try:
    import sounddevice as sd
    sounddevice_available = True
except ImportError:
    sounddevice_available = False
import warnings
def measure_ir(
sweep_length=1.0,
sweep_type="exponential",
fs=48000,
f_lo=0.0,
f_hi=None,
volume=0.9,
pre_delay=0.0,
post_delay=0.1,
fade_in_out=0.0,
dev_in=None,
dev_out=None,
channels_input_mapping=None,
channels_output_mapping=None,
ascending=False,
deconvolution=True,
plot=True,
):
"""
Measures an impulse response by playing a sweep and recording it using the sounddevice package.
Parameters
----------
sweep_length: float, optional
length of the sweep in seconds
sweep_type: SweepType, optional
type of sweep to use linear or exponential (default)
fs: int, optional
sampling frequency (default 48 kHz)
f_lo: float, optional
lowest frequency in the sweep
f_hi: float, optional
highest frequency in the sweep, can be a negative offset from fs/2
volume: float, optional
multiply the sweep by this number before playing (default 0.9)
pre_delay: float, optional
delay in second before playing sweep
post_delay: float, optional
delay in second before stopping recording after playing the sweep
fade_in_out: float, optional
length in seconds of the fade in and out of the sweep (default 0.)
dev_in: int, optional
input device number
dev_out: int, optional
output device number
channels_input_mapping: array_like, optional
List of channel numbers (starting with 1) to record. If mapping is
given, channels is silently ignored.
channels_output_mapping: array_like, optional
List of channel numbers (starting with 1) where the columns of data
shall be played back on. Must have the same length as number of
channels in data (except if data is mono, in which case the signal is
played back on all given output channels). Each channel number may only
appear once in mapping.
ascending: bool, optional
        whether the sweep is from high to low (default) or low to high frequencies
deconvolution: bool, optional
        if True, apply deconvolution to the recorded signal to remove the sweep (default True)
plot: bool, optional
plot the resulting signal
Returns
-------
Returns the impulse response if `deconvolution == True` and the recorded signal if not
"""
if not sounddevice_available:
raise ImportError(
"Sounddevice package not availble. Install it to use this function."
)
N = int(sweep_length * fs) + 1
sweep_func = _sweep_types[sweep_type]
sweep = sweep_func(
sweep_length, fs, f_lo=f_lo, f_hi=f_hi, fade=fade_in_out, ascending=ascending
)
# adjust the amplitude
sweep *= volume
# zero pad as needed
pre_zeros = int(pre_delay * fs)
post_zeros = int(post_delay * fs)
test_signal = np.concatenate((np.zeros(pre_zeros), sweep, np.zeros(post_zeros)))
# setup audio interface parameters
if channels_input_mapping is None:
channels_input_mapping = [1]
if channels_output_mapping is None:
channels_output_mapping = [1]
if dev_in is not None:
sd.default.device[0] = dev_in
if dev_out is not None:
sd.default.device[1] = dev_out
# repeat if we need to play in multiple channels
if len(channels_output_mapping) > 1:
play_signal = np.tile(
test_signal[:, np.newaxis], (1, len(channels_output_mapping))
)
else:
play_signal = test_signal
recorded_signal = sd.playrec(
test_signal,
samplerate=fs,
input_mapping=channels_input_mapping,
output_mapping=channels_output_mapping,
blocking=True,
)
h = None
if deconvolution:
h = np.array(
[
wiener_deconvolve(recorded_signal[:, c], sweep)
for c in range(len(channels_input_mapping))
]
).T
if plot:
        try:
            import matplotlib.pyplot as plt
        except ImportError:
warnings.warn("Matplotlib is required for plotting")
return
if h is not None:
plt.figure()
plt.subplot(1, 2, 1)
plt.plot(np.arange(h.shape[0]) / fs, h)
plt.title("Impulse Response")
plt.subplot(1, 2, 2)
freq = np.arange(h.shape[0] // 2 + 1) * fs / h.shape[0]
plt.plot(freq, 20.0 * np.log10(np.abs(np.fft.rfft(h, axis=0))))
plt.title("Frequency content")
plt.figure()
plt.subplot(1, 2, 1)
plt.plot(np.arange(recorded_signal.shape[0]) / fs, recorded_signal)
plt.title("Recorded signal")
plt.subplot(1, 2, 2)
freq = (
np.arange(recorded_signal.shape[0] // 2 + 1) * fs / recorded_signal.shape[0]
)
plt.plot(freq, 20.0 * np.log10(np.abs(np.fft.rfft(recorded_signal, axis=0))))
plt.title("Frequency content")
plt.show()
if deconvolution:
return recorded_signal, h
else:
return recorded_signal | 33873a6c373daf0351db25872f02613dfaf635d4 | 16,584 |
from faker import Faker
import random
def address_factory(sqla):
"""Create a fake address."""
fake = Faker() # Use a generic one; others may not have all methods.
addresslines = fake.address().splitlines()
areas = sqla.query(Area).all()
if not areas:
create_multiple_areas(sqla, random.randint(3, 6))
areas = sqla.query(Area).all()
current_area = random.choice(areas)
address = {
'name': fake.name(),
'address': addresslines[0],
'city': addresslines[1].split(",")[0],
'area_id': current_area.id,
'country_code': current_area.country_code,
'latitude': random.random() * 0.064116 + -2.933783,
'longitude': random.random() * 0.09952 + -79.055411
}
return address | 91f4558887025841d99ab6e65795111bbc804238 | 16,585 |
from pm4py.util import constants
from pm4py.algo.discovery.dfg.adapters.pandas.df_statistics import get_dfg_graph
from pm4py.algo.discovery.dfg.variants import performance as dfg_discovery
from pm4py.objects.log.obj import EventLog
from pm4py.util import xes_constants
import pandas as pd
from typing import Union
from typing import List
from typing import Tuple
def discover_performance_dfg(log: Union[EventLog, pd.DataFrame], business_hours: bool = False, worktiming: List[int] = [7, 17], weekends: List[int] = [6, 7]) -> Tuple[dict, dict, dict]:
"""
Discovers a performance directly-follows graph from an event log
Parameters
---------------
log
Event log
business_hours
Enables/disables the computation based on the business hours (default: False)
worktiming
(If the business hours are enabled) The hour range in which the resources of the log are working (default: 7 to 17)
weekends
(If the business hours are enabled) The weekends days (default: Saturday (6), Sunday (7))
Returns
---------------
performance_dfg
Performance DFG
start_activities
Start activities
end_activities
End activities
"""
general_checks_classical_event_log(log)
if check_is_pandas_dataframe(log):
        check_pandas_dataframe_columns(log)
        from pm4py.statistics.start_activities.pandas import get as start_activities_module
        from pm4py.statistics.end_activities.pandas import get as end_activities_module
properties = get_properties(log)
activity_key = properties[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] if constants.PARAMETER_CONSTANT_ACTIVITY_KEY in properties else xes_constants.DEFAULT_NAME_KEY
timestamp_key = properties[constants.PARAMETER_CONSTANT_TIMESTAMP_KEY] if constants.PARAMETER_CONSTANT_TIMESTAMP_KEY in properties else xes_constants.DEFAULT_TIMESTAMP_KEY
case_id_key = properties[constants.PARAMETER_CONSTANT_CASEID_KEY] if constants.PARAMETER_CONSTANT_CASEID_KEY in properties else constants.CASE_CONCEPT_NAME
dfg = get_dfg_graph(log, activity_key=activity_key, timestamp_key=timestamp_key, case_id_glue=case_id_key, measure="performance", perf_aggregation_key="all",
business_hours=business_hours, worktiming=worktiming, weekends=weekends)
start_activities = start_activities_module.get_start_activities(log, parameters=properties)
end_activities = end_activities_module.get_end_activities(log, parameters=properties)
    else:
        from pm4py.statistics.start_activities.log import get as start_activities_module
        from pm4py.statistics.end_activities.log import get as end_activities_module
properties = get_properties(log)
properties[dfg_discovery.Parameters.AGGREGATION_MEASURE] = "all"
properties[dfg_discovery.Parameters.BUSINESS_HOURS] = business_hours
properties[dfg_discovery.Parameters.WORKTIMING] = worktiming
properties[dfg_discovery.Parameters.WEEKENDS] = weekends
dfg = dfg_discovery.apply(log, parameters=properties)
start_activities = start_activities_module.get_start_activities(log, parameters=properties)
end_activities = end_activities_module.get_end_activities(log, parameters=properties)
return dfg, start_activities, end_activities | df8d9669c7e2a4cd3170cb1c5a1ecc7e7811649e | 16,586 |
import warnings
import numpy as np
def mifs(data, target_variable, prev_variables_index, candidate_variable_index, **kwargs):
"""
This estimator computes the Mutual Information Feature Selection criterion.
Parameters
----------
data : np.array matrix
Matrix of data set. Columns are variables, rows are observations.
target_variable : int or float
Target variable. Can not be in data!
prev_variables_index: list of ints, set of ints
Indexes of previously selected variables.
candidate_variable_index : int
Index of candidate variable in data matrix.
beta: float
Impact of redundancy segment in MIFS approximation. Higher the beta is, higher the impact.
Returns
-------
j_criterion_value : float
J_criterion approximated by the Mutual Information Feature Selection.
"""
assert isinstance(data, np.ndarray), "Argument 'data' must be a numpy matrix"
assert isinstance(target_variable, np.ndarray), "Argument 'target_variable' must be a numpy matrix"
assert isinstance(candidate_variable_index, int), "Argument 'candidate_variable_index' must be an integer"
assert len(data.shape) == 2, "For 'data' argument use numpy array of shape (n,p)"
assert data.shape[0] == len(target_variable), "Number of rows in 'data' must equal target_variable length"
assert candidate_variable_index < data.shape[1], "Index 'candidate_variable_index' out of range in 'data'"
for i in prev_variables_index:
assert isinstance(i, int), "All previous variable indexes must be int."
if kwargs.get('beta') is None:
beta = 1
warnings.warn("Parameter `beta` not provided, default value of 1 is selected.", Warning)
else:
beta = kwargs.pop('beta')
assert isinstance(beta, int) or isinstance(beta, float), "Argument 'beta' must be int or float"
candidate_variable = data[:, candidate_variable_index]
if len(prev_variables_index) == 0:
redundancy_sum = 0
else:
redundancy_sum = np.apply_along_axis(mutual_information, axis=0, arr=data[:, prev_variables_index], vector_2=candidate_variable).sum()
return mutual_information(candidate_variable, target_variable) - beta*redundancy_sum | 058ebdbb831d7fb52c4b5f053ba7bb8a1ce7f144 | 16,587 |
def input_thing():
"""输入物品信息"""
name_str, price_str, weight_str = input('请输入物品信息(名称 价格 重量):').split()
return name_str, int(price_str), int(weight_str) | 2a986e9479e8e4262cfab89f258af3536c5fefe3 | 16,588 |
import numpy as np
def extract_features_mask(img, mask):
"""Computes law texture features for masked area of image."""
preprocessed_img = laws_texture.preprocess_image(img, size=15)
law_images = laws_texture.filter_image(preprocessed_img, LAW_MASKS)
law_energy = laws_texture.compute_energy(law_images, 10)
energy_features_list = []
for type, energy in law_energy.items():
# extract features for mask
energy_masked = energy[np.where(mask != 0)]
energy_feature = np.mean(energy_masked, dtype=np.float32)
energy_features_list.append(energy_feature)
return energy_features_list | e184695fb2879cf9fd418e7110498717585b4878 | 16,589 |
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
# The `switch`/`case` helper used below is expected from the surrounding module.
def construct_grid_with_k_connectivity(n1, n2, k, figu=False):
    """Constructs a directed grid graph with side lengths n1 and n2 and neighborhood connectivity k.
    To plot the adjacency matrix, pass figu=True.
    """
def feuclidhorz(u , v):
return np.sqrt((u[0] - (v[0]-n2))**2+(u[1] - v[1])**2)
def feuclidvert(u , v):
return np.sqrt((u[0] - (v[0]))**2+(u[1] - (v[1]-n1))**2 )
def fperiodeuc(u , v):
return np.sqrt((u[0] - (v[0]-n2))**2 + (u[1] - (v[1]-n1))**2 )
def finvperiodic(u,v):
return fperiodeuc(v,u)
def finvvert(u,v):
return feuclidvert(v,u)
def finvhorz(u,v):
return feuclidhorz(v,u)
def fperiodeucb(u , v):
return np.sqrt((u[0]-n2 - (v[0]))**2 + (u[1] - (v[1]-n1))**2 )
def fperiodeucc(v, u):
return np.sqrt((u[0]-n2 - (v[0]))**2 + (u[1] - (v[1]-n1))**2 )
def fchhorz(u , v):
return max(abs(u[0] - (v[0]-n2)), abs(u[1] - v[1]))
def fchvert(u , v):
return max(abs(u[0] - (v[0])),abs(u[1] - (v[1]-n1)) )
def fperiodch(u , v):
return max(abs(u[0] - (v[0]-n2)) , abs(u[1] - (v[1]-n1)) )
def finvperiodicch(u,v):
return fperiodch(v,u)
def finvvertch(u,v):
return fchvert(v,u)
def finvhorzch(u,v):
return fchhorz(v,u)
def fperiodchb(u , v):
return max(abs(u[0]-n2 - (v[0])) , abs(u[1] - (v[1]-n1)))
def fperiodchc(v, u):
return max(abs(u[0]-n2 - (v[0])) , abs(u[1] - (v[1]-n1)) )
def fperiodchd(u , v):
return max(abs(n2-u[0] - (v[0])) , abs(u[1] - (n1-v[1])))
def fperiodche(v, u):
return max(abs(n2-u[0] - (v[0])) , abs(u[1] - (n1-v[1])) )
#distF = distance Function
#distM = distance meter
for case in switch(k):
if case(4):
distF = 'euclidean'
distM = 1 #.41
break
if case(8):
distF = 'euclidean'
distM = 1.5
break
if case(12):
distF = 'euclidean'
distM = 2
break
if case(20):
distF = 'euclidean'
distM = 2.3 #2.5
break
if case(24): #check this again
distF = 'chebyshev'
distM = 2 #or euclidean 2.9
break
if case(36):
distF = 'euclidean'
distM = 3.5
break
if case(44):
distF = 'euclidean'
distM = 3.8
break
if case(28):
distF = 'euclidean'
distM = 3
break
if case(48):
distF = 'euclidean'
distM = 4
break
x = np.linspace(1,n1,n1)
y = np.linspace(1,n2,n2)
X,Y = np.meshgrid(x,y)
XY = np.vstack((Y.flatten(), X.flatten()))
adj = squareform( (pdist(XY.T, metric = distF)) <= distM )
if k!= 24:
adjb = squareform( (pdist(XY.T, metric = feuclidhorz)) <= distM )
adjc = squareform( (pdist(XY.T, metric = feuclidvert)) <= distM )
adjd = squareform( (pdist(XY.T, metric = fperiodeuc)) <= distM )
adje = squareform( (pdist(XY.T, metric = finvperiodic)) <= distM )
adjf = squareform( (pdist(XY.T, metric = finvvert)) <= distM )
adjg = squareform( (pdist(XY.T, metric = finvhorz)) <= distM )
adjx = squareform( (pdist(XY.T, metric = fperiodeucc)) <= distM )
adjy = squareform( (pdist(XY.T, metric = fperiodeucb)) <= distM )
Adj = ( adj + adjb +adjc+adjd+adje+adjf+adjg+adjx+adjy >=1)
if k == 24:
adjb = squareform( (pdist(XY.T, metric = fchhorz)) <= distM )
adjc = squareform( (pdist(XY.T, metric = fchvert)) <= distM )
adjd = squareform( (pdist(XY.T, metric = fperiodch)) <= distM )
adje = squareform( (pdist(XY.T, metric = finvperiodicch)) <= distM )
adjf = squareform( (pdist(XY.T, metric = finvvertch)) <= distM )
adjg = squareform( (pdist(XY.T, metric = finvhorzch)) <= distM )
adjx = squareform( (pdist(XY.T, metric = fperiodchb)) <= distM )
adjy = squareform( (pdist(XY.T, metric = fperiodchc)) <= distM )
Adj = ( adj + adjb +adjc+adjd+adje+adjf+adjg+adjx+adjy >=1)
#Adj = ( adj+adjb >=1 )
#print adj
#plt.plot(sum(Adj))
if figu:
plt.figure(figsize=(1000,1000))
plt.imshow(Adj,interpolation = 'none', extent = [0,n1*n2 , n1*n2,0] )
plt.xticks(np.arange(n1*n2))
plt.yticks(np.arange(n1*n2))
plt.grid(ls = 'solid')
#plt.colorbar()
""" #text portion
min_val = 0
max_val = n1*n2
diff = 1
ind_array = np.arange(min_val, max_val, diff)
x, y = np.meshgrid(ind_array, ind_array)
for x_val, y_val in zip(x.flatten(), y.flatten()):
c = adj[x_val,y_val]
plt.text(x_val+0.5, y_val+0.5, '%.2f' % c, fontsize=8,va='center', ha='center')
"""
G = nx.from_numpy_matrix(Adj)
return (G,Adj) | 46b690f02c4f025719424582acecff43580543da | 16,590 |
import os
import urllib.request
import tarfile
def get_tar_file(tar_url, dump_dir=os.getcwd()):
""" Downloads and unpacks compressed folder
Parameters
----------
tar_url : string
url of world wide web location
dump_dir : string
path to place the content
Returns
-------
tar_names : list
list of strings of file names within the compressed folder
"""
ftp_stream = urllib.request.urlopen(tar_url)
tar_file = tarfile.open(fileobj=ftp_stream, mode="r|gz")
tar_file.extractall(path=dump_dir)
tar_names = tar_file.getnames()
return tar_names | 2d07940dc107510436155a0ce5ff55299bf2f45e | 16,591 |
import numpy as np
from numpy import empty, floor, argsort, concatenate, cumsum, argmin, array
def _optimal_shift(pos, r_pad, log):
"""
Find the shift for the periodic unit cube that would minimise the padding.
"""
npts, ndim = pos.shape
# +1 whenever a region starts, -1 when it finishes
start_end = empty(npts*2, dtype=np.int32)
start_end[:npts] = 1
start_end[npts:] = -1
pad_min = []
# Go along each axis, find the point that would require least padding
for ax in range(ndim):
start_reg = pos[:,ax] - r_pad
end_reg = pos[:,ax] + r_pad
# make periodic
start_reg -= floor(start_reg)
end_reg -= floor(end_reg)
# Order from 0-1, add 1 whenever we come into range of a new point, -1
# whenever we leave
idx_sort = argsort(concatenate([start_reg, end_reg]))
region_change = cumsum(start_end[idx_sort])
# Find the minimum
min_chg = argmin(region_change)
# Note since this is the minimum trough:
# start_end[idx_sort[min_chg]==-1 (a trough)
        # start_end[idx_sort[min_chg+1]] == +1 (otherwise it wasn't the minimum)
trough0 = end_reg[idx_sort[min_chg]-npts] # has to be a -1 (i.e. region end)
if min_chg+1==2*npts:
trough1 = start_reg[idx_sort[0]]+1
mid_trough = 0.5 * (trough0 + trough1)
mid_trough -= floor(mid_trough)
else:
trough1 = start_reg[idx_sort[min_chg+1]]
mid_trough = 0.5 * (trough0 + trough1)
pad_min.append(mid_trough)
shift = array([1.0-x for x in pad_min], dtype=pos.dtype)
print("Best shift", ', '.join('%.3f'%x for x in shift), file=log)
return shift | cac3c56307ea3d240ebe838ea4d26bb38c62dc3c | 16,592 |
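# Usage sketch for _optimal_shift: positions live in the periodic unit cube, and `log` only
# needs to work as the file= target of print(), so sys.stdout stands in for it here.
import sys
pts = np.random.RandomState(0).rand(200, 3)
print(_optimal_shift(pts, 0.05, sys.stdout))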
def ShowActStack(cmd_args=None):
""" Routine to print out the stack of a specific thread.
usage: showactstack <activation>
"""
    if cmd_args is None or len(cmd_args) < 1:
        print("No arguments passed")
        print(ShowAct.__doc__.strip())
        return False
    threadval = kern.GetValueFromAddress(cmd_args[0], 'thread *')
    print(GetThreadSummary.header)
    print(GetThreadSummary(threadval))
    print(GetThreadBackTrace(threadval, prefix="\t"))
return | 43b0eca326465fe9dc7b0207ba448d75da7e9889 | 16,593 |
import os
import datetime
import time
import dropbox
def upload(dbx, fullname, dbx_folder, subfolder, name, overwrite=False):
"""Upload a file.
Return the request response, or None in case of error.
"""
path = '/%s/%s/%s' % (dbx_folder, subfolder.replace(os.path.sep, '/'), name)
while '//' in path:
path = path.replace('//', '/')
mode = (dropbox.files.WriteMode.overwrite
if overwrite
else dropbox.files.WriteMode.add)
mtime = os.path.getmtime(fullname)
with open(fullname, 'rb') as f:
data = f.read()
with stopwatch('upload %d bytes' % len(data)):
try:
res = dbx.files_upload(
data, path, mode,
client_modified=datetime.datetime(*time.gmtime(mtime)[:6]),
mute=True)
except dropbox.exceptions.ApiError as err:
print('*** API error', err)
return None
print('uploaded as', res.name.encode('utf8'))
return res | 67271753093f8ccf15943301bcd37253e71a63c5 | 16,594 |
import json
def load_request(possible_keys):
"""Given list of possible keys, return any matching post data"""
pdata = request.json
if pdata is None:
pdata = json.loads(request.body.getvalue().decode('utf-8'))
for k in possible_keys:
if k not in pdata:
pdata[k] = None
# print('pkeys: %s pdata: %s' % (possible_keys, pdata))
return pdata | b21c503fac56398be6745a10fb95889128c6e2b2 | 16,595 |
import random
def get_random_tcp_start_pos():
""" reachability area:
x = [-0.2; 0.4]
y = [-0.28; -0.1] """
z_up = 0.6
tcp_x = round(random.uniform(-0.2, 0.4), 4)
tcp_y = round(random.uniform(-0.28, -0.1), 4)
start_tcp_pos = (tcp_x, tcp_y, z_up)
# start_tcp_pos = (-0.2, -0.28, z_up)
return start_tcp_pos | adf87dec45bf5a81c321f94c93d45a67f0aeff0d | 16,596 |
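# Usage sketch: sample one start pose and unpack the (x, y, z) tuple.
x, y, z = get_random_tcp_start_pos()
print("start TCP position:", x, y, z)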
def CalculateChiv3p(mol):
"""
#################################################################
Calculation of valence molecular connectivity chi index for
path order 3
---->Chiv3
Usage:
result=CalculateChiv3p(mol)
Input: mol is a molecule object.
Output: result is a numeric value
#################################################################
"""
return _CalculateChivnp(mol,NumPath=3) | 27405fce52540a0de9c4c1c2d5a35454681554fa | 16,597 |
from typing import Tuple
from typing import Optional
def coerce(version: str) -> Tuple[Version, Optional[str]]:
"""
Convert an incomplete version string into a semver-compatible Version
object
* Tries to detect a "basic" version string (``major.minor.patch``).
* If not enough components can be found, missing components are
set to zero to obtain a valid semver version.
:param str version: the version string to convert
:return: a tuple with a :class:`Version` instance (or ``None``
if it's not a version) and the rest of the string which doesn't
belong to a basic version.
:rtype: tuple(:class:`Version` | None, str)
"""
match = BASEVERSION.search(version)
if not match:
return (None, version)
ver = {
key: 0 if value is None else value for key, value in match.groupdict().items()
}
ver = Version(**ver)
rest = match.string[match.end():] # noqa:E203
return ver, rest | e712533aa05444ad47403fc10e7f2ec29b8132ec | 16,598 |
def choose_wyckoff(wyckoffs, number):
"""
choose the wyckoff sites based on the current number of atoms
rules
1, the newly added sites is equal/less than the required number.
2, prefer the sites with large multiplicity
"""
for wyckoff in wyckoffs:
if len(wyckoff[0]) <= number:
return choose(wyckoff)
return False | 14b276d8aa50e84f47d77f6796e193cc96ddd0a9 | 16,599 |