content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M)
---|---|---|
from typing import Dict, Optional
def from_file(archive: PathOrTarFile) -> Optional[Dict]:
"""Load and parse CRAN package archive
Args:
archive (PathOrTarFile): path to archive or `tarfile.TarFile` instance
Returns:
(dict): Dictionary of R package metadata
"""
return decode(read_description(archive))
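# Minimal usage sketch (decode and read_description are module-level helpers
# in this file; the archive name below is hypothetical):
# meta = from_file("Matrix_1.5-0.tar.gz")
# if meta:
#     print(meta.get("Package"), meta.get("Version"))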
|
b4e0c24bfc0c7489be27da23fcc5c3431273d932
| 34,453 |
from google.cloud import bigquery
def to_google_cloud_bigquery(pandas_gbq_schema):
"""Given a schema in pandas-gbq API format,
return a sequence of :class:`google.cloud.bigquery.schema.SchemaField`.
"""
# Need to convert from JSON representation to format used by client library.
schema = add_default_nullable_mode(pandas_gbq_schema)
return [bigquery.SchemaField.from_api_repr(field) for field in schema["fields"]]
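# Minimal sketch with an assumed pandas-gbq style schema; relies on the
# module-level add_default_nullable_mode helper to fill in missing modes:
pandas_gbq_schema = {
"fields": [
{"name": "id", "type": "INTEGER", "mode": "REQUIRED"},
{"name": "name", "type": "STRING"},
]
}
bq_fields = to_google_cloud_bigquery(pandas_gbq_schema)
# -> [SchemaField('id', 'INTEGER', 'REQUIRED'), SchemaField('name', 'STRING', 'NULLABLE')]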
|
519c6314889c1ca0bca747e2f5784ac00bf94aed
| 34,454 |
def update_exception_behavior(behavior_input, id):
"""
Executes a "update exception behavior" mutation with graphql-client
:param behavior_input: the behavior input
:param id: The id of the behavior to update
:return: Server response
"""
behavior_variables = {
"behaviorInput": behavior_input,
"id": id
}
response = perform_infinity_request(query_or_mutation=UPDATE_EXCEPTION_BEHAVIOR_MUTATION,
version=1,
variables=behavior_variables,
description="Updating exception Behavior")
print("Updating exception behavior", response)
return response
|
b8046c2a2a64e66e73159f2f19137bd7b8ab9097
| 34,455 |
def ps_dim(tasks):
"""
Dimension a Polling Server such that it fits into the provided task set.
Parameters
----------
tasks : list of pSyCH.tasks.Periodic
Task set into which the polling server is to be fitted.
Returns
-------
sched : bool
True if a polling server can be dimensioned for the task set, False otherwise.
params : dict
The dictionary includes the following key-value pairs:
|br|
"P" - HB product.
|br|
"T" - Suggested budget replenishment period for the server.
|br|
"C" - Suggested max budget for the server.
"""
params = {}
P = ut.get_P(tasks)
params["P"] = P
U_max = round((2 - P)/P, 3)
if (U_max < 0):
return False, params
T = min([task.t for task in tasks])
C = round(U_max * T, 3)
params["T"] = T
params["C"] = C
return True, params
|
458b77bd5bb32ddd5c7002f872d0ed4e22642c4c
| 34,456 |
def signature(obj, *, follow_wrapped=True):
"""Get a signature object for the passed callable."""
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
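# Example: introspecting a callable (Signature here is inspect.Signature,
# as in the standard library where this helper lives):
def greet(name, *, excited=False): return name
print(signature(greet))  # -> (name, *, excited=False)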
|
cec0e2d7bcf6cd3347bcd06ef58f33a0c65bdb94
| 34,457 |
def create_sys(Model, size, epsi, dens, init, bd_cond='periodic'):
"""Creates a instance of the LdftModel `Model` under the parameters `size`,
`epsi`, `dens`. The `init`-parameter determines the initial density profile
of the created system.
Parameters
----------
Model : `class`
The model the system should be an instance of. Either ``LG2dAOHighl``,
``LG3dAOHighl``, ``LG2dMf`` or ``LG3dMf``.
size : `Tuple` of `int`
Size of the system (for each dimension).
epsi : `float`
Attraction strength (times inverse temperature).
dens : `float`
Average density
init : `string` or `Model`
Specifies the initial density profile. It is either a system providing
its current density profile or a String. The following strings are
supported: 'hom', 'sph', 'sl', 'cyl'. Note that 'sl' and 'cyl' are the
same in two dimensions. The density profile is then either homogeneous
(`init`='hom'), a point-like nucleus (`init`='sph'), a slab
(`init`='sl') or a cylinder (`init`='cyl'). The cylinder points along the
0th axis. The normal of the slab points along the last axis (2 in three
dim. and 1 in two dim.). For bd_cond '11_if', '110_if' or '111_if' the
last entry of the `size`-tuple is therefore supposed to be double that
of the others.
bd_cond : `string`
The boundary condition of the system. It supports the following values:
'periodic', '11_if', '110_if', '111_if'. For bd_cond '11_if', '110_if'
or '111_if' the last entry of the `size`-tuple is supposed to be double
that of the others.
Returns
-------
syst : `Model`
System of the type `Model`, with initial density profile.
"""
#Create the system in case of no profile of another system shall be
#taken over as initial profile
if isinstance(init, str):
nucLen = max(size[0]//20, 1)
#Determine the shape of the nucleus
if init == 'hom':
nuc = tuple([0 for s in size])
elif init == 'sph':
nuc = tuple([nucLen for s in size])
elif init == 'cyl':
nuc = [nucLen for s in size]
nuc[0] = size[0]
nuc = tuple(nuc)
elif init == 'sl':
nuc = [s for s in size]
nuc[-1] = nucLen
nuc = tuple(nuc)
#Create the initial density profile
if Model == LG2dAOHighl:
r_pc = Model._cal_bulk_r_pc(dens, epsi)
syst = Model(size, epsi=epsi, dens_c=dens,
bound_cond=bd_cond)
syst.create_init_profile(dens=[dens, r_pc, r_pc],
shape=[nuc, (0, 0), (0, 0)])
if Model == LG3dAOHighl:
r_pc = Model._cal_bulk_r_pc(dens, epsi)
syst = Model(size, epsi=epsi, dens_c=dens,
bound_cond=bd_cond)
syst.create_init_profile(dens=[dens, r_pc, r_pc, r_pc],
shape=[nuc, (0, 0, 0), (0, 0, 0), (0, 0, 0)])
if Model == LG2dMf or Model == LG3dMf:
syst = Model(size, epsi=epsi, dens=dens, bound_cond=bd_cond)
syst.create_init_profile(dens=[dens], shape=[nuc])
#Create the system in case when a density profile of another system
#shall be taken over as initial profile
if isinstance(init, Model):
initProfile = init.r
if Model == LG2dAOHighl or Model == LG3dAOHighl:
syst = Model(size, epsi=epsi, dens_c=dens, r=initProfile,
bound_cond=bd_cond)
elif Model == LG2dMf or Model == LG3dMf:
syst = Model(size, epsi=epsi, dens=dens, r=initProfile,
bound_cond=bd_cond)
return syst
|
d30ae63022934e668c0ef90a72c91713caa6a228
| 34,459 |
def limit(resource, value):
"""
Check if this is a valid limit for the number of matching resources to be specified.
:param resource: the resource the limit applies to (not used by this check)
:param value: specified limit
:type value: int
:return: True if valid limit, False otherwise
:rtype: bool
"""
return value > 0
|
3ee75e7e41752e2bddebb94915bbf9161e02caec
| 34,460 |
import gc
def sequence(values, rasts):
"""
Iterates through a sequence of linearly interpolated rasters.
Args:
values:
The unknown values for which new rasters will be interpolated and returned.
rasts:
A dictionary of the known values and rasters between which the interpolated rasters are calculated.
Dictionary entries consist of value-raster pairs, where raster can be either a preloaded raster, or a
function that loads and returns a raster (useful to avoid memory errors).
"""
def _lerp(value, fromval, fromrast, toval, torast):
if value == fromval:
return fromrast
elif value == toval:
return torast
elif not fromval < value < toval:
raise ValueError("Value to interpolate must be between fromval and toval")
# figure out relative position between rasters, and multiply this by the difference
prog = (value - fromval) / float(toval - fromval)
diffband = torast.bands[0] - fromrast.bands[0]
offsetband = diffband * prog
newband = fromrast.bands[0] + offsetband
# finally assign to raster
outrast = fromrast.copy(shallow=True)
outrast.add_band(newband)
del diffband,offsetband
gc.collect()
return outrast
# allow preloaded rasters or callables that load upon request
def _make_callable(rast):
if not hasattr(rast, '__call__'):
return lambda: rast
else:
return rast
rasts = ((val,_make_callable(rast)) for val,rast in rasts.items())
# loop pairs of fromrast torast
rasts = sorted(rasts, key=lambda valrast: valrast[0])
rasts = iter(rasts)
fromval,fromrast = next(rasts)
fromrast = fromrast()
toval,torast = next(rasts)
torast = torast()
for val in values:
if val < fromval:
raise NotImplementedError('Extrapolation not currently supported')
# increment to next pair
while val > toval:
if val > values[-1]:
raise NotImplementedError('Extrapolation not currently supported')
del fromrast
gc.collect()
fromval,fromrast = toval,torast
toval,torast = next(rasts)
torast = torast()
# interp
rast = _lerp(val, fromval, fromrast, toval, torast)
yield val,rast
|
6ff4d0f192f1bc92030693aba4e903378d06b636
| 34,461 |
def is_native_xmon_op(op: cirq.Operation) -> bool:
"""Check if the gate corresponding to an operation is a native xmon gate.
Args:
op: Input operation.
Returns:
True if the operation is native to the xmon, False otherwise.
"""
return isinstance(op, cirq.GateOperation) and is_native_xmon_gate(op.gate)
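# Sketch (assumes is_native_xmon_gate recognizes e.g. cirq.X):
# q = cirq.LineQubit(0)
# op = cirq.X(q)         # a cirq.GateOperation
# is_native_xmon_op(op)  # -> True iff is_native_xmon_gate(cirq.X)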
|
41189dc252c766669127129e4a801f13b93a33fb
| 34,462 |
import numpy as np
def mono_extractor(b_format, azis=None, eles=None, mode='beam'):
"""
:param b_format: (frames, channels) IN SN3D
:param azis: azimuth angles for the beamformer (used when mode='beam')
:param eles: elevation angles for the beamformer (used when mode='beam')
:param mode: 'beam' or 'omni'
:return: mono signal of shape (frames,)
"""
frames, channels = b_format.shape
x = np.zeros(frames)
if mode == 'beam':
# MaxRE decoding
b_format_n3d = b_format * np.asarray([1, np.sqrt(3), np.sqrt(3), np.sqrt(3)]) # N3D
alpha = np.asarray([0.775, 0.4, 0.4, 0.4]) # MaxRE coefs
decoding_gains = get_ambisonic_gains(azis, eles) # N3D
w = decoding_gains*alpha[:,np.newaxis]
x = np.sum(b_format_n3d * w.T, axis=1) # N3D BY N3D
elif mode == 'omni':
# Just take the W channel
x = b_format[:,0]
return x
|
edaf426546a41dd3bb4afcb95e28dcfe93e20044
| 34,463 |
def get_ranking_order_switches(ranked_list):
"""ranked_list has to be a list of quadruples (obs_id, sent_id, score, rank) output from the get_ranks function,
as in Ranking-Based Evaluation of Regression Models, Rosset et al."""
ranking_order_switches = 0
# iterate over the list, comparing each observation i against every later observation j
n = len(ranked_list)
for i in range(0, n):
j = i + 1
while j < n:
if ranked_list[i][3] > ranked_list[j][3]:
if enable_printing:  # module-level flag
print("switch detected: ", ranked_list[i], " - ", ranked_list[j])
ranking_order_switches += 1
j += 1
return ranking_order_switches
|
f87e8e07bdde631747454ddd27a79e81ed3b5c6f
| 34,464 |
import base58
def b58_to_bytes(val: str) -> bytes:
"""
Convert a base 58 string to bytes
"""
return base58.b58decode(val)
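# Round-trip example with the `base58` package:
assert b58_to_bytes("StV1DL6CwTryKyV") == b"hello world"
assert base58.b58encode(b"hello world") == b"StV1DL6CwTryKyV"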
|
49f4634a4c44f162aca242e5ccf41239926af504
| 34,465 |
import numpy as np
def generate_sphere(phi, theta, r):
"""
Generate points for a structured grid for a spherical shell volume.
This method is useful for generating a structured spherical mesh for VTK.
:param phi: azimuthal angle array
:param theta: polar angle array
:param r: array of radii for the shell
:return: grid points, dimensions tuple
"""
r = np.array(r, dtype=float)
points = np.empty([len(phi) * len(r) * len(theta), 3])
start = 0
for th in theta:
x_plane = (np.cos(phi) * r[:, None] * np.sin(th)).ravel()
y_plane = (np.sin(phi) * r[:, None] * np.sin(th)).ravel()
z_plane = (np.ones_like(phi) * r[:, None] * np.cos(th)).ravel()
end = start + len(x_plane)
plane_points = points[start:end]
plane_points[:, 0] = x_plane
plane_points[:, 1] = y_plane
plane_points[:, 2] = z_plane
start = end
dims = (len(phi), len(r), len(theta))
return points, dims
|
3de54ecdc22c056d5ec3a3d979573b4e4cf0e893
| 34,466 |
def get_target_langs(request):
"""
Get Target Languages for a CI Pipeline
:param request: Request object
:return: HttpResponse object
"""
if not request.is_ajax():
return HttpResponse("Not an Ajax Call", status=400)
post_params = request.POST.dict()
ci_pipeline = post_params.get('ci_pipeline', '')
context = Context(
{'META': request.META,
'ci_pipeline': ci_pipeline}
)
template_string = """
{% load tag_target_langs from custom_tags %}
{% tag_target_langs ci_pipeline %}
"""
return HttpResponse(Template(template_string).render(context))
|
f7b663b26012505568b5d49b81903951ad4842ac
| 34,467 |
def page_list_return(total, current=1):
"""
page
分页,返回本次分页的最小页数到最大页数列表
"""
min_page = current - 4 if current - 6 > 0 else 1
max_page = min_page + 6 if min_page + 6 < total else total
return range(min_page, max_page + 1)
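# Example: a 7-page window ending just past the current page
assert list(page_list_return(20, 10)) == [6, 7, 8, 9, 10, 11, 12]
assert list(page_list_return(3)) == [1, 2, 3]  # small totals are clamped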
|
99b099a7e90e1e150881d93129b1558eb8bc9a20
| 34,469 |
import time
def get_deployment_dates(site, node, sensor, deploy):
"""
Based on the site, node and sensor names and the deployment number, determine the start and end times for a
deployment.
:param site: Site name to query
:param node: Node name to query
:param sensor: Sensor name to query
:param deploy: Deployment number
:return: start and stop dates for the deployment of interest
"""
# request the sensor deployment metadata
data = get_sensor_information(site, node, sensor, deploy)
# use the metadata to extract the start and end times for the deployment
if data:
start = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',
time.gmtime(data[0]['eventStartTime'] / 1000.))
else:
return None, None
if data[0]['eventStopTime']:
# check to see if there is a stop time for the deployment, if so use it ...
stop = time.strftime('%Y-%m-%dT%H:%M:%S.000Z',
time.gmtime(data[0]['eventStopTime'] / 1000.))
else:
# ... otherwise use the current time as this is an active deployment
stop = time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(time.time()))
return start, stop
|
cbc91ea8518184a40c1e8d35bacc1deebab3e768
| 34,472 |
def delete_documents_by_filter(filters: FilterRequest):
"""
Can be used to delete documents from a document store.
:param filters: Filters to narrow down the documents to delete.
Example: '{"filters": {"name": ["some", "more"], "category": ["only_one"]}}'
To delete all documents you should provide an empty dict, like:
'{"filters": {}}'
"""
DOCUMENT_STORE.delete_documents(filters=filters.filters)
return True
|
d69f6d390544d61666a576c1a84d4721bad6ad5a
| 34,473 |
def ensure_credential_server_running( foreground=False, run_once=False ):
"""
Instantiate our credential server and keep it running.
"""
# is the watchdog running?
pids = syndicate_watchdog.find_by_attrs( "syndicate-credential-server-watchdog", {} )
if len(pids) > 0:
# it's running
return True
if foreground:
# run in foreground
if run_once:
return credential_server_spawn( 0 )
else:
return syndicate_watchdog.main( credential_server_spawn, respawn_exit_statuses=range(1,254) )
# not running, and not foregrounding. fork a new one
try:
watchdog_pid = os.fork()
except OSError as oe:
logger.error("Failed to fork, errno = %s" % oe.errno)
return False
if watchdog_pid == 0:
# child--become watchdog
setproctitle.setproctitle( "syndicate-credential-server-watchdog" )
if run_once:
syndicate_daemon.daemonize( lambda: credential_server_spawn(0), logfile_path=getattr(CONFIG, "SYNDICATE_HTTP_LOGFILE", None) )
else:
syndicate_daemon.daemonize( lambda: syndicate_watchdog.main( credential_server_spawn, respawn_exit_statuses=range(1,254) ), logfile_path=getattr(CONFIG, "SYNDICATE_HTTP_LOGFILE", None) )
|
2a55534f29509ad275d350db534c7ab158388278
| 34,475 |
import glob
import cantools
def load_dbc(folder, verbose=True):
"""
Load all dbc files from specified folder add to dbc database.
Parameters
----------
folder : str
Absolute or relative path to folder, which contains dbc files.
verbose : bool, optional
Set to False to have no readout. The default is True.
Returns
-------
dbc_db : cantools.db.Database
dbc database to convert the data from binary format.
"""
dbc_db = cantools.db.Database(
messages=None, nodes=None, buses=None, version=None
)
if verbose:
print("Loading dbc...")
dbc_files = glob.iglob(folder + "/*.dbc")
file_count = 0
for dbc_file in dbc_files:
file_count += 1
dbc_db.add_dbc_file(dbc_file)
assert file_count > 0, "No dbc-files in '{}'!".format(folder)
if verbose:
print("Finished loading.")
return dbc_db
|
62b062d1f48021317f99cd46dad6ad697d0ec4a9
| 34,476 |
def unique_timestamps(data):
"""
Identify unique timestamps in a dataframe
:param data: dataframe. The 'Time' column is used by default
:returns: a sorted list of the unique timestamps
"""
timestamps = sorted(data['Time'].unique())
return timestamps
|
4e8b86643e4c976d51e39663ffefc95ec2be0e55
| 34,477 |
def printf_format_for_type(t, types):
""" Returns a format string for printing the given type
(either atomic or struct). """
description = type_description(t, types)
if "struct" in description:
specifer = printf_format_for_struct(t, types)
else:
specifer = description["printf_specifier"]
return specifer.replace("\"", "\\\"")
|
6647cfa80934e345f269b4d4afa69e95f0d8928d
| 34,478 |
def get_operator_artifact_type(operatorArtifactString):
"""get_operator_artifact_type takes a yaml string and determines if it is
one of the expected bundle types.
:param operatorArtifactString: Yaml string to type check
"""
# Default to unknown file unless identified
artifact_type = UNKNOWN_FILE
try:
operatorArtifact = safe_load(operatorArtifactString)
except MarkedYAMLError:
msg = "Courier requires valid input YAML files"
logger.error(msg)
raise OpCourierBadYaml(msg)
else:
if isinstance(operatorArtifact, dict):
if "packageName" in operatorArtifact:
artifact_type = PKG_STR
elif operatorArtifact.get("kind") in {CRD_STR, CSV_STR}:
artifact_type = operatorArtifact["kind"]
return artifact_type
|
50e14ff39e8c4e7258c0f305cf8a05973c836d3a
| 34,479 |
def is_occ_conflict_exception(e):
"""
Is the exception an OccConflictException?
:type e: :py:class:`botocore.exceptions.ClientError`
:param e: The ClientError caught.
:rtype: bool
:return: True if the exception is an OccConflictException. False otherwise.
"""
is_occ = e.response['Error']['Code'] == 'OccConflictException'
return is_occ
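# Usage sketch inside a QLDB-style retry loop (the session/statement names
# below are hypothetical):
# try:
#     execute_transaction(session)
# except ClientError as e:
#     if is_occ_conflict_exception(e):
#         retry_transaction(session)
#     else:
#         raise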
|
3df46480341b617570e1e980ade194c9bd3fb26e
| 34,480 |
def replace_list_element(l, before, after):
"""Helper function for get_cluster_idx
"""
for i, e in enumerate(l):
if e == before:
l[i] = after
return l
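# Example: every occurrence is replaced, and the list is mutated in place
assert replace_list_element([1, 2, 3, 2], 2, 9) == [1, 9, 3, 9]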
|
b15f43332efdcec878fbd16df64d46b1e23d2630
| 34,481 |
def hit_counter_from_list(filenames, barcode):
"""
:param filenames: all files from a single barcode - just the same hit_counter but for a list of filenames
:param barcode: the barcode associated to our experiment
:return: a dict {genome:hits}
"""
genomes = dict()
for name in filenames:
command = f"samtools view ./{barcode}/bams/{name}.bam | cut -f 3"
list_ids = shell_runner(command)
for hit in list_ids:
if hit == '*':
hit = 'unmapped'
genomes[hit] = genomes.get(hit, 0) + 1
return genomes
|
ed85426ed654399a10434a4d1c4d97cd603368c1
| 34,482 |
def average(v):
"""
:param v: a list of numerical values
:return: average for a list of values expressed as a float
"""
return sum(v) * 1.0 / len(v)
|
cbc9e450ee854289c62b613c257655fcd0c3e62c
| 34,483 |
def get_all_projects_of_type(project_type: int):
"""Get the project ids for active and inactive projects in Firebase DB."""
project_id_list = []
fb_db = firebaseDB()
# we neglect private projects here
# since there are no projects set up in production yet
status_list = ["active", "inactive"]
for status in status_list:
logger.info(f"query {status} projects")
projects = (
fb_db.reference(f"v2/projects/")
.order_by_child("status")
.equal_to(status)
.get()
)
for project_id, data in projects.items():
if (data.get("projectType", 1) == project_type) and (
data.get("tutorialId", None) is None
):
project_id_list.append(project_id)
logger.info(f"got {len(project_id_list)} project from firebase.")
return project_id_list
|
ed5211bcab68139de79c262d2f61c5e62e714610
| 34,484 |
import typing
def set_engine(filename, *,
resolve: bool = False,
require: bool = False,
title: typing.Optional[str] = None,
title_memory_tag: str = _globals.MEMORY_TAG):
"""Return new sqlite3 engine and set it as default engine for treedb."""
log.info('set_engine: %r', filename)
if isinstance(filename, sa.engine.Engine):
engine = filename
if isinstance(filename, _proxies.EngineProxy):
engine = engine.engine
ENGINE.engine = engine
return ENGINE
if filename is None:
if title is None: # pragma: no cover
raise TypeError(f'filename=None requires title, given: {title!r}')
ENGINE.memory_write_path = _tools.path_from_filename(f'{title}{title_memory_tag}',
expanduser=False)
else:
del title
filename = _tools.path_from_filename(filename)
if resolve:
filename = filename.resolve(strict=False)
if require and not filename.exists():
log.error('required engine file not found: %r', filename)
raise RuntimeError(f'engine file does not exist: {filename!r}')
ENGINE.file = filename
log_versions(engine=ENGINE)
return ENGINE
|
eebd51176d8b4533af810d2f27cd4e48ea6bf316
| 34,485 |
def camel_split(string):
# type: (str) -> tuple
"""
>>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('theBirdsAndTheBees')))
('the', 'Birds', 'And', 'The', 'Bees')
>>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('theBirdsAndTheBees123')))
('the', 'Birds', 'And', 'The', 'Bees', '123')
>>> print('(%s)' % ', '.join("'%s'" % s for s in camel_split('theBirdsAndTheBeesABC123')))
('the', 'Birds', 'And', 'The', 'Bees', 'ABC', '123')
"""
words = []
character_type = None
for s in string:
if s in '0123456789':
if character_type != 0:
words.append([s])
else:
words[-1].append(s)
character_type = 0
elif s.lower() == s:
if character_type not in (1, 2):
words.append([s])
else:
words[-1].append(s)
character_type = 1
else:
if character_type != 2:
words.append([s])
else:
words[-1].append(s)
character_type = 2
return tuple(
''.join(w) for w in words
)
|
d001f42b103ad911c92256bd326a3d478fc8f424
| 34,486 |
def iSP(a: Point, b: Point, c: Point) -> Turn:
"""iSP
Determine the positional relationship of the three Points.
Returns
-------
Turn direction
"""
flg = sign((b - a).det(c - a))
if flg == 1:
return Turn.CCW
elif flg == -1:
return Turn.CW
else:
if sign((b - a).dot(c - b)) > 0:
return Turn.FRONT
elif sign((a - b).dot(c - a)) > 0:
return Turn.BACK
else:
return Turn.MIDDLE
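# Sketch (assumes the module's Point/Turn/sign definitions):
# iSP(Point(0, 0), Point(1, 0), Point(0, 1))  -> Turn.CCW    (left turn)
# iSP(Point(0, 0), Point(1, 0), Point(0, -1)) -> Turn.CW     (right turn)
# iSP(Point(0, 0), Point(2, 0), Point(1, 0))  -> Turn.MIDDLE (c lies between a and b)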
|
80143dcb4d3b6985cae49208193ee9662b4e5edb
| 34,487 |
def generatePathsACS(ants: list, graph: np.ndarray, H: np.ndarray, P: np.ndarray, alpha: float,
beta: float, decay: float, pher_init: float, Q: float,
exp_heuristic: bool = True) -> list:
"""
Function that performs the exploration of the graph using the Ant Colony System strategy
proposed in
Dorigo, M., & Gambardella, L. M. (1997). Ant colony system: a cooperative learning
approach to the traveling salesman problem. IEEE Transactions on evolutionary computation,
1(1), 53-66.
Parameters
----------
ants: list
List of ant instances.
graph: np.ndarray (nodes, nodes), dtype=np.int8
Graph to be explored.
H: np.ndarray (nodes, nodes), dtype=np.float64
Heuristic information.
P: np.ndarray (nodes, nodes), dtype=np.float64
Pheromone information.
alpha: float
The alpha parameter reference the influence of pheromones when the ant makes a decision
on the path through the walk being constructed.
beta: float
Analogous to the alpha parameter, the beta parameter reference the importance given
to the heuristic information received in H.
decay: float
Decay to be applied during the local update of the pheromone matrix values after an ant
has made the tour. This parameter is used in the local pheromone update given by equation
P[i,j] = (1 - decay) * P[i,j] + decay * pher_init
pher_init: float
Parameter involved in the local update of the pheromone matrix values according to the
equation
P[i,j] = (1 - decay) * P[i,j] + decay * pher_init
Q: float
Parameter that determines the probability of selecting the next move deterministically
by selecting the move to the node that has the highest probability.
exp_heuristic: bool, default=True
Parameter indicating whether to exponentiate the heuristic matrix to the beta value. By
default it will not be assumed that the exponentiation has been precomputed.
Returns
-------
:list
List of ant instances that have traversed the graph to be explored.
"""
if exp_heuristic:
H_beta = np.power(H, beta)
else:
H_beta = H
new_ants = []
for ant in ants:
new_ant = Ant(l_min=ant.min_length, l_max=ant.max_length, graph_type=ant.representation,
check_params=False)
new_ant.initAdjMatrix(n_nodes=graph.shape[0])
init_pos = randomInit(graph) if new_ant.initial_position is None else new_ant.initial_position
new_ant.setInitialPosition(init_pos)
# Generate random walk
new_ant.visited_nodes = getRandomWalk(
initial_position=new_ant.initial_position, current_path=new_ant.visited_nodes,
adjacency_matrix=graph, heuristic=H_beta, pheromone=P, alpha=alpha,
max_lim=new_ant.max_length, Q=Q, R=None)
# Local pheromone update
if new_ant.representation == 'u':
updateUndLocalPher(ant=new_ant, P=P, decay=decay, init_val=pher_init)
else:
updateDirLocalPher(ant=new_ant, P=P, decay=decay, init_val=pher_init)
new_ants.append(new_ant)
return new_ants
|
36cdd05ab9b403203603851403b85e502c614905
| 34,488 |
def get_comments(user):
"""Returns all of the user's comments"""
comments = ''
for comment in user.get_comments(limit=None):
comments = comments + ' ' + comment.body
return comments
|
cb095b78a2ac304c849e75a7b988c581a826aef1
| 34,491 |
def is_supported():
"""Get whether Dialite is supported for the current platform."""
return not isinstance(_the_app, StubApp)
|
67846bf87c1b7c3ceda7bf6eb1090f55f9bd6453
| 34,492 |
def _check_axes_range(axes, ndim):
"""
Check axes are within the number of dimensions of tensor x and normalize the negative axes.
Args:
axes (Union[int, tuple(int), list(int)]): Axes of the tensor.
ndim (int): The number of dimensions of the tensor.
Return:
Axes (Union[int, tuple(int)]). If input is integer, return integer, else tuple.
"""
if not isinstance(axes, (int, tuple, list)):
raise TypeError(
f"int, tuple(int) or list(int) expected, but got {type(axes)}.")
low = -ndim
up = ndim - 1
if low > up:
raise ValueError(
f"Lower bound {low} and upper bound {up} of axes are not allowed.")
if isinstance(axes, int):
if axes < low or axes > up:
raise TypeError(
f"axis {axes} is out of bounds for tensor of dimension {ndim}.")
return axes if axes >= 0 else axes + ndim
new_axes = []
for item in axes:
if not isinstance(item, int):
raise TypeError(
f"int in tuple or list expected, but got {type(item)}.")
if item < low or item > up:
raise TypeError(
f"axis {item} in {axes} is out of bounds for tensor of dimension {ndim}.")
new_axes.append(item if item >= 0 else item + ndim)
return tuple(new_axes)
|
135b7b729b5e207c5b97de14a32035d291765c2e
| 34,493 |
import numpy as np
import matplotlib as mpl
def cm_LU_USGS24():
"""Land use colormap (24 USGS categories).
https://github.com/blaylockbk/pyBKB_v2/blob/master/BB_cmap/landuse_colormap.py
# MUST SET VMAX AND VMIN LIKE THIS TO SCALE COLOR RANGE CORRECTLY
cm, labels = cm_LU_USGS24()
plt.pcolormesh(LU_INDEX, cmap=cm, vmin=1, vmax=len(labels) + 1)
"""
C = np.array([[1, 0, 0], # 1 Urban and Built-up Land
[1, 1, 0], # 2 Dryland Cropland and Pasture
[1, 1, .2], # 3 Irrigated Cropland and Pasture
[1, 1, .3], # 4 Mixed Dryland/Irrigated Cropland and Pasture
[.7, .9, .3], # 5 Cropland/Grassland Mosaic
[.7, .9, .3], # 6 Cropland/Woodland Mosaic
[0, 1, 0], # 7 Grassland
[.3, .7, 0], # 8 Shrubland
[.82, .41, .12], # 9 Mixed Shrubland/Grassland
[1, .84, .0], # 10 Savanna
[.2, .8, .4], # 11 Deciduous Broadleaf Forest
[.2, .8, .2], # 12 Deciduous Needleleaf Forest
[0, .4, .2], # 13 Evergreen Broadleaf Forest
[0, .4, 0], # 14 Evergreen Needleleaf Forest
[.2, .6, .2], # 15 Mixed Forests
[0, 0, .88], # 16 Water Bodies
[0, 1, 1], # 17 Herbaceous Wetlands
[.2, 1, 1], # 18 Wooded Wetlands
[.914, .914, .7], # 19 Barren or Sparsely Vegetated
[.86, .08, .23], # 20 Herbaceous Tundra
[.86, .08, .23], # 21 Wooded Tundra
[.97, .5, .31], # 22 Mixed Tundra
[.91, .59, .48], # 23 Barren Tundra
[1, 1, 1]]) # 24 Snow and Ice
cm = mpl.colors.ListedColormap(C)
labels = ['Urban and Built-up Land',
'Dryland Cropland and Pasture',
'Irrigated Cropland and Pasture',
'Mixed Dryland/Irrigated Cropland and Pasture',
'Cropland/Grassland Mosaic',
'Cropland/Woodland Mosaic',
'Grassland',
'Shrubland',
'Mixed Shrubland/Grassland',
'Savanna',
'Deciduous Broadleaf Forest',
'Deciduous Needleleaf Forest',
'Evergreen Broadleaf',
'Evergreen Needleleaf',
'Mixed Forest',
'Water Bodies',
'Herbaceous Wetland',
'Wooded Wetland',
'Barren or Sparsely Vegetated',
'Herbaceous Tundra',
'Wooded Tundra',
'Mixed Tundra',
'Bare Ground Tundra',
'Snow or Ice']
return cm, labels
|
4f5eb1cce0f4fddc88712d8e2863a8b8593e3cba
| 34,495 |
import functools
def lazy_value(fce):
""" The decorator for only once computed value. Same a functools.cache,
but there is no need to take care of arguments.
"""
x = []
""" Hack for staticmethod decorator, which is in fact binded by the descriptor protocol """
if isinstance(fce, staticmethod):
fce = fce.__func__
@functools.wraps(fce)
def cached_fce():
if not x:
x.append(fce())
return x[0]
def clear():
x.clear()
cached_fce.clear = clear
return cached_fce
|
dd983e23b036f5d2c7fbe98e048973c894378d97
| 34,496 |
def identity_by_descent(dataset, maf=None, bounded=True, min=None, max=None) -> Table:
"""Compute matrix of identity-by-descent estimates.
.. include:: ../_templates/req_tvariant.rst
.. include:: ../_templates/req_biallelic.rst
Examples
--------
To calculate a full IBD matrix, using minor allele frequencies computed
from the dataset itself:
>>> hl.identity_by_descent(dataset)
To calculate an IBD matrix containing only pairs of samples with
``PI_HAT`` in :math:`[0.2, 0.9]`, using minor allele frequencies stored in
the row field `panel_maf`:
>>> hl.identity_by_descent(dataset, maf=dataset['panel_maf'], min=0.2, max=0.9)
Notes
-----
The implementation is based on the IBD algorithm described in the `PLINK
paper <http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1950838>`__.
:func:`.identity_by_descent` requires the dataset to be biallelic and does
not perform LD pruning. Linkage disequilibrium may bias the result so
consider filtering variants first.
The resulting :class:`.Table` entries have the type: *{ i: String,
j: String, ibd: { Z0: Double, Z1: Double, Z2: Double, PI_HAT: Double },
ibs0: Long, ibs1: Long, ibs2: Long }*. The key list is: *i: String, j:
String*.
Conceptually, the output is a symmetric, sample-by-sample matrix. The
output table has the following form
.. code-block:: text
i j ibd.Z0 ibd.Z1 ibd.Z2 ibd.PI_HAT ibs0 ibs1 ibs2
sample1 sample2 1.0000 0.0000 0.0000 0.0000 ...
sample1 sample3 1.0000 0.0000 0.0000 0.0000 ...
sample1 sample4 0.6807 0.0000 0.3193 0.3193 ...
sample1 sample5 0.1966 0.0000 0.8034 0.8034 ...
Parameters
----------
dataset : :class:`.MatrixTable`
Variant-keyed :class:`.MatrixTable` containing genotype information.
maf : :class:`.Float64Expression`, optional
Row-indexed expression for the minor allele frequency.
bounded : :obj:`bool`
Forces the estimations for ``Z0``, ``Z1``, ``Z2``, and ``PI_HAT`` to take
on biologically meaningful values (in the range [0,1]).
min : :obj:`float` or :obj:`None`
Sample pairs with a ``PI_HAT`` below this value will
not be included in the output. Must be in :math:`[0,1]`.
max : :obj:`float` or :obj:`None`
Sample pairs with a ``PI_HAT`` above this value will
not be included in the output. Must be in :math:`[0,1]`.
Returns
-------
:class:`.Table`
"""
if maf is not None:
analyze('identity_by_descent/maf', maf, dataset._row_indices)
dataset, _ = dataset._process_joins(maf)
maf = maf._ast.to_hql()
return Table(Env.hail().methods.IBD.apply(require_biallelic(dataset, 'ibd')._jvds,
joption(maf),
bounded,
joption(min),
joption(max)))
|
7d8a0a2015955d19046835dcbf0f2ae60e937aa4
| 34,497 |
def sanitize_cloud(cloud: str) -> str:
"""Fix rare cloud layer issues"""
if len(cloud) < 4:
return cloud
if not cloud[3].isdigit() and cloud[3] not in ("/", "-"):
# Bad "O": FEWO03 -> FEW003
if cloud[3] == "O":
cloud = cloud[:3] + "0" + cloud[4:]
# Move modifiers to end: BKNC015 -> BKN015C
elif cloud[3] != "U" and cloud[:4] not in ("BASE", "UNKN"):
cloud = cloud[:3] + cloud[4:] + cloud[3]
return cloud
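# Behaviour examples, derived from the two fixes above:
assert sanitize_cloud("FEWO03") == "FEW003"    # bad "O" becomes a zero
assert sanitize_cloud("BKNC015") == "BKN015C"  # modifier moved to the end
assert sanitize_cloud("SCT") == "SCT"          # short layers pass through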
|
7ec10be12ac2bc1a305688b31125af11a734c327
| 34,498 |
def list_permission(request):
"""显示权限名称的列表"""
tpl_name = 'user/list_permission.html'
perms = Permission.objects.all()
info = {'perms':perms}
return render(request,tpl_name,info)
|
1fc388516789ab594d12080b7d8beeb5afa50220
| 34,499 |
def compute_sphercial_clustering_loss(centroids, features, batch_memberships):
"""Compute repulsive chain loss
Args:
features: [B, C] l2 normalized data, cf) B: batch size, C: feature dimension
memberships (int): [B, ] current membership
centroids: [K, C] cluster centers
Returns:
loss
"""
batch_size = features.shape[0]
similarities = tf.matmul(features, tf.transpose(centroids, [1, 0])) # [B, K]
pos_idx = tf.concat([tf.expand_dims(tf.range(batch_size), axis=-1), tf.expand_dims(batch_memberships, axis=-1)], axis=-1)
pos_similarities = tf.gather_nd(similarities, pos_idx) # [B, ]
loss = -tf.reduce_mean(pos_similarities)
return loss
|
23fd09308af857f463c304b62dc0bb86dd89adb2
| 34,500 |
from typing import Union
import typing
def av_client_start(
tutk_platform_lib: CDLL,
session_id: Union[int, c_int],
username: bytes,
password: bytes,
timeout_secs: int,
channel_id: int,
) -> typing.Tuple[c_int, c_uint]:
"""Start an AV client.
Start an AV client by providing view account and password. It shall pass
the authentication of the AV server before receiving AV data.
:param tutk_platform_lib: the c library loaded from the 'load_library' call.
:param session_id: The session ID of the IOTC session to start AV client
:param username: The view account for authentication
:param password: The view password for authentication
:param timeout_secs: The timeout for this function in units of seconds.
Specifying it as 0 makes this AV client try the connection once,
and the process exits immediately if the connection
is unsuccessful.
:param channel_id: The channel ID of the channel to start AV client
:return: returns a tuple of two values:
- av_chan_id: AV channel ID if return value >= 0; error code if return value < 0
- pn_serv_type: The user-defined service type set when an AV server starts. Can be NULL.
"""
n_timeout = c_uint(timeout_secs)
user_defined_service_type = c_uint()
chan_id = c_uint8(channel_id)
av_chan_id = tutk_platform_lib.avClientStart(
session_id,
c_char_p(username),
c_char_p(password),
n_timeout,
pointer(user_defined_service_type),
chan_id,
)
return av_chan_id, user_defined_service_type
|
95058f251163e987ce6eecf3646413352f6b4a8c
| 34,501 |
import numpy as np
def balance_data(data):
""" DEPRECATED """
survived, died = count_survivors(data['Survived'])
remove_n = abs(survived - died)
dropId = []
if survived > died:
dropId = np.random.choice(data[data['Survived'] == 1].index, remove_n, replace=False)
elif survived < died:
dropId = np.random.choice(data[data['Survived'] == 0].index, remove_n, replace=False)
dataBalanced = data.drop(dropId)
return dataBalanced
|
34b502f52dfa70d2f48d0ad6735766fc969a41f9
| 34,503 |
def log_concave_rejection_sampler(
mode,
prob_fn,
dtype,
sample_shape=(),
distribution_minimum=None,
distribution_maximum=None,
seed=None):
"""Utility for rejection sampling from log-concave discrete distributions.
This utility constructs an easy-to-sample-from upper bound for a discrete
univariate log-concave distribution (for discrete univariate distributions, a
necessary and sufficient condition is p_k^2 >= p_{k-1} p_{k+1} for all k).
The method requires that the mode of the distribution is known. While a better
method can likely be derived for any given distribution, this method is
general and easy to implement. The expected number of iterations is bounded by
4+m, where m is the probability of the mode. For details, see [(Devroye,
1987)][1].
Args:
mode: Tensor, the mode[s] of the [batch of] distribution[s].
prob_fn: Python callable, counts -> prob(counts).
dtype: DType of the generated samples.
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
distribution_minimum: Tensor of type `dtype`. The minimum value
taken by the distribution. The `prob` method will only be called on values
greater than equal to the specified minimum. The shape must broadcast with
the batch shape of the distribution. If unspecified, the domain is treated
as unbounded below.
distribution_maximum: Tensor of type `dtype`. The maximum value
taken by the distribution. See `distribution_minimum` for details.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
#### References
[1] Luc Devroye. A Simple Generator for Discrete Log-Concave
Distributions. Computing, 1987.
"""
mode = tf.broadcast_to(
mode, ps.concat([sample_shape, ps.shape(mode)], axis=0))
mode_height = prob_fn(mode)
mode_shape = ps.shape(mode)
top_width = 1. + mode_height / 2. # w in ref [1].
top_fraction = top_width / (1 + top_width)
exponential_distribution = exponential.Exponential(
rate=tf.ones([], dtype=dtype)) # E in ref [1].
if distribution_minimum is None:
distribution_minimum = tf.constant(-np.inf, dtype)
if distribution_maximum is None:
distribution_maximum = tf.constant(np.inf, dtype)
def proposal(seed):
"""Proposal for log-concave rejection sampler."""
(top_lobe_fractions_seed,
exponential_samples_seed,
top_selector_seed,
rademacher_seed) = samplers.split_seed(seed, n=4)
top_lobe_fractions = samplers.uniform(
mode_shape, seed=top_lobe_fractions_seed, dtype=dtype) # V in ref [1].
top_offsets = top_lobe_fractions * top_width / mode_height
exponential_samples = exponential_distribution.sample(
mode_shape, seed=exponential_samples_seed) # E in ref [1].
exponential_height = (exponential_distribution.prob(exponential_samples) *
mode_height)
exponential_offsets = (top_width + exponential_samples) / mode_height
top_selector = samplers.uniform(
mode_shape, seed=top_selector_seed, dtype=dtype) # U in ref [1].
on_top_mask = (top_selector <= top_fraction)
unsigned_offsets = tf.where(on_top_mask, top_offsets, exponential_offsets)
offsets = tf.round(
tfp_random.rademacher(
mode_shape, seed=rademacher_seed, dtype=dtype) *
unsigned_offsets)
potential_samples = mode + offsets
envelope_height = tf.where(on_top_mask, mode_height, exponential_height)
return potential_samples, envelope_height
def target(values):
# Check for out of bounds rather than in bounds to avoid accidentally
# masking a `nan` value.
out_of_bounds_mask = (
(values < distribution_minimum) | (values > distribution_maximum))
in_bounds_values = tf.where(
out_of_bounds_mask, tf.constant(0., dtype=values.dtype), values)
probs = prob_fn(in_bounds_values)
return tf.where(out_of_bounds_mask, tf.zeros([], probs.dtype), probs)
return tf.stop_gradient(
brs.batched_rejection_sampler(
proposal, target, seed, dtype=dtype)[0]) # Discard `num_iters`.
|
6c109478f39ce3ec4e5d630bb5d7b6e8a81df76c
| 34,504 |
import time
def login(user_name, password):
""" This function takes user name and password to login into Github account.
Args:
:param user_name: Github username
:param password: Github password
Returns:
error_dict (dict): If it's logged in successfully or not with the error message.
"""
driver.get('https://github.com/login') # github login page
username = driver.find_element_by_xpath('//*[@id="login_field"]') # Input Username
username.send_keys(user_name)
time.sleep(2)
pass_word = driver.find_element_by_xpath('//*[@id="password"]') # Input Password
pass_word.send_keys(password)
sign_in = driver.find_element_by_xpath('//*[@id="login"]/div[4]/form/div/input[12]') # Click on sign in button
sign_in.click()
input("Please enter your verification code then hit enter, if it's logged in without it just hit enter ...")
error_dict = {'error': None, 'message': None} # Check if it's logged in successfully
try:
error = driver.find_element_by_xpath('//*[@id="js-flash-container"]/div/div').text # Check for login error
error_dict['error'] = True
error_dict['message'] = error
except NoSuchElementException:
error_dict['error'] = False
error_dict['message'] = None
return error_dict
|
fc119d4584ae721b72e916505445e1d760d04629
| 34,505 |
import numpy as np
def make_tukey(n, a=0.5):
"""Make a tukey window
Args:
n (int): Number of points
a (float, optional): Width of window. Defaults to 0.5.
Returns:
np.array: Weights
"""
x = np.arange(n)
weights = np.ones_like(x, dtype=float)
weights[0:int(a*n/2)] = 1/2*(1-np.cos(2*np.pi*x[0:int(a*n/2)]/(a*n)))
weights[-int(a*n/2):] = weights[0:int(a*n/2)][::-1]
return weights
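# Minimal usage sketch: taper the edges of a signal before an FFT
sig = np.sin(np.linspace(0, 20 * np.pi, 256))
tapered = sig * make_tukey(256, a=0.25)  # flat middle, cosine-tapered edges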
|
cb14f4cfa9495567954b6ea83eef0f2c568190e3
| 34,506 |
def linear_gradient(start_hex: str, finish_hex: str = "#FFFFFF", n: int = 11) -> color_dict:
""" returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
including the number sign ("#FFFFFF") """
# Starting and ending colors in RGB form
s = hex_to_rgb(start_hex)
f = hex_to_rgb(finish_hex)
# Initialize a list of the output colors with the starting color
rgb_list = [s]
# Calculate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [
int(s[j] + (float(t) / (n - 1)) * (f[j] - s[j]))
for j in range(3)
]
# Add it to our list of output colors
rgb_list.append(curr_vector)
return color_dict(rgb_list)
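# Sketch (hex_to_rgb and color_dict are module-level helpers here):
# linear_gradient("#000000", "#FFFFFF", n=3) interpolates the RGB vectors
# [0, 0, 0] -> [127, 127, 127] -> [255, 255, 255]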
|
4df9e2d3f2b921e0826ffd51dde208a30bd78e96
| 34,507 |
def bast_label_to_number(annotation, label):
"""Given an annotation file and a label convert it to its corresponding
number for the BAST dataset."""
return bast_label_to_number_dict(annotation).get(label, None)
|
d62451272d84d1749d13070cda3b27825bb7c0d8
| 34,508 |
def function_with_exception(val):
"""Return a `val` if it is non-negative"""
if val < 0:
raise ValueError("val cannot be negative.")
return val
|
f9a4a50879477a5e45fcb9a9d54a10695fc526df
| 34,510 |
def _c(obj: model.Component):
"""Convert :class:`.Component`."""
# Raises AttributeError if the concept_identity is missing
return str(obj.concept_identity.id)
|
369d1f7b797e0a7162337f36d621593c7cf1f42e
| 34,511 |
import copy
def get_power_instance(wildcards):
"""
Returns a formatted template
Arguments:
wildcards - dict of URL template parameters (e.g. rest_base, ident)
used to fill in the '@odata' paths of the template
"""
c = copy.deepcopy(_TEMPLATE)
c['@odata.context'] = c['@odata.context'].format(**wildcards)
c['@odata.id'] = c['@odata.id'].format(**wildcards)
c['PowerControl'][0]['@odata.id']=c['PowerControl'][0]['@odata.id'].format(**wildcards)
c['PowerControl'][0]['RelatedItem'][0]['@odata.id']=c['PowerControl'][0]['RelatedItem'][0]['@odata.id'].format(**wildcards)
c['PowerControl'][0]['RelatedItem'][1]['@odata.id']=c['PowerControl'][0]['RelatedItem'][1]['@odata.id'].format(**wildcards)
c['Voltages'][0]['@odata.id']=c['Voltages'][0]['@odata.id'].format(**wildcards)
c['Voltages'][0]['RelatedItem'][0]['@odata.id']=c['Voltages'][0]['RelatedItem'][0]['@odata.id'].format(**wildcards)
c['Voltages'][0]['RelatedItem'][1]['@odata.id']=c['Voltages'][0]['RelatedItem'][1]['@odata.id'].format(**wildcards)
c['Voltages'][1]['@odata.id']=c['Voltages'][1]['@odata.id'].format(**wildcards)
c['Voltages'][1]['RelatedItem'][0]['@odata.id']=c['Voltages'][1]['RelatedItem'][0]['@odata.id'].format(**wildcards)
c['Voltages'][1]['RelatedItem'][1]['@odata.id']=c['Voltages'][1]['RelatedItem'][1]['@odata.id'].format(**wildcards)
c['PowerSupplies'][0]['@odata.id']=c['PowerSupplies'][0]['@odata.id'].format(**wildcards)
c['PowerSupplies'][0]['RelatedItem'][0]['@odata.id']=c['PowerSupplies'][0]['RelatedItem'][0]['@odata.id'].format(**wildcards)
c['PowerSupplies'][1]['@odata.id']=c['PowerSupplies'][1]['@odata.id'].format(**wildcards)
c['PowerSupplies'][1]['RelatedItem'][0]['@odata.id']=c['PowerSupplies'][1]['RelatedItem'][0]['@odata.id'].format(**wildcards)
c['PowerSupplies'][2]['@odata.id']=c['PowerSupplies'][2]['@odata.id'].format(**wildcards)
return c
|
f7fe438dcba027b68cf65919458568c742bf9735
| 34,512 |
import pyspark.sql.functions as fn
from pyspark.sql import Window
def get_user_history(history_type):
"""
:param history_type: 'click' or 'top' or 'play'
:return:
"""
spark.sql('use {}'.format(user_pre_db))
if history_type == 'play':
tmp_df = spark.sql('select user_id, movie_id, cate_id, datetime, play_time from merge_action').where(
'play_time > 0')
else:
tmp_df = spark.sql(
'select user_id, movie_id, cate_id, {}, datetime from merge_action'.format(history_type)) \
.where('{} = 1'.format(history_type)).drop('{}'.format(history_type))
# tmp_df.show()
# 对重复观看的电影过滤只留下时间最近的
ret = tmp_df.groupby('user_id', 'movie_id') \
.agg(fn.max('cate_id').alias('cate_id'), fn.max('datetime').alias('datetime'))
# Add movie titles to make the results easier to inspect
title_df = spark.sql('select id movie_id, title, year from {}.db_asset'.format(movie_original_db))
ret = ret.join(title_df, on='movie_id', how='left')
# Rank viewing records by timestamp (most recent first)
ret = ret.withColumn('sort_time', fn.row_number().over(
Window.partitionBy('user_id', 'cate_id').orderBy(ret['datetime'].desc())))
return ret
|
346c2615961db564bda0336c679e08079b8a1ec8
| 34,513 |
def display_on_frame(self, image, left_curverad, right_curverad, car_off):
"""
Display texts on image using passed values
"""
font = cv2.FONT_HERSHEY_COMPLEX
curve_disp_txt = 'Curvature: Right = ' + str(np.round(right_curverad,2)) + 'm, Left = ' + str(np.round(left_curverad,2)) + 'm'
off_disp_txt = 'Car off by ' + str(np.round(car_off,2)) + 'm'
cv2.putText(image, curve_disp_txt, (30, 60), font, 1, (0,0,0), 2)
cv2.putText(image, off_disp_txt, (30, 110), font, 1, (0,0,0), 2)
return image
|
c8e86350b7018d1b2eba94db8929a870cf6f8bc5
| 34,514 |
from pathlib import Path
def path_is_relative_to(path: Path, other: PathOrStr) -> bool:
"""
This is copied from :meth:`pathlib.PurePath.is_relative_to` to support older Python
versions (before 3.9, when this method was introduced).
"""
try:
path.relative_to(other)
return True
except ValueError:
return False
|
577c591d519e4b0364f202306151411694d81138
| 34,515 |
def generate_tokens( parser, lines, flags, keywords):
"""
This is a rewrite of pypy.module.parser.pytokenize.generate_tokens since
the original function is not RPYTHON (uses yield)
It was also slightly modified to generate Token instances instead
of the original 5-tuples -- it's now a 4-tuple of
* the Token instance
* the whole line as a string
* the line number (the real one, counting continuation lines)
* the position on the line of the end of the token.
Original docstring ::
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string.
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
#for line in lines:
# print repr(line)
#print '------------------- flags=%s ---->' % flags
assert isinstance( parser, Parser )
token_list = []
lnum = parenlev = continued = 0
namechars = NAMECHARS
numchars = NUMCHARS
contstr, needcont = '', 0
contline = None
indents = [0]
last_comment = ''
# make the annotator happy
pos = -1
lines.append('') # XXX HACK probably not needed
# look for the bom (byte-order marker) for utf-8
# make the annotator happy
endDFA = automata.DFA([], [])
# make the annotator happy
line = ''
for line in lines:
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF while scanning triple-quoted string", line,
(lnum-1, 0), token_list)
endmatch = endDFA.recognize(line)
if endmatch >= 0:
pos = end = endmatch
tok = Token(parser, parser.tokens['STRING'], contstr + line[:end])
token_list.append((tok, line, lnum, pos))
last_comment = ''
# token_list.append((STRING, contstr + line[:end],
# strstart, (lnum, end), contline + line))
contstr, needcont = '', 0
contline = None
elif (needcont and not line.endswith('\\\n') and
not line.endswith('\\\r\n')):
tok = Token(parser, parser.tokens['ERRORTOKEN'], contstr + line)
token_list.append((tok, line, lnum, pos))
last_comment = ''
# token_list.append((ERRORTOKEN, contstr + line,
# strstart, (lnum, len(line)), contline))
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n':
# skip comments or blank lines
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
tok = Token(parser, parser.tokens['INDENT'], line[:pos])
token_list.append((tok, line, lnum, pos))
last_comment = ''
while column < indents[-1]:
indents = indents[:-1]
tok = Token(parser, parser.tokens['DEDENT'], '')
token_list.append((tok, line, lnum, pos))
last_comment = ''
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", line,
(lnum, 0), token_list)
continued = 0
while pos < max:
pseudomatch = pseudoDFA.recognize(line, pos)
if pseudomatch >= 0: # scan for tokens
# JDR: Modified
start = whiteSpaceDFA.recognize(line, pos)
if start < 0:
start = pos
end = pseudomatch
if start == end:
# Nothing matched!!!
raise TokenError("Unknown character", line,
(lnum, start), token_list)
pos = end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
tok = Token(parser, parser.tokens['NUMBER'], token)
token_list.append((tok, line, lnum, pos))
last_comment = ''
elif initial in '\r\n':
if parenlev <= 0:
tok = Token(parser, parser.tokens['NEWLINE'], token)
# XXX YUCK !
tok.value = last_comment
token_list.append((tok, line, lnum, pos))
last_comment = ''
elif initial == '#':
# skip comment
last_comment = token
elif token in triple_quoted:
endDFA = endDFAs[token]
endmatch = endDFA.recognize(line, pos)
if endmatch >= 0: # all on one line
pos = endmatch
token = line[start:pos]
tok = Token(parser, parser.tokens['STRING'], token)
token_list.append((tok, line, lnum, pos))
last_comment = ''
else:
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
endDFA = (endDFAs[initial] or endDFAs[token[1]] or
endDFAs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
tok = Token(parser, parser.tokens['STRING'], token)
token_list.append((tok, line, lnum, pos))
last_comment = ''
elif initial in namechars: # ordinary name
tok = Token(parser, parser.tokens['NAME'], token)
if token not in keywords:
tok.isKeyword = False
token_list.append((tok, line, lnum, pos))
last_comment = ''
elif initial == '\\': # continued stmt
continued = 1
# lnum -= 1 disabled: count continuation lines separately
else:
if initial in '([{':
parenlev = parenlev + 1
elif initial in ')]}':
parenlev = parenlev - 1
if parenlev < 0:
raise TokenError("unmatched '%s'" % initial, line,
(lnum-1, 0), token_list)
if token in parser.tok_values:
punct = parser.tok_values[token]
tok = Token(parser, punct, None)
else:
tok = Token(parser, parser.tokens['OP'], token)
token_list.append((tok, line, lnum, pos))
last_comment = ''
else:
start = whiteSpaceDFA.recognize(line, pos)
if start < 0:
start = pos
if start<max and line[start] in single_quoted:
raise TokenError("EOL while scanning single-quoted string", line,
(lnum, start), token_list)
tok = Token(parser, parser.tokens['ERRORTOKEN'], line[pos])
token_list.append((tok, line, lnum, pos))
last_comment = ''
pos = pos + 1
lnum -= 1
if not (flags & PyCF_DONT_IMPLY_DEDENT):
if token_list and token_list[-1][0].codename != parser.tokens['NEWLINE']:
token_list.append((Token(parser, parser.tokens['NEWLINE'], ''), '\n', lnum, 0))
for indent in indents[1:]: # pop remaining indent levels
tok = Token(parser, parser.tokens['DEDENT'], '')
token_list.append((tok, line, lnum, pos))
#if token_list and token_list[-1][0].codename != pytoken.NEWLINE:
token_list.append((Token(parser, parser.tokens['NEWLINE'], ''), '\n', lnum, 0))
tok = Token(parser, parser.tokens['ENDMARKER'], '',)
token_list.append((tok, line, lnum, pos))
#for t in token_list:
# print '%20s %-25s %d' % (pytoken.tok_name.get(t[0].codename, '?'), t[0], t[-2])
#print '----------------------------------------- pyparser/pythonlexer.py'
return token_list
|
c09fea9f20879017d87e695c63d6a6cca637a0ae
| 34,516 |
def get_days_word_ending(days: int) -> str:
"""Определяет окончание слова "дня", "дней" и т.д. в зависимости от входящего числа"""
last_numeral = days % 10
prelast_numeral = days % 100
prelast_numeral = prelast_numeral // 10
if prelast_numeral == 1:
return 'дней'
if last_numeral == 0 or last_numeral >= 5:
return 'дней'
elif last_numeral == 1:
return 'день'
else:
return 'дня'
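# Examples of the three Russian endings:
assert get_days_word_ending(1) == 'день'
assert get_days_word_ending(3) == 'дня'
assert get_days_word_ending(5) == 'дней'
assert get_days_word_ending(11) == 'дней'  # 11-19 always take 'дней'
assert get_days_word_ending(21) == 'день'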
|
4f2887b438ab8909b29a0fa572c5735477da2262
| 34,517 |
def _init_hoomd_rb_torsions(structure, ref_energy=1.0):
"""RB dihedrals (implemented as OPLS dihedrals in HOOMD)."""
# Identify the unique dihedral types before setting
dihedral_type_params = {}
for dihedral in structure.rb_torsions:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = "-".join((t1, t2, t3, t4))
else:
dihedral_type = "-".join((t4, t3, t2, t1))
if dihedral_type not in dihedral_type_params:
dihedral_type_params[dihedral_type] = dihedral.type
# Set the hoomd parameter
rb_torsion = hoomd.md.dihedral.opls()
for name, dihedral_type in dihedral_type_params.items():
F_coeffs = RB_to_OPLS(
dihedral_type.c0 / ref_energy,
dihedral_type.c1 / ref_energy,
dihedral_type.c2 / ref_energy,
dihedral_type.c3 / ref_energy,
dihedral_type.c4 / ref_energy,
dihedral_type.c5 / ref_energy,
error_if_outside_tolerance=False,
)
rb_torsion.dihedral_coeff.set(
name, k1=F_coeffs[1], k2=F_coeffs[2], k3=F_coeffs[3], k4=F_coeffs[4]
)
return rb_torsion
|
f71d955f1f2294ed1c6a5b041ed75dea1028bc34
| 34,518 |
import numpy as np
def moments(data):
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments """
total = data.sum()
X, Y = np.indices(data.shape)
x = (X * data).sum() / total
y = (Y * data).sum() / total
col = data[:, int(y)]
width_x = np.sqrt(
np.abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum()
)
row = data[int(x), :]
width_y = np.sqrt(
np.abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum()
)
height = data.max()
return height, x, y, width_x, width_y
|
371cb1c1b8419eb9a604300c87a7a9084a13e46f
| 34,519 |
def compute_throughputs(batch_size, gpu_times):
"""
Given a batch size and an array of time running on GPU,
returns an array of throughputs
"""
return [batch_size / gpu_times[i] * 1000 for i in range(len(gpu_times))]
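# Example, assuming gpu_times are per-batch latencies in milliseconds:
assert compute_throughputs(32, [10.0, 20.0]) == [3200.0, 1600.0]  # items/second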
|
14b20806ad8e21126c460613a99f9b68bce31ef0
| 34,520 |
import torch
def load_trained_model(args, config_class, model_class, label_count_info, n_gpu, device):
"""Load trained model for evaluation"""
model = _build_pretrained_model(args, config_class, model_class, label_count_info)
model.to(device)
model_path = args.output_dir + "/epoch_%d" % args.load_n_epoch
ckpt_state = torch.load(model_path)['model_state']
model.load_state_dict(ckpt_state)
return model
|
d64ff9fdd7e37dfd577983618f57f2bf7f1107cb
| 34,521 |
import re
def parse_filter_field(string_filters) -> dict:
"""
Parses string with sets of name, value and comparison into a dict
Args:
string_filters: A string of the form 'name=<name1>,value=<value1>,comparison=<comparison1>;name=<name2>...'
Returns:
A dict of the form {<name1>:[{'Value': <value1>, 'Comparison': <comparison1>}],<name2>:[{...}]}
"""
filters = {}
regex = re.compile(r'name=([\w\d_:.-]+),value=([\w\d_:.-]*),comparison=([ /\w\d@_,.\*-]+)', flags=re.I)
regex_parse_result = regex.findall(string_filters)
if regex_parse_result:
for name, value, comparison in regex_parse_result:
filters.update({
name: [{
'Value': value,
'Comparison': comparison.upper()
}]
})
else:
demisto.info(f'could not parse filter: {string_filters}')
return filters
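# Example (the field names below are hypothetical):
# parse_filter_field('name=instance-id,value=i-0abc,comparison=equals')
# -> {'instance-id': [{'Value': 'i-0abc', 'Comparison': 'EQUALS'}]}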
|
adec2fa457a9f9d38770dce1ba9f50143480c884
| 34,522 |
def convert_to_one_hot(integer_vector, dtype=None, max_labels=None,
mode='stack', sparse=False):
"""
Formats a given array of target labels into a one-hot
vector.
Parameters
----------
max_labels : int, optional
The number of possible classes/labels. This means that
all labels should be < max_labels. Example: For MNIST
there are 10 numbers and hence max_labels = 10. If not
given it defaults to max(integer_vector) + 1.
dtype : dtype, optional
The desired dtype for the converted one-hot vectors.
Defaults to config.floatX if not given.
integer_vector : ndarray
A 1D array of targets, or a batch (2D array) where
each row is a list of targets.
mode : string
The way in which to convert the labels to arrays. Takes
three different options:
- "concatenate" : concatenates the one-hot vectors from
multiple labels
- "stack" : returns a matrix where each row is the
one-hot vector of a label
- "merge" : merges the one-hot vectors together to
form a vector where the elements are
the result of an indicator function
sparse : bool
If true then the return value is sparse matrix. Note that
if sparse is True, then mode cannot be 'stack' because
sparse matrices need to be 2D
Returns
-------
one_hot : NumPy array
Can be 1D-3D depending on settings. Normally, the first axis are
the different batch items, the second axis the labels, the third
axis the one_hot vectors. Can be dense or sparse.
"""
if dtype is None:
dtype = config.floatX
if isinstance(integer_vector, list):
integer_vector = np.array(integer_vector)
assert np.min(integer_vector) >= 0
assert integer_vector.ndim <= 2
    if max_labels is None:
        # Use np.max so both 1D and 2D inputs work; the builtin max() fails on 2D arrays.
        max_labels = np.max(integer_vector) + 1
return OneHotFormatter(max_labels, dtype=dtype).format(
integer_vector, mode=mode, sparse=sparse
)
|
0341090659f502f81adb368880af4659b1886236
| 34,524 |
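For intuition, a minimal numpy-only sketch of the three modes for 1D targets (the real function delegates to pylearn2's OneHotFormatter and theano's config.floatX):
import numpy as np

def one_hot_sketch(labels, max_labels=None, mode="stack"):
    # Illustrative reimplementation of the three modes for a 1D label array.
    labels = np.asarray(labels)
    if max_labels is None:
        max_labels = int(labels.max()) + 1
    eye = np.eye(max_labels, dtype=np.float32)
    if mode == "stack":        # one one-hot row per label
        return eye[labels]
    if mode == "concatenate":  # rows joined into one long vector
        return eye[labels].ravel()
    if mode == "merge":        # indicator function over all labels
        return eye[labels].max(axis=0)
    raise ValueError(mode)

one_hot_sketch([0, 2, 1], max_labels=3, mode="stack")  # three one-hot rows
one_hot_sketch([0, 2], max_labels=3, mode="merge")     # array([1., 0., 1.], dtype=float32)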
import itertools as itt
from typing import Sequence
def get_constraints(
guesses: list[str], calls: list[Sequence[Call]]
) -> tuple[dict[int, str], set[str], set[str]]:
"""Get constraints."""
positions = {}
appears = set()
no_appears = set()
for call, guess in zip(calls, guesses):
for i, c, x in zip(itt.count(), call, guess):
if c == "correct":
positions[i] = x
appears.add(x)
elif c == "somewhere":
appears.add(x)
elif c == "incorrect":
no_appears.add(x)
return positions, appears, no_appears
|
0163f17fb93d9b2244f6762dbafdddfa7eefb0da
| 34,525 |
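A small usage sketch, assuming Call values are the literal status strings the function matches on:
guesses = ["crane"]
calls = [("incorrect", "correct", "somewhere", "incorrect", "incorrect")]
positions, appears, no_appears = get_constraints(guesses, calls)
# positions == {1: 'r'}, appears == {'r', 'a'}, no_appears == {'c', 'n', 'e'}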
def apply_trained_model(
trained_model_object, input_table, feature_names, replace_missing,
standardize, transform_via_svd, replacement_dict_for_training_data=None,
standardization_dict_for_training_data=None,
svd_dict_for_training_data=None):
"""Uses a trained model to make predictions for binary classification.
N = number of examples
:param trained_model_object: Trained instance of scikit-learn model. Must
implement the method `predict_proba`.
:param input_table: N-row pandas DataFrame, where each row is one example
(data point).
:param feature_names: 1-D list with names of features (predictor variables).
Each feature must be a column of input_table.
:param replace_missing: See documentation for _preprocess_data_for_learning.
:param standardize: See doc for _preprocess_data_for_learning.
:param transform_via_svd: See doc for _preprocess_data_for_learning.
:param replacement_dict_for_training_data: See doc for
_preprocess_data_for_learning.
:param standardization_dict_for_training_data: See doc for
_preprocess_data_for_learning.
:param svd_dict_for_training_data: See doc for
_preprocess_data_for_learning.
:return: forecast_probabilities: length-N numpy array of forecast
probabilities. The [i]th value is the forecast probability of an event
(class = 1 rather than 0) for the [i]th example.
"""
_check_input_data_for_learning(
input_table=input_table, feature_names=feature_names, target_name=None)
preprocessed_input_table, preprocessed_feature_names, _, _, _ = (
_preprocess_data_for_learning(
input_table=input_table, feature_names=feature_names,
learning_phase=TESTING_PHASE, replace_missing=replace_missing,
standardize=standardize, transform_via_svd=transform_via_svd,
replacement_dict_for_training_data=
replacement_dict_for_training_data,
standardization_dict_for_training_data=
standardization_dict_for_training_data,
svd_dict_for_training_data=svd_dict_for_training_data))
    # DataFrame.as_matrix was removed in pandas 1.0; select the feature
    # columns explicitly and convert with to_numpy instead.
    return trained_model_object.predict_proba(
        preprocessed_input_table[preprocessed_feature_names].to_numpy())[:, 1]
|
0760fe4bf49ba7f5b7b00de46a7e5dc5cb01f5d7
| 34,526 |
def extract_host(host, level='backend', default_pool_name=False):
"""Extract Host, Backend or Pool information from host string.
:param host: String for host, which could include host@backend#pool info
:param level: Indicate which level of information should be extracted
from host string. Level can be 'host', 'backend' or 'pool',
default value is 'backend'
:param default_pool_name: this flag specify what to do if level == 'pool'
and there is no 'pool' info encoded in host
string. default_pool_name=True will return
DEFAULT_POOL_NAME, otherwise we return None.
Default value of this parameter is False.
:return: expected information, string or None
:raises: exception.InvalidVolume
For example:
host = 'HostA@BackendB#PoolC'
ret = extract_host(host, 'host')
# ret is 'HostA'
ret = extract_host(host, 'backend')
# ret is 'HostA@BackendB'
ret = extract_host(host, 'pool')
# ret is 'PoolC'
host = 'HostX@BackendY'
ret = extract_host(host, 'pool')
# ret is None
ret = extract_host(host, 'pool', True)
# ret is '_pool0'
"""
if host is None:
msg = _("volume is not assigned to a host")
raise exception.InvalidVolume(reason=msg)
if level == 'host':
# make sure pool is not included
hst = host.split('#')[0]
return hst.split('@')[0]
elif level == 'backend':
return host.split('#')[0]
elif level == 'pool':
lst = host.split('#')
if len(lst) == 2:
return lst[1]
elif default_pool_name is True:
return DEFAULT_POOL_NAME
else:
return None
|
01ee481a143ab32069e91bcb082e1e1eb229da71
| 34,527 |
from medtagger.api.rest import app
from typing import Any
def get_api_client() -> Any:
"""Return API client for testing purpose."""
app.testing = True
return app.test_client()
|
48d6897a4ed07f9fb87aee37e8e2ff69383f71f8
| 34,528 |
def verify_ospf3_metric(device,
interface,
metric,
max_time=60,
check_interval=10):
"""Verify the OSPF3 metric
Args:
device (obj): Device object
interface (str): Interface name
        metric (str): OSPF3 metric
        max_time (int, optional): Maximum verification time in seconds. Defaults to 60.
        check_interval (int, optional): Seconds between retries. Defaults to 10.
Returns:
True/False
Raises:
N/A
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
out = None
try:
out = device.parse('show ospf3 interface extensive')
except SchemaEmptyParserError:
timeout.sleep()
continue
# Example dictionary
# "ospf3-interface": [
# {
# "interface-cost": "5",
# "interface-name": "ge-0/0/0.0",
# },
ospf3_interface_list = out.q.get_values('ospf3-interface')
for ospf3_interface_dict in ospf3_interface_list:
#{'interface-name': 'ge-0/0/1.0'}
interface_ = ospf3_interface_dict.get('interface-name')
#{'interface-cost': '5'}
metric_ = ospf3_interface_dict.get('interface-cost')
            if interface_.lower() == interface.lower() and str(metric_) == str(
                metric):
                return True
        # Pause between polls so the device is not re-queried in a busy loop.
        timeout.sleep()
    return False
|
6c7f12304a4987293b9336bcbf6300951de75f09
| 34,529 |
import typing as t
import weakref
from jinja2.utils import LRUCache
def copy_cache(
cache: t.Optional[t.MutableMapping],
) -> t.Optional[t.MutableMapping[t.Tuple[weakref.ref, str], "Template"]]:
"""Create an empty copy of the given cache."""
if cache is None:
return None
if type(cache) is dict:
return {}
return LRUCache(cache.capacity)
|
4c0945da8471112a28bd34ad27286df55a23e968
| 34,532 |
from operator import attrgetter
def get_plugins(metadata):
"""Return the registered plugins.
Load and return all registered plugins.
"""
plugins = load_plugins()
if not plugins:
raise NoPluginsError("No plugins found")
results = []
for p in sorted(plugins.get_all(), key=attrgetter("name")):
if metadata:
data = {"name": p.name, "homepage": p.homepage}
hints = getattr(p, "hints", [])
if hints:
data["hints"] = hints
results.append(data)
else:
results.append(p.name)
return results
|
b88ddbc9e85bdc04d0f2a71458ab2545f925c88b
| 34,533 |
def _ConvertUnmatchedResultsToStringDict(unmatched_results):
"""Converts |unmatched_results| to a dict of strings for reporting.
Args:
unmatched_results: A dict mapping builder names (string) to lists of
data_types.Result who did not have a matching expectation.
Returns:
A string dictionary representation of |unmatched_results| in the following
format:
{
test_name: {
builder_name: {
step_name: [
individual_result_string_1,
individual_result_string_2,
...
],
...
},
...
},
...
}
"""
output_dict = {}
    for builder, results in unmatched_results.items():  # dict.iteritems is Python 2 only
for r in results:
builder_map = output_dict.setdefault(r.test, {})
step_map = builder_map.setdefault(builder, {})
result_str = 'Got "%s" on %s with tags [%s]' % (
r.actual_result, data_types.BuildLinkFromBuildId(
r.build_id), ' '.join(r.tags))
step_map.setdefault(r.step, []).append(result_str)
return output_dict
|
9c09a205bbcea48865e490f67342e0768f58555e
| 34,534 |
def get_color(r, g, b, a):
""" converts rgba values of 0 - 255 to the equivalent in 0 - 1 """
return (r / 255.0, g / 255.0, b / 255.0, a / 255.0)
|
78b4d71e04c7f3271462461641ec71e9fb849347
| 34,536 |
import re
def escape_version(version):
"""
Escaped version in wheel filename. Doesn't exactly follow
the escaping specification in :pep:`427#escaping-and-unicode`
because this conflicts with :pep:`440#local-version-identifiers`.
"""
return re.sub(r"[^\w\d.+]+", "_", version, flags=re.UNICODE)
|
ad382dc611a87b66db49f0332698618bda4cf86b
| 34,539 |
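Example: '+' is deliberately preserved so PEP 440 local version identifiers survive, while other separators collapse to underscores:
escape_version("1.0-rc.1+ubuntu/1")  # -> '1.0_rc.1+ubuntu_1'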
import pandas as pd
def union_degree(node_df, edge_df, degree_df, OD_full_path):
"""
Inputs:
node_df, edge_df - the node and edge specific df from get_specific_df
OD_full_path - output of shortest_path
degree_df - output of get degree df
Outputs:
edge_degree_df, node_degree_df - respective dfs merged with degree df
"""
print("=====Running union_degree=====")
node_degree_df = node_df.merge(degree_df, how="left", on="SA2_MAIN16")
origin_dest = list(zip(edge_df["origin"].values, edge_df["destination"].values))
    # Collect rows in a plain list and build the DataFrame once at the end;
    # DataFrame.append is deprecated and was removed in pandas 2.0.
    edge_rows = []
sa_to_i = {}
i_to_sa = {}
sa_to_data = {}
    for i, row in degree_df.iterrows():
i_to_sa[i] = row["SA2_MAIN16"]
sa_to_i[row["SA2_MAIN16"]] = i
sa_to_data[row["SA2_MAIN16"]] = row[['num_nodes', 'num_1degree','num_2degree', 'num_3degree', 'num_4degree', 'num_greater5degree']]
for o,d in origin_dest:
if o != d:
o_data = degree_df[degree_df["SA2_MAIN16"]==o]
d_data = degree_df[degree_df["SA2_MAIN16"]==d]
num_nodes_pth = 0
num_1degree_pth = 0
num_2degree_pth = 0
num_3degree_pth = 0
num_4degree_pth = 0
num_greater5degree_pth = 0
oid = sa_to_i[o]
did = sa_to_i[d]
for i in OD_full_path[(oid,did)]:
sa = i_to_sa[i]
num_nodes_pth += float(sa_to_data[sa][0])
num_1degree_pth += float(sa_to_data[sa][1])
num_2degree_pth += float(sa_to_data[sa][2])
num_3degree_pth += float(sa_to_data[sa][3])
num_4degree_pth += float(sa_to_data[sa][4])
num_greater5degree_pth += float(sa_to_data[sa][5])
num_nodes_x = float(o_data["num_nodes"].iloc[0])
num_1degree_x = float(o_data["num_1degree"].iloc[0])
num_2degree_x = float(o_data["num_2degree"].iloc[0])
num_3degree_x = float(o_data["num_3degree"].iloc[0])
num_4degree_x = float(o_data["num_4degree"].iloc[0])
num_greater5degree_x = float(o_data["num_greater5degree"].iloc[0])
num_nodes_y = float(d_data["num_nodes"].iloc[0])
num_1degree_y = float(d_data["num_1degree"].iloc[0])
num_2degree_y = float(d_data["num_2degree"].iloc[0])
num_3degree_y = float(d_data["num_3degree"].iloc[0])
num_4degree_y = float(d_data["num_4degree"].iloc[0])
num_greater5degree_y = float(d_data["num_greater5degree"].iloc[0])
        else:
            # Origin equals destination, so a single lookup covers both ends.
            o_data = degree_df[degree_df["SA2_MAIN16"] == o]
num_nodes_x = num_nodes_y = num_nodes_pth = float(o_data["num_nodes"].iloc[0])
num_1degree_x = num_1degree_y = num_1degree_pth = float(o_data["num_1degree"].iloc[0])
num_2degree_x = num_2degree_y = num_2degree_pth = float(o_data["num_2degree"].iloc[0])
num_3degree_x = num_3degree_y = num_3degree_pth = float(o_data["num_3degree"].iloc[0])
num_4degree_x = num_4degree_y = num_4degree_pth = float(o_data["num_4degree"].iloc[0])
num_greater5degree_x = num_greater5degree_y = num_greater5degree_pth = float(o_data["num_greater5degree"].iloc[0])
        edge_rows.append({"sa2_code16_x": o, "sa2_code16_y": d, "num_nodes_x": num_nodes_x,
                          "num_1degree_x": num_1degree_x, "num_2degree_x": num_2degree_x,
                          "num_3degree_x": num_3degree_x, "num_4degree_x": num_4degree_x,
                          "num_greater5degree_x": num_greater5degree_x,
                          "num_nodes_y": num_nodes_y,
                          "num_1degree_y": num_1degree_y, "num_2degree_y": num_2degree_y,
                          "num_3degree_y": num_3degree_y, "num_4degree_y": num_4degree_y,
                          "num_greater5degree_y": num_greater5degree_y,
                          "num_nodes_pth": num_nodes_pth,
                          "num_1degree_pth": num_1degree_pth,
                          "num_2degree_pth": num_2degree_pth,
                          "num_3degree_pth": num_3degree_pth,
                          "num_4degree_pth": num_4degree_pth,
                          "num_greater5degree_pth": num_greater5degree_pth})
    edge_degree_df = pd.DataFrame(edge_rows)
    edge_degree_df = edge_df.merge(edge_degree_df, how="left", on=["sa2_code16_x", "sa2_code16_y"])
print("=====DONE union_degree=====")
return edge_degree_df, node_degree_df
|
89e5a9298fbb9214049c19c0cbc725b3ed7f66a5
| 34,540 |
def paths(resources, resource_strip_prefix):
"""Return a list of path tuples (target, source) where:
target - is a path in the archive (with given prefix stripped off)
source - is an absolute path of the resource file
Tuple ordering is aligned with zipper format ie zip_path=file
Args:
resources: list of file objects
resource_strip_prefix: string to strip from resource path
"""
    return [(_target_path(resource, resource_strip_prefix), resource.path)
            for resource in resources]
|
2e0ebcee01cc7b61143ba6fc63bf44820e393fcb
| 34,541 |
from typing import Optional
import json
import yaml
from typing import cast
def get_content(extra_file: ExtraFileTypeDef) -> Optional[str]:
"""Get serialized content based on content_type.
Args:
extra_file: The extra file configuration.
Returns:
Serialized content based on the content_type.
"""
content_type = extra_file.get("content_type")
content = extra_file.get("content")
if content:
if isinstance(content, (dict, list)):
if content_type == "application/json":
return json.dumps(content)
if content_type == "text/yaml":
return yaml.safe_dump(content)
raise ValueError(
'"content_type" must be json or yaml if "content" is not a string'
)
if not isinstance(content, str):
raise TypeError(f"unsupported content: {type(content)}")
return cast(Optional[str], content)
|
0c8d9fee5c244f2e6a42f5dd53eb7c4c508b824c
| 34,542 |
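A usage sketch; plain dicts stand in for ExtraFileTypeDef here:
get_content({"content_type": "application/json", "content": {"a": 1}})  # '{"a": 1}'
get_content({"content_type": "text/yaml", "content": [1, 2]})           # '- 1\n- 2\n'
get_content({"content": "raw text"})                                    # 'raw text'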
def to_bs():
"""Example Registry Pipeline that loads existing pipelines"""
return [("pbsmrtpipe.pipelines.dev_04:pbsmrtpipe.tasks.dev_hello_world:0", "pbsmrtpipe.tasks.dev_txt_to_fasta:0")]
|
36c08e38ccdefda793708b88a41f61d5f05e4197
| 34,543 |
def get_total_reflectance(mco_filename):
"""
extract reflectance from mco file.
Attention: mco_filename specifies full path.
Returns: the reflectance
"""
return get_diffuse_reflectance(mco_filename) + \
get_specular_reflectance(mco_filename)
|
f3ea4830fc2c8ee45fd33c229bec3430f85c199a
| 34,544 |
import random
import numpy as np
def mutate(c, gp, pmut):
"""Mutation of chromosome
Based on the probability to mutate, it selects a random gene to change. It selects
a random other creature to take a new gene from a gene pool.
Args:
        c: creature's chromosome to be changed.
        gp: gene pool to select from.
        pmut: probability of mutation.
    Returns:
        Array(float): mutated list if the mutation draw succeeds, else the unchanged chromosome.
"""
    if np.random.choice([True, False], p=[pmut, 1 - pmut]):
s = random.randrange(0, len(c))
new_gene = gp[random.randrange(0, len(gp))]
newChromosome = c[:s] + [new_gene] + c[s+1:]
return newChromosome
else:
return c
|
d65435e5ffbdcbdd44199493c2b0599fc8b96417
| 34,545 |
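A quick sketch; pmut=1.0 forces the mutation branch so the outcome is deterministic in shape:
chromosome = [0.1, 0.2, 0.3]
gene_pool = [0.9, 0.8]
mutated = mutate(chromosome, gene_pool, pmut=1.0)
assert len(mutated) == len(chromosome)  # exactly one gene is swapped in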
import numpy
def _(shape: numpy.ndarray):
"""
If a shape is an array of points, compute the minima/maxima
or let it pass through if it's 1 dimensional & length 4
"""
    if shape.ndim == 1 and len(shape) == 4:
        return shape
return numpy.array([*shape.min(axis=0), *shape.max(axis=0)])
|
1ab490ece446a6afec11f90038a285adf9afa141
| 34,546 |
def selectHierarchy(node):
""" get the hierarchy of the current given object
:param node: the object to search through
:type node: string
:return: list of the objects children and current object included
:rtype: list
"""
ad = cmds.listRelatives(node, ad=1, f=1) or []
ad.append(node[0])
return ad[::-1]
|
c83c28b391e39f30dade59e73f89335a09fe807e
| 34,548 |
import logging
from django import http
def logErrorAndReturnOK(error_msg='Error found in Task'):
"""Logs the given error message and returns a HTTP OK response.
Args:
error_msg: Error message to log
"""
logging.error(error_msg)
return http.HttpResponse()
|
788f2b6b30e55e8375aab5fdb364f991aed7f71f
| 34,549 |
import numpy as np
def make_std_gaussian(var_names):
"""
Make a d dimensional standard Gaussian.
:param var_names: The variable name of the factor.
:type var_names: str list
:return: The standard Gaussian
:rtype: Gaussian
"""
assert var_names, "Error: var_names list cannot be empty."
dim = len(var_names)
    cov = np.eye(dim)
    mean = np.zeros([dim, 1])
    # The result is the standard (not a random) Gaussian, so name it accordingly.
    std_gaussian = Gaussian(cov=cov, mean=mean, log_weight=0.0, var_names=var_names)
    return std_gaussian
|
3c2c9f5a35786391d6ca1131e7a54767bc18a54d
| 34,550 |
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
:return: (bool) If there is any words with prefix stored in sub_s
"""
    # lis is the module-level word list loaded elsewhere; exit early to speed up the search.
    for word in lis:
        if word.startswith(sub_s):
            return True
    return False
|
e53f941ce35bfe1e4f9c9a20d8cf90541186397d
| 34,551 |
def sum_numbers_loop(n: int) -> int:
"""
BIG-O Notation = O(n)
"""
result = 0
for i in range(n + 1):
result += i
return result
|
d3265f5d7ceb277d105ab08f81ac4ce91fcc4153
| 34,552 |
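For comparison, Gauss's identity turns this into O(1):
def sum_numbers_formula(n: int) -> int:
    """BIG-O Notation = O(1), via n * (n + 1) / 2."""
    return n * (n + 1) // 2

assert sum_numbers_formula(100) == sum_numbers_loop(100) == 5050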
import itk
def range(imageOrFilter) :
"""Return the range of values in a image of in the output image of a filter
The minimum and maximum values are returned in a tuple: (min, max)
range() take care of updating the pipeline
"""
img = image(imageOrFilter)
img.UpdateOutputInformation()
img.Update()
comp = itk.MinimumMaximumImageCalculator[img].New(Image=img)
comp.Compute()
return (comp.GetMinimum(), comp.GetMaximum())
|
9bbe0fc68be26df35f062da3d4589c906050a10f
| 34,554 |
import uuid
def get_unique_id():
"""Generate and set unique identifier of length 10 integers"""
identifier = uuid.uuid4()
return str(identifier.int)[:10]
|
52ac119a062f454faad77df2fecb1f902bdd8530
| 34,555 |
import numpy as np
import scipy.signal
def erb_fir_filters(signal, edges, fs):
"""
Generates a bank of FIR filters for a given set of erb edges following the approach of
Hopkins et al. (2010), such that each filter has a response of -6 dB (relative to the
peak response) at the frequencies at which its response intersects with the
response of the two neighbouring filters. This code was adapted from Matlab code that
was kindly shared by Micheal Stone, which was written in 2006 while at the Auditory
Perception Group at University of Cambridge (group head Prof. Brian C.J. Moore).
    CAVEAT EMPTOR: this code was written for this specific TFS experiment only, no guarantees
    can be made for its correctness outside of the boundaries of this specific experiment.
To generate bandpass filters for analysis/synthesis, need some magic numbers to define
transition widths/time domain impulse response. These are chosen so that for nchans=16,
filters have similar or wider freq response to ERB, so that no long time impulse
response filters generated. Also set minimum number of taps to ensure that tails fall
to < -65 dB. Also adjust transition width so that for low number of channels, where
spacing is high, it still chooses moderately steep filters to get good channel separation
Two attempts are made to tame the tails:
* (a) in lpf design use kaiser with beta=6,
* (b) then after bpf has been generated by convolution of lpf with hpf, use beta=3.
"""
firlen0 = int(2*np.floor(2.5*fs/1000)) # 80 for 16k, minimum number of taps, MUST be EVEN (pref divisible by 4).
max_mult = 8 # Magic number of 8, controls maximum fir size likely for channel splits.
qnorm = 3.3 # Magic number of about 3.3.
ramp_res = 512 # Resolution of ramp to fade out tails of tilted synthesis filters.
# Ramp to attenuate lf tails of synthesis filter.
ramp = 0.5*(1.2 - 0.8*np.cos(0.5*np.pi*np.arange(ramp_res)/ramp_res))
nchans = len(edges) - 1 # Number of processing channels.
bp_lf = edges[:nchans] # Lower corner frequencies for channel splits.
bp_hf = edges[1:nchans+1] # Upper corner frequencies for channel splits.
bp_cf = 0.5*(bp_hf + bp_lf) # Centre frequencies/crossovers.
bp_bw = bp_hf - bp_lf # Bandwidth
bp_norm = np.sqrt(bp_bw) # Signal _gain_ per filter when filtering WHITE noise.
bp_norm = bp_norm[0]/bp_norm # Invert and make zero dB at DC.
# Shape of transition is dependent on difference between the adjacent cfs.
delta_cf = bp_cf[1:nchans] - bp_cf[:nchans-1]
# Measure of steepness, is 'q' around transition.
q = 0.5*(bp_cf[1:nchans] + bp_cf[:nchans-1])/(bp_cf[1:nchans] - bp_cf[:nchans-1])
# Effectively broaden transitions for high nchans, tighten for low
# nchans & reduces number of taps for low freq channels.
delta_cf = delta_cf*(q/qnorm)
# Preallocate bpfs. Note: extra +1 is added for
# a) Length of filter
# b) Centre tap (i.e. odd order filters).
#size = max_mult*(int(100*firlen0/delta_cf[0]) + int(100*firlen0/delta_cf[1])) + 2
#bpf = np.zeros((size, nchans))
bpf = []
# Channel envelope lpf is fixed for all channels, fc approx 1/2 octave above 32 Hz.
# No need to worry about phase since using bidirectional filters.
channel_lpfB, channel_lpfA = scipy.signal.ellip(3,.1,35,45/(fs/2))
# Generate analysis filterbank.
for index in range(nchans):
# Design filters, gradually increase window to reduce tails.
# Filter is designed in two stages, depending on adjacent channels,
# high pass first then low pass.
if index != 0:
# Index of middle position of lpf.
mid_lpf = int(len(lpf_chan)/2)
# High-pass is complementary of low-pass from previous channel.
hpf_chan = -lpf_chan
hpf_chan[mid_lpf] += 1
else:
# Special case at the start.
hpf_chan = np.atleast_1d(1)
# firlen adapts to transition width, last channel is irrelevant.
if index < nchans - 1:
# Must ALWAYS end up EVEN, so fir1() turns it ODD.
firlen = max(firlen0, max_mult*np.floor(firlen0*100/delta_cf[index]))
            # Design and tame tails.
lpf_chan = scipy.signal.firwin(firlen + 1, bp_hf[index]/(fs/2), window=('kaiser', 6.4))
bpf_len = len(hpf_chan) + len(lpf_chan) - 1
bpf_chan = np.zeros(bpf_len)
# Copy hpf_chan in.
bpf_chan[:len(hpf_chan)] = hpf_chan
# Convolve two halves of filter to make one.
bpf_chan = scipy.signal.lfilter(lpf_chan, 1, bpf_chan)
# No need to ensure 0dB gain at centre otherwise flat recombination does not work.
# Save filter size and filter for later.
# Add bandpass filter and centre frequency to output.
bpf.append([bpf_chan, bp_cf[index]])
return bpf
|
5ff1d3680e0f92e955d3a8314488b0c5900ba585
| 34,556 |
def restart(*args, **kwargs):
"""Return an instance of this to restart a workflow with the new input."""
return restart_type(args, kwargs)
|
1203873275248791046f2ef87f75276141b59a46
| 34,557 |
import numpy as np
def FourierMaskRandom(width, height, proportion, R):
"""
Create a random sampling pattern where each point is sampled from :
1 if r < R
1/(1-r)**2 if r > R
with 0<R<1
Args:
width (int): Size of output mask in x direction (width)
height (int): Size of output mask in y direction (height)
        proportion (float): Fraction of pixels sampled relative to the total number of pixels
R (float): Normed Radius (0-1) of ball around origin where all samples are included.
Returns:
np.ndarray: A numpy array (mask) depicting sampling pattern.
"""
s = np.zeros((width, height))
center = [s.shape[0] // 2, s.shape[1] // 2]
radius = np.sqrt(center[0] ** 2 + center[1] ** 2)
for i in range(s.shape[0]):
for j in range(s.shape[1]):
if (np.sqrt((i - center[0]) ** 2 + (j - center[1]) ** 2)) / radius < R:
s[i, j] = 1
else:
s[i, j] = (1 / (1 - (np.sqrt(((i - center[0]) ** 2) + ((j - center[1]) ** 2)))) ** 2)
    mask = np.random.binomial(1, s)
    total = np.sum(mask)  # avoid shadowing the builtin sum()
    p = total / (width * height)
    while p < proportion:
        draw = np.random.binomial(1, s)
        mask = mask + draw
        mask = np.where(mask > 0, 1, 0)
        total = np.sum(mask)
        p = total / (width * height)
    return mask
|
d4bee00f7eb2899bceba025f90f5c5389765a30a
| 34,558 |
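A usage sketch; the while loop keeps accumulating draws until at least `proportion` of the pixels are set:
mask = FourierMaskRandom(64, 64, proportion=0.3, R=0.1)
assert mask.shape == (64, 64)
assert mask.sum() / mask.size >= 0.3  # guaranteed by the resampling loop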
def get_shapes_from_group(group):
""" Gets all object shapes existing inside the given group
:param group: maya transform node
:type group: str
:return: list of shapes objects
:rtype: list str
.. important:: only mesh shapes are returned for now
"""
# checks if exists inside maya scene
if not cmds.objExists(group):
raise RuntimeError("Given element {} does not exists.".format(group))
# gets shapes inside the given group
    shapes = cmds.ls(group, dagObjects=True, noIntermediate=True,
                     exactType="mesh")
if not shapes:
raise ValueError("No shape(s) found under the given group: '{}'"
.format(group))
return shapes
|
053faff6240f75ab859e1c2bbd33604bdbde0c84
| 34,560 |
def bond_quatinty(price, investment, minimum_fraction=0.1):
"""
Computes the quantity of bonds purchased given the investment,
bond price per unit, and the minimum fraction of a bond that
can be purchased
:param investment: Amount of money that will be invested
:param minimum_fraction: Minimum fraction that can be purchased
:param price: Price of bond per unit
    :return: [quantity of bonds purchased, total value invested, error %]
"""
Qf = int(investment / (minimum_fraction * price))
Q = Qf * minimum_fraction
value = Q * price
error = (investment - value) / value * 100
return [Q, value, error]
|
7b42ae44d2e2db2229251088cf3645e965887e0d
| 34,561 |
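A worked example: with a unit price of 102.5 and 0.1 minimum fractions, 10000 buys int(10000 / 10.25) = 975 tenths, i.e. 97.5 bonds worth 9993.75:
quantity, value, error = bond_quatinty(price=102.5, investment=10000)
# quantity == 97.5, value == 9993.75, error ≈ 0.0625 (percent left uninvested)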
from copy import deepcopy
def find_template(templates_list, template_name):
"""
Function returns copy of a template with a name template_name from templates_list.
"""
result_list = [template for template in templates_list if template_name == template['name']]
return deepcopy(result_list[0]) if result_list else {"template":{}}
|
a766a5dcf6d451ff26bee94d0eb8a28ff5e98c60
| 34,563 |
import numpy as np
from scipy import fftpack
def dct_compress(X, n_components, window_size=128):
"""
Compress using the DCT
Parameters
----------
X : ndarray, shape=(n_samples,)
The input signal to compress. Should be 1-dimensional
n_components : int
The number of DCT components to keep. Setting n_components to about
.5 * window_size can give compression with fairly good reconstruction.
window_size : int
The input X is broken into windows of window_size, each of which are
then compressed with the DCT.
Returns
-------
X_compressed : ndarray, shape=(num_windows, window_size)
A 2D array of non-overlapping DCT coefficients. For use with uncompress
Reference
---------
http://nbviewer.ipython.org/github/craffel/crucialpython/blob/master/week3/stride_tricks.ipynb
"""
if len(X) % window_size != 0:
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_size
X_strided = X.reshape((num_frames, window_size))
X_dct = fftpack.dct(X_strided, norm='ortho')
if n_components is not None:
X_dct = X_dct[:, :n_components]
return X_dct
|
57cc70951c5e1e15964715383f876975d2e80844
| 34,564 |
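A usage sketch on a synthetic tone; 1024 samples with window_size=128 give 8 windows, and n_components=64 keeps half the coefficients per window:
import numpy as np

t = np.linspace(0, 1, 1024, endpoint=False)
x = np.sin(2 * np.pi * 8 * t)
X_dct = dct_compress(x, n_components=64, window_size=128)
assert X_dct.shape == (8, 64)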
import numpy as np
def nan_divide(a: NDArrayOrFloat, b: NDArrayOrFloat) -> NDArrayOrFloat:
"""Helper function to avoid divide by zero in arrays and floats.
Args:
a: Numerator
b: Denominator
Returns:
a/b replace div0 by np.nan
"""
bc_shp = check_broadcastable(a=a, b=b)
return np.divide(a, b, out=np.full(bc_shp, np.nan), where=b != 0.0)
|
ee3858c2ad3e2fdb87704eabb2acf523337f68d2
| 34,565 |
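A usage sketch, assuming check_broadcastable simply returns the broadcast shape of its inputs (here (2,)):
import numpy as np

nan_divide(np.array([1.0, 2.0]), np.array([0.0, 4.0]))
# -> array([nan, 0.5]); the zero denominator becomes np.nan instead of raising a warning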
def get_folder_sessions(id):
"""
Get a list of sessions in the given folder
To fetch all elements, this endpoint can be called multiple times,
starting at pageNumber = 0 and incrementing the page number until
no results are returned.
"""
url = "{}/{}/sessions".format(panopto_url("folders"), id)
return get_paged_resource(url, session_from_json)
|
94dc5dd76bccc5b072aa37aa85f941cbebe7a4e9
| 34,566 |
import numpy as np
def neighbours(image, i, j):
"""Define neighbours of the current pixel
Arguments:
image {numpy.ndarray} -- image
i {int} -- row coordinate of the pixel
j {int} -- column coordinate of the pixel
Returns:
list -- list of pixels that are neighbours of current pixel
"""
padding = (-1) * np.ones(
(image.shape[0] + 2,
image.shape[1] + 2)
)
padding[1:-1, 1:-1] = image
neighbours = [Pixel(i + 1, j, padding[i + 1, j]),
Pixel(i, j + 1, padding[i, j + 1]),
Pixel(i + 1, j + 2, padding[i + 1, j + 2]),
Pixel(i + 2, j + 1, padding[i + 2, j + 1])]
neighbours = list(filter(lambda x: x.label != -1, neighbours))
neighbours = [Pixel(x.i - 1, x.j - 1, x.label) for x in neighbours]
return neighbours
|
eb9b7f0358fee26865ae75b544b532cb5dc5cd51
| 34,567 |
def mutation_delete_musicplaylist(identifier: str):
"""Returns a mutation for deleting a musicplaylist object based on the identifier.
Arguments:
identifier: The unique identifier of the musicplaylist object.
Returns:
The string for the mutation for deleting the musicplaylist object based on the identifier.
"""
return format_mutation("DeleteMusicPlaylist", {"identifier": identifier})
|
bb552783f4b9b8d232dd2fae79823eb06fcf5c00
| 34,568 |
import re
def check_for_repeating_characters(tokens, character):
    """Flag long tokens that contain multiple runs of the given character(s).
    References:
        :func:`re.findall`
    Args:
        tokens (list of str): tokens to scan.
        character (str): character(s) whose repeated runs are searched for.
    Returns:
        list of tuple: (token, ' ') replacement pairs for tokens longer than
        12 characters containing more than two such runs.
    """
    replacements = []
    # A run of two or more of the given character(s). The original pattern
    # placed the quantifier inside the character class, which matched the
    # literal characters '{', '2', ',' and '}' instead of repetitions.
    pattern = "([" + re.escape(character) + "]{2,})"
    for token in tokens:
        if len(token) > 12:
            m_strings = re.findall(pattern, token)
            if len(m_strings) > 2:
                replacements.append((token, ' '))
    return replacements
|
9a421e634ad1cd330c2933fda84eb2430e7ef2ed
| 34,569 |
import hashlib
def md5sum(filename):
""" Compute the MD5 hash for a given filename """
blocksize = 65536
hasher = hashlib.md5()
with open(filename, "rb") as fid:
buf = fid.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = fid.read(blocksize)
return hasher.hexdigest()
|
4de15169bb672067fc4fcebeae930e56768b0881
| 34,570 |
from datetime import datetime
def time_stamp():
"""Current time stamp"""
ts = datetime.now()
return ts.strftime('%d-%b-%Y %H:%M:%S')
|
b17e6841b4c79cc1098123e154c9dd6e59098953
| 34,572 |
def fromDict(moduleDict):
"""Factory function recreating any moduleItem or moduleItem subtype from a dictionary-serialized representation.
If implemented correctly, this should act as the opposite to the original object's toDict method.
If the requested module is builtIn, return the builtIn module object of the same name.
    :param dict moduleDict: A dictionary containing all information necessary to create the desired moduleItem object
:return: The moduleItem object described in moduleDict
:rtype: moduleItem
"""
if moduleDict.get("builtIn", False):
return bbData.builtInModuleObjs[moduleDict["name"]]
else:
if "type" in moduleDict and moduleDict["type"] in typeConstructors:
return typeConstructors[moduleDict["type"]](moduleDict)
else:
return ModuleItem.fromDict(moduleDict)
|
f0c2fdecc8bd73a40bea92e810724e14fffad341
| 34,573 |