| content | sha1 | id |
| --- | --- | --- |
| string (lengths 35–762k) | string (length 40) | int64 (0–3.66M) |
def _login(client, user, users):
"""Login user and return url."""
login_user_via_session(client, user=User.query.get(user.id))
return user | 079136eb777957caf09c51c75ae5148ab2eea836 | 4,500 |
def search(request):
"""renders search page"""
queryset_list = Listing.objects.order_by('-list_date')
if 'keywords' in request.GET:
keywords = request.GET['keywords']
# Only filter if a value was provided
if keywords:
queryset_list = queryset_list.filter(
description__icontains=keywords)
if 'city' in request.GET:
city = request.GET['city']
# Only filter if a value was provided
if city:
queryset_list = queryset_list.filter(
city__iexact=city)
if 'state' in request.GET:
state = request.GET['state']
# Only filter if a value was provided
if state:
queryset_list = queryset_list.filter(
state__iexact=state)
if 'bedrooms' in request.GET:
bedrooms = request.GET['bedrooms']
# Here lte means less than or equal
if bedrooms:
queryset_list = queryset_list.filter(
bedrooms__lte=bedrooms)
if 'price' in request.GET:
price = request.GET['price']
# Here lte means less than or equal
if price:
queryset_list = queryset_list.filter(
price__lte=price)
context = {
"price_choices": price_choices,
"bedroom_choices": bedroom_choices,
"state_choices": state_choices,
"listings": queryset_list,
"values": request.GET
}
return render(request, 'listings/search.html', context) | a25d6e112d4054dfaf505aff5c4c36f07a95d989 | 4,501 |
def generate_cutout(butler, skymap, ra, dec, band='N708', data_type='deepCoadd',
half_size=10.0 * u.arcsec, psf=True, verbose=False):
"""Generate a single cutout image.
"""
if not isinstance(half_size, u.Quantity):
# Assume that this is in pixels
half_size_pix = int(half_size)
else:
half_size_pix = int(half_size.to('arcsec').value / PIXEL_SCALE)
if isinstance(ra, u.Quantity):
ra = ra.value
if isinstance(dec, u.Quantity):
dec = dec.value
# Width and height of the postage stamps
stamp_shape = (half_size_pix * 2 + 1, half_size_pix * 2 + 1)
# Make a list of (RA, Dec) that covers the cutout region
radec_list = np.array(
sky_cone(ra, dec, half_size_pix * PIXEL_SCALE * u.Unit('arcsec'), steps=50)).T
# Retrieve the Patches that cover the cutout region
img_patches = _get_patches(butler, skymap, radec_list, band, data_type=data_type)
if img_patches is None:
if verbose:
print('***** No data at {:.5f} {:.5f} *****'.format(ra, dec))
return None
# Coordinate of the image center
coord = geom.SpherePoint(ra * geom.degrees, dec * geom.degrees)
# Making the stacked cutout
cutouts = []
idx, bbox_sizes, bbox_origins = [], [], []
for img_p in img_patches:
# Generate cutout
cut, x0, y0 = _get_single_cutout(img_p, coord, half_size_pix)
cutouts.append(cut)
# Original lower corner pixel coordinate
bbox_origins.append([x0, y0])
# New lower corner pixel coordinate
xnew, ynew = cut.getBBox().getBeginX() - x0, cut.getBBox().getBeginY() - y0
idx.append([xnew, xnew + cut.getBBox().getWidth(),
ynew, ynew + cut.getBBox().getHeight()])
# Area of the cutout region on this patch in units of pixels
# The overlapping images will be reverse-ranked by this
bbox_sizes.append(cut.getBBox().getWidth() * cut.getBBox().getHeight())
# Stitch cutouts together with the largest bboxes inserted last
stamp = afwImage.MaskedImageF(geom.BoxI(geom.Point2I(0,0), geom.Extent2I(*stamp_shape)))
bbox_sorted_ind = np.argsort(bbox_sizes)
for i in bbox_sorted_ind:
masked_img = cutouts[i].getMaskedImage()
stamp[idx[i][0]: idx[i][1], idx[i][2]: idx[i][3]] = masked_img
# Build the new WCS of the cutout
stamp_wcs = _build_cutout_wcs(coord, cutouts, bbox_sorted_ind[-1], bbox_origins)
cutout = afwImage.ExposureF(stamp, stamp_wcs)
if bbox_sizes[bbox_sorted_ind[-1]] < (half_size_pix * 2 + 1) ** 2:
flag = 1
else:
flag = 2
# The final product of the cutout
if psf:
psf = _get_psf(cutouts[bbox_sorted_ind[-1]], coord)
return cutout, psf, flag
return cutout, flag | fdc42ad0dd0f357d53804a1f6fa43c93e86d2c0e | 4,502 |
def get_arraytypes ():
"""pygame.sndarray.get_arraytypes (): return tuple
Gets the array system types currently supported.
Checks which array system types are available and returns them as a
tuple of strings. The values of the tuple can be used directly in
the use_arraytype () method.
If no supported array system could be found, None will be returned.
"""
vals = []
if __hasnumeric:
vals.append ("numeric")
if __hasnumpy:
vals.append ("numpy")
if len (vals) == 0:
return None
return tuple (vals) | 192cb215fdc651543ac6ed4ce2f9cac2b0d3b4f4 | 4,503 |
def is_request_authentic(request, secret_token: bytes = conf.WEBHOOK_SECRET_TOKEN):
"""
Examine the given request object to determine if it was sent by an authorized source.
:param request: Request object to examine for authenticity
:type request: :class:`~chalice.app.Request`
:param secret_token: Shared secret token used to create payload hash
:type: :class:`~bytes`
:return: Response object indicating whether or not the request is authentic
:rtype: :class:`~lopper.response.Response`
"""
signature = request.headers.get('X-Hub-Signature')
if not signature:
return response.unauthorized('Missing "X-Hub-Signature" header')
return auth.is_authentic(signature, request.raw_body, secret_token) | 1ffceea3aebc0c038384c003edc93358e6faa9ed | 4,504 |
def circular_mask_string(centre_ra_dec_posns, aperture_radius="1arcmin"):
"""Get a mask string representing circular apertures about (x,y) tuples"""
mask = ''
if centre_ra_dec_posns is None:
return mask
for coords in centre_ra_dec_posns:
mask += 'circle [ [ {x} , {y}] , {r} ]\n'.format(
x=coords[0], y=coords[1], r=aperture_radius)
return mask | 04e66d160eb908f543990adf896e494226674c71 | 4,505 |
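A quick usage sketch (assuming the circular_mask_string function above is in scope); the coordinate strings below are hypothetical, and each (RA, Dec) tuple becomes one circle-region line:

positions = [("12h34m56s", "-45d06m07s"), ("01h02m03s", "+08d09m10s")]
print(circular_mask_string(positions, aperture_radius="30arcsec"))
# circle [ [ 12h34m56s , -45d06m07s] , 30arcsec ]
# circle [ [ 01h02m03s , +08d09m10s] , 30arcsec ]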
def dataset_hdf5(dataset, tmp_path):
"""Make an HDF5 dataset and write it to disk."""
path = str(tmp_path / 'test.h5')
dataset.write_hdf5(path, object_id_itemsize=10)
return path | 4a7920adf7715797561513fbb87593abf95f0bca | 4,506 |
def _make_indexable(iterable):
"""Ensure iterable supports indexing or convert to an indexable variant.
Convert sparse matrices to csr and other non-indexable iterables to arrays.
Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged.
Parameters
----------
iterable : {list, dataframe, array, sparse} or None
Object to be converted to an indexable iterable.
"""
if sp.issparse(iterable):
return iterable.tocsr()
elif hasattr(iterable, "__getitem__") or hasattr(iterable, "iloc"):
return iterable
elif iterable is None:
return iterable
return np.array(iterable) | 94be904009adfd3bf15de0f258b94a196a9612df | 4,507 |
import sys
import abc
def get_all_readers():
"""Get all the readers from the module."""
readers = []
for _, name in getmembers(sys.modules[__name__]):
if isinstance(name, abc.ABCMeta) and name.__name__ != 'Reader':
readers.append(name)
return readers | 50d8451ce70c2a2b5a4561952911f960c3667d02 | 4,508 |
def fib_for(n):
"""
Compute the Fibonacci sequence using a for loop
Parameters
----------
n : integer
the index of the desired Fibonacci number
Returns
-------
the nth Fibonacci number in the sequence
"""
res = [0, 1]
for i in range(n-1):
res.append(res[i] + res[i+1])
return res[n] | 1609a2d52f5308a6a9d496f13c1de3f7eee6332d | 4,509 |
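A small usage sketch (assuming fib_for from the snippet above is in scope); the expected values follow the standard 0, 1, 1, 2, 3, 5, ... sequence:

print(fib_for(0))   # 0
print(fib_for(1))   # 1
print(fib_for(5))   # 5
print(fib_for(10))  # 55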
import pickle
def command_factory(command):
"""A factory which returns functions for direct daemon communication.
This factory will create a function which sends a payload to the daemon
and returns the unpickled object which is returned by the daemon.
Args:
command (string): The type of payload this should be. This determines
as what kind of instruction this will be interpreted by the daemon.
Returns:
function: The created function.
"""
def communicate(body={}, root_dir=None):
"""Communicate with the daemon.
This function sends a payload to the daemon and returns the unpickled
object sent by the daemon.
Args:
body (dict): Any other arguments that should be put into the payload.
root_dir (str): The root directory in which we expect the daemon.
We need this to connect to the daemons socket.
Returns:
function: The returned payload.
"""
client = connect_socket(root_dir)
body['mode'] = command
# Delete the func entry we use to call the correct function with argparse
# as functions can't be pickled and this shouldn't be sent to the daemon.
if 'func' in body:
del body['func']
data_string = pickle.dumps(body, -1)
client.send(data_string)
# Receive message, unpickle and return it
response = receive_data(client)
return response
return communicate | ec84d6ab611d4edaf55ba0c365ed8526250c7ce1 | 4,510 |
def load_prepare_saif_data(threshold=0.25):
"""
Loads and prepares saif's data.
Parameters
----------
threshold : float
Only data with intensities equal to or
above this threshold will be kept (range 0-1).
Returns
-------
DataFrame : pd.DataFrame
Concatenated tweets with labels as a pandas DataFrame.
"""
files = get_saif_files()
df = pd.concat([pd.read_csv(f, sep='\t', index_col=0, names=['tweet', 'emotion', 'intensity']) for f in files], axis=0)
df = df[df['intensity'] >= threshold]
df.drop('intensity', axis=1, inplace=True)
return df | b2087d0558473069cf5985bd7e2b063162157df5 | 4,511 |
def nonmax_suppression(harris_resp, halfwidth=2):
"""
Takes a Harris response from an image, performs nonmax suppression, and outputs the x,y values
of the corners in the image.
:param harris_resp: Harris response for an image which is an array of the same shape as the original image.
:param halfwidth: The size of the padding to use in building the window (matrix) for nonmax suppression.
The window will have a total shape of (2*halfwidth+1, 2*halfwidth+1).
:return: Tuple of x and y coordinates for the corners that were found from the Harris response
after nonmax suppression.
"""
cornersx = []
cornersy = []
h, w = harris_resp.shape[:2]
boxlength = 2*halfwidth + 1
for i in range(halfwidth, h-halfwidth-1):
for j in range(halfwidth, w-halfwidth-1):
matrix = np.zeros((boxlength, boxlength))
for k in range(-halfwidth, halfwidth+1):
for l in range(-halfwidth, halfwidth+1):
matrix[k+halfwidth, l+halfwidth] = harris_resp[i+k, j+l]
if matrix[halfwidth, halfwidth] == 0:
pass
elif matrix[halfwidth, halfwidth] < np.amax(matrix):
matrix[halfwidth, halfwidth] = 0
else:
cornersx.append(j)
cornersy.append(i)
return cornersx, cornersy | b980ac9045728c8231749e7a43aa2f06d958d80c | 4,512 |
import uuid
import datetime
import pytz
def create_credit_request(course_key, provider_id, username):
"""
Initiate a request for credit from a credit provider.
This will return the parameters that the user's browser will need to POST
to the credit provider. It does NOT calculate the signature.
Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.
A provider can be configured either with *integration enabled* or not.
If automatic integration is disabled, this method will simply return
a URL to the credit provider and method set to "GET", so the student can
visit the URL and request credit directly. No database record will be created
to track these requests.
If automatic integration *is* enabled, then this will also return the parameters
that the user's browser will need to POST to the credit provider.
These parameters will be digitally signed using a secret key shared with the credit provider.
A database record will be created to track the request with a 32-character UUID.
The returned dictionary can be used by the user's browser to send a POST request to the credit provider.
If a pending request already exists, this function should return a request description with the same UUID.
(Other parameters, such as the user's full name may be different than the original request).
If a completed request (either accepted or rejected) already exists, this function will
raise an exception. Users are not allowed to make additional requests once a request
has been completed.
Arguments:
course_key (CourseKey): The identifier for the course.
provider_id (str): The identifier of the credit provider.
username (str): The user initiating the request.
Returns: dict
Raises:
UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
CreditProviderNotConfigured: The credit provider has not been configured for this course.
RequestAlreadyCompleted: The user has already submitted a request and received a response
from the credit provider.
Example Usage:
>>> create_credit_request(course.id, "hogwarts", "ron")
{
"url": "https://credit.example.com/request",
"method": "POST",
"parameters": {
"request_uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_org": "HogwartsX",
"course_num": "Potions101",
"course_run": "1T2015",
"final_grade": "0.95",
"user_username": "ron",
"user_email": "[email protected]",
"user_full_name": "Ron Weasley",
"user_mailing_address": "",
"user_country": "US",
"signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
}
}
"""
try:
user_eligibility = CreditEligibility.objects.select_related('course').get(
username=username,
course__course_key=course_key
)
credit_course = user_eligibility.course
credit_provider = CreditProvider.objects.get(provider_id=provider_id)
except CreditEligibility.DoesNotExist:
log.warning(
'User "%s" tried to initiate a request for credit in course "%s", '
'but the user is not eligible for credit',
username, course_key
)
raise UserIsNotEligible # lint-amnesty, pylint: disable=raise-missing-from
except CreditProvider.DoesNotExist:
log.error('Credit provider with ID "%s" has not been configured.', provider_id)
raise CreditProviderNotConfigured # lint-amnesty, pylint: disable=raise-missing-from
# Check if we've enabled automatic integration with the credit
# provider. If not, we'll show the user a link to a URL
# where the user can request credit directly from the provider.
# Note that we do NOT track these requests in our database,
# since the state would always be "pending" (we never hear back).
if not credit_provider.enable_integration:
return {
"url": credit_provider.provider_url,
"method": "GET",
"parameters": {}
}
else:
# If automatic credit integration is enabled, then try
# to retrieve the shared signature *before* creating the request.
# That way, if there's a misconfiguration, we won't have requests
# in our system that we know weren't sent to the provider.
shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
check_keys_exist(shared_secret_key, credit_provider.provider_id)
if isinstance(shared_secret_key, list):
# if keys exist, and keys are stored as a list
# then we know at least 1 is available for [0]
shared_secret_key = [key for key in shared_secret_key if key][0]
# Initiate a new request if one has not already been created
credit_request, created = CreditRequest.objects.get_or_create(
course=credit_course,
provider=credit_provider,
username=username,
)
# Check whether we've already gotten a response for a request,
# If so, we're not allowed to issue any further requests.
# Skip checking the status if we know that we just created this record.
if not created and credit_request.status != "pending":
log.warning(
(
'Cannot initiate credit request because the request with UUID "%s" '
'exists with status "%s"'
), credit_request.uuid, credit_request.status
)
raise RequestAlreadyCompleted
if created:
credit_request.uuid = uuid.uuid4().hex
# Retrieve user account and profile info
user = User.objects.select_related('profile').get(username=username)
# Retrieve the final grade from the eligibility table
try:
final_grade = CreditRequirementStatus.objects.get(
username=username,
requirement__namespace="grade",
requirement__name="grade",
requirement__course__course_key=course_key,
status="satisfied"
).reason["final_grade"]
# NOTE (CCB): Limiting the grade to seven characters is a hack for ASU.
if len(str(final_grade)) > 7:
final_grade = f'{final_grade:.5f}'
else:
final_grade = str(final_grade)
except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
msg = 'Could not retrieve final grade from the credit eligibility table for ' \
'user [{user_id}] in course [{course_key}].'.format(user_id=user.id, course_key=course_key)
log.exception(msg)
raise UserIsNotEligible(msg) # lint-amnesty, pylint: disable=raise-missing-from
# Getting the student's enrollment date
course_enrollment = CourseEnrollment.get_enrollment(user, course_key)
enrollment_date = course_enrollment.created if course_enrollment else ""
# Getting the student's course completion date
completion_date = get_last_exam_completion_date(course_key, username)
parameters = {
"request_uuid": credit_request.uuid,
"timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
"course_org": course_key.org,
"course_num": course_key.course,
"course_run": course_key.run,
"enrollment_timestamp": to_timestamp(enrollment_date) if enrollment_date else "",
"course_completion_timestamp": to_timestamp(completion_date) if completion_date else "",
"final_grade": final_grade,
"user_username": user.username,
"user_email": user.email,
"user_full_name": user.profile.name,
"user_mailing_address": "",
"user_country": (
user.profile.country.code
if user.profile.country.code is not None
else ""
),
}
credit_request.parameters = parameters
credit_request.save()
if created:
log.info('Created new request for credit with UUID "%s"', credit_request.uuid)
else:
log.info(
'Updated request for credit with UUID "%s" so the user can re-issue the request',
credit_request.uuid
)
# Sign the parameters using a secret key we share with the credit provider.
parameters["signature"] = signature(parameters, shared_secret_key)
return {
"url": credit_provider.provider_url,
"method": "POST",
"parameters": parameters
} | 8c9e763d1f10f9187c102746911dc242385100e8 | 4,513 |
import pathlib
def is_valid_project_root(project_root: pathlib.Path) -> bool:
"""Check if the project root is a valid trestle project root."""
if project_root is None or project_root == '' or len(project_root.parts) <= 0:
return False
trestle_dir = pathlib.Path.joinpath(project_root, const.TRESTLE_CONFIG_DIR)
if trestle_dir.exists() and trestle_dir.is_dir():
return True
return False | f35d63373d96ee34592e84f21296eadb3ebc6c98 | 4,514 |
def make_2D_predictions_into_one_hot_4D(prediction_2D, dim):
"""
This method gets 2D prediction of shape (#batch, #kpts)
and then returns 4D one_hot maps of shape
(#batch, #kpts, #dim, #dim)
"""
# getting one_hot maps of predicted locations
# one_hot_maps is of shape (#batch, #kpts, #dim * #dim)
one_hot_Maps = get_one_hot_map(prediction_2D, dim)
num_batch, num_kpt = prediction_2D.shape
one_hot_Maps_4D = one_hot_Maps.reshape(num_batch, num_kpt, dim, dim)
return one_hot_Maps_4D | 507d2fa9c52d5f8a1674e695f55928783a179082 | 4,515 |
import bisect
def display_code_marginal_densities(codes, num_hist_bins, log_prob=False,
ignore_vals=[], lines=True, overlaid=False, plot_title=""):
"""
Estimates the marginal density of coefficients of a code over some dataset
Parameters
----------
codes : ndarray(float32, size=(D, s))
The codes for a dataset of size D. These are the vectors x for each
sample from the dataset. The value s is the dimensionality of the code
num_hist_bins : int
The number of bins to use when we make a histogram estimate of the
empirical density.
log_prob : bool, optional
Display probabilities on a logarithmic scale. Useful for most sparse
codes. Default False.
ignore_vals : list, optional
A list of code values to ignore from the estimate. Default []. TODO:
make this more flexible so this can ignore values in a certain range.
lines : bool, optional
If true, plot the binned counts using a line rather than bars. This
can make it a lot easier to compare multiple datasets at once but
can look kind of jagged if there aren't many samples
overlaid : bool, optional
If true, then make a single plot with the marginal densities all overlaid
on top of each other. This gets messy for more than a few coefficients.
Alternatively, display the densities in their own separate plots.
Default False.
plot_title : str, optional
The title of the plot. Default ""
Returns
-------
code_density_figs : list
A list containing pyplot figures. Can be saved separately, or whatever
from the calling function
"""
def filter_code_vals(scalar_code_vals):
if len(ignore_vals) > 0:
keep_these_inds = scalar_code_vals != ignore_vals[0]
for i in range(1, len(ignore_vals)):
keep_these_inds = np.logical_and(keep_these_inds,
scalar_code_vals != ignore_vals[i])
return scalar_code_vals[keep_these_inds]
else:
return scalar_code_vals
# TODO: get this going for convolutional codes
if overlaid:
# there's just a single plot
fig = plt.figure(figsize=(15, 15))
fig.suptitle(plot_title, fontsize=15)
ax = plt.subplot(1, 1, 1)
blue=plt.get_cmap('Blues')
cmap_indeces = np.linspace(0.25, 1.0, codes.shape[1])
histogram_min = np.min(codes)
histogram_max = np.max(codes)
histogram_bin_edges = np.linspace(histogram_min, histogram_max,
num_hist_bins + 1)
histogram_bin_centers = (histogram_bin_edges[:-1] +
histogram_bin_edges[1:]) / 2
for de_idx in range(codes.shape[1]):
code = filter_code_vals(codes[:, de_idx])
counts, _ = np.histogram(code, histogram_bin_edges)
empirical_density = counts / np.sum(counts)
if lines:
ax.plot(histogram_bin_centers, empirical_density,
color=blue(cmap_indeces[de_idx]), linewidth=2,
label='Coeff idx ' + str(de_idx))
else:
ax.bar(histogram_bin_centers, empirical_density, align='center',
color=blue(cmap_indeces[de_idx]),
width=histogram_bin_centers[1]-histogram_bin_centers[0],
alpha=0.4, label='Coeff idx ' + str(de_idx))
ax.legend(fontsize=10)
if log_prob:
ax.set_yscale('log')
de_figs = [fig]
else:
# every coefficient gets its own subplot
max_de_per_fig = 20*20 # max 20x20 {d}ictionary {e}lements displayed
assert np.sqrt(max_de_per_fig) % 1 == 0, 'please pick a square number'
num_de = codes.shape[1]
num_de_figs = int(np.ceil(num_de / max_de_per_fig))
# this determines how many dictionary elements are arranged in a square
# grid within any given figure
if num_de_figs > 1:
de_per_fig = max_de_per_fig
else:
squares = [x**2 for x in range(1, int(np.sqrt(max_de_per_fig))+1)]
de_per_fig = squares[bisect.bisect_left(squares, num_de)]
plot_sidelength = int(np.sqrt(de_per_fig))
de_idx = 0
de_figs = []
for in_de_fig_idx in range(num_de_figs):
fig = plt.figure(figsize=(15, 15))
fig.suptitle(plot_title + ', fig {} of {}'.format(
in_de_fig_idx+1, num_de_figs), fontsize=15)
subplot_grid = gridspec.GridSpec(plot_sidelength, plot_sidelength)
fig_de_idx = de_idx % de_per_fig
while fig_de_idx < de_per_fig and de_idx < num_de:
if de_idx % 100 == 0:
print('plotted', de_idx, 'of', num_de, 'code coefficients')
ax = plt.Subplot(fig, subplot_grid[fig_de_idx])
code = filter_code_vals(codes[:, de_idx])
histogram_min = min(code)
histogram_max = max(code)
histogram_bin_edges = np.linspace(histogram_min, histogram_max,
num_hist_bins + 1)
histogram_bin_centers = (histogram_bin_edges[:-1] +
histogram_bin_edges[1:]) / 2
counts, _ = np.histogram(code, histogram_bin_edges)
empirical_density = counts / np.sum(counts)
max_density = np.max(empirical_density)
variance = np.var(code)
hist_kurtosis = kurtosis(empirical_density, fisher=False)
if lines:
ax.plot(histogram_bin_centers, empirical_density,
color='k', linewidth=1)
else:
ax.bar(histogram_bin_centers, empirical_density,
align='center', color='k',
width=histogram_bin_centers[1]-histogram_bin_centers[0])
ax.yaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax.xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax.tick_params(axis='both', which='major',
labelsize=5)
if histogram_min < 0.:
ax.set_xticks([histogram_min, 0., histogram_max])
else:
ax.set_xticks([histogram_min, histogram_max])
ax.text(0.1, 0.75, 'K: {:.1f}'.format(
hist_kurtosis), transform=ax.transAxes,
color='g', fontsize=5)
ax.text(0.95, 0.75, 'V: {:.1f}'.format(
variance), transform=ax.transAxes,
color='b', fontsize=5, horizontalalignment='right')
ax.set_yticks([0., max_density])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if log_prob:
ax.set_yscale('log')
fig.add_subplot(ax)
fig_de_idx += 1
de_idx += 1
subplot_grid.tight_layout(figure=fig, pad=3.25, w_pad=0.2, h_pad=0.2)
de_figs.append(fig)
return de_figs | 2085e007c25b855dda78fa910c2c93dc4c2b0767 | 4,516 |
def distance(a, b):
"""
"""
dimensions = len(a)
_sum = 0
for dimension in range(dimensions):
difference_sq = (a[dimension] - b[dimension]) ** 2
_sum += difference_sq
return sqrt(_sum) | 20acd50d7e3ab7f512f3e9ab9920f76b805043a9 | 4,517 |
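A minimal check of the Euclidean distance helper (assuming the distance function above is in scope and that sqrt comes from the math module, which the snippet does not import itself):

from math import sqrt  # needed by the snippet above
print(distance((0, 0), (3, 4)))        # 5.0 (the 3-4-5 right triangle)
print(distance((1, 2, 3), (1, 2, 3)))  # 0.0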
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck)):
return True
return False | 8c6b5f59797646b27301a25a40d753b6c404b418 | 4,518 |
def playlist_500_fixture():
"""Load payload for playlist 500 and return it."""
return load_fixture("plex/playlist_500.xml") | 834efe057419f56b626c40430b68860fd5e0db1e | 4,519 |
def strip_output(nb):
"""strip the outputs from a notebook object"""
nb.metadata.pop('signature', None)
for cell in nb.cells:
if 'outputs' in cell:
cell['outputs'] = []
if 'prompt_number' in cell:
cell['prompt_number'] = None
return nb | 6339100f6897951bad4f91f8b8d86d1e5a68f459 | 4,520 |
def get_neighbors_general(status: CachingDataStructure, key: tuple) -> list:
"""
Returns a list of tuples of all coordinates that are direct neighbors,
meaning the index is valid and they are not KNOWN
"""
coords = []
for key in get_direct_neighbour_coords_general(key):
if status.valid_index(*key) and not status[key]: # Not known
coords.append(key)
return coords | 46a2b3aa91e424122982011ccaa684c2d9cf83f2 | 4,521 |
def transit_params(time):
"""
Dummy transit parameters for time series simulations
Parameters
----------
time: sequence
The time axis of the transit observation
Returns
-------
batman.transitmodel.TransitModel
The transit model
"""
params = batman.TransitParams()
params.t0 = 0. # time of inferior conjunction
params.per = 5.7214742 # orbital period (days)
params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)
params.inc = 89.8 # orbital inclination (in degrees)
params.ecc = 0. # eccentricity
params.w = 90. # longitude of periastron (in degrees)
params.limb_dark = 'quadratic' # limb darkening profile to use
params.u = [0.1, 0.1] # limb darkening coefficients
params.rp = 0. # planet radius (placeholder)
tmodel = batman.TransitModel(params, time)
tmodel.teff = 3500 # effective temperature of the host star
tmodel.logg = 5 # log surface gravity of the host star
tmodel.feh = 0 # metallicity of the host star
return tmodel | 5e74a32ef4077a990d44edb15d66e56e00925666 | 4,522 |
def actions(__INPUT):
"""
Return a list of the possible moves of the matrix
"""
MOVIMIENTOS = []
m = eval(__INPUT)
i = 0
while 0 not in m[i]:
i += 1
# Blank space (#0)
j = m[i].index(0);
if i > 0:
# ACTION: MOVE UP
m[i][j], m[i-1][j] = m[i-1][j], m[i][j];
MOVIMIENTOS.append(str(m))
m[i][j], m[i-1][j] = m[i-1][j], m[i][j];
if i < 3:
# ACTION: MOVE DOWN
m[i][j], m[i+1][j] = m[i+1][j], m[i][j]
MOVIMIENTOS.append(str(m))
m[i][j], m[i+1][j] = m[i+1][j], m[i][j]
if j > 0:
# ACTION: MOVE LEFT
m[i][j], m[i][j-1] = m[i][j-1], m[i][j]
MOVIMIENTOS.append(str(m))
m[i][j], m[i][j-1] = m[i][j-1], m[i][j]
if j < 3:
# ACTION: MOVE RIGHT
m[i][j], m[i][j+1] = m[i][j+1], m[i][j]
MOVIMIENTOS.append(str(m))
m[i][j], m[i][j+1] = m[i][j+1], m[i][j]
return MOVIMIENTOS | 46875f83d7f50bbd107be8ad5d926397960ca513 | 4,523 |
def get_massif_geom(massif: str) -> WKBElement:
"""process to get the massifs geometries:
* go on the meteofrance bra website
* then get the html "area" element
* then convert it to fake GeoJSON (wrong coordinates)
* then open it in qgis.
* Select *all* the geom of the layer.
* rotate -90°
* swap X and Y coordinates (with plugin)
* use grass v.transform with various x, y scale and rotation until you get what you want.
"""
with resource_stream("nivo_api", "cli/data/all_massifs.geojson") as fp:
gj = geojson.load(fp)
for obj in gj.features:
if obj.properties["label"].upper() == massif.upper():
return from_shape(shape(obj.geometry), 4326)
else:
raise ValueError(f"Massif {massif} geometry cannot be found.") | 194ef4274dfd240af65b61781f39464e0cde4b3d | 4,524 |
def _to_arrow(x):
"""Move data to arrow format"""
if isinstance(x, cudf.DataFrame):
return x.to_arrow()
else:
return pa.Table.from_pandas(x, preserve_index=False) | c88c40d2d35f681ff268347c36e2cae4a52576d0 | 4,525 |
def painel(request):
""" Exibe o painel do usuário. """
return render(request, "lancamentos/painel.html") | ff40db732402077eb6678f8586582877d96e3ede | 4,526 |
from shutil import which as shwhich
import os
def which(program, mode=os.F_OK | os.X_OK, path=None):
"""
Mimics the Unix utility which.
For python3.3+, shutil.which provides all of the required functionality.
An implementation is provided in case shutil.which does
not exist.
:param program: (required) string
Name of program (can be fully-qualified path as well)
:param mode: (optional) integer flag bits
Permissions to check for in the executable
Default: os.F_OK (file exists) | os.X_OK (executable file)
:param path: (optional) string
A custom path list to check against. Implementation taken from
shutil.py.
Returns:
A fully qualified path to program as resolved by path or
user environment.
Returns None when program can not be resolved.
"""
try:
return shwhich(program, mode, path)
except ImportError:
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
for pathdir in path:
pathdir = pathdir.strip('"')
exe_file = os.path.join(pathdir, program)
if is_exe(exe_file):
return exe_file
return None | fbba58ba489db2c2813e4aadf9781c35d6955f0f | 4,527 |
def q_statistic(y, c1, c2):
""" Q-Statistic.
Parameters
----------
y : numpy.array
Target sample.
c1 : numpy.array
Output of the first classifier.
c2 : numpy.array
Output of the second classifier.
Returns
-------
float
Return the Q-Statistic measure between the classifiers 'c1' and 'c2'.
Q-Statistic takes value in the range of [-1, 1]:
- is zero if 'c1' and 'c2' are independent.
- is positive if 'c1' and 'c2' make similar predictions.
- is negative if 'c1' and 'c2' make different predictions.
References
----------
.. [1] Zhi-Hua Zhou. (2012), pp 105:
Ensemble Methods Foundations and Algorithms
Chapman & Hall/CRC Machine Learning & Pattern Recognition Series.
"""
a, b, c, d = contingency_table(y, c1, c2)
return (a * d - b * c) / (a * d + b * c) | 83f83bffcb469ff45c22a1f35efc6e60ccdd0d2d | 4,528 |
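The contingency_table helper is not shown above, so as an illustration of the formula only, here is the same arithmetic on a hand-written 2x2 contingency table (a = both correct, d = both wrong, b and c = disagreements); the counts are hypothetical:

a, b, c, d = 40, 10, 5, 45
print((a * d - b * c) / (a * d + b * c))  # 0.9459... -> the two classifiers make very similar predictions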
def nan_helper(y):
"""Helper to handle indices and logical indices of NaNs.
Input:
- y, 1d numpy array with possible NaNs
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
Example:
>>> # linear interpolation of NaNs
>>> nans, x= nan_helper(y)
>>> y[nans]= np.interp(x(nans), x(~nans), y[~nans])
Taken from: https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
"""
return np.isnan(y), lambda z: z.nonzero()[0] | b6bd981369403a5542f8bcefb3e8a68315fb697f | 4,529 |
import sys
def lid_mle_amsaleg(knn_distances):
"""
Local intrinsic dimension (LID) estimators from the papers,
1. Amsaleg, Laurent, et al. "Estimating local intrinsic dimensionality." Proceedings of the 21th ACM SIGKDD
International Conference on Knowledge Discovery and Data Mining. ACM, 2015.
2. Ma, Xingjun, et al. "Characterizing adversarial subspaces using local intrinsic dimensionality."
arXiv preprint arXiv:1801.02613 (2018).
:param knn_distances: numpy array of k nearest neighbor distances. Has shape `(n, k)` where `n` is the
number of points and `k` is the number of neighbors.
:return: `lid_est` is a numpy array of shape `(n, )` with the local intrinsic dimension estimates
in the neighborhood of each point.
"""
n, k = knn_distances.shape
# Replace 0 distances with a very small float value
knn_distances = np.clip(knn_distances, sys.float_info.min, None)
log_dist_ratio = np.log(knn_distances) - np.log(knn_distances[:, -1].reshape((n, 1)))
# lid_est = -k / np.sum(log_dist_ratio, axis=1)
lid_est = -(k - 1) / np.sum(log_dist_ratio, axis=1)
return lid_est | 2936489035f76a4825a3cb0a64e22febeaf6f541 | 4,530 |
def _rebase_bv(bv: BinaryView, dbg: DebugAdapter.DebugAdapter) -> BinaryView:
"""Get a rebased BinaryView for support of ASLR compatible binaries."""
new_base = dbg.target_base()
if core_ui_enabled() and new_base != bv.start:
dbg.quit()
raise Exception('[!] Can\'t do necessary rebase in GUI, try headless operation')
new_bv = bv.rebase(new_base)
if new_bv is None: # None if rebasing is unecessary
return bv
print('[*] Rebasing bv from 0x%x to 0x%x' % (bv.start, new_base))
new_bv.update_analysis_and_wait() # required after rebase
return new_bv | f02c031d65ab0758c63536f30dd9229f495b4014 | 4,531 |
import re
def convert_parameters(child, text=False, tail=False, **kwargs):
"""
Get child text or tail
:param child:
:param text:
:param tail:
:return:
"""
p = re.compile(r'\S')
# Remove empty info
child_text = child.text if child.text else ''
child_tail = child.tail if child.tail else ''
child_text = child_text if p.search(child_text) else ''
child_tail = child_tail if p.search(child_tail) else ''
# all
if text and tail:
convert_string = child_text + child_tail
# only_text
elif text:
convert_string = child_text
# only_tail
elif tail:
convert_string = child_tail
else:
convert_string = ''
# replace params
mybatis_param_list = get_params(child)
for mybatis_param in mybatis_param_list:
convert_value = ''
if mybatis_param.sql_param.is_function:
# eval function
convert_value = __eval_function(mybatis_param, **kwargs)
else:
# Type conversion
param_value = __get_param(mybatis_param.param_name, **kwargs)
print(mybatis_param.param_name+ ' value:'+str(param_value))
convert_value = PY_MYBATIS_TYPE_HANDLER.convert(mybatis_param.python_type, mybatis_param.sql_type,
param_value,
PyMybatisTypeHandler.PYTHON2SQL_TYPE_HANDLER_CONVERT_MODE)
#longjb modify 2021.10.29:
if convert_value!='null' and len(convert_value)>0 and( mybatis_param.sql_type=='raw' or mybatis_param.python_type=='raw'):
convert_value= convert_value.replace("'","`")
#convert_value= convert_value[1:len(convert_value)-1]
# print('name:'+str(mybatis_param.name))
# print('value:'+convert_value)
# print('sql_type:'+str(mybatis_param.sql_type))
# print('python_type:'+str(mybatis_param.python_type))
convert_string = convert_string.replace(mybatis_param.full_name, convert_value, 1)
# convert CDATA string
convert_cdata(convert_string)
return convert_string | 2421e515491f1256c56eb9ac6935a3c0c1de64be | 4,532 |
def Get_Country_Name_From_ISO3_Extended(countryISO):
"""
Creates a subset of the quick chart data for a specific country. The subset includes all those rows containing
the given country either as the origin or as the country of asylum.
"""
countryName = ""
# June-22 - This function has been updated to call .upper() without checking whether the data is null,
# so we need to wrap it in a try/except
try:
countryName = Country.get_country_name_from_iso3(countryISO)
except:
print("Failed to get the country from get_country_name_from_iso3.")
# Now lets try to find it for the three typical non-standard codes
if countryName is None or countryName == "":
print("Non-standard ISO code:", countryISO)
if countryISO == "UKN":
countryName = "Various / unknown"
elif countryISO == "STA":
countryName = "Stateless"
elif countryISO == "TIB":
countryName = "Tibetan"
else:
print("!!SERIOUS!! Unknown ISO code identified:", countryISO)
# Lets add a sensible default here...
countryName = "Various / unknown"
return countryName | d6e5b34223582f3a5a5ca20fd798ef5cfb1b1e8d | 4,533 |
import sys
def to_cpu(x):
""" Move cupy arrays (or dicts/lists of arrays) to CPU """
if len(sys.argv) > 1:
if type(x) == dict:
return {k:to_cpu(a) for (k, a) in x.items()}
elif type(x) == list:
return [to_cpu(a) for a in x]
else:
return cp.asnumpy(x)
else:
return x | 60ddb9774b5447862d8f5d1f605e9027c3f7c471 | 4,534 |
import re
import os
def parse_mapfile(map_file_path):
"""Parse the '.map' file"""
def parse_keyboard_function(f, line):
"""Parse keyboard-functions in the '.map' file"""
search = re.search(r'(0x\S+)\s+(0x\S+)', next(f))
position = int( search.group(1), 16 )
length = int( search.group(2), 16 )
search = re.search(r'0x\S+\s+(\S+)', next(f))
name = search.group(1)
return {
'keyboard-functions': {
name: {
'position': position,
'length': length,
},
},
}
def parse_layout_matrices(f, line):
"""Parse layout matrix information in the '.map' file"""
name = re.search(r'.progmem.data.(_kb_layout\S*)', line).group(1)
search = re.search(r'(0x\S+)\s+(0x\S+)', next(f))
position = int( search.group(1), 16 )
length = int( search.group(2), 16 )
return {
'layout-matrices': {
name: {
'position': position,
'length': length,
},
},
}
# --- parse_mapfile() ---
# normalize paths
map_file_path = os.path.abspath(map_file_path)
# check paths
if not os.path.exists(map_file_path):
raise ValueError("invalid 'map_file_path' given")
output = {}
f = open(map_file_path)
for line in f:
if re.search(r'^\s*\.text\.kbfun_', line):
dict_merge(output, parse_keyboard_function(f, line))
elif re.search(r'^\s*\.progmem\.data.*layout', line):
dict_merge(output, parse_layout_matrices(f, line))
return output | c84b10e95a212f7cc878d9d6a555a0b3ffc67728 | 4,535 |
def thetaG(t,t1,t2):
"""
Return a Gaussian pulse.
Arguments:
t -- time of the pulse
t1 -- initial time
t2 -- final time
Return:
theta -- Scalar or vector with the dimensions of t,
"""
tau = (t2-t1)/5
to = t1 + (t2-t1)/2
theta = (np.sqrt(np.pi)/(2*tau))*np.exp(-((t-to)/tau)**2)
return theta | 9e05358bfbf5f11b30f2a6b44504214ab4db4ea5 | 4,536 |
def choose_string(g1, g2):
"""Function used by merge_similar_guesses to choose between 2 possible
properties when they are strings.
If the 2 strings are similar, or one is contained in the other, the latter is returned
with an increased confidence.
If the 2 strings are dissimilar, the one with the higher confidence is returned, with
a weaker confidence.
Note that here, 'similar' means that 2 strings are either equal, or that they
differ very little, such as one string being the other one with the 'the' word
prepended to it.
>>> s(choose_string(('Hello', 0.75), ('World', 0.5)))
('Hello', 0.25)
>>> s(choose_string(('Hello', 0.5), ('hello', 0.5)))
('Hello', 0.75)
>>> s(choose_string(('Hello', 0.4), ('Hello World', 0.4)))
('Hello', 0.64)
>>> s(choose_string(('simpsons', 0.5), ('The Simpsons', 0.5)))
('The Simpsons', 0.75)
"""
v1, c1 = g1 # value, confidence
v2, c2 = g2
if not v1:
return g2
elif not v2:
return g1
v1, v2 = v1.strip(), v2.strip()
v1l, v2l = v1.lower(), v2.lower()
combined_prob = 1 - (1 - c1) * (1 - c2)
if v1l == v2l:
return (v1, combined_prob)
# check for common patterns
elif v1l == 'the ' + v2l:
return (v1, combined_prob)
elif v2l == 'the ' + v1l:
return (v2, combined_prob)
# if one string is contained in the other, return the shortest one
elif v2l in v1l:
return (v2, combined_prob)
elif v1l in v2l:
return (v1, combined_prob)
# in case of conflict, return the one with highest confidence
else:
if c1 > c2:
return (v1, c1 - c2)
else:
return (v2, c2 - c1) | e39a66c9f3f941b12225dde879bc92956694d2d0 | 4,537 |
def update_alert_command(client: MsClient, args: dict):
"""Updates properties of existing Alert.
Returns:
(str, dict, dict). Human readable, context, raw response
"""
alert_id = args.get('alert_id')
assigned_to = args.get('assigned_to')
status = args.get('status')
classification = args.get('classification')
determination = args.get('determination')
comment = args.get('comment')
args_list = [assigned_to, status, classification, determination, comment]
check_given_args_update_alert(args_list)
json_data, context = add_args_to_json_and_context(alert_id, assigned_to, status, classification, determination,
comment)
alert_response = client.update_alert(alert_id, json_data)
entry_context = {
'MicrosoftATP.Alert(val.ID === obj.ID)': context
}
human_readable = f'The alert {alert_id} has been updated successfully'
return human_readable, entry_context, alert_response | 237aa63f449dc6395390a26007b15123d5763874 | 4,538 |
def create_payment(context: SagaContext) -> SagaContext:
"""For testing purposes."""
context["payment"] = "payment"
return context | e96db6e57996d8f704e453bf14b8e4a3c63da1a6 | 4,539 |
import asyncio
async def TwitterAuthURLAPI(
request: Request,
current_user: User = Depends(User.getCurrentUser),
):
"""
Get the authorization URL for linking a Twitter account.<br>
Opening the authorization URL in a browser asks the user to approve the app connection; once the user approves, the flow returns to /api/twitter/callback.
This endpoint cannot be accessed unless a JWT-encoded access token is set in the request's Authorization: Bearer header.<br>
"""
# Set the callback URL
## For Twitter API OAuth integration, the callback URL must be registered in advance on the developer dashboard
## Since every KonomiTV server has a different URL, callbacks are first funneled through https://app.konomi.tv/api/redirect/twitter
## That endpoint redirects the request to the TwitterAuthCallbackAPI of the KonomiTV server given in the "server" parameter
## Finally, the KonomiTV server receives the redirect, so OAuth integration works even though the callback URL cannot be fixed in advance
## Unlike the other services, Twitter uses OAuth 1.0a, so the flow is quite different
## ref: https://github.com/tsukumijima/KonomiTV-API
callback_url = f'https://app.konomi.tv/api/redirect/twitter?server={request.url.scheme}://{request.url.netloc}/'
# Initialize OAuth1UserHandler and get the authorization URL
## Setting signin_with_twitter to True generates an oauth/authenticate authorization URL
## Unlike oauth/authorize, users who have already connected the app are redirected to the callback URL without having to re-approve
## ref: https://developer.twitter.com/ja/docs/authentication/api-reference/authenticate
try:
oauth_handler = tweepy.OAuth1UserHandler(Interlaced(1), Interlaced(2), callback=callback_url)
authorization_url = await asyncio.to_thread(oauth_handler.get_authorization_url, signin_with_twitter=True) # synchronous function, so run it on a thread
except tweepy.TweepyException:
raise HTTPException(
status_code = status.HTTP_422_UNPROCESSABLE_ENTITY,
detail = 'Failed to get Twitter authorization URL',
)
# Create a provisional TwitterAccount record
## Used to determine which user the oauth_token is tied to when the callback comes back
## TwitterAuthCallbackAPI cannot require authentication by design, so this is necessary to prevent arbitrary accounts from being linked
twitter_account = TwitterAccount()
twitter_account.user = current_user
twitter_account.name = 'Temporary'
twitter_account.screen_name = 'Temporary'
twitter_account.icon_url = 'Temporary'
twitter_account.access_token = oauth_handler.request_token['oauth_token'] # temporarily store oauth_token (same value as ?oauth_token= in the authorization URL)
twitter_account.access_token_secret = oauth_handler.request_token['oauth_token_secret'] # temporarily store oauth_token_secret
await twitter_account.save()
return {'authorization_url': authorization_url} | 2245c3b2d842c455fa9cb36390c84c8470c3b8e1 | 4,540 |
import random
def post_sunday(request):
"""Post Sunday Details, due on the date from the form"""
date_form = SelectDate(request.POST or None)
if request.method == 'POST':
if date_form.is_valid():
groups = DetailGroup.objects.filter(semester=get_semester())
details = settings.SUNDAY_DETAILS
g = [e for e in groups]
groups = g
random.shuffle(groups)
random.shuffle(details)
emails = []
for group in groups:
if len(details) <= 0:
break
group_detail = SundayGroupDetail(
group=group, due_date=date_form.cleaned_data['due_date']
)
group_detail.save()
for _ in range(group.size()):
if len(details) <= 0:
break
d = details.pop()
det = SundayDetail(
short_description=d['name'],
long_description="\n".join(d['tasks']),
due_date=date_form.cleaned_data['due_date']
)
det.save()
group_detail.details.add(det)
group_detail.save()
emails.append(
build_sunday_detail_email(
group_detail,
request.scheme + "://" + request.get_host()
)
)
det_manager_email = Position.objects.get(
title=Position.PositionChoices.DETAIL_MANAGER
).brothers.first().user.email
for (subject, message, to) in emails:
send_mail(subject, message, det_manager_email, to)
context = {
'form': date_form,
'date': 'sunday',
}
return render(request, 'detail-manager/post-details.html', context) | 84787109d0981920bbced7a734d0b67c84d4a9a7 | 4,541 |
from typing import Dict
from typing import List
def reconstruct(lvl: Level, flow_dict: Dict[int, Dict[int, int]], info: Dict[int, NodeInfo]) -> List[List[int]]:
"""Reconstruct agent paths from the given flow and node information"""
paths: List[List[int]] = [[]] * len(lvl.scenario.agents)
start_flows = flow_dict[0]
agent_starts = {agent.origin: i for i, agent in enumerate(lvl.scenario.agents)}
for n in start_flows:
if start_flows[n] > 0:
agent = agent_starts[info[n].id]
paths[agent] = follow_path(n, flow_dict, info)
return paths | d792ed6b937f49177ac85609ada3edb2089e2642 | 4,542 |
import traceback
def arch_explain_instruction(bv, instruction, lifted_il_instrs):
""" Returns the explanation string from explanations_en.json, formatted with the preprocessed instruction token list """
if instruction is None:
return False, []
parsed = parse_instruction(bv, instruction, lifted_il_instrs)
if len(parsed) == 0:
return False, []
out = []
out_bool = False
for name in parsed:
name = find_proper_name(name).lower()
if name in explanations:
try:
# Get the string from the JSON and format it
out_bool = out_bool or name not in dont_supersede_llil
out.append(explanations[name].format(instr=preprocess(bv, parsed, lifted_il_instrs, name)))
except (AttributeError, KeyError):
# Usually a bad format string. Shouldn't show up unless something truly weird happens.
log_error("Bad Format String in binja_explain_instruction")
traceback.print_exc()
out.append(name)
return out_bool, out | 57c6146ac06317df8a9e9b846a279fa950a970bc | 4,543 |
from lpot.ux.utils.workload.workload import Workload
from typing import Dict
from typing import Any
import os
import json
def execute_tuning(data: Dict[str, Any]) -> dict:
"""Get configuration."""
if not str(data.get("id", "")):
message = "Missing request id."
mq.post_error(
"tuning_finish",
{"message": message, "code": 404},
)
raise Exception(message)
request_id: str = data["id"]
workdir = Workdir(request_id=request_id)
workload_path: str = workdir.workload_path
try:
workload_data = _load_json_as_dict(
os.path.join(workload_path, "workload.json"),
)
except Exception as err:
mq.post_error(
"tuning_finish",
{"message": repr(err), "code": 404, "id": request_id},
)
raise err
workload = Workload(workload_data)
tuning: Tuning = Tuning(workload, workdir.workload_path, workdir.template_path)
send_data = {
"message": "started",
"id": request_id,
"size_fp32": get_size(tuning.model_path),
}
workdir.clean_logs()
workdir.update_data(
request_id=request_id,
model_path=tuning.model_path,
model_output_path=tuning.model_output_path,
status="wip",
)
executor = Executor(
workspace_path=workload_path,
subject="tuning",
data=send_data,
log_name="output",
)
proc = executor.call(
tuning.command,
)
tuning_time = executor.process_duration
if tuning_time:
tuning_time = round(tuning_time, 2)
log.debug(f"Elapsed time: {tuning_time}")
logs = [os.path.join(workload_path, "output.txt")]
parser = TuningParser(logs)
if proc.is_ok:
response_data = parser.process()
if isinstance(response_data, dict):
response_data["id"] = request_id
response_data["tuning_time"] = tuning_time
response_data["size_int8"] = get_size(tuning.model_output_path)
response_data["model_output_path"] = tuning.model_output_path
response_data["size_fp32"] = get_size(tuning.model_path)
response_data["is_custom_dataloader"] = bool(workdir.template_path)
workdir.update_data(
request_id=request_id,
model_path=tuning.model_path,
model_output_path=tuning.model_output_path,
metric=response_data,
status="success",
execution_details={"tuning": tuning.serialize()},
)
response_data["execution_details"] = {"tuning": tuning.serialize()}
log.debug(f"Parsed data is {json.dumps(response_data)}")
mq.post_success("tuning_finish", response_data)
return response_data
else:
log.debug("FAIL")
workdir.update_data(
request_id=request_id,
model_path=tuning.model_path,
status="error",
)
mq.post_failure("tuning_finish", {"message": "failed", "id": request_id})
raise ClientErrorException("Tuning failed during execution.") | 370630145325b2166030c6402ed17bce2cf9ed70 | 4,544 |
def get_subnet_mask(subnet: int, v6: bool) -> int:
"""Get the subnet mask given a CIDR prefix 'subnet'."""
if v6:
return bit_not((1 << (128 - subnet)) - 1, 128)
else:
return bit_not((1 << (32 - subnet)) - 1, 32) | 57c8de0bff70b0939dd8c646da0840be7c2839e1 | 4,545 |
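The bit_not helper is not included above; a minimal sketch, assuming bit_not(x, width) simply flips x within a fixed bit width, would be:

def bit_not(value, width):
    return (1 << width) - 1 - value  # assumed helper: invert all bits within the given width

print(hex(get_subnet_mask(24, v6=False)))  # 0xffffff00
print(hex(get_subnet_mask(64, v6=True)))   # 0xffffffffffffffff0000000000000000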
import argparse
def get_parser():
"""Return base parser for scripts.
"""
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Tuning configuration file (examples: configs/tuning)')
return parser | 6f394f836fae278b659a0612a088f53563c8f34b | 4,546 |
def home(request):
"""return HttpResponse('<h1>Hello, Welcome to this test</h1>')"""
"""Le chemin des templates est renseigne dans "DIRS" de "TEMPLATES" dans settings.py
DONC PAS BESOIN DE RENSEIGNER LE CHEMIN ABSOLU"""
return render(request, "index.html") | 04a671daa9425ea76841b491f8eefd133b6e2c67 | 4,547 |
import os
def cd(path):
"""Context manager to switch working directory"""
def normpath(path):
"""Normalize UNIX path to a native path."""
normalized = os.path.join(*path.split('/'))
if os.path.isabs(path):
return os.path.abspath('/') + normalized
return normalized
path = normpath(path)
cwd = os.getcwd()
os.chdir(path)
try:
yield path
finally:
os.chdir(cwd) | f1664765e26e3ff4ec8a70d16d6beca5a23f4d68 | 4,548 |
def extract_commands(data, *commands):
"""Input function to find commands output in the "data" text"""
ret = ""
hostname = _ttp_["variable"]["gethostname"](data, "input find_command function")
if hostname:
for command in commands:
regex = r"{}[#>] *{} *\n([\S\s]+?)(?={}[#>]|$)".format(
hostname, command, hostname
)
match = search(regex, data)
if match:
ret += "\n{}\n".format(match.group())
if ret:
return ret, None
return data, None | 6fcbf9584f5a2f799839c9964a5ae6235f4e8b50 | 4,549 |
def get_version() -> str:
"""
Returns the version string for the ufotest project. The version scheme of ufotest loosely follows the
technique of `Semantic Versioning <https://semver.org/>`_. Where a minor version change may introduce backward
incompatible changes, due to the project still being in active development with many features being subject to
change.
The return value of this function is subject to the "get_version" filter hook, which is able to modify the version
string *after* it has been loaded from the file and sanitized.
*EXAMPLE*
.. code-block:: python
version = get_version() # "1.2.1"
:returns: The version string without any additional characters or whitespaces.
"""
with open(VERSION_PATH) as version_file:
version = version_file.read()
version = version.replace(' ', '').replace('\n', '')
# Here we actually need to check if the plugin management system is actually initialized (this is what the boolean
# return of is_prepared indicates) because the version function needs to be functional even when the ufotest
# installation folder and thus the config file does not yet exist.
if CONFIG.is_prepared():
version = CONFIG.pm.apply_filter('get_version', value=version)
return version | b34eac3aef7661b65408c60ce606cd24a06ae0ee | 4,550 |
async def clear_pending_revocations(request: web.BaseRequest):
"""
Request handler for clearing pending revocations.
Args:
request: aiohttp request object
Returns:
Credential revocation ids still pending revocation by revocation registry id.
"""
context: AdminRequestContext = request["context"]
body = await request.json()
purge = body.get("purge")
rev_manager = RevocationManager(context.profile)
try:
results = await rev_manager.clear_pending_revocations(purge)
except StorageError as err:
raise web.HTTPBadRequest(reason=err.roll_up) from err
return web.json_response({"rrid2crid": results}) | 98db34266f3afbe9ecfeddcf802c1441ae7ea58b | 4,551 |
from datetime import datetime
def add_filter(field, bind, criteria):
"""Generate a filter."""
if 'values' in criteria:
return '{0}=any(:{1})'.format(field, bind), criteria['values']
if 'date' in criteria:
return '{0}::date=:{1}'.format(field, bind), datetime.strptime(criteria['date'], '%Y-%m-%d').date()
if 'gte' in criteria:
return '{0}>=:{1}'.format(field, bind), criteria['gte']
if 'lte' in criteria:
return '{0}<=:{1}'.format(field, bind), criteria['lte']
raise ValueError('criteria not supported') | 2358cab297b2a2cbc42af02b3b6d14ac134c8b71 | 4,552 |
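A short usage sketch (assuming add_filter above is in scope); each call returns a SQL fragment with a named bind parameter plus the value to bind:

print(add_filter("created_at", "created", {"gte": "2020-01-01"}))
# ('created_at>=:created', '2020-01-01')
print(add_filter("status", "status", {"values": ["open", "pending"]}))
# ('status=any(:status)', ['open', 'pending'])
print(add_filter("due_date", "due", {"date": "2021-06-30"}))
# ('due_date::date=:due', datetime.date(2021, 6, 30))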
def ireject(predicate, iterable):
"""Reject all items from the sequence for which the predicate is true.
ireject(function or None, sequence) --> iterator
:param predicate:
Predicate function. If ``None``, reject all truthy items.
:param iterable:
Iterable to filter through.
:yields:
A sequence of all items for which the predicate is false.
"""
return _ifilterfalse(predicate, iterable) | 98f9416ac1db1f2909d1d895ee0c0bc70c8b2249 | 4,553 |
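The _ifilterfalse dependency is not shown above; in modern Python it is typically itertools.filterfalse, so a hedged equivalent sketch is:

from itertools import filterfalse  # assumed stand-in for _ifilterfalse

print(list(filterfalse(lambda n: n % 2 == 0, range(10))))  # [1, 3, 5, 7, 9]
print(list(filterfalse(None, [0, 1, "", "x", None])))      # [0, '', None]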
def construct_config_error_msg(config, errors):
"""Construct an error message for an invalid configuration setup
Parameters
----------
config: Dict[str, Any]
Merged dictionary of configuration options from CLI, user configfile and
default configfile
errors: Dict[str, Any]
Dictionary of schema validation errors passed by Marshmallow
Returns
-------
str
"""
error_msg = "Failed to parse config\n"
for error_param, exception_msg in errors.items():
error_msg += parse_config_error(error_param, exception_msg)
return error_msg | 02954620115308d7d50ca28b23b98a2ba410489f | 4,554 |
def isMSAADebugLoggingEnabled():
""" Whether the user has configured NVDA to log extra information about MSAA events. """
return config.conf["debugLog"]["MSAA"] | 8bd9359b73b643534933b90a5fb0810668ca440c | 4,555 |
def _haversine_GC_distance(φ1, φ2, λ1, λ2):
"""
Haversine formula for great circle distance. Suffers from rounding errors for
antipodal points.
Parameters
----------
φ1, φ2 : :class:`numpy.ndarray`
Numpy arrays with latitudes.
λ1, λ2 : :class:`numpy.ndarray`
Numpy arrays with longitudes.
"""
Δλ = np.abs(λ1 - λ2)
Δφ = np.abs(φ1 - φ2)
return 2 * np.arcsin(
np.sqrt(np.sin(Δφ / 2) ** 2 + np.cos(φ1) * np.cos(φ2) * np.sin(Δλ / 2) ** 2)
) | bb57ddeacd761abead5ee499610ead8c9ba38a9f | 4,556 |
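A worked example (assuming the function above and numpy imported as np are in scope): the great-circle angle between a pole and any point on the equator is π/2 radians, regardless of longitude.

phi1, lam1 = np.radians(90.0), np.radians(0.0)   # north pole
phi2, lam2 = np.radians(0.0), np.radians(123.0)  # a point on the equator
print(_haversine_GC_distance(phi1, phi2, lam1, lam2))  # ~1.5707963 (π/2 radians)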
def differentiate_branch(branch, suffix="deriv"):
"""calculates difference between each entry and the previous
first entry in the new branch is difference between first and last entries in the input"""
def bud(manager):
return {add_suffix(branch,suffix):manager[branch]-np.roll(manager[branch],1)}
return bud | 298b19b1e151e04df9c040f0c48e4799bcc3f3d2 | 4,557 |
import typing
def etf_holders(apikey: str, symbol: str) -> typing.Optional[typing.List[typing.Dict]]:
"""
Query FMP /etf-holder/ API.
:param apikey: Your API key.
:param symbol: Company ticker.
:return: A list of dictionaries.
"""
path = f"etf-holder/{symbol}"
query_vars = {"apikey": apikey}
return __return_json_v3(path=path, query_vars=query_vars) | f405fa92296c28a8ba8ca87b6edac27392ec1f85 | 4,558 |
def clean_visibility_flags(horizon_dataframe: pd.DataFrame) -> pd.DataFrame:
"""
assign names to unlabeled 'visibility flag' columns -- solar presence,
lunar/interfering body presence, is-target-on-near-side-of-parent-body,
is-target-illuminated; drop them if empty
"""
flag_mapping = {
unlabeled_flag: flag_name
for unlabeled_flag, flag_name in zip(
[c for c in horizon_dataframe.columns if 'Unnamed' in c],
VISIBILITY_FLAG_NAMES
)
}
horizon_dataframe = horizon_dataframe.rename(mapper=flag_mapping, axis=1)
empty_flags = []
for flag_column in flag_mapping.values():
if horizon_dataframe[flag_column].isin([' ', '']).all():
empty_flags.append(flag_column)
return horizon_dataframe.drop(empty_flags, axis=1) | 906432120babffacb709b1d45e7c4dd86c60775d | 4,559 |
def calib(phase, k, axis=1):
"""Phase calibration
Args:
phase (ndarray): Unwrapped phase of CSI.
k (ndarray): Subcarriers index
axis (int): Axis along which is subcarrier. Default: 1
Returns:
ndarray: Phase calibrated
ref:
[Enabling Contactless Detection of Moving Humans with Dynamic Speeds Using CSI]
(http://tns.thss.tsinghua.edu.cn/wifiradar/papers/QianKun-TECS2017.pdf)
"""
p = np.asarray(phase)
k = np.asarray(k)
slice1 = [slice(None, None)] * p.ndim
slice1[axis] = slice(-1, None)
slice1 = tuple(slice1)
slice2 = [slice(None, None)] * p.ndim
slice2[axis] = slice(None, 1)
slice2 = tuple(slice2)
shape1 = [1] * p.ndim
shape1[axis] = k.shape[0]
shape1 = tuple(shape1)
k_n, k_1 = k[-1], k[1]
a = (p[slice1] - p[slice2]) / (k_n - k_1)
b = p.mean(axis=axis, keepdims=True)
k = k.reshape(shape1)
phase_calib = p - a * k - b
return phase_calib | 5e1f59c0a13440ad8e1304523976c2fbe6562d5a | 4,560 |
def rescale_as_int(
s: pd.Series, min_value: float = None, max_value: float = None, dtype=np.int16
) -> pd.Series:
"""Cannot be converted to njit because np.clip is unsupported."""
valid_dtypes = {np.int8, np.int16, np.int32}
if dtype not in valid_dtypes:
raise ValueError(f"dtype: expecting [{valid_dtypes}] but found [{dtype}]")
if min_value is None:
min_value = min(s)
if max_value is None:
max_value = max(s)
if min_value == 0 and max_value == 0:
raise ValueError("Both min_value and max_value must not be zero")
limit = max(abs(min_value), abs(max_value))
res = np.clip(s / limit, 0, 1) * np.iinfo(dtype).max
return res.astype(dtype) | 31772759c67d33f20b89fd87aa91c9249ae2bb9a | 4,561 |
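A small usage sketch (assuming rescale_as_int above plus pandas as pd and numpy as np are in scope): values are divided by the largest absolute bound, clipped to [0, 1], and mapped onto the positive range of the target integer type.

s = pd.Series([-1.0, 0.25, 0.5, 1.0])
print(rescale_as_int(s).tolist())  # [0, 8191, 16383, 32767] with the default int16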
def format_headers(headers):
"""Formats the headers of a :class:`Request`.
:param headers: the headers to be formatted.
:type headers: :class:`dict`.
:return: the headers in lower case format.
:rtype: :class:`dict`.
"""
dictionary = {}
for k, v in headers.items():
if isinstance(k, unicode):
k = k.encode('utf-8')
if isinstance(v, unicode):
v = v.encode('utf-8')
dictionary[k.lower()] = v.lower()
return dictionary | 0a0890c10378d9f8e20f353b1b9383e728f0a4f7 | 4,562 |
def decode_field(value):
"""Decodes a field as defined in the 'Field Specification' of the actions
man page: http://www.openvswitch.org/support/dist-docs/ovs-actions.7.txt
"""
parts = value.strip("]\n\r").split("[")
result = {
"field": parts[0],
}
if len(parts) > 1 and parts[1]:
field_range = parts[1].split("..")
start = field_range[0]
end = field_range[1] if len(field_range) > 1 else start
if start:
result["start"] = int(start)
if end:
result["end"] = int(end)
return result | 1a1659e69127ddd3c63eb7d4118ceb4e53a28ca0 | 4,563 |
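For example, with field references as they appear in ovs-actions output:

print(decode_field("NXM_NX_REG0[0..15]"))
# {'field': 'NXM_NX_REG0', 'start': 0, 'end': 15}
print(decode_field("eth_src"))
# {'field': 'eth_src'}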
import numpy as np
from tqdm import tqdm
def compute_norm(x_train, in_ch):
"""Returns image-wise mean and standard deviation per channel."""
mean = np.zeros((1, 1, 1, in_ch))
std = np.zeros((1, 1, 1, in_ch))
n = np.zeros((1, 1, 1, in_ch))
# Compute mean.
for x in tqdm(x_train, desc='Compute mean'):
mean += np.sum(x, axis=(0, 1, 2), keepdims=True)
n += np.sum(x > 0, axis=(0, 1, 2), keepdims=True)
mean /= n
# Compute std.
for x in tqdm(x_train, desc='Compute std'):
std += np.sum((x - mean) ** 2, axis=(0, 1, 2), keepdims=True)
std = (std / n) ** 0.5
return mean, std | e49012075adfa03b33bb6308d1d50f4c22c1cc2c | 4,564 |
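A quick shape check on synthetic data; each element of x_train is assumed to be a batch of H x W x C images, and zero-valued pixels are excluded from the per-channel statistics.

x_train = [np.random.rand(4, 32, 32, 3) for _ in range(10)]
mean, std = compute_norm(x_train, in_ch=3)
print(mean.shape, std.shape)  # (1, 1, 1, 3) (1, 1, 1, 3)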
def _nonempty_line_count(src: str) -> int:
"""Count the number of non-empty lines present in the provided source string."""
return sum(1 for line in src.splitlines() if line.strip()) | ad2ac0723f9b3e1f36b331175dc32a8591c67893 | 4,565 |
import json
def geom_to_xml_element(geom):
"""Transform a GEOS or OGR geometry object into an lxml Element
for the GML geometry."""
if geom.srs.srid != 4326:
raise NotImplementedError("Only WGS 84 lat/long geometries (SRID 4326) are supported.")
# GeoJSON output is far more standard than GML, so go through that
return geojson_to_gml(json.loads(geom.geojson)) | a2702e8ac4e3cb24f787513f820df60ad973e305 | 4,566 |
import os
def _validate_source(source):
"""
Check that the entered data source paths are valid
"""
# acceptable inputs (for now) are a single file or directory
assert type(source) == str, "You must enter your input as a string."
assert (
os.path.isdir(source) == True or os.path.isfile(source) == True
), "Your data source string is not a valid data source."
return True | 45e1f88f6c246713f85d83cf6a9753ec67799774 | 4,567 |
from keras import backend as K  # or: from tensorflow.keras import backend as K
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
Parameters
----------
y_true : numpy array
an array of true labels
y_pred : numpy array
an array of predicted labels
Returns
-------
    precision : float
        the batch-wise average precision value
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision | d57f1d782628e312b2e52098658be81e32351f3d | 4,568 |
def get_validators(setting):
"""
:type setting: dict
"""
if 'validate' not in setting:
return []
validators = []
for validator_name in setting['validate'].keys():
loader_module = load_module(
'spreadsheetconverter.loader.validator.{}',
validator_name)
validators.append(loader_module.Validator(setting))
return validators | db3b5594122685f3190cdae053ab7a385065d17e | 4,569 |
def _check_found(py_exe, version_text, log_invalid=True):
"""Check the Python and pip version text found.
Args:
py_exe (str or None): Python executable path found, if any.
version_text (str or None): Pip version found, if any.
log_invalid (bool): Whether to log messages if found invalid.
Returns:
bool: Python is OK and pip version fits against ``PIP_SPECIFIER``.
"""
is_valid = True
message = "Needs pip%s, but found '%s' for Python '%s'"
if version_text is None or not py_exe:
is_valid = False
if log_invalid:
print_debug(message, PIP_SPECIFIER, version_text, py_exe)
elif PackagingVersion(version_text) not in PIP_SPECIFIER:
is_valid = False
if log_invalid:
print_warning(message, PIP_SPECIFIER, version_text, py_exe)
return is_valid | 5262c3e5db5384e7b4addb6288018f23100e7115 | 4,570 |
def worker(remote, parent_remote, env_fn_wrappers):
""" worker func to execute vec_env commands
"""
def step_env(env, action):
ob, reward, done, info = env.step(action)
if done:
ob = env.reset()
return ob, reward, done, info
parent_remote.close()
envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
try:
while True:
cmd, data = remote.recv()
# branch out for requests
if cmd == 'step':
res = [step_env(env, action) for env, action in zip(envs, data)]
remote.send(res)
elif cmd == 'reset':
remote.send([env.reset() for env in envs])
elif cmd == 'render':
remote.send([env.render(mode='rgb_array') for env in envs])
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send(CloudpickleWrapper(
(envs[0].observation_space, envs[0].action_space)
))
elif cmd == 'get_agent_types':
if all([hasattr(a, 'adversary') for a in envs[0].agents]):
res = [
'adversary' if a.adversary else 'agent'
for a in envs[0].agents
]
else: # fully cooperative
res = ['agent' for _ in envs[0].agents]
remote.send(res)
else:
                raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
except:
print('Environment runner process failed...')
finally:
for env in envs:
env.close() | aaf5a16a72e97ec46e3a1ae4676c4591bc7f0183 | 4,571 |
from functools import reduce
def greedysplit_general(n, k, sigma, combine=lambda a,
b: a + b, key=lambda a: a):
""" Do a greedy split """
splits = [n]
s = sigma(0, n)
def score(splits, sigma):
splits = sorted(splits)
return key(reduce(combine, (sigma(a, b)
for (a, b) in tools.seg_iter(splits))))
while k > 0:
usedinds = set(splits)
new = min((score(splits + [i], sigma), splits + [i])
for i in range(1, n) if i not in usedinds)
splits = new[1]
s = new[0]
k -= 1
return sorted(splits), s | 6480db8f613f37704e7bf6552407e5b0f851ab47 | 4,572 |
def public_assignment_get(assignment_id: str):
"""
Get a specific assignment spec
:param assignment_id:
:return:
"""
return success_response({
'assignment': get_assignment_data(current_user.id, assignment_id)
}) | 2f3d828975c0d7db663556da5f0dc590124075b2 | 4,573 |
def recursion_detected(frame, keys):
"""Detect if we have a recursion by finding if we have already seen a
call to this function with the same locals. Comparison is done
only for the provided set of keys.
"""
current = frame
current_filename = current.f_code.co_filename
current_function = current.f_code.co_name
current_locals = {k: v
for k, v in current.f_locals.items()
if k in keys}
while frame.f_back:
frame = frame.f_back
fname = frame.f_code.co_filename
if not(fname.endswith(".py") or
fname == "<template>"):
return False
if fname != current_filename or \
frame.f_code.co_name != current_function:
continue
if ({k: v
for k, v in frame.f_locals.items()
if k in keys} == current_locals):
return True
return False | ebf30e715d2901169095bc920e8af6c715f2a1de | 4,574 |
import argparse
def arg_parser(cmd_line=None, config=None):
"""
Parse the command line or the parameter to pass to the rest of the workflow
:param cmd_line: A string containing a command line (mainly used for
testing)
:return: An args object with overrides for the configuration
"""
default_formatter = argparse.ArgumentDefaultsHelpFormatter
main_parser = argparse.ArgumentParser(description="Promoter workflow",
formatter_class=default_formatter)
main_parser.add_argument("--release-config", required=False,
default=DEFAULT_CONFIG_RELEASE,
help="Release config file")
main_parser.add_argument("--config-root", required=False,
default=DEFAULT_CONFIG_ROOT,
help="Specify the environment type "
"Default: staging, For production"
"use rdo and downstream")
main_parser.add_argument("--log-level",
default='INFO',
help="Set the log level")
command_parser = main_parser.add_subparsers(dest='subcommand')
command_parser.required = True
promote_all_parser = command_parser.add_parser('promote-all',
help="Promote everything")
# promote-all has no sub-arguments
promote_all_parser.set_defaults(handler=promote_all)
force_promote_parser = \
command_parser.add_parser('force-promote',
help="Force promotion of a specific hash, "
"bypassing candidate selection",
formatter_class=default_formatter)
# force-promote arguments
force_promote_parser.add_argument("--commit-hash", required=True,
help="The commit hash part for the "
"candidate hash")
force_promote_parser.add_argument("--distro-hash", required=True,
help="The distro hash part for the "
"candidate hash")
force_promote_parser.add_argument("--aggregate-hash",
help="The aggregate hash part for the "
"candidate hash")
force_promote_parser.add_argument("--allowed-clients",
default="registries_client,qcow_client,"
"dlrn_client",
help="The comma separated list of "
"clients allowed to perfom the "
"promotion")
force_promote_parser.add_argument("candidate_label",
help="The label associated with the "
"candidate hash")
force_promote_parser.add_argument("target_label",
help="The label to promoted "
"the candidate hash to")
force_promote_parser.set_defaults(handler=force_promote)
if cmd_line is not None:
args = main_parser.parse_args(cmd_line.split())
else:
args = main_parser.parse_args()
return args | 6d04361584f0aaf5743e3db70410c443e5cf9b5f | 4,575 |
import markdown
def pars_to_blocks(pars):
    """ This simulates one of the phases the markdown library goes through when
    parsing text, and returns the paragraphs grouped as blocks, the way markdown
    handles them.
    """
pars = list(pars)
m = markdown.Markdown()
bp = markdown.blockprocessors.build_block_parser(m)
root = markdown.util.etree.Element('div')
blocks = []
while pars:
parsbefore = list(pars)
for processor in bp.blockprocessors.values():
if processor.test(root, pars[0]):
processor.run(root, pars)
while len(parsbefore) > len(pars):
blocks.append(parsbefore[0])
parsbefore = parsbefore[1:]
if pars and pars[0].strip('\n') != parsbefore[0].strip('\n'):
strippedbefore = parsbefore[0].strip('\n')
strippedcurrent = pars[0].strip('\n')
if strippedbefore.endswith(strippedcurrent):
beforelength = len(strippedbefore)
currentlength = len(strippedcurrent)
block = strippedbefore[0:beforelength - currentlength]
blocks.append(block)
else:
raise Exception('unsupported change by blockprocessor. abort! abort!')
break
return blocks | f71d4460847ec4b69ad53470aba26c145d296388 | 4,576 |
from bs4 import BeautifulSoup
def extract_intersections_from_osm_xml(osm_xml):
"""
Extract the GPS coordinates of the roads intersections
Return a list of gps tuples
"""
soup = BeautifulSoup(osm_xml)
retval = []
segments_by_extremities = {}
Roads = []
RoadRefs = []
Coordinates = {}
for point in soup.osm.findAll('node'):
Coordinates[point['id']] = (float(point['lat']), float(point['lon']))
for way in soup.osm.findAll(lambda node : node.name=="way" and node.findAll(k='highway')):
name = ""
roadPoints = []
nodes = way.findAll('nd')
for node in nodes:
roadPoints.append(node['ref'])
RoadRefs.append(roadPoints)
# iterate over the list of street and over each segment of a street.
# for each segment extremity, build a list of segment leading to it
for roadIdx, roadRef in enumerate(RoadRefs):
for segIdx, seg in enumerate(roadRef):
coords = Coordinates[seg]
if coords not in segments_by_extremities:
segments_by_extremities[coords] = []
segments_by_extremities[coords].append([roadIdx, segIdx])
    # Iterate over the extremity lists and only keep nodes shared by at least two ways
    # (the code below removes nodes referenced by fewer than two road segments);
    # otherwise they are just an intermediate point or turn within a single road
for k in segments_by_extremities.keys():
if len(segments_by_extremities[k]) <2:
del(segments_by_extremities[k])
#finally return just the keys
return segments_by_extremities.keys() | 6cff1fe39891eb4a6c595196eabfd4569af2fd8e | 4,577 |
from pyspark.sql import SparkSession
def spark_session(request):
"""Fixture for creating a spark context."""
spark = (SparkSession
.builder
.master('local[2]')
.config('spark.jars.packages', 'com.databricks:spark-avro_2.11:3.0.1')
.appName('pytest-pyspark-local-testing')
.enableHiveSupport()
.getOrCreate())
request.addfinalizer(lambda: spark.stop())
quiet_py4j()
return spark | e7a95ad7ebea876976923c6dd16c7a761116427d | 4,578 |
import yaml
def _load_model_from_config(config_path, hparam_overrides, vocab_file, mode):
"""Loads model from a configuration file"""
with gfile.GFile(config_path) as config_file:
config = yaml.load(config_file)
model_cls = locate(config["model"]) or getattr(models, config["model"])
model_params = config["model_params"]
if hparam_overrides:
model_params.update(hparam_overrides)
# Change the max decode length to make the test run faster
model_params["decoder.params"]["max_decode_length"] = 5
model_params["vocab_source"] = vocab_file
model_params["vocab_target"] = vocab_file
return model_cls(params=model_params, mode=mode) | 97af7dc919de5af96332c8445e162990006079f4 | 4,579 |
import ast
def _get_assignment_node_from_call_frame(frame):
"""
Helper to get the Assign or AnnAssign AST node for a call frame.
The call frame will point to a specific file and line number, and we use the
source index to retrieve the AST nodes for that line.
"""
filename = frame.f_code.co_filename
# Go up the AST from a node in the call frame line until we find an Assign or
# AnnAssign, since the (Ann)Assign may be over multiple lines.
nodes_in_line = _get_source_index(filename).get(frame.f_lineno, [])
cur_node = nodes_in_line[0]
while cur_node:
if isinstance(cur_node, (ast.Assign, ast.AnnAssign)):
return cur_node
cur_node = cur_node.parent
raise Exception("Could not find AST assignment node in the line"
f" {filename}:{frame.f_lineno}") | edb7f2425d170721e12dc4c1e2427e9584aeed8c | 4,580 |
def check_existing_user(username):
"""
    Check whether a user with the given username already exists.
"""
return User.user_exist(username) | 573e9a8a6c0e504812d3b90eb4a27b15edec35ab | 4,581 |
def createevent():
""" An event is a (immediate) change of the world. It has no
duration, contrary to a StaticSituation that has a non-null duration.
This function creates and returns such a instantaneous situation.
:sees: situations.py for a set of standard events types
"""
sit = Situation(type = GENERIC, pattern = None)
return sit | 998f0a473c47828435d7e5310de29ade1fbd7810 | 4,582 |
def _dump_multipoint(obj, fmt):
"""
Dump a GeoJSON-like MultiPoint object to WKT.
Input parameters and return value are the MULTIPOINT equivalent to
:func:`_dump_point`.
"""
coords = obj['coordinates']
mp = 'MULTIPOINT (%s)'
points = (' '.join(fmt % c for c in pt) for pt in coords)
# Add parens around each point.
points = ('(%s)' % pt for pt in points)
mp %= ', '.join(points)
return mp | cdea05b91c251b655e08650807e3f74d3bb5e77b | 4,583 |
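For example:

obj = {"type": "MultiPoint", "coordinates": [[10.0, 40.0], [40.0, 30.0]]}
print(_dump_multipoint(obj, "%.1f"))
# MULTIPOINT ((10.0 40.0), (40.0 30.0))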
import pycuda.driver as cuda
import tensorrt as trt
def do_inference(engine, pics_1, h_input_1, d_input_1, h_output, d_output, stream, batch_size, height, width):
"""
This is the function to run the inference
Args:
       engine : The deserialized TensorRT engine used to run inference
       pics_1 : Input images to the model.
       h_input_1: Input in the host
       d_input_1: Input in the device
       h_output: Output in the host
       d_output: Output in the device
stream: CUDA stream
batch_size : Batch size for execution time
height: Height of the output image
width: Width of the output image
Output:
The list of output images
"""
load_images_to_buffer(pics_1, h_input_1)
with engine.create_execution_context() as context:
# Transfer input data to the GPU.
cuda.memcpy_htod_async(d_input_1, h_input_1, stream)
# Run inference.
context.profiler = trt.Profiler()
context.execute(batch_size=1, bindings=[int(d_input_1), int(d_output)])
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(h_output, d_output, stream)
# Synchronize the stream
stream.synchronize()
# Return the host output.
out = h_output.reshape((batch_size,-1, height, width))
return out | e9e452e96d42167bf17bc6bef8dc014fa31dbe8f | 4,584 |
import ast
def make_import():
"""Import(alias* names)"""
return ast.Import(names=[make_alias()]) | e9085ee9b4b0438857b50b891fbee0b88d256f8b | 4,585 |
from typing import Union
from typing import List
import cv2
import numpy as np
from PIL import Image
def preprocess(
image: Union[np.ndarray, Image.Image],
threshold: int = None,
resize: int = 64,
quantiles: List[float] = [.01, .05, 0.1,
0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
reduction: Union[str, List[str]] = ['max', 'median', 'mean', 'min']
) -> dict:
"""
Basic preprocessing metrics for a histological image.
Args:
image (Union[np.ndarray, Image.Image]): Input image.
        threshold (int, optional): Threshold for tissue detection. If not
            defined, Otsu's binarization will be used, which may fail for
            images with data loss or only background. Defaults to None.
resize (int, optional): For artifact() function. Defaults to 64.
quantiles (List[float], optional): For HSV_quantiles() and RGB_quantiles
functions. Defaults to
[.01, .05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99].
reduction (Union[str, List[str]], optional): Reduction methods for
sharpness() function. Defaults to ['max', 'median', 'mean', 'min'].
Raises:
TypeError: Invalid type for ``image``.
Returns:
dict: Dictionary of basic preprocessing metrics.
"""
if isinstance(image, Image.Image):
if image.mode != 'RGB':
image = image.convert('RGB')
image = np.array(image, dtype=np.uint8)
elif isinstance(image, np.ndarray):
image = image.astype(np.uint8)
else:
        raise TypeError('Expected {} or {} not {}.'.format(
np.ndarray, Image.Image, type(image)
))
    # Initialize results and helper structures.
results = {}
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
mask = tissue_mask(image, threshold=threshold)
# Background percentage.
results['background'] = (mask == 0).sum()/mask.size
# Sharpness.
results.update(sharpness(gray, reduction=reduction))
# Data loss.
results.update(data_loss(gray))
# Artifacts.
small_img = cv2.resize(image, (resize, resize), cv2.INTER_LANCZOS4)
small_mask = cv2.resize(mask, (resize, resize), cv2.INTER_LANCZOS4)
results.update(HSV_quantiles(
small_img, mask=small_mask, quantiles=quantiles))
results.update(RGB_quantiles(
small_img, mask=small_mask, quantiles=quantiles))
return results | afa36739309ada2e97e18e63ae65362546b1b52c | 4,586 |
def binary_distance(label1, label2):
"""Simple equality test.
0.0 if the labels are identical, 1.0 if they are different.
>>> from nltk.metrics import binary_distance
>>> binary_distance(1,1)
0.0
>>> binary_distance(1,3)
1.0
"""
return 0.0 if label1 == label2 else 1.0 | 2c4eaebda2d6955a5012cc513857aed66df60194 | 4,587 |
def fetch_collections_info(data):
"""Connect to solr_cloud status page and and return JSON object"""
url = "{0}/admin/collections?action=CLUSTERSTATUS&wt=json".format(data["base_url"])
get_data = _api_call(url, data["opener"])
solr_cloud = {}
if get_data is None:
collectd.error("solr_collectd plugin: can't get info")
solr_cloud["error"] = "Solr instance is not running in solr_cloud mode"
elif "error" in get_data:
collectd.warning("%s" % get_data["error"]["msg"])
solr_cloud["error"] = get_data["error"]["msg"]
elif "cluster" in get_data:
if "cluster" not in data["custom_dimensions"]:
data["custom_dimensions"]["cluster"] = data["cluster"]
solr_cloud["live_nodes"] = get_data["cluster"]["live_nodes"]
solrCollections = get_data["cluster"]["collections"]
for collection in solrCollections:
solr_cloud[collection] = {}
solrShards = get_data["cluster"]["collections"][collection]["shards"]
for shard in solrShards.keys():
solr_cloud[collection][shard] = {}
for coreNodes in solrShards[shard]["replicas"]:
coreNode = solrShards[shard]["replicas"][coreNodes]
core = coreNode["core"]
solr_cloud[collection][shard][core] = {}
# if 'leader' in coreNode.keys() and coreNode['base_url'] == data['base_url']:
# collectd.debug('{0} - Solr running in solr_cloud mode'.format(data['member_id']))
solr_cloud[collection][shard][core]["node"] = coreNode["node_name"]
solr_cloud[collection][shard][core]["base_url"] = coreNode["base_url"]
solr_cloud[collection][shard][core]["state"] = coreNode["state"]
if "leader" in coreNode:
solr_cloud[collection][shard][core]["leader"] = coreNode["leader"]
else:
solr_cloud[collection][shard][core]["leader"] = "false"
return solr_cloud | cfb3e4dda7986f4f13fd24b5e26a12ae96ceb6e6 | 4,588 |
def calc_commission_futures_global(trade_cnt, price):
"""
    International futures: commissions vary widely, so it is best to supply a
    custom calculation externally; here we simply apply a flat 0.002 rate.
    :param trade_cnt: number of shares/contracts traded (int)
    :param price: price per unit (USD)
    :return: the calculated commission
    """
    cost = trade_cnt * price
    # International futures commissions differ a lot between brokers and agents;
    # ideally override this externally. Here we just use a flat 0.002 rate.
commission = cost * 0.002
return commission | ddd2c4571abfcdf7021a28b6cc78fe6441da2bd3 | 4,589 |
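For example, two contracts at 3500.0 USD each: 2 * 3500.0 * 0.002 = 14.0.

print(calc_commission_futures_global(2, 3500.0))  # 14.0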
def is_section_command(row):
"""CSV rows are cosidered new section commands if they start with
<SECTION> and consist of at least two columns column.
>>> is_section_command('<SECTION>\tSection name'.split('\t'))
True
>>> is_section_command('<other>\tSection name'.split('\t'))
False
>>> is_section_command(['<SECTION>', 'Section name', 'some more'])
True
"""
return len(row) >= 2 and row[0] == __CSV_SECTION_PREFIX | 7942625e119c4a0d3707fd5884ade6e48b2dfb1a | 4,590 |
import os
def download(auth, url, headers, output_path, size, overwrite,
f_name=None,
ext=None,
block_size=4096,
callback=None):
"""
Call GET for a file stream.
:Args:
- auth (:class:`.Credentials`): The session credentials object.
- url (str): The complete endpoint URL.
- headers (dict): The headers to be used in the request.
- output_path (str): Full file path to download the data to.
- size (int): File size of the file to be downloaded as retrieved
by a HEAD request.
- overwrite (bool): If ``True``, download the new data over an
existing file.
:Kwargs:
- f_name (str): Used to specify a filename if one is not already
included in the URL. The default is ``None``.
- ext (str): Used to specify a file extension if one is not already
included in the URL. The default is ``None``.
- block_size (int): Used to vary the upload chunk size.
The default is 4096 bytes. Determines the frequency with which the
callback is called.
- callback (func): A function to be called to report download progress.
The function must take three arguments: the percent downloaded (float), the
bytes downloaded (float), and the total bytes to be downloaded (float).
:Returns:
- The raw server response.
:Raises:
- :exc:`.RestCallException` is the call failed, a file operation
failed, or returned a non-200 status.
"""
filename = filename_from_url(url, ext) if not f_name else f_name
downloadfile = os.path.join(output_path, filename)
if os.path.exists(downloadfile) and not overwrite:
LOG.warning(
"File {0} already exists. Not overwriting.".format(downloadfile))
return True
LOG.debug("GET call URL: {0}, callback: {1}, file: "
"{2}, size: {3}, overwrite: {4}, block_size: {5}".format(url,
callback,
downloadfile,
size,
overwrite,
block_size))
LOG.info("Starting download to {0}".format(downloadfile))
if size > 0:
data_downloaded = float(0)
use_callback = hasattr(callback, "__call__")
try:
with open(downloadfile, "wb") as handle:
response = _call(auth, 'GET', url, headers=headers, stream=True)
for block in response.iter_content(block_size):
if not block:
LOG.info("Download complete")
break
handle.write(block)
if size > 0 and use_callback:
data_downloaded += len(block)
callback(float(data_downloaded/size*100), data_downloaded, float(size))
return response
except RestCallException:
try:
os.remove(downloadfile)
except:
pass
raise
except EnvironmentError as exp:
try:
os.remove(downloadfile)
except:
pass
raise RestCallException(type(exp), str(exp), exp) | 46566124cc1a5425217655239c976fcf9ab378e8 | 4,591 |
def to_int(matrix):
"""
    Function to convert each element of the matrix to int
"""
for row in range(rows(matrix)):
for col in range(cols(matrix)):
for j in range(3):
matrix[row][col][j] = int(matrix[row][col][j])
return matrix | 9f277ab0c0fe7df145e8a4c0da36fba25a523756 | 4,592 |
def create_tastypie_resource(class_inst):
"""
Usage: url(r'^api/', include(create_tastypie_resource(UfsObjFileMapping).urls)),
Access url: api/ufs_obj_file_mapping/?format=json
:param class_inst:
:return:
"""
return create_tastypie_resource_class(class_inst)() | cba76e51073612124c5cd968c9360e9c4748d604 | 4,593 |
def make_collector(entries):
""" Creates a function that collects the location data from openLCA. """
def fn(loc):
entry = [loc.getCode(), loc.getName(), loc.getRefId()]
entries.append(entry)
return fn | 83fb167c38626fde79262a32f500b33a72ab8308 | 4,594 |
def apiname(funcname):
""" Define what name the API uses, the short or the gl version.
"""
if funcname.startswith('gl'):
return funcname
else:
if funcname.startswith('_'):
return '_gl' + funcname[1].upper() + funcname[2:]
else:
return 'gl' + funcname[0].upper() + funcname[1:] | 06575fce76ac02990c973a6dd17ff177ae5e3ddc | 4,595 |
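For instance:

print(apiname("glDrawArrays"))  # glDrawArrays
print(apiname("drawArrays"))    # glDrawArrays
print(apiname("_drawArrays"))   # _glDrawArrays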
import numpy as np
def add_numeric_gene_pos(gene_info):
"""
Add numeric gene (start) genomic position to a gene_info dataframe
"""
gene_chr_numeric = gene_info['chr']
gene_chr_numeric = ['23' if x == 'X' else x for x in gene_chr_numeric]
gene_chr_numeric = ['24' if x == 'Y' else x for x in gene_chr_numeric]
gene_start_vec = gene_info['start']
gene_start_vec = [str(x).zfill(10) for x in gene_start_vec]
gene_pos_numeric = [x + '.' + y for x, y in zip(gene_chr_numeric, gene_start_vec)]
gene_pos_numeric = np.array([float(x) for x in gene_pos_numeric])
gene_info['genome_pos_numeric'] = gene_pos_numeric
return gene_info | ab77e6c3a1f6e8d780f5b83a3beb4d94eaf8198b | 4,596 |
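A small illustration on a hypothetical two-gene table: chromosome X maps to 23, and the start position becomes the fractional part after zero-padding to ten digits.

import pandas as pd

genes = pd.DataFrame({"chr": ["1", "X"], "start": [123456, 789]})
print(add_numeric_gene_pos(genes)["genome_pos_numeric"].tolist())
# [1.0000123456, 23.0000000789]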
import pathlib
def read_list_from_file(filename: str) -> set:
"""Build a set from a simple multiline text file.
Args:
filename: name of the text file
Returns:
a set of the unique lines from the file
"""
filepath = pathlib.Path(__file__).parent.joinpath(filename)
lines = filepath.read_text().splitlines()
return set(lines) | c6fd5f80e05cc74bad600a7af21e36b5bd672b63 | 4,597 |
def parseAnswerA(answer, index, data):
"""
parseAnswerA(data): Grab our IP address from an answer to an A query
"""
retval = {}
text = (str(answer[0]) + "." + str(answer[1])
+ "." + str(answer[2]) + "." + str(answer[3]))
retval["ip"] = text
#
# TODO: There may be pointers even for A responses. Will have to check into this later.
#
retval["sanity"] = []
return(retval, text) | 6cf1f01b6584219644093d7f0a1a730262b03b32 | 4,598 |
import os
def _get_files(data_path, modality, img_or_label):
"""Gets files for the specified data type and dataset split.
Args:
data: String, desired data ('image' or 'label').
dataset_split: String, dataset split ('train', 'val', 'test')
Returns:
A list of sorted file names or None when getting label for
test set.
"""
if "CT" in modality:
subject_path = os.path.join(data_path, _FOLDERS_MAP[img_or_label])
elif "MR" in modality:
subject_path = os.path.join(data_path, _MODALITY_MAP[modality][1], _FOLDERS_MAP[img_or_label])
if "MR_T1" in modality and _FOLDERS_MAP[img_or_label]==_FOLDERS_MAP["image"]:
subject_path = os.path.join(subject_path, _MODALITY_MAP[modality][2])
else:
raise ValueError("Unknown data modality")
filenames = file_utils.get_file_list(subject_path,
fileStr=_POSTFIX_MAP[modality][img_or_label],
fileExt=[_DATA_FORMAT_MAP[img_or_label]],
sort_files=True)
return filenames | 99687ddb26bed76d2c609848ba79d4ad795b6827 | 4,599 |