Dataset columns:
content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (range 0 to 3.66M)
import re
import argparse


def soundtone_type(value):
    """
    Parse tone sound parameters from args.
    value: 'square:90hz,10s,100%'
    returns: {'waveform': 'square', 'frequency': 90, 'duration': 10, 'amplitude': 1.0}
    """
    abbr_map = {"hz": "frequency", "%": "amplitude", "s": "duration"}
    tone_form, generator_raw_params = value.lower().split(":", 1)
    parameters = {"waveform": tone_form}
    for param in generator_raw_params.split(","):
        match = re.match(r"(\d+)(\D+)$", param)
        if not match:
            raise argparse.ArgumentTypeError(f"invalid tone parameter, format: '{generator_raw_params}'.")
        param_name, param_value = abbr_map[match.group(2)], int(match.group(1))
        if param_name == "amplitude":
            param_value = param_value / 100
        parameters[param_name] = param_value
    return parameters
cdbf98939ac99210c2722653427cd8a7b2e847e2
3,000
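A minimal usage sketch for the soundtone_type parser in the row above; it assumes the function is importable from the surrounding module, and the expected result is shown as a comment.
# Hypothetical usage of soundtone_type from the row above (not part of the dataset row).
spec = "square:90hz,10s,100%"
parsed = soundtone_type(spec)
# parsed == {'waveform': 'square', 'frequency': 90, 'duration': 10, 'amplitude': 1.0}
print(parsed)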
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
    """Computes ROUGE-L (sentence level) of two text collections of sentences.
    http://research.microsoft.com/en-us/um/people/cyl/download/papers/
    rouge-working-note-v1.3.1.pdf

    Calculated according to:
    R_lcs = LCS(X, Y) / m
    P_lcs = LCS(X, Y) / n
    F_lcs = ((1 + beta^2) * R_lcs * P_lcs) / (R_lcs + (beta^2) * P_lcs)

    where:
    X = reference summary
    Y = candidate summary
    m = length of reference summary
    n = length of candidate summary

    Args:
      evaluated_sentences: The sentences that have been picked by the summarizer
      reference_sentences: The sentences from the reference set

    Returns:
      A float: F_lcs

    Raises:
      ValueError: raises exception if a param has len <= 0
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")
    reference_words = _split_into_words(reference_sentences)
    evaluated_words = _split_into_words(evaluated_sentences)
    m = len(reference_words)
    n = len(evaluated_words)
    lcs = _len_lcs(evaluated_words, reference_words)
    return _f_p_r_lcs(lcs, m, n)
168b3202baa5e8d185d5d181b3b468b810cf92fd
3,001
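A small self-contained sketch of the sentence-level ROUGE-L computation described in the docstring above (LCS-based recall, precision and F-score). The helper names here are illustrative stand-ins for _len_lcs and _f_p_r_lcs, not the originals, and beta is a free choice.
def lcs_len(a, b):
    # classic dynamic-programming longest-common-subsequence length
    table = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i, x in enumerate(a, 1):
        for j, y in enumerate(b, 1):
            table[i][j] = table[i - 1][j - 1] + 1 if x == y else max(table[i - 1][j], table[i][j - 1])
    return table[-1][-1]


def f_lcs(evaluated_words, reference_words, beta=1.0):
    lcs = lcs_len(evaluated_words, reference_words)
    r = lcs / len(reference_words)   # R_lcs = LCS(X, Y) / m
    p = lcs / len(evaluated_words)   # P_lcs = LCS(X, Y) / n
    if r + (beta ** 2) * p == 0:
        return 0.0
    return ((1 + beta ** 2) * r * p) / (r + (beta ** 2) * p)


print(f_lcs("the cat sat on the mat".split(), "the cat was on the mat".split()))  # ~0.833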
import numpy as np from scipy.signal import medfilt, medfilt2d def median(array, width=None, axis=None, even=False): """Replicate the IDL ``MEDIAN()`` function. Parameters ---------- array : array-like Compute the median of this array. width : :class:`int`, optional Size of the neighborhood in which to compute the median (*i.e.*, perform median filtering). If omitted, the median of the whole array is returned. axis : :class:`int`, optional Compute the median over this axis for a multi-dimensional array. If ommitted, the median over the entire array will be returned. If set, this function will behave as though `even` is ``True``. even : :class:`bool`, optional If set to ``True``, the median of arrays with an even number of elements will be the average of the middle two values. Returns ------- array-like The median of the array. Raises ------ :exc:`ValueError` If `width` is set, and the input `array` is not 1 or 2 dimensional. Notes ----- * For arrays with an even number of elements, the :func:`numpy.median` function behaves like ``MEDIAN(array, /EVEN)``, so the absence of the `even` keyword has to turn *off* that behavior. * For median filtering, this uses :func:`scipy.signal.medfilt` and :func:`scipy.signal.medfilt2d` under the hood, but patches up the values on the array boundaries to match the return values of the IDL ``MEDIAN()`` function. """ if width is None: if axis is None: f = array.flatten() if f.size % 2 == 1 or even: return np.median(array) else: i = f.argsort() return f[i[f.size//2]] else: return np.median(array, axis=axis) else: if array.ndim == 1: medarray = medfilt(array, min(width, array.size)) istart = int((width - 1)/2) iend = array.size - int((width + 1)/2) i = np.arange(array.size) w = (i < istart) | (i > iend) medarray[w] = array[w] return medarray elif array.ndim == 2: medarray = medfilt2d(array, min(width, array.size)) istart = int((width-1)/2) iend = (array.shape[0] - int((width+1)/2), array.shape[1] - int((width+1)/2)) i = np.arange(array.shape[0]) j = np.arange(array.shape[1]) w = ((i < istart) | (i > iend[0]), (j < istart) | (j > iend[1])) medarray[w[0], :] = array[w[0], :] medarray[:, w[1]] = array[:, w[1]] return medarray else: raise ValueError("Invalid number of dimensions for input array!")
829d3c00055c57a5368d366dac04731353ace5e6
3,002
def stitch_frame(frames, _): """ Stitching for single frame. Simply returns the frame of the first index in the frames list. """ return frames[0]
833ceb66f9df61e042d1c936c68b8a77566545c4
3,003
def project_add():
    """
    Desc:
        API endpoint for creating a new project.
    """
    form_data = eval(request.get_data(as_text=True))
    pro_name, remark = form_data['projectName'], form_data['remark']
    user_id = get_jwt_identity()
    response = ProjectM().add_project(user_id, pro_name, remark)
    return response
b03a07e129f5c52b70a6db3c687318225090318b
3,004
from datetime import timedelta


def end_of_next_month(dt):
    """ Return the end of the next month """
    month = dt.month + 2
    year = dt.year
    if month > 12:
        next_month = month - 12
        year += 1
    else:
        next_month = month
    return (
        dt.replace(
            year=year,
            month=next_month,
            day=1
        ) - timedelta(days=1)
    )
0ee3ac845275cc0a101f2cd1603d2de268ef9108
3,005
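A self-contained restatement of the trick used above (first day of the month after next, minus one day), with a couple of spot checks on invented dates.
from datetime import date, timedelta


def end_of_next_month_demo(dt):
    # same approach as the row above: first day of month + 2, minus one day
    month, year = dt.month + 2, dt.year
    if month > 12:
        month -= 12
        year += 1
    return dt.replace(year=year, month=month, day=1) - timedelta(days=1)


print(end_of_next_month_demo(date(2024, 1, 15)))   # 2024-02-29 (leap year handled for free)
print(end_of_next_month_demo(date(2024, 11, 3)))   # 2024-12-31 (year rollover in the intermediate month)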
import os import subprocess def get_version(): """ Reads version from git status or PKG-INFO https://gist.github.com/pwithnall/7bc5f320b3bdf418265a """ # noinspection PyUnresolvedReferences git_dir = os.path.join(base_dir, '.git') if os.path.isdir(git_dir): # Get the version using "git describe". cmd = 'git describe --tags --match [0-9]*'.split() try: version = subprocess.check_output(cmd).decode().strip() except subprocess.CalledProcessError: return None # PEP 386 compatibility if '-' in version: version = '.post'.join(version.split('-')[:2]) # Don't declare a version "dirty" merely because a time stamp has # changed. If it is dirty, append a ".dev1" suffix to indicate a # development revision after the release. with open(os.devnull, 'w') as fd_devnull: subprocess.call(['git', 'status'], stdout=fd_devnull, stderr=fd_devnull) cmd = 'git diff-index --name-only HEAD'.split() try: dirty = subprocess.check_output(cmd).decode().strip() except subprocess.CalledProcessError: return None if dirty != '': version += '.dev1' else: # Extract the version from the PKG-INFO file. try: with open('PKG-INFO') as v: version = version_re.search(v.read()).group(1) except OSError: version = None return version
8aa95e3c9206d4fe93d33b16d4889d014d354f68
3,006
import argparse def parse_args(): """Parse the arguments.""" parser = argparse.ArgumentParser( "dashboard", description="Data Visualization for the simulation outcome" ) parser.add_argument( "--datadir", type=str, required=True, help="The path to the simulation data folder.", ) parser.add_argument( "--env_name", type=str, default=None, help="The name of the environment to create.", ) arguments = parser.parse_args() return arguments
747d8a06dc5e519f185eb06634383b73e0aeb3f2
3,007
def test_build_dynamic__with_location_mobility_data(monkeypatch): """ Ensure dynamic mixing matrix can use location-based mobility data set by the user + Google. """ def get_fake_mobility_data(*args, **kwargs): vals = {"work": [1, 1.5, 1.3, 1.1]} days = [0, 1, 2, 3] return vals, days monkeypatch.setattr(mobility, "get_mobility_data", get_fake_mobility_data) #monkeypatch.setattr(location_adjuster, "get_country_mixing_matrix", _get_country_mixing_matrix) mobility_params = { "mixing": { "school": { "append": False, "times": get_date_from_base([0, 1, 2, 3]), "values": [1, 0.5, 0.3, 0.1], } }, "age_mixing": None, "microdistancing": {}, "square_mobility_effect": False, **UNTESTED_PARAMS, } mm_func = build_dynamic_mixing_matrix( base_matrices=MIXING_MATRICES, country=Country(iso3="AUS"), mobility=Mobility(**mobility_params), ) mm = mm_func(0) assert_allclose(mm, MM, atol=0.01, verbose=True) mm = mm_func(2) expected_mm = MM.copy() + (0.3 - 1) * SCHOOL_MM + (1.3 - 1) * WORK_MM assert_allclose(mm, expected_mm, atol=0.01, verbose=True)
a85722c24f57918f16e35ee2ae57cefdd23824fb
3,008
def margin_to_brightness(margin, max_lead=30, pct_pts_base=0):
    """Tweak max_lead and pct_pts_base to get the desired brightness range."""
    return int((abs(margin) / max_lead) * 100) + pct_pts_base
d6f101c52ddee9f520e36e31fac7042e0aba3992
3,009
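A short worked example of the brightness mapping above, assuming the function is in scope; the margins are made-up values.
# Hypothetical inputs for margin_to_brightness from the row above.
for margin in (0, 15, -15, 30):
    print(margin, margin_to_brightness(margin))
# 0 -> 0, 15 -> 50, -15 -> 50, 30 -> 100 with the defaults max_lead=30, pct_pts_base=0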
def RotateFullImage2D(src, dst, angle, scale=1.0, interp=InterpolationType.linear): """\ Rotate an image resizing the output to fit all the pixels. Rotates an image clockwise by a given angle (in degrees). The values of unknown pixels in the output image are set to 0. The output Image is guaranteed to contain all the pixels of the rotated image. Thus, its dimensions can be different from those of the input one. An optional scale parameter can be provided: if set, the image will also be scaled. :param src: source image :param dst: destination image :param angle: the rotation angle in degrees :param scale: scaling factor :param interp: InterpolationType to be used :return: None """ return _ecvl.RotateFullImage2D(src, dst, angle, scale, interp)
faedd430ae87f32e56c13ad50125051daa5994f3
3,010
def render_practice_text_field_validation1(request):
    """Practice page for text-field validation."""
    template = loader.get_template(
        'webapp1/practice/vuetify-text-field-validation1.html')
    # ----------------------------------- # 1
    # 1. Fetches host1/webapp1/templates/webapp1/practice/vuetify-text-field-validation1.html.
    # ----------------------------------------------------
    context = {}
    return HttpResponse(template.render(context, request))
714093e2b606cab0bbfb094025b5608d781f8aab
3,011
import os def cpsf_critical(request): """ cpsf_critical page, deals with file upload and allows the user to spawn off the update task """ if 'project' not in request.session: return HttpResponseRedirect(reverse('index')) transcription_location = os.path.join(settings.ESTORIA_BASE_LOCATION, request.session['project'], 'transcriptions', 'criticalXML') return _upload_and_process_xml(request, cpsf_critical_xml, transcription_location, 'estoria_app/cpsf_critical.html', 'CPSF Critical')
77282754af35bfecc1bc9ae3d444024453fe4ddb
3,012
def hlc3(high, low, close, offset=None, **kwargs):
    """Indicator: HLC3"""
    # Validate Arguments
    high = verify_series(high)
    low = verify_series(low)
    close = verify_series(close)
    offset = get_offset(offset)

    # Calculate Result
    hlc3 = (high + low + close) / 3.0

    # Offset
    if offset != 0:
        hlc3 = hlc3.shift(offset)

    # Name & Category
    hlc3.name = "HLC3"
    hlc3.category = "overlap"

    return hlc3
16bb3e49f5017f13c84046dce880dd3f022eb15e
3,013
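A self-contained pandas sketch of the same HLC3 (typical price) calculation without the verification helpers used above; the sample prices are invented.
import pandas as pd

high = pd.Series([10.0, 11.0, 12.0])
low = pd.Series([8.0, 9.0, 10.0])
close = pd.Series([9.0, 10.5, 11.0])

hlc3 = (high + low + close) / 3.0   # same core formula as the row above
hlc3_shifted = hlc3.shift(1)        # what a non-zero offset would do
print(hlc3.tolist())                # [9.0, 10.166..., 11.0]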
async def findID(context: Context, dueDateID: str = ""): """Find Due date !ass find subject_name """ if not dueDateID: return await notEnoughArgs(context) try: dueDates = DueDateData().findById(context, dueDateID) if len(dueDates) == 0: return await context.send(Helper.talkLikeABot(f"There is no due date id as : {dueDateID}")) return await context.send(Helper.talkDueDateAsBot(dueDates)) except Exception as e: return await context.send(e)
a6aab2219fcb29e073ccb5d73e440dd7a42163b9
3,014
def front_page() -> HTML: """ Renders the front page """ return render_template("frontPage.html")
2ec04c4c24b1aade9f7389b29bd91985b657fc67
3,015
async def async_setup_entry(hass, entry): """Set up the Samsung TV platform.""" # Initialize bridge data = entry.data.copy() bridge = _async_get_device_bridge(data) if bridge.port is None and bridge.default_port is not None: # For backward compat, set default port for websocket tv data[CONF_PORT] = bridge.default_port hass.config_entries.async_update_entry(entry, data=data) bridge = _async_get_device_bridge(data) def stop_bridge(event): """Stop SamsungTV bridge connection.""" bridge.stop() entry.async_on_unload( hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_bridge) ) hass.data[DOMAIN][entry.entry_id] = bridge hass.config_entries.async_setup_platforms(entry, PLATFORMS) return True
a5e411560c8f3f1e609d6675061a72000984e0ce
3,016
def bring_contact_bonus_list(pb_client, obj_pb_ids, arm_pb_id, table_pb_id): """ For some bring goals, may be useful to also satisfy an object touching table and not touching arm condition. """ correct_contacts = [] for o in obj_pb_ids: o2ee_contact = len(pb_client.getContactPoints(o, arm_pb_id)) > 0 o2t_contact = len(pb_client.getContactPoints(o, table_pb_id)) > 0 correct_contacts.append(not o2ee_contact and o2t_contact) return correct_contacts
6c0033b0bfb1d3f4d08c8ca114855e089fe852f7
3,017
def topic(**kwargs): """ :param to: Topic ID :return: """ return api_request('topic', kwargs)
2d2af8f74db1ffde7732ecff529911b7058154bf
3,018
def load_data(filename: str) -> pd.DataFrame:
    """
    Load city daily temperature dataset and preprocess data.

    Parameters
    ----------
    filename: str
        Path to city daily temperature dataset

    Returns
    -------
    Design matrix and response vector (Temp)
    """
    daily_temp_df = pd.read_csv(filename, parse_dates={'DayOfYear': ['Date']})
    daily_temp_df = clean_data(daily_temp_df)
    return daily_temp_df
381eac22a1c3c0c9ad85d0c416fb1c182429153e
3,019
def group_intents(input_file, intent_file, slot_file):
    """
    Groups the dataset based on the intents and returns it.

    Args:
        input_file : The path to the input file
        intent_file : The path to the intent file
        slot_file : The path to the slot file

    Returns:
        A dict mapping intents to a list of tuples. Each tuple contains an
        input sentence and its corresponding slots for that intent.
    """
    intent_groups = defaultdict(list)

    with open(input_file, 'r') as input_fd, \
         open(intent_file, 'r') as intent_fd, \
         open(slot_file, 'r') as slot_fd:
        for ip, intent, slot in zip(input_fd, intent_fd, slot_fd):
            ip, intent, slot = ip.rstrip(), intent.rstrip(), slot.rstrip()
            intent_groups[intent].append((ip, slot))

    return intent_groups
8db186fc91ddc3adfbc163ef63e66cc408f3b47d
3,020
def _get_path(string):
    # gets file path from variable name
    """
    Gets the path that a variable holds, converts it to start from root (.),
    resolves any symbolic link and returns the converted path.
    """
    varname = string.replace("(long)", "")
    try:
        path = c.VAR_STACK[varname]
    except KeyError:
        if c.verbose:
            print("[-] ERROR: {0} is not a variable holding path".format(varname))
        return 1
    path = _res_path(path)
    return _abspath(path)
794660ed9571dded27f5907c4ac1d9cdc99f41b6
3,021
def create_service_endpoint(service_endpoint_type, authorization_scheme, name,
                            github_access_token=None, github_url=None,
                            azure_rm_tenant_id=None, azure_rm_service_principal_id=None,
                            azure_rm_service_prinicipal_key=None, azure_rm_subscription_id=None,
                            azure_rm_subscription_name=None, organization=None, project=None, detect=None):
    """Create a service endpoint
    :param service_endpoint_type: Type of service endpoint
    :type service_endpoint_type: str
    :param name: Name of service endpoint to create
    :type name: str
    :param authorization_scheme: Authorization to be used in service endpoint creation
        Github service endpoint supports PersonalAccessToken
        AzureRm service endpoint supports ServicePrincipal
    :type authorization_scheme: str
    :param github_access_token: PAT token of github for creating github service endpoint
    :type github_access_token: str
    :param github_url: Url for github for creating service endpoint
    :type github_url: str
    :param azure_rm_tenant_id: tenant id for creating azure rm service endpoint
    :type azure_rm_tenant_id: str
    :param azure_rm_service_principal_id: service principal id for creating azure rm service endpoint
    :type azure_rm_service_principal_id: str
    :param azure_rm_service_prinicipal_key: key/password for service principal used to create azure rm service endpoint
    :type azure_rm_service_prinicipal_key: str
    :param azure_rm_subscription_id: subscription id for azure rm service endpoint
    :type azure_rm_subscription_id: str
    :param azure_rm_subscription_name: name of azure subscription for azure rm service endpoint
    :type azure_rm_subscription_name: str
    :param organization: Azure Devops organization URL. Example: https://dev.azure.com/MyOrganizationName/
    :type organization: str
    :param project: Name or ID of the project.
    :type project: str
    :param detect: Automatically detect organization. Default is "on".
    :type detect: str
    :rtype: :class:`ServiceEndpoint <service_endpoint.v4_1.models.ServiceEndpoint>`
    """
    try:
        organization, project = resolve_instance_and_project(detect=detect,
                                                             organization=organization,
                                                             project=project)
        client = get_service_endpoint_client(organization)
        if (service_endpoint_type == SERVICE_ENDPOINT_TYPE_GITHUB and
                authorization_scheme == SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN):
            service_endpoint_authorization = EndpointAuthorization(
                parameters={'accessToken': github_access_token},
                scheme=SERVICE_ENDPOINT_AUTHORIZATION_PERSONAL_ACCESS_TOKEN)
            service_endpoint_to_create = ServiceEndpoint(
                authorization=service_endpoint_authorization,
                name=name, type=SERVICE_ENDPOINT_TYPE_GITHUB, url=github_url)
            return client.create_service_endpoint(service_endpoint_to_create, project)
        if (service_endpoint_type == SERVICE_ENDPOINT_TYPE_AZURE_RM and
                authorization_scheme == SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL):
            service_endpoint_authorization = EndpointAuthorization(
                parameters={'tenantid': azure_rm_tenant_id,
                            'serviceprincipalid': azure_rm_service_principal_id,
                            'authenticationType': 'spnKey',
                            'serviceprincipalkey': azure_rm_service_prinicipal_key},
                scheme=SERVICE_ENDPOINT_AUTHORIZATION_SERVICE_PRINCIPAL)
            service_endpoint_data = {
                'subscriptionId': azure_rm_subscription_id,
                'subscriptionName': azure_rm_subscription_name,
                'environment': 'AzureCloud',
                'creationMode': 'Manual'
            }
            service_endpoint_to_create = ServiceEndpoint(
                authorization=service_endpoint_authorization,
                data=service_endpoint_data,
                name=name, type=SERVICE_ENDPOINT_TYPE_AZURE_RM,
                url='https://management.azure.com/')
            return client.create_service_endpoint(service_endpoint_to_create, project)
        raise CLIError('This combination of endpoint type is not supported with this authorization scheme.')
    except VstsServiceError as ex:
        raise CLIError(ex)
75ad8fdb237d4dac9105bc33b96273f67482375f
3,022
import math def voronoi_diagram_interpolation(interpolationcellid, id0, id1, voronoiDataset0, voronoiDataset1, centerlines, step, clippingPoints): """Given two Voronoi datasets interpolate the data sets along the centerline. Args: interpolationcellid (int): LineID of the centerline id0 (int): Start ID. id1 (int): Stop ID. voronoiDataset0 (vtkPolyData): First Voronoi dataset. voronoiDataset1 (vtkPolyData): Second Voronoi dataset. centerlines (vtkPolyData): Centerline to interpolate along. step (int): Direction to interpolate clippingPoints (vtkPoints): Location of clipping points. Returns: finalNewVoronoiPoints (vtkPoints): New points to the Voronoi diagram. finalRadiusArray (vtkDoubelArray): Array to hold the radius for each point. """ cellLine = extract_single_line(centerlines, interpolationcellid) startPoint = clippingPoints.GetPoint(id0) endPoint = clippingPoints.GetPoint(id1) startId = cellLine.FindPoint(startPoint) endId = cellLine.FindPoint(endPoint) gapStartId = startId + 1 * step gapEndId = endId - 1 * step arrivalId = gapEndId + 1 * step endSavingInterval = gapEndId + 1 * step numberOfGapPoints = int(math.fabs(gapEndId - gapStartId)) + 1 numberOfInterpolationPoints = voronoiDataset0.GetNumberOfPoints() numberOfCenterlinesPoints = cellLine.GetNumberOfPoints() numberOfAddedPoints = numberOfGapPoints * numberOfInterpolationPoints finalNewVoronoiPoints = vtk.vtkPoints() cellArray = vtk.vtkCellArray() finalRadiusArray = get_vtk_array(radiusArrayName, 1, numberOfAddedPoints) count = 0 for i in range(numberOfInterpolationPoints): voronoiPoint = voronoiDataset0.GetPoint(i) voronoiPointRadius = voronoiDataset0.GetPointData().GetArray(radiusArrayName).GetTuple1(i) centerlinePointLocator = get_vtk_point_locator(cellLine) closestPointId = centerlinePointLocator.FindClosestPoint(voronoiPoint) closestPoint = cellLine.GetPoint(closestPointId) voronoiVector = [0.0, 0.0, 0.0] voronoiVector[0] = voronoiPoint[0] - closestPoint[0] voronoiVector[1] = voronoiPoint[1] - closestPoint[1] voronoiVector[2] = voronoiPoint[2] - closestPoint[2] voronoiVectorNorm = vtk.vtkMath.Norm(voronoiVector) rotationAngle = compute_voronoi_vector_to_centerline_angle(closestPointId, voronoiVector, cellLine) PTPoints = vtk.vtkPoints() range_step = 1 if closestPointId < arrivalId else -1 for j in range(closestPointId, arrivalId, range_step): localtangent = [0.0, 0.0, 0.0] newVoronoiVector = [0.0, 0.0, 0.0] newVoronoiPoint = [0.0, 0.0, 0.0] transform = vtk.vtkTransform() point0 = cellLine.GetPoint(j) if (j < numberOfCenterlinesPoints - 1): point1 = [0.0, 0.0, 0.0] cellLine.GetPoint(j + 1, point1) localtangent[0] += point1[0] - point0[0] localtangent[1] += point1[1] - point0[1] localtangent[2] += point1[2] - point0[2] if (j > 0): point2 = [0.0, 0.0, 0.0] cellLine.GetPoint(j - 1, point2) localtangent[0] += point0[0] - point2[0] localtangent[1] += point0[1] - point2[1] localtangent[2] += point0[2] - point2[2] localnormal = cellLine.GetPointData().GetArray(parallelTransportNormalsArrayName).GetTuple3(j) localnormaldot = vtk.vtkMath.Dot(localtangent, localnormal) localtangent[0] -= localnormaldot * localnormal[0] localtangent[1] -= localnormaldot * localnormal[1] localtangent[2] -= localnormaldot * localnormal[2] vtk.vtkMath.Normalize(localtangent) transform.RotateWXYZ(rotationAngle, localtangent) transform.TransformNormal(localnormal, newVoronoiVector) vtk.vtkMath.Normalize(newVoronoiVector) newVoronoiPoint[0] = point0[0] + voronoiVectorNorm * newVoronoiVector[0] newVoronoiPoint[1] = point0[1] + voronoiVectorNorm * 
newVoronoiVector[1] newVoronoiPoint[2] = point0[2] + voronoiVectorNorm * newVoronoiVector[2] PTPoints.InsertNextPoint(newVoronoiPoint) numberOfPTPoints = PTPoints.GetNumberOfPoints() lastPTPoint = PTPoints.GetPoint(PTPoints.GetNumberOfPoints() - 1) voronoiPointLocator = get_vtk_point_locator(voronoiDataset1) arrivalVoronoiPointId = voronoiPointLocator.FindClosestPoint(lastPTPoint) arrivalVoronoiPoint = voronoiDataset1.GetPoint(arrivalVoronoiPointId) arrivalVoronoiPointRadius = voronoiDataset1.GetPointData().GetArray(radiusArrayName).GetTuple1( arrivalVoronoiPointId) arrivalCenterlinePointLocator = get_vtk_point_locator(cellLine) arrivalCenterlineClosestPointId = arrivalCenterlinePointLocator.FindClosestPoint(arrivalVoronoiPoint) arrivalCenterlineClosestPoint = cellLine.GetPoint(arrivalCenterlineClosestPointId) arrivalVoronoiVector = [0.0, 0.0, 0.0] arrivalVoronoiVector[0] = arrivalVoronoiPoint[0] - arrivalCenterlineClosestPoint[0] arrivalVoronoiVector[1] = arrivalVoronoiPoint[1] - arrivalCenterlineClosestPoint[1] arrivalVoronoiVector[2] = arrivalVoronoiPoint[2] - arrivalCenterlineClosestPoint[2] arrivalVoronoiVectorNorm = vtk.vtkMath.Norm(arrivalVoronoiVector) radiusArray = compute_spline(voronoiPointRadius, arrivalVoronoiPointRadius, numberOfPTPoints) vectorNormArray = compute_spline(voronoiVectorNorm, arrivalVoronoiVectorNorm, numberOfPTPoints) pointsToGap = (gapStartId - closestPointId) * step if pointsToGap < 0 or PTPoints.GetNumberOfPoints() <= pointsToGap: continue for k in range(gapStartId, endSavingInterval, step): ptpoint = PTPoints.GetPoint(pointsToGap) clpoint = cellLine.GetPoint(k) vector = [0.0, 0.0, 0.0] vector[0] = ptpoint[0] - clpoint[0] vector[1] = ptpoint[1] - clpoint[1] vector[2] = ptpoint[2] - clpoint[2] vtk.vtkMath.Normalize(vector) norm = vectorNormArray.GetTuple1(pointsToGap) newvector = [0.0, 0.0, 0.0] newvector[0] = norm * vector[0] newvector[1] = norm * vector[1] newvector[2] = norm * vector[2] newpoint = [0.0, 0.0, 0.0] newpoint[0] = clpoint[0] + newvector[0] newpoint[1] = clpoint[1] + newvector[1] newpoint[2] = clpoint[2] + newvector[2] finalNewVoronoiPoints.InsertNextPoint(newpoint) cellArray.InsertNextCell(1) cellArray.InsertCellPoint(count) if pointsToGap > 0: finalRadiusArray.SetTuple1(count, radiusArray.GetTuple1(pointsToGap)) pointsToGap += 1 count += 1 return finalNewVoronoiPoints, finalRadiusArray
13c719b89f737bac625cf76c7d64a5c34a856fdd
3,023
def plot_kde_matrix(df, w, limits=None, colorbar=True, refval=None): """ Plot a KDE matrix. Parameters ---------- df: Pandas Dataframe The rows are the observations, the columns the variables. w: np.narray The corresponding weights. colorbar: bool Whether to plot the colorbars or not. limits: dictionary, optional Dictionary of the form ``{"name": (lower_limit, upper_limit)}``. refval: dict, optional A reference parameter to be shown in the plots (e.g. the underlying ground truth parameter used to simulate the data for testing purposes). Default: None. """ grid = sns.PairGrid(df, diag_sharey=False) if limits is None: limits = {} default = (None, None) def off_diagonal(x, y, **kwargs): df = pd.concat((x, y), axis=1) plot_kde_2d(df, w, x.name, y.name, xmin=limits.get(x.name, default)[0], xmax=limits.get(x.name, default)[1], ymin=limits.get(y.name, default)[0], ymax=limits.get(y.name, default)[1], ax=plt.gca(), title=False, colorbar=colorbar, refval=refval) def scatter(x, y, **kwargs): alpha = w / w.max() colors = np.zeros((alpha.size, 4)) colors[:, 3] = alpha plt.gca().scatter(x, y, color="k") if refval is not None: plt.gca().scatter([refval[x.name]], [refval[y.name]], color='C1') plt.gca().set_xlim(*limits.get(x.name, default)) plt.gca().set_ylim(*limits.get(y.name, default)) def diagonal(x, **kwargs): df = pd.concat((x,), axis=1) plot_kde_1d(df, w, x.name, xmin=limits.get(x.name, default)[0], xmax=limits.get(x.name, default)[1], ax=plt.gca(), refval=refval) grid.map_diag(diagonal) grid.map_upper(scatter) grid.map_lower(off_diagonal) return grid
a0272a0f819fc5bca6144c9a8293f29f415327b8
3,024
from typing import Optional import os def get_root(user_id: Optional[int]) -> str: """ Return the absolute path to the current authenticated user's data storage root directory :param user_id: current user ID (None if user auth is disabled) :return: user's data storage path """ root = app.config['DATA_FILE_ROOT'] if user_id: root = os.path.join(root, str(user_id)) return os.path.abspath(os.path.expanduser(root))
ce8dad4ea50534bba61948e064e47d8a22ed874f
3,025
import requests
import os


def download_model(model_id, file_format="json", save=True, path="."):
    """
    Download models from BiGG. You can choose to save the file or to return the JSON data.

    Parameters
    ----------
    model_id : str
        A valid id for a model in BiGG.
    file_format : str
        If you want to save the file, you can import the model in the following formats:
            1. json (JSON format)
            2. xml (SBML)
            3. xml.gz (SBML compressed)
            4. mat (MATLAB)
    save : bool
        If True, writes the model to a file with the model name (the path can be specified).
    path : str
        Specifies in which folder the model should be written if *save* is True.

    Returns
    -------
    model : Model
        If save is False, it returns the parsed model.
        If save is True, it saves the model in the requested format.

    Raises
    ------
    requests.HTTPError
        If the request has failed.
    """
    if save:
        response = requests.get("http://bigg.ucsd.edu/static/models/%s.%s" % (model_id, file_format), stream=True)
        response.raise_for_status()
        with open(os.path.join(path, "%s.%s" % (model_id, file_format)), "wb") as model_file:
            for block in response.iter_content(1024):
                model_file.write(block)
    else:
        response = requests.get("http://bigg.ucsd.edu/static/models/%s.json" % model_id, stream=True)
        response.raise_for_status()
        return model_from_dict(response.json())
b95c8b1231ef0907044649856045aeef7c0b70eb
3,026
import os def open_expand(file_path, *args, **kwargs): """ Allows to use '~' in file_path. """ return open(os.path.expanduser(file_path), *args, **kwargs)
6ad3d6ae98bdb2295e66f4d52a8282dfd3162a3d
3,027
import os def get_app(): """load API modules and return the WSGI application""" global get_app, _app, login_manager _app = Flask(__name__, instance_relative_config=True, instance_path=os.environ.get('UKNOW_CONFIG')) _app.config.from_object(DefaultConfig()) _app.secret_key = 'WTF is this!!' # Should have this to work login_manager = LoginManager() login_manager.init_app(_app) import_all_modules(__file__, __name__) get_app = lambda: _app return _app
9f9715e928b6b4829d5fc2cb53b72298858282a5
3,028
import logging def _submit_to_all_logs(log_list, certs_chain): """Submits the chain to all logs in log_list and validates SCTs.""" log_id_to_verifier = _map_log_id_to_verifier(log_list) chain_der = [c.to_der() for c in certs_chain] raw_scts_for_cert = [] for log_url in log_list.keys(): res = _submit_to_single_log(log_url, chain_der) if res: raw_scts_for_cert.append(res) else: logging.info("No SCT from log %s", log_url) validated_scts = [] for raw_sct in raw_scts_for_cert: key_id = raw_sct.id.key_id try: log_id_to_verifier[key_id].verify_sct(raw_sct, certs_chain) validated_scts.append(raw_sct) except error.SignatureError as err: logging.warning( 'Discarding SCT from log_id %s which does not validate: %s', key_id.encode('hex'), err) except KeyError as err: logging.warning('Could not find CT log validator for log_id %s. ' 'The log key for this log is probably misconfigured.', key_id.encode('hex')) scts_for_cert = [tls_message.encode(proto_sct) for proto_sct in validated_scts if proto_sct] sct_list = client_pb2.SignedCertificateTimestampList() sct_list.sct_list.extend(scts_for_cert) return tls_message.encode(sct_list)
16081a2ddbc924c0490af5f7c3ffc625300486cd
3,029
def update(oid, landingZoneProgressItemDetails): """ This function updates an existing landingZoneProgressItem in the landingZoneProgressItem list :param id: id of the landingZoneProgressItem to update in the landingZoneProgressItem list :param landingZoneProgressItem: landingZoneProgressItem to update :return: updated landingZoneProgressItem """ app.logger.debug("landingZoneProgressItem: ") app.logger.debug(pformat(landingZoneProgressItemDetails)) app.logger.debug(oid) app.logger.debug(landingZoneProgressItemDetails["id"]) if landingZoneProgressItemDetails["id"] != oid: abort(400, "Key mismatch in path and body") # Does the landingZoneProgressItem exist in landingZoneProgressItems? existing_landingZoneProgressItem = ( db.session.query(LandingZoneProgressItem) .filter(LandingZoneProgressItem.id == oid) .one_or_none() ) # Does landingZoneProgressItem exist? if existing_landingZoneProgressItem is not None: schema = LandingZoneProgressItemSchema() update_landingZoneProgressItem = schema.load( landingZoneProgressItemDetails, session=db.session ) update_landingZoneProgressItem.id = oid db.session.merge(update_landingZoneProgressItem) db.session.commit() # return the updated landingZoneProgressItem in the response data = schema.dump(update_landingZoneProgressItem) app.logger.debug("landingZoneProgressItem data:") app.logger.debug(pformat(data)) return data, 200 # otherwise, nope, landingZoneProgressItem doesn't exist, so that's an error else: abort(404, "LandingZoneProgressItem not found")
af9d09aab0c5dfb3defea4db428265a71c60ed76
3,030
import sqlite3


def create_connection(db_file: str):
    """Create database file."""
    conn = None
    try:
        conn = sqlite3.connect(db_file)
        print(sqlite3.version)
    except sqlite3.Error as e:
        print(e)
    return conn
a50dff80de36e391aeea7f6867cc85334f4bc690
3,031
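A short usage note for create_connection above, assuming the function is in scope; ":memory:" is sqlite3's built-in in-memory database, handy for a quick check.
conn = create_connection(":memory:")   # in-memory database, nothing written to disk
if conn is not None:
    conn.execute("CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT)")
    conn.close()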
def _simplex_gradient(op, grad_wrt_weight): """Register gradient for SimplexInterpolationOp.""" grad_wrt_input = simplex_gradient( input=op.inputs[0], weight=op.outputs[0], grad_wrt_weight=grad_wrt_weight, lattice_sizes=op.get_attr('lattice_sizes')) return [grad_wrt_input]
58447f073cdf4feb6e1f115b039c8573ab1048ca
3,032
def generate_experiment(): """ Generate elastic scattering experiments which are reasonable but random """ exp_dict = {} exp_keys = ['qmin', 'qmax', 'qbin', 'rmin', 'rmax', 'rstep'] exp_ranges = [(0, 1.5), (19., 25.), (.8, .12), (0., 2.5), (30., 50.), (.005, .015)] for n, k in enumerate(exp_keys): exp_dict[k] = rs.uniform(exp_ranges[n][0], exp_ranges[n][1]) exp_dict['sampling'] = rs.choice(['full', 'ns']) return exp_dict
f913cbfc6f871fa290dd6cdb3e5da874b06243b4
3,033
def connectDB(): """function to start the database connection using MongoClient from pymongo and the connection link from .env file path. Using certifi to provide certificate in order to enable the connection Returns: Cursor: database white-shark """ try: client = MongoClient(f"{MONGO_URI}", tlsCAFile=ca) return client["white-shark"] except: print("Connection failed")
6d04fbc03ed45d5ec2b868dd52270f7dd1d7339d
3,034
import gzip def load_dicom(filename): """Loads in a given dicom file using a pydicom library :param filename: a path to the .dcm.gz or .dcm file :type filename: Union[str, os.path] :return: pydicom.dataset.FileDataset or pydicom.dicomdir.DicomDir :raises TypeError: raised if the file extension does not end with .dcm nor .gz """ if filename.endswith('.dcm'): ds = dicom.dcmread(filename) elif filename.endswith('.gz'): with gzip.open(filename) as fd: ds = dicom.dcmread(fd, force=True) else: raise TypeError ds.file_meta.TransferSyntaxUID = dicom.uid.ImplicitVRLittleEndian return ds
898b2003049dd91d53f57e28208ad82b5449632e
3,035
import torch def load_pytorch_policy(fpath, itr, deterministic=False): """ Load a pytorch policy saved with Spinning Up Logger.""" fname = osp.join(fpath, 'pyt_save', 'model'+itr+'.pt') print('\n\nLoading from %s.\n\n'%fname) model = torch.load(fname) # make function for producing an action given a single state def get_action(x): with torch.no_grad(): x = torch.as_tensor(x, dtype=torch.float32) if deterministic: action = model.pi(x)[0].mean.numpy() else: action = model.act(x) return action return get_action
d368f9b120c78c00e446ab6a4b2b63e893507de7
3,036
def make_server(dashboard): """ Creates the server by mounting various API endpoints and static file content for the dashboard Parameters ---------- dashboard : plsexplain.Dashboard The dashboard instance to server Returns ------- FastAPI The application instance that hosts the dashboard instance. """ app = FastAPI() asset_folder = join(abspath(dirname(dirname(__file__))), "client/dist/images") app.add_api_route("/api/metadata", get_model_metadata(dashboard), methods=["get"]) app.add_api_route("/api/performance", get_model_performance(dashboard), methods=["get"]) app.add_api_route("/api/model/features", get_feature_importance(dashboard), methods=["get"]) app.add_api_route("/api/model/features/{name:str}", get_feature_profile(dashboard), methods=["get"]) app.add_api_route("/api/dataset", get_dataset(dashboard), methods=["get"]) app.add_api_route("/api/predictions/{index:int}/breakdown", get_prediction_breakdown(dashboard), methods=["get"]) app.add_api_route("/api/predictions/{index}/profile/{feature}", get_prediction_profile(dashboard), methods=["get"]) app.mount("/images", StaticFiles(directory=asset_folder), name="static") app.add_api_route("/{sub_path:path}", get_client_app, methods=["get"], response_class=HTMLResponse) return app
a7e7e599ba8166d4a27818dcc21da25bb66a4171
3,037
def bash_complete_line(line, return_line=True, **kwargs): """Provides the completion from the end of the line. Parameters ---------- line : str Line to complete return_line : bool, optional If true (default), will return the entire line, with the completion added. If false, this will instead return the strings to append to the original line. kwargs : optional All other keyword arguments are passed to the bash_completions() function. Returns ------- rtn : set of str Possible completions of prefix """ # set up for completing from the end of the line split = line.split() if len(split) > 1 and not line.endswith(" "): prefix = split[-1] begidx = len(line.rsplit(prefix)[0]) else: prefix = "" begidx = len(line) endidx = len(line) # get completions out, lprefix = bash_completions(prefix, line, begidx, endidx, **kwargs) # reformat output if return_line: preline = line[:-lprefix] rtn = {preline + o for o in out} else: rtn = {o[lprefix:] for o in out} return rtn
571e0822cd7a4d44e19c969072d624123640d1f1
3,038
def use_board(name): """ Use Board. """ _init_pins() return r_eval("pins::use_board(\"" + name + "\")")
5f08450f48fca6ca827383f4a57f006ee6e50836
3,039
def add_parameter(name, initial_value=1.0, **kwargs): """Adds a new global parameter to the model. :param name: the name for the new global parameter :type name: str :param initial_value: optional the initial value of the parameter (defaults to 1) :type initial_value: float :param kwargs: optional parameters, recognized are: * | `model`: to specify the data model to be used (if not specified | the one from :func:`.get_current_model` will be taken) * all other parameters from :func:`set_parameters`. :return: the newly created parameter """ dm = kwargs.get('model', model_io.get_current_model()) assert (isinstance(dm, COPASI.CDataModel)) model = dm.getModel() assert (isinstance(model, COPASI.CModel)) parameter = model.createModelValue(name, initial_value) if parameter is None: raise ValueError('A global parameter named ' + name + ' already exists') set_parameters(name, **kwargs) return parameter
8fa0839f1a38fa78add8ab35b2eb03f0c3d4bbd8
3,040
from samplesheets.models import GenericMaterial def get_sample_libraries(samples, study_tables): """ Return libraries for samples. :param samples: Sample object or a list of Sample objects within a study :param study_tables: Rendered study tables :return: GenericMaterial queryset """ if type(samples) not in [list, QuerySet]: samples = [samples] sample_names = [s.name for s in samples] study = samples[0].study library_names = [] for k, assay_table in study_tables['assays'].items(): sample_idx = get_index_by_header( assay_table, 'name', obj_cls=GenericMaterial, item_type='SAMPLE' ) for row in assay_table['table_data']: if row[sample_idx]['value'] in sample_names: last_name = get_last_material_name(row, assay_table) if last_name not in library_names: library_names.append(last_name) return GenericMaterial.objects.filter( study=study, name__in=library_names ).order_by('name')
284d08b313d982a4b6d6fe9d780f1a668f036455
3,041
def parse_next_frame(data): """ Parse the next packet from this MQTT data stream. """ if not data: return None, b'' if len(data) < 2: # Not enough data yet return None, data packet_type, flag1, flag2, flag3, flag4 = bitstruct.unpack('u4b1b1b1b1', data[0:1]) length = None # Figure out the length of the packet seek_point = 0 seek_multiplier = 1 packet_length = 0 encoded_byte = -1 while (encoded_byte & 128) != 0: seek_point += 1 if len(data) < 1 + seek_point: # Not enough data return None, data encoded_byte, = bitstruct.unpack('u8', data[seek_point:seek_point+1]) packet_length += (encoded_byte & 127) * seek_multiplier seek_multiplier = seek_multiplier * 128 if seek_multiplier > 128 * 128 * 128: raise ParseFailure() # Do we have the whole packet? if len(data) < 1 + seek_point + packet_length: # Not the whole packet yet return None, data # Build the frame frame = Frame( packet_type=PacketType(packet_type), flags=(flag1, flag2, flag3, flag4), body=data[1 + seek_point:packet_length + 1 + seek_point]) # Return the data we didn't consume data = data[1 + seek_point + packet_length:] return frame, data
ce725ce871fdbd45fbf5d7367049171e7001469b
3,042
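The remaining-length field decoded in the parse_next_frame row above uses MQTT's base-128 varint scheme; here is a small self-contained sketch of just that decoding step, without the bitstruct dependency.
def decode_remaining_length(data, offset=1):
    # MQTT remaining length: 7 bits per byte, high bit set means "more bytes follow"
    multiplier, value, consumed = 1, 0, 0
    while True:
        byte = data[offset + consumed]
        consumed += 1
        value += (byte & 127) * multiplier
        if byte & 128 == 0:
            break
        multiplier *= 128
        if multiplier > 128 ** 3:
            raise ValueError("malformed remaining length")
    return value, consumed


print(decode_remaining_length(bytes([0x30, 0xC1, 0x02])))  # (321, 2): 0xC1 0x02 -> 65 + 2*128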
import random def pick_glance_api_server(): """Return which Glance API server to use for the request This method provides a very primitive form of load-balancing suitable for testing and sandbox environments. In production, it would be better to use one IP and route that to a real load-balancer. Returns (host, port) """ host_port = random.choice(FLAGS.glance_api_servers) host, port_str = host_port.split(':') port = int(port_str) return host, port
e32e75b675f0b3e07c71ae172423b6393f213a4d
3,043
def remove_punctuation(transcriptions): """ :param: transcriptions is the dictionary containing text file that has been converted into an array. :return: cleaned string of words This function removes punctuations from the story """ parsed_string = dumps(transcriptions) punctuations = '''[],!.'"\\?''' for char in parsed_string: if char in punctuations: parsed_string = parsed_string.replace(char, '') return parsed_string
5800a97a2a232f41161c9c8357cd826212d8302e
3,044
def snakify(str_: str) -> str: """Convert a string to snake case Args: str_: The string to convert """ return str_.replace(" ", "_").lower()
c40d972fc99f2cb99f3c2b4a83296e793018c32b
3,045
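A doctest-style check of snakify above, assuming it is in scope.
print(snakify("Hello World Example"))   # hello_world_example
print(snakify("already_snake"))         # already_snake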
def search_images( project, image_name_prefix=None, annotation_status=None, return_metadata=False ): """Search images by name_prefix (case-insensitive) and annotation status :param project: project name or folder path (e.g., "project1/folder1") :type project: str :param image_name_prefix: image name prefix for search :type image_name_prefix: str :param annotation_status: if not None, annotation statuses of images to filter, should be one of NotStarted InProgress QualityCheck Returned Completed Skipped :type annotation_status: str :param return_metadata: return metadata of images instead of names :type return_metadata: bool :return: metadata of found images or image names :rtype: list of dicts or strs """ project, project_folder = get_project_and_folder_metadata(project) team_id, project_id = project["team_id"], project["id"] if annotation_status is not None: annotation_status = common.annotation_status_str_to_int( annotation_status ) if project_folder is not None: project_folder_id = project_folder["id"] else: project_folder_id = get_project_root_folder_id(project) result_list = [] params = { 'team_id': team_id, 'project_id': project_id, 'annotation_status': annotation_status, 'offset': 0, 'folder_id': project_folder_id } if image_name_prefix is not None: params['name'] = image_name_prefix total_got = 0 total_images = 0 while True: response = _api.send_request( req_type='GET', path='/images-folders', params=params ) if not response.ok: raise SABaseException( response.status_code, "Couldn't search images " + response.text ) response = response.json() images = response["images"] folders = response["folders"] results_images = images["data"] for r in results_images: if return_metadata: result_list.append(r) else: result_list.append(r["name"]) total_images += len(results_images) if images["count"] <= total_images: break total_got += len(results_images) + len(folders["data"]) params["offset"] = total_got if return_metadata: def process_result(x): x["annotation_status"] = common.annotation_status_int_to_str( x["annotation_status"] ) return x return list(map(process_result, result_list)) else: return result_list
dc5733e0c22419f850592ee2f8dd3b13e177c99b
3,046
def bibtexNoteszotero(bibtex_names): """ params: bibtex_names, {} response, {} return: notes_dict, {} """ # notes_dict = {} notes_dict["itemType"] = "note" notes_dict["relations"] = {} notes_dict["tags"] = [] notes_dict["note"] = bibtex_names["notes"].strip() # return notes_dict
97e30f746f59ee5e1cfed8581a2dc272fc4b477f
3,047
def iliev_test_5(N=10000, Ns=10, L=15. | units.kpc, dt=None): """ prepare iliev test and return SPH and simplex interfaces """ gas, sources = iliev_test_5_ic(N, Ns, L) conv = nbody_system.nbody_to_si(1.0e9 | units.MSun, 1.0 | units.kpc) sph = Fi(conv, use_gl=False, mode='periodic', redirection='none') sph.initialize_code() sph.parameters.use_hydro_flag = True sph.parameters.radiation_flag = False sph.parameters.self_gravity_flag = False sph.parameters.gamma = 1 sph.parameters.isothermal_flag = True sph.parameters.integrate_entropy_flag = False sph.parameters.timestep = dt sph.parameters.verbosity = 0 sph.parameters.pboxsize = 2*L sph.commit_parameters() sph.gas_particles.add_particles(gas) sph.commit_particles() # sph.start_viewer() rad = SimpleX(number_of_workers=1, redirection='none') rad.initialize_code() rad.parameters.box_size = 2*L rad.parameters.hilbert_order = 0 rad.commit_parameters() gas.add_particles(sources) rad.particles.add_particles(gas) rad.commit_particles() return sph, rad
f4675e8c7f51c18cb31644295a1d5945e453de5b
3,048
def run(model, model_params, T, method, method_params, num_iter, tmp_path="/tmp/consistency_check.txt", seed=None, verbose=False, simplified_interface=True): """ Wrapper around the full consistency check pipeline. Parameters ---------- model : str Name of the generative model. Implemented models are: gn, generalized_gn. model_params : dict Parameters of the generative model. T : int Number of generative step. method : str Name of the inference method. Implemented methods are: degree, OD, random_expand, snowball_sampling, biased_snowball_sampling. method_params : dict Parameters of the inference method. num_iter : int Number of repetition of the inference method. Note that all repetitions run on the same model instance. tmp_path : str Location where temporary files will be written. verbose : bool Output logs to stdout. simplified_interface : bool Assume that the generator is compiled with a simplified interface (i.e., not Boost). Returns ------- scores : list of dict A list of scores (one per repetition). Each entry of the list corresponds to a repetition of the method. An entry in the list is a dictionary, whose key is the name of the comparison measure. Warning ------- This function has side-effects. It writes and read from a temporary location (defaulted to /tmp/) to communicate with pre-compiled modules. If multiple instances run at the same time, make sure to pass different temporary paths to each instances. """ # Tests if {model} & available_models == set(): raise NotImplementedError("Model '" + str(model) + "' not implemented.") if {method} & available_methods == set(): raise NotImplementedError("Method '" + str(method) + "' not implemented.") # Generate history generated_history = gn.run(model, model_params, T, verbose=verbose, seed=seed, simplified_interface=simplified_interface) encoded_history, encoding, tag_encoding = obfuscate_history(generated_history, seed=seed) _write_history(encoded_history, tmp_path) # Infer and compute similarity scores = [] for i in range(num_iter): output = im.run(tmp_path, method, method_params, verbose=verbose) if len(generated_history) != len(output): RuntimeError("Length of generated and inferred data don't match.") inferred = deobfuscate_history([x[0] for x in output], encoding, tag_encoding) res = cp.corr(generated_history, [(e, _[1]) for e, _ in zip(inferred, output)]) scores.append(res) # Garbage collection remove(tmp_path) return scores
152eb33e3e6c68306c3f965f734bc8d7ddeb4010
3,049
def clean_street(address: str) -> str: """ Function to clean street strings. """ address = address.lower() address = _standardize_street(address) address = _abb_replace(address) address = _ordinal_rep(address) if address in SPECIAL_CASES.keys(): # Special cases address = SPECIAL_CASES[address] return address
af166bd9ebd51e1a157135587c4efdb6469181ea
3,050
def unroll_policy_for_eval( sess, env, inputs_feed, prev_state_feed, policy_outputs, number_of_steps, output_folder, ): """unrolls the policy for testing. Args: sess: tf.Session env: The environment. inputs_feed: dictionary of placeholder for the input modalities. prev_state_feed: placeholder for the input to the prev_state of the model. policy_outputs: tensor that contains outputs of the policy. number_of_steps: maximum number of unrolling steps. output_folder: output_folder where the function writes a dictionary of detailed information about the path. The dictionary keys are 'states' and 'distance'. The value for 'states' is the list of states that the agent goes along the path. The value for 'distance' contains the length of shortest path to the goal at each step. Returns: states: list of states along the path. distance: list of distances along the path. """ prev_state = [ np.zeros((1, FLAGS.lstm_cell_size), dtype=np.float32) for _ in range(2) ] prev_action = np.zeros((1, 1, FLAGS.action_size + 1), dtype=np.float32) obs = env.reset() distances_to_goal = [] states = [] unique_id = '{}_{}'.format(env.cur_image_id(), env.goal_string) for _ in range(number_of_steps): distances_to_goal.append( np.min([ len( nx.shortest_path(env.graph, env.pose_to_vertex(env.state()), env.pose_to_vertex(target_view))) for target_view in env.targets() ])) states.append(env.state()) feed_dict = {inputs_feed[mtype]: [[obs[mtype]]] for mtype in inputs_feed} feed_dict[prev_state_feed[0]] = prev_state[0] feed_dict[prev_state_feed[1]] = prev_state[1] action_values, prev_state = sess.run(policy_outputs, feed_dict=feed_dict) chosen_action = np.argmax(action_values[0]) obs, _, done, info = env.step(np.int32(chosen_action)) prev_action[0][0][chosen_action] = 1. prev_action[0][0][-1] = float(info['success']) # If the agent chooses action stop or the number of steps exceeeded # env._episode_length. if done: break # logging.info('distance = %d, id = %s, #steps = %d', distances_to_goal[-1], output_path = os.path.join(output_folder, unique_id + '.npy') with tf.gfile.Open(output_path, 'w') as f: print 'saving path information to {}'.format(output_path) np.save(f, {'states': states, 'distance': distances_to_goal}) return states, distances_to_goal
59979b74f7ff7dcdaf7c875ce93d3333f467cd0d
3,051
import time
import os


def get_result_filename(params, commit=''):
    """
    Build the result file name from the current time and key parameters.
    :return: path of the result file
    """
    save_result_dir = params['test_save_dir']
    batch_size = params['batch_size']
    epochs = params['epochs']
    max_length_inp = params['max_dec_len']
    embedding_dim = params['embed_size']
    now_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    filename = now_time + '_batch_size_{}_epochs_{}_max_length_inp_{}_embedding_dim_{}{}.csv'.format(
        batch_size, epochs, max_length_inp, embedding_dim, commit)
    result_save_path = os.path.join(save_result_dir, filename)
    return result_save_path
51b79d6a5850b50fcb41847ba16acfc12d7c323a
3,052
def is_iterable(o: any) -> bool: """ Checks if `o` is iterable Parameters ---------- o : any The value to be checked. Examples -------- >>> is_iterable(list(range(5))) True >>> is_iterable(5) False >>> is_iterable('hello world') True >>> is_iterable(None) False """ try: _ = iter(o) except TypeError: return False return True
f3124d5ead76977c45899c589e0c6873abafd773
3,053
def fit(init_file, semipar=False): """ """ check_presence_init(init_file) dict_ = read(init_file) # Perform some consistency checks given the user's request check_presence_estimation_dataset(dict_) check_initialization_dict(dict_) # Semiparametric Model if semipar is True: quantiles, mte_u, X, b1_b0 = semipar_fit(init_file) # change to dict_ # Construct MTE # Calculate the MTE component that depends on X mte_x = np.dot(X, b1_b0) # Put the MTE together mte = mte_x.mean(axis=0) + mte_u # Accounting for variation in X mte_min = np.min(mte_x) + mte_u mte_max = np.max(mte_x) + mte_u rslt = { "quantiles": quantiles, "mte": mte, "mte_x": mte_x, "mte_u": mte_u, "mte_min": mte_min, "mte_max": mte_max, "X": X, "b1-b0": b1_b0, } # Parametric Normal Model else: check_par(dict_) rslt = par_fit(dict_) return rslt
05d484c0aae6e739881714eb7ec81c982503cf15
3,054
def date2gpswd(date): """Convert date to GPS week and day of week, return int tuple (week, day). Example: >>> from datetime import date >>> date2gpswd(date(2017, 5, 17)) (1949, 3) >>> date2gpswd(date(1917, 5, 17)) Traceback (most recent call last): ... ValueError: Invalid date: 1917-05-17, too early. """ return __date2weeksday(date, GPS_START_DATE)
29000e900ffb743b29d41fa752fc4da5f470e1b8
3,055
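A self-contained sketch of the week/day arithmetic behind date2gpswd above, using the GPS epoch of 1980-01-06; it mirrors the docstring example rather than the hidden __date2weeksday helper.
from datetime import date

GPS_EPOCH = date(1980, 1, 6)


def date_to_gps_week_day(d):
    if d < GPS_EPOCH:
        raise ValueError("Invalid date: {:%Y-%m-%d}, too early.".format(d))
    delta = (d - GPS_EPOCH).days
    return delta // 7, delta % 7


print(date_to_gps_week_day(date(2017, 5, 17)))  # (1949, 3), matching the doctest above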
import html def __make_sliders(no, f): """Create dynamic sliders for a specific field""" style = {'width':'20%', 'display': 'none'} return html.Div(id={'index': f'Slider_{no}', 'type':'slider'}, children=[__make_slider(no, i) for i in range(1,f+1)], style=style)
b4fb97042e22d06e903f77a13381f9323acacacf
3,056
def kurtosis(iterable, sample=False): """ Returns the degree of peakedness of the given list of values: > 0.0 => sharper peak around mean(list) = more infrequent, extreme values, < 0.0 => wider peak around mean(list), = 0.0 => normal distribution, = -3 => flat """ a = iterable if isinstance(iterable, list) else list(iterable) return moment(a, 4, sample) / (moment(a, 2, sample) ** 2.0 or 1) - 3
ba53f2425de5ffbf6cff0724e7128953554c829b
3,057
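A self-contained version of the moment-based excess-kurtosis formula in the kurtosis row above; the original relies on a moment() helper not shown here, and this sketch uses population moments with no sample correction.
def central_moment(xs, k):
    mu = sum(xs) / len(xs)
    return sum((x - mu) ** k for x in xs) / len(xs)


def excess_kurtosis(xs):
    # same formula as the row above: m4 / m2^2 - 3
    m2 = central_moment(xs, 2)
    m4 = central_moment(xs, 4)
    return m4 / (m2 ** 2 or 1) - 3


print(excess_kurtosis([1, 2, 3, 4, 5]))  # -1.3, the flat (platykurtic) end of the scale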
def gen_data(shape, dtype, epsilon): """Generate data for testing the op.""" var = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype) m = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype) v = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype) grad = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype) lr = np.random.rand(1).astype(dtype) beta1 = np.random.rand(1).astype(dtype) beta2 = np.random.rand(1).astype(dtype) beta1_power = beta1 * beta1 inputs = [var, m, v, grad, lr, beta1, beta1_power, beta2] one = np.array([1]).astype(dtype) epsilon = np.array([epsilon]).astype(dtype) out_m = beta1 * m + (one - beta1) * grad out_v = np.maximum(beta2 * v, np.abs(grad)) out_var = var - lr * out_m / ((one - beta1_power) * (out_v + epsilon)) expects = [out_var, out_m, out_v] args = inputs return inputs, expects, args
75f790eda84ab2e718d504d26a303a6639775829
3,058
def factor_tmom_T1_RTN_60(df: pd.DataFrame): """ factor example """ factor = df['return'].rolling(60).sum() return factor
53b5700902bf409015f4e1c2063f741cea3736ee
3,059
def get_dataset_info(dataset_name='mnist'): """Method to return dataset information for a specific dataset_name. Args: dataset_name: a string representing the dataset to be loaded using tfds Returns: A dictionary of relevant information for the loaded dataset. """ ds_info = tfds.builder(dataset_name).info dataset_information = { 'num_classes': ds_info.features['label'].num_classes, 'data_shape': ds_info.features['image'].shape, 'train_num_examples': ds_info.splits['train'].num_examples, } return dataset_information
b4e36c966a34a3eacd327484e8d88d54303c0ea8
3,060
def full_model(mode, hparams): """Make a clause search model including input pipeline. Args: mode: Either 'train' or 'eval'. hparams: Hyperparameters. See default_hparams for details. Returns: logits, labels Raises: ValueError: If the model returns badly shaped tensors. """ if hparams.use_averages: raise NotImplementedError('Figure out how to eval with Polyak averaging') kind, model = all_models.make_model(name=hparams.model, mode=mode, hparams=hparams, vocab=FLAGS.vocab) batch_size = mode_batch_size(mode, hparams) if kind == 'sequence': # Read _, conjectures, clauses, labels = inputs.sequence_example_batch( mode=mode, batch_size=batch_size, shuffle=True) clauses = tf.reshape(clauses, [2 * batch_size, -1]) labels = tf.reshape(labels, [2 * batch_size]) # Embed vocab_size, _ = inputs.read_vocab(FLAGS.vocab) conjectures, clauses = model_utils.shared_embedding_layer( (conjectures, clauses), dim=hparams.embedding_size, size=vocab_size) # Classify conjectures = model.conjecture_embedding(conjectures) conjectures = tf.reshape( tf.tile(tf.reshape(conjectures, [batch_size, 1, -1]), [1, 2, 1]), [2 * batch_size, -1]) clauses = model.axiom_embedding(clauses) logits = model.classifier(conjectures, clauses) elif kind == 'tree': examples = inputs.proto_batch(mode=mode, batch_size=batch_size) def weave(**ops): return clause_loom.weave_clauses( examples=examples, vocab=FLAGS.vocab, **ops) logits, labels = model(weave) elif kind == 'fast': examples = inputs.proto_batch(mode=mode, batch_size=batch_size) conjecture_sizes, conjecture_flat, clauses, labels = ( gen_clause_ops.random_clauses_as_fast_clause( examples, vocab=FLAGS.vocab)) conjectures = jagged.Jagged(conjecture_sizes, conjecture_flat) logits = model(conjectures, clauses) # Done! return fix_logits(kind, logits), labels
3ab3a089628f9460b9f1ae9800ec9003fdae5d17
3,061
def aggregatePredictions(df_pred, threshold=0.8): """ Aggregates probabilistic predictions, choosing the state with the largest probability, if it exceeds the threshold. :param pd.DataFrame df_pred: columns: state rows: instance values: float :param float threshold: :return pd.Series: index: instance values: state or np.nan if below threshold """ MISSING = -1 columns = df_pred.columns values = [] df = df_pred.applymap(lambda v: 1 if v >= threshold else MISSING) for idx, row in df_pred.iterrows(): row_list = row.tolist() pos = row_list.index(max(row_list)) values.append(columns[pos]) ser = pd.Series(values, index=df_pred.index) ser = ser.apply(lambda v: np.nan if v == MISSING else v) return ser
a5d8efbe24d45279e80ff461e900dd3ac4921659
3,062
import os def GetLocalInstanceConfig(local_instance_id): """Get the path of instance config. Args: local_instance_id: Integer of instance id. Return: String, path of cf runtime config. """ cfg_path = os.path.join(GetLocalInstanceRuntimeDir(local_instance_id), constants.CUTTLEFISH_CONFIG_FILE) if os.path.isfile(cfg_path): return cfg_path return None
4f550234a1defe41995af8fc4bea5e73c001e157
3,063
from typing import Any def is_input_element(obj: Any) -> bool: """ Returns True, if the given object is an :class:`.InputElement`, or a subclass of InputElement. """ return isinstance(obj, InputElement)
c3fbaea9588d40e2fa370aab32688c7e926bd265
3,064
import math


def line_intersects_grid(p0, p1, grid, grid_cell_size=1):
    """ Performs a line/grid intersection, finding the "super cover"
        of a line and seeing if any of the grid cells are occupied.
        The line runs between (x0,y0) and (x1,y1), and (0,0) is the
        top-left corner of the top-left grid cell.

        >>> line_intersects_grid((0,0),(2,2),[[0,0,0],[0,1,0],[0,0,0]])
        True
        >>> line_intersects_grid((0,0),(0.99,2),[[0,0,0],[0,1,0],[0,0,0]])
        False
    """
    (x0, y0), (x1, y1) = p0, p1
    grid_cell_size = float(grid_cell_size)
    x0 = x0 / grid_cell_size
    x1 = x1 / grid_cell_size
    y0 = y0 / grid_cell_size
    y1 = y1 / grid_cell_size
    dx = abs(x1 - x0)
    dy = abs(y1 - y0)
    x = int(math.floor(x0))
    y = int(math.floor(y0))
    if dx != 0:
        dt_dx = 1.0 / dx
    else:
        dt_dx = float("inf")
    if dy != 0:
        dt_dy = 1.0 / dy
    else:
        dt_dy = float("inf")
    t = 0.0
    n = 1
    if (dx == 0):
        x_inc = 0
        t_next_horizontal = dt_dx
    elif (x1 > x0):
        x_inc = 1
        n += int(math.floor(x1)) - x
        t_next_horizontal = (math.floor(x0) + 1 - x0) * dt_dx
    else:
        x_inc = -1
        n += x - int(math.floor(x1))
        t_next_horizontal = (x0 - math.floor(x0)) * dt_dx
    if (dy == 0):
        y_inc = 0
        t_next_vertical = dt_dy
    elif (y1 > y0):
        y_inc = 1
        n += int(math.floor(y1)) - y
        t_next_vertical = (math.floor(y0) + 1 - y0) * dt_dy
    else:
        y_inc = -1
        n += y - int(math.floor(y1))
        t_next_vertical = (y0 - math.floor(y0)) * dt_dy
    while (n > 0):
        if grid[y][x] == 1:
            return True
        if (t_next_vertical < t_next_horizontal):
            y += y_inc
            t = t_next_vertical
            t_next_vertical += dt_dy
        else:
            x += x_inc
            t = t_next_horizontal
            t_next_horizontal += dt_dx
        n -= 1
    return False
f9710a61bcb101202295e50efb86800e855f00d5
3,065
import numpy as np
from scipy.stats import levy_stable


def SimuGumbel(n, m, theta):
    """
    Gumbel copula simulation.

    Requires:
        n = number of variables to generate
        m = sample size
        theta = Gumbel copula parameter
    """
    v = [np.random.uniform(0, 1, m) for i in range(0, n)]
    X = levy_stable.rvs(alpha=1/theta, beta=1,
                        scale=(np.cos(np.pi/(2*theta)))**theta, loc=0, size=m)
    phi_t = lambda t: np.exp(-t**(1/theta))
    u = [phi_t(-np.log(v[i])/X) for i in range(0, n)]
    return u
55eba3c327b99b0bd6157b61dff9d161feda0519
3,066
import math


def Norm(x, y):
    """Return the norm (magnitude) of the 2D vector (x, y)."""
    return math.pow(math.pow(x, 2) + math.pow(y, 2), 0.5)
4c161ada3c446d996f6e33be602a9475948f5bf8
3,067
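# Usage sketch for Norm above: the classic 3-4-5 right triangle.
print(Norm(3, 4))   # 5.0
print(Norm(0, -2))  # 2.0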
from typing import Callable, List, Optional, Union

import numpy as np

# `ms_experiment_type`, `Roi` and `_RoiProcessor` are defined elsewhere in the
# surrounding module and are assumed to be in scope here.


def make_roi(ms_experiment: ms_experiment_type, tolerance: float,
             max_missing: int, min_length: int, min_intensity: float,
             multiple_match: str,
             targeted_mz: Optional[np.ndarray] = None,
             start: Optional[int] = None,
             end: Optional[int] = None,
             mz_reduce: Union[str, Callable] = "mean",
             sp_reduce: Union[str, Callable] = "sum",
             mode: Optional[str] = None
             ) -> List[Roi]:
    """
    Make Regions of interest from MS data in centroid mode.
    Used by MSData as the first step of the centWave algorithm.

    Parameters
    ----------
    ms_experiment: pyopenms.MSExperiment
    max_missing : int
        maximum number of missing consecutive values. When a row surpasses
        this number, the roi is considered finished and is added to the roi
        list if it meets the length and intensity criteria.
    min_length : int
        The minimum length of a roi to be considered valid.
    min_intensity : float
        Minimum intensity in a roi to be considered valid.
    tolerance : float
        mz tolerance to connect values across scans
    start : int, optional
        First scan to analyze. If None, starts at scan 0.
    end : int, optional
        Last scan to analyze. If None, uses the last scan number.
    multiple_match : {"closest", "reduce"}
        How to match peaks when there is more than one match. If mode is
        `closest`, then the closest peak is assigned as a match and the
        others are assigned to no match. If mode is `reduce`, then unique
        mz and intensity values are generated using the reduce function in
        `mz_reduce` and `sp_reduce` respectively.
    mz_reduce : "mean" or Callable
        function used to reduce mz values. Can be a function accepting
        numpy arrays and returning numbers. Only used when `multiple_match`
        is reduce. See the following prototype:

        .. code-block:: python

            def mz_reduce(mz_match: np.ndarray) -> float:
                pass

    sp_reduce : {"mean", "sum"} or Callable
        function used to reduce intensity values. Can be a function accepting
        numpy arrays and returning numbers. Only used when `multiple_match`
        is reduce. To use custom functions see the prototype shown on
        `mz_reduce`.
    targeted_mz : numpy.ndarray, optional
        if a list of mz is provided, roi are searched only using this list.
    mode : str, optional
        mode used to create Roi objects.

    Returns
    -------
    roi: list[Roi]

    Notes
    -----
    To create a ROI, m/z values in consecutive scans are connected if they are
    within `tolerance`. If there's more than one possible m/z value to
    connect in the next scan, two different strategies are available, using the
    `multiple_match` parameter: If "closest" is used, then m/z values are
    matched to the closest ones, and the others are used to create new ROI. If
    "reduce" is used, then all values within the tolerance are combined. m/z
    and intensity values are combined using the `mz_reduce` and `sp_reduce`
    parameters respectively. If no matching value has been found in a scan, a
    NaN is added to the ROI. If no matching values are found in `max_missing`
    consecutive scans the ROI is flagged as finished. In this stage, two
    checks are made before the ROI is considered valid:

    1.  The number of non missing values must be higher than `min_length`.
    2.  The maximum intensity value in the ROI must be higher than
        `min_intensity`.

    If the two conditions are met, the ROI is added to the list of valid ROI.
""" if start is None: start = 0 if end is None: end = ms_experiment.getNrSpectra() if targeted_mz is None: mz_seed, _ = ms_experiment.getSpectrum(start).get_peaks() targeted = False else: mz_seed = targeted_mz targeted = True size = end - start rt = np.zeros(size) processor = _RoiProcessor(mz_seed, max_missing=max_missing, min_length=min_length, min_intensity=min_intensity, tolerance=tolerance, multiple_match=multiple_match, mz_reduce=mz_reduce, sp_reduce=sp_reduce, mode=mode) for k_scan in range(start, end): sp = ms_experiment.getSpectrum(k_scan) rt[k_scan - start] = sp.getRT() mz, spint = sp.get_peaks() processor.add(mz, spint, targeted=targeted) processor.append_to_roi(rt, targeted=targeted) # add roi not completed during last scan processor.flag_as_completed() processor.append_to_roi(rt) return processor.roi
f8b3edbe24091082d1d20af6fdd7875449716a43
3,068
def get_all_zcs_containers(session, start=None, limit=None, return_type=None,
                           **kwargs):
    """
    Retrieves details for all Zadara Container Services (ZCS) containers
    configured on the VPSA.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type start: int
    :param start: The offset to start displaying ZCS containers from.
        Optional.

    :type limit: int
    :param limit: The maximum number of ZCS containers to return. Optional.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.
    """
    parameters = verify_start_limit(start, limit)

    path = '/api/containers.json'

    return session.get_api(path=path, parameters=parameters,
                           return_type=return_type, **kwargs)
6d2d6ba7037323174d93d2191aef93c072bd0030
3,069
def jordan_wigner(n): """ Generates the Jordan-Wigner representation of the fermionic creation, annihilation, and Majorana operators for an n-mode system. The convention for the Majorana operators is as follows: c_j=aj^{dag}+aj c_{n+j}=i(aj^{dag}-aj) """ s = ket(2, 0) @ dag(ket(2, 1)) S = su_generators(2) a = {} # Dictionary for the annihilation operators c = {} # Dictionary for the Majorana operators for j in range(1, n + 1): a[j] = tensor([S[3], j - 1], s, [S[0], n - j]) c[j] = dag(a[j]) + a[j] c[n + j] = 1j * (dag(a[j]) - a[j]) return a, c
193a2b91f84e2789b46a8767900938bcac8f83f9
3,070
import logging def make_update(label: str, update_time: str, repeat: str, data: str, news: str) -> list[dict]: """Schedules an update with name 'label' to happen in 'interval' seconds. Updates saved covid data, news and repeats the update depending on the content of the respective parameters. Adds to global 'scheduled_updates' list and returns scheduler queue. """ # Check that at least one option has been chosen if not data and not news: logging.warning("Attempted to schedule update without selecting any options.") return scheduler.queue # Check update will be in at least 5 seconds from current time interval = hhmm_to_seconds(update_time) - hhmm_to_seconds( current_time_hhmm() ) if interval < 5: logging.warning("Attempted to schedule update too soon.") return scheduler.queue # Dictionary to store all information about the update update = { 'title': label, 'content': f"At {update_time} this update will: " } if data: # Schedule data update update['data'] = schedule_covid_updates(interval, label) update['content'] += "update covid data, " logging.info("Covid data update has been scheduled for %s", update_time) if news: # Schedule news update update['news'] = scheduler.enter(interval, 1, update_news, (label,)) update['content'] += "update covid news, " logging.info("News update has been scheduled for %s", update_time) if repeat: # Schedule update to repeat in 24 hrs update['repeat'] = scheduler.enter( 60*60*24, 1, make_update, (label, update_time, repeat, data, news) ) update['content'] += "repeat in 24 hours, " logging.info("Update %s has been scheduled to repeat itself in 24 hours", label) # Clean up update content to be displayed update['content'] = update['content'][ :len( update['content'] ) - 2 ] scheduled_updates.append(update) return scheduler.queue
d284d51695229005650eed58de10297ad200c8e4
3,071
import signal

# `FormatStr`, `maxbuf`, `offset`, `shift`, `checkOutput` and `checkMemoryDump`
# come from the surrounding exploit script (libformatstr) and are assumed to be
# in scope here.


def performTest(name, test):
    """
    Given a series of writes in `test', generate a format string and
    pass it to the vulnerable program. If the writes were successful
    without destroying any other memory locations, return True.

    Terminates after 2 seconds to handle infinite loops in libformatstr.
    """
    f = FormatStr(maxbuf)
    for (k, v) in test.items():
        f[k] = v
    (out, err, fill) = (None, None, None)

    def sighandler(signum, frame):
        raise Exception("Command timed out")

    signal.signal(signal.SIGALRM, sighandler)
    signal.alarm(2)
    try:
        payload = f.payload(offset, padding=shift)
        if len(payload) > maxbuf:
            print("[-] payload is longer than allowed (%d vs %s)" % (len(payload), maxbuf))
        (out, err, fill) = checkOutput(payload)
    except Exception as e:
        print("[-] Exception occurred: %s" % e)
    signal.alarm(0)

    if err is None or not checkMemoryDump(err, fill, f.mem):
        print("[-] FAILED: Test \"%s\" failed" % name)
        return False
    else:
        print("[+] SUCCESS: Test \"%s\" succeeded" % name)
        return True
829e19dbb45cfcc1788365f1c3a5459209c42e5e
3,072
import numpy as np


def readsignal_VEC(name, fa):
    """
    Reads the time signal stored in the file name.txt, written in a
    single-column format, and returns it in the vector signal.
    fa is an instrumental amplification factor.
    """
    path = '../data/'
    channel = np.loadtxt(path + name + '.txt')
    ndats = len(channel)
    signal = np.zeros([ndats], dtype=float)
    for i in range(ndats):
        signal[i] = channel[i] * fa

    return ndats, signal
2365988aa8baf717f332a021e24c1a7ca6d24243
3,073
import os

import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from scipy import stats

# `get_slide_roi_masks`, `get_stain_vectors_macenko` and
# `extract_patch_texture_features` are project-level helpers assumed to be in scope.


def extract_slide_texture_features(index, output_segment, slide_path, halo_roi_path, method_data):
    """Extract slide texture features

    Args:
        index (string): main index string
        output_segment (string): path to write result parquet
        slide_path (string): path to the whole slide image
        halo_roi_path (string): path to halo roi path
        method_data (dict): method parameters with annotation and tile details
            including annotationLabel, stainChannel and tileSize

    Returns:
        tuple: path to features saved as a np.array & path to feature metadata saved as a parquet.
    """
    print("Hello from extract_slide_texture_features()")

    annotation_name, stain_channel, TILE_SIZE = method_data['annotationLabel'], method_data['stainChannel'], method_data['tileSize']

    dest_dir = f"/gpfs/mskmind_ess/aukermaa/data/{index}/original_glcm_ClusterTendency/"
    os.makedirs(dest_dir, exist_ok=True)

    img_arr, sample_arr, mask_arr = get_slide_roi_masks(
        slide_path=slide_path,
        halo_roi_path=halo_roi_path,
        annotation_name=annotation_name)

    vectors = get_stain_vectors_macenko(sample_arr)

    print("Stain vectors=", vectors)
    print("Max x levels:", img_arr.shape[0])

    if os.path.exists(f"{dest_dir}/vector.npy"):
        print("Output already generated, not doing anything...")
        return dest_dir, output_segment

    features = np.array([])

    nrow = 0
    for x in range(0, img_arr.shape[0], TILE_SIZE):
        nrow += 1
        for y in range(0, img_arr.shape[1], TILE_SIZE):

            img_patch = img_arr[x:x + TILE_SIZE, y:y + TILE_SIZE, :]
            mask_patch = mask_arr[x:x + TILE_SIZE, y:y + TILE_SIZE]

            if mask_patch.sum() == 0:
                continue

            address = f"{index}_{x}_{y}"

            try:
                texture_values = extract_patch_texture_features(
                    address, img_patch, mask_patch,
                    stain_vectors=vectors, stain_channel=stain_channel,
                    glcm_feature='original_glcm_ClusterTendency')

                if texture_values is not None:
                    features = np.append(features, texture_values)
            except Exception as exc:
                print(f"Skipped tile {address} because: {exc}")

        print(f"On row {nrow} of {len(range(0, img_arr.shape[0], TILE_SIZE))}")

    n, (smin, smax), sm, sv, ss, sk = stats.describe(features)

    hist_features = {
        'main_index': index,
        'pixel_original_glcm_ClusterTendency_nobs': n,
        'pixel_original_glcm_ClusterTendency_min': smin,
        'pixel_original_glcm_ClusterTendency_max': smax,
        'pixel_original_glcm_ClusterTendency_mean': sm,
        'pixel_original_glcm_ClusterTendency_variance': sv,
        'pixel_original_glcm_ClusterTendency_skewness': ss,
        'pixel_original_glcm_ClusterTendency_kurtosis': sk
    }

    data_table = pd.DataFrame(data=hist_features, index=[0]).set_index('main_index')

    print(data_table)

    pq.write_table(pa.Table.from_pandas(data_table), output_segment)

    print("Saved to", output_segment)

    np.save(f"{dest_dir}/vector.npy", features)

    return dest_dir, output_segment
25ed7ea09178217ffb5f082e38c58ee1441e68e3
3,074
from socket import socket, AF_INET, SOCK_DGRAM


def connect(host="localhost", port=27450):
    """Connect to server."""
    client = socket(AF_INET, SOCK_DGRAM)
    client.connect((host, port))
    return client
c60bd35b75ee9b3b5aee898b0ee58b95562627c1
3,075
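# Usage sketch for connect above: send one UDP datagram to a local server.
# Host and port are illustrative; no response handling is shown.
client = connect("localhost", 27450)
client.send(b"ping")
client.close()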
from inspect import CO_ASYNC_GENERATOR
from types import AsyncGeneratorType, CoroutineType, GeneratorType


def is_coroutine_generator(obj):
    """
    Returns whether the given `obj` is a coroutine generator created by an
    `async def` function, and can be used inside of an `async for` loop.

    Returns
    -------
    is_coroutine_generator : `bool`
    """
    if isinstance(obj, AsyncGeneratorType):
        code = obj.ag_code
    elif isinstance(obj, CoroutineType):
        code = obj.cr_code
    elif isinstance(obj, GeneratorType):
        code = obj.gi_code
    else:
        return False

    if code.co_flags & CO_ASYNC_GENERATOR:
        return True

    return False
fea1d344f32a0fffe7fe0bb344299e5fd54a6baa
3,076
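# Usage sketch for is_coroutine_generator above.
async def agen():
    yield 1

async def coro():
    return 1

print(is_coroutine_generator(agen()))  # True: created by an async generator function
c = coro()
print(is_coroutine_generator(c))       # False: a plain coroutine, not usable in `async for`
c.close()                              # close it so no "never awaited" warning is emitted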
def get_scheme(scheme_id):
    """
    Retrieve the scheme dict identified by the supplied scheme ID from the
    module-level `sd` structure.

    Returns:
        A scheme dict, or None if no scheme with that ID exists.
    """
    for node in sd["nodes"]:
        if scheme_id == node["id"]:
            return node
41c4b30496656201c58563fd5cc3ab3abe7ecf95
3,077
import numpy as _np

# `Field` is LightPipes' field class, assumed to be importable from the
# surrounding package.


def Axicon(phi, n1, x_shift, y_shift, Fin):
    """
    Fout = Axicon(phi, n1, x_shift, y_shift, Fin)

    :ref:`Propagates the field through an axicon. <Axicon>`

    Args::

        phi: top angle of the axicon in radians
        n1: refractive index of the axicon material
        x_shift, y_shift: shift from the center
        Fin: input field

    Returns::

        Fout: output field (N x N square array of complex numbers).

    Example:

    :ref:`Bessel beam with axicon <BesselBeam>`

    """
    Fout = Field.copy(Fin)
    k = 2*_np.pi/Fout.lam
    theta = _np.arcsin(n1*_np.cos(phi/2)+phi/2-_np.pi/2)
    Ktheta = k * theta
    yy, xx = Fout.mgrid_cartesian
    xx -= x_shift
    yy -= y_shift
    fi = -Ktheta*_np.sqrt(xx**2+yy**2)
    Fout.field *= _np.exp(1j*fi)
    return Fout
a333d29c94e79bdee1ef17ceb3993b28f2e9bd5d
3,078
import urllib import types def addrAndNameToURI(addr, sname): """addrAndNameToURI(addr, sname) -> URI Create a valid corbaname URI from an address string and a stringified name""" # *** Note that this function does not properly check the address # string. It should raise InvalidAddress if the address looks # invalid. if type(addr) is not types.StringType or \ type(sname) is not types.StringType: raise CORBA.BAD_PARAM(omniORB.BAD_PARAM_WrongPythonType, COMPLETED_NO) if addr == "": raise CosNaming.NamingContextExt.InvalidAddress() if sname == "": return "corbaname:" + addr else: stringToName(sname) # This might raise InvalidName return "corbaname:" + addr + "#" + urllib.quote(sname)
fd54c23b4e3396b224341fa54c106e4523b55314
3,079
def blkdev_uname_to_taptype(uname): """Take a blkdev uname and return the blktap type.""" return parse_uname(uname)[1]
e7165a9bd987d4820ed5486a06a1ceceec9c5564
3,080
import tensorflow as tf


def detection_layer(inputs, n_classes, anchors, img_size, data_format):
    """Creates Yolo final detection layer.

    Detects boxes with respect to anchors.

    Args:
        inputs: Tensor input.
        n_classes: Number of labels.
        anchors: A list of anchor sizes.
        img_size: The input size of the model.
        data_format: The input format.

    Returns:
        Tf value [box_centers, box_shapes, confidence, classes]
    """
    n_anchors = len(anchors)

    inputs = tf.keras.layers.Conv2D(filters=n_anchors * (5 + n_classes),
                                    kernel_size=1, strides=1, use_bias=True,
                                    data_format=data_format)(inputs)

    # Shape of each cell in image
    shape = inputs.get_shape().as_list()
    grid_shape = shape[2:4] if data_format == 'channels_first' else shape[1:3]
    if data_format == 'channels_first':
        # Put the channel's dim to the last position
        inputs = tf.transpose(inputs, [0, 2, 3, 1])

    inputs = tf.reshape(inputs,
                        [-1, n_anchors * grid_shape[0] * grid_shape[1], 5 + n_classes])

    # Strides = # of cells in an image
    strides = (img_size[0] // grid_shape[0], img_size[1] // grid_shape[1])

    box_centers, box_shapes, confidence, classes = \
        tf.split(inputs, [2, 2, 1, n_classes], axis=-1)

    x = tf.range(grid_shape[0], dtype=tf.float32)
    y = tf.range(grid_shape[1], dtype=tf.float32)
    x_offset, y_offset = tf.meshgrid(x, y)
    x_offset = tf.reshape(x_offset, (-1, 1))
    y_offset = tf.reshape(y_offset, (-1, 1))
    x_y_offset = tf.concat([x_offset, y_offset], axis=-1)
    x_y_offset = tf.tile(x_y_offset, [1, n_anchors])
    x_y_offset = tf.reshape(x_y_offset, [1, -1, 2])

    box_centers = tf.nn.sigmoid(box_centers)
    box_centers = (box_centers + x_y_offset) * strides

    anchors = tf.tile(anchors, [grid_shape[0] * grid_shape[1], 1])
    box_shapes = tf.exp(box_shapes) * tf.cast(anchors, dtype=tf.float32)

    confidence = tf.nn.sigmoid(confidence)

    classes = tf.nn.sigmoid(classes)

    inputs = tf.concat([box_centers, box_shapes, confidence, classes],
                       axis=-1)

    return inputs
85ada39e57c80eced3dbdc145759a1caa609607d
3,081
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt

# `pos`, `plotting` and `utils` are pyfolio's own helper modules; the original
# `from operator import pos` would not provide `pos.get_percent_alloc`.
from pyfolio import plotting, pos, utils


def create_position_tear_sheet(returns, positions,
                               show_and_plot_top_pos=2, hide_positions=False,
                               sector_mappings=None, transactions=None,
                               estimate_intraday='infer', return_fig=False):
    """
    Generate a number of plots for analyzing a strategy's positions and
    holdings.

    - Plots: gross leverage, exposures, top positions, and holdings.
    - Will also print the top positions held.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
        - See full explanation in create_full_tear_sheet.
    show_and_plot_top_pos : int, optional
        By default, this is 2, and both prints and plots the top 10 positions.
        If this is 0, it will only plot; if 1, it will only print.
    hide_positions : bool, optional
        If True, will not output any symbol names.
        Overrides show_and_plot_top_pos to 0 to suppress text output.
    sector_mappings : dict or pd.Series, optional
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.
    transactions : pd.DataFrame, optional
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in create_full_tear_sheet.
    estimate_intraday: boolean or str, optional
        Approximate returns for intraday strategies.
        See description in create_full_tear_sheet.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    """
    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)

    if hide_positions:
        show_and_plot_top_pos = 0
    vertical_sections = 7 if sector_mappings is not None else 6

    fig = plt.figure(figsize=(14, vertical_sections * 6))
    gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
    ax_exposures = plt.subplot(gs[0, :])
    ax_top_positions = plt.subplot(gs[1, :], sharex=ax_exposures)
    ax_max_median_pos = plt.subplot(gs[2, :], sharex=ax_exposures)
    ax_holdings = plt.subplot(gs[3, :], sharex=ax_exposures)
    ax_long_short_holdings = plt.subplot(gs[4, :])
    ax_gross_leverage = plt.subplot(gs[5, :], sharex=ax_exposures)

    positions_alloc = pos.get_percent_alloc(positions)

    plotting.plot_exposures(returns, positions, ax=ax_exposures)

    plotting.show_and_plot_top_positions(
        returns,
        positions_alloc,
        show_and_plot=show_and_plot_top_pos,
        hide_positions=hide_positions,
        ax=ax_top_positions)

    plotting.plot_max_median_position_concentration(positions,
                                                    ax=ax_max_median_pos)

    plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)

    plotting.plot_long_short_holdings(returns, positions_alloc,
                                      ax=ax_long_short_holdings)

    plotting.plot_gross_leverage(returns, positions,
                                 ax=ax_gross_leverage)

    if sector_mappings is not None:
        sector_exposures = pos.get_sector_exposures(positions,
                                                    sector_mappings)
        if len(sector_exposures.columns) > 1:
            sector_alloc = pos.get_percent_alloc(sector_exposures)
            sector_alloc = sector_alloc.drop('cash', axis='columns')
            ax_sector_alloc = plt.subplot(gs[6, :], sharex=ax_exposures)
            plotting.plot_sector_allocations(returns, sector_alloc,
                                             ax=ax_sector_alloc)

    for ax in fig.axes:
        plt.setp(ax.get_xticklabels(), visible=True)

    if return_fig:
        return fig
87338d6acb2852f9d45fa21cb4005c602cbfc909
3,082
import csv

from django.http import HttpResponse
from django.shortcuts import get_object_or_404

# `Statement` is the project's billing model, assumed to be importable from
# the app's models module.


def statement_view(request, statement_id=None):
    """Send a CSV version of the statement with the given ``statement_id``
    to the user's browser.
    """
    statement = get_object_or_404(
        Statement, pk=statement_id, account__user=request.user)

    response = HttpResponse(mimetype='text/csv')
    filename = "%s (%s).csv" % (statement.title,
                                statement.from_date.strftime('%B %Y'))
    response['Content-Disposition'] = 'attachment; filename=%s' % (filename,)

    writer = csv.writer(response)
    headings = ["Tag pool name", "Tag name", "Message direction", "Total cost"]
    writer.writerow(headings)

    line_item_list = statement.lineitem_set.all()
    for line_item in line_item_list:
        writer.writerow([
            line_item.tag_pool_name, line_item.tag_name,
            line_item.message_direction, line_item.total_cost])

    return response
c5a475086ee4a75fa76efae9cdf7bd185c8aa78a
3,083
def get_proton_gamma(energy): """Returns relativistic gamma for protons.""" return energy / PROTON_MASS
049e92cf85824561a50f559ef54045865da2a69b
3,084
def demandNameItem(listDb, phrase2, mot):
    """
    Put the database names of all items into a single string, ready to be
    inserted in a database query.

    listDb: list with the database name of every item
    phrase2: string accumulating the database names of all items
    mot: database name of a single item (overwritten inside the loop)

    Returns a string with the database names of all items separated by ','.
    """
    for i in range(len(listDb)):
        mot = str(listDb[i])
        phrase2 += mot
        if not i == len(listDb)-1:
            phrase2 += ','
    return phrase2
67af8c68f0ba7cd401067e07c5de1cd25de9e66c
3,085
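# Usage sketch for demandNameItem above: an empty accumulator string is passed in,
# and the trailing `mot` argument is only a working variable.
print(demandNameItem(["users", "orders", "items"], "", ""))  # "users,orders,items"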
def escape_yaml(data: str) -> str:
    """
    Jinja2 filter for escaping strings in YAML.

    Escapes `$` as `$$`.
    """
    return data.replace("$", "$$")
d1142af7447ad372e6b0df5848beb28e0dd84e68
3,086
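# Usage sketch for escape_yaml above.
print(escape_yaml("echo $HOME/$USER"))  # "echo $$HOME/$$USER"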
import numpy as np


def stokes_linear(theta):
    """Stokes vector for light polarized at angle theta from the horizontal plane."""
    if np.isscalar(theta):
        return np.array([1, np.cos(2*theta), np.sin(2*theta), 0])

    theta = np.asarray(theta)
    return np.array([np.ones_like(theta),
                     np.cos(2*theta),
                     np.sin(2*theta),
                     np.zeros_like(theta)]).T
a35f342ee32cdf54e432ee52e1faefbfb3b24382
3,087
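# Usage sketch for stokes_linear above.
import numpy as np

print(stokes_linear(0))           # [1. 1. 0. 0.]  horizontal polarization
print(stokes_linear(np.pi / 4))   # approximately [1, 0, 1, 0]: +45 degrees
print(stokes_linear([0, np.pi / 2]).shape)  # (2, 4): one Stokes vector per angle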
import re
from datetime import datetime

# `Item` is the project's document model (e.g. a mongoengine Document), assumed
# to be imported elsewhere in this module.


def validate_item_field(attr_value, attr_form):
    """
    :param attr_value: the item's attribute value
    :param attr_form: the attribute rule from the item's category
    :return: (0, {"msg": ...}) on success, (-1, {"error": ...}) on failure
    """
    if not isinstance(attr_form, dict):
        return -1, {"error": "attr_form is not a dict."}
    required = attr_form.get('required')
    if required == 'false':
        return 0, {"msg": "success"}
    field = attr_form.get('field')
    if not field:
        return -1, {"error": "field missed."}
    if field == "string":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        if len(attr_value) < int(attr_form["min_length"]) or len(attr_value) > int(attr_form["max_length"]):
            return -1, {"error": "invalid string length."}
        if attr_form.get('valid_rule') == "none":
            return 0, {"msg": "success"}
        elif attr_form.get('valid_rule') == "IPaddress":
            pattern = re.compile(r'\d+\.\d+\.\d+\.\d+')  # IP address matching could be improved
        elif attr_form.get('valid_rule') == "email":
            pattern = re.compile(r'^(\w)+(\.\w+)*@(\w)+((\.\w+)+)$')
        elif attr_form.get('valid_rule') == "phone":
            pattern = re.compile(r'^\d{11}$')
        else:
            return -1, {"error": "invalid valid_rule."}
        match = pattern.match(attr_value)
        if not match:
            return -1, {"error": "did not match rule: %s" % attr_form.get('valid_rule')}
    elif field == "text":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        if len(attr_value) < int(attr_form["min_length"]) or len(attr_value) > int(attr_form["max_length"]):
            return -1, {"error": "invalid string length."}
    elif field == "select":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        if attr_value not in attr_form["choice"][1:-1].split("|"):
            return -1, {"error": "invalid choice."}
    elif field == "multiple_select":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        for each in attr_value.split("|"):
            if each not in attr_form["choice"][1:-1].split("|"):
                return -1, {"error": "invalid choice."}
    elif field == "integer":
        if not isinstance(attr_value, int):
            return -1, {"error": "attr_value is not an integer."}
        if attr_value < int(attr_form["min_value"]) or attr_value > int(attr_form["max_value"]):
            return -1, {"error": "invalid integer value."}
    elif field == "datetime":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        try:
            date_object = datetime.strptime(attr_value, '%Y%m%d%H%M%S')
        except ValueError:
            return -1, {"error": "time data '%s' does not match format" % attr_value}
    elif field == "reference":
        if not isinstance(attr_value, str):
            return -1, {"error": "attr_value is not a string."}
        item_obj = Item.objects(id=attr_value)
        if not item_obj:
            return -1, {"error": "unknown item."}
        if item_obj.category.id != attr_form["reference"]:
            return -1, {"error": "wrong category."}
    return 0, {"msg": "success"}
ac4687b576bb29707f55a2cb4627dc67ff07b2fa
3,088
import cv2
import numpy as np

# `random_colors` and `apply_mask` are the Mask R-CNN visualization helpers
# assumed to be defined alongside this function.


def display_instances(image, boxes, masks, ids, names, scores):
    """
    Take the image and results and apply the mask, box, and label.
    """
    n_instances = boxes.shape[0]
    colors = random_colors(n_instances)

    if not n_instances:
        print('NO INSTANCES TO DISPLAY')
    else:
        assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]

    for i, color in enumerate(colors):
        # we want the colours to only be in one color: SIFR orange ff5722
        # color = (255, 87, 34)
        if not np.any(boxes[i]):
            continue

        y1, x1, y2, x2 = boxes[i]
        label = names[ids[i]]
        score = scores[i] if scores is not None else None
        caption = '{} {:.2f}'.format(label, score) if score else label
        mask = masks[:, :, i]

        image = apply_mask(image, mask, color)
        image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
        image = cv2.putText(
            image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2
        )

    return image
4268d08a7e413a0558e2b0386cbd184ffaba05ba
3,089
import json def read_squad_examples(input_file, is_training): """Read a SQuAD json file into a list of SquadExample.""" with tf.io.gfile.GFile(input_file, "r") as reader: input_data = json.load(reader)["data"] examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None orig_answer_text = None is_impossible = False if is_training: if FLAGS.train_version == "v2": is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] start_position = answer["answer_start"] else: start_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, paragraph_text=paragraph_text, orig_answer_text=orig_answer_text, start_position=start_position, is_impossible=is_impossible) examples.append(example) return examples
1c893c8f443bca9c707498650142ecd5262d619d
3,090
import os

import pandas as pd

# `get_data_dir` and `maybe_download` are project-level helpers assumed to be in scope.


def load_gamma_telescope():
    """https://archive.ics.uci.edu/ml/datasets/MAGIC+Gamma+Telescope"""
    url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/magic/magic04.data'
    filepath = os.path.join(get_data_dir(), "magic04.data")
    maybe_download(filepath, url)

    # magic04.data ships without a header row, so read it as headerless data
    # before assigning column names.
    data = pd.read_csv(filepath, header=None)
    data.columns = ['fLength', 'fWidth', 'fSize', 'fConc', 'fConc1',
                    'fAsym', 'fM3Long', 'fM3Trans', 'fAlpha', 'fDist', 'class']
    X = data.drop(['class'], axis=1)
    y = data['class']
    return X, y
575636be935fb8a44a13b4fc418504315a90a83c
3,091
from decimal import Decimal

import numpy as np


def qarange(start, end, step):
    """
    Convert the cyclic measurement and control data into the required array.

    :param start:
    :param end:
    :param step:
    :return: list of values rounded to the precision implied by start/step
    """
    if Decimal(str(end)) - Decimal(str(start)) < Decimal(str(step)) or step == 0:
        return [start]
    start_decimal = str(start)[::-1].find('.')
    step_decimal = str(step)[::-1].find('.')
    data_decimal = max([step_decimal, start_decimal])
    if data_decimal == -1:
        data_decimal = 0
    data_number = int((Decimal(str(end)) - Decimal(str(start))) / Decimal(str(step)))
    end_data = round(start + data_number * step, data_decimal)
    data_np = np.linspace(start, end_data, data_number + 1)
    data_list = [round(data, data_decimal) for data in data_np]
    return data_list
6e0331160f6501b4106c9e6379762f9c4bf87f1b
3,092
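# Usage sketch for qarange above: an inclusive range with decimal-safe rounding.
print(qarange(0, 1, 0.25))   # [0.0, 0.25, 0.5, 0.75, 1.0]
print(qarange(1.5, 1.6, 0))  # [1.5]  (a zero step returns just the start value)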
import pymel.core as pm


def get_default_render_layer():
    """Returns the default render layer.

    :return:
    """
    return pm.ls(type='renderLayer')[0].defaultRenderLayer()
b134b52bf35a46c10460ab612b14fcea44895a45
3,093
import numpy as np
import planar


def translation(shift):
    """Translation Matrix for 2D"""
    return np.asarray(planar.Affine.translation(shift)).reshape(3, 3)
2b77265545194cabfc44728dbc7c5c95d808da38
3,094
import numpy as np


def pad(adjacency_matrices, size):
    """Pads adjacency matrices to the desired size.

    This will pad the adjacency matrices to the specified size, appending
    zeros as required. The output adjacency matrices will all be of size
    'size' x 'size'.

    Args:
        adjacency_matrices: The input list of adjacency matrices.
        size: The desired dimension of the output matrices.

    Returns:
        The resulting list of adjacency matrices.
    """
    padding = size - adjacency_matrices.shape[1]
    return np.pad(adjacency_matrices,
                  [(0, 0), (0, padding), (0, padding)],
                  mode='constant')
dc015eb4dd41dcf3f88ef6317dab2e3f57709453
3,095
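# Usage sketch for pad above: one 2x2 adjacency matrix padded up to 3x3.
import numpy as np

batch = np.array([[[0, 1],
                   [1, 0]]])
print(pad(batch, 3).shape)  # (1, 3, 3)
print(pad(batch, 3)[0])
# [[0 1 0]
#  [1 0 0]
#  [0 0 0]]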
def mapping_activities_from_log(log, name_of_activity): """ Returns mapping activities of activities. :param name_of_activity: :param log: :return: mapping """ mapping_activities = dict() unique_activities = unique_activities_from_log(log, name_of_activity) for index, activity in enumerate(unique_activities): mapping_activities[activity] = index return mapping_activities
82fc23f08e9ae3629c654a5c04bcfcecb76a8cb3
3,096
def pad(x, paddings, axes=None): """ Pads a tensor with zeroes along each of its dimensions. TODO: clean up slice / unslice used here Arguments: x: the tensor to be padded paddings: the length of the padding along each dimension. should be an array with the same length as x.axes. Each element of the array should be either an integer, in which case the padding will be symmetrical, or a tuple of the form (before, after) axes: the axes to be given to the padded tensor. If unsupplied, we create new axes of the correct lengths. Returns: TensorOp: symbolic expression for the padded tensor """ if len(x.axes) != len(paddings): raise ValueError(( "pad's paddings has length {pad} which needs to be the same " "as the number of axes in x ({x})" ).format( pad=len(paddings), x=len(x.axes), )) def pad_to_tuple(pad): if isinstance(pad, int): pad = (pad, pad) return pad def to_slice(pad): s = (pad[0], -pad[1]) s = tuple(None if p == 0 else p for p in s) return slice(s[0], s[1], 1) paddings = tuple(pad_to_tuple(pad) for pad in paddings) if axes is None: axes = make_axes( make_axis(length=axis.length + pad[0] + pad[1], name=axis.name) if pad != (0, 0) else axis for axis, pad in zip(x.axes, paddings) ) slices = tuple(to_slice(p) for p in paddings) return _unslice(x, slices, axes)
484676f81ec2f394dbe5f58831b708b1794c4424
3,097
import numpy as np


def labels_to_1hotmatrix(labels, dtype=int):
    """
    Maps a restricted growth string to a one-hot flag matrix. The input and
    the output are equivalent representations of a partition of a set of n
    elements.

    labels: restricted growth string: n-vector with entries in {0,...,n-1}.
        The first entry is 0. Other entries cannot exceed any previous entry
        by more than 1.

    dtype: optional, default=int. Element data type for returned matrix.
        bool or float can also be used.

    Returns an (m,n) matrix with 0/1 entries, where m is the number of blocks
    in the partition and n is the number of elements in the partitioned set.
    Columns are one-hot. If return_matrix[i,j], then element j is in block i.
    """
    m = 1 + labels.max()
    B = np.arange(m).reshape(-1, 1) == labels
    return B.astype(dtype, copy=False)
eef80548e340477bf6881d0d14e434e0ee2f44da
3,098
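# Usage sketch for labels_to_1hotmatrix above: the partition {0,1,3}, {2}, {4}
# encoded as the restricted growth string [0, 0, 1, 0, 2].
import numpy as np

labels = np.array([0, 0, 1, 0, 2])
print(labels_to_1hotmatrix(labels))
# [[1 1 0 1 0]
#  [0 0 1 0 0]
#  [0 0 0 0 1]]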
import torch as th

# `select` is assumed to be a project-level helper that filters padded entities
# and returns (logits, target, N, types); the stdlib `select` module originally
# imported here is not callable and would fail below.


def recall(logits, target, topk=[1, 5, 10], typeN=8):
    """Compute top K recalls of a batch.

    Args:
        logits (B x max_entities, B x max_entities x max_rois):
        target (B x max_entities, B x max_entities x max_rois):
        topk: top k recalls to compute

    Returns:
        N: number of entities in the batch
        TPs: topk true positives in the batch
        bound: max number of groundable entities
    """
    logits, target, N, types = select(logits, target)
    topk = [topk] if isinstance(topk, int) else sorted(topk)
    TPs = [0] * len(topk)
    bound = target.max(-1, False)[0].sum().item()  # at least one detected
    typeTPs = th.zeros(typeN, device=types.device)
    typeN = th.zeros_like(typeTPs)
    #print("target entity type count: ", types.shape, types.sum(dim=0), target.shape)
    if max(topk) == 1:
        top1 = th.argmax(logits, dim=1)
        one_hots = th.zeros_like(target)
        one_hots.scatter_(1, top1.view(-1, 1), 1)
        TPs = (one_hots * target).sum().item()
        hits = (one_hots * target).sum(dim=1) >= 1
        typeTPs += types[hits].sum(dim=0)
        typeN += types.sum(dim=0)
    else:
        logits = th.sort(logits, 1, descending=True)[1]
        for i, k in enumerate(topk):
            one_hots = th.zeros_like(target)
            one_hots.scatter_(1, logits[:, :k], 1)
            TPs[i] = ((one_hots * target).sum(dim=1) >= 1).float().sum().item()  # hit if at least one matched
            if i == 0:
                hits = (one_hots * target).sum(dim=1) >= 1
                typeTPs += types[hits].sum(dim=0)
                typeN += types.sum(dim=0)
    #print(TPs, N)
    #print(typeTPs)
    #print(typeN)
    return N, th.Tensor(TPs + [bound]), (typeTPs.cpu(), typeN.cpu())
ea3ec996808e25566e5bd3dd33f1a56232e5ba7a
3,099