content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
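As a minimal illustration of the schema above (a sketch under the assumption that each record's sha1 column is the hex SHA-1 digest of its content field and that the rows are exported as JSON lines; neither is stated by the listing itself), one way to load and sanity-check records shaped like the ones below:

import hashlib
import json

def check_record(record: dict) -> bool:
    # Recompute the digest of the content field and compare it with the
    # stored sha1 value (assumed convention, not stated by the listing).
    digest = hashlib.sha1(record["content"].encode("utf-8")).hexdigest()
    return digest == record["sha1"]

# Hypothetical usage with a JSON-lines export named records.jsonl,
# where each line holds the keys content, sha1 and id.
with open("records.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        if not check_record(record):
            print(f"checksum mismatch for id {record['id']}")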
def authority_b(request, configuration, authority_a): """ Intermediate authority in the chain valid_authority -> authority_a -> authority_b """ authority = configuration.manager.get_or_create_ca("authority_b", hosts=["*.com"], certificate_authority=authority_a) request.addfinalizer(authority.delete_files) return authority
ac9ec3c33e06ba506efe51f4ceafa96cfe91f761
19,500
import numpy as np def derive(pattern): """ Calculate the first derivative of the pattern. Smoothes the input first, so noisy patterns shouldn't be much of a problem. """ return np.gradient(smooth_pattern(pattern))
4f2bdbb34f4d36427b3d9c1effff4d13a7d41552
19,501
def gaussian(nm, a, x0, sigma): """ gaussian function """ gaussian_array = a * np.exp(- ((nm - x0) ** 2.0) / (2 * (sigma ** 2.0))) return gaussian_array
2c8ba6bb93565ff9ae79f0a0b6643994730bb672
19,502
def list_to_filename(filelist): """Returns a list if filelist is a list of length greater than 1, otherwise returns the first element """ if len(filelist) > 1: return filelist else: return filelist[0]
32a88235196e104fa043d2b77af1f09d4e9164e9
19,503
import datetime import six def _get_expiration_seconds(expiration): """Convert 'expiration' to a number of seconds in the future. :type expiration: int, long, datetime.datetime, datetime.timedelta :param expiration: When the signed URL should expire. :rtype: int :returns: a timestamp as an absolute number of seconds. """ # If it's a timedelta, add it to `now` in UTC. if isinstance(expiration, datetime.timedelta): now = _NOW().replace(tzinfo=UTC) expiration = now + expiration # If it's a datetime, convert to a timestamp. if isinstance(expiration, datetime.datetime): micros = _microseconds_from_datetime(expiration) expiration = micros // 10**6 if not isinstance(expiration, six.integer_types): raise TypeError('Expected an integer timestamp, datetime, or ' 'timedelta. Got %s' % type(expiration)) return expiration
3eb2c56211b3cfab8f35634b83ef8c77e4ea2221
19,504
import click def parse_encoding(format_, track, supplied_encoding, prompt_encoding): """Get the encoding from the FLAC files, otherwise require the user to specify it.""" if format_ == "FLAC": if track["precision"] == 16: return "Lossless", False elif track["precision"] == 24: return "24bit Lossless", False if supplied_encoding and list(supplied_encoding) != [None, None]: return supplied_encoding if prompt_encoding: return _prompt_encoding() click.secho( "An encoding must be specified if the files are not lossless.", fg="red" ) raise click.Abort
d56ad0d15176a963e62a33d4b4cd799d1e68281e
19,505
import os import stat def check_core_dump_setting(): """ Check that the OS core dump settings are correct """ errors = [] ret, out = run_shell('ulimit -c') limit = out.strip('\n').strip() if limit != 'unlimited': errors.append(f'core dump file setting limit="{limit}" is incorrect, should be set to unlimited') with open('/proc/sys/kernel/core_pattern', 'r') as core_pattern_file: core_pattern = core_pattern_file.read().strip() core_path = os.path.dirname(core_pattern) if os.path.isabs(core_path): if os.path.exists(core_path): core_dir_stat = os.stat(core_path) permission = stat.S_IMODE(core_dir_stat.st_mode) if permission != 0o777: errors.append(f'core dump pattern path {core_path}\'s permission mask {permission} is incorrect') else: errors.append(f'core dump pattern path {core_path} does not exist') else: errors.append(f'core dump pattern path {core_pattern} is not an absolute path') return errors
e2735badead3f03b0c35d0fecb157b8a1f8d82db
19,506
def disaggregate(model, mains, model_name, num_seq_per_batch, seq_len, appliance, target_scale, stride=1): """ Disaggregation function to predict all results for whole time series mains. :param model: tf model object :param mains: numpy.ndarray, shape(-1,) :param model_name: name of the used model :param num_seq_per_batch: int, number of sequences to have in the batch :param seq_len: int, length of the sequence :param appliance: str, name of the appliance :param target_scale: int, scaling factor of predicted value :param stride: int, stride of moving window :return: p: np.ndarray, shape(-1,), disaggregated power of the appliance metrics = dict containing the metrics """ # Converting mains array into batches for prediction mains = mains.reshape(-1,) agg_batches = mains_to_batches(mains, num_seq_per_batch, seq_len, stride=stride, pad=True) if (appliance == 'fridge') or (appliance == 'Refrigerator') or (appliance == 'REFRIGERATOR'): if target_scale: target_max = target_scale else: target_max = 313 target_min = 0 input_max = 7879 input_min = 80 elif (appliance == 'washing machine') or (appliance == 'Washing_Machine') or (appliance == 'WASHING_MACHINE'): if target_scale: target_max = target_scale else: target_max = 3999 target_min = 0 input_max = 7879 input_min = 80 elif (appliance == 'dishwasher') or (appliance == 'Dishwasher') or (appliance == 'DISHWASHER'): if target_scale: target_max = target_scale else: target_max = 500 target_min = 0 input_max = 7879 input_min = 80 elif (appliance == 'Electric_Vehicle') or (appliance == 'electric vehicle') or (appliance=='ELECTRIC_VEHICLE'): if target_scale: target_max = target_scale else: target_max = 6000 target_min = 0 input_max = 7879 input_min = 80 elif (appliance == 'DRYER'): if target_scale: target_max = target_scale else: target_max = 2500 target_min = 0 input_max = 7879 input_min = 80 # list to store predictions y_net = [] for id, batch in enumerate(agg_batches): X_pred = np.copy(batch.reshape(-1, seq_len, 1)) X_pred /= (input_max-input_min) X_pred = X_pred * 10 y_net.append(model.predict(X_pred)) # converting the predictions to rectangles rectangles = pred_to_rectangles(y_net, num_seq_per_batch, seq_len, stride) return rectangles
2053e9dc74d188ab41dbeb2fc1af8cd4bbd6dfae
19,507
import pathlib from pathlib import Path def set_path_to_file(categoria: str) -> pathlib.PosixPath: """ Receives a string with the name of the legislation category and returns a pathlib.PosixPath object """ fpath = Path(f"./data/{categoria}") fpath.mkdir(parents=True, exist_ok=True) return fpath
98455978e695d34deb27dc59807e06f1a4daff96
19,508
from collections import defaultdict def transpose_nested_dictionary(nested_dict): """ Given a nested dictionary from k1 -> k2 -> value transpose its outer and inner keys so it maps k2 -> k1 -> value. """ result = defaultdict(dict) for k1, d in nested_dict.items(): for k2, v in d.items(): result[k2][k1] = v return result
39f8faa319063ac533b375c5ae0ac1c10a8fd770
19,509
def auth_code(): """ Function for handling two-factor authentication :return: Code for two-factor authentication :rtype: tuple(str, bool) """ tmp = input('Enter the code: ') return tmp, True
8b0ae26cfdd1aa9f7b9c7a0433075494fe354185
19,510
from typing import cast def graph_file_read_mtx(Ne: int, Nv: int, Ncol: int, directed: int, filename: str,\ RemapFlag:int=1, DegreeSortFlag:int=0, RCMFlag:int=0, WriteFlag:int=0) -> Graph: """ This function is used for creating a graph from a mtx graph file. compared with the graph_file_read function, it will skip the mtx head part Ne : the total number of edges of the graph Nv : the total number of vertices of the graph Ncol: how many column of the file. Ncol=2 means just edges (so no weight and weighted=0) and Ncol=3 means there is weight for each edge (so weighted=1). directed: 0 means undirected graph and 1 means directed graph filename: the file that has the edge list RemapFlag: if the vertex ID is larger than the total number of vertices, we will relabel the vertex ID DegreeSortFlag: we will let small vertex ID be the vertex whose degree is small RCMFlag: we will remap the vertex ID based on the RCM algorithm WriteFlag: we will output the final edge list src->dst array as a new input file. Returns ------- Graph The Graph class to represent the data See Also -------- Notes ----- Raises ------ RuntimeError """ cmd = "segmentedGraphFileMtx" args = "{} {} {} {} {} {} {} {} {}".format(Ne, Nv, Ncol, directed, filename, \ RemapFlag, DegreeSortFlag, RCMFlag, WriteFlag) print(args) repMsg = generic_msg(cmd=cmd, args=args) return Graph(*(cast(str, repMsg).split('+')))
7babe91d1daad745a94ea542ebda0cff9eaedf4b
19,511
def _get_uri(tag, branch, sha1): """ Set the uri -- common code used by both install and debian upgrade """ uri = None if tag: uri = 'ref/' + tag elif branch: uri = 'ref/' + branch elif sha1: uri = 'sha1/' + sha1 else: # FIXME: Should master be the default? log.debug("defaulting to master branch") uri = 'ref/master' return uri
33f2662a8c6e9ad61136e357af495fc47d7c9dfc
19,512
def compute_inliers (BIH, corners): """ Function: compute_inliers ------------------------- given a board-image homography and a set of all corners, this will return the number that are inliers """ #=====[ Step 1: get a set of all image points for vertices of board coords ]===== all_board_points = [] for i in range(9): for j in range(9): all_board_points.append ((i, j)) all_BIH_ip = [board_to_image_coords (BIH, bp) for bp in all_board_points] #=====[ Step 2: get booleans for each corner being an inlier ]===== num_inliers = sum ([is_BIH_inlier (all_BIH_ip, corner) for corner in corners]) return num_inliers
431e5e82e127f404b142940de79c8bed79021423
19,513
def plotPayloadStates(full_state, posq, tf_sim): """This function plots the states of the payload""" # PL_states = [xl, vl, p, wl] fig8, ax11 = plt.subplots(3, 1, sharex=True ,sharey=True) fig8.tight_layout() fig9, ax12 = plt.subplots(3, 1, sharex=True, sharey=True) fig9.tight_layout() fig10, ax13 = plt.subplots(3, 1, sharex=True ,sharey=True) fig10.tight_layout() fig11, ax14 = plt.subplots(3, 1, sharex=True ,sharey=True) fig11.tight_layout() fig12, ax15 = plt.subplots(1, 1, sharex=True ,sharey=True) fig12.tight_layout() time = np.linspace(0, tf_sim*1e-3, num=len(full_state)) pos = full_state[:,0:3] linVel = full_state[:,3:6] angVel = full_state[:,9:12] p = full_state[:,6:9] ts = 'time [s]' ############################################################################################### ax11[0].plot(time, pos[:,0], c='k', lw=0.75, label='Actual'), ax11[1].plot(time, pos[:,1], lw=0.75, c='k'), ax11[2].plot(time, pos[:,2], lw=0.75, c='k') ax11[0].set_ylabel('x [m]',), ax11[1].set_ylabel('y [m]'), ax11[2].set_ylabel('z [m]') ax11[0].legend() fig8.supxlabel(ts,fontsize='small') grid = plt.GridSpec(3,1) create_subtitle(fig8, grid[0, ::], 'Actual Payload Positions') ############################################################################################### ax12[0].plot(time, linVel[:,0],lw=0.75, c='k', label='Actual'), ax12[1].plot(time, linVel[:,1],lw=0.75, c='k'), ax12[2].plot(time, linVel[:,2],lw=0.75, c='k') ax12[0].set_ylabel('vx [m/s]'), ax12[1].set_ylabel('vy [m/s]'), ax12[2].set_ylabel('vz [m/s]') ax12[0].legend() fig9.supxlabel(ts,fontsize='small') grid = plt.GridSpec(3,1) create_subtitle(fig9, grid[0, ::], 'Actual Payload Linear Velocities') ############################################################################################### ax13[0].plot(time, angVel[:,0],c='k',lw=1, label='Actual'), ax13[1].plot(time, angVel[:,1],c='k',lw=1), ax13[2].plot(time, angVel[:,2],c='k',lw=1) ax13[0].set_ylabel('wx [deg/s]',labelpad=-5), ax13[1].set_ylabel('wy [deg/s]',labelpad=-5), ax13[2].set_ylabel('wz [deg/s]',labelpad=-5) fig10.supxlabel(ts,fontsize='small') grid = plt.GridSpec(3,1) create_subtitle(fig10, grid[0, ::], ' Actual Payload Angular Velocities') ############################################################################################### ax14[0].plot(time, p[:,0],c='k',lw=1, label='Actual'), ax14[1].plot(time, p[:,1],c='k',lw=1), ax14[2].plot(time, p[:,2],c='k',lw=1) ax14[0].set_ylabel('px',labelpad=-5), ax14[1].set_ylabel('py',labelpad=-5), ax14[2].set_ylabel('pz',labelpad=-5) fig11.supxlabel(ts,fontsize='small') grid = plt.GridSpec(3,1) create_subtitle(fig11, grid[0, ::], 'Cable Directional Unit Vector') ############################################################################################### norm_x = np.zeros((len(full_state),)) for i in range(0, len(norm_x)): norm_x[i] = np.linalg.norm(pos[i,:] - posq[i,:]) ax15.plot(time, norm_x,c='k',lw=1, label='Norm') ax15.set_ylabel('||xq - xp||',labelpad=-2) fig12.supxlabel(ts,fontsize='small') grid = plt.GridSpec(3,1) create_subtitle(fig12, grid[0, ::], 'Diff between Quadrotor and Payload Positions (Norm)') return fig8, fig9, fig10, fig11, fig12
56018bbb5dfaca76dae62940c572aacfef31ad1e
19,514
def draw(p): """ Draw samples based on probability p. """ return np.searchsorted(np.cumsum(p), np.random.random(), side='right')
84b087c9eb6bfdac4143a464399f85cad0169000
19,515
import functools def _autoinit(func): """Decorator to ensure that global variables have been initialized before running the decorated function. Args: func (callable): decorated function """ @functools.wraps(func) def _wrapped(*args, **kwargs): init() return func(*args, **kwargs) return _wrapped
b39242a9f600a7bbeaf43d73ae3529dcad9c3857
19,516
from datetime import datetime def export_testing_time_result(): """ Description: I refer to the answer at StackOverflow: https://stackoverflow.com/questions/42957871/return-a-created-excel-file-with-flask :return: An HTTP response containing Office Excel binary data. """ target_info = request.form["target"] workbook = ResultReport.general_report(target_info=target_info) general_report_datetime = datetime.now().isoformat(timespec='seconds').split("T")[0] return Response( save_virtual_workbook(workbook=workbook), headers={ 'Content-Disposition': f'attachment; filename=D-Link_Wi-Fi_Testing_Time_Report_{general_report_datetime}.xlsx', 'Content-type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' } )
ab0505449361b036ed94eb919a3c876a8776b839
19,517
def profile_view(user_request: 'Queryset') -> 'Queryset': """ Function that processes the user's data and selects vacancies from the database for that specific user """ user_id = user_request[0]['id'] area = user_request[0]['area'] experience = user_request[0]['experience'] salary = user_request[0]['salary'] without_salary = user_request[0]['without_salary'] if without_salary is False: vacancies_list = Vacancies.objects.filter(area=area, experience=experience, salary_from__lte=salary, salary_to__gte=salary, ).exclude(banned_by_users=user_id, ).values('name', 'url', 'id', ).order_by('-published') else: vacancies_list = Vacancies.objects.filter(area=area, experience=experience, ).exclude(banned_by_users=user_id, ).values('name', 'url', 'id', ).order_by('-published') update_shown_vacancy_to_user(user_id, vacancies_list) recommended_vacancies_id = recommendations(user_request) if recommended_vacancies_id: recommended_vacancies = Vacancies.objects.filter(id__in=recommended_vacancies_id, ).values('name', 'url') else: recommended_vacancies = None return vacancies_list, recommended_vacancies
6ec9a6c00cead62c2d30dcb627a575b072bcde60
19,518
def config_vrf(dut, **kwargs): """ #Sonic cmd: Config vrf <add | delete> <VRF-name> eg: config_vrf(dut = dut1, vrf_name = 'Vrf-test', config = 'yes') eg: config_vrf(dut = dut1, vrf_name = 'Vrf-test', config = 'no') """ st.log('Config VRF API') if 'config' in kwargs: config = kwargs['config'] else: config = 'yes' if 'vrf_name' in kwargs: if not isinstance(kwargs['vrf_name'],list): vrf_name = [kwargs['vrf_name']] else: vrf_name = kwargs['vrf_name'] else: st.log("Mandatory parameter vrfname is not found") if 'skip_error' in kwargs: skip_error = kwargs['skip_error'] else: skip_error = False cli_type = kwargs.pop('cli_type', st.get_ui_type(dut)) if cli_type == 'click': my_cmd = '' if config.lower() == 'yes': for vrf in vrf_name: my_cmd += 'sudo config vrf add {}\n'.format(vrf) else: for vrf in vrf_name: my_cmd += 'sudo config vrf del {}\n'.format(vrf) if skip_error: try: st.config(dut, my_cmd) return True except Exception: st.log("Error handled..by API") return False else: st.config(dut, my_cmd) return True elif cli_type == 'klish': command = '' if config.lower() == 'yes': for vrf in vrf_name: command = command + "\n" + "ip vrf {}".format(vrf) else: for vrf in vrf_name: command = command + "\n" + "no ip vrf {}".format(vrf) output = st.config(dut, command, skip_error_check=skip_error, type="klish", conf=True) if "Could not connect to Management REST Server" in output: st.error("klish mode not working.") return False return True elif cli_type in ['rest-patch','rest-put']: http_method = kwargs.pop('http_method',cli_type) rest_urls = st.get_datastore(dut,'rest_urls') if config.lower() == 'yes': for vrf in vrf_name: rest_url = rest_urls['vrf_config'].format(vrf) ocdata = {"openconfig-network-instance:network-instance":[{"name":vrf,"config":{"name":vrf,"enabled":bool(1)}}]} response = config_rest(dut, http_method=http_method, rest_url=rest_url, json_data=ocdata) if not response: st.log(response) return False elif config.lower() == 'no': for vrf in vrf_name: rest_url = rest_urls['vrf_config'].format(vrf) response = delete_rest(dut, rest_url=rest_url) if not response: st.log(response) return False return True else: st.log("Unsupported cli")
9d6d7e85762610103277345d1e12d4ef2c3f3d9f
19,519
def _worker_command_line(thing, arguments): """ Create a worker command line suitable for Popen with only the options the worker process requires """ def a(name): "options with values" return [name, arguments[name]] * (arguments[name] is not None) def b(name): "boolean options" return [name] * bool(arguments[name]) return ( ['ckanapi', 'dump', thing, '--worker'] + a('--config') + a('--ckan-user') + a('--remote') + a('--apikey') + b('--get-request') + ['value-here-to-make-docopt-happy'] )
945e9da452b438b08aacbf967b93f10f717c5003
19,520
from typing import Optional def get_endpoint(arn: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEndpointResult: """ Resource Type Definition for AWS::S3Outposts::Endpoint :param str arn: The Amazon Resource Name (ARN) of the endpoint. """ __args__ = dict() __args__['arn'] = arn if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('aws-native:s3outposts:getEndpoint', __args__, opts=opts, typ=GetEndpointResult).value return AwaitableGetEndpointResult( arn=__ret__.arn, cidr_block=__ret__.cidr_block, creation_time=__ret__.creation_time, id=__ret__.id, network_interfaces=__ret__.network_interfaces, status=__ret__.status)
518fbd1ca92373bcbea5d10a44605b4990242d02
19,521
import sys def solveConsLaborIntMarg( solution_next, PermShkDstn, TranShkDstn, LivPrb, DiscFac, CRRA, Rfree, PermGroFac, BoroCnstArt, aXtraGrid, TranShkGrid, vFuncBool, CubicBool, WageRte, LbrCost, ): """ Solves one period of the consumption-saving model with endogenous labor supply on the intensive margin by using the endogenous grid method to invert the first order conditions for optimal composite consumption and between consumption and leisure, obviating any search for optimal controls. Parameters ---------- solution_next : ConsumerLaborSolution The solution to the next period's problem; must have the attributes vPfunc and bNrmMinFunc representing marginal value of bank balances and minimum (normalized) bank balances as a function of the transitory shock. PermShkDstn: [np.array] Discrete distribution of permanent productivity shocks. TranShkDstn: [np.array] Discrete distribution of transitory productivity shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor. CRRA : float Coefficient of relative risk aversion over the composite good. Rfree : float Risk free interest rate on assets retained at the end of the period. PermGroFac : float Expected permanent income growth factor for next period. BoroCnstArt: float or None Borrowing constraint for the minimum allowable assets to end the period with. Currently not handled, must be None. aXtraGrid: np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. TranShkGrid: np.array Grid of transitory shock values to use as a state grid for interpolation. vFuncBool: boolean An indicator for whether the value function should be computed and included in the reported solution. Not yet handled, must be False. CubicBool: boolean An indicator for whether the solver should use cubic or linear interpolation. Cubic interpolation is not yet handled, must be False. WageRte: float Wage rate per unit of labor supplied. LbrCost: float Cost parameter for supplying labor: u_t = U(x_t), x_t = c_t*z_t^LbrCost, where z_t is leisure = 1 - Lbr_t. Returns ------- solution_now : ConsumerLaborSolution The solution to this period's problem, including a consumption function cFunc, a labor supply function LbrFunc, and a marginal value function vPfunc; each are defined over normalized bank balances and transitory prod shock. Also includes bNrmMinNow, the minimum permissible bank balances as a function of the transitory productivity shock. """ # Make sure the inputs for this period are valid: CRRA > LbrCost/(1+LbrCost) # and CubicBool = False. CRRA condition is met automatically when CRRA >= 1. frac = 1.0 / (1.0 + LbrCost) if CRRA <= frac * LbrCost: print( "Error: make sure CRRA coefficient is strictly greater than alpha/(1+alpha)." ) sys.exit() if BoroCnstArt is not None: print("Error: Model cannot handle artificial borrowing constraint yet. 
") sys.exit() if vFuncBool or CubicBool is True: print("Error: Model cannot handle cubic interpolation yet.") sys.exit() # Unpack next period's solution and the productivity shock distribution, and define the inverse (marginal) utilty function vPfunc_next = solution_next.vPfunc TranShkPrbs = TranShkDstn.pmf TranShkVals = TranShkDstn.X PermShkPrbs = PermShkDstn.pmf PermShkVals = PermShkDstn.X TranShkCount = TranShkPrbs.size PermShkCount = PermShkPrbs.size uPinv = lambda X: CRRAutilityP_inv(X, gam=CRRA) # Make tiled versions of the grid of a_t values and the components of the shock distribution aXtraCount = aXtraGrid.size bNrmGrid = aXtraGrid # Next period's bank balances before labor income # Replicated axtraGrid of b_t values (bNowGrid) for each transitory (productivity) shock bNrmGrid_rep = np.tile(np.reshape(bNrmGrid, (aXtraCount, 1)), (1, TranShkCount)) # Replicated transitory shock values for each a_t state TranShkVals_rep = np.tile(np.reshape(TranShkVals, (1, TranShkCount)), (aXtraCount, 1)) # Replicated transitory shock probabilities for each a_t state TranShkPrbs_rep = np.tile(np.reshape(TranShkPrbs, (1, TranShkCount)), (aXtraCount, 1)) # Construct a function that gives marginal value of next period's bank balances *just before* the transitory shock arrives # Next period's marginal value at every transitory shock and every bank balances gridpoint vPNext = vPfunc_next(bNrmGrid_rep, TranShkVals_rep) # Integrate out the transitory shocks (in TranShkVals direction) to get expected vP just before the transitory shock vPbarNext = np.sum(vPNext * TranShkPrbs_rep, axis=1) # Transformed marginal value through the inverse marginal utility function to "decurve" it vPbarNvrsNext = uPinv(vPbarNext) # Linear interpolation over b_{t+1}, adding a point at minimal value of b = 0. 
vPbarNvrsFuncNext = LinearInterp(np.insert(bNrmGrid, 0, 0.0), np.insert(vPbarNvrsNext, 0, 0.0)) # "Recurve" the intermediate marginal value function through the marginal utility function vPbarFuncNext = MargValueFunc(vPbarNvrsFuncNext, CRRA) # Get next period's bank balances at each permanent shock from each end-of-period asset values # Replicated grid of a_t values for each permanent (productivity) shock aNrmGrid_rep = np.tile(np.reshape(aXtraGrid, (aXtraCount, 1)), (1, PermShkCount)) # Replicated permanent shock values for each a_t value PermShkVals_rep = np.tile(np.reshape(PermShkVals, (1, PermShkCount)), (aXtraCount, 1)) # Replicated permanent shock probabilities for each a_t value PermShkPrbs_rep = np.tile(np.reshape(PermShkPrbs, (1, PermShkCount)), (aXtraCount, 1)) bNrmNext = (Rfree / (PermGroFac * PermShkVals_rep)) * aNrmGrid_rep # Calculate marginal value of end-of-period assets at each a_t gridpoint # Get marginal value of bank balances next period at each shock vPbarNext = (PermGroFac * PermShkVals_rep) ** (-CRRA) * vPbarFuncNext(bNrmNext) # Take expectation across permanent income shocks EndOfPrdvP = (DiscFac * Rfree * LivPrb * np.sum(vPbarNext * PermShkPrbs_rep, axis=1, keepdims=True)) # Compute scaling factor for each transitory shock TranShkScaleFac_temp = (frac * (WageRte * TranShkGrid) ** (LbrCost * frac) * (LbrCost ** (-LbrCost * frac) + LbrCost ** frac )) # Flip it to be a row vector TranShkScaleFac = np.reshape(TranShkScaleFac_temp, (1, TranShkGrid.size)) # Use the first order condition to compute an array of "composite good" x_t values corresponding to (a_t,theta_t) values xNow = (np.dot(EndOfPrdvP, TranShkScaleFac)) ** (-1.0 / (CRRA - LbrCost * frac)) # Transform the composite good x_t values into consumption c_t and leisure z_t values TranShkGrid_rep = np.tile(np.reshape(TranShkGrid, (1, TranShkGrid.size)), (aXtraCount, 1)) xNowPow = xNow ** frac # Will use this object multiple times in math below # Find optimal consumption from optimal composite good cNrmNow = (((WageRte * TranShkGrid_rep) / LbrCost) ** (LbrCost * frac)) * xNowPow # Find optimal leisure from optimal composite good LsrNow = (LbrCost / (WageRte * TranShkGrid_rep)) ** frac * xNowPow # The zero-th transitory shock is TranShk=0, and the solution is to not work: Lsr = 1, Lbr = 0. cNrmNow[:, 0] = uPinv(EndOfPrdvP.flatten()) LsrNow[:, 0] = 1.0 # Agent cannot choose to work a negative amount of time. When this occurs, set # leisure to one and recompute consumption using simplified first order condition. # Find where labor would be negative if unconstrained violates_labor_constraint = (LsrNow > 1.0) EndOfPrdvP_temp = np.tile(np.reshape(EndOfPrdvP, (aXtraCount, 1)), (1, TranShkCount)) cNrmNow[violates_labor_constraint] = uPinv(EndOfPrdvP_temp[violates_labor_constraint]) LsrNow[violates_labor_constraint] = 1.0 # Set up z=1, upper limit # Calculate the endogenous bNrm states by inverting the within-period transition aNrmNow_rep = np.tile(np.reshape(aXtraGrid, (aXtraCount, 1)), (1, TranShkGrid.size)) bNrmNow = aNrmNow_rep - WageRte * TranShkGrid_rep + cNrmNow + WageRte * TranShkGrid_rep * LsrNow # Add an extra gridpoint at the absolute minimal valid value for b_t for each TranShk; # this corresponds to working 100% of the time and consuming nothing. bNowArray = np.concatenate( (np.reshape(-WageRte * TranShkGrid, (1, TranShkGrid.size)), bNrmNow), axis=0 ) # Consume nothing cNowArray = np.concatenate((np.zeros((1, TranShkGrid.size)), cNrmNow), axis=0) # And no leisure! 
LsrNowArray = np.concatenate((np.zeros((1, TranShkGrid.size)), LsrNow), axis=0) LsrNowArray[0, 0] = 1.0 # Don't work at all if TranShk=0, even if bNrm=0 LbrNowArray = 1.0 - LsrNowArray # Labor is the complement of leisure # Get (pseudo-inverse) marginal value of bank balances using end of period # marginal value of assets (envelope condition), adding a column of zeros # zeros on the left edge, representing the limit at the minimum value of b_t. vPnvrsNowArray = np.concatenate((np.zeros((1, TranShkGrid.size)), uPinv(EndOfPrdvP_temp))) # Construct consumption and marginal value functions for this period bNrmMinNow = LinearInterp(TranShkGrid, bNowArray[0, :]) # Loop over each transitory shock and make a linear interpolation to get lists # of optimal consumption, labor and (pseudo-inverse) marginal value by TranShk cFuncNow_list = [] LbrFuncNow_list = [] vPnvrsFuncNow_list = [] for j in range(TranShkGrid.size): # Adjust bNrmNow for this transitory shock, so bNrmNow_temp[0] = 0 bNrmNow_temp = (bNowArray[:, j] - bNowArray[0, j]) # Make consumption function for this transitory shock cFuncNow_list.append(LinearInterp(bNrmNow_temp, cNowArray[:, j])) # Make labor function for this transitory shock LbrFuncNow_list.append(LinearInterp(bNrmNow_temp, LbrNowArray[:, j])) # Make pseudo-inverse marginal value function for this transitory shock vPnvrsFuncNow_list.append(LinearInterp(bNrmNow_temp, vPnvrsNowArray[:, j])) # Make linear interpolation by combining the lists of consumption, labor and marginal value functions cFuncNowBase = LinearInterpOnInterp1D(cFuncNow_list, TranShkGrid) LbrFuncNowBase = LinearInterpOnInterp1D(LbrFuncNow_list, TranShkGrid) vPnvrsFuncNowBase = LinearInterpOnInterp1D(vPnvrsFuncNow_list, TranShkGrid) # Construct consumption, labor, pseudo-inverse marginal value functions with # bNrmMinNow as the lower bound. This removes the adjustment in the loop above. cFuncNow = VariableLowerBoundFunc2D(cFuncNowBase, bNrmMinNow) LbrFuncNow = VariableLowerBoundFunc2D(LbrFuncNowBase, bNrmMinNow) vPnvrsFuncNow = VariableLowerBoundFunc2D(vPnvrsFuncNowBase, bNrmMinNow) # Construct the marginal value function by "recurving" its pseudo-inverse vPfuncNow = MargValueFunc2D(vPnvrsFuncNow, CRRA) # Make a solution object for this period and return it solution = ConsumerLaborSolution(cFunc=cFuncNow, LbrFunc=LbrFuncNow, vPfunc=vPfuncNow, bNrmMin=bNrmMinNow ) return solution
cc54bdcd8a94b6bd65c459603db7921100c28ef5
19,522
def append_child(node, child): """Appends *child* to *node*'s children Returns: int: 1 on success, 0 on failure """ return _cmark.node_append_child(node, child)
70770596cf470987ff20abbe94f8b97c0050f86a
19,523
def run_chain(init_part, chaintype, length, ideal_population, id, tag): """Runs a Recom chain, and saves the seats won histogram to a file and returns the most Gerrymandered plans for both PartyA and PartyB Args: init_part (Gerrychain Partition): initial partition of chain chaintype (String): indicates which proposal to be used to generate spanning tree during Recom. Must be either "tree" or "uniform_tree" length (int): total steps of chain id (String): id of experiment, used when printing progress tag (String): tag added to filename to identify run Raises: RuntimeError: If chaintype is not "tree" nor 'uniform_tree" Returns: list of partitions generated by chain """ graph = init_part.graph for edge in graph.edges(): graph.edges[edge]['cut_times'] = 0 graph.edges[edge]['sibling_cuts'] = 0 if 'siblings' not in graph.edges[edge]: graph.edges[edge]['siblings'] = tuple([edge]) popbound = within_percent_of_ideal_population(init_part, config['EPSILON']) # Determine proposal for generating spanning tree based upon parameter if chaintype == "tree": tree_proposal = partial(recom, pop_col=config["POP_COL"], pop_target=ideal_population, epsilon=config['EPSILON'], node_repeats=config['NODE_REPEATS'], method=facefinder.my_mst_bipartition_tree_random) elif chaintype == "uniform_tree": tree_proposal = partial(recom, pop_col=config["POP_COL"], pop_target=ideal_population, epsilon=config['EPSILON'], node_repeats=config['NODE_REPEATS'], method=facefinder.my_uu_bipartition_tree_random) else: print("Chaintype used: ", chaintype) raise RuntimeError("Chaintype not recognized. Use 'tree' or 'uniform_tree' instead") # Chain to be run chain = MarkovChain(tree_proposal, Validator([popbound]), accept=accept.always_accept, initial_state=init_part, total_steps=length) electionDict = { 'seats' : (lambda x: x[config['ELECTION_NAME']].seats('PartyA')), 'won' : (lambda x: x[config['ELECTION_NAME']].seats('PartyA')), 'efficiency_gap' : (lambda x: x[config['ELECTION_NAME']].efficiency_gap()), 'mean_median' : (lambda x: x[config['ELECTION_NAME']].mean_median()), 'mean_thirdian' : (lambda x: x[config['ELECTION_NAME']].mean_thirdian()), 'partisan_bias' : (lambda x: x[config['ELECTION_NAME']].partisan_bias()), 'partisan_gini' : (lambda x: x[config['ELECTION_NAME']].partisan_gini()) } # Run chain, save each desired statistic, and keep track of cuts. Save most # left gerrymandered partition statistics = {statistic : [] for statistic in config['ELECTION_STATISTICS']} # Value of a partition is determined by each of the Gerry Statistics. # Lexicographical ordering is used, such that if two partitions have the same # value under the first Gerry Statistic, then the second is used as a tie # breaker, and so on. leftManderVal = [float('inf')] * len(config['GERRY_STATISTICS']) leftMander = None for i, partition in enumerate(chain): for edge in partition["cut_edges"]: graph.edges[edge]['cut_times'] += 1 for sibling in graph.edges[edge]['siblings']: graph.edges[sibling]['sibling_cuts'] += 1 # Save statistics of partition for statistic in config['ELECTION_STATISTICS']: statistics[statistic].append(electionDict[statistic](partition)) # Update left mander if applicable curPartVal = [electionDict[statistic](partition) for statistic in config['GERRY_STATISTICS']] if curPartVal < leftManderVal: leftManderVal = curPartVal leftMander = partition if i % 500 == 0: print('{}: {}'.format(id, i)) saveRunStatistics(statistics, tag) return leftMander
5b6db6ede5e8b7c8bcc46f91131fafed22741775
19,524
def cliquenet_s2(**kwargs): """CliqueNet-S2""" model = cliquenet(input_channels=64, list_channels=[36, 80, 150, 120], list_layer_num=[5, 5, 6, 6]) return model
6b99b3575d9bd245aea615eedbffa95ca7fd3076
19,525
def decConvert(dec): """ This is a number-word converter, but for decimals. Parameters ----- dec:str This is the input value numEngA: dict A dictionary of values that are only up to single digits frstDP: int The first decimal place scndDP: int The second decimal place Returns ----- :str This checks to see if there is a valid scndp, i.e., not zero, and then returns a valid decimal value in English format. """ numEngA = { 0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', } numEngB = { 1: 'ten', 2: 'twenty', 3: 'thirty', 4: 'forty', 5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety', } frstDP = int(dec[0]); scndDP = int(dec[1]); return ' and ' + numEngA[frstDP] + ' ' + numEngA[scndDP] if not scndDP else ' and ' + numEngB[frstDP]
dedfb67448e4bd2402acb4c561ebb4669d7bc58d
19,526
import os def train_model(base_model, training_dataset, validation_dataset, output_dir, loss=None, num_epochs=100, patience=20, learning_rate=1e-4): """ Train a model with the given data. Parameters ---------- base_model training_dataset validation_dataset output_dir loss num_epochs patience learning_rate Returns ------- history """ os.makedirs(output_dir, exist_ok=True) # Set up callbacks cb = [] # checkpoint model_weight_file = os.path.join(output_dir, 'model_best.h5') cb.append(tf.keras.callbacks.ModelCheckpoint(output_dir + '\\{epoch:02d}-{val_loss:.2f}_model_best.h5', verbose=1, save_weights_only=True, save_best_only=False, monitor='val_loss')) # val_loss cb.append(tf.keras.callbacks.EarlyStopping(monitor='val_loss', verbose=1, patience=patience)) history_csv_file = os.path.join(output_dir, 'history.csv') cb.append(tf.keras.callbacks.CSVLogger(history_csv_file, append=True, separator=',')) # model = ModelMGPU(base_model, gpus=2) FIXME: model = base_model model.compile(Adam(learning_rate=learning_rate), loss=loss) history = model.fit(training_dataset, validation_data=validation_dataset, # steps_per_epoch=846, epochs=num_epochs, callbacks=cb, verbose=1, shuffle=False, use_multiprocessing=True, workers=8) return history
5b6fdf7e7e63c0dbdbe62ecb401dad4b790075d1
19,527
import time def fit(data, weights, model_id, initial_parameters, tolerance=None, max_number_iterations=None, \ parameters_to_fit=None, estimator_id=None, user_info=None): """ Calls the C interface fit function in the library. (see also http://gpufit.readthedocs.io/en/latest/bindings.html#python) All 2D NumPy arrays must be in row-major order (standard in NumPy), i.e. array.flags.C_CONTIGUOUS must be True (see also https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#internal-memory-layout-of-an-ndarray) :param data: The data - 2D NumPy array of dimension [number_fits, number_points] and data type np.float32 :param weights: The weights - 2D NumPy array of the same dimension and data type as parameter data or None (no weights available) :param model_id: The model ID :param initial_parameters: Initial values for parameters - NumPy array of dimension [number_fits, number_parameters] and data type np.float32 :param tolerance: The fit tolerance or None (will use default value) :param max_number_iterations: The maximal number of iterations or None (will use default value) :param parameters_to_fit: Which parameters to fit - NumPy array of length number_parameters and type np.int32 or None (will fit all parameters) :param estimator_id: The Estimator ID or None (will use default values) :param user_info: User info - NumPy array of type np.char or None (no user info available) :return: parameters, states, chi_squares, number_iterations, execution_time """ # check all 2D NumPy arrays for row-major memory layout (otherwise interpretation of order of dimensions fails) if not data.flags.c_contiguous: raise RuntimeError('Memory layout of data array mismatch.') if weights is not None and not weights.flags.c_contiguous: raise RuntimeError('Memory layout of weights array mismatch.') if not initial_parameters.flags.c_contiguous: raise RuntimeError('Memory layout of initial_parameters array mismatch.') # size check: data is 2D and read number of points and fits if data.ndim != 2: raise RuntimeError('data is not two-dimensional') number_points = data.shape[1] number_fits = data.shape[0] # size check: consistency with weights (if given) if weights is not None and data.shape != weights.shape: raise RuntimeError('dimension mismatch between data and weights') # the unequal operator checks, type, length and content (https://docs.python.org/3.7/reference/expressions.html#value-comparisons) # size check: initial parameters is 2D and read number of parameters if initial_parameters.ndim != 2: raise RuntimeError('initial_parameters is not two-dimensional') number_parameters = initial_parameters.shape[1] if initial_parameters.shape[0] != number_fits: raise RuntimeError('dimension mismatch in number of fits between data and initial_parameters') # size check: consistency with parameters_to_fit (if given) if parameters_to_fit is not None and parameters_to_fit.shape[0] != number_parameters: raise RuntimeError( 'dimension mismatch in number of parameters between initial_parameters and parameters_to_fit') # default value: tolerance if not tolerance: tolerance = 1e-4 # default value: max_number_iterations if not max_number_iterations: max_number_iterations = 25 # default value: estimator ID if not estimator_id: estimator_id = EstimatorID.LSE # default value: parameters_to_fit if parameters_to_fit is None: parameters_to_fit = np.ones(number_parameters, dtype=np.int32) # now only weights and user_info could be not given # type check: data, weights (if given), initial_parameters are all np.float32 if data.dtype != 
np.float32: raise RuntimeError('type of data is not np.float32') if weights is not None and weights.dtype != np.float32: raise RuntimeError('type of weights is not np.float32') if initial_parameters.dtype != np.float32: raise RuntimeError('type of initial_parameters is not np.float32') # type check: parameters_to_fit is np.int32 if parameters_to_fit.dtype != np.int32: raise RuntimeError('type of parameters_to_fit is not np.int32') # type check: valid model and estimator id if not _valid_id(ModelID, model_id): raise RuntimeError('Invalid model ID, use an attribute of ModelID') if not _valid_id(EstimatorID, estimator_id): raise RuntimeError('Invalid estimator ID, use an attribute of EstimatorID') # we don't check type of user_info, but we extract the size in bytes of it if user_info is not None: user_info_size = user_info.nbytes else: user_info_size = 0 # pre-allocate output variables parameters = np.zeros((number_fits, number_parameters), dtype=np.float32) states = np.zeros(number_fits, dtype=np.int32) chi_squares = np.zeros(number_fits, dtype=np.float32) number_iterations = np.zeros(number_fits, dtype=np.int32) # conversion to ctypes types for optional C interface parameters using NULL pointer (None) as default argument if weights is not None: weights_p = weights.ctypes.data_as(gpufit_func.argtypes[3]) else: weights_p = None if user_info is not None: user_info_p = user_info.ctypes.data_as(gpufit_func.argtypes[11]) else: user_info_p = None # call into the library (measure time) t0 = time.perf_counter() status = gpufit_func( gpufit_func.argtypes[0](number_fits), \ gpufit_func.argtypes[1](number_points), \ data.ctypes.data_as(gpufit_func.argtypes[2]), \ weights_p, \ gpufit_func.argtypes[4](model_id), \ initial_parameters.ctypes.data_as(gpufit_func.argtypes[5]), \ gpufit_func.argtypes[6](tolerance), \ gpufit_func.argtypes[7](max_number_iterations), \ parameters_to_fit.ctypes.data_as(gpufit_func.argtypes[8]), \ gpufit_func.argtypes[9](estimator_id), \ gpufit_func.argtypes[10](user_info_size), \ user_info_p, \ parameters.ctypes.data_as(gpufit_func.argtypes[12]), \ states.ctypes.data_as(gpufit_func.argtypes[13]), \ chi_squares.ctypes.data_as(gpufit_func.argtypes[14]), \ number_iterations.ctypes.data_as(gpufit_func.argtypes[15])) t1 = time.perf_counter() # check status if status != Status.Ok: # get error from last error and raise runtime error error_message = error_func() raise RuntimeError('status = {}, message = {}'.format(status, error_message)) # return output values return parameters, states, chi_squares, number_iterations, t1 - t0
54c0f3a740589509d908e8c68625e33dccfbe1f8
19,528
import json def json_get(cid, item): """gets item from json file with user settings""" with open('data/%s.json' %cid) as f: user = json.load(f) return user[item]
dedb369aba555ca5359e291bc39504dd4b14a790
19,529
from . import __version__, status_show_server def debug_info(): """ This function varies version-by-version, designed to help the authors of this package when there's an issue. Returns: A dictionary that contains debug info across the interpret package. """ debug_dict = {} debug_dict["interpret.__version__"] = __version__ debug_dict["interpret.status_show_server"] = status_show_server() debug_dict["interpret.static_system_info"] = static_system_info() debug_dict["interpret.dynamic_system_info"] = dynamic_system_info() return debug_dict
4d751d245d6d5bf48680bcd217675f0a6df44d49
19,530
import random def adaptive_monte_carlo(func, z_min, z_max, epsilon): """ Perform adaptive Monte Carlo algorithm to a specific function. Uniform random variable is used in this case. The calculation starts from 10 division of the original function range. Each step, it will divide the region which has the largest variance. Input: func: the function of integrand z_min: lower limit of the integration z_max: upper limit of the integration epsilon: desired relative accuracy of the result Returns: new_I: numerical integral with required relative accuracy err: error of estimation of the integral evaluations: count of function evaluations""" # However, we can speed up this small sampling process inside each # sub-interval @jit(nopython=True) def loop(upper, lower, func, sampling_size): elements = [] for _ in range(sampling_size): z = random.uniform(lower, upper) elements.append(func(z)) return elements def monte_carlo(): # Monte Carlo integration in each of the sub-interval var_array = [] I_array = [] for i in range(len(intervals) - 1): # random sampling in each of the interval elements = loop( intervals[i], intervals[i + 1], func, sampling_size) # integral of segment of integration average = sum(elements) / sampling_size # weight of integral is correspond to the width of the sub-interval weight = intervals[i + 1] - intervals[i] I_array.append(weight * average) # add up the integral value # calculate the variance of this segment of integration var = sum((elements[i] - average)**2 for i in range(sampling_size)) var_array.append(var) # add variance to the array # return the integral value and variance of each sub-interval in an # array return I_array, var_array evaluation = 0 n = 10 # number of divisions sampling_size = 100 # 1000 sampling points in each division # Initial trail intervals = np.linspace(z_min, z_max, n) I_array, var_array = monte_carlo() evaluation += (len(intervals) - 1) * sampling_size new_I = sum(I_array) relative_accuracy = 1 # assign a non-zero value of initial relative accuracy while relative_accuracy >= epsilon and relative_accuracy != 0: old_I = new_I # adaption # find the index of the largest variance largest_var_index = var_array.index(max(var_array)) # removing the result of section with largest variance I_array = np.delete(I_array, largest_var_index) var_array = np.delete(var_array, largest_var_index) # divide sub-interval with the largest variance into 10 more # sub-intervals intervals = np.insert(intervals, largest_var_index + 1, np.linspace(intervals[largest_var_index], intervals[largest_var_index + 1], n, endpoint=False)) intervals = np.delete(intervals, largest_var_index) # run Monte Carlo in the new intervals I_array, var_array = monte_carlo() new_I = sum(I_array) # calculate relative accuracy relative_accuracy = abs((new_I - old_I) / old_I) # amount of evaluations increases by the number of intervals * random # points in each interval evaluation += (len(intervals) - 1) * sampling_size # print((len(intervals)-1)*sampling_size,new_I,relative_accuracy) # # show realtime evaluations err = 0 for i in range(len(intervals) - 1): # sum up the variance of each interval err += ((intervals[i + 1] - intervals[i]) / (z_max - z_min))**2 * var_array[i] # divide the standard deviation by sqrt of n to get standard error (error # of estimation) err = np.sqrt(err / (len(intervals) * sampling_size)) return new_I, err, evaluation
7735c9e3df1ddfb912dd9a2dfcf01699a56262d8
19,531
import numpy as np import scipy.stats def compute_statistics(measured_values, estimated_values): """Calculates a collection of common statistics comparing the measured and estimated values. Parameters ---------- measured_values: numpy.ndarray The experimentally measured values with shape=(number of data points) estimated_values: numpy.ndarray The computationally estimated values with shape=(number of data points) Returns ------- numpy.ndarray An array of the summarised statistics, containing the Slope, Intercept, R, R^2, p, RMSE, MSE, MUE, Tau list of str Human readable labels for each of the statistics. """ statistics_labels = [ Statistics.Slope, Statistics.Intercept, Statistics.R, Statistics.R2, Statistics.P, Statistics.RMSE, Statistics.MSE, Statistics.MUE, Statistics.Tau ] summary_statistics = np.zeros(len(statistics_labels)) ( summary_statistics[0], summary_statistics[1], summary_statistics[2], summary_statistics[4], _ ) = scipy.stats.linregress(measured_values, estimated_values) summary_statistics[3] = summary_statistics[2] ** 2 summary_statistics[5] = np.sqrt(np.mean((estimated_values - measured_values) ** 2)) summary_statistics[6] = np.mean(estimated_values - measured_values) summary_statistics[7] = np.mean(np.absolute(estimated_values - measured_values)) summary_statistics[8], _ = scipy.stats.kendalltau(measured_values, estimated_values) return summary_statistics, statistics_labels
cde9fd0094a6d8330f552ec98c11647c0a76bc42
19,532
def num_encode(n): """Convert an integer to an base62 encoded string.""" if n < 0: return SIGN_CHARACTER + num_encode(-n) s = [] while True: n, r = divmod(n, BASE) s.append(ALPHABET[r]) if n == 0: break return u''.join(reversed(s))
bd0f34122fa490cfafea2a5b60d6a919a7a8c253
19,533
import torch def hard_example_mining(dist_mat, labels, return_inds=False): """For each anchor, find the hardest positive and negative sample. Args: dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N] labels: pytorch LongTensor, with shape [N] return_inds: whether to return the indices. Save time if `False`(?) Returns: dist_ap: pytorch Variable, distance(anchor, positive); shape [N] dist_an: pytorch Variable, distance(anchor, negative); shape [N] p_inds: pytorch LongTensor, with shape [N]; indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1 n_inds: pytorch LongTensor, with shape [N]; indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1 NOTE: Only consider the case in which all labels have same num of samples, thus we can cope with all anchors in parallel. """ assert len(dist_mat.size()) == 2 assert dist_mat.size(0) == dist_mat.size(1) N = dist_mat.size(0) print(N) # shape [N, N] is_pos = labels.expand(N, N).eq(labels.expand(N, N).t()) is_neg = labels.expand(N, N).ne(labels.expand(N, N).t()) # `dist_ap` means distance(anchor, positive) # both `dist_ap` and `relative_p_inds` with shape [N, 1] dist_ap, relative_p_inds = torch.max( dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True) # `dist_an` means distance(anchor, negative) # both `dist_an` and `relative_n_inds` with shape [N, 1] dist_an, relative_n_inds = torch.min( dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True) # shape [N] dist_ap = dist_ap.squeeze(1) dist_an = dist_an.squeeze(1) if return_inds: # shape [N, N] ind = (labels.new().resize_as_(labels) .copy_(torch.arange(0, N).long()) .unsqueeze(0).expand(N, N)) # shape [N, 1] p_inds = torch.gather( ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data) n_inds = torch.gather( ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data) # shape [N] p_inds = p_inds.squeeze(1) n_inds = n_inds.squeeze(1) return dist_ap, dist_an, p_inds, n_inds return dist_ap, dist_an
d3e36f7c7088b1a1d457869701c99d2c2013a283
19,534
import os def failure_comment(request, comment_id): """Rejects a review, specifying the reason""" user = request.user comment = Comment.objects.get(recipient_user=user.id, id=comment_id, failure=False) form = AcceptForm(request.POST or None, initial=model_to_dict(comment), instance=comment) if form.is_valid(): new_failure = form.save() user = User.objects.get(id=request.user.id) user.useraccept.failure = True user.useraccept.save() for user in comment.recipient_user.all(): users = User.objects.get(id=user.id) if not users.useraccept.failure: break else: new_failure.failure = True recipient_users = construct_message(comment) data_comment = f'''" Status - failure, user - {comment.user}, recipient_users - {recipient_users}, comment - {comment.comment_for_rating}, reason - {new_failure.failure_text}"''' command = r'''curl -H "Content-type:application/json" --data '{"data":''' + data_comment + r'''}' http://localhost:3001/mineBlock''' os.system(command) new_failure.save() return HttpResponseRedirect(reverse_lazy('comment_list')) return HttpResponseRedirect(reverse_lazy('comment_list')) return render(request, 'failure_form.html', {'form': form, 'comment': comment})
745b10d95ea6fec68a14cfcf28eba9315af0c64c
19,535
from typing import Optional from typing import cast from typing import Sized def CodedVideoGrain(src_id_or_meta=None, flow_id_or_data=None, origin_timestamp=None, creation_timestamp=None, sync_timestamp=None, rate=Fraction(25, 1), duration=Fraction(1, 25), cog_frame_format=CogFrameFormat.UNKNOWN, origin_width=1920, origin_height=1080, coded_width=None, coded_height=None, is_key_frame=False, temporal_offset=0, length=None, cog_frame_layout=CogFrameLayout.UNKNOWN, unit_offsets=None, src_id=None, source_id=None, format=None, layout=None, flow_id=None, data=None): """\ Function called to construct a coded video grain either from existing data or with new data. First method of calling: CodedVideoGrain(meta, data) where meta is a dictionary containing the grain metadata, and data is a bytes-like object which contains the grain's payload. Optionally the data element can be replaced with an Awaitable that will return a data element when awaited. This is useful for grains that are backed with some sort of asynchronous IO system. A properly formated metadata dictionary for a Video Grain should look like: { "@_ns": "urn:x-ipstudio:ns:0.1", "grain": { "grain_type": "audio", "source_id": src_id, # str or uuid.UUID "flow_id": flow_id, # str or uuid.UUID "origin_timestamp": origin_timestamp, # str or mediatimestamps.Timestamp "sync_timestamp": sync_timestamp, # str or mediatimestamps.Timestamp "creation_timestamp": creation_timestamp, # str or mediatimestamps.Timestamp "rate": { "numerator": 0, # int "denominator": 1, # int }, "duration": { "numerator": 0, # int "denominator": 1, # int }, "cog_coded_frame": { "format": cog_frame_format, # int or CogFrameFormat "origin_width": origin_width, # int "origin_height": origin_height, # int "coded_width": coded_width, # int "coded_height": coded_height, # int "layout": cog_frame_layout, # int or CogFrameLayout "is_key_frame": False, # bool "temporal_offset": temporal_offset, # int "unit_offsets": [0, 16, 27] # list of int (optional) } } } Alternatively it may be called as: CodedVideoGrain(src_id, flow_id, origin_timestamp=None, sync_timestamp=None, rate=Fraction(25, 1), duration=Fraction(1, 25), cog_frame_format=CogFrameFormat.UNKNOWN, origin_width=1920, origin_height=1080, is_key_frame=False, coded_width=None, coded_height=None, temporal_offset=0, length=None, cog_frame_layout=CogFrameLayout.UNKNOWN, unit_offsets=None, data=None): in which case a new grain will be constructed with type "coded_video" and the specified metadata. If the data argument is None and the length argument is not then a new bytearray object will be constructed with size equal to length. In either case the value returned by this function will be an instance of the class mediagrains.grain.CODEDVIDEOGRAIN (the parameters "source_id" and "src_id" are aliases for each other. 
source_id is probably prefered, but src_id is kept avaialble for backwards compatibility) """ meta: Optional[CodedVideoGrainMetadataDict] = None if cog_frame_format is None: cog_frame_format = format if source_id is not None: src_id = source_id if cog_frame_layout is None: cog_frame_layout = layout if isinstance(src_id_or_meta, dict): meta = cast(CodedVideoGrainMetadataDict, src_id_or_meta) if data is None and not isinstance(flow_id_or_data, UUID): data = flow_id_or_data else: if src_id is None and isinstance(src_id_or_meta, UUID): src_id = src_id_or_meta if flow_id is None and isinstance(flow_id_or_data, UUID): flow_id = flow_id_or_data if coded_width is None: coded_width = origin_width if coded_height is None: coded_height = origin_height if length is None: if data is not None and hasattr(data, "__len__"): length = len(cast(Sized, data)) else: length = 0 if meta is None: if src_id is None or flow_id is None: raise AttributeError("Must include either metadata, or src_id, and flow_id") cts = creation_timestamp if cts is None: cts = Timestamp.get_time() if origin_timestamp is None: origin_timestamp = cts if sync_timestamp is None: sync_timestamp = origin_timestamp meta = { "@_ns": "urn:x-ipstudio:ns:0.1", "grain": { "grain_type": "coded_video", "source_id": str(src_id), "flow_id": str(flow_id), "origin_timestamp": str(mediatimestamp(origin_timestamp)), "sync_timestamp": str(mediatimestamp(sync_timestamp)), "creation_timestamp": str(mediatimestamp(cts)), "rate": { "numerator": Fraction(rate).numerator, "denominator": Fraction(rate).denominator, }, "duration": { "numerator": Fraction(duration).numerator, "denominator": Fraction(duration).denominator, }, "cog_coded_frame": { "format": cog_frame_format, "origin_width": origin_width, "origin_height": origin_height, "coded_width": coded_width, "coded_height": coded_height, "layout": cog_frame_layout, "is_key_frame": is_key_frame, "temporal_offset": temporal_offset } }, } if data is None: data = bytearray(length) if "grain" in meta and "cog_coded_frame" in meta['grain'] and unit_offsets is not None: meta['grain']['cog_coded_frame']['unit_offsets'] = unit_offsets return CODEDVIDEOGRAIN(meta, data)
5ab51dbbcaaff04f9c56434998d49434aec6f58e
19,536
def test_log_likelihood(model, X_test, y_test): """ Marginal log likelihood for GPy model on test data""" _, test_log_likelihood, _ = model.inference_method.inference( model.kern.rbf_1, X_test, model.likelihood.Gaussian_noise_1, y_test, model.mean_function, model.Y_metadata) return test_log_likelihood
61c04b4b3cb12472769699f37601154398df0959
19,537
from airfs._core.io_base_raw import ObjectRawIOBase from airfs._core.io_base_buffered import ObjectBufferedIOBase from airfs._core.io_random_write import ( ObjectRawIORandomWriteBase, ObjectBufferedIORandomWriteBase, ) def test_object_buffered_base_io(): """Tests airfs._core.io_buffered.ObjectBufferedIOBase""" # Mock sub class name = "name" size = 10000 flushed = bytearray() raw_flushed = bytearray() buffer_size = 100 flush_sleep = 0 def flush(data): """Dummy flush""" flushed.extend(data) time.sleep(flush_sleep) class DummySystem: """Dummy system""" client = None def __init__(self, **_): """Do nothing""" @staticmethod def getsize(*_, **__): """Returns fake result""" return size @staticmethod def head(*_, **__): """Returns fake result""" return {} @staticmethod def relpath(path): """Returns fake result""" return path @staticmethod def get_client_kwargs(*_, **__): """Returns fake result""" return {} class DummyRawIO(ObjectRawIOBase): """Dummy IO""" _SYSTEM_CLASS = DummySystem def _flush(self, buffer): """Do nothing""" raw_flushed.extend(buffer) def _read_range(self, start, end=0): """Read fake bytes""" return ((size if end > size else end) - start) * b"0" class DummyBufferedIO(ObjectBufferedIOBase): """Dummy buffered IO""" _RAW_CLASS = DummyRawIO DEFAULT_BUFFER_SIZE = buffer_size MINIMUM_BUFFER_SIZE = 10 MAXIMUM_BUFFER_SIZE = 10000 def ensure_ready(self): """Ensure flush is complete""" while any(1 for future in self._write_futures if not future.done()): time.sleep(0.01) def __init(self, *arg, **kwargs): ObjectBufferedIOBase.__init__(self, *arg, **kwargs) self.close_called = False def _close_writable(self): """Checks called""" self.close_called = True self.ensure_ready() def _flush(self): """Flush""" self._write_futures.append( self._workers.submit(flush, self._write_buffer[: self._buffer_seek]) ) class DummyRawIOPartFlush(DummyRawIO, ObjectRawIORandomWriteBase): """Dummy IO with part flush support""" _size = 20 def _flush(self, buffer, start, *_): """Do nothing""" if start == 50: # Simulate buffer that need to wait previous one time.sleep(0.1) raw_flushed.extend(buffer) class DummyBufferedIOPartFlush(ObjectBufferedIORandomWriteBase): """Dummy buffered IO with part flush support""" _RAW_CLASS = DummyRawIOPartFlush # Tests: Read until end object_io = DummyBufferedIO(name) assert object_io.read() == size * b"0" # Tests: Read when already at end assert object_io.read() == b"" # Tests: Read, max buffer object_io = DummyBufferedIO(name) assert object_io._max_buffers == size // buffer_size object_io = DummyBufferedIO(name, max_buffers=5) assert object_io.read(100) == 100 * b"0" # Tests: Read by parts assert sorted(object_io._read_queue) == list( range(100, 100 + buffer_size * 5, buffer_size) ) assert object_io._seek == 100 assert object_io.read(150) == 150 * b"0" assert sorted(object_io._read_queue) == list( range(200, 200 + buffer_size * 5, buffer_size) ) assert object_io._seek == 250 assert object_io.read(50) == 50 * b"0" assert sorted(object_io._read_queue) == list( range(300, 300 + buffer_size * 5, buffer_size) ) assert object_io._seek == 300 assert object_io.read() == (size - 300) * b"0" assert not object_io._read_queue # Tests: Read small parts part = buffer_size // 10 object_io.seek(0) for index in range(1, 15): assert object_io.read(part) == part * b"0" assert object_io._seek == part * index # Tests: Read, change seek object_io.seek(450) assert sorted(object_io._read_queue) == list( range(450, 450 + buffer_size * 5, buffer_size) ) object_io.seek(700) assert sorted(object_io._read_queue) == 
list( range(700, 700 + buffer_size * 5, buffer_size) ) # Tests: Read buffer size (No copy mode) object_io.seek(0) assert object_io.read(buffer_size) == buffer_size * b"0" object_io.seek(size - buffer_size // 2) assert object_io.read(buffer_size) == b"0" * (buffer_size // 2) object_io._seek = size # Tests: Read, EOF before theoretical EOF def read_range(*_, **__): """Returns empty bytes""" return b"" object_io = DummyBufferedIO(name, max_buffers=5) object_io._read_range = read_range assert object_io.read() == b"" # Tests write (with auto flush) assert bytes(flushed) == b"" object_io = DummyBufferedIO(name, mode="w") assert object_io.write(250 * b"0") == 250 object_io.ensure_ready() assert object_io._buffer_seek == 50 assert bytes(object_io._write_buffer) == 50 * b"0" + 50 * b"\0" assert object_io._get_buffer().tobytes() == 50 * b"0" assert object_io._seek == 2 assert len(flushed) == 200 assert bytes(flushed) == 200 * b"0" # Tests manual flush object_io.flush() object_io.ensure_ready() assert object_io._seek == 3 assert bytes(flushed) == 250 * b"0" assert object_io._buffer_seek == 0 # Tests write, only buffered should flush flushed = bytearray() raw_flushed = bytearray() assert bytes(flushed) == b"" assert bytes(raw_flushed) == b"" with DummyBufferedIO(name, mode="w") as object_io: assert object_io.write(150 * b"0") == 150 object_io.ensure_ready() assert len(flushed) == 100 assert object_io._buffer_seek == 50 assert len(object_io._get_buffer()) == 50 object_io.raw._write_buffer = object_io._get_buffer() assert len(object_io.raw._get_buffer()) == 50 assert len(flushed) == 150 assert not len(raw_flushed) # Tests write small data flushed by raw object_io = DummyBufferedIO(name, mode="w") assert object_io.write(10 * b"0") == 10 object_io.close() assert bytes(raw_flushed) == 10 * b"0" # Test max buffer object_io = DummyBufferedIO(name, mode="w", max_buffers=2) flush_sleep = object_io._FLUSH_WAIT assert object_io.write(1000 * b"0") == 1000 flush_sleep = 0 # Test default implementation with part flush support raw_flushed[:] = b"" content = os.urandom(100) with DummyBufferedIOPartFlush(name, mode="w", buffer_size=10) as object_io: object_io.write(content) assert raw_flushed == content
e72324d158ae9dfc8b859e5ce055230da83819fe
19,538
def fov_geometry(release='sva1',size=[530,454]):
    """
    Return positions of each CCD in PNG image for a given
    data release.

    Parameters:
    release : Data release name (currently ['sva1','y1a1'])
    size    : Image dimensions in pixels [width,height]

    Returns:
    list : A list of [id, xmin, ymin, xmax, ymax] for each CCD
    """
    SIZE=size
    WIDTH=SIZE[0]
    HEIGHT=SIZE[1]

    # CCDs belonging to each row
    ROWS = [ [3,2,1],                #range(3,0,-1),
             [7,6,5,4],              #range(7,3,-1),
             [12,11,10,9,8],         #range(12,7,-1),
             [18,17,16,15,14,13],    #range(18,12,-1),
             [24,23,22,21,20,19],    #range(24,18,-1),
             [31,30,29,28,27,26,25], #range(31,24,-1),
             [38,37,36,35,34,33,32], #range(38,31,-1),
             [44,43,42,41,40,39],    #range(44,38,-1),
             [50,49,48,47,46,45],    #range(50,44,-1),
             [55,54,53,52,51],       #range(55,50,-1),
             [59,58,57,56],          #range(59,55,-1),
             [62,61,60],             #range(62,59,-1)
             ]

    if release.lower() == 'sva1':
        # These are the old SV pngs, not the ones made for Y2A1
        # Border padding in x,y; assumed symmetric
        PAD = [0,0]
        ROWS = [r[::-1] for r in ROWS[::-1]]
    else:
        PAD = [0.02*WIDTH,0.02*HEIGHT]
        ROWS = ROWS

    NROWS = len(ROWS)  # Number of rows
    NCCDS = [len(row) for row in ROWS]
    CCD_SIZE = [float(WIDTH-2*PAD[0])/max(NCCDS),
                float(HEIGHT-2*PAD[1])/NROWS]  # CCD dimension (assumed to span image)

    ret = []
    for i,ccds in enumerate(ROWS):
        for j,ccd in enumerate(ccds):
            xpad = (SIZE[0] - len(ccds)*CCD_SIZE[0])/2.
            ypad = PAD[1]
            xmin = xpad + j*CCD_SIZE[0]
            xmax = xmin + CCD_SIZE[0]
            ymin = ypad + i*CCD_SIZE[1]
            ymax = ymin + CCD_SIZE[1]
            # These are output as ints now
            ret += [[int(ccd), int(xmin), int(ymin), int(xmax), int(ymax)]]

    return sorted(ret)
a7e118ed223a91d5e939b24baa8bbfb0858064b9
19,539
def parse_descriptor(desc: str) -> 'Descriptor': """ Parse a descriptor string into a :class:`Descriptor`. Validates the checksum if one is provided in the string :param desc: The descriptor string :return: The parsed :class:`Descriptor` :raises: ValueError: if the descriptor string is malformed """ i = desc.find("#") if i != -1: checksum = desc[i + 1:] desc = desc[:i] computed = DescriptorChecksum(desc) if computed != checksum: raise ValueError("The checksum does not match; Got {}, expected {}".format(checksum, computed)) return _parse_descriptor(desc, _ParseDescriptorContext.TOP)
b82c04f6cdc6d4e9b5247463e6fcbdcf12c5ffc7
19,540
def HHMMSS_to_seconds(string): """Converts a colon-separated time string (HH:MM:SS) to seconds since midnight""" (hhs,mms,sss) = string.split(':') return (int(hhs)*60 + int(mms))*60 + int(sss)
f7a49ad5d14eb1e26acba34946830710384780f7
19,541
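A quick sanity check for HHMMSS_to_seconds above; this assumes the function is defined in the current session and just spells out the arithmetic for a few times of day.

assert HHMMSS_to_seconds("00:00:59") == 59
assert HHMMSS_to_seconds("01:02:03") == 3723   # 1*3600 + 2*60 + 3
assert HHMMSS_to_seconds("23:59:59") == 86399  # last second of the day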
import os
from functools import partial

def create_check_warnings_reference(warnings_file):
    """Read warnings_file and compare it with the warnings the compiler
    actually reported. If the reference file is missing we just check that
    there are any warnings at all."""
    if not os.path.isfile(warnings_file):
        return check_missing_warnings
    else:
        with open(warnings_file, "rb") as reference_file:
            reference = reference_file.read()
        return partial(_help_check_warnings_reference, reference=reference)
d556225616005125e7e8e772f1f48705e4c1186c
19,542
def fetch_user_profile(user_id):
    """
    This function looks up a user profile in a dictionary given a user ID.
    In production, this should be replaced by querying an external database.

    user_id: User ID used to query the external database and retrieve the
             user profile.
    return:  The user profile corresponding to the user ID; if none is found,
             a default profile type is returned.
    """
    if user_id in USER_PROFILES:
        return USER_PROFILES[user_id]
    else:
        return {"profile": "free"}
c9ee521dc909f865232ec5d39b456bafd0c996dc
19,543
def _replace_token_range(tokens, start, end, replacement): """For a range indicated from start to end, replace with replacement.""" tokens = tokens[:start] + replacement + tokens[end:] return tokens
2848a3ad2d448e062facf78264fb1d15a1c3985c
19,544
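A short illustration of _replace_token_range above, assuming the function is in scope; the token strings are made up for the example.

tokens = ["SELECT", "*", "FROM", "t", "WHERE", "x", "=", "1"]
# Swap the WHERE clause (indices 4..7) for a LIMIT clause.
new_tokens = _replace_token_range(tokens, 4, 8, ["LIMIT", "10"])
assert new_tokens == ["SELECT", "*", "FROM", "t", "LIMIT", "10"]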
def softmax_loss_vectorized(W, X, y, reg): """ Softmax loss function, vectorized version. Inputs and outputs are the same as softmax_loss_naive. """ # Initialize the loss and gradient to zero. loss = 0.0 dW = np.zeros_like(W) num_train = X.shape[0] num_classes = W.shape[1] num_dimensions = X.shape[1] dW = np.zeros_like(W) ############################################################################# # TODO: Compute the softmax loss and its gradient using no explicit loops. # # Store the loss in loss and the gradient in dW. If you are not careful # # here, it is easy to run into numeric instability. Don't forget the # # regularization! # ############################################################################# #copy paste from http://cs231n.github.io/neural-networks-case-study/#loss score = X.dot(W) # (N,C) score = score - np.amax(score,axis = 1,keepdims = True) score = np.exp(score) probs = score/np.sum(score,axis = 1, keepdims = True) loss = -1*np.log(probs[np.arange(num_train),y]).sum()/num_train loss = loss + 0.5 * reg * np.sum(W * W) #http://cs231n.github.io/neural-networks-case-study/#grad dscores = probs #(N,C) dscores[range(num_train),y] -= 1 dscores = dscores / num_train dW = np.dot(X.T,dscores) dW += reg * W ############################################################################# # END OF YOUR CODE # ############################################################################# return loss, dW
27232f9988f98359dab225a47c04a9d4799a38b1
19,545
def norm(a): """normalizes input matrix between 0 and 1 Args: a: numpy array Returns: normalized numpy array """ return (a - np.amin(a))/(np.amax(a)-np.amin(a))
6c246926b8a5c91ea5a674447679a7d1cf7a2c8e
19,546
def collect_path(rf, method="quick", verbose=True): """ Collect paths from RandomForest objects. This function is the most time-consuming part. Output: A list of outputs from get_path_to_max_prediction_terminal_node. """ n_tree = len(rf) result = [] if method == "quick": for i in range(n_tree): if verbose: if (i+1) % 100 == 0: print("Construct the %s tree graph out of %s trees" %(i+1, n_tree)) dot_data = tree.export_graphviz(rf.estimators_[i], out_file = None, rounded = True, special_characters = True) G = Graph(dot_data) result.append(G.get_path_to_max_prediction_terminal_node()) else: result.append(return_node_path_to_max_prediction(rf.estimators_[i], verbose=False)) return result
d2ecb0f277eafb483d38376278635f1143c5e7f2
19,547
def calculate_variance(beta): """ This function calculates variance of curve beta :param beta: numpy ndarray of shape (2,M) of M samples :rtype: numpy ndarray :return variance: variance """ n, T = beta.shape betadot = gradient(beta, 1. / (T - 1)) betadot = betadot[1] normbetadot = zeros(T) centroid = calculatecentroid(beta) integrand = zeros((n, n, T)) t = linspace(0, 1, T) for i in range(0, T): normbetadot[i] = norm(betadot[:, i]) a1 = (beta[:, i] - centroid) a1 = a1.reshape((n, 1)) integrand[:, :, i] = a1.dot(a1.T) * normbetadot[i] l = trapz(normbetadot, t) variance = trapz(integrand, t, axis=2) variance /= l return (variance)
179ae9dde0979f909525c44c94ea11ded7a776d5
19,548
def get_datastore_mo(client, soap_stub, datacenter_name, datastore_name): """ Return datastore managed object with specific datacenter and datastore name """ datastore = get_datastore(client, datacenter_name, datastore_name) if not datastore: return None datastore_mo = vim.Datastore(datastore, soap_stub) return datastore_mo
f759dccd61caa7cd290a2a00b0ebbcb80dc8fa7e
19,549
def _merge_dictionaries(dict1: dict, dict2: dict) -> dict:
    """
    Recursively merge two dictionaries.

    Values already present in ``dict2`` take precedence; keys that only exist
    in ``dict1`` are copied over.

    :param dict1: Base dictionary to merge.
    :param dict2: Dictionary to merge on top of base dictionary.
    :return: Merged dictionary
    """
    for key, val in dict1.items():
        if isinstance(val, dict):
            dict2_node = dict2.setdefault(key, {})
            _merge_dictionaries(val, dict2_node)
        else:
            if key not in dict2:
                dict2[key] = val

    return dict2
322cd1e3cf01d97ebc8ecb450772ca328afee121
19,550
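A small usage sketch for _merge_dictionaries above (assuming it is importable); note that values already present in dict2 win, and that dict2 is mutated and returned.

base = {"server": {"host": "localhost", "port": 8080}, "debug": True}
override = {"server": {"port": 9090}}
merged = _merge_dictionaries(base, override)
assert merged == {"server": {"port": 9090, "host": "localhost"}, "debug": True}
assert merged is override   # dict2 is updated in place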
def integrate_profile(rho0, s0, r_s, r_1, rmax_fac=1.2, rmin_fac=0.01,
                      r_min=None, r_max=None):

    """
    Solves the ODE to obtain the density profile

    :returns: the integration domain in kpc and the solution to the density
    profile in M_sun / kpc^3
    """

    G = 4.3e-6  # units kpc and solar mass

    length_scale = np.sqrt(s0 ** 2 * (4 * np.pi * G * rho0) ** -1)

    if r_max is None:
        x_max = rmax_fac * r_1 / length_scale
    else:
        x_max = r_max/length_scale

    if r_min is None:
        x_min = r_1 * rmin_fac / length_scale
    else:
        x_min = r_min/length_scale

    # solve the ODE with initial conditions
    phi_0, phi_prime_0 = 0, 0
    N = 600

    xvalues = np.linspace(x_min, x_max, N)

    res = solve_ivp(ode_system, (x_min, x_max),
                    [phi_0, phi_prime_0], t_eval=xvalues)

    return res['t'] * length_scale, rho0 * np.exp(res.y[0])
af139c2f8211f11d2a71e1da73ebdd3f9f6c4fe7
19,551
def create_eval_dataset( task, batch_size, subset): """Create datasets for evaluation.""" if batch_size % jax.device_count() != 0: raise ValueError(f"Batch size ({batch_size}) must be divisible by " f"the number of devices ({jax.device_count()}).") per_device_batch_size = batch_size // jax.device_count() dataset_builder = tfds.builder(task) eval_split = deterministic_data.get_read_instruction_for_host( subset, dataset_builder.info.splits[subset].num_examples) eval_ds = deterministic_data.create_dataset( dataset_builder, split=eval_split, num_epochs=1, shuffle=False, batch_dims=[jax.local_device_count(), per_device_batch_size], preprocess_fn=_preprocess_cifar10) return dataset_builder.info, eval_ds
4446f322d38a736942125a9427db1e68bb008e5e
19,552
def rad2deg(angle):
    """
    Converts radians to degrees

    Parameters
    ----------
    angle : float, int
            Angle in radians

    Returns
    -------
    ad : float
         Angle in degrees

    Examples
    --------
    >>> rad2deg(pi)
    180.000000000000
    >>> rad2deg(pi/2)
    90.0000000000000
    >>> rad2deg(2*pi)
    360.000000000000
    """
    ad = ( (angle)*(180/pi) ).evalf()
    return ad
e8bdc1d914c139d7d3847223ecfb8b0399eda5ca
19,553
def center_crop(im, size, is_color=True):
    """
    Crop the center of image with size.

    Example usage:

    .. code-block:: python

        im = center_crop(im, 224)

    :param im: the input image with HWC layout.
    :type im: ndarray
    :param size: the cropping size.
    :type size: int
    :param is_color: whether the image is color or not.
    :type is_color: bool
    """
    h, w = im.shape[:2]
    # Integer division keeps the slice indices as ints under Python 3.
    h_start = (h - size) // 2
    w_start = (w - size) // 2
    h_end, w_end = h_start + size, w_start + size
    if is_color:
        im = im[h_start:h_end, w_start:w_end, :]
    else:
        im = im[h_start:h_end, w_start:w_end]
    return im
ac280efd4773613f08632fe836eecc16be23adf8
19,554
def read_json(file_path: str) -> Jelm: """reads from a json file path""" with open(file_path) as fp: dump = fp.read() return reads_json(dump)
a3166cbe3bc98478a89574af7493a740826ab366
19,555
def fillCells(cell_bounds, rdx, rdy, rdbathy, dlim=0.0, drymin=0.0, drymax=0.99, pland=None, rotated=False, median_depth=False, smc=False, setadj=False): """Returns a list of depth and land-sea data to correspond with cell bounds list""" print('[INFO] Calculating cell depths') ncells = np.shape(cell_bounds)[0] # cell depths array as depth, proportion of dry cells and cell type cell_depths = np.zeros([ncells,3]) cell_depths[:,2] = 1 # set to default ww3 wet cell value if dlim > 0.0: print('[WARN] Dry depth limit is set greater than zero, changing sign for depth negative convention') dlim = dlim * -1.0 # if rdx and rdy are 1D arrays, combine to form 2d arrays #if len(np.shape(rdx)) == 1: # chkx, chky = np.meshgrid(rdx, rdy) #else: chkx = rdx chky = rdy for lp in range(np.shape(cell_bounds)[0]): if np.mod(lp, 2500) == 0: print('[INFO] ... done %d points out of %d' %tuple([lp, ncells])) xsw = cell_bounds[lp,0] ysw = cell_bounds[lp,1] xne = cell_bounds[lp,2] yne = cell_bounds[lp,3] if len(np.shape(rdx)) == 1: # regular bathy indsx = np.where((chkx >= xsw) & (chkx < xne)) indsy = np.where((chky >= ysw) & (chky < yne)) ndepths = np.size(indsx) * np.size(indsy) else: # rotated pole bathy inds = np.where(((chkx >= xsw) & (chkx < xne) & (chky >= ysw) & (chky < yne))) ndepths = np.size(inds) / 2 if ndepths > 0: if len(np.shape(rdx)) == 1: # regular bathy bathytmp = rdbathy[np.min(indsy):np.max(indsy)+1, np.min(indsx):np.max(indsx)+1].flatten() else: # rotated pole bathy bathytmp = rdbathy[inds] # only use wet depths in calculations if np.size(bathytmp[bathytmp<dlim]) > 0: if median_depth: depth = np.median(bathytmp[bathytmp<dlim]) else: depth = np.mean(bathytmp[bathytmp<dlim]) else: depth = 99.99 # use all depths for dry percentage calculation pcdry = np.size(np.where(bathytmp >= dlim)[0]) # add wet cell land percentages if this info has been loaded in if pland is not None: if len(np.shape(rdx)) == 1: # regular bathy plandtmp = pland[np.min(indsy):np.max(indsy)+1, np.min(indsx):np.max(indsx)+1].flatten() else: # rotated pole bathy plandtmp = pland[inds] if np.size(bathytmp[bathytmp < dlim]) > 0: plandsum = np.sum(plandtmp[bathytmp < dlim]) pcdry = np.float(pcdry) + plandsum pcdry = np.float(pcdry) / np.float(ndepths) cell_depths[lp,0] = depth cell_depths[lp,1] = pcdry # mark cells for removal/tiering based on percentage dry if pcdry >= drymax: # reject dry cells cell_depths[lp,2] = 0 elif pcdry > drymin: # set partially dry points for tiering cell_depths[lp,2] = -1 else: print('[WARNING] No source data found in cell, returning zero value') # second pass through cells to switch cells adjacent to coast to type -2 # sets required additional tiering in next step if smc and setadj: print('[INFO] Checking for points adjacent to dry cells') #cellsbox = setCellsXYbox(smccells) inds = np.where(cell_depths[:,2] == 0) adjdry = [] for cnt, lp in enumerate(inds[0]): if np.mod(cnt, 2500) == 0: print('[INFO] ... 
done %d points out of %d' %tuple([cnt, np.size(inds)])) intersects = chkAdj(lp, cell_bounds, altbounds=None) #intersects = chkAdjxy(lp, cellsbox, altbox=None) switch_drytype = False if np.any(intersects is not None): for chkcell in intersects: if chkcell is not None: if cell_depths[chkcell,2] != 0: cell_depths[chkcell,2] = -1 switch_drytype = True if switch_drytype: adjdry.append(lp) if np.size(np.array(adjdry)) > 0: cell_depths[adjdry,2] = -2 # for non-smc grids set cells marked -1 to 1 (wet) if not smc: print('[INFO] Not SMC grid - switching tier values to wet cells') cell_depths[cell_depths[:,2] == -2, 2] = -1 cell_depths[:,2] = np.abs(cell_depths[:,2]) print(np.min(cell_depths[lp,2])) return cell_depths
b40a8c3d5171c40cebf397b93e697dfaef1820ec
19,556
def mark_item_complete(todo: TodoAndCompleted, idx: int) -> TodoAndCompleted:
    """
    Pop todo['todo'][idx] and append it to todo['complete'].

    Parameters
    ----------
    todo : TodoAndCompleted
        A dict containing a to-do list and a list of completed tasks.
    idx : int
        Index of an item to move from todo['todo'] to todo['complete'].

    Returns
    -------
    TodoAndCompleted
        Copy of `todo` with `todo['todo'][idx]` moved to `todo['complete']`.
    """
    _todo = todo.copy()
    # Copy the inner lists as well so the input dict is left unmodified.
    _todo["todo"] = list(todo["todo"])
    _todo["complete"] = list(todo["complete"])
    _todo["complete"].append(_todo["todo"].pop(idx))
    return _todo
a1efac8801fa8af3b93352dbb3ce8790b29a99f2
19,557
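A usage sketch for mark_item_complete, assuming the implementation sketched above (pop from the copied 'todo' list, append to the copied 'complete' list); the task names are illustrative only.

todo = {"todo": ["write tests", "ship release"], "complete": ["draft docs"]}
updated = mark_item_complete(todo, 0)
assert updated == {"todo": ["ship release"], "complete": ["draft docs", "write tests"]}
assert todo["todo"] == ["write tests", "ship release"]   # the input dict is left untouched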
import six import math import warnings def spatial_pyramid_pooling_2d(x, pyramid_height, pooling_class=None, pooling=None): """Spatial pyramid pooling function. It outputs a fixed-length vector regardless of input feature map size. It performs pooling operation to the input 4D-array ``x`` with different kernel sizes and padding sizes, and then flattens all dimensions except first dimension of all pooling results, and finally concatenates them along second dimension. At :math:`i`-th pyramid level, the kernel size :math:`(k_h^{(i)}, k_w^{(i)})` and padding size :math:`(p_h^{(i)}, p_w^{(i)})` of pooling operation are calculated as below: .. math:: k_h^{(i)} &= \\lceil b_h / 2^i \\rceil, \\\\ k_w^{(i)} &= \\lceil b_w / 2^i \\rceil, \\\\ p_h^{(i)} &= (2^i k_h^{(i)} - b_h) / 2, \\\\ p_w^{(i)} &= (2^i k_w^{(i)} - b_w) / 2, where :math:`\\lceil \\cdot \\rceil` denotes the ceiling function, and :math:`b_h, b_w` are height and width of input variable ``x``, respectively. Note that index of pyramid level :math:`i` is zero-based. See detail in paper: `Spatial Pyramid Pooling in Deep Convolutional \ Networks for Visual Recognition \ <https://arxiv.org/abs/1406.4729>`_. Args: x (~chainer.Variable): Input variable. The shape of ``x`` should be ``(batchsize, # of channels, height, width)``. pyramid_height (int): Number of pyramid levels pooling_class (MaxPooling2D): *(deprecated since v4.0.0)* Only MaxPooling2D is supported. Please use the ``pooling`` argument instead since this argument is deprecated. pooling (str): Currently, only ``max`` is supported, which performs a 2d max pooling operation. Replaces the ``pooling_class`` argument. Returns: ~chainer.Variable: Output variable. The shape of the output variable will be :math:`(batchsize, c \\sum_{h=0}^{H-1} 2^{2h}, 1, 1)`, where :math:`c` is the number of channels of input variable ``x`` and :math:`H` is the number of pyramid levels. .. note:: This function uses some pooling classes as components to perform spatial pyramid pooling. Currently, it only supports :class:`~functions.MaxPooling2D` as elemental pooling operator so far. """ bottom_c, bottom_h, bottom_w = x.shape[1:] ys = [] # create pooling functions for different pyramid levels and apply it for pyramid_level in six.moves.range(pyramid_height): num_bins = int(2 ** pyramid_level) ksize_h = int(math.ceil(bottom_h / (float(num_bins)))) remainder_h = ksize_h * num_bins - bottom_h pad_h = remainder_h // 2 ksize_w = int(math.ceil(bottom_w / (float(num_bins)))) remainder_w = ksize_w * num_bins - bottom_w pad_w = remainder_w // 2 ksize = (ksize_h, ksize_w) pad = (pad_h, pad_w) if pooling_class is not None: warnings.warn('pooling_class argument is deprecated. Please use ' 'the pooling argument.', DeprecationWarning) if (pooling_class is None) == (pooling is None): raise ValueError('Specify the pooling operation either using the ' 'pooling_class or the pooling argument.') if (pooling_class is chainer.functions.MaxPooling2D or pooling == 'max'): pooler = chainer.functions.MaxPooling2D( ksize=ksize, stride=None, pad=pad, cover_all=True) else: pooler = pooling if pooling is not None else pooling_class raise ValueError('Unsupported pooling operation: ', pooler) y_var = pooler.apply((x,))[0] n, c, h, w = y_var.shape ys.append(y_var.reshape((n, c * h * w, 1, 1))) return chainer.functions.concat(ys)
fd64c335afe43a35a8917cf8079ae41d410f9dc4
19,558
from typing import OrderedDict async def retrieve_seasons_and_teams(client, url): # noqa: E999 """ Retrieves seasons and teams for a single player. """ doc = await get_document(client, url) teams = doc.xpath( "//table[@id='stats_basic_nhl' or @id='stats_basic_plus_nhl']" + "/tbody/tr/td[2]/a/text()") seasons = doc.xpath( "//table[@id='stats_basic_nhl' or @id='stats_basic_plus_nhl']" + "/tbody/tr/th[@data-stat='season']/text()") teams = list(OrderedDict.fromkeys(teams).keys()) seasons = [ int(seasons[0].split("-")[0]), int(seasons[-1].split("-")[0]) + 1] return teams, seasons
b68d95a46b5f036ec53826fa0321273b168d2261
19,559
def array_match_difference_1d(a, b):
    """Return the summed absolute difference between the elements of a and b."""
    if len(a) != len(b):
        raise ValueError('Both arrays must have the same length')
    if len(a) == 0:
        raise ValueError('Arrays must not be empty')
    if type(a) is not np.ndarray:
        a = np.array(a)
    if type(b) is not np.ndarray:
        b = np.array(b)
    return np.sum(np.abs(a - b))
b82a6e36bdfe2757bc1a86bb4d95c156ac847474
19,560
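A minimal check of array_match_difference_1d above, assuming the function and numpy (imported as np) are available.

a = [1, 2, 3]
b = [2, 2, 5]
assert array_match_difference_1d(a, b) == 3   # |1-2| + |2-2| + |3-5|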
def data_len(system: System) -> int: """Compute number of entries required to serialize all entities in a system.""" entity_lens = [entity.state_size() + entity.control_size() for entity in system.entities] return sum(entity_lens)
8ebc98e713052dd215ffaecfd5338535e4662bd2
19,561
def keep_row(row): """ :param row: a list for the row in the data :return: True if we should keep row; False if we should discard row """ if row[_INDICES["Actor1CountryCode"]] in _COUNTRIES_OF_INTEREST or \ row[_INDICES["Actor2CountryCode"]] in _COUNTRIES_OF_INTEREST: return True return False
5124583806c02034c0c11518b25639ebd61aaccf
19,562
def _grad_shapelets(X, y, n_classes, weights, shapelets, lengths, alpha, penalty, C, fit_intercept, intercept_scaling, sample_weight): """Compute the gradient of the loss with regards to the shapelets.""" n_samples, n_timestamps = X.shape # Derive distances between shapelets and time series distances = _derive_all_squared_distances( X, n_samples, n_timestamps, shapelets, lengths, alpha) distances = np.asarray(distances).T # Add intercept if fit_intercept: distances = np.c_[np.ones(n_samples) * intercept_scaling, distances] weight_idx = 1 else: weight_idx = 0 # Derive probabilities and cross-entropy loss if weights.ndim == 1: proba = _expit(distances @ weights) proba = np.clip(proba, 1e-8, 1 - 1e-8) else: proba = _softmax(distances @ weights, n_samples, n_classes) proba = np.clip(proba, 1e-8, 1 - 1e-8) # Reshape some arrays if weights.ndim == 1: proba_minus_y = (proba - y)[:, None] else: proba_minus_y = proba - y # Compute the gradients gradients = _compute_shapelet_grad( X, n_samples, n_timestamps, weights, shapelets, lengths, alpha, proba_minus_y, weight_idx, sample_weight ) gradients = np.concatenate(gradients) return gradients
fb16c9aaaf06ae322f9781d8089fd084fc7d299a
19,563
from typing import List import requests def get_tags_list(server_address: str, image_name: str) -> List[str]: """ Returns list of tags connected with an image with a given name :param server_address: address of a server with docker registry :param image_name: name of an image :return: list of tags connected with a given image In case of any problems during getting list of tags - it throws an error """ url = f"http://{server_address}/v2/{image_name}/tags/list" result = requests.get(url) if not result or result.status_code != HTTPStatus.OK: err_message = Texts.TAGS_GET_ERROR_MSG logger.exception(err_message) raise RuntimeError(err_message) return result.json().get("tags")
3c20ef85f77689cdbc25a131bbe1f1cc1431528a
19,564
def build_graph(graph_attrs, meta_data, nodes, edges): """ Build the Graph with specific nodes and edges. :param graph_attrs: dictionary with graph attributes :param nodes: list of nodes where each node is tuple (node_name, type, attrs) nodes=[ ('input', 'Parameter', {}), ('weights', 'Const', {}), ('conv', 'Convolution', {}), ('output', 'Result', {}) ] :param edges: list of edges where each edge is tuple (node_out, node_in, attrs) edges=[ ('input', 'conv', {'out': 0, 'in': 0}), ('weights', 'conv', {'out': 0, 'in': 1}), ('conv', 'output', {'out': 0, 'in': 0}) ] :return: generated graph. """ graph = Graph() graph.graph = graph_attrs graph.meta_data = meta_data for node in nodes: create_node(graph, node[0], node[1], node[2]) for edge in edges: out_port = edge[2].get('out', 0) in_port = edge[2].get('in', 0) connect_nodes_by_name(graph, edge[0], out_port, edge[1], in_port) graph.clean_up() return graph
9238da38d6b8d65f9bff7c344d2fe8d4ed71dc90
19,565
from typing import Callable from typing import Any import pickle def cached_value(func: Callable[[], Any], path) -> Any: """ Tries to load data from the pickle file. If the file doesn't exist, the func() method is run and its results are saved into the file. Then the result is returned. """ if exists(path): with open(path, 'rb') as file: result = pickle.load(file) else: try: result = func() with open(path, 'wb') as file: pickle.dump(result, file, protocol=3) except CachedValueException: logger = qf_logger.getChild(__name__) logger.error('Error while processing {}'.format(func)) return result
b74c9b79cf74c32c5d1befaad293cdc2dbf3b5c3
19,566
def expensehistory(): """Show history of expenses or let the user update existing expense""" # User reached route via GET if request.method == "GET": # Get all of the users expense history ordered by submission time history = tendie_expenses.getHistory(session["user_id"]) # Get the users spend categories categories = tendie_categories.getSpendCategories(session["user_id"]) # Get the users payers (for modal) payers = tendie_account.getPayers(session["user_id"]) return render_template("expensehistory.html", history=history, categories=categories, payers=payers, isDeleteAlert=False) # User reached route via POST else: # Initialize users action userHasSelected_deleteExpense = False # Determine what action was selected by the user (button/form trick from: https://stackoverflow.com/questions/26217779/how-to-get-the-name-of-a-submitted-form-in-flask) if "btnDeleteConfirm" in request.form: userHasSelected_deleteExpense = True elif "btnSave" in request.form: userHasSelected_deleteExpense = False else: return apology("Doh! Spend Categories is drunk. Try again!") # Get the existing expense record ID from the DB and build a data structure to store old expense details oldExpense = tendie_expenses.getExpense( request.form, session["user_id"]) # Make sure an existing record was found otherwise render an error message if oldExpense["id"] == None: return apology("The expense record you're trying to update doesn't exist") # Delete the existing expense record if userHasSelected_deleteExpense == True: # Delete the old record from the DB deleted = tendie_expenses.deleteExpense( oldExpense, session["user_id"]) if not deleted: return apology("The expense was unable to be deleted") # Get the users expense history, spend categories, payers, and then render the history page w/ delete alert history = tendie_expenses.getHistory(session["user_id"]) categories = tendie_categories.getSpendCategories( session["user_id"]) payers = tendie_account.getPayers(session["user_id"]) return render_template("expensehistory.html", history=history, categories=categories, payers=payers, isDeleteAlert=True) # Update the existing expense record else: # Update the old record with new details from the form expensed = tendie_expenses.updateExpense( oldExpense, request.form, session["user_id"]) if not expensed: return apology("The expense was unable to be updated") # Redirect to results page and render a summary of the updated expense return render_template("expensed.html", results=expensed)
15ce57d9b246fd81bc8f38fcda11330de3ff50a5
19,567
def resize(a, new_shape): """resize(a,new_shape) returns a new array with the specified shape. The original array's total size can be any size. """ a = ravel(a) if not len(a): return zeros(new_shape, a.typecode()) total_size = multiply.reduce(new_shape) n_copies = int(total_size / len(a)) extra = total_size % len(a) if extra != 0: n_copies = n_copies+1 extra = len(a)-extra a = concatenate( (a,)*n_copies) if extra > 0: a = a[:-extra] return reshape(a, new_shape)
fcbce959a0ff6bd31a269be89b50956ccc6f6883
19,568
def rotate180(image_np): """Rotates the given image by 180 degrees.""" if image_np is None: return None return np.fliplr(np.flipud(image_np))
d851314620d527b6c33e19389b5fc19035edcdb3
19,569
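A small sanity check for rotate180 above, assuming numpy is imported as np and the function is in scope.

img = np.array([[1, 2],
                [3, 4]])
assert np.array_equal(rotate180(img), np.array([[4, 3],
                                                [2, 1]]))
assert rotate180(None) is None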
import requests def proxy_view(request, url, domain=None, secure=False, requests_args=None, template_name="proxy/debug.html"): """ Forward as close to an exact copy of the request as possible along to the given url. Respond with as close to an exact copy of the resulting response as possible. If there are any additional arguments you wish to send to requests, put them in the requests_args dictionary. """ requests_args = (requests_args or {}).copy() headers = get_headers(request.META) params = request.GET.copy() proxy_domain = settings.PROXY_DOMAIN protocol = 'http' if secure: protocol = 'https' url = '%s://%s/%s' % (protocol, proxy_domain, url[1:] if url.startswith('/') else url) if 'headers' not in requests_args: requests_args['headers'] = {} if 'data' not in requests_args: requests_args['data'] = request.body if 'params' not in requests_args: requests_args['params'] = QueryDict('', mutable=True) if 'cookies' not in requests_args and getattr(settings, 'PROXY_SET_COOKIES', False): headers = dict([ (kk, vv) for kk, vv in headers.items() if kk.lower() != 'cookie' ]) requests_args['cookies'] = get_cookies(request, proxy_domain) # Overwrite any headers and params from the incoming request with explicitly # specified values for the requests library. headers.update(requests_args['headers']) params.update(requests_args['params']) # If there's a content-length header from Django, it's probably in all-caps # and requests might not notice it, so just remove it. for key in headers.keys(): if key.lower() == 'content-length': del headers[key] requests_args['headers'] = headers requests_args['params'] = params if settings.DEBUG and request.method != 'HEAD': requests_args['allow_redirects'] = False response = requests.request(request.method, url, **requests_args) if getattr(settings, 'PROXY_SET_COOKIES', False): set_cookies(request, proxy_domain, response.cookies) content_type = response.headers['content-type'] content = response.content show_debug = False if 'html' in content_type.lower(): content = rewrite_response(content, proxy_domain, secure=secure) show_debug = settings.DEBUG elif 'javascript' in content_type.lower(): content = rewrite_script(content, proxy_domain, secure=secure) if show_debug: ctx = { 'url': url, 'requests_args': requests_args, 'response': content, 'headers': response.headers, 'status': response.status_code, } if int(response.status_code) in (301, 302): redirection = response.headers['location'] ctx['redirection'] = proxy_reverse(redirection, secure) proxy_response = render(request, template_name, ctx) else: proxy_response = HttpResponse( content, status=response.status_code) excluded_headers = set([ # Hop-by-hop headers # ------------------ # Certain response headers should NOT be just tunneled through. These # are they. For more info, see: # http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.5.1 'connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade', # Although content-encoding is not listed among the hop-by-hop headers, # it can cause trouble as well. Just let the server set the value as # it should be. 'content-encoding', # Since the remote server may or may not have sent the content in the # same encoding as Django will, let Django worry about what the length # should be. 'content-length', ]) for key, value in response.headers.iteritems(): if key.lower() in excluded_headers: continue proxy_response[key] = value return proxy_response
29c06a332f533227202cb8a0287adf1c7d01a3da
19,570
def poll_for_staleness(id_or_elem, wait=10, frequency=1): """Use WebDriverWait with expected_conditions.staleness_of to wait for an element to be no longer attached to the DOM. :argument id_or_elem: The identifier of the element, or its element object. :argument wait: The amount of seconds to wait before throwing a TimeoutException. :argument frequency: The amount of seconds between each poll. :return: False if the element is still attached to the DOM, true otherwise. """ elem = _get_elem(id_or_elem) try: logger.debug('Waiting for element to be removed from DOM: {}' .format(elem)) return (WebDriverWait(_test.browser, wait, poll_frequency=frequency) .until(EC.staleness_of(elem))) except TimeoutException: _raise("Element was not removed from the DOM.")
bd96a71795869817e1a284a1671645b503fcd223
19,571
def check_y(y, allow_empty=False, allow_constant=True):
    """Validate input data.

    Parameters
    ----------
    y : pd.Series
    allow_empty : bool, optional (default=False)
        If False, empty `y` raises an error.
    allow_constant : bool, optional (default=True)
        If True, constant `y` does not raise an error.

    Returns
    -------
    y : pd.Series

    Raises
    ------
    ValueError, TypeError
        If y is an invalid input
    """
    # Check if pandas series or numpy array
    if not (isinstance(y, pd.Series) or isinstance(y, pd.DataFrame)):
        raise TypeError(f"`y` must be a pandas Series, but found type: {type(y)}")

    if not allow_constant:
        if np.all(y == y.iloc[0]):
            raise ValueError("All values of `y` are the same.")

    # check time index
    check_time_index(y.index, allow_empty=allow_empty)
    return y
570aa15347377bddbe96919cc1560157905c91ce
19,572
def preprocess_features(features): """Row-normalize feature matrix and convert to tuple representation""" rowsum = np.array(features.sum(1), dtype=float) r_inv = np.power(rowsum, -1).flatten() r_inv[np.isinf(r_inv)] = 0. r_mat_inv = sp.diags(r_inv) features = r_mat_inv.dot(features) return sparse_to_tuple(features)
dba875e19918cb11bae31a575f35d79519d2d897
19,573
import http
import httplib
import socket
import ssl

import httplib2


def CheckReachability(urls, http_client=None):
  """Check whether the hosts of given urls are reachable.

  Args:
    urls: iterable(str), The list of urls to check connection to.
    http_client: httplib2.Http, an object used by gcloud to make http and
      https connections. Defaults to a non-authenticated Http object from the
      googlecloudsdk.core.credentials.http module.

  Returns:
    list(Failure): Reasons for why any urls were unreachable. The list will be
      empty if all urls are reachable.
  """
  if not http_client:
    http_client = http.Http(auth=False)

  failures = []
  for url in urls:
    try:
      response, _ = http_client.request(url, method='HEAD')
    # TODO(user): Investigate other possible exceptions that might be thrown.
    except (httplib.HTTPException, socket.error, ssl.SSLError,
            httplib2.HttpLib2Error) as err:
      message = 'Cannot reach {0} ({1})'.format(url, type(err).__name__)
      failures.append(Failure(message=message, exception=err))
    else:
      if response.status != httplib.OK:
        message = 'Cannot reach {0} ([{1}] {2})'.format(url, response.status,
                                                        response.reason)
        failures.append(Failure(message=message, response=response))
  return failures
9d16e7be2bcc8a1f887a7f86dc464d44de681088
19,574
def dataset_prediction_results(dataset, event, model_factory_fn=pohmm_factory, min_history=90, max_history=None, out_name=None): """ Obtain predictions for each model. Create stratified folds Train on 1-n_folds. Use the last fold to make predictions for each event """ print('Running:', out_name, flush=True) # Load and preprocess the dataset df = load_data(dataset) # from .data import reduce_dataset # df = reduce_dataset(df, num_users=5, min_samples=1, max_samples=1) df = preprocess_data(df, event, ['tau']) # fold, ref user, query user, query session, into future, event, ground truth, prediction baseline_col = 'baseline_tau' prediction_col = 'prediction_tau' work_done = 0 work = len(df.index.unique()) progress = ProgressBar(work) progress.animate(work_done) def _predictions(df): if max_history is None: upper = len(df) - 1 else: upper = min(max_history, len(df) - 1) results = [] for i in range(min_history, upper + 1): hmm = model_factory_fn(df[:i]) pred = hmm.predict_df(df[:i], next_pstate=df.iloc[i]['event'])[0] # pred = hmm.predict_df(df[:i])[0] baseline_pred = df['tau'].values[:i].mean(axis=0) results.append([i, df.iloc[i]['event'], df.iloc[i]['tau'], pred, baseline_pred]) nonlocal work_done work_done += 1 progress.animate(work_done) results = pd.DataFrame(results, columns=['event_idx', 'event', 'tau', prediction_col, baseline_col]) return results pred = df.groupby(level=[0, 1]).apply(_predictions) pred['SMAPE_tau'] = SMAPE(pred['tau'], pred[prediction_col]) pred['SMAPE_baseline_tau'] = SMAPE(pred['tau'], pred[baseline_col]) pred = pred.reset_index(level=df.index.nlevels, drop=True) save_results(pred, out_name + '_predictions') return
4e11a6e3b144c4b37529465c3517481666bebd78
19,575
def num(value): """Parse number as float or int.""" value_float = float(value) try: value_int = int(value) except ValueError: return value_float return value_int if value_int == value_float else value_float
a2ea65c2afa0005dbe4450cb383731b029cb68df
19,576
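A few illustrative calls to num above (assuming it is in scope); note that int("7.0") raises ValueError, so only plain integer strings come back as int.

assert num("7") == 7 and isinstance(num("7"), int)
assert num("7.5") == 7.5 and isinstance(num("7.5"), float)
assert isinstance(num("7.0"), float)   # int("7.0") fails, so the float parse is kept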
def __format_event_start_date_and_time(t):
    """Formats datetime into e.g. Tue Jul 30 at 5:00 PM"""
    strftime_format = "%a %b %-d at %-I:%M %p"
    return t.strftime(strftime_format)
4db0b37351308dfe1e7771be9a9ad8b98f2defa6
19,577
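A usage sketch for __format_event_start_date_and_time above; the date is made up, and the %-d / %-I directives assume a platform that supports them (Linux/macOS).

from datetime import datetime

start = datetime(2024, 7, 30, 17, 0)
print(__format_event_start_date_and_time(start))   # -> "Tue Jul 30 at 5:00 PM"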
def collect_properties(service_instance, view_ref, obj_type, path_set=None, include_mors=False): """ Collect properties for managed objects from a view ref Check the vSphere API documentation for example on retrieving object properties: - http://goo.gl/erbFDz Args: si (ServiceInstance): ServiceInstance connection view_ref (vim.view.*): Starting point of inventory navigation obj_type (vim.*): Type of managed object path_set (list): List of properties to retrieve include_mors (bool): If True include the managed objects refs in the result Returns: A list of properties for the managed objects """ collector = service_instance.content.propertyCollector # Create object specification to define the starting point of # inventory navigation obj_spec = vmodl.query.PropertyCollector.ObjectSpec() obj_spec.obj = view_ref obj_spec.skip = True # Create a traversal specification to identify the path for collection traversal_spec = vmodl.query.PropertyCollector.TraversalSpec() traversal_spec.name = 'traverseEntities' traversal_spec.path = 'view' traversal_spec.skip = False traversal_spec.type = view_ref.__class__ obj_spec.selectSet = [traversal_spec] # Identify the properties to the retrieved property_spec = vmodl.query.PropertyCollector.PropertySpec() property_spec.type = obj_type if not path_set: property_spec.all = True property_spec.pathSet = path_set # Add the object and property specification to the # property filter specification filter_spec = vmodl.query.PropertyCollector.FilterSpec() filter_spec.objectSet = [obj_spec] filter_spec.propSet = [property_spec] # Retrieve properties props = collector.RetrieveContents([filter_spec]) data = [] for obj in props: properties = {} for prop in obj.propSet: properties[prop.name] = prop.val if include_mors: properties['obj'] = obj.obj data.append(properties) return data
39abeff44fefc6b93284b7ec10e66a8a224ce73d
19,578
from typing import Optional


def to_all_gpus(
    cpu_index: faiss.Index,
    co: Optional['faiss.GpuMultipleClonerOptions'] = None
) -> faiss.Index:
    """Clone a CPU FAISS index onto all available GPUs.

    :param cpu_index: the CPU-resident index to clone
    :param co: optional cloner options forwarded to ``index_cpu_to_all_gpus``
    :return: the GPU-resident index
    """
    n_gpus = faiss.get_num_gpus()
    assert n_gpus != 0, 'Attempting to move index to GPU without any GPUs'

    gpu_index = faiss.index_cpu_to_all_gpus(cpu_index, co=co)

    return gpu_index
58619c3745078004a97225d2091154f6ab140814
19,579
from typing import List from typing import MutableMapping def parse_template_mapping( template_mapping: List[str] ) -> MutableMapping[str, str]: """Parses a string template map from <key>=<value> strings.""" result = {} for mapping in template_mapping: key, value = mapping.split("=", 1) result[key] = value return result
49eb029a842be7c31d33444235452ecad4701476
19,580
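A quick example of parse_template_mapping above (assuming it is importable); the key/value strings are hypothetical.

mapping = parse_template_mapping(["region=us-east-1", "tag=env=prod"])
assert mapping == {"region": "us-east-1", "tag": "env=prod"}   # split on the first '=' only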
import os def isGZ(fn): """ Tests whether a file is gz-compressed. :param fn: a filename :type fn: str :returns: True if fn is gz-compressed otherwise False """ assert os.path.exists(fn) with open(fn, 'rb') as fi: b1, b2 = fi.read(1), fi.read(1) return b1 == b'\x1f' and b2 == b'\x8b'
d71fe08a10554eae2909287a1c19dadf795a4592
19,581
def select_dim_over_nm(max_n, max_m, d, coef_nd, coef_md, coef_nm, coef_n, coef_m, rest, max_mem): """Finds the optimal values for `n` and `m` to fit in available memory. This function should be called for problems where the GPU needs to hold two blocks of data (one of size m, one of size n) and one kernel block (of size n x m). Parameters ----------- max_n : int The maximum value for n (the first dimension of the problem) max_m : int The maximum value for m (the second dimension of the problem) d : int The dimensionality of the data coef_nd : float How many n*d blocks need to be held in memory coef_md : float How many m*d blocks need to be held in memory coef_nm : float How many m*n blocks need to be held in memory coef_n : float How many n-dimensional vectors need to be held in memory coef_m : float How many m-dimensional vectors need to be held in memory rest : float additional bytes to be kept in memory max_mem : float The amount of available memory in bytes. This is the main problem constraint Returns ------- out_n : int The dimension n to use in order to fit in available memory out_m : int The dimension m to use in order to fit in available memory Notes ------ The equation gives a hyperbola. We intersect the hyperbola with a line from the origin, with the slope given by the ratio of max_m and max_n. We then solve a quadratic equation to find the intersection point. """ fac = max_m / max_n if coef_nm == 0 and (coef_nd == 0 and coef_md == 0 and coef_n == 0 and coef_m == 0): v_n = max_n elif coef_nm == 0: v_n = solve_lin(b=d * (coef_nd + fac * coef_md) + coef_n + coef_m * fac, c=rest - max_mem) else: v_n = solve_quad(a=fac * coef_nm, b=d * (fac * coef_md + coef_nd) + fac * coef_m + coef_n, c=rest - max_mem) v_m = fac * v_n out_n = int(min(v_n, max_n)) out_m = int(min(v_m, max_m)) if out_n <= 0 or out_m <= 0: raise MemoryError("Available memory %.2fMB is not enough." % (max_mem / 2**20)) return out_n, out_m
a4a824ab19a5d102461d565312ec9874a8c4e513
19,582
import os def _file_extension(filename): """Return file extension without the dot""" # openbabel expects the extension without the dot, but os.path.splitext # returns the extension with it dotext = os.path.splitext(filename)[1] return dotext[1:]
10c328d3b4670aef021c90a28f704b154be8ca5e
19,583
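A couple of checks for _file_extension above, assuming the function is in scope; the file names are made up.

assert _file_extension("molecule.pdb") == "pdb"
assert _file_extension("/data/ligands.sdf") == "sdf"
assert _file_extension("README") == ""   # no extension -> empty string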
import os
import uuid


def atomic_tmp_file(final_path):
    """Return a tmp file name to use with atomic_install.  This will be in the
    same directory as final_path.  The temporary file will have the same
    extension as final_path.  If the final path is in /dev (/dev/null,
    /dev/stdout), it is returned unchanged and atomic_tmp_install will do
    nothing."""
    final_dir = os.path.dirname(os.path.normpath(final_path))  # can be empty
    if final_dir == '/dev':
        return final_path
    final_basename = os.path.basename(final_path)
    final_ext = os.path.splitext(final_path)[1]
    base_name = "{}.{}.tmp{}".format(final_basename, uuid.uuid4(), final_ext)
    return os.path.join(final_dir, base_name)
4d4589a1808195c707ecf77d6bc8d9fdd2a487f1
19,584
def hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0): """ 4.56 in Ward and Hohmann .. plot:: import matplotlib.pyplot as plt from SimPEG import EM freq = np.logspace(-1, 6, 61) test = EM.Analytics.FDEM.hzAnalyticDipoleF(100, freq, 0.001, secondary=False) plt.loglog(freq, abs(test.real)) plt.loglog(freq, abs(test.imag)) plt.title('Response at $r$=100m') plt.xlabel('Frequency') plt.ylabel('Response') plt.legend(('real','imag')) plt.show() """ r = np.abs(r) k = np.sqrt(-1j*2.*np.pi*freq*mu*sigma) m = 1 front = m / (2. * np.pi * (k**2) * (r**5) ) back = 9 - ( 9 + 9j * k * r - 4 * (k**2) * (r**2) - 1j * (k**3) * (r**3)) * np.exp(-1j*k*r) hz = front*back if secondary: hp =-1/(4*np.pi*r**3) hz = hz-hp if hz.ndim == 1: hz = Utils.mkvc(hz,2) return hz
94e38e60f1da4abc47c9210683815bc9f0085727
19,585
def _compute_net_budget(recarray, zonenamedict): """ :param recarray: :param zonenamedict: :return: """ recnames = _get_record_names(recarray) innames = [ n for n in recnames if n.startswith("FROM_") or n.endswith("_IN") ] outnames = [ n for n in recnames if n.startswith("TO_") or n.endswith("_OUT") ] select_fields = ["totim", "time_step", "stress_period", "name"] + list( zonenamedict.values() ) if "totim" not in recarray.dtype.names: select_fields.pop(0) select_records_in = np.in1d(recarray["name"], innames) select_records_out = np.in1d(recarray["name"], outnames) in_budget = recarray[select_fields][select_records_in] out_budget = recarray[select_fields][select_records_out] net_budget = in_budget.copy() for f in [n for n in zonenamedict.values() if n in select_fields]: net_budget[f] = np.array([r for r in in_budget[f]]) - np.array( [r for r in out_budget[f]] ) newnames = [] for n in net_budget["name"]: if n.endswith("_IN") or n.endswith("_OUT"): newnames.append("_".join(n.split("_")[:-1])) else: newnames.append("_".join(n.split("_")[1:])) net_budget["name"] = newnames return net_budget
e5e14bef5663af22f5547e36b305f858de372232
19,586
def bg_white(msg): """ return msg with a white background """ return __apply_style(__background_colors['white'],msg)
1e5aca8b0e506420b921c6833704aa32ba0c599f
19,587
def read_image_from_s3(bucket_name, key): """S3 to PIL Image""" s3 = boto3.resource('s3') bucket = s3.Bucket(bucket_name) object = bucket.Object(key) response = object.get() return Image.open(response['Body'])
6d7e62e007b493f1d124c07ab0b19abe9c6bc308
19,588
import typing from typing import Counter def count_indra_apis(graph: BELGraph) -> typing.Counter[str]: """Count the APIs reported by INDRA.""" return Counter( api for _, _, d in graph.edges(data=True) if ANNOTATIONS in d and 'INDRA_API' in d[ANNOTATIONS] for api in d[ANNOTATIONS]['INDRA_API'] if api and isinstance(api, str) and api != 'nan' )
9743f59cd51506fe1157397a4096f48b3258afcc
19,589
import numpy
def integrate_sed(wavelength, flambda, wlmin=None, wlmax=None):
    """
    Calculate the flux in an SED by direct integration.

    A direct trapezoidal rule integration is carried out on the flambda
    values and the associated wavelength values.

    Parameters
    ----------
    wavelength:  A numpy float array of wavelength values, normally in
                 microns

    flambda:     A numpy float array of flux density values, normally
                 F_lambda in W/m^2/micron

    wlmin:       An optional float value for the minimum wavelength of the
                 calculation, or None to have no lower limit aside from
                 the data range

    wlmax:       An optional float value for the maximum wavelength of the
                 calculation, or None to have no upper limit aside from
                 the data range

    Returns
    -------
    flux:        The float value, the estimated total flux, nominally in
                 W/m^2 if the input units are microns and W/m^2/micron;
                 if the wavelength range is bad or the two arrays do not
                 match in length a value of zero is returned
    """
    if len(wavelength) != len(flambda):
        return 0.
    if wlmin is None:
        xmin = 0.9 * numpy.min(wavelength)
    else:
        xmin = wlmin
    if wlmax is None:
        xmax = 1.1 * numpy.max(wavelength)
    else:
        xmax = wlmax
    if (xmin >= xmax) or (len(wavelength) < 2):
        return 0.
    inds = numpy.argsort(wavelength)
    newwavelength = numpy.copy(wavelength[inds])
    newflambda = numpy.copy(flambda[inds])
    if (xmin > numpy.min(wavelength)) or (xmax < numpy.max(wavelength)):
        fl1 = numpy.interp(xmin, wavelength, flambda)
        fl2 = numpy.interp(xmax, wavelength, flambda)
        # Flag the out-of-range points before clipping the wavelengths so
        # the boundary flux values actually get assigned to those points.
        below = newwavelength < xmin
        above = newwavelength > xmax
        newwavelength[below] = xmin
        newwavelength[above] = xmax
        newflambda[below] = fl1
        newflambda[above] = fl2
    flux = numpy.trapz(newflambda, newwavelength)
    return flux
e2fd2c3905bba104f8d4bc376cd56585b40332bf
19,590
def computeMSSIM(groundTruth, recovered):
    """
    Compute the Mean Structural Similarity Measure (MSSIM) between the
    recovered and the corresponding ground-truth image

    Args:
        :param groundTruth: ground truth reference image.
            numpy.ndarray (Height x Width x Spectral_Dimension)
        :param recovered: image under evaluation.
            numpy.ndarray (Height x Width x Spectral_Dimension)

    Returns:
        MSSIM between `recovered` and `groundTruth`
    """
    assert groundTruth.shape == recovered.shape, \
        "Sizes do not match for ground truth and recovered spectral images"

    groundTruth = np.clip(groundTruth.astype("float64"), 0, 1)
    recovered = np.clip(recovered.astype("float64"), 0, 1)

    # to get per-pixel SSIM values instead of the mean, pass full=True
    return compare_ssim(groundTruth, recovered, multichannel=True)
a8f24531de784d3ada684b7a5841c8a5a247c6ff
19,591
def test_cache_memoize_ttl(cache, timer): """Test that cache.memoize() can set a TTL.""" ttl1 = 5 ttl2 = ttl1 + 1 @cache.memoize(ttl=ttl1) def func1(a): return a @cache.memoize(ttl=ttl2) def func2(a): return a func1(1) func2(1) assert len(cache) == 2 key1, key2 = tuple(cache.keys()) timer.time = ttl1 - 1 assert cache.has(key1) assert cache.has(key2) timer.time = ttl1 assert not cache.has(key1) assert cache.has(key2) timer.time = ttl2 assert not cache.has(key2)
87d274517c6166db6d174281e6785809e45609b8
19,592
def queues(request): """ We get here from /queues """ return render("queues.html", request, { "queuelist" : request.jt.queues()})
b8f09a074ef496a9b51d001ec8441305b51ea933
19,593
def shorten_str(string, length=30, end=10): """Shorten a string to the given length.""" if string is None: return "" if len(string) <= length: return string else: return "{}...{}".format(string[:length - end], string[- end:])
d52daec3058ddced26805f259be3fc6139b5ef1f
19,594
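A short illustration of shorten_str above (assuming it is in scope); with the defaults length=30 and end=10, long strings keep the first 20 and last 10 characters.

assert shorten_str(None) == ""
assert shorten_str("short string") == "short string"
long_name = "a_very_long_identifier_that_keeps_going_and_going"
assert shorten_str(long_name) == long_name[:20] + "..." + long_name[-10:]
assert len(shorten_str(long_name)) == 33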
def A2cell(A): """Compute unit cell constants from A :param A: [G11,G22,G33,2*G12,2*G13,2*G23] G - reciprocal metric tensor :return: a,b,c,alpha, beta, gamma (degrees) - lattice parameters """ G,g = A2Gmat(A) return Gmat2cell(g)
ddec7e3f70ee2de4963f67155bda5ee8743d418d
19,595
from typing import Any


def update_user_post(
    slug: str,
    post: schemas.PostCreate,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(get_current_active_user),
) -> Any:
    """
    Update a user's post if the current user is its owner
    """
    post_data = get_post(db, slug)
    if post_data is None:
        raise HTTPException(status_code=404, detail="Post not found")
    elif post_data.author_id != current_user.id:
        raise HTTPException(status_code=403, detail="Not enough permissions")
    req_post = update_post(db=db, slug=slug, post=post)
    return req_post
629e580924a676cd74e728e31df6467367763a0e
19,596
from backend.caffe.path_loader import PathLoader def loadNetParameter(caffemodel): """ Return a NetParameter protocol buffer loaded from the caffemodel. """ proto = PathLoader().importProto() net = proto.NetParameter() try: with open(caffemodel, 'rb') as f: net.ParseFromString(f.read()) return net except: pass
2b0a12cb479ed1a9044da587c1673fc5f3f89e6b
19,597
def extract_keywords(header, *args): """ For a given header, find all of the keys and return an unnested dict. """ try: header = pvl.load(header) except: header = pvl.loads(header) res = {} # Iterate through all of the requested keys for a in args: try: res[a] = find_in_dict(a) except: res[a] = None return res
2d1313befa8779b5b8f6efc686f92fd213c7dfa5
19,598
import os def plot_parcel_stats_profile(fnames, figure="save", fmt="png", **kwargs): """ Plot parcel statistics """ n = len(fnames) labels = kwargs.pop("labels", n * [None]) dset = kwargs.pop("dset", "aspect-ratio") no_xlabel = kwargs.pop("no_xlabel", False) beg = kwargs.pop("begin", None) end = kwargs.pop("end", None) if dset == "aspect-ratio": dset = "aspect ratio" colors = plt.cm.tab10(np.arange(n).astype(int)) if len(labels) < n: raise ValueError("Not enough labels provided.") ncreader = nc_reader() lmax = 0 for i, fname in enumerate(fnames): ncreader.open(fname) if not ncreader.is_parcel_stats_file: raise IOError("Not a parcel diagnostic output file.") nsteps = ncreader.get_num_steps() data_mean = np.zeros(nsteps) data_std = np.zeros(nsteps) t = np.zeros(nsteps) for step in range(nsteps): t[step] = ncreader.get_dataset(step, "t") data_mean[step] = ncreader.get_dataset(step, "avg " + dset) data_std[step] = ncreader.get_dataset(step, "std " + dset) if dset == "aspect ratio": lmax = max(lmax, ncreader.get_global_attribute("lambda_max")) ncreader.close() plt.plot(t[beg:end], data_mean[beg:end], label=labels[i], color=colors[i]) plt.fill_between( t[beg:end], data_mean[beg:end] - data_std[beg:end], data_mean[beg:end] + data_std[beg:end], alpha=0.5, color=colors[i], ) if not no_xlabel: plt.xlabel(get_label("time", units["time"])) plt.grid(linestyle="dashed", zorder=-1) if dset == "aspect-ratio": plt.ylabel(r"aspect ratio $\lambda$") plt.text(t[10], 0.95 * lmax, r"$\lambda\le\lambda_{max} = " + str(lmax) + "$") plt.axhline(lmax, linestyle="dashed", color="black") elif dset == "volume": plt.ylabel(r"parcel volume / $V_{g}$") # plt.axhline(1.0, linestyle='dashed', color='black', # label=r'cell volume $V_{g}$') else: plt.ylabel(r"parcel " + dset) if not labels[0] is None: plt.legend( loc=legend_dict["loc"], ncol=legend_dict["ncol"], bbox_to_anchor=legend_dict["bbox_to_anchor"], ) plt.tight_layout() if figure == "return": return plt elif figure == "save": prefix = os.path.splitext(fnames[0])[0] + "_" if n > 1: prefix = "" dset = dset.replace(" ", "_") plt.savefig(prefix + "parcel_" + dset + "_profile." + fmt, bbox_inches="tight") else: plt.show() plt.close()
c77a5bdfdb696311835b194bf29034dbd9c07b39
19,599