Columns:
content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, range 0 to 3.66M
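Each record below is one row with these three fields. As a minimal sketch (the record class and any field meaning beyond the listed types are assumptions, not stated by this listing), a single row could be modeled as:

from dataclasses import dataclass

@dataclass
class CodeRecord:
    content: str  # source text of the sample (35 to ~762k characters)
    sha1: str     # 40-character hex digest associated with the content
    id: int       # integer identifier in the range 0 to ~3.66M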
def query_title_bar_text(shared_state):
    """return text for title bar, updated when screen changes."""
    coll_name = shared_state["active_collection"].name
    str_value = f"QUERY SOURCE: {coll_name}"
    return str_value
2ce051cc8d6a87d3c964fba1abb502125b227717
17,673
def input_handler2():
    """Run the wx event loop by processing pending events only.

    This is like inputhook_wx1, but it keeps processing pending events
    until stdin is ready.  After processing all pending events, a call to
    time.sleep is inserted.  This is needed, otherwise, CPU usage is at 100%.
    This sleep time should be tuned though for best performance.
    """
    app = wx.GetApp()
    global POLLTIME, ON_INTERRUPT
    if app is not None:
        if not wx.Thread_IsMain():
            raise Exception('wx thread is not the main thread')
        evtloop = wx.EventLoop()
        activator = wx.EventLoopActivator(evtloop)
        while not stdin_ready():
            while evtloop.Pending():
                evtloop.Dispatch()
            app.ProcessIdle()
            try:
                sleep(POLLTIME)
            except KeyboardInterrupt:
                if hasattr(ON_INTERRUPT, '__call__'):
                    ON_INTERRUPT()
        activator = None  # del activator
    return 0
d9b3887f82b2a9ef19449d58e40ca18b642a2bf4
17,674
import requests


def create_upload_record(env, source_id, headers, cookies):
    """Creates an upload resource via the G.h Source API."""
    post_api_url = f"{get_source_api_url(env)}/sources/{source_id}/uploads"
    print(f"Creating upload via {post_api_url}")
    res = requests.post(post_api_url,
                        json={"status": "IN_PROGRESS", "summary": {}},
                        cookies=cookies,
                        headers=headers)
    if res and res.status_code == 201:
        res_json = res.json()
        return res_json["_id"]
    e = RuntimeError(
        f"Error creating upload record, status={res.status_code}, response={res.text}")
    complete_with_error(e)
7d8bcebec30be7ccba5406f1afc6a1b267e8e398
17,675
def get_versions(sys):
    """Import stuff and get versions of modules.

    Parameters
    ----------
    sys : module
        The sys module object.

    Returns
    -------
    module_versions : dict
        The module names and corresponding versions.
    """
    module_versions = {}
    for name, module in sys.modules.items():
        if '.' in name:
            continue
        if isinstance(name, str) and len(name) and name[0] == '_':
            continue
        module_version = LooseVersion(getattr(module, '__version__', None))
        module_version = getattr(module_version, 'vstring', None)
        if module_version is None:
            module_version = None
        elif 'git' in module_version or '.dev' in module_version:
            git_path = op.dirname(op.realpath(module.__file__))
            head = _get_git_head(git_path)
            module_version += '-HEAD:{}'.format(head)
        module_versions[name] = module_version
    return module_versions
172103da6d6f476080a1c1a33b34ebb4d028df05
17,676
def day05_part1(file: str) -> int:
    """ Solves advent of code: day05 part1 """
    with open(file) as fid:
        seats = [Seat(line.strip()) for line in fid]
    highest_seat_num = max(seat.number for seat in seats)
    return highest_seat_num
5ba399053d3a7e855ded402cea60f59c9d79d9a4
17,677
def GetInput():
    """Get player inputs and lower-case the input"""
    Input = str(input("{:>20s}".format("")))
    print("\n \n \n \n \n")
    return Input.lower()
9d8626a9c9f0615a0453d0804b6b37244ec373c3
17,678
def ldns_fskipcs_l(*args):
    """LDNS buffer."""
    return _ldns.ldns_fskipcs_l(*args)
44e357adf381e11aaccb78441c543438abe75ba1
17,679
def specific_parser(parser, log=False, run_folder=None, mode=None, tot_epochs=None, restoring_rep_path=None, start_from_epoch=None, pretrained_GAN=None, GAN_epoch=None, data_dir_train=None, data_dir_train2=None, data_dir_test=None, data_dir_test2=None, images_log_freq=None, batch_size=None, batch_size_SN=None, acc_log_freq=None, loss_log_freq=None, experiment_name=None, run_description=None, prc_train=None, prc_test=None, prc_val=None, sar_c=None, optical_c=None, N_classes=None, patch_size=None, SN_log_freq=None, save_model_freq=None, lambda_identity=None, D_training_ratio=None, lambda_A=None, loss_type=None, lambda_gp=None, res_block_N=None, pool_prc_O=None, pool_prc_S=None, buff_dim=None, th_low=None, th_high=None, pool=None, conditioned=None, dropping=None, th_b_h_ratio=None, th_b_l_ratio=None, th_b_h_pool=None, th_b_l_pool=None, drop_prc=None, seed=None): """ This is an intermediate layer between the general parser and the config routine to allow who use this code to easily access parameters and change them when building his experiment :param parser: :param log: decide if print or not :param run_folder: new value for run folder :param mode: train mode :param tot_epochs: :param restoring_rep_path: :param start_from_epoch: :param pretrained_GAN: :param GAN_epoch: :param data_dir_train: :param data_dir_train2: :param data_dir_test: :param data_dir_test2: :param images_log_freq: :param batch_size: :param batch_size_SN: :param acc_log_freq: :param loss_log_freq: :param experiment_name: :param run_description: :param prc_train: :param prc_test: :param prc_val: :param sar_c: :param optical_c: :param N_classes: :param patch_size: :param SN_log_freq: :param save_model_freq: :param lambda_identity: :param D_training_ratio: :param lambda_A: :param loss_type: :param lambda_gp: :param res_block_N: :param pool_prc_O: :param pool_prc_S: :param buff_dim: :param th_low: :param th_high: :param pool: :param conditioned: :param dropping: :param th_b_h_ratio: :param th_b_l_ratio: :param th_b_h_pool: :param th_b_l_pool: :param drop_prc: :return: args """ args = parser.parse_args() print('SPECIFIC CONFIG') args.log_dir = update_arg(args.log_dir, run_folder, 'log_dir', log) args.tot_epochs = update_arg(args.tot_epochs, tot_epochs, 'tot_epochs', log) args.mode = update_arg(args.mode, mode, 'mode', log) args.restoring_rep_path = update_arg(args.restoring_rep_path, restoring_rep_path, 'restoring_rep_path', log) args.start_from_epoch = update_arg(args.start_from_epoch, start_from_epoch, 'start_from_epoch', log) args.pretrained_GAN = update_arg(args.pretrained_GAN, pretrained_GAN, 'pretrained_GAN', log) args.GAN_epoch = update_arg(args.GAN_epoch, GAN_epoch, 'GAN_epoch', log) args.data_dir_train = update_arg(args.data_dir_train, data_dir_train, 'data_dir_train', log) args.data_dir_train2 = update_arg(args.data_dir_train2, data_dir_train2, 'data_dir_train2', log) args.data_dir_test = update_arg(args.data_dir_test, data_dir_test, 'data_dir_test', log) args.data_dir_test2 = update_arg(args.data_dir_test2, data_dir_test2, 'data_dir_test2', log) args.images_log_freq = update_arg(args.images_log_freq, images_log_freq, 'images_log_freq', log) args.batch_size = update_arg(args.batch_size, batch_size, 'batch_size', log) args.batch_size_SN = update_arg(args.batch_size_SN, batch_size_SN, 'batch_size_SN', log) args.acc_log_freq = update_arg(args.acc_log_freq, acc_log_freq, 'acc_log_freq', log) args.loss_log_freq = update_arg(args.loss_log_freq, loss_log_freq, 'loss_log_freq', log) args.experiment_name = 
update_arg(args.experiment_name, experiment_name, 'experiment_name', log) args.run_description = update_arg(args.run_description, run_description, 'run_description', log) args.prc_train = update_arg(args.prc_train, prc_train, 'prc_train', log) args.prc_test = update_arg(args.prc_test, prc_test, 'prc_test', log) args.prc_val = update_arg(args.prc_val, prc_val, 'prc_val', log) args.sar_c = update_arg(args.sar_c, sar_c, 'sar_c', log) args.optical_c = update_arg(args.optical_c, optical_c, 'optical_c', log) args.N_classes = update_arg(args.N_classes, N_classes, 'N_classes', log) args.patch_size = update_arg(args.patch_size, patch_size, 'patch_size', log) args.SN_log_freq = update_arg(args.SN_log_freq, SN_log_freq, 'SN_log_freq', log) args.save_model_freq = update_arg(args.save_model_freq, save_model_freq, 'save_model_freq', log) args.lambda_identity = update_arg(args.lambda_identity, lambda_identity, 'lambda_identity', log) args.D_training_ratio = update_arg(args.D_training_ratio, D_training_ratio, 'D_training_ratio', log) args.lambda_A = update_arg(args.lambda_A, lambda_A, 'lambda_A', log) args.loss_type = update_arg(args.loss_type, loss_type, 'loss_type', log) args.lambda_gp = update_arg(args.lambda_gp, lambda_gp, 'lambda_gp', log) args.res_block_N = update_arg(args.res_block_N, res_block_N, 'res_block_N', log) args.pool_prc_O = update_arg(args.pool_prc_O, pool_prc_O, 'pool_prc_O', log) args.pool_prc_S = update_arg(args.pool_prc_S, pool_prc_S, 'pool_prc_S', log) args.buff_dim = update_arg(args.buff_dim, buff_dim, 'buff_dim', log) args.th_low = update_arg(args.th_low, th_low, 'th_low', log) args.th_high = update_arg(args.th_high, th_high, 'th_high', log) args.pool = update_arg(args.pool, pool, 'pool', log) args.conditioned = update_arg(args.conditioned, conditioned, 'conditioned', log) args.dropping = update_arg(args.dropping, dropping, 'dropping', log) args.th_b_h_ratio = update_arg(args.th_b_h_ratio, th_b_h_ratio, 'th_b_h_ratio', log) args.th_b_l_ratio = update_arg(args.th_b_l_ratio, th_b_l_ratio, 'th_b_l_ratio', log) args.th_b_h_pool = update_arg(args.th_b_h_pool, th_b_h_pool, 'th_b_h_pool', log) args.th_b_l_pool = update_arg(args.th_b_l_pool, th_b_l_pool, 'th_b_l_pool', log) args.drop_prc = update_arg(args.drop_prc, drop_prc, 'drop_prc', log) args.seed = update_arg(args.seed, seed, 'seed', log) return args
cbce4c086da986a3232d40ae2d917b921ff64ff2
17,680
import re


def to_identifier(text):
    """Converts text to a valid Python identifier by replacing all whitespace
    and punctuation and adding a prefix if starting with a digit"""
    if text[:1].isdigit():
        text = '_' + text
    return re.sub('_+', '_', str(text).translate(TRANS_TABLE))
8c8ca0c52c13a7d78aa9ec2288ef86ec7e10f84a
17,681
def estimate_next_pos(measurement, OTHER = None): """Estimate the next (x, y) position of the wandering Traxbot based on noisy (x, y) measurements.""" if OTHER is None: # Setup Kalman Filter [u, P, H, R] = setup_kalman_filter() # OTHER = {'x': x, 'P': P, 'u': u, 'matrices':[H, R]} x = matrix([[measurement[0]], [measurement[1]], [0], [0], [0]]) OTHER = {'z_list': deque([]), 'x': x, 'P': P, 'u': u, 'matrices': [H, R], 'step': 1 # 'zx': [measurement[0]] } OTHER['z_list'].append(np.array(measurement)) # return measurement, OTHER # elif OTHER['step'] == 1: # # Use first three measurements to seed the filter # OTHER['step'] = 2 # OTHER['z_list'].append(np.array(measurement)) # # OTHER['zx'].append(measurement[0]) # # OTHER['x_list'].append(measurement) # return measurement, OTHER # elif OTHER['step'] == 2: # OTHER['step'] = 3 # # Get last 3 measurements # OTHER['z_list'].append(np.array(measurement)) # # OTHER['zx'].append(measurement[0]) # # Get initial estimate of state from the three measurements # OTHER['x'] = state_from_measurements(OTHER['z_list']) # # # Initialization complete # OTHER['step'] = -1 # # # Use last 20 measurements only # num_z = 1000 # # OTHER['x_list'] = deque(maxlen=num_z) # # OTHER['z_list'] = deque(maxlen=num_z+1) # # # Predict next position of robot using the dynamics and current state # next_state = robot_x_fn(OTHER['x']) # # OTHER['x_list'].append(next_state) # return (next_state.value[0][0], next_state.value[1][0]), OTHER OTHER['z_list'].append(np.array(measurement)) x, P = extended_kalman_filter(measurement, OTHER['x'], OTHER['u'], OTHER['P'], robot_F_fn, robot_x_fn, *OTHER['matrices']) # OTHER['x_list'].append(x) OTHER['x'] = x OTHER['P'] = P # print('Trace of P : '+str(P.trace())) # Predict next position of robot next_state = robot_x_fn(x) est_xy = (next_state.value[0][0], next_state.value[1][0]) # You must return xy_estimate (x, y), and OTHER (even if it is None) # in this order for grading purposes. # xy_estimate = (3.2, 9.1) # return z, OTHER return est_xy, OTHER
a6b6eba0aa7e71a986bc5bed68cc6fb955c02383
17,682
def AutoBusList(*args):
    """List of Buses or (File=xxxx) syntax for the AutoAdd solution mode."""
    # Getter
    if len(args) == 0:
        return get_string(lib.Settings_Get_AutoBusList())

    # Setter
    Value, = args
    if type(Value) is not bytes:
        Value = Value.encode(codec)
    lib.Settings_Set_AutoBusList(Value)
aab4ae15dd7b12c46eb5a75bb780ac78609273ae
17,683
from typing import Tuple
from typing import Optional


def validate_inputs(*, input_data: pd.DataFrame) -> Tuple[pd.DataFrame, Optional[dict]]:
    """Check model inputs for unprocessable values."""
    # convert syntax error field names (beginning with numbers)
    # input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)
    input_data["TotalCharges"] = pd.to_numeric(
        input_data["TotalCharges"], errors="coerce"
    )
    relevant_data = input_data[config.model_config.features].copy()
    validated_data = drop_na_inputs(input_data=relevant_data)
    errors = None

    try:
        # replace numpy nans so that pydantic can validate
        MultipleChurnDataInputs(
            inputs=validated_data.replace({np.nan: None}).to_dict(orient="records")
        )
    except ValidationError as error:
        errors = error.json()

    return validated_data, errors
b03e616dc10c734af282d71a650da822e961e93f
17,684
def create_env(n_envs, eval_env=False, no_log=False): """ Create the environment and wrap it if necessary :param n_envs: (int) :param eval_env: (bool) Whether is it an environment used for evaluation or not :param no_log: (bool) Do not log training when doing hyperparameter optim (issue with writing the same file) :return: (Union[gym.Env, VecEnv]) """ global hyperparams global env_kwargs # Do not log eval env (issue with writing the same file) log_dir = None if eval_env or no_log else save_path if n_envs == 1: env = DummyVecEnv([make_env(env_id, 0, args.seed, wrapper_class=env_wrapper, log_dir=log_dir, env_kwargs=env_kwargs)]) else: # env = SubprocVecEnv([make_env(env_id, i, args.seed) for i in range(n_envs)]) # On most env, SubprocVecEnv does not help and is quite memory hungry env = DummyVecEnv([make_env(env_id, i, args.seed, log_dir=log_dir, env_kwargs=env_kwargs, wrapper_class=env_wrapper) for i in range(n_envs)]) if normalize: # Copy to avoid changing default values by reference local_normalize_kwargs = normalize_kwargs.copy() # Do not normalize reward for env used for evaluation if eval_env: if len(local_normalize_kwargs) > 0: local_normalize_kwargs['norm_reward'] = False else: local_normalize_kwargs = {'norm_reward': False} if args.verbose > 0: if len(local_normalize_kwargs) > 0: print(f"Normalization activated: {local_normalize_kwargs}") else: print("Normalizing input and reward") env = VecNormalize(env, **local_normalize_kwargs) # Optional Frame-stacking if hyperparams.get('frame_stack', False): n_stack = hyperparams['frame_stack'] env = VecFrameStack(env, n_stack) print(f"Stacking {n_stack} frames") if is_image_space(env.observation_space): if args.verbose > 0: print("Wrapping into a VecTransposeImage") env = VecTransposeImage(env) return env
c0d1355cb1ea4446370a71cb49bfb6855799b4b3
17,685
def ellipse(pts, pc=None, ab=None):
    """
    Distance function for the ellipse
    centered at pc = [xc, yc], with a, b = [a, b]
    """
    if pc is None:
        pc = [0, 0]
    if ab is None:
        ab = [1., 2.]
    return dist((pts - pc)/ab) - 1.0
7ff99b98aa09d86223afe97a987176f4dc0e0f3d
17,686
def _transform( parsed_date_data: ParsedDate, parsed_output_format_data: ParsedTargetFormat, output_format: str, output_timezone: str, ) -> str: """ This function transform parsed result into target format Parameters ---------- parsed_date_data generated year, month, day, hour, minute, second parsed_output_format_data generated year token, month token, day token, hour token, minute token, second token of target format output_format target format string output_timezone target timezone string """ result = deepcopy(output_format) if output_timezone != "": parsed_date_data = _change_timezone(parsed_date_data, output_timezone) # Handle year result = _transform_year( result, parsed_output_format_data.ymd_token["year_token"], parsed_date_data.ymd["year"] ) # Handle day result = _transform_day( result, parsed_output_format_data.ymd_token["day_token"], parsed_date_data.ymd["day"] ) # Handle hours result = _transform_hms( result, str(parsed_output_format_data.hms_token["hour_token"]), bool(parsed_output_format_data.hms_token["ispm"]), parsed_date_data.hms["hour"], ) # Handle minutes result = _transform_hms( result, str(parsed_output_format_data.hms_token["minute_token"]), False, parsed_date_data.hms["minute"], ) # Handle seconds result = _transform_hms( result, str(parsed_output_format_data.hms_token["second_token"]), False, parsed_date_data.hms["second"], ) # Handle month result = _transform_month( result, parsed_output_format_data.ymd_token["month_token"], parsed_date_data.ymd["month"] ) # Handle weekday result = _transform_weekday( result, parsed_output_format_data.weekday_token, parsed_date_data.weekday ) # Handle timezone result = _transform_timezone( result, parsed_output_format_data.timezone_token, str(parsed_date_data.tzinfo["timezone"]), str(parsed_date_data.tzinfo["utc_add"]), int(parsed_date_data.tzinfo["utc_offset_hours"]), int(parsed_date_data.tzinfo["utc_offset_minutes"]), ) return result
cc51f2776165bf05af1d97bcc6eb70bd6f03702f
17,687
def stop_next_turn():
    """
    Dirty way to stop the MCTS in a clean way (without SIGINT or SIGTERM)...
    the mcts finish current turn save data and stop
    (if you are using dft it can take some time...)
    write "stop" in the file MCTS/stop_mcts
    :return: None
    """
    with open(p.f_stop) as f:
        stop = f.read()
    if "stop" in stop:
        print("MCTS stopped with signal 'stop' in '%s' file" % p.f_stop)
        return True
    return False
eb76187f25f49ae674fefe7969277122bd18e5c8
17,689
from datetime import datetime def pull_request_average_time_between_responses(self, repo_group_id, repo_id=None, group_by='month', time_unit='hours', begin_date=None, end_date=None): """ Avegage time between responeses with merged_status and the time frame :param repo_group_id: The repository's repo_group_id :param repo_id: The repository's repo_id, defaults to None :param group_by: The time frame the data is grouped by, options are: 'day', 'week', 'month' or 'year', defaults to 'month' :param time_unit: Unit of time for data, options are: 'minutes', or 'hours', defaults to 'hours' :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00' :param end_date: Specifies the end date, defaults to datetime.now() :return: DataFrame of average time beteen responses ======= @register_metric() def pull_request_merged_status_counts(self, repo_group_id, repo_id=None, begin_date='1970-1-1 00:00:01', end_date=None, group_by='week'): >>>>>>> Stashed changes """ if not begin_date: begin_date = '1970-1-1' if not end_date: end_date = datetime.datetime.now().strftime('%Y-%m-%d') unit_options = ['year', 'month', 'week', 'day'] time_group_bys = [] for unit in unit_options.copy(): if group_by not in unit_options: continue time_group_bys.append('closed_{}'.format(unit)) del unit_options[0] if not repo_id: pr_all_SQL = s.sql.text(""" SELECT repo_id, repo_name, repo_group_id, rg_name AS repo_group_name, date_part( 'year', pr_closed_at :: DATE ) AS closed_year, date_part( 'month', pr_closed_at :: DATE ) AS closed_month, date_part( 'week', pr_closed_at :: DATE ) AS closed_week, date_part( 'day', pr_closed_at :: DATE ) AS closed_day, (EXTRACT(epoch FROM average_time_between_responses)/3600) AS average_hours_between_responses, (EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses, CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, count(*) AS num_pull_requests FROM ( SELECT repo_name, repo_groups.repo_group_id, rg_name, pull_requests.repo_id, pull_requests.pull_request_id, pr_closed_at, pr_created_at, pr_merged_at, (MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses FROM pull_request_message_ref, message, repo_groups, pull_requests JOIN repo ON pull_requests.repo_id = repo.repo_id WHERE pull_requests.repo_id IN (SELECT repo_id FROM repo WHERE repo_group_id = :repo_group_id) AND repo.repo_id = pull_requests.repo_id AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id AND pull_request_message_ref.msg_id = message.msg_id AND repo_groups.repo_group_id = repo.repo_group_id AND pr_created_at::DATE >= :begin_date ::DATE AND pr_closed_at::DATE <= :end_date ::DATE GROUP BY pull_requests.pull_request_id, repo.repo_id, repo.repo_name, repo_groups.repo_group_id, repo_groups.rg_name ) time_between_responses GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses, time_between_responses.repo_id, time_between_responses.repo_name, time_between_responses.repo_group_id, time_between_responses.rg_name """) else: pr_all_SQL = s.sql.text(""" SELECT date_part( 'year', pr_closed_at :: DATE ) AS closed_year, date_part( 'month', pr_closed_at :: DATE ) AS closed_month, date_part( 'week', pr_closed_at :: DATE ) AS closed_week, date_part( 'day', pr_closed_at :: DATE ) AS closed_day, (EXTRACT(epoch FROM average_time_between_responses)/3600) AS 
average_hours_between_responses, (EXTRACT(epoch FROM average_time_between_responses)/60) AS average_minutes_between_responses, CASE WHEN pr_merged_at IS NULL THEN 'Rejected' ELSE 'Merged' END AS merged_status, count(*) AS num_pull_requests FROM ( SELECT pull_requests.pull_request_id, pr_closed_at, pr_created_at, pr_merged_at, (MAX(message.msg_timestamp) - MIN(message.msg_timestamp)) / COUNT(DISTINCT message.msg_timestamp) AS average_time_between_responses FROM pull_requests, repo, pull_request_message_ref, message WHERE repo.repo_id = :repo_id AND repo.repo_id = pull_requests.repo_id AND pull_requests.pull_request_id = pull_request_message_ref.pull_request_id AND pull_request_message_ref.msg_id = message.msg_id AND pr_created_at::DATE >= :begin_date ::DATE AND pr_closed_at::DATE <= :end_date ::DATE GROUP BY pull_requests.pull_request_id ) time_between_responses GROUP BY closed_year, closed_month, merged_status, time_between_responses.pr_closed_at, time_between_responses.average_time_between_responses """) pr_all = pd.read_sql(pr_all_SQL, self.database, params={'repo_id': repo_id, 'repo_group_id':repo_group_id, 'begin_date': begin_date, 'end_date': end_date}) if not repo_id: pr_avg_time_between_responses = pr_all.groupby(['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys).mean().reset_index()[['merged_status', 'repo_id', 'repo_name', 'repo_group_id', 'repo_group_name'] + time_group_bys + ['average_{}_between_responses'.format(time_unit)]] else: pr_avg_time_between_responses = pr_all.groupby(['merged_status'] + time_group_bys).mean().reset_index()[time_group_bys + ['merged_status', 'average_{}_between_responses'.format(time_unit)]] return pr_avg_time_between_responses
8834586b1e761c8ba6033140f753a3ac99780da7
17,690
def create_money(request):
    """Create money object."""
    if request.method == 'POST':
        form = MoneyForm(request.POST, request.FILES)
        if form.is_valid():
            money = form.save(commit=False)
            money.owner = request.user
            money.save()
            return redirect(money)
        else:
            return render(request, 'nec_bank/create_money.html', {'money_form': form})
    else:
        request.GET._mutable = True
        request.GET['created_date'] = timezone.now().astimezone().strftime('%Y-%m-%d %H:%M:%S')
        request.GET._mutable = False
        form = MoneyForm(request.GET)
        return render(request, 'nec_bank/create_money.html', {'money_form': form})
483eea12a1c2f49dd63fe2a37a529dafe3a4c6c3
17,691
def stripper(reply: str, prefix=None, suffix=None) -> str:
    """This is a helper function used to strip off reply prefix and
    terminator. Standard Python str.strip() doesn't work reliably because
    it operates on character-by-character basis, while prefix/terminator
    is usually a group of characters.

    Args:
        reply: String to be stripped.
        prefix: Substring to remove from the beginning of the line.
        suffix: Substring to remove from the end of the line.

    Returns:
        (str): Naked reply.
    """
    if prefix is not None and reply.startswith(prefix):
        reply = reply[len(prefix):]
    if suffix is not None and reply.endswith(suffix):
        reply = reply[:-len(suffix)]
    return reply
b48281a0dedd5d7f3d476943f12ac49720e67476
17,692
def resnet_50_generator(block_fn, lst_layers, num_classes, pruning_method=None, data_format='channels_first', name=None): """Generator for ResNet v1 models. Args: block_fn: String that defines whether to use a `residual_block` or `bottleneck_block`. lst_layers: list of Ints that denotes number of blocks to include in each block group. Each group consists of blocks that take inputs of the same resolution. num_classes: Int number of possible classes for image classification. pruning_method: String that specifies the pruning method used to identify which weights to remove. data_format: String either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. name: String that specifies name for model layer. Returns: Model `function` that takes in `inputs` and `is_training` and returns the output `Tensor` of the ResNet model. """ def model(inputs, is_training): """Creation of the model graph.""" with tf.variable_scope(name, 'resnet_model'): inputs = conv2d_fixed_padding( inputs=inputs, filters=64, kernel_size=7, strides=2, pruning_method=pruning_method, data_format=data_format, name='initial_conv') inputs = tf.identity(inputs, 'initial_conv') inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = tf.layers.max_pooling2d( inputs=inputs, pool_size=3, strides=2, padding='SAME', data_format=data_format, name='initial_max_pool') inputs = tf.identity(inputs, 'initial_max_pool') inputs = block_group( inputs=inputs, filters=64, block_fn=block_fn, blocks=lst_layers[0], strides=1, is_training=is_training, name='block_group1', pruning_method=pruning_method, data_format=data_format) inputs = block_group( inputs=inputs, filters=128, block_fn=block_fn, blocks=lst_layers[1], strides=2, is_training=is_training, name='block_group2', pruning_method=pruning_method, data_format=data_format) inputs = block_group( inputs=inputs, filters=256, block_fn=block_fn, blocks=lst_layers[2], strides=2, is_training=is_training, name='block_group3', pruning_method=pruning_method, data_format=data_format) inputs = block_group( inputs=inputs, filters=512, block_fn=block_fn, blocks=lst_layers[3], strides=2, is_training=is_training, name='block_group4', pruning_method=pruning_method, data_format=data_format) pool_size = (inputs.shape[1], inputs.shape[2]) inputs = tf.layers.average_pooling2d( inputs=inputs, pool_size=pool_size, strides=1, padding='VALID', data_format=data_format, name='final_avg_pool') inputs = tf.identity(inputs, 'final_avg_pool') inputs = tf.reshape(inputs, [-1, 2048]) inputs = tf.layers.dense( inputs=inputs, units=num_classes, kernel_initializer=tf.random_normal_initializer(stddev=.01), name='final_dense') inputs = tf.identity(inputs, 'final_dense') return inputs model.default_image_size = 224 return model
5f471b7cc3608c11515d0efb088c3c9bee0e20e6
17,693
def bracketBalanced(expression):
    """Check if an expression is balanced.

    An expression is balanced if all the opening brackets(i.e. '(, {, [') have
    a corresponding closing bracket(i.e. '), }, ]').

    Args:
        expression (str) : The expression to be checked.

    Returns:
        bool: True if expression is balanced. False if not balanced.
    """
    bracket_dict = {'(': ')', '{': '}', '[': ']'}
    stack = Stack()
    for i in range(len(expression)):
        if expression[i] in bracket_dict.keys():
            stack.push(expression[i])
        elif expression[i] in bracket_dict.values():
            if stack.isEmpty() or expression[i] != bracket_dict[stack.peek()]:
                return False
            else:
                stack.pop()
    if stack.isEmpty():
        return True
    else:
        return False
bb6ebeb681fb9425c923a4fdcc41c6158ece332a
17,694
def Leq(pressure, reference_pressure=REFERENCE_PRESSURE, axis=-1):
    """
    Time-averaged sound pressure level :math:`L_{p,T}` or equivalent-continuous
    sound pressure level :math:`L_{p,eqT}` in dB.

    :param pressure: Instantaneous sound pressure :math:`p`.
    :param reference_pressure: Reference value :math:`p_0`.
    :param axis: Axis.

    .. math:: L_{p,T} = L_{p,eqT} = 10.0 \\log_{10}{ \\left( \\frac{\\frac{1}{T} \\int_{t_1}^{t_2} p^2 (t) \\mathrm{d} t }{p_0^2} \\right)}

    See section 2.3.
    """
    return 10.0 * np.log10((pressure**2.0).mean(axis=axis) / reference_pressure**2.0)
bf7c640a361f3c07aef70310a213f2603a441664
17,695
import re


def trimBody(body):
    """ Quick function for trimming away the fat from emails """
    # Cut away "On $date, jane doe wrote: " kind of texts
    body = re.sub(
        r"(((?:\r?\n|^)((on .+ wrote:[\r\n]+)|(sent from my .+)|(>+[ \t]*[^\r\n]*\r?\n[^\n]*\n*)+)+)+)",
        "",
        body,
        flags=re.I | re.M,
    )

    # Crop out quotes
    lines = body.split("\n")
    body = "\n".join([x for x in lines if not x.startswith(">")])

    # Remove hyperlinks
    body = re.sub(r"[a-z]+://\S+", "", body)

    # Remove email addresses
    body = re.sub(r"(<[^>]+>\s*\S+@\S+)", "", body)
    body = re.sub(r"(\S+@\S+)", "", body)

    return body
19fcb7313e66d7e710781cf195a7550d050b4848
17,696
def fit_ellipses(contours):
    """
    Fit ellipses to contour(s).

    Parameters
    ----------
    contours : ndarray or list
        Contour(s) to fit ellipses to.

    Returns
    -------
    ellipses : ndarray or list
        An array or list corresponding to dimensions to ellipses fitted.
    """
    if isinstance(contours, list):
        ret = [cv2.fitEllipse(c) for c in contours]
    else:
        ret = cv2.fitEllipse(contours)
    return ret
9246182a1f96ca1691bcddf34271586be93dcf41
17,698
def get_argument_parser() -> ArgumentParser:
    """ Get command line arguments. """
    parser = ArgumentParser(
        description="Say Hello")
    subparsers = parser.add_subparsers(title="subcommands")
    parser_count_above_below = subparsers.add_parser("say-hello")
    parser_count_above_below.add_argument('-n', '--name', help="a name")
    parser_count_above_below.set_defaults(func=do_say_hello)
    return parser
f799991025283bf4ce2dbcceed845662312cd6d0
17,699
from typing import List


def parse_mint_studies_response(xml_raw) -> List[MintStudy]:
    """Parse the xml response to a MINT find DICOM studies call

    Raises
    ------
    DICOMTrolleyError
        If parsing fails
    """
    try:
        studies = ElementTree.fromstring(xml_raw).findall(
            MintStudy.xml_element
        )
    except ParseError as e:
        raise DICOMTrolleyError(
            f"Could not parse server response as MINT "
            f"studies. Response was: {xml_raw}"
        ) from e
    return [MintStudy.init_from_element(x) for x in studies]
465d9156be75144bacd1c84316660ea48a3f276e
17,701
def lookup_no_interp(x, dx, xi, y, dy, yi):
    """
    Return the indices for the closest values for a look-up table

    Choose the closest point in the grid

    x  ... range of x values
    xi ... interpolation value on x-axis
    dx ... grid width of x ( dx = x[1]-x[0])
    (same for y)

    return: idxX and idxY
    """
    if xi > x[0] and xi < x[-1]:
        xid = (xi - x[0]) / dx
        xid_floor = np.floor(xid)
        if xid - xid_floor < dx / 2:
            idxX = xid_floor
        else:
            idxX = xid_floor + 1
    elif xi < x[0]:
        idxX = 0
    else:
        idxX = len(x) - 1

    if yi > y[0] and yi < y[-1]:
        yid = (yi - y[0]) / dy
        yid_floor = np.floor(yid)
        if yid - yid_floor < dy / 2:
            idxY = yid_floor
        else:
            idxY = yid_floor + 1
    elif yi < y[0]:
        idxY = 0
    else:
        idxY = len(y) - 1

    return idxX, idxY
cdee658cc50af9ba25902bdbe4274cd49a5c5d89
17,702
def advertisement_data_complete_builder(list_of_ad_entries):
    """
    Generate a finalized advertisement data value from a list of AD entries that can be passed
    to the BLEConnectionManager to set the advertisement data that is sent during advertising.

    :param list_of_ad_entries: List of AD entries (can be built using
        blesuite.utils.gap_utils.advertisement_data_entry_builder)
    :type list_of_ad_entries: [str,]
    :return: Finalized AD data
    :rtype: str
    """
    data = ""
    for ad in list_of_ad_entries:
        length = len(ad)
        ad_string = chr(length) + ad
        data = data + ad_string
    return data
c0f9040c36216cb519706c347d6644405fae0b7f
17,703
def process_vocab_table(vocab, vocab_size, vocab_threshold, vocab_lookup, unk, pad):
    """process vocab table"""
    default_vocab = [unk, pad]
    if unk in vocab:
        del vocab[unk]
    if pad in vocab:
        del vocab[pad]

    vocab = {
        k: vocab[k] for k in vocab.keys()
        if vocab[k] >= vocab_threshold
    }
    if vocab_lookup is not None:
        vocab = {
            k: vocab[k] for k in vocab.keys()
            if k in vocab_lookup
        }

    sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)
    sorted_vocab = default_vocab + sorted_vocab

    vocab_table = sorted_vocab[:vocab_size]
    vocab_size = len(vocab_table)

    vocab_index = tf.contrib.lookup.index_table_from_tensor(
        mapping=tf.constant(vocab_table), default_value=0)
    vocab_inverted_index = tf.contrib.lookup.index_to_string_table_from_tensor(
        mapping=tf.constant(vocab_table), default_value=unk)

    return vocab_table, vocab_size, vocab_index, vocab_inverted_index
fa4860aac095d531e39008da99d42059e38716ec
17,704
def get_mag_msg(stamp, mag):
    """
    Get magnetometer measurement as ROS sensor_msgs::MagneticField
    """
    # init:
    mag_msg = MagneticField()
    # a. set header:
    mag_msg.header.stamp = stamp
    mag_msg.header.frame_id = '/imu_link'
    # b. mag:
    (
        mag_msg.magnetic_field.x,
        mag_msg.magnetic_field.y,
        mag_msg.magnetic_field.z
    ) = mag
    # finally:
    return mag_msg
ffa661ae168136fcbf626e08f85e19ba356a2e26
17,705
async def UserMeAPI(
    current_user: User = Depends(User.getCurrentUser),
):
    """
    Get the information of the currently logged-in user account.<br>
    Not accessible unless a JWT-encoded access token is set in the request's Authorization: Bearer header.
    """
    # Since this is the most frequently used API, delete any temporary account data left in the
    # twitter_accounts table at request time
    ## When a Twitter link is cancelled partway through, temporary account data is left behind, so remove it
    if await TwitterAccount.filter(icon_url='Temporary').count() > 0:
        await TwitterAccount.filter(icon_url='Temporary').delete()
        current_user = await User.filter(id=current_user.id).get()  # refresh current_user data
    await current_user.fetch_related('twitter_accounts')
    return current_user
0a884c54d1e01b5ae9a31848b081566d35830de6
17,706
def midpt(pt1, pt2):
    """
    Get the midpoint for two arbitrary points in space.
    """
    return rg.Point3d((pt1[0] + pt2[0])/2,
                      (pt1[1] + pt2[1])/2,
                      (pt1[2] + pt2[2])/2)
324e9fd6fe6ea257a130fcfe51eb73bf0957e57c
17,707
from collections import defaultdict
from astropy.io import fits
import siteUtils
from bot_eo_analyses import make_file_prefix, glob_pattern,\
    get_amplifier_gains, bias_filename, dark_current_task,\
    plot_ccd_total_noise, get_mask_files
from bot_data_handling import most_common_dark_files


def dark_current_jh_task(det_name):
    """JH version of single sensor execution of the dark current task."""
    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')

    dark_files \
        = siteUtils.dependency_glob(glob_pattern('dark_current', det_name),
                                    acq_jobname=acq_jobname,
                                    description="Dark current frames:")
    if not dark_files:
        print("dark_current_task: No dark files found for detector", det_name)
        return None

    dark_files_linear_fit = list(dark_files)
    dark_files = most_common_dark_files(dark_files)
    if len(dark_files_linear_fit) == len(dark_files):
        # These data only have one integration time, so skip linear
        # fit of dark current signal vs integration time.
        dark_files_linear_fit = None

    mask_files = get_mask_files(det_name)
    eotest_results_file \
        = siteUtils.dependency_glob('{}_eotest_results.fits'.format(file_prefix),
                                    jobname='read_noise_BOT')[0]
    gains = get_amplifier_gains('{}_eotest_results.fits'.format(file_prefix))
    bias_frame = bias_filename(run, det_name)

    dark_curr_pixels, dark95s \
        = dark_current_task(run, det_name, dark_files, gains,
                            mask_files=mask_files, bias_frame=bias_frame,
                            dark_files_linear_fit=dark_files_linear_fit)
    plot_ccd_total_noise(run, det_name, dark_curr_pixels, dark95s,
                         eotest_results_file)
    return dark_curr_pixels, dark95s
a2d627b21340382018826bb583ad31c8509f9bbe
17,708
def tweetnacl_crypto_box_open(max_messagelength=256):
    """
    max_messagelength: maximum length of the message, in bytes.
        i.e., the symbolic execution will not consider messages longer than max_messagelength
    """
    proj = tweetnaclProject()
    state = funcEntryState(proj, "crypto_box_curve25519xsalsa20poly1305_tweet_open", [
        ("m", pointerToUnconstrainedPublic()),  # Output parameter, will hold plaintext, length 'clen'
        ("c", pointerToUnconstrainedPublic()),  # ciphertext: length 'clen'
        ("clen", publicValue()),  # length of ciphertext. Not a pointer
        ("n", pointerTo(secretArray(24), 24)),  # nonce, size crypto_box_NONCEBYTES
        ("pk", pointerTo(publicArray(32), 32)),  # public key, size crypto_box_PUBLICKEYBYTES
        ("sk", pointerTo(secretArray(32), 32))  # secret key, size crypto_box_SECRETKEYBYTES
    ])
    state.add_constraints(getArgBVS(state, 'clen') <= max_messagelength)
    addDevURandom(state)
    return (proj, state)
5b69127c70d3286c2c54b898d541db9f90c1ff51
17,710
def balanced_parentheses_checker(symbol_string):
    """Verify that a set of parentheses is balanced."""
    opening_symbols = '{[('
    closing_symbols = '}])'
    opening_symbols_stack = data_structures.Stack()
    symbol_count = len(symbol_string)
    counter = 0
    while counter < symbol_count:
        current_symbol = symbol_string[counter]
        if current_symbol in '{[(':
            opening_symbols_stack.push(current_symbol)
        else:
            if not opening_symbols_stack.is_empty() and \
                    opening_symbols.index(opening_symbols_stack.peek()) == \
                    closing_symbols.index(current_symbol):
                opening_symbols_stack.pop()
            else:
                counter = symbol_count
        counter += 1
    return opening_symbols_stack.is_empty() and counter == symbol_count
04624d403f5af94c42122258df28363cb8bcf20d
17,711
from typing import Optional def _wrap_outcoming( store_cls: type, wrapped_method: str, trans_func: Optional[callable] = None ): """Output-transforming wrapping of the wrapped_method of store_cls. The transformation is given by trans_func, which could be a one (trans_func(x) or two (trans_func(self, x)) argument function. Args: store_cls: The class that will be transformed wrapped_method: The method (name) that will be transformed. trans_func: The transformation function. wrap_arg_idx: The index of the Returns: Nothing. It transforms the class in-place >>> from on.trans import store_wrap >>> S = store_wrap(dict) >>> _wrap_outcoming(S, '_key_of_id', lambda x: f'wrapped_{x}') >>> s = S({'a': 1, 'b': 2}) >>> list(s) ['wrapped_a', 'wrapped_b'] >>> _wrap_outcoming(S, '_key_of_id', lambda self, x: f'wrapped_{x}') >>> s = S({'a': 1, 'b': 2}); assert list(s) == ['wrapped_a', 'wrapped_b'] >>> class A: ... def __init__(self, prefix='wrapped_'): ... self.prefix = prefix ... def _key_of_id(self, x): ... return self.prefix + x >>> _wrap_outcoming(S, '_key_of_id', A(prefix='wrapped_')._key_of_id) >>> s = S({'a': 1, 'b': 2}); assert list(s) == ['wrapped_a', 'wrapped_b'] >>> >>> S = store_wrap(dict) >>> _wrap_outcoming(S, '_obj_of_data', lambda x: x * 7) >>> s = S({'a': 1, 'b': 2}) >>> list(s.values()) [7, 14] """ if trans_func is not None: wrapped_func = getattr(store_cls, wrapped_method) if not _has_unbound_self(trans_func): # print(f"00000: {store_cls}: {wrapped_method}, {trans_func}, {wrapped_func}, {wrap_arg_idx}") @wraps(wrapped_func) def new_method(self, x): # # Long form (for explanation) # super_method = getattr(super(store_cls, self), wrapped_method) # output_of_super_method = super_method(x) # transformed_output_of_super_method = trans_func(output_of_super_method) # return transformed_output_of_super_method return trans_func( getattr(super(store_cls, self), wrapped_method)(x) ) else: # print(f"11111: {store_cls}: {wrapped_method}, {trans_func}, {wrapped_func}, {wrap_arg_idx}") @wraps(wrapped_func) def new_method(self, x): # # Long form (for explanation) # super_method = getattr(super(store_cls, self), wrapped_method) # output_of_super_method = super_method(x) # transformed_output_of_super_method = trans_func(self, output_of_super_method) # return transformed_output_of_super_method return trans_func( self, getattr(super(store_cls, self), wrapped_method)(x) ) setattr(store_cls, wrapped_method, new_method)
5ea4782f528c7822d8906cde415cc318353b54ba
17,712
import math


def quantize(x):
    """convert a float in [0,1] to an int in [0,255]"""
    y = math.floor(x*255)
    return y if y < 256 else 255
b941a11d0d6af3162c964568e2d97c8d81cd1442
17,713
import logging


def initialize_logger(prefix):
    """
    Initialization of logging subsystem. Two logging handlers are brought up:
    'fh' which logs to a log file and 'ch' which logs to standard output.
    :param prefix: prefix that is added to the filename
    :return logger: return a logger instance
    """
    logger = logging.getLogger('charm-cli')
    logger.setLevel(logging.INFO)
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    logger.addHandler(ch)

    try:
        if prefix:
            log_filename = '{}_charm-cli.log'.format(prefix)
        else:
            log_filename = 'charm-cli.log'
        fh = logging.FileHandler(log_filename, 'w')
        fh.setLevel(logging.INFO)
        logger.addHandler(fh)
    except IOError as error:
        logger.warning('WARNING: Cannot create log file! Run charm-cli from a directory to '
                       'which you have write access.')
        logger.warning(error.msg)
        pass

    return logger
a6883736b17b9dc213bf4d26fd153fc8d0e11025
17,714
from typing import Optional
import pickle


def login(token_path: str) -> Optional[Credentials]:
    """
    Trigger the authentication so that we can store a new token.pickle.
    """
    flow = InstalledAppFlow.from_client_secrets_file(
        'gcal/credentials.json', SCOPES)
    creds = flow.run_local_server(port=0)
    # Save the credentials for the next run
    with open(token_path, 'wb') as token:
        pickle.dump(creds, token)
    return creds
72c7164297cfc17c661253f9496f8323cc3f217c
17,716
import requests


def get_auth_token(context, scope):
    """
    Get a token from the auth service to allow access to a service
    :param context: context of the test
    :return: the token
    """
    secret = get_client_secret(context)
    data = {
        'grant_type': 'client_credentials',
        'scope': scope
    }
    response = requests.post(
        '{}/token'.format(context.services['auth']),
        data=data,
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        timeout=REQUEST_TIMEOUT,
        verify=context.keychain['CA_CRT'],
        auth=(context.client_id, secret)
    )
    return response.json()['access_token']
d36e66ed08f637f93b2f9226e7ccaeb9cbe07a2c
17,717
def getDefaultFontFamily():
    """Returns the default font family of the application"""
    return qt.QApplication.instance().font().family()
408aa406d09dcc788bff46c3346307713f5b0fdf
17,718
def result(a, b, operator):
    """This function returns the result"""
    lambda_ops = {
        "+": (lambda x, y: x+y),
        "-": (lambda x, y: x-y),
        "*": (lambda x, y: x*y),
        "/": (lambda x, y: x/y),
        "//": (lambda x, y: x//y),
        "%": (lambda x, y: x%y),
    }
    r = False
    error = ''
    if operator in lambda_ops:
        if (operator == "/" or operator == "//" or operator == "%") and b == 0:
            error = "Oops, division or modulo by zero"
        else:
            r = lambda_ops[operator](a, b)
    else:
        error = "Use either + - * / or % next time"
    return r, error
febfacf3aa94bc15931cf79979329b3b1a5c7bc5
17,719
def get_deconv_filter(f_shape):
    """
    reference: https://github.com/MarvinTeichmann/tensorflow-fcn
    """
    width = f_shape[0]
    heigh = f_shape[0]
    f = ceil(width/2.0)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    bilinear = np.zeros([f_shape[0], f_shape[1]])
    for x in range(width):
        for y in range(heigh):
            value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
            bilinear[x, y] = value
    weights = np.zeros(f_shape, dtype=np.float32)
    for i in range(f_shape[2]):
        weights[:, :, i, i] = bilinear
    return weights
ec0a5617ad149708d195ab4701860f12ef695de1
17,721
def twitter_split_handle_from_txt(tweet):
    """
    Looks for RT @twitterhandle: or just @twitterhandle in the beginning of the tweet.
    The handle is split off and returned as two separate strings.
    :param tweet: (str) The tweet text to split.
    :return: (str, str) twitter_handle, rest_of_tweet
    """
    match = TWEET_TARGET_RE.search(tweet)
    if match is not None:
        match = match.group()
        tweet = tweet.replace(match, '')
    return match, tweet
1f98e5e5f2c1369ca673e6b15114f379786d1e8f
17,722
def points(piece_list):
    """Calculating point differential for the given board state"""
    # Args: (1) piece list
    # Returns: differential (white points - black points)

    # The points are calculated via the standard chess value system:
    # Pawn = 1, Knight = 3, Bishop = 3, Rook = 5, Queen = 9
    # King = 100 (arbitrarily large)

    differential = 0
    # For all white pieces...
    for i in range(0, 16):
        # If the piece is active, add its points to the counter
        if piece_list[i].is_active:
            differential = differential + piece_list[i].value
    # For all black pieces...
    for i in range(16, 32):
        # If the piece is active, subtract its points from the counter
        if piece_list[i].is_active:
            differential = differential - piece_list[i].value
    # Return point differential
    return differential
d8f36fd887a846a20999a0a99ad672d2902473d4
17,723
def grid_sampler(x, grid, name=None): """ :alias_main: paddle.nn.functional.grid_sampler :alias: paddle.nn.functional.grid_sampler,paddle.nn.functional.vision.grid_sampler :old_api: paddle.fluid.layers.grid_sampler This operation samples input X by using bilinear interpolation based on flow field grid, which is usually generated by :code:`affine_grid` . The grid of shape [N, H, W, 2] is the concatenation of (x, y) coordinates with shape [N, H, W] each, where x is indexing the 4th dimension (in width dimension) of input data x and y is indexing the 3rd dimension (in height dimension), finally results is the bilinear interpolation value of 4 nearest corner points. The output tensor shape will be [N, C, H, W]. .. code-block:: text Step 1: Get (x, y) grid coordinates and scale to [0, H-1/W-1]. .. code-block:: text grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1) grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1) Step 2: Indices input data X with grid (x, y) in each [H, W] area, and bilinear interpolate point value by 4 nearest points. wn ------- y_n ------- en | | | | d_n | | | | x_w --d_w-- grid--d_e-- x_e | | | | d_s | | | | ws ------- y_s ------- wn x_w = floor(x) // west side x coord x_e = x_w + 1 // east side x coord y_n = floor(y) // north side y coord y_s = y_s + 1 // south side y coord d_w = grid_x - x_w // distance to west side d_e = x_e - grid_x // distance to east side d_n = grid_y - y_n // distance to north side d_s = y_s - grid_y // distance to south side wn = X[:, :, y_n, x_w] // north-west point value en = X[:, :, y_n, x_e] // north-east point value ws = X[:, :, y_s, x_w] // south-east point value es = X[:, :, y_s, x_w] // north-east point value output = wn * d_e * d_s + en * d_w * d_s + ws * d_e * d_n + es * d_w * d_n Args: x(Variable): The input tensor, which is a 4-D tensor with shape [N, C, H, W], N is the batch size, C is the channel number, H and W is the feature height and width. The data type is float32 or float64. grid(Variable): Input grid tensor of shape [N, H, W, 2]. The data type is float32 or float64. name(str, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default. Returns: Variable: Output of shape [N, C, H, W] data samples input X using bilnear interpolation based on input grid. The data type is same as input tensor. Examples: .. code-block:: python import paddle.fluid as fluid # use with affine_grid x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32') theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32') grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32]) out = fluid.layers.grid_sampler(x=x, grid=grid) """ helper = LayerHelper("grid_sampler", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler') check_variable_and_dtype(grid, 'grid', ['float32', 'float64'], 'grid_sampler') if not isinstance(x, Variable): return ValueError("The x should be a Variable") if not isinstance(grid, Variable): return ValueError("The grid should be a Variable") out = helper.create_variable_for_type_inference(x.dtype) ipts = {'X': x, 'Grid': grid} helper.append_op(type='grid_sampler', inputs=ipts, outputs={'Output': out}) return out
06c57d6e6d0a476b42b2472284368352fbd48bc3
17,724
def SpecSwitch(spec_id):
    """
    Create hotkey function that switches hotkey spec.

    :param spec_id: Hotkey spec ID or index.

    :return: Hotkey function.
    """
    # Create hotkey function that switches hotkey spec
    func = partial(spec_switch, spec_id)

    # Add `call in main thread` tag
    func = tag_call_in_main_thread(func)

    # Return the hotkey function
    return func
20ba2ae59d717866474c236d0d97273755a035c8
17,726
def get_package_version() -> str:
    """Returns the package version."""
    metadata = importlib_metadata.metadata(PACKAGE_NAME)  # type: ignore
    version = metadata["Version"]
    return version
a24286ef2a69f60871b41eda8e5ab39ba7f756c0
17,727
def safe_name(dbname):
    """Returns a database name with non letter, digit, _ characters removed."""
    char_list = [c for c in dbname if c.isalnum() or c == '_']
    return "".join(char_list)
2ce4978c3467abaddf48c1d1ab56ed773b335652
17,728
def _parse_mro(mro_file_name): """Parse an MRO file into python objects.""" # A few helpful pyparsing constants EQUALS, SEMI, LBRACE, RBRACE, LPAREN, RPAREN = map(pp.Suppress, '=;{}()') mro_label = pp.Word(pp.alphanums + '_') mro_modifier = pp.oneOf(["in", "out", "src"]) mro_type = pp.oneOf([ "bool", "bool[]", "int", "int[]", "float", "float[]", "map", "map[]", "string", "string[]", "string[][]", "path", "path[]", "py"] + \ utils.MARTIAN_FILETYPES + [x +'[]' for x in utils.MARTIAN_FILETYPES]) # First parse includes include = pp.Literal("@include").suppress() + pp.quotedString includes = pp.ZeroOrMore(include).setResultsName("includes") includes.addParseAction(pp.removeQuotes) # Then parse filetypes filetype = pp.Literal("filetype").suppress() + pp.oneOf(utils.MARTIAN_FILETYPES) + SEMI filetypes = pp.ZeroOrMore(filetype).setResultsName("filetypes") ##################################################### # Stage ##################################################### # Now define the parts of a stage # First we have a "stage entry", which is a line in the stage body, it looks like "in int lane" stage_entry = pp.Group(mro_modifier + mro_type + pp.Optional(pp.Word(pp.printables, excludeChars=',')) + pp.Optional(pp.QuotedString('"'))) # Note that stage entries a comma-delimited, but there's a trailing comma so we need the # pp.Empty option for matching stage_entries = pp.delimitedList(pp.Or([stage_entry, pp.Empty()])) # Each stage can have two parts, the main part and a "split using" part split = (pp.Literal("split using").suppress() + LPAREN + pp.Optional(pp.Group(stage_entries).setResultsName("split")) + RPAREN) stage = pp.Group(pp.Literal("stage").suppress() + mro_label + LPAREN + pp.Group(stage_entries).setResultsName("stage_entries") + RPAREN + pp.Optional(split)) # Now create a dict of the stages, with the MRO labels for keys stages = pp.Dict(pp.ZeroOrMore(stage)).setResultsName("stages") ##################################################### # Pipeline ##################################################### ## Calls call_entry = pp.Group(pp.Word(pp.printables, excludeChars="=") + EQUALS + pp.Word(pp.printables, excludeChars=',')) call_entries = pp.delimitedList(pp.Or([call_entry, pp.Empty()])) call_modifier = pp.oneOf(["local", "preflight"]) call = pp.Group(pp.Literal("call").suppress() + pp.ZeroOrMore(call_modifier).suppress() + mro_label + LPAREN + pp.Group(call_entries).setResultsName("call_entries") + RPAREN) calls = pp.Dict(pp.ZeroOrMore(call)).setResultsName("pipeline_calls") ## Return return_entry = call_entry return_entries = pp.delimitedList(pp.Or([return_entry, pp.Empty()])) return_ = (pp.Literal("return").suppress() + LPAREN + pp.Group(return_entries).setResultsName("pipeline_return") + RPAREN) ## Pipeline header pipeline_header_entry = pp.Group(mro_modifier + mro_type + pp.Word(pp.printables, excludeChars=",") + pp.Optional(pp.quotedString)) pipeline_header_entries = pp.delimitedList(pp.Or([pipeline_header_entry, pp.Empty()])) pipeline = (pp.Literal("pipeline").suppress() + mro_label.setResultsName("pipeline_name") + LPAREN + pp.Group(pipeline_header_entries).setResultsName("pipeline_header") + RPAREN + LBRACE + calls + return_ + RBRACE) mro_file = pp.Each([pp.Optional(includes), filetypes, stages, pp.Optional(pipeline)]) mro_file.ignore(pp.pythonStyleComment) result = mro_file.parseFile(mro_file_name) return result
cf2561d1b72c2899fa495c2e83b683b7980b47ab
17,729
import math


def tabulate_stats(stats: rl_common.Stats) -> str:
    """Pretty-prints the statistics in `stats` in a table."""
    res = []
    for (env_name, (reward_type, reward_path)), vs in stats.items():
        for seed, (x, _log_dir) in enumerate(vs):
            row = {
                "env_name": env_name,
                "reward_type": reward_type,
                "reward_path": reward_path,
                "seed": seed,
            }
            row.update(x)

            filtered_row = {}
            for k, v in row.items():
                if k.endswith("_std"):
                    k = k[:-4] + "_se"
                    v = v / math.sqrt(row["n_traj"])
                new_k = _filter_key(k)
                if new_k is not None:
                    filtered_row[new_k] = v
            res.append(filtered_row)
    return tabulate.tabulate(res, headers="keys")
e853de6ac15e639d7348ee5a423afbdbbf296e7f
17,730
def LLR_binom(k, n, p0, EPS=1E-15):
    """ Log likelihood ratio test statistic for the single binomial pdf.

    Args:
        k  : number of counts (numpy array)
        n  : number of trials
        p0 : null hypothesis parameter value
    Returns:
        individual log-likelihood ratio values
    """
    phat = k/n  # maximum likelihood estimate
    phat[phat < EPS] = 2*EPS

    # Log-likelihood (density) ratios
    LLR = 2*((k*np.log(phat)+(n-k)*np.log(1-phat)) - (k*np.log(p0)+(n-k)*np.log(1-p0)))

    return LLR
a423b81a374398b88881ee45a665e7f9a648c4c1
17,731
def concatenate_shifts(shifts):
    """ Take the shifts, which are relative to the previous shift,
    and sum them up so that all of them are relative to the first."""
    # the first shift is 0,0,0
    for i in range(2, len(shifts)):  # we start at the third
        s0 = shifts[i-1]
        s1 = shifts[i]
        s1.x += s0.x
        s1.y += s0.y
        s1.z += s0.z
    return shifts
f4b0a41db1db78e3b5f25ca198fdb6cebd6476ca
17,732
import json
import hashlib


def users(user_id=None, serialize=True):
    """
    The method returns users in a json responses. The json is hashed to increase security.

    :param serialize: Serialize helps indicate the format of the response
    :param user_id: user id intended to be searched
    :return: Json format or plain text depending in the serialize parameter
    """
    users = DATA_CONTROLLER.get_user_by_id(user_id=user_id, serialize=True)
    page = request.args.get("limit")
    number_of_pages = None
    pages = []
    if page:
        number_of_pages = int(ceil(float(len(users)) / PAGE_SIZE))
        converted_page = int(page)
        if converted_page > number_of_pages or converted_page < 0:
            return make_response("", 404)
        from_index = (converted_page - 1) * PAGE_SIZE
        to_index = from_index + PAGE_SIZE
        users = users[from_index:to_index]
    if number_of_pages:
        pages = range(1, number_of_pages + 1)
    if serialize:
        data = {
            "users": users,
            "total": len(users),
            "pages": pages
        }
        json_data = json.dumps(data)
        response = make_response(jsonify(data), 200)
        # Caching
        response.headers["ETag"] = str(hashlib.sha256(json_data).hexdigest())  # Entity tag uniquely identifies request
        response.headers["Cache-Control"] = "private, max-age=300"
        return response
b939860a7e8794f8e53f63594a3000a0425cb319
17,733
def get_namenode_setting(namenode):
    """Get the settings for the namenode passed as parameter from the configuration file.

    Parameters
    ----------
    namenode --> str, the namenode for which you want to get the setting info

    Returns
    -------
    conf['namenodes_setting'][namenode] --> dict, the namenode setting info
    """
    return conf['namenodes_setting'][namenode]
e389d7a20a7b0ef7f32b51c7bb31068cb152fc2b
17,736
from typing import Tuple


def cds(identity: str, sequence: str, **kwargs) -> Tuple[sbol3.Component, sbol3.Sequence]:
    """Creates a Coding Sequence (CDS) Component and its Sequence.

    :param identity: The identity of the Component. The identity of Sequence is also identity with the suffix '_seq'.
    :param sequence: The DNA sequence of the Component encoded in IUPAC.
    :param kwargs: Keyword arguments of any other Component attribute.
    :return: A tuple of Component and Sequence.
    """
    cds_component, cds_seq = dna_component_with_sequence(identity, sequence, **kwargs)
    cds_component.roles.append(sbol3.SO_CDS)
    return cds_component, cds_seq
15d99917b840cf2881e1a90c1835c356622511a7
17,737
import math


def calc_innovation(xEst, PEst, y, LMid):
    """
    Compute innovation and Kalman gain elements
    """
    # Compute predicted observation from state
    lm = get_landmark_position_from_state(xEst, LMid)
    delta = lm - xEst[0:2]
    q = (delta.T @ delta)[0, 0]
    y_angle = math.atan2(delta[1, 0], delta[0, 0]) - xEst[2, 0]
    yp = np.array([[math.sqrt(q), pi_2_pi(y_angle)]])

    # compute innovation, i.e. diff with real observation
    innov = (y - yp).T  # Yt-Yt*
    innov[1] = pi_2_pi(innov[1])

    # compute matrixes for Kalman Gain
    H = jacob_h(q, delta, xEst, LMid)
    S = H @ PEst @ H.T + Py

    return innov, S, H
146695c107c46d2736d0e4ecc191d9a399ca8159
17,739
from typing import Any


def next_key(basekey: str, keys: dict[str, Any]) -> str:
    """Returns the next unused key for basekey in the supplied dictionary.

    The first try is `basekey`, followed by `basekey-2`, `basekey-3`, etc
    until a free one is found.
    """
    if basekey not in keys:
        return basekey
    i = 2
    while f"{basekey}-{i}" in keys:
        i = i + 1
    return f"{basekey}-{i}"
e1da51c79fd465088294e053fdc970934268211b
17,740
import yaml


def load_mmio_overhead_elimination_map(yaml_path):
    """ Load a previously dumped mmio overhead elimination map """
    with open(yaml_path, "r") as yaml_file:
        res = yaml.safe_load(yaml_file.read())

    res_map = {
        'overall': res[0]['overall'],
        'per_model': res[1]['per_model'],
    }
    if len(res) > 2:
        res_map['per_access_context'] = res[2]['per_access_context']

    return res_map
ae0ead1aa8c9f26acad9a23a35791592efbfe47e
17,741
from MoinMoin.util import diff_html from MoinMoin.util import diff_text def execute(pagename, request): """ Handle "action=diff" checking for either a "rev=formerrevision" parameter or rev1 and rev2 parameters """ if not request.user.may.read(pagename): Page(request, pagename).send_page() return try: date = request.values['date'] try: date = long(date) # must be long for py 2.2.x except StandardError: date = 0 except KeyError: date = 0 try: rev1 = int(request.values.get('rev1', -1)) except StandardError: rev1 = 0 try: rev2 = int(request.values.get('rev2', 0)) except StandardError: rev2 = 0 if rev1 == -1 and rev2 == 0: rev1 = request.rev if rev1 is None: rev1 = -1 # spacing flag? ignorews = int(request.values.get('ignorews', 0)) _ = request.getText # get a list of old revisions, and back out if none are available currentpage = Page(request, pagename) currentrev = currentpage.current_rev() if currentrev < 2: request.theme.add_msg(_("No older revisions available!"), "error") currentpage.send_page() return if date: # this is how we get called from RecentChanges rev1 = 0 log = editlog.EditLog(request, rootpagename=pagename) for line in log.reverse(): if date >= line.ed_time_usecs and int(line.rev) != 99999999: rev1 = int(line.rev) break else: rev1 = 1 rev2 = 0 if rev1 > 0 and rev2 > 0 and rev1 > rev2 or rev1 == 0 and rev2 > 0: rev1, rev2 = rev2, rev1 if rev1 == -1: oldrev = currentrev - 1 oldpage = Page(request, pagename, rev=oldrev) elif rev1 == 0: oldrev = currentrev oldpage = currentpage else: oldrev = rev1 oldpage = Page(request, pagename, rev=oldrev) if rev2 == 0: newrev = currentrev newpage = currentpage else: newrev = rev2 newpage = Page(request, pagename, rev=newrev) oldlog = oldpage.editlog_entry() newlog = newpage.editlog_entry() if not oldlog or not newlog: # We use "No log entries found." msg because we already have i18n # for that. Better would "At least one log entry was not found.". 
request.theme.add_msg(_("No log entries found."), "error") currentpage.send_page() return edit_count = abs(newrev - oldrev) # Start output # This action generates content in the user language request.setContentLanguage(request.lang) request.theme.send_title(_('Diff for "%s"') % (pagename, ), pagename=pagename, allow_doubleclick=1) f = request.formatter request.write(f.div(1, id="content")) oldrev = oldpage.get_real_rev() newrev = newpage.get_real_rev() title = _('Differences between revisions %d and %d') % (oldrev, newrev) if edit_count > 1: title += ' ' + _('(spanning %d versions)') % (edit_count, ) title = f.text(title) page_url = wikiutil.escape(currentpage.url(request), True) def enabled(val): return not val and u' disabled="disabled"' or u'' revert_html = "" if request.user.may.revert(pagename): revert_html = """ <form action="%s" method="get"> <div style="text-align:center"> <input name="action" value="revert" type="hidden"> <input name="rev" value="%d" type="hidden"> <input value="%s" type="submit"%s> </div> </form> """ % (page_url, rev2, _("Revert to this revision"), enabled(newrev < currentrev)) other_diff_button_html = """ <td style="border:0;"> <form action="%s" method="get"> <div style="text-align:%s"> <input name="action" value="diff" type="hidden"> <input name="rev1" value="%d" type="hidden"> <input name="rev2" value="%d" type="hidden"> <input value="%s" type="submit"%s> </div> </form> </td> """ navigation_html = """ <span class="diff-header">%%s</span> <table class="diff"> <tr> %(button)s <td style="border:0"> %%s </td> %(button)s </tr> </table> """ % {'button': other_diff_button_html} prev_oldrev = (oldrev > 1) and (oldrev - 1) or 1 next_oldrev = (oldrev < currentrev) and (oldrev + 1) or currentrev prev_newrev = (newrev > 1) and (newrev - 1) or 1 next_newrev = (newrev < currentrev) and (newrev + 1) or currentrev navigation_html = navigation_html % (title, page_url, "left", prev_oldrev, oldrev, _("Previous change"), enabled(oldrev > 1), revert_html, page_url, "right", newrev, next_newrev, _("Next change"), enabled(newrev < currentrev), ) request.write(f.rawHTML(navigation_html)) def rev_nav_link(enabled, old_rev, new_rev, caption, css_classes, enabled_title, disabled_title): if enabled: return currentpage.link_to(request, on=1, querystr={ 'action': 'diff', 'rev1': old_rev, 'rev2': new_rev, }, css_class="diff-nav-link %s" % css_classes, title=enabled_title) + request.formatter.text(caption) + currentpage.link_to(request, on=0) else: return '<span class="diff-no-nav-link %(css_classes)s" title="%(disabled_title)s">%(caption)s</span>' % { 'css_classes': css_classes, 'disabled_title': disabled_title, 'caption': caption, } rev_info_html = """ <div class="diff-info diff-info-header">%%(rev_first_link)s %%(rev_prev_link)s %(rev_header)s %%(rev_next_link)s %%(rev_last_link)s</div> <div class="diff-info diff-info-rev-size"><span class="diff-info-caption">%(rev_size_caption)s:</span> <span class="diff-info-value">%%(rev_size)d</span></div> <div class="diff-info diff-info-rev-author"><span class="diff-info-caption">%(rev_author_caption)s:</span> <span class="diff-info-value">%%(rev_author)s</span></div> <div class="diff-info diff-info-rev-comment"><span class="diff-info-caption">%(rev_comment_caption)s:</span> <span class="diff-info-value">%%(rev_comment)s</span></div> """ % { 'rev_header': _('Revision %(rev)d as of %(date)s'), 'rev_size_caption': _('Size'), 'rev_author_caption': _('Editor'), 'rev_ts_caption': _('Date'), 'rev_comment_caption': _('Comment'), } rev_info_old_html = 
rev_info_html % { 'rev_first_link': rev_nav_link(oldrev > 1, 1, newrev, u'\u21e4', 'diff-first-link diff-old-rev', _('Diff with oldest revision in left pane'), _("No older revision available for diff")), 'rev_prev_link': rev_nav_link(oldrev > 1, prev_oldrev, newrev, u'\u2190', 'diff-prev-link diff-old-rev', _('Diff with older revision in left pane'), _("No older revision available for diff")), 'rev_next_link': rev_nav_link((oldrev < currentrev) and (next_oldrev < newrev), next_oldrev, newrev, u'\u2192', 'diff-next-link diff-old-rev', _('Diff with newer revision in left pane'), _("Can't change to revision newer than in right pane")), 'rev_last_link': '', 'rev': oldrev, 'rev_size': oldpage.size(), 'rev_author': oldlog.getEditor(request) or _('N/A'), 'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(oldlog.ed_time_usecs)) or _('N/A'), 'rev_comment': wikiutil.escape(oldlog.comment) or '', } rev_info_new_html = rev_info_html % { 'rev_first_link': '', 'rev_prev_link': rev_nav_link((newrev > 1) and (oldrev < prev_newrev), oldrev, prev_newrev, u'\u2190', 'diff-prev-link diff-new-rev', _('Diff with older revision in right pane'), _("Can't change to revision older than revision in left pane")), 'rev_next_link': rev_nav_link(newrev < currentrev, oldrev, next_newrev, u'\u2192', 'diff-next-link diff-new-rev', _('Diff with newer revision in right pane'), _("No newer revision available for diff")), 'rev_last_link': rev_nav_link(newrev < currentrev, oldrev, currentrev, u'\u21e5', 'diff-last-link diff-old-rev', _('Diff with newest revision in right pane'), _("No newer revision available for diff")), 'rev': newrev, 'rev_size': newpage.size(), 'rev_author': newlog.getEditor(request) or _('N/A'), 'date': request.user.getFormattedDateTime(wikiutil.version2timestamp(newlog.ed_time_usecs)) or _('N/A'), 'rev_comment': wikiutil.escape(newlog.comment) or '', } if request.user.show_fancy_diff: request.write(f.rawHTML(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body(), old_top=rev_info_old_html, new_top=rev_info_new_html, old_top_class="diff-info", new_top_class="diff-info"))) newpage.send_page(count_hit=0, content_only=1, content_id="content-below-diff") else: request.write(f.rawHTML('<table class="diff"><tr><td class="diff-info">%s</td><td class="diff-info">%s</td></tr></table>' % (rev_info_old_html, rev_info_new_html))) lines = diff_text.diff(oldpage.getlines(), newpage.getlines()) if not lines: msg = f.text(" - " + _("No differences found!")) if edit_count > 1: msg = msg + f.paragraph(1) + f.text(_('The page was saved %(count)d times, though!') % { 'count': edit_count}) + f.paragraph(0) request.write(msg) else: if ignorews: request.write(f.text(_('(ignoring whitespace)')), f.linebreak()) else: qstr = {'action': 'diff', 'ignorews': '1', } if rev1: qstr['rev1'] = str(rev1) if rev2: qstr['rev2'] = str(rev2) request.write(f.paragraph(1), Page(request, pagename).link_to(request, text=_('Ignore changes in the amount of whitespace'), querystr=qstr, rel='nofollow'), f.paragraph(0)) request.write(f.preformatted(1)) for line in lines: if line[0] == "@": request.write(f.rule(1)) request.write(f.text(line + '\n')) request.write(f.preformatted(0)) request.write(f.div(0)) # end content div request.theme.send_footer(pagename) request.theme.send_closing_html()
7305385a84c561fc6c5a8b0e8c52635cf19c77d6
17,742
def SetBmaskName(enum_id, bmask, name): """ Set bitmask name (only for bitfields) @param enum_id: id of enum @param bmask: bitmask of the constant @param name: name of bitmask @return: 1-ok, 0-failed """ return idaapi.set_bmask_name(enum_id, bmask, name)
2a134b496214d7f8e8887dc5a3ca93264da08f5b
17,743
import math


def angle_to(x: int, y: int) -> float:
    """Return the angle in radians for the vector (x, y) pointing from the origin (0, 0),
    adjusted so that north = 0 and angles increase clockwise."""
    #xt,yt = y,x
    #rad = math.atan2(yt,xt)
    rad = math.atan2(x, y)
    if rad < 0.0:
        rad = math.pi + (math.pi + rad)
    return rad
545cfa3769da10eea2138132295e3387c4556b39
17,744
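A brief usage sketch illustrating the north = 0, clockwise convention produced by angle_to; the vectors are arbitrary and the results are converted to degrees for readability.

import math

for vx, vy in [(0, 1), (1, 0), (0, -1), (-1, 0)]:
    deg = math.degrees(angle_to(vx, vy))
    print((vx, vy), round(deg, 1))
# (0, 1)    0.0   north
# (1, 0)   90.0   east
# (0, -1) 180.0   south
# (-1, 0) 270.0   west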
import torch def adj(triples, num_nodes, num_rels, cuda=False, vertical=True): """ Computes a sparse adjacency matrix for the given graph (the adjacency matrices of all relations are stacked vertically). :param edges: List representing the triples :param i2r: list of relations :param i2n: list of nodes :return: sparse tensor """ r, n = num_rels, num_nodes size = (r * n, n) if vertical else (n, r * n) from_indices = [] upto_indices = [] for fr, rel, to in triples: offset = rel.item() * n if vertical: fr = offset + fr.item() else: to = offset + to.item() from_indices.append(fr) upto_indices.append(to) indices = torch.tensor([from_indices, upto_indices], dtype=torch.long, device=d(cuda)) assert indices.size(1) == len(triples) assert indices[0, :].max() < size[0], f'{indices[0, :].max()}, {size}, {r}' assert indices[1, :].max() < size[1], f'{indices[1, :].max()}, {size}, {r}' return indices.t(), size
8531170c20c39011efcc7a2223c4da49c41ffabb
17,746
def join_b2_path(b2_dir, b2_name): """ Like os.path.join, but for B2 file names where the root directory is called ''. :param b2_dir: a directory path :type b2_dir: str :param b2_name: a file name :type b2_name: str """ if b2_dir == '': return b2_name else: return b2_dir + '/' + b2_name
20f4e6e54f7f3b4a1583b503d4aa2d8995318978
17,747
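A quick usage sketch for join_b2_path showing the root-directory special case.

print(join_b2_path("", "song.mp3"))       # -> "song.mp3"
print(join_b2_path("music", "song.mp3"))  # -> "music/song.mp3"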
def actual_line_flux(wavelength, flux, center=None, pass_it=True):
    """Measure actual line flux:

    parameters
    ----------
    wavelength: float array
    flux: float array
    center: float
       wavelength to center plot on

    output parameters
    -----------------
    flux in line integrated over region
    flux in background over same region

    Notes
    -----
    In novae the line profile is composed of a superposition of emission
    from different regions, sometimes optically thick, sometimes thin,
    but not gaussian in shape.

    Here we plot the profile and provide endpoints (w1,f1), (w2,f2) at the
    flux level of the background.
    """
    import numpy as np
    from pylab import plot, xlim, ylim, title, xlabel, ylabel, ginput, figure, subplot
    from scipy.interpolate import interp1d
    # find plot center and range
    if type(center) == type(None):
        center = wavelength.mean()
    x1 = center - 7./300*center
    x2 = center + 7./300*center
    q = (wavelength > x1) & (wavelength < x2)
    w = wavelength[q]
    flx = flux[q]
    y2 = flx.max()
    f = figure()
    ax = subplot(111)
    getit = True
    while getit:
        ax.plot(w, flx, ls='steps', color='darkblue')
        print ("please click the desired limits of the profile at the background level")
        print ("no timeout")
        aa = ginput(n=2, timeout=0)
        x1, y1 = aa[0]
        x2, y2 = aa[1]
        x1 = float(x1)
        x2 = float(x2)
        y1 = float(y1)
        y2 = float(y2)
        q = (w >= x1) & (w <= x2)
        bg = interp1d([x1, x2], [y1, y2],)
        ax.fill_between(w[q], bg(w[q]), flx[q], color='c')
        ans = input("Do you want to continue ?")
        if (ans.upper()[0] != 'Y') & (pass_it == False):
            print ("Answer is not yes\n TRY AGAIN ")
            ax.cla()
        else:
            getit = False
    # now compute the fluxes
    w = w[q]
    flx = flx[q]
    tot_flx = []
    tot_bkg = (y2+y1)*(x2-x1)*0.5
    for k in range(1, len(w)):
        # trapezoidal rule: mean flux of each pair of points times the wavelength step
        tot_flx.append(0.5*(flx[k-1]+flx[k])*(w[k]-w[k-1]))
    line_flx = np.asarray(tot_flx).sum() - tot_bkg
    print (type(line_flx), line_flx)
    print (type(tot_bkg), tot_bkg)
    print (type(0.5*(x2+x1)), 0.5*(x2+x1))
    print ((" wavelength = %10.2f\n line flux = %10.3e\n" +
            " background flux = %10.2e\n FWZI = %10.2f\n") % (
            (x2+x1)*0.5, line_flx, tot_bkg, (x2-x1)))
    return {'wavelength': (x2+x1)*0.5, 'line_flux': line_flx,
            'integrated_background_flux': tot_bkg, "FWZI": (x2-x1)}
6d4d36e6e632e605158da704cc1e3462cdc173e1
17,748
def _too_many_contigs(ref_file): """Check for more contigs than the maximum samblaster deduplication supports. """ max_contigs = 32768 return len(list(ref.file_contigs(ref_file))) >= max_contigs
03a01719b634d6eea143306f96cae6ea26e3f1f9
17,749
def survival_regression_metric(metric, outcomes_train, outcomes_test,
                               predictions, times):
    """Compute metrics to assess survival model performance.

    Parameters
    -----------
    metric: string
        Measure used to assess the survival regression model performance.
        Options include:
        - `brs` : brier score
        - `ibs` : integrated brier score
        - `auc`: cumulative dynamic area under the curve
        - `ctd` : concordance index inverse probability of censoring weights (ipcw)
    predictions: np.array
        A numpy array of survival time predictions for the samples.
    outcomes_train : pd.DataFrame
        A pandas dataframe with rows corresponding to individual samples and
        columns 'time' and 'event' for the training data.
    outcomes_test : pd.DataFrame
        A pandas dataframe with rows corresponding to individual samples and
        columns 'time' and 'event' for the test data.
    times: np.array
        The time points at which to compute metric value(s).

    Returns
    -----------
    float: The metric value for the specified metric.
    """

    survival_train = util.Surv.from_dataframe('event', 'time', outcomes_train)
    survival_test = util.Surv.from_dataframe('event', 'time', outcomes_test)
    predictions_test = predictions

    if metric == 'brs':
        return metrics.brier_score(survival_train, survival_test,
                                   predictions_test, times)[-1]
    elif metric == 'ibs':
        return metrics.integrated_brier_score(survival_train, survival_test,
                                              predictions_test, times)
    elif metric == 'auc':
        return metrics.cumulative_dynamic_auc(survival_train, survival_test,
                                              1-predictions_test, times)[0]
    elif metric == 'ctd':
        vals = []
        for i in range(len(times)):
            vals.append(metrics.concordance_index_ipcw(survival_train, survival_test,
                                                       1-predictions_test[:, i],
                                                       tau=times[i])[0])
        return vals
    else:
        raise NotImplementedError()
244767ca0533af2fe792fe226fb2a9eb5e166201
17,750
import numpy as np
from scipy import signal


def _get_all_valid_corners(img_arr, crop_size, l_thresh, corner_thresh):
    """Get all valid corners for random cropping"""
    valid_pix = img_arr >= l_thresh
    kernel = np.ones((crop_size, crop_size))
    conv = signal.correlate2d(valid_pix, kernel, mode='valid')
    return conv > (corner_thresh * crop_size ** 2)
6e357a6585e8485de74e01ced62eedcbfe33bdec
17,751
def remove_dup(a):
    """ remove adjacent duplicates using an extra array
    (assumes the input list is sorted, so equal elements are adjacent) """
    if not a:
        return []
    res = []
    count = 0
    for i in range(0, len(a)-1):
        if a[i] != a[i+1]:
            res.append(a[i])
            count = count + 1
    res.append(a[len(a)-1])
    print('Total count of unique elements: {}'.format(count + 1))
    return res
8286c07098c078cd61d4890cd120723b9e9f04e7
17,752
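A small usage sketch for remove_dup; note it only compares adjacent elements, so the input is assumed to be sorted.

a = [1, 1, 2, 3, 3, 3, 5]
print(remove_dup(a))
# Total count of unique elements: 4
# [1, 2, 3, 5]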
def mjd2crnum(mjd): """ Converts MJD to Carrington Rotation number Mathew Owens, 16/10/20 """ return 1750 + ((mjd-45871.41)/27.2753)
233f91e6de4c5105732fc2c8f9f33d054491e1d2
17,753
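A worked example for mjd2crnum; MJD 59000 corresponds to 2020-05-31.

print(round(mjd2crnum(59000.0), 2))  # -> approximately 2231.34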
def poly_print_simple(poly,pretty=False): """Show the polynomial in descending form as it would be written""" # Get the degree of the polynomial in case it is in non-normal form d = poly.degree() if d == -1: return f"0" out = "" # Step through the ascending list of coefficients backward # We do this because polynomials are usually written in descending order for pwr in range(d,-1,-1): # Skip the zero coefficients entirely if poly[pwr] == 0: continue coe = poly[pwr] val = abs(coe) sgn = "-" if coe//val == -1 else "+" # When the coefficient is 1 or -1 don't print it unless it is the # coefficient for x^0 if val == 1 and pwr != 0: val = "" # If it is the first term include the sign of the coefficient if pwr == d: if sgn == "+": sgn = "" # Handle powers of 1 or 0 that appear as the first term if pwr == 1: s = f"{sgn}{val}x" elif pwr == 0: s = f"{sgn}{val}" else: if pretty == False: s = f"{sgn}{val}x^{pwr}" else: s = f"{sgn}{val}x$^{{{pwr}}}$" # If the power is 1 just show x rather than x^1 elif pwr == 1: s = f" {sgn} {val}x" # If the power is 0 only show the sign and value elif pwr == 0: s = f" {sgn} {val}" # Otherwise show everything else: if pretty == False: s = f" {sgn} {val}x^{pwr}" else: s = f" {sgn} {val}x$^{{{pwr}}}$" out += s return out
903f9d4a703e3f625da5f13f6fe084e8894d723b
17,754
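A hedged usage sketch for poly_print_simple; the real polynomial class is not shown in this snippet, so a minimal stand-in exposing degree() and index access is assumed.

class _Poly:
    """Minimal stand-in: coefficients stored in ascending order of power."""
    def __init__(self, coeffs):
        self.coeffs = coeffs
    def degree(self):
        for p in range(len(self.coeffs) - 1, -1, -1):
            if self.coeffs[p] != 0:
                return p
        return -1
    def __getitem__(self, pwr):
        return self.coeffs[pwr]

p = _Poly([4, 0, -3, 1])     # 4 - 3x^2 + x^3
print(poly_print_simple(p))  # -> "x^3 - 3x^2 + 4"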
def parse_file_():
    """
    Retrieves the parsed information by specifying the file, the timestamp and latitude + longitude.
    Don't forget to encode the plus sign '+' = %2B!
    Example: GET /parse/data/ecmwf/an-2017-09-14.grib?timestamp=2017-09-16T15:21:20%2B00:00&lat=48.398400&lon=9.591550
    :param fileName: path to a retrieved ecmwf grib file.
    :return: 200 with JSON content, or 404 if the file is not found
    """
    try:
        [point, date] = validate_request_parameters()
    except ValueError, e:
        return misc.create_response(jsonify(message=e.message), 400)
    file_name = misc.build_file_name(date)
    path_to_file = file_directory + os.sep + file_name
    files = file_status.get_available_files()
    if file_name not in files or not os.path.isfile(path_to_file):
        msg = {'message': 'Given filename={} could not be found; the available files are attached.'.format(file_name),
               'data': {'files': files}}
        return misc.create_response(jsonify(transform_message(msg).data), 404)
    result = cache.cache.get(request.url)  # check cache
    if not result:
        result = parse_action.parse(path_to_file, point, date)
    return Response(json.dumps(transform(result), default=CopernicusData.json_serial, indent=2),
                    mimetype="text/json", status=200)
be54ab0dc3b9ec0a0f5462684f66d209e6e72728
17,755
def make_model(drc_csv: str, sat_tables: list, sector_info_csv: str, ia_tables=None, units_csv='', compartments_csv='', locations_csv='') -> model.Model: """ Creates a full EE-IO model with all information required for calculations, JSON-LD export, validation, etc. :param drc_csv: CSV file with the direct requirements matrix A :param sat_tables: a list of CSV files with satellite tables :param sector_info_csv: CSV file with sector metadata :param ia_tables: an optional list of CSV files with impact assessment factors. :param units_csv: optional file with unit metadata :param compartments_csv: optional file with compartment metadata :param locations_csv: optional file with location metadata """ drc = read_csv_data_frame(drc_csv) sat_table = make_sat_table(*sat_tables) sectors = ref.SectorMap.read(sector_info_csv) ia_table = None if ia_tables is not None and len(ia_tables) > 0: ia_table = ia.Table() for iat in ia_tables: ia_table.add_file(iat) def read_map(name, clazz): if name is None or name == '': return clazz.create_default() else: return clazz.read(name) units = read_map(units_csv, ref.UnitMap) compartments = read_map(compartments_csv, ref.CompartmentMap) locations = read_map(locations_csv, ref.LocationMap) return model.Model(drc, sat_table, sectors, ia_table, units, compartments, locations)
f990f1617de29f75e4f86acc7647f7d9a06bfa53
17,756
def newNXentry(parent, name):
    """Create new NXentry group.

    Args:
        parent (h5py.File or h5py.Group): hdf5 file handle or group
        name (str): group name without extension

    Returns:
        h5py.Group: new NXentry group
    """
    grp = parent.create_group(name)
    grp.attrs["NX_class"] = "NXentry"
    if "NX_class" in parent.attrs:
        if parent.attrs["NX_class"] == "NXentry":
            grp.attrs["NX_class"] = "NXsubentry"
    return grp
1529fbe80ca8a23f8cd7717c5df5d7d239840149
17,757
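A small usage sketch for newNXentry against an in-memory HDF5 file; it assumes a recent h5py, which reads string attributes back as str so the nested NXsubentry check works.

import h5py

with h5py.File("example.nxs", "w", driver="core", backing_store=False) as f:
    entry = newNXentry(f, "entry1")      # top level -> NX_class == "NXentry"
    sub = newNXentry(entry, "process1")  # nested    -> NX_class == "NXsubentry"
    print(entry.attrs["NX_class"], sub.attrs["NX_class"])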
from datetime import timedelta


def _build_europe_gas_day_tzinfo():
    """
    Build the Europe/Gas_Day timezone based on CET time.

    :raises ValueError: When something is wrong with the CET/CEST definition
    """
    zone = 'Europe/Gas_Day'
    transitions = _get_transitions()
    transition_info_cet = _get_transition_info_cet()

    difference_sec = 3600 * 6

    transition_info_gas_day = []
    for dt1, dt2, name in transition_info_cet:
        sec1 = dt1.seconds - difference_sec
        hours1 = sec1 / (60 * 60)
        gas_dt1 = timedelta(hours=hours1)

        sec2 = dt2.seconds - difference_sec
        hours2 = sec2 / (60 * 60)
        gas_dt2 = timedelta(hours=hours2)

        if name not in ('CET', 'CEST'):
            raise ValueError("tz name not CET or CEST")

        transition_info_gas_day.append((gas_dt1, gas_dt2, name))

    gas_day_cls = type('Europe/Gas_Day', (DstTzInfo,), dict(
        zone=zone,
        _utc_transition_times=transitions,
        _transition_info=transition_info_gas_day
    ))

    _tzinfo_cache[zone] = gas_day_cls()
    return _tzinfo_cache[zone]
abc83bc3096c0dacfb7d9be88fdf4043ee328cf1
17,758
from typing import Set from typing import Optional from typing import List from typing import Dict def prepare_variants_relations_data( queryset: "QuerySet", fields: Set[str], attribute_ids: Optional[List[int]], warehouse_ids: Optional[List[int]], ) -> Dict[int, Dict[str, str]]: """Prepare data about variants relation fields for given queryset. It return dict where key is a product pk, value is a dict with relation fields data. """ warehouse_fields = ProductExportFields.WAREHOUSE_FIELDS attribute_fields = ProductExportFields.VARIANT_ATTRIBUTE_FIELDS result_data: Dict[int, dict] = defaultdict(dict) fields.add("variants__pk") if attribute_ids: fields.update(ProductExportFields.VARIANT_ATTRIBUTE_FIELDS.values()) if warehouse_ids: fields.update(ProductExportFields.WAREHOUSE_FIELDS.values()) relations_data = queryset.values(*fields) for data in relations_data.iterator(): pk = data.get("variants__pk") image = data.pop("variants__images__image", None) result_data = add_image_uris_to_data( pk, image, "variants__images__image", result_data ) # handle attribute and warehouse data attribute_data: dict = {} warehouse_data: dict = {} attribute_pk = str(data.pop(attribute_fields["attribute_pk"], "")) attribute_data = { "slug": data.pop(attribute_fields["slug"], None), "value": data.pop(attribute_fields["value"], None), } warehouse_pk = str(data.pop(warehouse_fields["warehouse_pk"], "")) warehouse_data = { "slug": data.pop(warehouse_fields["slug"], None), "qty": data.pop(warehouse_fields["quantity"], None), } if attribute_ids and attribute_pk in attribute_ids: result_data = add_attribute_info_to_data( pk, attribute_data, "variant attribute", result_data ) if warehouse_ids and warehouse_pk in warehouse_ids: result_data = add_warehouse_info_to_data(pk, warehouse_data, result_data) result: Dict[int, Dict[str, str]] = { pk: { header: ", ".join(sorted(values)) if isinstance(values, set) else values for header, values in data.items() } for pk, data in result_data.items() } return result
37c275898bb69fc61cbdddf2f4914ac77f22e7ed
17,759
import torch def vnorm(velocity, window_size): """ Normalize velocity with latest window data. - Note that std is not divided. Only subtract mean - data should have dimension 3. """ v = velocity N = v.shape[1] if v.dim() != 3: print("velocity's dim must be 3 for batch operation") exit(-1) on_gpu = v.is_cuda if not on_gpu and torch.cuda.is_available(): v = v.cuda() padding_size = window_size - 1 batch_size = v.shape[0] pad = v[:, 0, :].reshape(batch_size, 1, 3).expand(batch_size, padding_size, 3) v_normed = torch.cat([pad, v], dim=1) for i in range(window_size-1, 0, -1): v_normed[:, i-1:i-1+N, :] += v v_normed = v_normed[:, :N, :] / float(window_size) v_normed = v - v_normed if not on_gpu: v_normed = v_normed.cpu() return v_normed
69f3c8cd6a5628d09a7fd78f3b5b779611a68411
17,760
import torch def valid_collate_fn(data): """Build mini-batch tensors from a list of (image, caption) tuples. Args: data: list of (image, caption) tuple. - image: torch tensor of shape (3, 256, 256). - caption: torch tensor of shape (?); variable length. Returns: images: torch tensor of shape (batch_size, 3, 256, 256). targets: torch tensor of shape (batch_size, padded_length). lengths: list; valid length for each padded caption. """ # Sort a data list by caption length data.sort(key=lambda x: len(x[1]), reverse=True) images, captions, product_ids, query_ids, boxes = zip(*data) # Merge images (convert tuple of 3D tensor to 4D tensor) images_lengths = [image.shape[0] for image in images] images_tensor = torch.zeros(len(images), max(images_lengths), 2048) images_masks = torch.zeros(len(images), max(images_lengths)) boxes_tensor = torch.zeros(len(images), max(images_lengths), 4) for i, image in enumerate(images): end = images_lengths[i] images_tensor[i, :end,:] = image[:,:] images_masks[i, :end] = 1 boxes_tensor[i, :end-1,:] = boxes[i][:,:] #images_tensor[i, :end,:] = image[:end,:] #images = torch.stack(images, 0) # Merget captions (convert tuple of 1D tensor to 2D tensor) lengths = [len(cap) for cap in captions] targets = torch.zeros(len(captions), max(lengths)).long() txt_masks = torch.zeros(len(captions), max(lengths)) for i, cap in enumerate(captions): end = lengths[i] targets[i, :end] = cap[:end] txt_masks[i, :end] = 1 return images_tensor, targets, images_lengths, lengths, images_masks, txt_masks, product_ids, query_ids, boxes_tensor
3eabc71d3ae68d4ca24d1a146be4fab8543c8566
17,761
def parse_statement(tokens): """ statement: | 'while' statement_list 'do' statement_list 'end' | 'while' statement_list 'do' 'end' | 'if' if_body | num_literal | string_literal | builtin | identifier """ if tokens.consume_maybe("while"): condition = parse_statement_list(tokens) tokens.consume_only("do") statement = WhileStatement(condition) if tokens.consume_maybe("end"): return statement body = parse_statement_list(tokens) statement.body = body tokens.consume_only("end") return statement if tokens.consume_maybe("if"): return parse_if_body(tokens) token = tokens.consume() return Statement(token)
dfa26e4b834104a85b7266628fb6635c86e64b09
17,762
def vector_to_pytree_fun(func): """Make a pytree -> pytree function from a vector -> vector function.""" def wrapper(state): return func(Vector(state)).pytree return wrapper
79cc16c0e9187fc3bb944f40433ba1bd7850cb6e
17,763
def _filter_contacts(people_filter, maillist_filter, qs, values): """Helper for filtering based on subclassed contacts. Runs the filter on separately on each subclass (field defined by argument, the same values are used), then filters the queryset to only keep items that have matching. """ people = Person.objects.filter(**{people_filter + '__in': values}) mailing_lists = Maillist.objects.filter(**{maillist_filter + '__in': values}) return qs.filter(Q(contact__in=people) | Q(contact__in=mailing_lists))
704396156370596433e78be1bb7bf5b4a77f284d
17,764
def viz_property(statement, properties): """Create properties for graphviz element""" if not properties: return statement + ";"; return statement + "[{}];".format(" ".join(properties))
518d4a662830737359a8b3cb9cec651394823785
17,765
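A quick usage sketch for viz_property producing Graphviz statements.

print(viz_property('"A" -> "B"', ['color=red', 'penwidth=2']))
# -> "A" -> "B"[color=red penwidth=2];
print(viz_property('rankdir=LR', []))
# -> rankdir=LR;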
def create_fsl_fnirt_nonlinear_reg(name='fsl_fnirt_nonlinear_reg'): """ Performs non-linear registration of an input file to a reference file using FSL FNIRT. Parameters ---------- name : string, optional Name of the workflow. Returns ------- nonlinear_register : nipype.pipeline.engine.Workflow Notes ----- Workflow Inputs:: inputspec.input_skull : string (nifti file) File of input brain with skull inputspec.reference_skull : string (nifti file) Target brain with skull to normalize to inputspec.fnirt_config : string (fsl fnirt config file) Configuration file containing parameters that can be specified in fnirt Workflow Outputs:: outputspec.output_brain : string (nifti file) Normalizion of input brain file outputspec.nonlinear_xfm : string Nonlinear field coefficients file of nonlinear transformation Registration Procedure: 1. Perform a nonlinear registration on an input file to the reference file utilizing affine transformation from the previous step as a starting point. 2. Invert the affine transformation to provide the user a transformation (affine only) from the space of the reference file to the input file. Workflow Graph: .. image:: ../images/nonlinear_register.dot.png :width: 500 Detailed Workflow Graph: .. image:: ../images/nonlinear_register_detailed.dot.png :width: 500 """ nonlinear_register = pe.Workflow(name=name) inputspec = pe.Node(util.IdentityInterface(fields=['input_brain', 'input_skull', 'reference_brain', 'reference_skull', 'interp', 'ref_mask', 'linear_aff', 'fnirt_config']), name='inputspec') outputspec = pe.Node(util.IdentityInterface(fields=['output_brain', 'nonlinear_xfm']), name='outputspec') nonlinear_reg = pe.Node(interface=fsl.FNIRT(), name='nonlinear_reg_1') nonlinear_reg.inputs.fieldcoeff_file = True nonlinear_reg.inputs.jacobian_file = True brain_warp = pe.Node(interface=fsl.ApplyWarp(), name='brain_warp') nonlinear_register.connect(inputspec, 'input_skull', nonlinear_reg, 'in_file') nonlinear_register.connect(inputspec, 'reference_skull', nonlinear_reg, 'ref_file') nonlinear_register.connect(inputspec, 'interp', brain_warp, 'interp') nonlinear_register.connect(inputspec, 'ref_mask', nonlinear_reg, 'refmask_file') # FNIRT parameters are specified by FSL config file # ${FSLDIR}/etc/flirtsch/TI_2_MNI152_2mm.cnf (or user-specified) nonlinear_register.connect(inputspec, 'fnirt_config', nonlinear_reg, 'config_file') nonlinear_register.connect(inputspec, 'linear_aff', nonlinear_reg, 'affine_file') nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file', outputspec, 'nonlinear_xfm') nonlinear_register.connect(inputspec, 'input_brain', brain_warp, 'in_file') nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file', brain_warp, 'field_file') nonlinear_register.connect(inputspec, 'reference_brain', brain_warp, 'ref_file') nonlinear_register.connect(brain_warp, 'out_file', outputspec, 'output_brain') return nonlinear_register
dcb723e8fc33df2c8cc167c2bedc72f802d124c5
17,766
def _subspace_plot( inputs, output, *, input_names, output_name, scatter_args=None, histogram_args=None, min_output=None, max_output=None ): """ Do actual plotting """ if scatter_args is None: scatter_args = {} if histogram_args is None: histogram_args = {} if min_output is None: min_output = min(output) if max_output is None: max_output = max(output) # see https://matplotlib.org/examples/pylab_examples/multi_image.html _, num_inputs = inputs.shape fig, axes, grid = _setup_axes(input_names=input_names) if output_name is not None: fig.suptitle(output_name) norm = _Normalize(min_output, max_output) hist_plots = [] for i in range(num_inputs): hist_plots.append(_plot_hist( inputs[:, i], axis=axes[i][i], **histogram_args )) scatter_plots = [] scatter_plots_grid = [] for y_index in range(num_inputs): scatter_plots_grid.append([]) for x_index in range(y_index): sc_plot = _plot_scatter( x=inputs[:, x_index], y=inputs[:, y_index], z=output, axis=axes[y_index][x_index], # check order norm=norm, **scatter_args ) scatter_plots.append(sc_plot) scatter_plots_grid[y_index].append(sc_plot) cbar_ax = fig.add_subplot(grid[0, 1:]) fig.colorbar( scatter_plots[0], cax=cbar_ax, orientation='horizontal', ) cbar_ax.set_aspect(1/20) return fig
720092b24f1675f4f4c64c206cd7cad8b3a6dee6
17,767
def install_microcode_filter(*args): """ install_microcode_filter(filter, install=True) register/unregister non-standard microcode generator @param filter: - microcode generator object (C++: microcode_filter_t *) @param install: - TRUE - register the object, FALSE - unregister (C++: bool) """ return _ida_hexrays.install_microcode_filter(*args)
e51c4f5bcc749692bd69c0d9024e6e004da9e9ac
17,770
def get_marvel_character_embed(attribution_text, result): """Parses a given JSON object that contains a result of a Marvel character and turns it into an Embed :param attribution_text: The attributions to give to Marvel for using the API :param result: A JSON object of a Marvel API call result :returns: A nice-looking Embed for discord users to look at """ return Embed( title=result["name"], description=result["description"], colour=PRIMARY_EMBED_COLOR ).add_field( name="Series", value="\n".join([ " * `{}`".format(series["name"]) for series in result["series"]["items"] ]) ).add_field( name="Comics", value="\n".join([ " * `{}`".format(comic["name"]) for comic in result["comics"]["items"] ]) ).set_image( url="{}.{}".format( result["thumbnail"]["path"], result["thumbnail"]["extension"] ) ).set_footer( text=attribution_text )
ed315c2581de86f539c7a08ff6c37e9a889f2670
17,771
def get_or_create_dfp_targeting_key(name, key_type='FREEFORM'): """ Get or create a custom targeting key by name. Args: name (str) Returns: an integer: the ID of the targeting key """ key_id = dfp.get_custom_targeting.get_key_id_by_name(name) if key_id is None: key_id = dfp.create_custom_targeting.create_targeting_key(name, key_type=key_type) return key_id
36211d5a383d54fde3e42d30d81778008343c676
17,772
def positive(x: Array, /) -> Array: """ Array API compatible wrapper for :py:func:`np.positive <numpy.positive>`. See its docstring for more information. """ if x.dtype not in _numeric_dtypes: raise TypeError("Only numeric dtypes are allowed in positive") return Array._new(np.positive(x._array))
2caa9b1714c549e390dba494a748aa86d5077d67
17,773
import csv def read_loss_file(path): """Read the given loss csv file and process its data into lists that can be plotted by matplotlib. Args: path (string): The path to the file to be read. Returns: A list of lists, one list for each subnetwork containing the loss values over time. """ with open(path, 'r') as csvfile: reader = csv.reader(csvfile) data = [] for row in reader: # Ignore the epoch numbers if len(data) == 0: data = [[] for _ in row[1:]] for i in range(1, len(row)): data[i-1].append(float(row[i])) return data
8e861f0bf46db5085ea2f30a7e70a4bdfa0b9697
17,774
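A small round-trip sketch for read_loss_file using a temporary CSV whose first column is the epoch number, matching the format the reader skips.

import csv
import tempfile

rows = [[0, 0.91, 0.88], [1, 0.75, 0.80], [2, 0.60, 0.71]]
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False, newline="") as tmp:
    csv.writer(tmp).writerows(rows)
    path = tmp.name

print(read_loss_file(path))
# -> [[0.91, 0.75, 0.6], [0.88, 0.8, 0.71]]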
from typing import Union def number2human(n: Union[int, float]) -> str: """ Format large number into readable string for a human Examples: >>> number2human(1000) '1.0K' >>> number2human(1200000) '1.2M' """ # http://code.activestate.com/recipes/578019 # >>> bytes2human(10000) # '9.8K' # >>> bytes2human(100001221) # '95.4M' symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols): prefix[s] = (10 ** 3) ** (i + 1) for s in reversed(symbols): if n >= prefix[s]: value = float(n) / prefix[s] return '%.1f%s' % (value, s) return "%.2f" % n
26e99ca6b3cf51bf554018e1c97a1c8bebd51811
17,775
def binomial_confidence_interval(successes, trials, error_rate): """Computes a confidence interval on the true p of a binomial. Assumes: - The given `successes` count outcomes of an iid Bernoulli trial with unknown probability p, that was repeated `trials` times. Guarantees: - The probability (over the randomness of drawing the given sample) that the true p is outside the returned interval is no more than the given `error_rate`. Args: successes: Python or numpy `int` number of successes. trials: Python or numpy `int` number of trials. error_rate: Python `float` admissible rate of mistakes. Returns: low_p: Lower bound of confidence interval. high_p: Upper bound of confidence interval. Raises: ValueError: If scipy is not available. """ def p_small_enough(p): # This is positive iff p is smaller than the desired upper bound. log_prob = stats.binom.logcdf(successes, trials, p) return log_prob - np.log(error_rate / 2.) def p_big_enough(p): # This is positive iff p is larger than the desired lower bound. # Scipy's survival function for discrete random variables excludes # the argument, but I want it included for this purpose. log_prob = stats.binom.logsf(successes-1, trials, p) return log_prob - np.log(error_rate / 2.) if successes < trials: high_p = optimize.brentq( p_small_enough, successes / float(trials), 1., rtol=1e-9) else: high_p = 1. if successes > 0: low_p = optimize.brentq( p_big_enough, 0., successes / float(trials), rtol=1e-9) else: low_p = 0. return low_p, high_p
03caf19e30a4b280c6b060b7ba4b1166b526feec
17,776
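A usage sketch for binomial_confidence_interval; it assumes the surrounding module provides scipy's stats and optimize and numpy as np, as the function body implies.

# e.g. 45 successes out of 100 trials with a 5% admissible error rate
low, high = binomial_confidence_interval(45, 100, 0.05)
print(round(low, 3), round(high, 3))  # roughly 0.35 and 0.55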
def try_parse_func_decl(start, end): """Parse a function declarator between start and end. Expects that tokens[end-1] is a close parenthesis. If a function declarator is successfully parsed, returns the decl_node.Function object. Otherwise, returns None. """ open_paren = find_pair_backward(end - 1) try: params, index = parse_parameter_list(open_paren + 1) except ParserError as e: log_error(e) return None if index == end - 1: return decl_nodes.Function( params, parse_declarator(start, open_paren))
1bcdce513fdaf6e28e034ba1578bd271a00c31a5
17,777
import contextlib def eth_getBlockTransactionCountByNumber(block_number: int) -> int: """ See EthereumAPI#get_block_transaction_count_by_number. """ with contextlib.closing(EthereumAPI()) as api: return api.get_block_transaction_count_by_number(block_number)
d4fbd368bb49854ceee589ba20c956275ece95c6
17,778
from typing import List
from typing import Any
from typing import NamedTuple
from typing import Union


def day(db: Database, site: str = 'test', tag: str = '', search_body: str = '') -> List[Any]:
    """
    Returns a list of named tuples with daily post counts.
    # xxx typing this as List[DayCount] would require a class DayCount(NamedTuple) ...
    """
    tag_where = ''
    body_where = ''
    param = [site]  # type: List[Union[str, int]]
    if tag != '':
        tag_where = "AND (tags like ? or tags like ?)"
        param.extend([f"% {tag} %", f"% {tag}:%"])
    if search_body != '':
        body_where = "AND body LIKE ?"
        param.append(f"%{search_body}%")
    if db.dbms == 'postgresql':
        date = 'to_char(DATE("datetime"),\'YYYY-MM-DD\')'
    else:
        date = 'DATE("datetime")'
    sql = f"""
        SELECT
            {date} as "date"
            , COUNT(*) as "count"
        FROM basedata
        WHERE site = ?
        {tag_where}
        {body_where}
        GROUP BY DATE("datetime")
        ORDER BY DATE("datetime") DESC
        LIMIT ?
        """
    limit = 1000  # PENDING: add paging?
    param.append(limit)
    day_count = NamedTuple('day_count', (('date', str), ('count', int)))
    logger.log(5, "daily post-count SQL: %s", sql)
    logger.log(5, "placeholder parameters: %s", param)
    return db.execute_fetchall(sql, param, namedtuple=day_count)
c60a4a8aabc546a2dc7b412d73c1d0a97d7ccc25
17,779
import math def ppv2( aim_stars=None, speed_stars=None, max_combo=None, nsliders=None, ncircles=None, nobjects=None, base_ar=5.0, base_od=5.0, mode=MODE_STD, mods=MODS_NOMOD, combo=None, n300=None, n100=0, n50=0, nmiss=0, score_version=1, bmap=None ): """ calculates ppv2 returns (pp, aim_pp, speed_pp, acc_pp, acc_percent) if bmap is provided, mode, base_ar, base_od, max_combo, nsliders, ncircles and nobjects are taken from it. otherwise they must be provided. if combo is None, max_combo is used. if n300 is None, max_combo - n100 - n50 - nmiss is used. """ if mode != MODE_STD: info( "ppv2 is only implemented for osu!std at the moment\n" ) raise NotImplementedError if bmap != None: mode = bmap.mode base_ar = bmap.ar base_od = bmap.od max_combo = bmap.max_combo() nsliders = bmap.nsliders ncircles = bmap.ncircles nobjects = len(bmap.hitobjects) else: if aim_stars == None: raise ValueError("missing aim_stars or bmap") if speed_stars == None: raise ValueError("missing speed_stars") if max_combo == None: raise ValueError("missing max_combo or bmap") if nsliders == None: raise ValueError("missing nsliders or bmap") if ncircles == None: raise ValueError("missing ncircles or bmap") if nobjects == None: raise ValueError("missing nobjects or bmap") if max_combo <= 0: info("W: max_combo <= 0, changing to 1\n") max_combo = 1 if combo == None: combo = max_combo - nmiss if n300 == None: n300 = nobjects - n100 - n50 - nmiss # accuracy ---------------------------------------------------- accuracy = acc_calc(n300, n100, n50, nmiss) real_acc = accuracy if score_version == 1: # scorev1 ignores sliders since they are free 300s # for whatever reason it also ignores spinners nspinners = nobjects - nsliders - ncircles real_acc = acc_calc( n300 - nsliders - nspinners, n100, n50, nmiss ) # can go negative if we miss everything real_acc = max(0.0, real_acc) elif score_version == 2: ncircles = nobjects else: info("unsupported scorev%d\n" % (score_version)) raise NotImplementedError # global values ----------------------------------------------- def low_objects(stars): multiplier = min(0.5, 0.59 + (-0.59 * math.exp(-0.0038 * nobjects))) multiplier = min(0.95 + min(0.1, nobjects / 5000), 0.55 + multiplier + max(0, 0.4 - pp_base(stars) / 12.5)) def bonus(n): if n <= 500: return multiplier elif n <= 2000: return bonus(500) + 0.3 * min(1, (n-500) / 1500) elif n > 2000: return bonus(2000) + 0.5 * math.log10(n / 2000) return bonus(nobjects) miss_penality = pow(0.97, nmiss) combo_break = pow(combo, 0.8) / pow(max_combo, 0.8) # calculate stats with mods speed_mul, ar, od, _, _ = ( mods_apply(mods, ar=base_ar, od=base_od) ) # ar bonus ---------------------------------------------------- ar_bonus = 1.0 if ar > 10.33: ar_bonus += 0.45 * (ar - 10.33) elif ar < 8.0: low_ar_bonus = 0.01 * (8.0 - ar) if mods & MODS_HD != 0: low_ar_bonus *= 2.0 ar_bonus += low_ar_bonus # aim pp ------------------------------------------------------ aim = pp_base(aim_stars) aim *= low_objects(aim_stars) aim *= miss_penality aim *= combo_break aim *= ar_bonus if mods & MODS_HD != 0: aim *= 1.02 + (11 - ar) / 50 if mods & MODS_FL != 0: aim *= max(1, 1.45 * low_objects(aim_stars)) acc_bonus = 0.5 + accuracy / 2.0 od_bonus = 0.98 + (od * od) / 2500.0 aim *= acc_bonus aim *= od_bonus # speed pp ---------------------------------------------------- speed = pp_base(speed_stars) speed *= low_objects(speed_stars) speed *= miss_penality speed *= combo_break speed *= acc_bonus speed *= od_bonus if mods & MODS_HD != 0: speed *= 1.18 # acc pp 
------------------------------------------------------ acc = pow(1.52163, od) * pow(real_acc, 24.0) * 2.83 # length bonus (not the same as speed/aim length bonus) acc *= min(1.15, pow(ncircles / 1000.0, 0.3)) if mods & MODS_HD != 0: acc *= 1.02 if mods & MODS_FL != 0: acc *= 1.02 # total pp ---------------------------------------------------- final_multiplier = 1.12 if mods & MODS_NF != 0: final_multiplier *= 0.90 if mods & MODS_SO != 0: final_multiplier *= 0.95 total = ( pow( pow(aim, 1.1) + pow(speed, 1.1) + pow(acc, 1.1), 1.0 / 1.1 ) * final_multiplier ) return (total, aim, speed, acc, accuracy * 100.0)
c4cc793b7eb2acc45ca83762f946a476452a5e34
17,781
def p_portail_home(request): """ Portail d'accueil de CRUDY """ crudy = Crudy(request, "portail") title = crudy.application["title"] crudy.folder_id = None crudy.layout = "portail" return render(request, 'p_portail_home.html', locals())
a883cafd84b1ce24ead37ebbe6c0e3a15ea476c6
17,783
import numpy as np


def A_norm(freqs, eta):
    """Calculates the constant scaling factor A_0

    Parameters
    ----------
    freqs : array
        The frequencies in Natural units (Mf, G=c=1) of the waveform
    eta : float
        The reduced mass ratio
    """
    const = np.sqrt(2*eta/3/np.pi**(1/3))
    return const*freqs**-(7/6)
74947e34efd7b6b0bb31aac35c9932623d4a28aa
17,785
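A quick usage sketch for A_norm with numpy; the frequency values and mass ratio are arbitrary illustrative choices.

import numpy as np

freqs = np.array([0.01, 0.02, 0.04])  # Mf, arbitrary illustrative values
eta = 0.25                            # equal-mass binary
print(A_norm(freqs, eta))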
from typing import IO from typing import Counter from operator import sub def task1(input_io: IO) -> int: """ Solve task 1. Parameters ---------- input_io: IO Day10 stream of adapters joltage. Return ------ int number of differentes of 1 times number of diferences of 3. """ numbers = list(read_numbers(input_io)) numbers.append(0) numbers.sort() counter = Counter(map(sub, numbers[1:], numbers[:-1])) return counter[1] * (counter[3] + 1)
b566a79013f442b7216118458958212186e57f07
17,786
def get_total_balance(view_currency='BTC') -> float: """ Shows total balance for account in chosen currency :param view_currency: currency for total balance :return: total balance amount for account """ result = pay.get_balance() balance_dict = result.get('balance') total = 0 for currency in balance_dict: total += ((balance_dict.get(currency).get(view_currency).get('total')) + (balance_dict.get(currency).get(view_currency).get('reserved'))) return total
9e595eceac7df63779cd8c8e6d155092ec76a36e
17,788
import binascii


def bin_hex(binary):
    """
    Convert a bytes value (e.g. a bytes32) to its hex string representation

    Parameters
    ----------
    binary : bytes
        the bytes object to convert

    Returns
    -------
    str
    """
    return binascii.hexlify(binary).decode('utf-8')
41f9c8a498aa3628f64cf59c93896f42d8dfd56a
17,789
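A quick usage sketch for bin_hex.

print(bin_hex(b"\x01\xab\xff"))  # -> "01abff"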