content: string (lengths 35 – 762k)
sha1: string (length 40)
id: int64 (0 – 3.66M)
def genargs() -> ArgumentParser: """ Generate an input string parser :return: parser """ parser = ArgumentParser() parser.add_argument("indir", help="Location of input shexj files") parser.add_argument("outdir", help="Location of output shexc files") parser.add_argument("-s", "--save", help="Save edited shexj image before conversion", action="store_true") return parser
1958d772e316212f90d6e3b84c5452e4fc02f2da
3,658,100
def get_model_name(factory_class): """Get model fixture name by factory.""" return ( inflection.underscore(factory_class._meta.model.__name__) if not isinstance(factory_class._meta.model, str) else factory_class._meta.model)
1021f287803b5e6dd9231503d8ddab15c355a800
3,658,101
def GetSpatialFeatures(img, size=(32, 32), isFeatureVector=True): """ Extracts spatial features of the image. param: img: Source image param: size: Target image size param: isFeatureVector: Indication if the result needs to be unrolled into a feature vector returns: Spatial features """ resizedImg = cv2.resize(img, size) if isFeatureVector: return resizedImg.ravel() else: return resizedImg
36d864bddb125f7cb13c4bd800076733ab939d58
3,658,102
import logging import re from lxml import etree, html def html_to_text(content): """Filter out HTML from the text.""" text = content['text'] try: text = html.document_fromstring(text).text_content() except etree.Error as e: logging.error( 'Syntax error while processing {}: {}\n\n' 'Falling back to regexes'.format(text, e)) text = re.sub(r'<[^>]*>', '', text) text = _to_unicode(text) content['text'] = text return content
43fc18400ef121bf12f683da03763dff229d45ae
3,658,103
async def get_robot_positions() -> control.RobotPositionsResponse: """ Positions determined experimentally by issuing move commands. Change pipette position offsets the mount to the left or right such that a user can easily access the pipette mount screws with a screwdriver. Attach tip position places either pipette roughly in the front-center of the deck area """ robot_positions = control.RobotPositions( change_pipette=control.ChangePipette( target=control.MotionTarget.mount, left=[300, 40, 30], right=[95, 40, 30] ), attach_tip=control.AttachTip( target=control.MotionTarget.pipette, point=[200, 90, 150] ), ) return control.RobotPositionsResponse(positions=robot_positions)
816f1794231aa2690665caa8eae26c301d55b198
3,658,104
def compute_basis(normal): """ Compute an orthonormal basis for a vector. """ u = [0.0, 0.0, 0.0] v = [0.0, 0.0, 0.0] u[0] = -normal[1] u[1] = normal[0] u[2] = 0.0 if ((u[0] == 0.0) and (u[1] == 0.0)): u[0] = 1.0 mag = vector_mag(u) if (mag == 0.0): return for i in range(0, 3): u[i] = u[i] / mag v = cross_product(normal, u) mag = vector_mag(v) if (mag != 0.0): for i in range(0, 3): v[i] = v[i] / mag return u, v
3623a80fcb86d506e5f9e2f94d98a69a2831b2a5
3,658,105
def do_LEE_correction(max_local_sig, u1, u2, exp_phi_1, exp_phi_2): """ Return the global p-value for an observed local significance after correcting for the look-elsewhere effect given expected Euler characteristic exp_phi_1 above level u1 and exp_phi_2 above level u2 """ n1, n2 = get_coefficients(u1, u2, exp_phi_1, exp_phi_2) this_global_p = global_pvalue(max_local_sig**2, n1, n2) print(' n1, n2 =', n1, n2) print(' local p_value = %f, local significance = %f' % (norm.cdf(-max_local_sig), max_local_sig)) print('global p_value = %f, global significance = %f' % (this_global_p, -norm.ppf(this_global_p))) return this_global_p
53ee295261d58c59aa1a0a667ec7ded2e986c256
3,658,106
def _check_password(request, mail_pass, uid): """ [メソッド概要] パスワードチェック """ error_msg = {} if len(mail_pass) <= 0: error_msg['mailPw'] = get_message('MOSJA10004', request.user.get_lang_mode()) logger.user_log('LOSI10012', request=request) logger.logic_log('LOSM17015', request=request) else: password_hash = OaseCommon.oase_hash(mail_pass) user = User.objects.get(user_id=uid) if not user: error_msg['mailPw'] = get_message('MOSJA32010', request.user.get_lang_mode()) logger.user_log('LOSI10013', request=request) logger.logic_log('LOSM17001', request=request) if user and user.password != password_hash: error_msg['mailPw'] = get_message('MOSJA32038', request.user.get_lang_mode()) logger.user_log('LOSI10013', request=request) logger.logic_log('LOSM17016', request=request) return error_msg
8753d0ed32db0c501f2f18af9ea88253b7a1add7
3,658,107
def _read_wb_indicator(indicator: str, start: int, end: int) -> pd.DataFrame: """Read an indicator from WB""" return pd.read_feather(config.paths.data + rf"/{indicator}_{start}_{end}.feather")
87a52d5f683fc9795a7baf9ff81f2961567c3a13
3,658,108
import subprocess def pr_branches() -> list[str]: """List of branches that start with 'pr-'""" out = subprocess.run( [ "git", "for-each-ref", "--shell", '--format="%(refname:strip=3)"', "refs/remotes/origin/pr-*", ], capture_output=True, ) branches = out.stdout.decode().splitlines() return [branch.replace('"', "").replace("'", "") for branch in branches]
f144d2546ef59cb392f4ad1226c2246384bdfd99
3,658,109
def scatter_raster_plot(spike_amps, spike_depths, spike_times, n_amp_bins=10, cmap='BuPu', subsample_factor=100, display=False): """ Prepare data for 2D raster plot of spikes with colour and size indicative of spike amplitude :param spike_amps: :param spike_depths: :param spike_times: :param n_amp_bins: no. of colour and size bins into which to split amplitude data :param cmap: :param subsample_factor: factor by which to subsample data when too many points for efficient display :param display: generate figure :return: ScatterPlot object, if display=True also returns matplotlib fig and ax objects """ amp_range = np.quantile(spike_amps, [0, 0.9]) amp_bins = np.linspace(amp_range[0], amp_range[1], n_amp_bins) color_bin = np.linspace(0.0, 1.0, n_amp_bins + 1) colors = (cm.get_cmap(cmap)(color_bin)[np.newaxis, :, :3][0]) spike_amps = spike_amps[0:-1:subsample_factor] spike_colors = np.zeros((spike_amps.size, 3)) spike_size = np.zeros(spike_amps.size) for iA in range(amp_bins.size): if iA == (amp_bins.size - 1): idx = np.where(spike_amps > amp_bins[iA])[0] # Make saturated spikes the darkest colour spike_colors[idx] = colors[-1] else: idx = np.where((spike_amps > amp_bins[iA]) & (spike_amps <= amp_bins[iA + 1]))[0] spike_colors[idx] = [*colors[iA]] spike_size[idx] = iA / (n_amp_bins / 8) data = ScatterPlot(x=spike_times[0:-1:subsample_factor], y=spike_depths[0:-1:subsample_factor], c=spike_amps * 1e6, cmap='BuPu') data.set_ylim((0, 3840)) data.set_color(color=spike_colors) data.set_clim(clim=amp_range * 1e6) data.set_marker_size(marker_size=spike_size) data.set_labels(title='Spike times vs Spike depths', xlabel='Time (s)', ylabel='Distance from probe tip (um)', clabel='Spike amplitude (uV)') if display: fig, ax = plot_scatter(data.convert2dict()) return data.convert2dict(), fig, ax return data
a72da0b1faacb5e13da51a2dc192778d956eb7e5
3,658,110
def is_pack_real(*args): """ is_pack_real(F) -> bool 'FF_PACKREAL' @param F (C++: flags_t) """ return _ida_bytes.is_pack_real(*args)
64e3ecf58607cf7c363e84a7e5a69ce0c76e8acc
3,658,111
import ast from typing import List from typing import Tuple def _get_sim205(node: ast.UnaryOp) -> List[Tuple[int, int, str]]: """Get a list of all calls of the type "not (a <= b)".""" errors: List[Tuple[int, int, str]] = [] if ( not isinstance(node.op, ast.Not) or not isinstance(node.operand, ast.Compare) or len(node.operand.ops) != 1 or not isinstance(node.operand.ops[0], ast.LtE) ): return errors comparison = node.operand left = to_source(comparison.left) right = to_source(comparison.comparators[0]) errors.append( (node.lineno, node.col_offset, SIM205.format(a=left, b=right)) ) return errors
f0efdf0b10a0d4ec8a4a75772277169aa708e005
3,658,112
from typing import Union def parse_boolean(val: str) -> Union[str, bool]: """Try to parse a string into boolean. The string is returned as-is if it does not look like a boolean value. """ val = val.lower() if val in ('y', 'yes', 't', 'true', 'on', '1'): return True if val in ('n', 'no', 'f', 'false', 'off', '0'): return False return val
e2cbda5a849e1166e0f2a3953220c93d1f3ba119
3,658,113
import csv from datetime import datetime def load_users(usertable): """ `usertable` is the path to a CSV with the following fields: user.* account.organisation SELECT user.*, account.organisation FROM user LEFT JOIN account ON user.account_id = account.id; """ users = [] with open(usertable) as f: reader = csv.reader(f) next(reader) # skip headers for row in reader: fields = iter(row) _ = next(fields) # id email = next(fields) hash = next(fields) password_invalid = bool(int(next(fields))) salt = next(fields) first_name = next(fields) last_name = next(fields) is_verified = bool(int(next(fields))) is_admin = bool(int(next(fields))) _ = next(fields) # verification_uuid _ = next(fields) # account_id created = datetime.strptime(next(fields), DATE_FORMAT) updated = datetime.strptime(next(fields), DATE_FORMAT) # from user.account: organisation = next(fields) # not importing these: if password_invalid or not is_verified or (is_admin and email == '[email protected]'): print('Warning: not importing user %s' % email) continue encoded = reencode_password(salt, hash) users.append(UserDef(email, encoded, first_name, last_name, organisation, created, updated)) return users
44c1dad255e2d8152fadbb53523a53002af95001
3,658,114
def get_progress_status_view(request): """Get progress status of a given task Each submitted task is identified by an ID defined when the task is created """ if 'progress_id' not in request.params: raise HTTPBadRequest("Missing argument") return get_progress_status(request.params['progress_id'])
4e68fc45443443187032ce06552e97895316be41
3,658,115
def pretty_param_string(param_ids: "collection") -> str: """Creates a nice string showing the parameters in the given collection""" return ' '.join(sorted(param_ids, key=utilize_params_util.order_param_id))
10f955480fcf760317f78d478c837c93df598e08
3,658,116
def _center_crop(image, size): """Crops to center of image with specified `size`.""" # Reference: https://github.com/mlperf/inference/blob/master/v0.5/classification_and_detection/python/dataset.py#L144 # pylint: disable=line-too-long height = tf.shape(image)[0] width = tf.shape(image)[1] out_height = size out_width = size # Reference code: # left = (width - out_width) / 2 # right = (width + out_width) / 2 # top = (height - out_height) / 2 # bottom = (height + out_height) / 2 # img = img.crop((left, top, right, bottom)) offset_height = tf.to_int32((height - out_height) / 2) offset_width = tf.to_int32((width - out_width) / 2) image = tf.image.crop_to_bounding_box( image, offset_height, offset_width, target_height=out_height, target_width=out_width, ) return image
2409d06945e77633f70de3e76f7152f61a9eaacf
3,658,117
def resample(ts, values, num_samples): """Convert a list of times and a list of values to evenly spaced samples with linear interpolation""" assert np.all(np.diff(ts) > 0) ts = normalize(ts) return np.interp(np.linspace(0.0, 1.0, num_samples), ts, values)
9453bba67add0307276ff71e85605812af337379
3,658,118
import os def merge_align_moa(data_dir, cp_moa_link): """ This function aligns L1000 MOAs with the cell painting MOAs and further fill null MOAs in one of the them (cell painting or L1000) with another, so far they are of the same broad sample ID. The function outputs aligned L1000 MOA metadata dataframe, that will be used for further analysis. params: data_dir: directory that contains L1000 files cp_moa_link: github link to cell painting MOA metadata information .csv file Returns: df_pertinfo: dataframe with aligned L1000 MOA metadata pertubation information. """ df_pertinfo_5 = pd.read_csv(os.path.join(data_dir, 'REP.A_A549_pert_info.txt'), delimiter = "\t") df_moa_cp = pd.read_csv(cp_moa_link, sep="\t") df_pertinfo_5 = df_pertinfo_5[['pert_id', 'pert_iname', 'moa']].copy() df_moa_cp = df_moa_cp[['broad_id', 'pert_iname', 'moa']].copy() df_pertinfo_5.rename(columns={"pert_id": "broad_id", "pert_iname": "pert_iname_L1000", "moa": "moa_L1000"}, inplace = True) df_moa_cp.rename(columns={"pert_iname": "pert_iname_cell_painting", "moa": "moa_cell_painting"}, inplace = True) df_pertinfo = pd.merge(df_pertinfo_5, df_moa_cp, on=['broad_id'], how = 'left') ##fill NaNs in columns - moa_L1000, pert_iname_L1000, with corresponding values in cell_painting and VICE VERSA df_pertinfo['moa_L1000'].fillna(value=df_pertinfo['moa_cell_painting'], inplace=True) df_pertinfo['moa_cell_painting'].fillna(value=df_pertinfo['moa_L1000'], inplace=True) df_pertinfo['pert_iname_cell_painting'].fillna(value=df_pertinfo['pert_iname_L1000'], inplace=True) for col in ['pert_iname_L1000', 'moa_L1000', 'pert_iname_cell_painting', 'moa_cell_painting']: df_pertinfo[col] = df_pertinfo[col].apply(lambda x: x.lower()) df_pertinfo.rename(columns={"broad_id": "pert_id", "pert_iname_L1000": "pert_iname", "moa_L1000": "moa"}, inplace = True) df_pertinfo.drop(['pert_iname_cell_painting', 'moa_cell_painting'], axis = 1, inplace = True) return df_pertinfo
898f4c49c839900d3f0f44eae589c1227e7adbd0
3,658,119
def supports_color(stream) -> bool: # type: ignore """Determine whether an output stream (e.g. stdout/stderr) supports displaying colored text. A stream that is redirected to a file does not support color. """ return hasattr(stream, "isatty") and stream.isatty()
4a427d6725206ef33b3f4da0ace6f2d6c3db78a9
3,658,120
from typing import Union from typing import Optional from typing import Tuple from typing import Dict from typing import List from bs4 import BeautifulSoup def parse_repo_links( html: Union[str, bytes], base_url: Optional[str] = None, from_encoding: Optional[str] = None, ) -> Tuple[Dict[str, str], List[Link]]: """ .. versionadded:: 0.7.0 Parse an HTML page from a simple repository and return a ``(metadata, links)`` pair. The ``metadata`` element is a ``Dict[str, str]``. Currently, the only key that may appear in it is ``"repository_version"``, which maps to the repository version reported by the HTML page in accordance with :pep:`629`. If the HTML page does not contain a repository version, this key is absent from the `dict`. The ``links`` element is a list of `Link` objects giving the hyperlinks found in the HTML page. :param html: the HTML to parse :type html: str or bytes :param Optional[str] base_url: an optional URL to join to the front of the links' URLs (usually the URL of the page being parsed) :param Optional[str] from_encoding: an optional hint to Beautiful Soup as to the encoding of ``html`` when it is `bytes` (usually the ``charset`` parameter of the response's :mailheader:`Content-Type` header) :rtype: Tuple[Dict[str, str], List[Link]] :raises UnsupportedRepoVersionError: if the repository version has a greater major component than the supported repository version """ soup = BeautifulSoup(html, "html.parser", from_encoding=from_encoding) base_tag = soup.find("base", href=True) if base_tag is not None: if base_url is None: base_url = base_tag["href"] else: base_url = urljoin(base_url, base_tag["href"]) if base_url is None: def basejoin(url: str) -> str: return url else: def basejoin(url: str) -> str: assert isinstance(base_url, str) return urljoin(base_url, url) metadata = {} pep629_meta = soup.find( "meta", attrs={"name": "pypi:repository-version", "content": True}, ) if pep629_meta is not None: metadata["repository_version"] = pep629_meta["content"] check_repo_version(metadata["repository_version"]) links = [] for link in soup.find_all("a", href=True): links.append( Link( text="".join(link.strings).strip(), url=basejoin(link["href"]), attrs=link.attrs, ) ) return (metadata, links)
556ba2bb728c26668548d4f714dc12b1cf2b48bd
3,658,121
def calc_kappa4Franci(T_K, a_H, a_H2CO3s): """ Calculates kappa4 in the PWP equation using approach from Franci's code. Parameters ---------- T_K : float temperature Kelvin a_H : float activity of hydrogen (mol/L) a_H2CO3s : float activity of carbonic acid (mol/L) Returns ------- kappa4 : float constant kappa4 in the PWP equation (cm^4/mmol/s) Notes ----- See more info under documentation for pwpRateFranci(). """ K_2 = calc_K_2(T_K) K_c = calc_K_c(T_K) kappa1 = calc_kappa1(T_K) kappa2 = calc_kappa2(T_K) kappa3 = calc_kappa3(T_K) kappa4 = (K_2/K_c)*(kappa1 + 1/a_H*(kappa2*a_H2CO3s + kappa3) ) return kappa4
a5dcab9d871c7e78031ec74fef5f172e2a37f51b
3,658,122
def get_candidates_from_single_line(single_line_address, out_spatial_reference, max_locations): """ parses the single line address and passes it to the AGRC geocoding service and then returns the results as an array of candidates """ try: parsed_address = Address(single_line_address) except Exception: return [] return make_request( parsed_address.normalized, parsed_address.zip_code or parsed_address.city, out_spatial_reference, max_locations )
a2e4c68dc5a27dea98951bfe61c5e10ff887091a
3,658,123
def create_store(): """Gathers all the necessary info to create a new store""" print("What is the name of the store?") store_name = input('> ') return receipt.Store(store_name)
a9b78c73712b9ec3ed39f5851b970aa97e5d3575
3,658,124
def vgg11_bn_vib(cutting_layer, logger, num_client = 1, num_class = 10, initialize_different = False, adds_bottleneck = False, bottleneck_option = "C8S1"): """VGG 11-layer model (configuration "A") with batch normalization""" return VGG_vib(make_layers(cutting_layer,cfg['A'], batch_norm=True, adds_bottleneck = adds_bottleneck, bottleneck_option = bottleneck_option), logger, num_client = num_client, num_class = num_class, initialize_different = initialize_different)
bf893f1e720aae92275abc961675a83c507425ee
3,658,125
from pathlib import Path def get_conf_paths(project_metadata): """ Get conf paths using the default kedro patterns, and the CONF_ROOT directory set in the projects settings.py """ configure_project(project_metadata.package_name) session = KedroSession.create(project_metadata.package_name) _activate_session(session, force=True) context = session.load_context() pats = ("catalog*", "catalog*/**", "**/catalog*") conf_paths = context.config_loader._lookup_config_filepaths(Path(context.config_loader.conf_paths[0]), pats, set()) return conf_paths
f001dc7d6991c57f32afb3d8d6e607d24bfd61cd
3,658,126
import numpy import ctypes def _mat_ptrs(a): """Creates an array of pointers to matrices Args: a: A batch of matrices on GPU Returns: GPU array of pointers to matrices """ return cuda.to_gpu(numpy.arange( a.ptr, a.ptr + a.shape[0] * a.strides[0], a.strides[0], dtype=ctypes.c_void_p))
1b56c7b9cbc368612fb0f0f7ecd647b5045773a2
3,658,127
def file_upload_quota_broken(request): """ You can't change handlers after reading FILES; this view shouldn't work. """ response = file_upload_echo(request) request.upload_handlers.insert(0, QuotaUploadHandler()) return response
ed9dab36b4f67a58e90542411474da733887f4b4
3,658,128
def create_LED_indicator_rect(**kwargs) -> QPushButton: """ Useful kwargs: text: str, icon: QIcon, checked: bool, parent checked=False -> LED red checked=True -> LED green """ button = QPushButton(checkable=True, enabled=False, **kwargs) button.setStyleSheet(SS_LED_INDICATOR_RECT) return button
3323a225b3f9ac6e687bb3a3d1c5f9d6a4459384
3,658,129
import os def get_current_version_name(): """Returns the version of the current instance. If this is version "v1" of module "module5" for app "my-app", this function will return "v1". """ return os.environ['CURRENT_VERSION_ID'].split('.')[0]
cbd7fdbb9af4990e32130f2aa3af0cfe8bf59816
3,658,130
def getAlignments(infile): """ read a PSL file and return a list of PslRow objects """ psls = [] with open(infile, 'r') as f: for psl in readPsls(f): psls.append(psl) return psls
00d6c0c4e44dd3de46c3bc7f38d40fd169311164
3,658,131
import numpy def get_ring_kernel(zs,Rs): """Represents the potential influence due to a line charge density a distance *delta_z* away, at which the azimuthally symmetric charge distribution has a radius *R*.""" Logger.write('Computing ring kernels over %i x %i points...'%((len(zs),)*2)) #Form index enumerations diag_inds=numpy.diag_indices(len(zs)) triud_inds=numpy.triu_indices(len(zs),k=0) triu_inds=numpy.triu_indices(len(zs),k=1) #upper triangle tril_inds=[triu_inds[1],triu_inds[0]] #lower triangle global den1,den2 K=numpy.zeros((len(zs),)*2,dtype=numpy.float) #position "2" corresponds to test charge (rows) #position "1" corresponds to origin of field (columns) zs2=zs.reshape((len(zs),1)); zs1=zs.reshape((1,len(zs))) Rs2=Rs.reshape((len(zs),1)); Rs1=Rs.reshape((1,len(zs))) dr2=(Rs1-Rs2)**2 dz2=(zs1-zs2)**2 rmod2=(Rs1+Rs2)**2 den1=numpy.sqrt(dz2+dr2) dzs=list(numpy.diff(zs)); dzs=numpy.array(dzs+[dzs[-1]]) dRs=list(numpy.diff(Rs)); dRs=numpy.array(dRs+[dRs[-1]]) #fill in diagonal with non-vanishing separation, #proportional to geometric mean of z-bins and local radial difference den1[diag_inds]=numpy.sqrt(dRs**2+dzs**2) arg1=-(4*Rs1*Rs2)/den1**2 den2=numpy.sqrt(dz2+rmod2) arg2=+(4*Rs1*Rs2)/den2**2 #Get elliptic function values ellipk_triud=interp_ellipk(arg1[triud_inds]) ellipk2_triud=interp_ellipk(arg2[triud_inds]) K[triud_inds]=(ellipk_triud/den1[triud_inds]+\ ellipk2_triud/den2[triud_inds])/numpy.pi K[tril_inds]=K[triu_inds] return K
5cdbeb80c8658334245e4fa93f3acf9ac0f9dbc9
3,658,132
def dsdh_h(P, h, region = 0): """ Derivative of specific entropy [kJ kg / kg K kJ] w.r.t specific enthalpy at constant pressure""" if region == 0: region = idRegion_h(P, h) if region == 1: return region1.dsdh_h(P, h) elif region == 2: return region2.dsdh_h(P, h) elif region == 4: return region4.dsdh_h(P, h) else: return 0.000
0ecc9d783524873c2d8537e105c7d5e8814ec80c
3,658,133
import datetime def floor_datetime(dt, unit, n_units=1): """Floor a datetime to nearest n units. For example, if we want to floor to nearest three months, starting with 2016-05-06-yadda, it will go to 2016-04-01. Or, if starting with 2016-05-06-11:45:06 and rounding to nearest fifteen minutes, it will result in 2016-05-06-11:45:00. """ if unit == "years": new_year = dt.year - (dt.year - 1) % n_units return datetime.datetime(new_year, 1, 1, 0, 0, 0) elif unit == "months": new_month = dt.month - (dt.month - 1) % n_units return datetime.datetime(dt.year, new_month, 1, 0, 0, 0) elif unit == "weeks": _, isoweek, _ = dt.isocalendar() new_week = isoweek - (isoweek - 1) % n_units return datetime.datetime.strptime( "%d %02d 1" % (dt.year, new_week), "%Y %W %w" ) elif unit == "days": new_day = dt.day - (dt.day - 1) % n_units return datetime.datetime(dt.year, dt.month, new_day, 0, 0, 0) elif unit == "hours": new_hour = dt.hour - dt.hour % n_units return datetime.datetime(dt.year, dt.month, dt.day, new_hour, 0, 0) elif unit == "minutes": new_minute = dt.minute - dt.minute % n_units return datetime.datetime( dt.year, dt.month, dt.day, dt.hour, new_minute, 0 ) elif unit == "seconds": new_second = dt.second - dt.second % n_units return datetime.datetime( dt.year, dt.month, dt.day, dt.hour, dt.minute, new_second ) else: msg = "Unknown unit type {}".format(unit) raise ValueError(msg)
8c4b61b29bf9f254e2da46097e498834b54e960f
3,658,134
def get_dataset_descriptor(project_id, dataset_id): """Get the descriptor for the dataset with given identifier.""" try: dataset = api.datasets.get_dataset_descriptor( project_id=project_id, dataset_id=dataset_id ) if not dataset is None: return jsonify(dataset) except ValueError as ex: raise srv.InvalidRequest(str(ex)) raise srv.ResourceNotFound('unknown project \'' + project_id + '\' or dataset \'' + dataset_id + '\'')
776c7f72730f52e07cf33a6f6b4c7a949810323d
3,658,135
def pe41(): """ >>> pe41() 7652413 """ primes = Primes(1000000) for perm in permutations(range(7, 0, -1)): n = list_num(perm) if primes.is_prime(n): return n return -1
bec7969b96f617848f8771dc6d85faf4b01ea648
3,658,136
def transit_flag(body, time, nsigma=2.0): """Return a flag that indicates if times occured near transit of a celestial body. Parameters ---------- body : skyfield.starlib.Star Skyfield representation of a celestial body. time : np.ndarray[ntime,] Unix timestamps. nsigma : float Number of sigma to flag on either side of transit. Returns ------- flag : np.ndarray[ntime,] Boolean flag that is True if the times occur within nsigma of transit and False otherwise. """ time = np.atleast_1d(time) obs = ephemeris.chime # Create boolean flag flag = np.zeros(time.size, dtype=np.bool) # Find transit times transit_times = obs.transit_times( body, time[0] - 24.0 * 3600.0, time[-1] + 24.0 * 3600.0 ) # Loop over transit times for ttrans in transit_times: # Compute source coordinates sf_time = ephemeris.unix_to_skyfield_time(ttrans) pos = obs.skyfield_obs().at(sf_time).observe(body) alt = pos.apparent().altaz()[0] dec = pos.cirs_radec(sf_time)[1] # Make sure body is above horizon if alt.radians > 0.0: # Estimate the amount of time the body is in the primary beam # as +/- nsigma sigma, where sigma denotes the width of the # primary beam. We use the lowest frequency and E-W (or X) polarisation, # since this is the most conservative (largest sigma). window_deg = nsigma * cal_utils.guess_fwhm( 400.0, pol="X", dec=dec.radians, sigma=True ) window_sec = window_deg * 240.0 * ephemeris.SIDEREAL_S # Flag +/- window_sec around transit time begin = ttrans - window_sec end = ttrans + window_sec flag |= (time >= begin) & (time <= end) # Return boolean flag indicating times near transit return flag
271378e0a6558491f73968200fcb24ec694f8cbe
3,658,137
def _parse_port_ranges(pool_str): """Given a 'N-P,X-Y' description of port ranges, return a set of ints.""" ports = set() for range_str in pool_str.split(','): try: a, b = range_str.split('-', 1) start, end = int(a), int(b) except ValueError: log.error('Ignoring unparsable port range %r.', range_str) continue if start < 1 or end > 65535: log.error('Ignoring out of bounds port range %r.', range_str) continue ports.update(set(range(start, end + 1))) return ports
6926b326ea301f21e2282edda3bc16169ebe90b4
3,658,138
def get_flavors(): """ Get Nectar vm flavors in a dict with openstack_id as key """ fls = Flavor.query.all() results = [] for fl in fls: results.append(repack(fl.json(), {"name": "flavor_name"}, ["id"])) return array_to_dict(results)
005ce92fa46689ea639594fd5341f327dc04704d
3,658,139
def _HasTrafficChanges(args): """True iff any of the traffic flags are set.""" traffic_flags = ['to_revision', 'to_latest'] return _HasChanges(args, traffic_flags)
3d638195f86dc9f383c01c92d475ca90dc4fa60b
3,658,140
def find_credentials(account): """ fumction that check if a credentials exists with that username and return true or false """ return Credentials.find_credentialls(account)
dc59eec797d606854fa8a668b234a5eb61f8a0f8
3,658,141
import tokenize def enumerate_imports(tokens): """ Iterates over *tokens* and returns a list of all imported modules. .. note:: This ignores imports using the 'as' and 'from' keywords. """ imported_modules = [] import_line = False from_import = False for index, tok in enumerate(tokens): token_type = tok[0] token_string = tok[1] if token_type == tokenize.NEWLINE: import_line = False from_import = False elif token_string == "import": import_line = True elif token_string == "from": from_import = True elif import_line: if token_type == tokenize.NAME and tokens[index+1][1] != 'as': if not from_import: if token_string not in reserved_words: if token_string not in imported_modules: imported_modules.append(token_string) return imported_modules
0ee4921455899b036eb808262e183a6bc9017ccc
3,658,142
def solved(maze): """Checks if the maze was solved. The maze is solved, if there is no 3 to be found. Returns: True if the maze has no 3. """ # TODO: Extend this function to properly check for 3s inside the maze. return True
15b75435167c87f7e41480fee266416c084e7eb4
3,658,143
import re def safe_htcondor_attribute(attribute: str) -> str: """Convert input attribute name into a valid HTCondor attribute name HTCondor ClassAd attribute names consist only of alphanumeric characters or underscores. It is not clearly documented, but the alphanumeric characters are probably restricted to ASCII. Attribute names created from multiple words typically capitalize the first letter in each word for readability, although all comparisions are case-insensitive. e.g., "central-manager" -> "CentralManager" Args: attribute: a string representing the name of an attribute Returns: The attribute name stripped of invalid characters and re-capitalized in the manner typical of HTCondor ClassAd attributes. Raises: None """ # splitting by invalid characters removes them from the resulting array split_attr = re.split(r"[^\w]", attribute, flags=re.ASCII) safe_attr = "".join([word.capitalize() for word in split_attr if word]) return safe_attr
7a4dda539b2379120e68737d72a80226c45f5602
3,658,144
def focal_length_to_fov(focal_length, length): """Convert focal length to field-of-view (given length of screen)""" fov = 2 * np.arctan(length / (2 * focal_length)) return fov
2803de559943ce84620ac1130c099438ec1b4b12
3,658,145
def create_generic_connection(connection, verbose: bool = False): """ Generic Engine creation from connection object :param connection: JSON Schema connection model :param verbose: debugger or not :return: SQAlchemy Engine """ options = connection.connectionOptions if not options: options = ConnectionOptions() engine = create_engine( get_connection_url(connection), **options.dict(), connect_args=get_connection_args(connection), echo=verbose, ) return engine
d0e0ebd9e3b7ffb38ec8add13619ac6224d6760e
3,658,146
def make_csv(headers, data): """ Creates a CSV given a set of headers and a list of database query results :param headers: A list containg the first row of the CSV :param data: The list of query results from the Database :returns: A str containing a csv of the query results """ # Create a list where each entry is one row of the CSV file, starting # with the headers csvRows =[','.join(headers),] # Iterate through the provided data and create the rest of the CSV's rows for datum in data: currentRow = '' for header in headers: # Get this rows value for the given header val = getattr(datum, header) if type(val) is str: # Escape the strings currentRow += '"' + val + '",' elif type(val) is float: # Don't Escape the floats currentRow += str(val) + ',' else: # If it is empty and a place holder currentRow += ',' csvRows.append(currentRow[:-1]) # Combine all of the rows into a single single string and return it. return "\n".join(csvRows)
5101d53de8dd09d8ebe743d77d71bff9aeb26334
3,658,147
def draw_color_rect(buf,ix,iy,size,wrect,color): """ draw a square centerd on x,y filled with color """ code = """ int nd = %d; int x, y, i, j; int ny = 1 + 2 * nd; int nx = ny; y = iy - nd; if (y < 0) { ny += y; y = 0; } else if ((y + ny) > dimy) ny -= y + ny - dimy; x = ix - nd; if (x < 0) { nx += x; x = 0; } else if ((x + nx) > dimx) nx -= x + nx - dimx; int k = y * dimx * 3 + 3 * x; int deltak = 3 * (dimx - nx); for (i = 0;i < ny;i++) { for (j = 0;j < nx;j++) { #if 1 *(buf+k++) = color[0]; *(buf+k++) = color[1]; *(buf+k++) = color[2]; #else *(buf+k) = (*(buf+k) / 2) + (color[0] / 2); k++; *(buf+k) = (*(buf+k) / 2) + (color[1] / 2); k++; *(buf+k) = (*(buf+k) / 2) + (color[2] / 2); k++; #endif } k += deltak; } """ %wrect (dimx,dimy) = (size[0],size[1]) #ll lqprint "XX %d %d" %(ix,iy) if(ix < 0 or iy < 0 or ix >= dimx or iy >= dimy): return() weave.inline(code,['buf' ,'ix','iy','dimx','dimy','color'])
822bc77d1e6ccb4c802a4a3335c1bba55ba14f04
3,658,148
def _compute_focus_2d(image_2d, kernel_size): """Compute a pixel-wise focus metric for a 2-d image. Parameters ---------- image_2d : np.ndarray, np.float A 2-d image with shape (y, x). kernel_size : int The size of the square used to define the neighborhood of each pixel. An odd value is preferred. Returns ------- focus : np.ndarray, np.float64 A 2-d tensor with the R(y, x) computed for each pixel of the original image. """ # mean filtered image image_filtered_mean = mean_filter(image_2d, "square", kernel_size) # compute focus metric ratio_default = np.ones_like(image_2d, dtype=np.float64) ratio_1 = np.divide(image_2d, image_filtered_mean, out=ratio_default, where=image_filtered_mean > 0) ratio_2 = np.divide(image_filtered_mean, image_2d, out=ratio_default, where=image_2d > 0) focus = np.where(image_2d >= image_filtered_mean, ratio_1, ratio_2) return focus
67b139fdef8b6501a64699344d80b19012876f86
3,658,149
from typing import Tuple def extract_value_from_config( config: dict, keys: Tuple[str, ...], ): """ Traverse a config dictionary to get some hyper-parameter's value. Parameters ---------- config A config dictionary. keys The possible names of a hyper-parameter. Returns ------- The hyper-parameter value. """ result = [] for k, v in config.items(): if k in keys: result.append(v) elif isinstance(v, dict): result += extract_value_from_config(v, keys) else: pass return result
d545d4c9298c74776ec52fb6b2c8d54d0e653489
3,658,150
import numpy def boundaryStats(a): """ Returns the minimum and maximum values of a only on the boundaries of the array. """ amin = numpy.amin(a[0,:]) amin = min(amin, numpy.amin(a[1:,-1])) amin = min(amin, numpy.amin(a[-1,:-1])) amin = min(amin, numpy.amin(a[1:-1,0])) amax = numpy.amax(a[0,:]) amax = max(amax, numpy.amax(a[1:,-1])) amax = max(amax, numpy.amax(a[-1,:-1])) amax = max(amax, numpy.amax(a[1:-1,0])) return amin, amax
6c007c6cf2c7c5774ca74365be8f63094864d962
3,658,151
from operator import add def offset_func(func, offset, *args): """ Offsets inputs by offset >>> double = lambda x: x * 2 >>> f = offset_func(double, (10,)) >>> f(1) 22 >>> f(300) 620 """ def _offset(*args): args2 = list(map(add, args, offset)) return func(*args2) with ignoring(Exception): _offset.__name__ = 'offset_' + func.__name__ return _offset
16526bc8302444a97ea27eb6088fe15604d3cf9e
3,658,152
def get_redshift_schemas(cursor, user): """ Get all the Amazon Redshift schemas on which the user has create permissions """ get_schemas_sql = "SELECT s.schemaname " \ "FROM pg_user u " \ "CROSS JOIN " \ "(SELECT DISTINCT schemaname FROM pg_tables) s " \ "WHERE has_schema_privilege(u.usename,s.schemaname,'create') = true " \ "AND u.usename = '" + user + "' " \ "AND s.schemaname NOT LIKE '%pg_%' " \ "AND s.schemaname NOT LIKE '%information_schema%' ;" try: cursor.execute(get_schemas_sql) schemas = cursor.fetchall() except Exception as e: logger.error('Error in executing SQL: {}'.format(get_schemas_sql)) raise e return convert_to_list(schemas)
2833205f3e1b863fe8e5a18da723cf1676a65485
3,658,153
def window_features(idx, window_size=100, overlap=10): """ Generate indexes for a sliding window with overlap :param array idx: The indexes that need to be windowed. :param int window_size: The size of the window. :param int overlap: How much should each window overlap. :return array view: The indexes for the windows with overlap. """ overlap = window_size - overlap sh = (idx.size - window_size + 1, window_size) st = idx.strides * 2 view = np.lib.stride_tricks.as_strided(idx, strides=st, shape=sh)[0::overlap] return view
e10caae55424134a95c2085e5f54f73d81697e92
3,658,154
import os import json import subprocess def DetectVisualStudioPath(version_as_year): """Return path to the version_as_year of Visual Studio. """ year_to_version = { '2013': '12.0', '2015': '14.0', '2017': '15.0', '2019': '16.0', } if version_as_year not in year_to_version: raise Exception(('Visual Studio version %s (from version_as_year)' ' not supported. Supported versions are: %s') % ( version_as_year, ', '.join(year_to_version.keys()))) if version_as_year in ('2017', '2019'): # The VC++ 2017+ install location needs to be located using COM instead of # the registry. For details see: # https://blogs.msdn.microsoft.com/heaths/2016/09/15/changes-to-visual-studio-15-setup/ vswhere_path = os.path.expandvars(_VSWHERE_PATH) if os.path.exists(vswhere_path): version = year_to_version[version_as_year] try: out = json.loads(subprocess.check_output([ vswhere_path, '-version', '[{},{})'.format(float(version), float(version) + 1), '-legacy', '-format', 'json', '-utf8', ])) if out: return out[0]['installationPath'] except subprocess.CalledProcessError: pass root_path = r'C:\Program Files (x86)\Microsoft Visual Studio\\' + version_as_year for edition in ['Professional', 'Community', 'Enterprise', 'BuildTools']: path = os.environ.get('vs{}_install'.format(version_as_year), os.path.join(root_path, edition)) if os.path.exists(path): return path else: version = year_to_version[version_as_year] keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version, r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version] for key in keys: path = _RegistryGetValue(key, 'InstallDir') if not path: continue path = os.path.normpath(os.path.join(path, '..', '..')) return path raise Exception(('Visual Studio Version %s (from version_as_year)' ' not found.') % (version_as_year))
034b2650909fde750e29765fd61704248079c418
3,658,155
from datetime import datetime def create_suburbans_answer(from_code, to_code, for_date, limit=3): """ Creates yandex suburbans answer for date by stations codes :param from_code: `from` yandex station code :type from_code: str :param to_code: `to` yandex station code :type to_code: str :param for_date: date for which data should be received :type for_date: date :param limit: limit of segments in answer :type limit: int :return: tuple with `answer`, `is_tomorrow` and `is_error` data :rtype: tuple """ code, data = get_yandex_raw_data(from_code, to_code, for_date) if code != 200: return yandex_error_answer, False, True from_title = data["search"]["from"]["title"] to_title = data["search"]["to"]["title"] answer = "" for segment in data["segments"]: if len(answer.split("\n\n")) > limit: break if datetime_from_string(segment["departure"]) >= datetime.now(): answer += parse_yandex_segment(segment) if answer: answer = "<b>{0}</b> => <b>{1}</b>\n\n".format( from_title, to_title ) + answer is_tomorrow = False else: for_date = date.today() + timedelta(days=1) answer += create_suburbans_answer( from_code, to_code, for_date, limit=5 )[0] is_tomorrow = True return answer, is_tomorrow, False
47b34617fdcd9fe83d1c0973c420363c05b9f70c
3,658,156
def update_user(usr): """ Update user and return new data :param usr: :return object: """ user = session.query(User).filter_by(id=usr['uid']).first() user.username = usr['username'] user.first_name = usr['first_name'] user.last_name = usr['last_name'] user.email = usr['email'] session.commit() return user
d6c078c966443c609c29bb4ee046612c748bb192
3,658,157
from datetime import datetime def get_data(date_from=None, date_to=None, location=None): """Get covid data Retrieve covid data in pandas dataframe format with the time periods and countries provided. Parameters ---------- date_from : str, optional Start date of the data range with format 'YYYY-MM-DD'. By default 'None' is used to represent 7 days prior to today's date date_to : str, optional End date of data range with format 'YYYY-MM-DD'. By default 'None' is used to represent today's date location : list, optional List of target country names. By default 'None' is used for all countries. Returns ------- pandas.DataFrame Pandas dataframe of the selected covid data. Examples -------- >>> get_data(date_from="2022-01-01", date_to="2022-01-07", location=["Canada", "China"]) """ query = "@date_from <= date <= @date_to" url = "https://covid.ourworldindata.org/data/owid-covid-data.csv" if date_from is None: date_from = ( pd.to_datetime("today").normalize() - pd.to_timedelta(7, unit="d") ).strftime("%Y-%m-%d") if date_to is None: date_to = pd.to_datetime("today").normalize().strftime("%Y-%m-%d") try: date_from != datetime.strptime(date_from, "%Y-%m-%d").strftime("%Y-%m-%d") # raise ValueError except ValueError: raise ValueError( "Invalid argument value: date_from must be in format of YYYY-MM-DD. Also check if it is a valid date." ) except TypeError: raise TypeError( "Invalid argument type: date_from must be in string format of YYYY-MM-DD." ) try: date_to != datetime.strptime(date_to, "%Y-%m-%d").strftime("%Y-%m-%d") # raise ValueError except ValueError: raise ValueError( "Invalid argument value: date_to must be in format of YYYY-MM-DD. Also check if it is a valid date." ) except TypeError: raise TypeError( "Invalid argument type: date_to must be in string format of YYYY-MM-DD." ) error_msg = ( "Invalid values: date_from should be smaller or equal" " to date_to (or today's date if date_to is not specified)." ) if pd.to_datetime(date_to) < pd.to_datetime(date_from): raise ValueError( error_msg, ) if pd.to_datetime(date_to) > pd.to_datetime("today").normalize(): raise ValueError("Invalid values: date_to should be smaller or equal to today.") if location is not None: if not (isinstance(location, list)): raise TypeError( "Invalid argument type: location must be a list of strings." ) for item in location: if not (isinstance(item, str)): raise TypeError( "Invalid argument type: values inside location list must be a strings." ) query += " and location in @location" try: covid_df = pd.read_csv(url, parse_dates=["date"]) except BaseException: return "The link to the data is broken." covid_df = covid_df.query(query) covid_df = covid_df[~covid_df["iso_code"].str.startswith("OWID")] return covid_df
14067432e5b6d51b60312707cc817acbe904ef0b
3,658,158
from typing import List from typing import Dict from typing import Any def group_by_lambda(array: List[dict], func: GroupFunc) -> Dict[Any, List[dict]]: """ Convert list of objects to dict of list of object when key of dict is generated by func. Example:: grouped = group_by_lambda(detections, lambda x: x.get(DEVICE_ID)) :param array: list of objects to group :param func: give object as param and return key or None, when key is None then object will be excluded The ``func(obj, ret)`` callback provided as arg: Args: * ``obj``: next element from ``array`` * ``ret``: dictionary of just grouped objects Return effect: * ``None``: object will not be added anywhere * *some value* : object will be append to array in *some value* key Note: there are some wrappers for this functions like ``group_by_device_id()``, ``group_by_timestamp_division()``, ``group_by_timestamp_division()``, ``group_by_resolution()``. :return: dict of list of object """ ret = {} for o in array: key = func(o, ret) if key is None: continue os = get_and_set(ret, key, []) os.append(o) return ret
a92733a21b5e6e932be6d95ff79939ca26e3d429
3,658,159
def update(isamAppliance, is_primary, interface, remote, port, health_check_interval, health_check_timeout, check_mode=False, force=False): """ Updating HA configuration """ # Call to check function to see if configuration already exist update_required = _check_enable(isamAppliance, is_primary=is_primary, interface=interface, remote=remote, port=port, health_check_interval=health_check_interval, health_check_timeout=health_check_timeout) if force is True or update_required is True: if check_mode is True: return isamAppliance.create_return_object(changed=True) else: return isamAppliance.invoke_put("Updating HA configuration", module_uri, { "is_primary": is_primary, "interface": interface, "remote": remote, "port": port, "health_check_interval": health_check_interval, "health_check_timeout": health_check_timeout }, requires_modules=requires_module, requires_version=requires_version) else: return isamAppliance.create_return_object()
b4da64648a46e30e7220d308266e4c4cc68e25ff
3,658,160
import scipy import numpy def compare_images(image_file_name1, image_file_name2, no_print=True): """ Compare two images by calculating Manhattan and Zero norms """ # Source: http://stackoverflow.com/questions/189943/ # how-can-i-quantify-difference-between-two-images img1 = imread(image_file_name1).astype(float) img2 = imread(image_file_name2).astype(float) if img1.size != img2.size: m_norm, z_norm = 2*[2*IMGTOL] else: # Element-wise for Scipy arrays diff = img1-img2 # Manhattan norm m_norm = scipy.sum(numpy.abs(diff)) # Zero norm z_norm = scipy.linalg.norm(diff.ravel(), 0) result = bool((m_norm < IMGTOL) and (z_norm < IMGTOL)) if not no_print: print( 'Image 1: {0}, Image 2: {1} -> ({2}, {3}) [{4}]'.format( image_file_name1, image_file_name2, m_norm, z_norm, result ) ) return result
c554750ae94b5925d283e0a9d8ff198e51abe29b
3,658,161
def prepare_update_mutation_classes(): """ Here it's preparing actual mutation classes for each model. :return: A tuple of all mutation classes """ _models = get_enabled_app_models() _classes = [] for m in _models: _attrs = prepare_update_mutation_class_attributes(model=m) # Creating a fake base class for making mutate properly. _base_class = class_factory(__class_name='Update' + m.__name__, base_classes=(Mutation,), **_attrs) _attrs.update(mutate=prepare_update_mutate(model=m, _mutation_class=_base_class)) _class = class_factory(__class_name='Update' + m.__name__, base_classes=(_base_class,), **_attrs) _classes.append(_class) return tuple(_classes)
27e450ea81000e81ebbf33db5d860c9a6b0adb23
3,658,162
from operator import le def makePacket(ID, instr, reg=None, params=None): """ This makes a generic packet. TODO: look a struct ... does that add value using it? 0xFF, 0xFF, 0xFD, 0x00, ID, LEN_L, LEN_H, INST, PARAM 1, PARAM 2, ..., PARAM N, CRC_L, CRC_H] in: ID - servo id instr - instruction reg - register params - instruction parameter values out: packet """ pkt = [] pkt += HEADER # header and reserved byte pkt += [ID] pkt += [0x00, 0x00] # length placeholder pkt += [instr] # instruction if reg: pkt += le(reg) # not everything has a register if params: pkt += params # not everything has parameters length = le(len(pkt) - 5) # length = len(packet) - (header(3), reserve(1), id(1)) pkt[5] = length[0] # L pkt[6] = length[1] # H crc = crc16(pkt) pkt += le(crc) print(pkt) return pkt
6553e5a62e22c9ad434b69e7f1e38060bd79e7e1
3,658,163
def vision_matched_template_get_pose(template_match): """ Get the pose of a previously detected template match. Use list operations to get specific entries, otherwise returns value of first entry. Parameters: template_match (List[MatchedTemplate3D] or MatchedTemplate3D): The template match(s) Return (Pose): The pose of the template match """ if isinstance(template_match,list): template_match = template_match[0] return template_match.pose.pose.pose
b854da7a085934f4f3aba510e76852fb8c0a440a
3,658,164
def create_rotor(model, ring_setting=0): """Factory function to create and return a rotor of the given model name.""" if model in ROTORS: data = ROTORS[model] return Rotor(model, data['wiring'], ring_setting, data['stepping']) raise RotorError("Unknown rotor type: %s" % model)
193ab444c8b5527360498cb1c8911194f04742a3
3,658,165
def get_description(sequence, xrefs, taxid=None): """ Compute a description for the given sequence and optional taxon id. This function will use the rule scoring if possible, otherwise it will fall back to the previous scoring method. In addition, if the rule method cannot produce a name it also falls back to the previous method. Providing a taxon id means to create a species name that is specific for the sequence in the given organism, otherwise one is created that is general for all species that this sequence is found in. Parameters ---------- sequence : Rna The sequence to generate a name for. taxid : int, None The taxon id to use Returns ------- description : str The description of this sequence. """ logger.debug("Computing get_description for %s (%s)", sequence.upi, taxid) name = _rm.get_description(sequence, xrefs, taxid=taxid) if not name: logger.debug("New style method failed, using score") return name or _sm.get_description(sequence, taxid=taxid)
7885b3b7b2678f2ed3c80244ae07d106df9712f1
3,658,166
def compute_ess(samples): """Compute an estimate of the effective sample size (ESS). See the [Stan manual](https://mc-stan.org/docs/2_18/reference-manual/effective-sample-size-section.html) for a definition of the effective sample size in the context of MCMC. Args: samples: Tensor, vector (n,), float32 of n sequential observations. Returns: ess: float, effective sample size, >= 1, <= n. efficiency: float, >= 0.0, the relative efficiency obtained compared to the naive Monte Carlo estimate which has an efficiency of one. """ ess, efficiency = compute_ess_multidimensional( tf.reshape(samples, (1, tf.size(samples)))) ess = ess[0] efficiency = efficiency[0] return ess, efficiency
8330c4f6efb4b23c5a25be18d29c07e946731716
3,658,167
import time def uptime(): """Returns uptime in milliseconds, starting at first call""" if not hasattr(uptime, "t0"): uptime.t0 = time.time() return int((time.time() - uptime.t0)*1000)
ff8dbe459cf7f349741cc8ac85b12e4d1dd88135
3,658,168
def load_plot(axis, plot, x_vals, y1=None, y2=None, y3=None, y4=None, title="", xlab="", ylab="", ltype=[1, 1, 1, 1], marker=['g-', 'r-', 'b-', 'k--']): """ Function to load the matplotlib plots. :param matplotlib.Axis axis: the matplotlib axis object. :param matplotlib.FigureCanvas plot: the matplotlib plot object. :param list x_vals: list of the x values to plot. :keyword float y1: list of the first data set y values to plot. :keyword float y2: list of the second data set y values to plot. :keyword float y3: list of the third data set y values to plot. :keyword float y4: list of the fourth data set y values to plot. :keyword str title: the title for the plot. :keyword str xlab: the x axis label for the plot. :keyword str ylab: the y axis label for the plot. :keyword int ltype: list of the type of line to plot. Options are: 1 = step 2 = plot 3 = histogram 4 = date plot :keyword str marker: list of the markers to use on the plot. Defaults are: g- = green solid line r- = red solid line b- = blue solid line k- = black dashed line :return: False if successful or True if an error is encountered. :rtype: bool """ # WARNING: Refactor load_plot; current McCabe Complexity metric=23. axis.cla() axis.grid(True, which='both') _x_min = min(x_vals) _x_max = max(x_vals) _y_min = 0.0 _lst_min = [0.0] _lst_max = [] if y1 is not None: if ltype[0] == 1: line, = axis.step(x_vals, y1, marker[0], where='mid') line.set_ydata(y1) _lst_min.append(min(y1)) _lst_max.append(max(y1)) elif ltype[0] == 2: line, = axis.plot(x_vals, y1, marker[0], linewidth=2) line.set_ydata(y1) _lst_min.append(min(y1)) _lst_max.append(max(y1)) elif ltype[0] == 3: axis.grid(False, which='both') _values, _edges, __patches = axis.hist(x_vals, bins=y1, color=marker[0]) _x_min = min(_edges) _x_max = max(_edges) _lst_min.append(min(_values)) _lst_max.append(max(_values) + 1) elif ltype[0] == 4: line, = axis.plot_date(x_vals, y1, marker[0], xdate=True, linewidth=2) _lst_min.append(min(y1)) _lst_max.append(max(y1)) _y_min = min(y1) if y2 is not None: if ltype[1] == 1: line2, = axis.step(x_vals, y2, marker[1], where='mid') line2.set_ydata(y2) _lst_min.append(min(y2)) _lst_max.append(max(y2)) elif ltype[1] == 2: line2, = axis.plot(x_vals, y2, marker[1], linewidth=2) line2.set_ydata(y2) _lst_min.append(min(y2)) _lst_max.append(max(y2)) elif ltype[1] == 3: axis.grid(False, which='both') _values, _edges, __patches = axis.hist(x_vals, bins=len(y2), color=marker[1]) _x_min = min(_edges) _x_max = max(_edges) _lst_min.append(min(_values)) _lst_max.append(max(_values) + 1) elif ltype[1] == 4: line2, = axis.plot_date(x_vals, y2, marker[1], xdate=True, linewidth=2) _lst_min.append(min(y2)) _lst_max.append(max(y2)) _y_min = min(y2) if y3 is not None: if ltype[2] == 1: line3, = axis.step(x_vals, y3, marker[2], where='mid') line3.set_ydata(y3) _lst_min.append(min(y3)) _lst_max.append(max(y3)) elif ltype[2] == 2: line3, = axis.plot(x_vals, y3, marker[2], linewidth=2) line3.set_ydata(y3) _lst_min.append(min(y3)) _lst_max.append(max(y3)) elif ltype[2] == 3: axis.grid(False, which='both') _values, _edges, __patches = axis.hist(x_vals, bins=len(y3), color=marker[2]) _x_min = min(_edges) _x_max = max(_edges) _lst_min.append(min(_values)) _lst_max.append(max(_values) + 1) elif ltype[2] == 4: line3, = axis.plot_date(x_vals, y3, marker[2], xdate=True, linewidth=2) _lst_min.append(min(y3)) _lst_max.append(max(y3)) _y_min = min(y3) if y4 is not None: if ltype[3] == 1: line4, = axis.step(x_vals, y4, marker[3], where='mid') line4.set_ydata(y4) _lst_min.append(min(y4)) _lst_max.append(max(y4)) elif ltype[3] == 2: line4, = axis.plot(x_vals, y4, marker[3], linewidth=2) line4.set_ydata(y4) _lst_min.append(min(y4)) _lst_max.append(max(y4)) elif ltype[3] == 3: axis.grid(False, which='both') _values, _edges, __patches = axis.hist(x_vals, bins=len(y4), color=marker[3]) _x_min = min(_edges) _x_max = max(_edges) _lst_min.append(min(_values)) _lst_max.append(max(_values) + 1) elif ltype[3] == 4: line4, = axis.plot_date(x_vals, y4, marker[3], xdate=True, linewidth=2) _lst_min.append(min(y4)) _lst_max.append(max(y4)) _y_min = min(y4) axis.set_title(title, {'fontsize': 16, 'fontweight': 'bold', 'verticalalignment': 'baseline', 'horizontalalignment': 'center'}) # Set the x-axis label. _x_pos = (_x_max - _x_min) / 2.0 _y_pos = _y_min - 0.65 axis.set_xlabel(xlab, {'fontsize': 14, 'fontweight': 'bold', 'verticalalignment': 'center', 'horizontalalignment': 'center', 'x': _x_pos, 'y': _y_pos}) # Set the y-axis label. axis.set_ylabel(ylab, {'fontsize': 14, 'fontweight': 'bold', 'verticalalignment': 'center', 'horizontalalignment': 'center', 'rotation': 'vertical'}) # Get the minimum and maximum y-values to set the axis bounds. If the # maximum value is infinity, use the next largest value and so forth. _min = min(_lst_min) _max = _lst_max[0] for i in range(1, len(_lst_max)): if _max < _lst_max[i] and _lst_max[i] != float('inf'): _max = _lst_max[i] axis.set_ybound(_min, _max) plot.draw() return False
ad7499f357349fde12537c6ceeb061bf6163709d
3,658,169
def _optimize_loop_axis(dim): """ Chooses kernel parameters including CUDA block size, grid size, and number of elements to compute per thread for the loop axis. The loop axis is the axis of the tensor for which a thread can compute multiple outputs. Uses a simple heuristic which tries to get at least 4 warps per block and 8 items per thread to hide latencies. Prefers a higher item-per-thread to launching many blocks for very large axes since blocks are serialized by the GPU after all SMs are filled. Arguments: dim (int): Size of the tensor on the loop axis. Returns: tuple of grid dimension, block dimension, and items per thread """ sm_count = _get_sm_count() griddim = min(sm_count, -((-dim) // 32)) items_per_block = -((-dim) // griddim) items_per_thread = 1 warps = -((-items_per_block) // (32 * items_per_thread)) while (warps > 4 and items_per_thread < 8) or (warps > 32): items_per_thread = items_per_thread + 1 warps = -((-items_per_block) // (32 * items_per_thread)) blockdim = warps * 32 return (griddim, blockdim, items_per_thread)
8f3e77cc772dcf848de76328832c0546a68c1f09
3,658,170
def no_zero(t): """ This function replaces all zeros in a tensor with ones. This allows us to take the logarithm and then sum over all values in the matrix. Args: t: tensor to be replaced returns: t: tensor with ones instead of zeros. """ t[t==0] = 1. return t
8119d1859dc8b248f5bb09b7cc0fc3b492d9b7bd
3,658,171
def php_implode(*args): """ >>> array = Array('lastname', 'email', 'phone') >>> php_implode(",", array) 'lastname,email,phone' >>> php_implode('hello', Array()) '' """ if len(args) == 1: assert isinstance(args[0], Array) return "".join(str(x) for x in args[0].values()) assert len(args) == 2 assert (isinstance(args[0], str) and isinstance(args[1], Array)) or \ (isinstance(args[1], str) and isinstance(args[0], Array)) _glue = args[0] if isinstance(args[0], str) else args[1] _array = args[1] if isinstance(args[1], Array) else args[0] return _glue.join([str(x) for x in _array.values()])
6f7c49ed340610c290d534a0c0edccd920a1e46e
3,658,172
import math def make_incompressible(velocity: Grid, domain: Domain, obstacles: tuple or list = (), solve_params: math.LinearSolve = math.LinearSolve(None, 1e-3), pressure_guess: CenteredGrid = None): """ Projects the given velocity field by solving for the pressure and subtracting its spatial_gradient. This method is similar to :func:`field.divergence_free()` but differs in how the boundary conditions are specified. Args: velocity: Vector field sampled on a grid domain: Used to specify boundary conditions obstacles: List of Obstacles to specify boundary conditions inside the domain (Default value = ()) pressure_guess: Initial guess for the pressure solve solve_params: Parameters for the pressure solve Returns: velocity: divergence-free velocity of type `type(velocity)` pressure: solved pressure field, `CenteredGrid` iterations: Number of iterations required to solve for the pressure divergence: divergence field of input velocity, `CenteredGrid` """ input_velocity = velocity active = domain.grid(HardGeometryMask(~union(*[obstacle.geometry for obstacle in obstacles])), extrapolation=domain.boundaries['active_extrapolation']) accessible = domain.grid(active, extrapolation=domain.boundaries['accessible_extrapolation']) hard_bcs = field.stagger(accessible, math.minimum, domain.boundaries['accessible_extrapolation'], type=type(velocity)) velocity = layer_obstacle_velocities(velocity * hard_bcs, obstacles).with_(extrapolation=domain.boundaries['near_vector_extrapolation']) div = divergence(velocity) if domain.boundaries['near_vector_extrapolation'] == math.extrapolation.BOUNDARY: div -= field.mean(div) # Solve pressure def laplace(p): grad = spatial_gradient(p, type(velocity)) grad *= hard_bcs grad = grad.with_(extrapolation=domain.boundaries['near_vector_extrapolation']) div = divergence(grad) lap = where(active, div, p) return lap pressure_guess = pressure_guess if pressure_guess is not None else domain.scalar_grid(0) converged, pressure, iterations = field.solve(laplace, y=div, x0=pressure_guess, solve_params=solve_params, constants=[active, hard_bcs]) if math.all_available(converged) and not math.all(converged): raise AssertionError(f"pressure solve did not converge after {iterations} iterations\nResult: {pressure.values}") # Subtract grad pressure gradp = field.spatial_gradient(pressure, type=type(velocity)) * hard_bcs velocity = (velocity - gradp).with_(extrapolation=input_velocity.extrapolation) return velocity, pressure, iterations, div
73904675b5d0c5b74bd13c029b52f7a6592eddac
3,658,173
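Written out as equations (independent of any particular library), the pressure projection performed by make_incompressible above is the usual Chorin-style step: solve a Poisson equation for the pressure from the divergence of the intermediate velocity, then subtract the pressure gradient,

\nabla^2 p = \nabla \cdot \mathbf{u}^*, \qquad \mathbf{u} = \mathbf{u}^* - \nabla p,

where u* is the velocity before projection and u the divergence-free result; the laplace closure restricts the gradient to accessible cells via the hard boundary-condition mask.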
from datetime import datetime
import os
import shutil


def create_and_empty_dir(tdir, label, suffix=datetime.now().strftime("%Y%m%d%H%M%S"), sep='__', simulation=False):
    """
    Tests if directory exists, if not creates it.
    If yes, tests if readable/writable.
    Returns True if new directory created,
    False if already existed and emptied (keeping directory, not contents)

    :param tdir: path to directory to be tested/created
    :type tdir: str
    :param label: label for type of directory being tested/created
    :type label: str
    :param suffix: string appended as a suffix to the old directory name
    :type suffix: str
    :param sep: string used to separate the suffix from the original name
    :type sep: str
    :param simulation: True if simulation only (no changes to be made), False if commands should be executed
    :type simulation: bool
    :rtype: bool
    """
    if os.path.isdir(tdir):
        if not os.access(tdir, os.W_OK):
            msg = "Cannot write to pipeline {l} directory '{d}'".format(l=label, d=tdir)
            log.critical(msg)
            raise ONEFluxPipelineError(msg)
        if not os.access(tdir, os.R_OK):
            msg = "Cannot read from pipeline {l} directory '{d}'".format(l=label, d=tdir)
            log.critical(msg)
            raise ONEFluxPipelineError(msg)
        new_tdir = tdir + sep + suffix
        if not simulation:
            shutil.rmtree(path=tdir, ignore_errors=False, onerror=None)
            os.makedirs(new_tdir)
            os.makedirs(tdir)
            log.debug("Pipeline {l} moved EMPTY directory '{o}' to '{n}'".format(l=label, o=tdir, n=new_tdir))
            log.debug("Created '{d}'".format(d=tdir))
        return False
    else:
        if not simulation:
            os.makedirs(tdir)
            log.debug("Created '{d}'".format(d=tdir))
        return True
4639c88fbd571c839288e527e229ed122e7f159f
3,658,174
from datetime import datetime, timedelta
import subprocess

import xarray


def no_source(
    time: datetime, glat: float, glon: float, Nbins: int, Talt: float, Thot: float
) -> xarray.Dataset:
    """testing only, may give non-physical results"""
    # glowdate, get_exe, glowread and the geomagnetic-index helper `gi`
    # are module-level helpers from the surrounding package.
    idate, utsec = glowdate(time)

    ip = gi.get_indices([time - timedelta(days=1), time], 81)

    cmd = [
        str(get_exe()),
        idate,
        utsec,
        str(glat),
        str(glon),
        str(ip["f107s"][1]),
        str(ip["f107"][1]),
        str(ip["f107"][0]),
        str(ip["Ap"][1]),
        "-nosource",
        str(Nbins),
        str(Talt),
        str(Thot),
    ]

    dat = subprocess.check_output(cmd, timeout=15, stderr=subprocess.DEVNULL, text=True)

    return glowread(dat, time, ip, glat, glon)
074ce675dac3c31fb2d750de053dd509dc928a6d
3,658,175
def reduce_to_1D(ds, latitude_range,
                 latitude_name='Latitude',
                 time_mean=True,
                 time_name='Time'):
    """Reduce data over latitude to a 0/1 indicator and (optionally) its time mean.

    Selects all latitudes within latitude_range, sums over the latitude
    dimension, converts the sums to a binary indicator (1 where the sum is
    positive), and, if time_mean is True, averages that indicator over time.

    Parameters
    ----------
    ds : xarray.Dataset or xarray.DataArray
    latitude_range : float or list of one or two floats
    latitude_name : str, optional
    time_mean : bool, optional
    time_name : str, optional
    """
    if isinstance(latitude_range, (int, float)):
        latitude_range = [latitude_range, latitude_range]
    elif len(latitude_range) == 1:
        latitude_range = [latitude_range[0], latitude_range[0]]
    elif len(latitude_range) != 2:
        errmsg = ' '.join(['latitude_range has to be float or list of',
                           'one or two floats and not {}']).format(
                               latitude_range)
        raise ValueError(errmsg)

    lats = ds[latitude_name].data
    lats = lats[(lats >= latitude_range[0]) & (lats <= latitude_range[1])]
    ds = ds.sel(**{latitude_name: lats})
    ds = (ds.sum(latitude_name) > 0).astype(int)

    if time_mean:
        ds = ds.mean(time_name)
    return ds
26d1bee437bffe66017fa8f9e3c03856b87d8b16
3,658,176
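A hedged usage sketch for reduce_to_1D above; the dataset is a made-up 0/1 occurrence flag on (Time, Latitude), purely to show the calling convention:

import numpy as np
import xarray as xr

flag = np.array([[1, 0, 0, 1],
                 [0, 0, 0, 0],
                 [1, 1, 0, 0]])
ds = xr.Dataset({"flag": (("Time", "Latitude"), flag)},
                coords={"Time": [0, 1, 2],
                        "Latitude": [-10.0, 0.0, 10.0, 20.0]})

# Fraction of time steps with at least one occurrence between 0 and 20 degrees.
out = reduce_to_1D(ds, [0.0, 20.0])
print(float(out["flag"]))   # 0.666... (2 of 3 time steps)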
def get_y_generator_method(x_axis, y_axis):
    """Return the y-value generator method for the given x- and y-axis pair.

    Arguments:
    x_axis -- an instance of an XAxis class
    y_axis -- an instance of a YAxis class

    Returns:
    A reference to the y-value generator method. Raises ValueError if the
    axes cannot be paired or the y-axis does not define the method.

    """
    try:
        method_name = AXIS_PAIRS[x_axis.slug][y_axis.slug]
    except KeyError:
        raise ValueError("A %(x)s x-axis cannot be paired with a %(y)s y-axis" % {
            'x': x_axis.__class__.name,
            'y': y_axis.__class__.name
        })
    y_method = getattr(y_axis, method_name, None)
    if not y_method:
        raise ValueError("No method named '%(method)s' exists for the %(axis)s y-axis" % {
            'method': method_name,
            'axis': y_axis.__class__.name
        })
    return y_method
ab0f43743c91cfe9f51e8da3fe976f8c554af5c8
3,658,177
def generate_filename(table_type, table_format): """Generate the table's filename given its type and file format.""" ext = TABLE_FORMATS[table_format] return f'EIA_MER_{table_type}.{ext}'
076ef1e77cf4ec3c1be4fb602e5a1972eb75e826
3,658,178
import numpy as np


def rescale_coords(df, session_epochs, maze_size_cm):
    """
    rescale xy coordinates of each epoch into cm
    note: automatically detects linear track by x to y ratio

    input:
    df: [ts,x,y] pandas data frame
    session_epochs: nelpy epoch class with epoch times
    maze_size_cm: list with size of maze in cm for each epoch

    output:
    df: rescaled df
    """
    # `rescale` is a helper from the surrounding module.
    for i, val in enumerate(session_epochs.data):
        temp_df = df[df['ts'].between(val[0], val[1])]

        x_range = np.nanmax(temp_df.x) - np.nanmin(temp_df.x)
        y_range = np.nanmax(temp_df.y) - np.nanmin(temp_df.y)

        x_y_ratio = x_range / y_range

        # if the ratio of x to y is > 5, it is probably a linear track
        if x_y_ratio > 5:
            df.loc[df['ts'].between(val[0], val[1]), 'x'] = rescale(temp_df.x, 0, maze_size_cm[i])
            df.loc[df['ts'].between(val[0], val[1]), 'y'] = rescale(temp_df.y, 0, maze_size_cm[i] / x_y_ratio)
        else:
            df.loc[df['ts'].between(val[0], val[1]), 'x'] = rescale(temp_df.x, 0, maze_size_cm[i])
            df.loc[df['ts'].between(val[0], val[1]), 'y'] = rescale(temp_df.y, 0, maze_size_cm[i])
    return df
49da12dca1e3b7e30bf909a73505a129941bd3db
3,658,179
def newsreader_debug():
    """Given a query, return the news check result in debug mode."""
    query = request.args.get('query')
    if query is None:
        return 'No query provided.', 400
    result = SL.news_check(query, debug=True)
    if result is None:
        return 'not found : %s' % query, 400
    return result, 200
e5f16ed2d4253d734ce23e4b6eaf7fce3c5dbcbb
3,658,180
def get_vocabulary(query_tree): """Extracts the normalized search terms from the leaf nodes of a parsed query to construct the vocabulary for the text vectorization. Arguments --------- query_tree: pythonds.trees.BinaryTree The binary tree object representing a parsed search query. Each leaf node is a search term and internal nodes represent boolean operations. See parse_query() for details. Returns ------- vocabulary: list List of strings representing unique normalized search terms. """ def _getleafnodes(node): terms = [] if node.isLeaf(): return terms + [node.normedterm] elif node.leftChild and not node.rightChild: return terms + _getleafnodes(node.getLeftChild()) elif node.rightChild and not node.leftChild: return terms + _getleafnodes(node.getRightChild()) else: # has two children return terms + _getleafnodes(node.getLeftChild()) \ + _getleafnodes(node.getRightChild()) # extract terms from the leaf nodes of the query object. terms = _getleafnodes(query_tree) # remove duplicates. vocabulary = list(set(terms)) return vocabulary
bd03f4894cd3f9a7964196bfb163335f84a048d7
3,658,181
def pubkey_to_address(pubkey): """Convert a public key (in hex) to a Bitcoin address""" return bin_to_b58check(hash_160(changebase(pubkey, 16, 256)))
bbfbe40346681a12d8b71ce8df6ef8670eb3e424
3,658,182
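pubkey_to_address above leans on helpers from a Bitcoin utility library (hash_160, changebase, bin_to_b58check). For reference, here is a self-contained sketch of the same hash160 + Base58Check pipeline using only the standard library; the mainnet P2PKH version byte 0x00 is an assumption, and hashlib's ripemd160 availability depends on the local OpenSSL build:

import hashlib

B58 = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

def b58check_encode(payload: bytes) -> str:
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    data = payload + checksum
    n = int.from_bytes(data, "big")
    out = ""
    while n > 0:
        n, rem = divmod(n, 58)
        out = B58[rem] + out
    pad = len(data) - len(data.lstrip(b"\x00"))   # each leading zero byte -> '1'
    return "1" * pad + out

def pubkey_hex_to_address(pubkey_hex: str) -> str:
    pubkey = bytes.fromhex(pubkey_hex)
    h160 = hashlib.new("ripemd160", hashlib.sha256(pubkey).digest()).digest()
    return b58check_encode(b"\x00" + h160)        # 0x00 = mainnet P2PKH version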
def transpose(A):
    """
    Matrix transposition

    :type A: list
    :param A: a list of lists representing a matrix A

    :rtype: list
    :return: a list of lists representing the transpose of matrix A

    Example:
    --------
    >>> A = [[0, -4, 4], [-3, -2, 0]]
    >>> print(transpose(A))
    [[0.0, -3.0], [-4.0, -2.0], [4.0, 0.0]]
    """
    # TODO: write exceptions to help user with errors from the backend
    return Clinear_algebra.transpose(A)
0887166ac0f34d338bec1d95972667100403f26a
3,658,183
def find_point_in_section_list(point, section_list): """Returns the start of the section the given point belongs to. The given list is assumed to contain start points of consecutive sections, except for the final point, assumed to be the end point of the last section. For example, the list [5, 8, 30, 31] is interpreted as the following list of sections: [5-8), [8-30), [30-31], so the points -32, 4.5, 32 and 100 all match no section, while 5 and 7.5 match [5-8) and so for them the function returns 5, and 30, 30.7 and 31 all match [30-31]. Parameters --------- point : float The point for which to match a section. section_list : sortedcontainers.SortedList A list of start points of consecutive sections. Returns ------- float The start of the section the given point belongs to. None if no match was found. Example ------- >>> from sortedcontainers import SortedList >>> seclist = SortedList([5, 8, 30, 31]) >>> find_point_in_section_list(4, seclist) >>> find_point_in_section_list(5, seclist) 5 >>> find_point_in_section_list(27, seclist) 8 >>> find_point_in_section_list(31, seclist) 30 """ if point < section_list[0] or point > section_list[-1]: return None if point in section_list: if point == section_list[-1]: return section_list[-2] ind = section_list.bisect(point)-1 if ind == 0: return section_list[0] return section_list[ind] try: ind = section_list.bisect(point) return section_list[ind-1] except IndexError: return None
47d5cda15b140ba8505ee658fd46ab090b2fda8a
3,658,184
import os import io import json import functools def mk_metrics_api(tm_env): """Factory to create metrics api. """ class _MetricsAPI(object): """Acess to the locally gathered metrics. """ def __init__(self): def _get(rsrc_id, timeframe, as_json=False): """Return the rrd metrics. """ with lc.LogContext(_LOGGER, rsrc_id): _LOGGER.info('Get metrics') id_ = self._unpack_id(rsrc_id) file_ = self._get_rrd_file(**id_) if as_json: return rrdutils.get_json_metrics(file_, timeframe) return file_ def _file_path(rsrc_id): """Return the rrd metrics file path. """ id_ = self._unpack_id(rsrc_id) return self._abs_met_path(**id_) self.file_path = _file_path self.get = _get def _remove_ext(self, fname, extension='.rrd'): """Returns the basename of a file and removes the extension as well. """ res = os.path.basename(fname) res = res[:-len(extension)] return res def _unpack_id(self, rsrc_id): """Decompose resource_id to a dictionary. Unpack the (core) service or the application name and "uniq name" from rsrc_id to a dictionary. """ if '/' in rsrc_id: app, uniq = rsrc_id.split('/') return {'app': app, 'uniq': uniq} return {'service': rsrc_id} def _get_rrd_file(self, service=None, app=None, uniq=None, arch_extract=True): """Return the rrd file path of an app or a core service.""" if uniq is None: return self._core_rrd_file(service) if uniq == 'running': arch_extract = False # find out uniq ... state_json = os.path.join(tm_env().running_dir, app, 'data', 'state.json') with io.open(state_json) as f: uniq = json.load(f)['uniqueid'] return self._app_rrd_file(app, uniq, arch_extract) def _app_rrd_file(self, app, uniq, arch_extract=True): """Return an application's rrd file.""" return _get_file( self._abs_met_path(app=app, uniq=uniq), arch_extract=arch_extract, arch=_archive_path(tm_env, 'sys', app, uniq), arch_extract_filter=functools.partial(_arch_file_filter, fname='metrics.rrd')) def _core_rrd_file(self, service): """Return the given service's rrd file.""" return _get_file(self._abs_met_path(service), arch_extract=False) def _abs_met_path(self, service=None, app=None, uniq=None): """Return the rrd metrics file's full path.""" if service is not None: return os.path.join(tm_env().metrics_dir, 'core', service + '.rrd') return os.path.join(tm_env().metrics_dir, 'apps', '%s-%s.rrd' % (app.replace('#', '-'), uniq)) return _MetricsAPI
1f122f5c71ba7abc4a5a341e240f8d76814210de
3,658,185
import os import types def generate_module(file_allocator, name): """ Generate an in-memory module from a generated Python implementation. """ assert name in file_allocator.allocated_files f = file_allocator.allocated_files[name] f.seek(0) data = f.read() modname, _ = os.path.splitext(name) d = {} eval(compile(data, name, "exec"), d, d) m = types.ModuleType(modname) vars(m).update(d) return m
beab4cdf12fcdfeacef9f8a8607e995b771d6012
3,658,186
import tensorflow as tf  # written against TF1-style static shapes (note the `.value` access below)


def all_reduce_sum(t, dim):
    """Like reduce_sum, but broadcasts sum out to every entry in reduced dim."""
    t_shape = t.get_shape()
    rank = t.get_shape().ndims

    return tf.tile(
        tf.expand_dims(tf.reduce_sum(t, dim), dim),
        [1] * dim + [t_shape[dim].value] + [1] * (rank - dim - 1))
c4048c308ccf2b7550e125b63911183d097959f5
3,658,187
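To make the broadcast-back behaviour of all_reduce_sum concrete, here is an equivalent NumPy sketch with a small worked example:

import numpy as np

def all_reduce_sum_np(t, dim):
    # Sum over `dim`, then repeat the sums so the output keeps t's shape.
    return np.repeat(t.sum(axis=dim, keepdims=True), t.shape[dim], axis=dim)

t = np.array([[1, 2, 3],
              [4, 5, 6]])
print(all_reduce_sum_np(t, 1))
# [[ 6  6  6]
#  [15 15 15]]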
import tensorflow as tf


def get_deltas_from_bboxes_and_landmarks(prior_boxes, bboxes_and_landmarks):
    """Calculating bounding box and landmark deltas for given ground truth boxes and landmarks.
    inputs:
        prior_boxes = (total_bboxes, [center_x, center_y, width, height])
        bboxes_and_landmarks = (batch_size, total_bboxes, [y1, x1, y2, x2, landmark_x0, landmark_y0, ..., landmark_xN, landmark_yN])

    outputs:
        deltas = (batch_size, total_bboxes, [delta_bbox_y, delta_bbox_x, delta_bbox_h, delta_bbox_w, delta_landmark_x0, delta_landmark_y0, ..., delta_landmark_xN, delta_landmark_yN])
    """
    # Ground truth box size and center
    gt_width = bboxes_and_landmarks[..., 3] - bboxes_and_landmarks[..., 1]
    gt_height = bboxes_and_landmarks[..., 2] - bboxes_and_landmarks[..., 0]
    gt_ctr_x = bboxes_and_landmarks[..., 1] + 0.5 * gt_width
    gt_ctr_y = bboxes_and_landmarks[..., 0] + 0.5 * gt_height
    # Box deltas relative to the prior box center and size
    delta_x = (gt_ctr_x - prior_boxes[..., 0]) / prior_boxes[..., 2]
    delta_y = (gt_ctr_y - prior_boxes[..., 1]) / prior_boxes[..., 3]
    delta_w = gt_width / prior_boxes[..., 2]
    delta_h = gt_height / prior_boxes[..., 3]
    # Landmark deltas relative to the prior box center and size
    total_landmarks = tf.shape(bboxes_and_landmarks[..., 4:])[-1] // 2
    xy_pairs = tf.tile(prior_boxes[..., 0:2], (1, total_landmarks))
    wh_pairs = tf.tile(prior_boxes[..., 2:4], (1, total_landmarks))
    landmark_deltas = (bboxes_and_landmarks[..., 4:] - xy_pairs) / wh_pairs
    # Concatenate box deltas and landmark deltas
    return tf.concat([tf.stack([delta_y, delta_x, delta_h, delta_w], -1), landmark_deltas], -1)
4945723b431657b643ef8799eeabacf0a745b8d2
3,658,188
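A single-box numeric check of the delta formulas above, with made-up numbers:

# Prior box:  center_x=0.5, center_y=0.5, width=0.2, height=0.2
# GT box:     y1=0.4, x1=0.4, y2=0.6, x2=0.8  ->  w=0.4, h=0.2, center=(0.6, 0.5)
delta_x = (0.6 - 0.5) / 0.2   # 0.5
delta_y = (0.5 - 0.5) / 0.2   # 0.0
delta_w = 0.4 / 0.2           # 2.0 (a plain ratio here, not a log-ratio)
delta_h = 0.2 / 0.2           # 1.0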
def choose(population, sample):
    """
    Returns ``population`` choose ``sample``, given by:
    n! / k!(n-k)!, where n == ``population`` and k == ``sample``.
    """
    if sample > population:
        return 0
    s = max(sample, population - sample)
    assert s <= population
    assert population > -1
    if s == population:
        return 1
    numerator = 1
    denominator = 1
    for i in range(s+1, population + 1):
        numerator *= i
        denominator *= (i - s)
    # The product always divides evenly; integer division keeps the result exact.
    return numerator // denominator
659eac683cae737888df74c0db21aa3ece746b33
3,658,189
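A few spot checks of choose, following directly from n! / k!(n-k)!:

assert choose(5, 2) == 10
assert choose(4, 4) == 1    # only one way to pick everything
assert choose(2, 5) == 0    # sample larger than population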
import numpy as np


def _where_cross(data, threshold):
    """return a list of Is where the data first crosses above threshold."""
    Is = np.where(data > threshold)[0]   # all indices above the threshold
    Is = np.concatenate(([0], Is))       # sentinel so the first excursion is detected
    Ds = Is[:-1] - Is[1:] + 1            # zero inside a run, nonzero at run boundaries
    return Is[np.where(Ds)[0] + 1]       # first index of each run above threshold
85fe8da97210e2eb7e3c9bca7074f0b0b88c425a
3,658,190
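A worked example for _where_cross: the signal below exceeds the threshold in two separate excursions, and the function returns the first index of each:

data = np.array([0, 0, 5, 6, 0, 0, 7, 8, 0])
print(_where_cross(data, threshold=1))   # [2 6]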
import csv

import numpy as np
from scipy import interpolate


def TVD_to_MD(well,TVD):
    """It returns the measured depth position for a well based on a true vertical depth

    Parameters
    ----------
    well : str
      Selected well
    TVD : float
      Desired true vertical depth

    Returns
    -------
    float
      MD : measured depth

    Attention
    ---------
    The input information comes from the files input/ubication.csv and input/survey/{well}_MD.dat.

    Note
    ----
    A linear interpolation is used.

    Examples
    --------
    >>> TVD_to_MD('WELL-1',-100)
    """
    file="../input/survey/%s_MD.dat"%well
    MD,DeltaY,DeltaX=np.loadtxt(file,skiprows=1,unpack=True,delimiter=',')

    reader = csv.DictReader(open("../input/ubication.csv", 'r')) #'rb'
    dict_ubication={}
    for line in reader:
        dict_ubication[line['well']]=line

    z_0=float(dict_ubication[well]['masl'])
    x_0=float(dict_ubication[well]['east'])
    y_0=float(dict_ubication[well]['north'])

    #Initialize the delta z values
    z_delta=[0 for i in MD]
    x=[0 for i in MD]
    y=[0 for i in MD]
    z=[0 for i in MD]

    #Assuming straight line between points
    for j in range(len(MD)):
        if j==0:
            z_delta[j]=0
        else:
            z_delta[j]=((MD[j]-MD[j-1])**2-(DeltaX[j]-DeltaX[j-1])**2-(DeltaY[j]-DeltaY[j-1])**2)**0.5+z_delta[j-1]

    #Conversion from delta to absolute
    for j in range(len(MD)):
        z[j]=z_0-z_delta[j]

    #Function of X-Y-Z with MD
    funzmd=interpolate.interp1d(z,MD)

    try:
        MD=funzmd(TVD)
    except ValueError:
        MD=np.nan

    return MD
eadca9f9e5ae22fc7a6d9d31f7f0ee7ba4c26be4
3,658,191
def get_table_b_2_b():
    """Table B.2 Usage time fraction of lighting equipment for a two-person household, (b) holidays spent at home

    Args:

    Returns:
        list: Table B.2 Usage time fraction of lighting equipment for a two-person household, (b) holidays spent at home
    """
    table_b_2_b = [
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.25, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.50, 0.00, 0.25, 0.00, 0.50, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.00),
        (0.50, 0.25, 0.50, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00, 0.25, 0.00, 0.25, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.00),
        (0.75, 0.25, 0.25, 0.00, 0.25, 0.00, 0.00, 0.75, 0.25, 0.25, 0.25, 0.25, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.00),
        (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.50, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25),
        (1.00, 0.00, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.50, 0.50, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.75, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (1.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.50, 0.25, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00),
        (0.50, 0.00, 0.00, 0.00, 0.25, 0.00, 0.00, 0.25, 0.25, 0.25, 0.00, 0.50, 0.00, 0.25, 0.00, 0.00, 0.25, 0.00, 0.00),
    ]

    return table_b_2_b
06d29050c0bc61170eeddc75be76fe8bb8422edd
3,658,192
def eea(m, n): """ Compute numbers a, b such that a*m + b*n = gcd(m, n) using the Extended Euclidean algorithm. """ p, q, r, s = 1, 0, 0, 1 while n != 0: k = m // n m, n, p, q, r, s = n, m - k*n, q, p - k*q, s, r - k*s return (p, r)
56e1c59ac3a51e26d416fe5c65cf6612dbe56b8c
3,658,193
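Two worked examples of the Bezout coefficients returned by eea:

a, b = eea(6, 4)
assert (a, b) == (1, -1) and a * 6 + b * 4 == 2       # gcd(6, 4) == 2

a, b = eea(240, 46)
assert (a, b) == (-9, 47) and a * 240 + b * 46 == 2   # gcd(240, 46) == 2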
def string_quote(s): """ TODO(ssx): quick way to quote string """ return '"' + s + '"'
6d54e7661b9b0e17c45cb30cb6efff2c8fd913ae
3,658,194
import numpy as np


def arcball_constrain_to_axis(point, axis):
    """Return sphere point perpendicular to axis."""
    # vector_norm, unit_vector and _EPS are helpers defined elsewhere in the
    # same transformations module.
    v = np.array(point, dtype=np.float64, copy=True)
    a = np.array(axis, dtype=np.float64, copy=True)
    v -= a * np.dot(a, v)  # on plane
    n = vector_norm(v)
    if n > _EPS:
        if v[2] < 0.0:
            v *= -1.0
        v /= n
        return v
    if a[2] == 1.0:
        return np.array([1, 0, 0], dtype=np.float64)
    return unit_vector([-a[1], a[0], 0])
a58a80dd29ba785bd829b33ccb283e7c42993218
3,658,195
from typing import Mapping from typing import Any from typing import MutableMapping def text( node: "RenderTreeNode", renderer_funcs: Mapping[str, RendererFunc], options: Mapping[str, Any], env: MutableMapping, ) -> str: """Process a text token. Text should always be a child of an inline token. An inline token should always be enclosed by a heading or a paragraph. """ text = node.content if is_text_inside_autolink(node): return text # Escape backslash to prevent it from making unintended escapes. # This escape has to be first, else we start multiplying backslashes. text = text.replace("\\", "\\\\") text = escape_asterisk_emphasis(text) # Escape emphasis/strong marker. text = escape_underscore_emphasis(text) # Escape emphasis/strong marker. text = text.replace("[", "\\[") # Escape link label enclosure text = text.replace("]", "\\]") # Escape link label enclosure text = text.replace("<", "\\<") # Escape URI enclosure text = text.replace("`", "\\`") # Escape code span marker # Escape "&" if it starts a sequence that can be interpreted as # a character reference. for char_refs_found, char_ref in enumerate(RE_CHAR_REFERENCE.finditer(text)): start = char_ref.start() + char_refs_found text = text[:start] + "\\" + text[start:] # The parser can give us consecutive newlines which can break # the markdown structure. Replace two or more consecutive newlines # with newline character's decimal reference. text = text.replace("\n\n", "&#10;&#10;") # If the last character is a "!" and the token next up is a link, we # have to escape the "!" or else the link will be interpreted as image. next_sibling = node.next_sibling if text.endswith("!") and next_sibling and next_sibling.type == "link": text = text[:-1] + "\\!" return text
21b39fcdd21cba692a185e4de2c6f648c210e54b
3,658,196
from importlib.util import find_spec


def patch_importlib_util_find_spec(name, package=None):
    """
    Temporarily redirect the search for loaders to the hickle_loader directory
    inside the test directory, for testing the loading of new loaders.
    """
    return find_spec("hickle.tests." + name.replace('.', '_', 1), package)
7a0082c0af92b4d79a93ae6bbd6d1be6ec0ec357
3,658,197
def format_msg_controller(data): """Prints a formatted message from a controller :param data: The bytes from the controller message :type data: bytes """ return format_message(data, 13, "Controller")
4d1f262fd673eb3948fbc46866931ab6bd7205ee
3,658,198
import pygame


def initialize_window(icon, title, width, height, graphical):  # pragma: no cover
    """
    Initializes the graphical environment and the window.

    Parameters
    ----------
    icon : Surface
        Window icon
    title : str
        Window title
    width : int
        Window width
    height : int
        Window height
    graphical : bool
        Whether the window should actually be displayed

    Returns
    -------
    Surface * Surface
        A pair (game surface, surface to display)
    """
    # load_image is a helper from the surrounding module.
    game = pygame.Surface((width, height))
    if graphical:
        pygame.display.set_icon(load_image(icon))
        pygame.display.set_caption(title)
        return (game, pygame.display.set_mode((width, height),
                                              flags=pygame.RESIZABLE))
    return (game, None)
dbc15729b0cb9548ff229ac69dd5d1f2e76c85e5
3,658,199