Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def domain_delete(domainName):  # noqa: E501
    """domain_delete

    Remove the domain # noqa: E501

    :param domainName:
    :type domainName: str

    :rtype: DefaultMessage
    """
    return 'do some magic!'
a0865aa2ff4902ac5cf8a8c0ea9eb62e792af56b
24,691
from datetime import datetime, timedelta


def ParseDate(s):
    """
    ParseDate(s) -> datetime

    This function converts a string containing the subset of ISO8601 that can
    be represented with xs:dateTime into a datetime object. As such it's
    suitable for parsing Collada's <created> and <modified> elements.

    The date must be of the form
    '-'? yyyy '-' mm '-' dd 'T' hh ':' mm ':' ss ('.' s+)? (zzzzzz)?
    See http://www.w3.org/TR/xmlschema-2/#dateTime for more info on the
    various parts.

    return: A datetime or None if the string wasn't formatted correctly.
    """
    # Split the date (yyyy-mm-dd) and time by the "T" in the middle
    parts = s.split("T")
    if len(parts) != 2:
        return None
    date = parts[0]
    time = parts[1]

    # Parse the yyyy-mm-dd part
    parts = date.split("-")
    yearMultiplier = 1
    if date[0] == "-":
        yearMultiplier = -1
        parts.pop(0)  # drop the empty string produced by the leading "-"
    if len(parts) != 3:
        return None

    try:
        year = yearMultiplier * int(parts[0])
        month = int(parts[1])
        day = int(parts[2])
    except ValueError:
        return None

    # Split the time and time zone by "Z", "+", or "-"
    timeZoneDelta = timedelta()
    timeZoneDeltaModifier = 1

    parts = time.split("Z")
    if len(parts) > 1:
        if parts[1] != "":
            return None
    if len(parts) == 1:
        parts = time.split("+")
    if len(parts) == 1:
        parts = time.split("-")
        timeZoneDeltaModifier = -1
    if len(parts) == 1:
        # Time zone not present
        return None

    time = parts[0]
    timeZone = parts[1]

    if timeZone != "":
        parts = timeZone.split(":")
        if len(parts) != 2:
            return None
        try:
            hours = int(parts[0])
            minutes = int(parts[1])
        except ValueError:
            return None
        timeZoneDelta = timeZoneDeltaModifier * timedelta(0, 0, 0, 0, minutes, hours)

    parts = time.split(":")
    if len(parts) != 3:
        return None

    try:
        hours = int(parts[0])
        minutes = int(parts[1])
        seconds = int(parts[2])  # We're losing the decimal portion here, but it probably doesn't matter
    except ValueError:
        return None

    return datetime(year, month, day, hours, minutes, seconds) - timeZoneDelta
d65e4bb51487d9cb22b910e3dc44e299882600b5
24,692
def kinetic_energy(atoms):
    """
    Returns the kinetic energy (Da*angs/ps^2) of the atoms.
    """
    en = 0.0
    for a in atoms:
        vel = v3.mag(a.vel)
        en += 0.5 * a.mass * vel * vel
    return en
8615d61f30f5ded029d1c230346682a040d05e87
24,693
def boxes_intersect(boxes, box):
    """Determine whether a box intersects with any of the boxes listed"""
    x1, y1, x2, y2 = box
    if in_box(boxes, x1, y1) \
            or in_box(boxes, x1, y2) \
            or in_box(boxes, x2, y1) \
            or in_box(boxes, x2, y2):
        return True
    return False
b2201e1501a7827b6db8ef63ebf468b3e1839800
24,694
def cumulative_mean_normalized_difference_function(df, n):
    """
    Compute cumulative mean normalized difference function (CMND).

    :param df: Difference function
    :param n: length of data
    :return: cumulative mean normalized difference function
    :rtype: list
    """
    # scipy method
    cmn_df = df[1:] * range(1, n) / np.cumsum(df[1:]).astype(float)
    return np.insert(cmn_df, 0, 1)
974c0bdaab0e8872ef746839c8d973604eab6929
24,695
def minimal_subject_transformer(index, minimal_subject, attributes,
                                subject_types, subject_type_is, center, radius):
    """Construct the JSON object for a MinimalSubject."""
    subdomain, type = minimal_subject.subdomain, minimal_subject.type

    # Gather all the attributes
    values = [None]  # attributes are indexed starting from 1
    for attribute in attributes:
        name = attribute.key().name()
        if name in subject_types[type].minimal_attribute_names:
            values.append(minimal_subject.get_value(name))
        else:
            values.append(None)

    # Pack the results into an object suitable for JSON serialization.
    subject_jobject = {
        'name': minimal_subject.name,
        'type': subject_type_is[subdomain + ':' + type],
        'values': values,
    }
    if (minimal_subject.has_value('location') and
            minimal_subject.get_value('location')):
        location = {
            'lat': minimal_subject.get_value('location').lat,
            'lon': minimal_subject.get_value('location').lon
        }
        if center:
            subject_jobject['distance_meters'] = distance(location, center)

    dist = subject_jobject.get('distance_meters')
    if center and (dist is None or dist > radius > 0):
        return None

    return subject_jobject
3babc222c508850b8b5bdea551efa8c9e9bc0aa4
24,697
def beta_avg_inv_cdf(y, parameters, res=0.001):
    """
    Compute the inverse cdf of the average of the k beta distributions.

    Parameters
    ----------
    y : float
        A float between 0 and 1 (the range of the cdf)
    parameters : array of tuples
        Each tuple (alpha_i, beta_i) is the parameters of a Beta distribution.
    res : float, optional (default=0.001)
        The precision of the convolution, measured as step size in the support.

    Returns
    -------
    x : float
        the inverse cdf of y
    """
    return brentq(lambda x: beta_avg_cdf([x], parameters, res)[0] - y, 0, 1)
01c266e21401f6f7ad624151aa40d195c9196453
24,698
def _aprime(pHI, pFA):
    """recursive private function for calculating A'"""
    pCR = 1 - pFA

    # use recursion to handle
    # cases below the diagonal defined by pHI == pFA
    if pFA > pHI:
        return 1 - _aprime(1 - pHI, 1 - pFA)

    # Pollack and Norman's (1964) A' measure
    # formula from Grier 1971
    if pHI == 0 or pFA == 1:
        # in both of these cases pHI == pFA
        return .5

    return .5 + (pHI - pFA) * (1 + pHI - pFA) / (4 * pHI * (1 - pFA))
3694dcdbc5da2c12bece51e85988245a60ebe811
24,699
def requires_testing_data(func):
    """Skip testing data test."""
    return _pytest_mark()(func)
a3f5116bc3ac1639de13355795f2bd1da67521aa
24,700
def mfcc_derivative_loss(y, y_hat, derivative_op=None):
    """
    Expects y/y_hat to be of shape batch_size x features_dim x time_steps (default=128)
    """
    if derivative_op is None:
        derivative_op = delta_matrix()

    y_derivative = tf.matmul(y, derivative_op)
    y_hat_derivative = tf.matmul(y_hat, derivative_op)

    return tf.reduce_mean(tf.abs(y_derivative - y_hat_derivative))
8d2b05d65ae3efb10d00f0b30d75ca45abf92064
24,701
from typing import List


def get_mprocess_names_type1() -> List[str]:
    """returns the list of valid MProcess names of type1.

    Returns
    -------
    List[str]
        the list of valid MProcess names of type1.
    """
    names = (
        get_mprocess_names_type1_set_pure_state_vectors()
        + get_mprocess_names_type1_set_kraus_matrices()
    )
    return names
e02b8e5ccc6153899fd999d669a34f14f2258170
24,704
def LoadAI(FileName):
    """LoadSC: This loads an IGOR binary file saved by LabView.
    Loads LabView Scope data from igor and extracts a bunch of interesting
    information (inf) from the data header"""
    IBWData = igor.LoadIBW(FileName);
    # I am going to store the experimental information in a dictionary
    AIdata = {"Note": IBWData["Note"], "Data": IBWData["Data"]};
    return AIdata
d1d434c72e1d50bd857bddba1c4e750f74ea901b
24,705
def accept_message_request(request, user_id):
    """
    Ajax call to accept a message request.
    """
    sender = get_object_or_404(User, id=user_id)
    acceptor = request.user
    if sender in acceptor.profile.pending_list.all():
        acceptor.profile.pending_list.remove(sender)
        acceptor.profile.contact_list.add(sender)
        sender.profile.contact_list.add(acceptor)
        Notification.objects.create(Actor=acceptor, Target=sender,
                                    notif_type='confirmed_msg_request')
        text = 'Added to contact list'
    else:
        text = 'Unexpected error!'
    return HttpResponse(text)
52a7701433a3ff4acbc6adf6cea3ee64303eee02
24,706
import datetime as dt  # assumed alias: the body below uses dt.timedelta

import pytz


def pytz_timezones_from_utc_offset(tz_offset, common_only=True):
    """
    Determine timezone strings corresponding to the given timezone (UTC) offset

    Parameters
    ----------
    tz_offset : int, or float
        Hours of offset from UTC
    common_only : bool
        Whether to only return common zone names (True) or all zone names (False)

    Returns
    -------
    results : list
        List of Olson database timezone name strings

    Examples
    --------
    obs.pytz_timezones_from_utc_offset(-7)
    obs.pytz_timezones_from_utc_offset(-7.62, common_only=False)
    """
    # pick one of the timezone collections (All possible vs only the common zones)
    timezones = pytz.common_timezones if common_only else pytz.all_timezones

    # convert the float hours offset to a timedelta
    offset_days, offset_seconds = 0, int(tz_offset * 3600)
    if offset_seconds < 0:
        offset_days = -1
        offset_seconds += 24 * 3600
    desired_delta = dt.timedelta(offset_days, offset_seconds)

    # Loop through the timezones and find any with matching offsets
    null_delta = dt.timedelta(0, 0)
    results = []
    for tz_name in timezones:
        tz = pytz.timezone(tz_name)
        non_dst_offset = getattr(tz, '_transition_info', [[null_delta]])[-1]
        if desired_delta == non_dst_offset[0]:
            results.append(tz_name)

    return results
4890acae850530b4b7809afb1c09a5cf4795b443
24,707
from sage.arith.all import rising_factorial


def _sympysage_rf(self):
    """
    EXAMPLES::

        sage: from sympy import Symbol, rf
        sage: _ = var('x, y')
        sage: rfxy = rf(Symbol('x'), Symbol('y'))
        sage: assert rising_factorial(x,y)._sympy_() == rfxy.rewrite('gamma')
        sage: assert rising_factorial(x,y) == rfxy._sage_()
    """
    return rising_factorial(self.args[0]._sage_(), self.args[1]._sage_())
c3f55efdcca393cb795f35160131eb781721202a
24,708
def run_sax_on_sequences(rdd_sequences_data, paa, alphabet_size): """ Perform the Symbolic Aggregate Approximation (SAX) on the data provided in **ts_data** :param rdd_sequences_data: rdd containing all sequences: returned by function *sliding_windows()*: *sequences_data* contain a list of all seq : tuple composed by: (key, sequence_list, seq_mean, seq_sd) - keys: an unique key for each seq - sequence_list: the normalized sequence as numpy array giving TS points: [ [t1, v1 ], ... , [tN, vN] ] :type rdd_sequences_data: RDD of list :param paa: number of letters in output word :type paa: int :param alphabet_size: number of characters in result word :type alphabet_size: int :return: the PAA result, the SAX breakpoints and the SAX string :rtype: SaxResult object Note that each letter have the same signification (same breakpoints between all the seq). :raise exception: IkatsException when an error occurred while processing the sax algorithm """ if type(rdd_sequences_data) is not pyspark.rdd.PipelinedRDD: msg = "Unexpected type : PipelinedRDD expected for rdd_sequences_data={}" raise IkatsException(msg.format(rdd_sequences_data)) if type(alphabet_size) is not int or alphabet_size not in range(2, 27): msg = "Unexpected arg value : integer within [2,26] expected for alphabet_size={}" raise IkatsException(msg.format(alphabet_size)) try: LOGGER.info('Starting run_sax_on_sequences ...') # Calculate the PAAs on all the sequences def _spark_internal(sequence, local_paa=paa): """ Compute the PAA of each sequence *sequence*. """ local_paa_seq = run_paa(ts_data=np.array(sequence), paa_size=local_paa).means if len(local_paa_seq) != local_paa: local_paa_seq = local_paa_seq[: len(local_paa_seq) - 1] return local_paa_seq # INPUT : rdd_sequences_data = [(key, sequence_list, seq_mean, seq_sd),...] # OUTPUT : paa_seq = one sequence of all the paa concatenated (flatMap) # PROCESS : Run PAA on the TS data sequences paa_seq = rdd_sequences_data.sortByKey().flatMap(lambda x: _spark_internal(x[1])) # Note that *sortByKey()* is necessary for reproducible results # Once PAA calculated, then, find breakpoints and SAX words sax_result = SaxResult(paa=paa_seq, breakpoints=[], sax_word='') # Build the distribution breakpoints: need a flat list of paa # Note that this method is not sparkified => need to collect the paa data sax_result.build_breakpoints(alphabet_size) # Give the SAX result for all sequences (all timeseries) # Note that the concatenated entire sax word is collected. sax_result.build_sax_word() LOGGER.info("... ended run_sax_on_sequences.") return sax_result except Exception: LOGGER.error("... ended run_sax_on_sequences with error.") raise IkatsException("Failed execution: run_sax_on_sequences()")
4f23894355abfa29094c0b8974e2a7a2e50b6789
24,710
from typing import Union


def attack_speed(game: FireEmblemGame, unit: ActiveUnit, weapon: Union[ActiveWeapon, None]) -> int:
    """
    Calculates and returns the unit's Attack Speed, based on the AS calculation method of the current game,
    the unit's stats, the given weapon's Weight. If the weapon is None, always returns the unit's base Spd stat

    :param game: game to determine method to use for calculating AS.
    :param unit: unit for which to calculate AS
    :param weapon: Weapon unit is assumed to be holding
    :return: Unit's functional Attack Speed
    """
    method = game.attack_speed_method
    if method == AS_Methods.SPEED or (weapon is None and method != AS_Methods.SPEED_MINUS_WEIGHT_MINUS_STR_OVER_FIVE):
        return stats.calc_spd(unit)
    elif method == AS_Methods.SPEED_MINUS_WEIGHT:
        return stats.calc_spd(unit) - weapon.template.wt
    elif method == AS_Methods.SPEED_MINUS_WEIGHT_MINUS_CON:
        return stats.calc_spd(unit) - max(weapon.template.wt - stats.calc_con(unit), 0)
    elif method == AS_Methods.SPEED_MINUS_WEIGHT_MINUS_CON_BUT_NOT_FOR_MAGIC:
        if weapon.template.weapon_type in (WeaponType.TOME, WeaponType.FIRE, WeaponType.THUNDER, WeaponType.WIND,
                                           WeaponType.DARK, WeaponType.LIGHT, WeaponType.ANIMA, WeaponType.BLACK,
                                           WeaponType.WHITE):
            return stats.calc_spd(unit) - weapon.template.wt
        else:
            return stats.calc_spd(unit) - max(weapon.template.wt - stats.calc_con(unit), 0)
    elif method == AS_Methods.SPEED_MINUS_WEIGHT_MINUS_STR:
        return stats.calc_spd(unit) - max(weapon.template.wt - stats.calc_str(unit), 0)
    elif method == AS_Methods.SPEED_MINUS_WEIGHT_MINUS_STR_OVER_FIVE:
        # this is exclusively the calc method for Three Houses, which allows carried items to have weight
        # if the unit has an equipped item, count that too
        item_wt = 0
        for item in unit.items.all():
            if item.equipped:
                item_wt = item.template.wt
        weapon_wt = weapon.template.wt if weapon else 0
        return stats.calc_spd(unit) - max(weapon_wt + item_wt - stats.calc_str(unit) // 5, 0)
    else:
        raise ValueError(f"Unrecognized AS calculation method '{method}' for game {game.name}")
268af7ee65e6bab291818a5fece35006b1209c90
24,711
def set_pow_ref_by_upstream_turbines_in_radius(
        df, df_upstream, turb_no, x_turbs,
        y_turbs, max_radius, include_itself=False):
    """Add a column called 'pow_ref' to your dataframe, which is the
    mean of the columns pow_%03d for turbines that are upstream and
    also within radius [max_radius] of the turbine of interest
    [turb_no].

    Args:
        df ([pd.DataFrame]): Dataframe with measurements. This dataframe
            typically consists of wd_%03d, ws_%03d, ti_%03d, pow_%03d, and
            potentially additional measurements.
        df_upstream ([pd.DataFrame]): Dataframe containing rows indicating
            wind direction ranges and the corresponding upstream turbines
            for that wind direction range. This variable can be generated
            with flasc.floris_tools.get_upstream_turbs_floris(...).
        turb_no ([int]): Turbine number from which the radius should be
            calculated.
        x_turbs ([list, array]): Array containing x locations of turbines.
        y_turbs ([list, array]): Array containing y locations of turbines.
        max_radius ([float]): Maximum radius for the upstream turbines
            until which they are still considered as relevant/used for the
            calculation of the averaged column quantity.
        include_itself (bool, optional): Include the measurements of turbine
            turb_no in the determination of the averaged column quantity.
            Defaults to False.

    Returns:
        df ([pd.DataFrame]): Dataframe which equals the inserted dataframe
            plus the additional column called 'pow_ref'.
    """
    return _set_col_by_upstream_turbines_in_radius(
        col_out='pow_ref',
        col_prefix='pow',
        df=df,
        df_upstream=df_upstream,
        turb_no=turb_no,
        x_turbs=x_turbs,
        y_turbs=y_turbs,
        max_radius=max_radius,
        circular_mean=False,
        include_itself=include_itself)
10ee016f3dbd86bd047dc75a10fa13701cf64fca
24,712
def load_block_production(config: ValidatorConfig, identity_account_pubkey: str):
    """
    loads block production
    https://docs.solana.com/developing/clients/jsonrpc-api#getblockproduction
    """
    params = [
        {
            'identity': identity_account_pubkey
        }
    ]
    return smart_rpc_call(config, "getBlockProduction", params, {})
3af5bc21772699fa10102147fb4a3d90569d8cff
24,713
def remove_metatlas_objects_by_list(object_list, field, filter_list):
    """
    inputs:
        object_list: iterable to be filtered by its attribute values
        field: name of attribute to filter on
        filter_list: strings that are tested to see if they are substrings of the attribute value
    returns filtered list of objects that do not have matches to filter_list
    """
    return filter_by_list(object_list, lambda x: getattr(x, field), filter_list, include=False)
8885a355fcf79696ef84d13242c28943999693b5
24,715
def resnet_encoder(inputs,
                   input_depth=16,
                   block_type='wide',
                   activation_fn=tf.nn.relu,
                   is_training=True,
                   reuse=None,
                   outputs_collections=None,
                   scope=None):
    """Defines an encoder network based on resnet blocks
    """
    if block_type == 'wide':
        resnet_block = wide_resnet
    else:
        raise NotImplementedError

    normalizer_fn = slim.batch_norm

    with tf.variable_scope(scope, [inputs], reuse=reuse) as sc:
        size_in = inputs.get_shape().as_list()[1]
        num_stages = int(log2(size_in))

        # Initial convolution
        net = slim.conv2d(inputs, input_depth, kernel_size=5,
                          activation_fn=activation_fn,
                          padding='SAME',
                          weights_initializer=slim.initializers.variance_scaling_initializer(),
                          normalizer_fn=None,
                          stride=2,
                          scope='conv_in')

        for i in range(1, num_stages - 2):
            current_depth = input_depth * 2**i
            net = resnet_block(net, current_depth, resample='down',
                               activation_fn=activation_fn, scope='resnet%d_a' % i)
            net = resnet_block(net, current_depth, resample=None,
                               activation_fn=activation_fn, scope='resnet%d_b' % i)

        # Reshaping into a 1D code
        net = slim.flatten(net, scope='flat')

        output = slim.fully_connected(net, 2048, activation_fn=activation_fn,
                                      normalizer_fn=normalizer_fn, scope='fc_enc1')

        return slim.utils.collect_named_outputs(outputs_collections,
                                                sc.original_name_scope,
                                                output)
80a136a2d6a4047a92a9c924e92523136017354b
24,716
def execute_stored_proc(cursor, sql):
    """Execute a stored-procedure.

    Parameters
    ----------
    cursor: `OracleCursor`
    sql: `str`
        stored-proc sql statement.
    """
    stored_proc_name, stored_proc_args = _sql_to_stored_proc_cursor_args(sql)
    status = cursor.callproc(stored_proc_name, parameters=stored_proc_args)
    status = '\n'.join(status)
    return [(None, None, None, status)]
528a5b19c208695db0eff8efdb18c1e30cb484b4
24,717
def no_order_func_nb(c: OrderContext, *args) -> Order:
    """Placeholder order function that returns no order."""
    return NoOrder
8bfb6c93930acdf03b83e90271e6904ce5a8e689
24,718
def get_queued():
    """
    Returns a list of notifications that should be sent:
     - Status is queued
     - Has scheduled_time lower than the current time or None
    """
    return PushNotification.objects.filter(status=STATUS.queued) \
        .select_related('template') \
        .filter(Q(scheduled_time__lte=now()) | Q(scheduled_time=None)) \
        .order_by(*get_sending_order())[:get_batch_size()]
4fa14ad21a2954e1f55df562ccd10edf356b3d02
24,719
def hue_of_color(color):
    """
    Gets the hue of a color.

    :param color: The RGB color tuple.
    :return: The hue of the color (0.0-1.0).
    """
    return rgb_to_hsv(*[x / 255 for x in color])[0]
06fd67639f707d149c1b0b21ffc0f337faf1fbe0
24,720
import requests


def recent_tracks(user, api_key, page):
    """Get the most recent tracks from `user` using `api_key`. Start at page
    `page` and limit results to `limit`."""
    return requests.get(
        api_url % (user, api_key, page, LastfmStats.plays_per_page)).json()
f0814fca86dfdcb434527ebbefcea867045359fe
24,721
def extract_hdf5_frames(hdf5_frames):
    """
    Extract frames from HDF5 dataset. This converts the frames to a list.

    :param hdf5_frames: original video frames
    :return [frame] list of frames
    """
    frames = []
    for i in range(len(hdf5_frames)):
        hdf5_frame = hdf5_frames[str(i)]
        assert len(hdf5_frame) == 120
        frame_rows = []
        for rnum in range(len(hdf5_frame)):
            row = hdf5_frame[rnum]
            frame_rows.append(row)
        frames.append(np.array(frame_rows))
    return frames
f22b8ef61a86de45c0801ab3ff8cba679549387b
24,722
def st_oid(draw, max_value=2**512, max_size=50):
    """
    Hypothesis strategy that returns valid OBJECT IDENTIFIERs as tuples

    :param max_value: maximum value of any single sub-identifier
    :param max_size: maximum length of the generated OID
    """
    first = draw(st.integers(min_value=0, max_value=2))
    if first < 2:
        second = draw(st.integers(min_value=0, max_value=39))
    else:
        second = draw(st.integers(min_value=0, max_value=max_value))
    rest = draw(st.lists(st.integers(min_value=0, max_value=max_value),
                         max_size=max_size))
    return (first, second) + tuple(rest)
e43daccf3b123a1d35e1c1cc9c313b580161971a
24,723
import torchvision


def predict(model, image_pth=None, dataset=None):
    """
    Args:
        model : model
        image (str): path of the image >>> ".../image.png"
        dataset [Dataset.Tensor]:

    Returns:
    --------
    predicted class
    """
    transform = transforms.Compose([
        transforms.Resize([28, 28]),
        transforms.Grayscale(),
        transforms.ToTensor()
    ])

    if dataset:
        dataset = torchvision.datasets.MNIST(root="./", download=True, train=True,
                                             transform=transforms.ToTensor())
        i, l = next(iter(dataset))
        image = i.squeeze(0).numpy()
        #plt.imshow(image, cmap="gray")

    if image_pth:
        i = Image.open(image_pth)
        image = transform(i)
        imag = image.squeeze(0).numpy()
        #plt.imshow(imag, cmap="gray")
        i = image

    predict = model(i.unsqueeze(0))
    predicted = np.argmax(predict.detach())
    #plt.title(f"predicted label: {predicted.item()}")
    #plt.show()

    image_cv = cv.imread(image_pth)
    font = cv.FONT_HERSHEY_SIMPLEX
    img_label = f"predicted label: {predicted}"
    cv.putText(image_cv, img_label, [10, 20], \
               font, 0.7, (0, 255, 0), 1, cv.LINE_AA)
    #cv.imshow("sample image", image_cv)
    plt.imshow(image_cv)
    plt.show()

    return predicted
16a0a429b0fb42ac2c58df94b25d52478d5288a7
24,724
def to_centers(sids):
    """
    Converts a (collection of) sid(s) into a (collection of) trixel center
    longitude, latitude pairs.

    Parameters
    ----------
    sids: int or collection of ints
        sids to covert to vertices

    Returns
    --------
    Centers: (list of) tuple(s)
        List of centers. A center is a pair of longitude/latitude.

    Examples
    ----------
    >>> import starepandas
    >>> sids = [4611263805962321926, 4611404543450677254]
    >>> starepandas.to_centers(sids)
    array([[19.50219018, 23.29074702],
           [18.65957821, 25.34384175]])
    """
    vertices = to_vertices(sids)
    centers = vertices2centers(vertices)
    return centers
795e6f60d21997b425d60a2859a34ca56f94c7fb
24,725
def get_atlas_by_id(atlas_id: str, request: Request):
    """
    Get more information for a specific atlas with links to further objects.
    """
    for atlas in siibra.atlases:
        if atlas.id == atlas_id.replace('%2F', '/'):
            return __atlas_to_result_object(atlas, request)
    raise HTTPException(
        status_code=404,
        detail='atlas with id: {} not found'.format(atlas_id))
f37079c16c6bbcf17295e7e14be21cbc3c38bd1e
24,726
def load_etod(event):
    """Called at startup or when the Reload Ephemeris Time of Day rule is
    triggered, deletes and recreates the Ephemeris Time of Day rule. Should be
    called at startup and when the metadata is added to or removed from Items.
    """
    # Remove the existing rule if it exists.
    if not delete_rule(ephem_tod, log):
        log.error("Failed to delete rule!")
        return None

    # Generate the rule triggers with the latest metadata configs.
    etod_items = load_rule_with_metadata(NAMESPACE, check_config, "changed",
                                         "Ephemeris Time of Day", ephem_tod, log,
                                         description=("Creates the timers that "
                                                      "drive the {} state"
                                                      "machine".format(ETOD_ITEM)),
                                         tags=["openhab-rules-tools", "etod"])
    if etod_items:
        for i in [i for i in timers.timers if not i in etod_items]:
            timers.cancel(i)

    # Generate the timers now.
    ephem_tod(None)
74f9b1d4644417263eb2bfe56543baa9ebf0d0a1
24,727
def extract_title(html):
    """Return the article title from the article HTML"""

    # List of xpaths for HTML tags that could contain a title
    # Tuple scores reflect confidence in these xpaths and the preference
    # used for extraction
    xpaths = [
        ('//header[@class="entry-header"]/h1[@class="entry-title"]//text()', 4),  # noqa : E501
        ('//meta[@property="og:title"]/@content', 4),
        ('//h1[@class="entry-title"]//text()', 3),
        ('//h1[@itemprop="headline"]//text()', 3),
        ('//h2[@itemprop="headline"]//text()', 2),
        ('//meta[contains(@itemprop, "headline")]/@content', 2),
        ('//body/title//text()', 1),
        ('//div[@class="postarea"]/h2/a//text()', 1),
        ('//h1[@class="post__title"]//text()', 1),
        ('//h1[@class="title"]//text()', 1),
        ('//head/title//text()', 1),
        ('//header/h1//text()', 1),
        ('//meta[@name="dcterms.title"]/@content', 1),
        ('//meta[@name="fb_title"]/@content', 1),
        ('//meta[@name="sailthru.title"]/@content', 1),
        ('//meta[@name="title"]/@content', 1),
    ]

    extracted_titles = extract_element(html, xpaths,
                                       process_dict_fn=combine_similar_titles)
    if not extracted_titles:
        return None

    return max(extracted_titles,
               key=lambda x: extracted_titles[x].get('score'))
c9a26c6e54e0e4d26c9f807499103bce51b3a1b3
24,728
def re_send_mail(request, user_id):
    """ re-send the email verification email """
    user = User.objects.get(pk=user_id)
    try:
        verify = EmailVerify.objects.filter(user=user).get()
        verify.delete()
    except EmailVerify.DoesNotExist:
        pass
    email_verify = EmailVerify(user=user, user_activation=True)
    email_verify.generate_code()
    email_verify.save()
    send_mail_account_confirmation(user, email_verify.code,
                                   request.shop.name_shop(),
                                   request.get_host())
    return HttpResponseRedirect(reverse('welcome'))
f07c7f55e8bd5b4b1282d314bf96250a031937db
24,729
def add_map_widget(
    width: int,
    height: int,
    center: tuple[float, float],
    zoom_level: int,
    tile_server: TileServer,
) -> int | str:
    """Add map widget

    Args:
        width (int): Widget width
        height (int): Widget height
        center (tuple[float, float]): Center point coordinates: latitude, longitude
        zoom_level (int): Tile map zoom level
        tile_server (TileServer): Tile supplier, from dearpygui_map.tile_source
    """
    map_widget = MapWidget(
        width=width,
        height=height,
        center=center,
        zoom_level=zoom_level,
        tile_server=tile_server,
    )
    return map_widget.insert_widget()
b6a9bd455f8394485a48f6ba45d0084baa1b24b1
24,732
from typing import List


def _index_within_range(query: List[int], source: List[int]) -> bool:
    """Check if query is within range of source index.

    :param query: List of query int
    :param source: List of source int
    """
    dim_num = len(query)
    for i in range(dim_num):
        if query[i] > source[i]:
            raise IndexError(f"index {query[i]} is out of bound for axis {i} with size {source[i]}")
    return True
34c595a3c498fbe1cce24eea5d5dc1866bbbcfac
24,733
from typing import List def test_transactions() -> List[TransactionObject]: """ Load some example transactions """ transaction_dict_1 = { "amount": 1.0, "asset_id": 23043, "category_id": 229134, "currency": "usd", "date": "2021-09-19", "external_id": None, "fees": None, "group_id": None, "id": 55907882, "is_group": False, "notes": "Test Transaction 1", "original_name": "Test 1", "parent_id": None, "payee": "Test 1", "plaid_account_id": None, "price": None, "quantity": None, "status": "uncleared", "subtype": None, "tags": None, "type": None, } transaction_dict_2 = { "amount": 2.0, "asset_id": 23043, "category_id": 229146, "currency": "usd", "date": "2021-09-19", "external_id": None, "fees": None, "group_id": None, "id": 55907976, "is_group": False, "notes": "Test Transaction 2", "original_name": "Test 2", "parent_id": None, "payee": "Test 2", "plaid_account_id": None, "price": None, "quantity": None, "status": "uncleared", "subtype": None, "tags": None, "type": None, } transaction_dict_3 = { "amount": 3.0, "asset_id": 23043, "category_id": 229140, "currency": "usd", "date": "2021-09-19", "external_id": None, "fees": None, "group_id": None, "id": 55907977, "is_group": False, "notes": "Test Transaction 3", "original_name": "Test 3", "parent_id": None, "payee": "Test 3", "plaid_account_id": None, "price": None, "quantity": None, "status": "uncleared", "subtype": None, "tags": None, "type": None, } transaction_1 = TransactionObject(**transaction_dict_1) transaction_2 = TransactionObject(**transaction_dict_2) transaction_3 = TransactionObject(**transaction_dict_3) return [transaction_1, transaction_2, transaction_3]
8aa52fdbf719793e9f93a276097498f70b4973a6
24,735
def modified(mctx, x):
    """``modified()``
    File that is modified according to status.
    """
    # i18n: "modified" is a keyword
    getargs(x, 0, 0, _("modified takes no arguments"))
    s = mctx.status()[0]
    return [f for f in mctx.subset if f in s]
ec647f7478c587b35e9a1d7b219754fabf3940a8
24,736
def parse_declarations(lang, state, code_only=False, keep_tokens=True):
    """
    Return the comments or code of state.line.

    Unlike parse_line, this function assumes the parser is *not* in the
    context of a multi-line comment.

    Args:
        lang (Language): Syntax description for the language being parsed.
        state (State): Parser state.
        code_only (bool, default: False): If False, each non-comment character
            is replaced with a space. If True, each comment character is
            replaced with a space.
        keep_tokens (bool, default: True): If False, comment tokens are
            filtered out. If True, comment tokens are preserved.

    Returns:
        (string, State)
    """
    code, state = parse_code(lang, state)
    comment, state = parse_line_comment(lang, state, keep_tokens)
    comment2, state = parse_multiline_comment(lang, state, keep_tokens)

    if comment or comment2:
        line = state.line
        if not state.multi_end_stack:
            # Continue looking for declarations.
            line, state = parse_declarations(lang, state, code_only, keep_tokens)
        if code_only:
            line = code + clear_line(comment) + clear_line(comment2) + line
        else:
            line = clear_line(code) + comment + comment2 + line
        return line, state
    else:
        state.line = ''
        if code_only:
            return code, state
        else:
            return clear_line(code), state
48b1ea0259795ef3f7acd783b704be5e4be8a79b
24,737
import math


def rads_to_degs(rad):
    """Helper radians to degrees"""
    return rad * 180.0 / math.pi
1be742aa4010e2fc5678e6f911dcb21b0b4d1b59
24,738
def get_employer_jobpost(profile):
    """ """
    jobs = None
    if profile.is_employer:
        jobs = JobPost.objects.filter(user=profile.user).order_by(
            'title', 'employment_option', 'is_active')
    return jobs
d1be49c5381aedfd21d6dbceb07eeb98f1ef3dc4
24,739
def _paginate_issues_with_cursor(page_url,
                                 request,
                                 query,
                                 cursor,
                                 limit,
                                 template,
                                 extra_nav_parameters=None,
                                 extra_template_params=None):
    """Display paginated list of issues using a cursor instead of offset.

    Args:
        page_url: Base URL of issue page that is being paginated. Typically
            generated by calling 'reverse' with a name and arguments of a view
            function.
        request: Request containing offset and limit parameters.
        query: Query over issues
        cursor: cursor object passed to web form and back again.
        limit: Maximum number of issues to return.
        template: Name of template that renders issue page.
        extra_nav_parameters: Dictionary of extra parameters to append to the
            navigation links.
        extra_template_params: Dictionary of extra parameters to pass to page
            rendering.

    Returns:
        Response for sending back to browser.
    """
    issues, next_cursor, has_more = query.fetch_page(limit, start_cursor=cursor)
    nav_parameters = {}
    if extra_nav_parameters:
        nav_parameters.update(extra_nav_parameters)
    nav_parameters['cursor'] = next_cursor.urlsafe() if next_cursor else ''

    params = {
        'limit': limit,
        'cursor': nav_parameters['cursor'],
        'nexttext': 'Next',
    }
    if has_more:
        params['next'] = _url(page_url, **nav_parameters)

    if extra_template_params:
        params.update(extra_template_params)

    return _inner_paginate(request, issues, template, params)
a315163a9d0e53473ce77e262edf3b7f6e663802
24,740
def overlapping(startAttribute, # X endAttribute, # Y startValue, # A endValue, # B ): """ Return an L{axiom.iaxiom.IComparison} (an object that can be passed as the 'comparison' argument to Store.query/.sum/.count) which will constrain a query against 2 attributes for ranges which overlap with the given arguments. For a database with Items of class O which represent values in this configuration:: X Y (a) (b) |-------------------| (c) (d) |--------| (e) (f) |--------| (g) (h) |---| (i) (j) |------| (k) (l) |-------------------------------------| (a) (l) |-----------------------------| (c) (b) |------------------------| (c) (a) |----| (b) (l) |---------| The query:: myStore.query( O, findOverlapping(O.X, O.Y, a, b)) Will return a generator of Items of class O which represent segments a-b, c-d, e-f, k-l, a-l, c-b, c-a and b-l, but NOT segments g-h or i-j. (NOTE: If you want to pass attributes of different classes for startAttribute and endAttribute, read the implementation of this method to discover the additional join clauses required. This may be eliminated some day so for now, consider this method undefined over multiple classes.) In the database where this query is run, for an item N, all values of N.startAttribute must be less than N.endAttribute. startValue must be less than endValue. """ assert startValue <= endValue return OR( AND(startAttribute >= startValue, startAttribute <= endValue), AND(endAttribute >= startValue, endAttribute <= endValue), AND(startAttribute <= startValue, endAttribute >= endValue) )
66e56c9a7c66cbc385e4b7bbea4aa6c08212993e
24,741
def _aligned_series(*many_series):
    """
    Return a new list of series containing the data in the input series, but
    with their indices aligned. NaNs will be filled in for missing values.

    Parameters
    ----------
    many_series : list[pd.Series]

    Returns
    -------
    aligned_series : list[pd.Series]
        A new list of series containing the data in the input series, but
        with their indices aligned. NaNs will be filled in for missing
        values.
    """
    return [series for col, series in iteritems(pd.concat(many_series, axis=1))]
6598dff7814ef20dd0f4904b6c140b6883f9283b
24,742
def structConfMat(confmat, index=0, multiple=False): """ Creates a pandas dataframe from the confusion matrix. It distinguishes between binary and multi-class classification. Parameters ---------- confmat : numpy.ndarray Array with n rows, each of one being a flattened confusion matrix. index : INT, optional Integer for index of the dataframe. The default is 0. multiple : BOOL, optional If True, returns metrics per CV fold. If False, returns mean and std of the metric over all folds (in complex format). Returns ------- performance : pd.DataFrame Dataframe with all classification performance metrics. Use "{0.real:.3} [{0.imag:.2}]".format to display float_format in latex Example for latex tables: print(structConfMat(confmat,multiple=False) .to_latex(float_format="{0.real:.3} [{0.imag:.2}]".format)) Note: for coonverting multiple performance to average/std use (performance.mean() + 1j*performance.std()).to_frame().T """ intdim = int(np.sqrt(confmat.shape[1])) conf_n = confmat.reshape((len(confmat), intdim, intdim)) corrects = conf_n.transpose(2,1,0).reshape((-1,len(conf_n)))[::(intdim+1)] corrects = corrects.sum(axis=0) n_folds = conf_n.sum(axis=1).sum(axis=1) cr = corrects/n_folds aux_n = conf_n[:,0][:,0]/conf_n[:,0].sum(axis=1) for ix in range(intdim-1): aux_n = np.c_[aux_n, conf_n[:,ix+1][:,ix+1]/conf_n[:,ix+1].sum(axis=1)] b_acc = np.nanmean(aux_n, axis=1) performance = pd.DataFrame({'CorrectRate': cr, 'ErrorRate': 1-cr, 'balAcc': b_acc}, index=index+np.arange(confmat.shape[0])) for ix in range(aux_n.shape[1]): auxperf = pd.DataFrame({f'Class_{ix}': aux_n[:,ix]}, index=index+np.arange(confmat.shape[0])) performance = pd.concat((performance, auxperf),axis=1) if intdim==2: columns = performance.columns.tolist() columns[columns.index('Class_0')]='Sensitivity' columns[columns.index('Class_1')]='Specificity' performance.columns = columns prec = aux_n[:,1]/(aux_n[:,1]+1-aux_n[:,0]) f1 = 2*prec*aux_n[:,1]/(prec+aux_n[:,1]) performance['Precision'] = prec performance['F1'] = f1 if multiple==False: performance = (performance.mean(skipna=True) + 1j*performance.std(skipna=True)).to_frame().T return performance
25b458a81a96c8d38d990cd43d3fab5c6526dabd
24,743
from . import data
import datasets
import pkg_resources


def get_img(ds):
    """Get a standard image file as a Niimg

    Parameters
    ----------
    ds : str
        Name of image to get.\n
        Volume Masks:\n
            "MNI152_T1_2mm_brain"\n
            "MNI152_T1_2mm_brain_mask"\n
            "MNI152_T1_2mm_brain_mask_dil"\n
            "MNI152_T1_1mm_brain"\n
            "MNI152_T1_1mm_brain_mask"\n
            "MNI152_T1_1mm_brain_mask_dil"\n
        Surface Masks:\n
            "fs5_mask"\n
            "fs5_mask_lh"\n
            "fs5_mask_rh"\n

    Returns
    -------
    Niimg-like object
    """
    assert ds in datasets.keys(), "Unknown image specified"
    fname = datasets[ds]
    with pkg_resources.path(data, fname) as datafile:
        return nib.load(str(datafile))
0df5bbd1d5188a8d67a7078625734f4b186ca2e9
24,744
from typing import Tuple
from typing import Optional


def check_for_missing_kmers(is_fastq: bool,
                            subtype_result: str,
                            scheme: str,
                            df: pd.DataFrame,
                            exp: int,
                            obs: int,
                            p: SubtypingParams) -> Tuple[Optional[str], Optional[str]]:
    """Check if there are too many missing kmers

    Also check if the mean kmer coverage depth is above the low coverage threshold.

    Args:
        is_fastq: Is input sample reads?
        subtype_result: Single subtype designation
        scheme: Scheme name
        df: Subtyping results dataframe
        exp: Expected number of kmers that should be found
        obs: Actual observed number of kmers found
        p: Subtyping parameters

    Returns:
        Tuple of QC status and any QC messages
    """
    status = None
    messages = None
    # proportion of missing kmers
    p_missing = (exp - obs) / exp  # type: float
    if p_missing > p.max_perc_missing_kmers:
        status = QC.FAIL
        if is_fastq:
            kmers_with_hits = df[df['is_kmer_freq_okay']]  # type: pd.DataFrame
            depth = kmers_with_hits['freq'].mean()
            if depth < p.low_coverage_depth_freq:
                coverage_msg = f'Low coverage depth ({depth:.1f} < {float(p.low_coverage_depth_freq):.1f} expected); ' \
                               f'you may need more WGS data.'
            else:
                coverage_msg = f'Okay coverage depth ({depth:.1f} >= {float(p.low_coverage_depth_freq):.1f} expected), ' \
                               f'but this may be the wrong serovar or species for scheme "{scheme}"'
            messages = f'{p_missing:.2%} missing kmers; more than {p.max_perc_missing_kmers:.2%} missing ' \
                       f'kmers threshold. {coverage_msg}'
        else:
            messages = f'{p_missing:.2%} missing kmers for subtype "{subtype_result}"; more than ' \
                       f'{p.max_perc_missing_kmers:.2%} missing kmer threshold'
    return status, messages
add7449112c7720bb56a8d682030c7220991ff7b
24,746
import torch
import itertools


def splat_feat_nd(init_grid, feat, coords):
    """
    Args:
        init_grid: B X nF X W X H X D X ..
        feat: B X nF X nPt
        coords: B X nDims X nPt in [-1, 1]
    Returns:
        grid: B X nF X W X H X D X ..
    """
    wts_dim = []
    pos_dim = []
    grid_dims = init_grid.shape[2:]

    B = init_grid.shape[0]
    F = init_grid.shape[1]

    n_dims = len(grid_dims)

    grid_flat = init_grid.view(B, F, -1)

    for d in range(n_dims):
        pos = coords[:, [d], :] * grid_dims[d] / 2 + grid_dims[d] / 2
        pos_d = []
        wts_d = []

        for ix in [0, 1]:
            pos_ix = torch.floor(pos) + ix
            safe_ix = (pos_ix > 0) & (pos_ix < grid_dims[d])
            safe_ix = safe_ix.type(pos.dtype)

            wts_ix = 1 - torch.abs(pos - pos_ix)

            wts_ix = wts_ix * safe_ix
            pos_ix = pos_ix * safe_ix

            pos_d.append(pos_ix)
            wts_d.append(wts_ix)

        pos_dim.append(pos_d)
        wts_dim.append(wts_d)

    l_ix = [[0, 1] for d in range(n_dims)]

    for ix_d in itertools.product(*l_ix):
        wts = torch.ones_like(wts_dim[0][0])
        index = torch.zeros_like(wts_dim[0][0])
        for d in range(n_dims):
            index = index * grid_dims[d] + pos_dim[d][ix_d[d]]
            wts = wts * wts_dim[d][ix_d[d]]

        index = index.long()
        grid_flat.scatter_add_(2, index.expand(-1, F, -1), feat * wts)

    grid_flat = torch.round(grid_flat)

    return grid_flat.view(init_grid.shape)
24e798dd9cdaf51988c5229442bef4ebed14c4be
24,747
from typing import Any
from typing import Optional
import torch


def q_nstep_td_error_ngu(
        data: namedtuple,
        gamma: Any,  # float,
        nstep: int = 1,
        cum_reward: bool = False,
        value_gamma: Optional[torch.Tensor] = None,
        criterion: torch.nn.modules = nn.MSELoss(reduction='none'),
) -> torch.Tensor:
    """
    Overview:
        Multistep (1 step or n step) td_error for q-learning based algorithm
    Arguments:
        - data (:obj:`q_nstep_td_data`): the input data, q_nstep_td_data to calculate loss
        - gamma (:obj:`float`): discount factor
        - cum_reward (:obj:`bool`): whether to use cumulative nstep reward, which is figured out when collecting data
        - value_gamma (:obj:`torch.Tensor`): gamma discount value for target q_value
        - criterion (:obj:`torch.nn.modules`): loss function criterion
        - nstep (:obj:`int`): nstep num, default set to 1
    Returns:
        - loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
        - td_error_per_sample (:obj:`torch.Tensor`): nstep td error, 1-dim tensor
    Shapes:
        - data (:obj:`q_nstep_td_data`): the q_nstep_td_data containing\
            ['q', 'next_n_q', 'action', 'reward', 'done']
        - q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
        - next_n_q (:obj:`torch.FloatTensor`): :math:`(B, N)`
        - action (:obj:`torch.LongTensor`): :math:`(B, )`
        - next_n_action (:obj:`torch.LongTensor`): :math:`(B, )`
        - reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
        - done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
        - td_error_per_sample (:obj:`torch.FloatTensor`): :math:`(B, )`
    """
    q, next_n_q, action, next_n_action, reward, done, weight = data
    assert len(action.shape) == 1, action.shape
    if weight is None:
        weight = torch.ones_like(action)

    batch_range = torch.arange(action.shape[0])
    q_s_a = q[batch_range, action]
    target_q_s_a = next_n_q[batch_range, next_n_action]

    if cum_reward:
        if value_gamma is None:
            target_q_s_a = reward + (gamma ** nstep) * target_q_s_a * (1 - done)
        else:
            target_q_s_a = reward + value_gamma * target_q_s_a * (1 - done)
    else:
        target_q_s_a = nstep_return_ngu(nstep_return_data(reward, target_q_s_a, done), gamma, nstep, value_gamma)
    td_error_per_sample = criterion(q_s_a, target_q_s_a.detach())
    return (td_error_per_sample * weight).mean(), td_error_per_sample
eac618bddf77252e6be77f8254fa14f8e00e2c68
24,748
import time
import hashlib


def generate_activation_code(email: str) -> str:
    """
    Takes email address and combines it with a timestamp before encrypting
    everything with the ACTIVATION_LINK_SECRET
    No database storage required for this action

    :param email: email
    :type email: unicode
    :return: activation_code
    :rtype: str
    """
    email = str(email).lower().strip()
    time_stamp = str(int(time.time()))

    # normally encrypt emails, so they are not stored in plaintext with a random nonce
    secret_key = hashlib.sha256(settings.ACTIVATION_LINK_SECRET.encode()).hexdigest()
    crypto_box = nacl.secret.SecretBox(secret_key, encoder=nacl.encoding.HexEncoder)

    validation_secret = crypto_box.encrypt((time_stamp + '#' + email).encode("utf-8"),
                                           nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE))

    return nacl.encoding.HexEncoder.encode(validation_secret).decode()
d2770bf9e2d15e361fa5dcf145fba978f40cb06f
24,749
def fmin_sgd(*args, **kwargs):
    """
    See FMinSGD for documentation. This function creates that object, exhausts
    the iterator, and then returns the final self.current_args values.
    """
    print_interval = kwargs.pop('print_interval', sys.maxint)
    obj = FMinSGD(*args, **kwargs)
    while True:
        t = time.time()
        vals = obj.nextN(print_interval)
        if len(vals):
            print 'Value', np.mean(vals), 'time', (time.time() - t)
        else:
            break
    return obj.current_args
91173eb8c2b4732c217be4ff170d5821eb1e9e5f
24,750
def scan(this, accumulator, seed=None):
    """Applies an accumulator function over an observable sequence and
    returns each intermediate result. The optional seed value is used as
    the initial accumulator value. For aggregation behavior with no
    intermediate results, see OutputThing.aggregate.

    1 - scanned = source.scan(lambda acc, x: acc + x)
    2 - scanned = source.scan(lambda acc, x: acc + x, 0)

    Keyword arguments:
    accumulator -- An accumulator function to be invoked on each element.
    seed -- [Optional] The initial accumulator value.

    Returns an observable sequence containing the accumulated values.
    """
    has_seed = False
    if seed is not None:
        has_seed = True
    has_accumulation = [False]
    accumulation = [None]

    def calculate(x):
        if has_accumulation[0]:
            accumulation[0] = accumulator(accumulation[0], x)
        else:
            accumulation[0] = accumulator(seed, x) if has_seed else x
            has_accumulation[0] = True
        return accumulation[0]

    return this.map(calculate)
4d69686f41c93549208b2e0721e14d95c7c52321
24,753
def get_efficient_pin_order_scramble():
    """ Gets an efficient pin order scramble for a Rubik's Clock. """
    return _UTIL_SCRAMBLER.call("util_scramble.getClockEfficientPinOrderScramble")
edf20cd55f9e2c8e7e3aa0f73f08c34958336a44
24,754
def fixed_padding(inputs, kernel_size):
    """Pad the input along the spatial dimensions independently of input size.

    This function is copied/modified from original repo:
    https://github.com/tensorflow/tpu/blob/acb331c8878ce5a4124d4d7687df5fe0fadcd43b/models/official/resnet/resnet_model.py#L357

    Args:
        inputs: `Tensor` of size `[batch, channels, height, width]` or
            `[batch, height, width, channels]` depending on `data_format`.
        kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`
            operations. Should be a positive integer.

    Returns:
        A padded `Tensor` of the same `data_format` with size either intact
        (if `kernel_size == 1`) or padded (if `kernel_size > 1`).
    """
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg

    # Use ZeroPadding as to avoid TFOpLambda layer
    padded_inputs = tf.keras.layers.ZeroPadding2D(
        padding=((pad_beg, pad_end), (pad_beg, pad_end))
    )(inputs)
    return padded_inputs
ac20d85fff8abd1ea6b160294f1f0a3118e09527
24,755
def _is_si_object(storage_instance):
    """
    Helper method for determining if a storage instance is object.

    Args:
        storage_instance:

    Returns: (Bool) True if object, False if not.
    """
    si_type = storage_instance.get("service_configuration", None)
    if si_type is None:
        # object not supported on storage instance
        return False
    elif si_type == "object":
        return True
    else:
        return False
3cc2591bb0391e6d9d62197d0bb593f5006215c8
24,756
def wgs84_to_bd09(lng, lat):
    """WGS84 -> BD09"""
    lng, lat = wgs84_to_gcj02(lng, lat)
    lng, lat = gcj02_to_bd09(lng, lat)
    return lng, lat
e0d0b7dfa70b8e260b8fb061b50e838463ccec08
24,757
import math def spin_images(pc1, pc2, opt = spin_image_options()): """Compute spin image descriptors for a point cloud Parameters ---------- pc1 : pcloud The function computes a spin image descriptor for each point in the point cloud pc1 pc2 : pcloud The points in the point cloud pc2 are used for casting votes when constructing the spin image descriptors. pc2 can simply be the same as pc1. However, typically it will be a larger set of points than pc1, so that the descriptors can be computed with enough detail. In any case, pc1 and pc2 should be sampled from the same shape opt : geomproc.spin_image_options, optional Object with the configuration for computing the spin image descriptors Returns ------- desc : array_like Spin image descriptors, represented as an array of shape (n, radial_bins*height_bins), where 'n' is the number of points in pc1, and radial_bins*height_bins is the total number of bins in one descriptor according to the given configuration object. desc[i, :] represents the descriptor of point 'i' in pc1 See Also -------- geomproc.spin_image_options Notes ----- The implementation is based on the paper of Johnson and Hebert, "Using Spin Images for Efficient Object Recognition in Cluttered 3D Scenes", IEEE PAMI 21(5), 1999. To compute one spin image descriptor, the method places a cylinder at a point according to the position of the point and orientation of the normal of the point. It then divides the cylinder radially and along its normal to create a number of bins, and counts how many points fall inside each bin. Finally, if desired, each bin is normalized by the total number of points in all the bins, to make the descriptor more robust to point clouds with different numbers of samples. """ # Initialize descriptor desc = np.zeros((pc1.point.shape[0], opt.radial_bins*opt.height_bins)) # Set up KDTree with all points from pc2 tree = KDTree(pc2.point.tolist()) # Build descriptor for each point in pc1 for i in range(pc1.point.shape[0]): # Get point and its normal point = pc1.point[i, :] normal = pc1.normal[i, :] # Get all the points in the range of the descriptor (neighbors) neighbors = tree.dist_query(pc1.point[i, :], opt.radius) # Iterate through each neighbor for j in range(len(neighbors)): # Get neighbor neigh = np.array(neighbors[j]) #### Compute radial and height distances for this neighbor # Form a vector from the reference point to the neighbor vec = neigh - point # Project the vector on the normal of the reference point # to get the distance of the neighbor along the normal # Also, normalize the distance by the height of the # descriptor height_dist = np.dot(normal, vec) / opt.height # Project the vector on the plane perpendicular to the # normal to get the distance of the neighbor along the # radial direction # Also, normalize the distance by the radius of the # descriptor radial_dist = np.linalg.norm(vec - height_dist*normal) / opt.radius # Check if point is inside the range of the descriptor and # can be considered in the descriptor construction # Since we normalized the distances by radius and height, we # can simply compare to 1.0 if (radial_dist < 1.0) and (abs(height_dist) < 1.0): # Normalize the height_dist to a value between 0 and 1 height_dist = (height_dist + 1.0)/2.0 # Find bin index for radial and height distances radial_index = math.floor(radial_dist*opt.radial_bins) height_index = math.floor(height_dist*opt.height_bins) # Convert two bin indices into one index and cast a vote # in the corresponding bin desc[i, radial_index + height_index*opt.radial_bins] += 
1 # If normalizing, divide each bin by the total number of votes in # all the bins if opt.normalize: desc /= desc.sum() return desc
2011d2c1d610655dd752f30c4efa3abe9c7d4d29
24,758
def clone_file_info(input, output):
    """clone_file_info(FileConstHandle input, FileHandle output)"""
    return _RMF.clone_file_info(input, output)
226ee2a0a2547f4bf57754187517301e649cd919
24,759
def queries_to_retract_from_dataset(client, project_id, dataset_id, person_id_query, retraction_type=None): """ Get list of queries to remove all records in all tables associated with supplied ids :param client: bigquery client :param project_id: identifies associated project :param dataset_id: identifies associated dataset :param person_id_query: query to select person_ids to retract :param retraction_type: string indicating whether all data needs to be removed, including RDR, or if RDR data needs to be kept intact. Can take the values 'rdr_and_ehr' or 'only_ehr' :return: list of dict with keys query, dataset, table """ LOGGER.info(f'Checking existing tables for {project_id}.{dataset_id}') existing_tables = [ table.table_id for table in client.list_tables(f'{project_id}.{dataset_id}') ] queries = {TABLES: []} tables_to_retract = set(list(TABLES_FOR_RETRACTION)) # Ignore RDR rows using id constant factor if retraction type is 'only_ehr' id_const = 2 * ID_CONSTANT_FACTOR if ru.is_unioned_dataset( dataset_id) or retraction_type == RETRACTION_RDR_EHR: tables_to_retract |= set(NON_EHR_TABLES) id_const = 0 for table in tables_to_retract: id_const_condition = JINJA_ENV.from_string(ID_CONST_CONDITION).render( table_id=get_table_id(table), id_constant=id_const) if table in existing_tables: if table in [common.DEATH, common.PERSON]: q_dataset = JINJA_ENV.from_string( RETRACT_DATA_TABLE_QUERY).render( project=project_id, dataset=dataset_id, table=table, person_id_query=person_id_query) queries[TABLES].append(q_dataset) else: q_dataset = JINJA_ENV.from_string( RETRACT_DATA_TABLE_QUERY).render( project=project_id, dataset=dataset_id, table=table, table_id=get_table_id(table), person_id_query=person_id_query, id_const_condition=id_const_condition) queries[TABLES].append(q_dataset) table = common.FACT_RELATIONSHIP if table in existing_tables: q_fact_relationship = JINJA_ENV.from_string( RETRACT_DATA_FACT_RELATIONSHIP).render( project=project_id, dataset=dataset_id, table=table, PERSON_DOMAIN=PERSON_DOMAIN, person_id_query=person_id_query) queries[TABLES].append(q_fact_relationship) return queries[TABLES]
b0e2512da10bf1d4742995d8b5a7ed0ec15bca6a
24,760
def calc_moments(imcube, rmscube, mask=None): """ Calculate moments of a masked cube and their errors Parameters ---------- imcube : SpectralCube The image cube for which to calculate the moments and their errors. rmscube : SpectralCube A cube representing the noise estimate at each location in the image cube. Should have the same units as the image cube. mask : `~numpy.ndarray` A binary mask array (0s and 1s) to be applied before measuring the flux and uncertainty. This should NOT be a SpectralCube. Returns ------- altmom : `~numpy.ndarray` A stack of the three moment maps. These are generally redundant since they were previously calculated by SpectralCube. errmom : `~numpy.ndarray` A stack of the three uncertainty maps. """ if mask is not None: immask = imcube.with_mask(mask > 0) errmask = rmscube.with_mask(mask > 0) else: immask = imcube errmask = rmscube tbarry = immask.unitless_filled_data[:] nsearry = errmask.unitless_filled_data[:] vels = immask.spectral_axis.to(u.km/u.s) vel3d = np.expand_dims(vels, axis=(1, 2)) velarry = np.broadcast_to(vel3d, immask.shape) mom0 = np.nansum( tbarry, axis=0 ) mom0_var = np.nansum( nsearry**2, axis=0 ) mom0_err = np.sqrt(mom0_var) mom1 = np.nansum( tbarry * velarry, axis=0) / mom0 mom1_var = np.nansum( ((velarry - mom1)/mom0 * nsearry)**2, axis=0 ) mom1_err = np.sqrt(mom1_var) mom2 = np.nansum( tbarry * (velarry-mom1)**2, axis=0) / mom0 mom2_var = np.nansum( ((mom0 * (velarry-mom1)**2 - np.nansum(tbarry*(velarry - mom1)**2, axis=0)) / mom0**2 * nsearry)**2 + (2*np.nansum( tbarry*(velarry-mom1), axis=0)/mom0 * mom1_err)**2, axis=0 ) stdev = np.sqrt(mom2) sderr = np.sqrt(mom2_var)/(2*stdev) for x in [mom1, stdev, mom1_err, sderr]: x[x == np.inf] = np.nan x[x == -np.inf] = np.nan altmom = np.stack([mom0, mom1, stdev], axis=0) errmom = np.stack([mom0_err, mom1_err, sderr], axis=0) return altmom, errmom
00c43af169f650efe155dca87b3755f128a13f7f
24,761
import types
import array


def value(*, source: str, current_location: types.Location) -> types.TSourceMapEntries:
    """
    Calculate the source map of any value.

    Args:
        source: The JSON document.
        current_location: The current location in the source.

    Returns:
        A list of JSON pointers and source map entries.
    """
    advance.to_next_non_whitespace(source=source, current_location=current_location)
    check.not_end(source=source, current_location=current_location)

    if source[current_location.position] == constants.BEGIN_ARRAY:
        return array(source=source, current_location=current_location)
    if source[current_location.position] == constants.BEGIN_OBJECT:
        return object_(source=source, current_location=current_location)
    return primitive(source=source, current_location=current_location)
093b2df66c3577c7f24ec1cc1da3cd71bf3cdd4f
24,762
def get_class_cnts_by_feature_null(df, class_col, feature, normalize=True):
    """
    Break out class frequencies (in `df[class_col]`) by whether or not
    `df[feature]` is null.

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame on which this function will operate.
    class_col : str
        Column name for the class / target.
    feature : str
        Column name for the feature.
    normalize : bool (default=True)
        Whether or not to normalize class counts by number of rows in the
        respective feature is: [null / non-null] query. I.e. the value for
        `normalize` is passed straight to the `normalize` kwarg in
        `pandas.Series.value_counts`, which is called on data that is
        filtered for either `df[feature].isnull()` or `df[feature].notnull()`.

    Return
    ------
    pandas.DataFrame of class counts, broken out by whether or not
    `df[feature]` is null.
    """
    null = (df.loc[df[feature].isnull(), class_col]
            .value_counts(normalize=normalize)
            .rename("null")
            .to_frame())
    not_null = (df.loc[df[feature].notnull(), class_col]
                .value_counts(normalize=normalize)
                .rename("not_null"))
    return pd.concat({feature: null.join(not_null)}, axis=1)
9ac2ae717c2aca90ca988ad0f960365e8475a555
24,764
import gc def systemic_vel_est(z,param_dict,burn_in,run_dir,plot_param_hist=True): """ Estimates the systemic (stellar) velocity of the galaxy and corrects the SDSS redshift (which is based on emission lines). """ c = 299792.458 # Get measured stellar velocity stel_vel = np.array(param_dict['stel_vel']['chain']) # Calculate new redshift z_best = (z+1)*(1+stel_vel/c)-1 # Burned-in + Flattened (along walker axis) chain # If burn_in is larger than the size of the chain, then # take 50% of the chain length instead. if (burn_in >= np.shape(z_best)[1]): burn_in = int(0.5*np.shape(z_best)[1]) # print('\n Burn-in is larger than chain length! Using 50% of chain length for burn-in...\n') flat = z_best[:,burn_in:] flat = flat.flat # Old confidence interval stuff; replaced by np.quantile p = np.percentile(flat, [16, 50, 84]) pdfmax = p[1] low1 = p[1]-p[0] upp1 = p[2]-p[1] if ((pdfmax-(3.0*low1))<0): flag = 1 else: flag = 0 if (plot_param_hist==True): # Initialize figures and axes # Make an updating plot of the chain fig = plt.figure(figsize=(10,8)) gs = gridspec.GridSpec(2, 2) gs.update(wspace=0.35, hspace=0.35) # set the spacing between axes. ax1 = plt.subplot(gs[0,0]) ax2 = plt.subplot(gs[0,1]) ax3 = plt.subplot(gs[1,0:2]) # Plot 1: Histogram plots # Histogram; 'Doane' binning produces the best results from tests. n, bins, patches = ax1.hist(flat, bins='doane', density=True, facecolor='xkcd:aqua green', alpha=0.75) ax1.axvline(pdfmax,linestyle='--',color='white',label='$\mu=%0.6f$\n' % pdfmax) ax1.axvline(pdfmax-low1,linestyle=':',color='white',label='$\sigma_-=%0.6f$\n' % low1) ax1.axvline(pdfmax+upp1,linestyle=':',color='white',label='$\sigma_+=%0.6f$\n' % upp1) # ax1.plot(xvec,yvec,color='white') ax1.set_xlabel(r'$z_{\rm{best}}$',fontsize=12) ax1.set_ylabel(r'$p(z_{\rm{best}})$',fontsize=12) # Plot 2: best fit values ax2.axvline(pdfmax,linestyle='--',color='black',alpha=0.0,label='$\mu=%0.6f$\n' % pdfmax) ax2.axvline(pdfmax-low1,linestyle=':',color='black',alpha=0.0,label='$\sigma\_=%0.6f$\n' % low1) ax2.axvline(pdfmax+upp1,linestyle=':',color='black',alpha=0.0,label='$\sigma_{+}=%0.6f$\n' % upp1) ax2.legend(loc='center left',frameon=False,fontsize=14) ax2.axis('off') # Plot 3: Chain plot for w in range(0,np.shape(z_best)[0],1): ax3.plot(range(np.shape(z_best)[1]),z_best[w,:],color='white',linewidth=0.5,alpha=0.5,zorder=0) # Calculate median and median absolute deviation of walkers at each iteration; we have depreciated # the average and standard deviation because they do not behave well for outlier walkers, which # also don't agree with histograms. 
c_med = np.median(z_best,axis=0) c_madstd = mad_std(z_best) ax3.plot(range(np.shape(z_best)[1]),c_med,color='xkcd:red',alpha=1.,linewidth=2.0,label='Median',zorder=10) ax3.fill_between(range(np.shape(z_best)[1]),c_med+c_madstd,c_med-c_madstd,color='xkcd:aqua',alpha=0.5,linewidth=1.5,label='Median Absolute Dev.',zorder=5) ax3.axvline(burn_in,linestyle='--',color='xkcd:orange',label='burn-in = %d' % burn_in) ax3.set_xlim(0,np.shape(z_best)[1]) ax3.set_xlabel('$N_\mathrm{iter}$',fontsize=12) ax3.set_ylabel(r'$z_{\rm{best}}$',fontsize=12) ax3.legend(loc='upper left') # Save the figure plt.savefig(run_dir+'histogram_plots/param_histograms/'+'z_best_MCMC.png' ,bbox_inches="tight",dpi=300,fmt='png') # Close plot window fig.clear() plt.close() # Collect garbage del fig del ax1 del ax2 del ax3 del flat gc.collect() z_dict = {'par_best':pdfmax,'sig_low':low1,'sig_upp':upp1,'chain':z_best,'flag':flag} # z_best = pdfmax z_best_low = low1 z_best_upp = upp1 return (z_best,z_best_low,z_best_upp),z_dict
3d5c957147ad7f16cd664d08326ad40196f46d5e
24,765
def set_model_weights(model, weights): """Set the given weights to keras model Args: model : Keras model instance weights (dict): Dictionary of weights Return: Keras model instance with weights set """ for key in weights.keys(): model.get_layer(key).set_weights(weights[key]) return model
0adb7294348af379df0d2a7ce2101a6dc3a43be4
24,767
import re def extract_manage_vlan(strValue): """Parse the information returned by `show manage-vlan`. Args: strValue(str): output of the `show manage-vlan` command Returns: list: a list of dictionaries describing the management VLANs """ # ------------------------------------ # Manage name : xx # ------------------------------------ # Svlan : 1000 # Scos : 7 # Port : 9:2[T] # Device : sub # Unit : 1000 # Ethernet address: 48:f9:7c:e9:8a:e3 # Total protocols : 0 # RX packets : 0 # TX packets : 8 # RX bytes : 0 # TX bytes : 704 # MTU : 0 # ------------------------------------ # Manage name : yy # ------------------------------------ # Svlan : 2000 # Scos : 7 # Port : 9:2[T] # Device : sub # Unit : 2000 # Ethernet address: 48:f9:7c:e9:8a:e3 # Total protocols : 0 # RX packets : 0 # TX packets : 8 # RX bytes : 0 # TX bytes : 704 # MTU : 0 keyValueExp = re.compile('([\w\s]+):\s(.+)') ret = [ ] for line in strValue.splitlines(): match = keyValueExp.match(line) if match: k, v = match.groups() k = auto_convert(k) v = auto_convert(v) if k == 'Manage name': ret.append({ }) ret[-1][k] = v return ret
83ff7d5af37c9caf9bf557caa9e1d845e96706fb
24,769
from typing import List def generate_sample_space_plot_detailed( run_directory: str, step: int = 0, agent_ids: List[int] = [0], contour_z: str = "action_value", circle_size: str = "action_visits", ) -> List[dcc.Graph]: """Generates detailed sample space plots for the given agents. Parameters ---------- run_directory : str directory of the mcts evaluator output. step : int, optional the currently selected step, by default 0 agent_ids : List[int], optional list of agent ids for each of which a plot should be generated, by default [0] contour_z : str, optional string indicating which dataframe column the contour should display, by default "action_value" circle_size : str, optional string indicating which dataframe column determines the circle size, by default "action_visits" Returns ------- List[dcc.Graph] plotly figure for every given agent. """ figs: List[dcc.Graph] = [] step_data = load_step(run_directory, step) if step_data is None: return [ dcc.Graph( figure=go.Figure( { "layout": { "xaxis": {"visible": False}, "yaxis": {"visible": False}, "annotations": [ { "text": "Enable childMap export for sample space visualization.", "xref": "paper", "yref": "paper", "showarrow": False, "font": {"size": 28}, } ], } } ) ) ] elif not agent_ids: # list agent_ids is empty, so return an empty figure list return figs df = get_step_dataframe(step_data) # this variable must be set here because the variable circle_size may change use_action_value_circles = circle_size == "action_value" for agent_id in agent_ids: agent_df = df[df["id"] == agent_id] # drop unvisited actions agent_df = agent_df[agent_df["action_visits"] != 0] # finally chosen action chosen_x = agent_df[agent_df["action_chosen"] > 0]["d_velocity"] chosen_y = agent_df[agent_df["action_chosen"] > 0]["d_lateral"] labels = { "d_velocity": "Velocity change", "d_lateral": "Lateral change", "action_visits": "Action visit count", "action_value": "Action value", } if use_action_value_circles: # action values can be negative, so transform them to positive values for the circle size min_value = agent_df["action_value"].min() if min_value < 0: circle_size = agent_df["action_value"] - min_value fig = px.scatter( agent_df, x="d_velocity", y="d_lateral", marginal_x="histogram", marginal_y="histogram", hover_data=["action_visits", "action_value"], labels=labels, size=circle_size, ).update_traces( marker=dict( line=dict(width=1, color="black"), opacity=0.5, symbol="circle-dot", color="grey", ), selector=dict(type="scatter"), ) pivot_df = agent_df.pivot( index="d_lateral", columns="d_velocity", values=contour_z ) fig.add_trace( go.Contour( z=pivot_df.values, x=pivot_df.columns.values, y=pivot_df.index.values, contours_coloring="heatmap", # "fill" connectgaps=True, # line_smoothing=1.3, colorscale=px.colors.sequential.Plasma, xaxis="x", yaxis="y", hoverinfo="skip", colorbar=dict(title=labels[contour_z], titleside="right"), ) ) fig.add_trace( go.Scatter( x=chosen_x, y=chosen_y, xaxis="x", yaxis="y", mode="markers", name="Selected Action", marker=dict( line=dict(width=2, color="DarkSlateGrey"), color="red", size=15, symbol="x", ), ) ) # determine min/max x/y values to specify the axes ranges manually min_x = agent_df.loc[:, "d_velocity"].min() max_x = agent_df.loc[:, "d_velocity"].max() min_y = agent_df.loc[:, "d_lateral"].min() max_y = agent_df.loc[:, "d_lateral"].max() fig.update_layout( title=dict( text=f"Agent: {agent_id}, Step: {step}", x=0.5, ), margin_t=110, # default: 100 height=460, # default: 450 xaxis_range=[min_x, max_x], yaxis_range=[min_y, max_y], 
legend=dict( orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1 ), ) figs.append(fig) return [ dcc.Graph(figure=fig, className="col-sm-12 col-md-6 col-lg-4") for fig in figs ]
544e3bebcb2ecb6d62f4228b13527e392a3dc51a
24,770
import ntpath def raw_path_basename(path): """Returns basename from raw path string""" path = raw(path) return ntpath.basename(path)
d1282ead5a8d5bf9705302347b2769421ece409b
24,771
from typing import Dict from typing import Tuple from typing import Optional def prepare_request( url: str, access_token: str = None, user_agent: str = None, ids: MultiInt = None, params: RequestParams = None, headers: Dict = None, json: Dict = None, ) -> Tuple[str, RequestParams, Dict, Optional[Dict]]: """Translate some ``pyinaturalist``-specific params into standard params and headers, and perform other request param preprocessing. This is made non-``requests``-specific so it could potentially be reused for ``aiohttp`` requests. Returns: Tuple of ``(URL, params, headers, data)`` """ # Prepare request params params = preprocess_request_params(params) # Prepare user-agent and authentication headers headers = headers or {} headers['User-Agent'] = user_agent or pyinaturalist.user_agent headers['Accept'] = 'application/json' if access_token: headers['Authorization'] = f'Bearer {access_token}' # If one or more resources are requested by ID, validate and update the request URL accordingly if ids: url = url.rstrip('/') + '/' + validate_ids(ids) # Convert any datetimes to strings in request body if json: headers['Content-type'] = 'application/json' json = preprocess_request_body(json) return url, params, headers, json
f1ab81d32146a102c2ac7518228c4f880d988214
24,772
def require_permission(permission): """Pyramid decorator to check permissions for a request.""" def handler(f, *args, **kwargs): request = args[0] if check_permission(request, request.current_user, permission): return f(*args, **kwargs) elif request.current_user: raise HTTPForbidden() else: raise HTTPFound(request.route_url('user.login', _query={'redirect': encode_route(request)})) return decorator(handler)
e9321d6eaf84b80bd41d253ebd26aabecd6660ee
24,773
import _uuid def _collapse_subgraph(graph_def, inputs, outputs, op_definition): """Substitute a custom op for the subgraph delimited by inputs and outputs.""" name = _uuid.uuid1().hex # We need a default type, but it can be changed using 'op_definition'. default_type = types_pb2.DT_FLOAT new_graph = fuse_op( graph_def=graph_def, input_nodes=inputs, output_nodes=outputs, output_dtypes=[default_type for _ in outputs], output_quantized=False, op_name=name, op_type="CustomTfLiteOp") node_def = node_def_pb2.NodeDef() text_format.Parse(op_definition, node_def) for node in new_graph.node: if node.name == name: node.MergeFrom(node_def) return new_graph
f0f562c7d26876761a3d251d41a64aac5c432db0
24,774
def downsample_filter_simple(data_in, n_iter=1, offset=0): """ Do nearest-neighbor remixing to downsample data_in (..., nsamps) by a power of 2. """ if n_iter <= 0: return None ns_in = data_in.shape[-1] ns_out = ns_in // 2 dims = data_in.shape[:-1] + (ns_out,) data_out = np.empty(dims, dtype=data_in.dtype) # Central sample data_out[...,:] = data_in[...,offset:ns_out*2+offset:2] * 2 # To the left (all output samples except maybe the first) l_start = 1-offset l_count = ns_out - l_start data_out[...,l_start:] += data_in[...,(1-offset):2*l_count:2] # To the right (all output samples except maybe the last) # depending on 2*ns_out+offset <= ns_in r_count = (ns_in - offset) // 2 data_out[...,:r_count] += data_in[...,offset+1::2] # Normalization... data_out[...,:] /= 4 if l_start > 0: data_out[...,0] *= 4./3 if r_count < ns_out: data_out[...,-1] *= 4./3 if n_iter <= 1: return data_out # Destroy intermediate storage, and iterate data_in = data_out return downsample_filter_simple(data_in, n_iter-1, offset)
f3b247e552ed95f28bd9f68ac00ff39db6741d77
24,775
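A small sketch of calling downsample_filter_simple on a synthetic signal, assuming NumPy is installed and the function above is in scope; the shapes are chosen only to illustrate the factor-of-2 reduction per iteration.

import numpy as np

# One 'detector' row with 16 samples.
data = np.arange(16.0).reshape(1, 16)

once = downsample_filter_simple(data, n_iter=1)   # shape (1, 8)
twice = downsample_filter_simple(data, n_iter=2)  # shape (1, 4)
print(once.shape, twice.shape)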
from datetime import datetime, timezone def _session_setup(calling_function_name='[FUNCTION NAME NOT GIVEN]'): """ Typically called at the top of lightcurve workflow functions, to collect commonly required data. :return: tuple of data elements: context [tuple], defaults_dict [py dict], session_dict [py dict], log_file [file object]. """ context = _get_session_context() if context is None: return this_directory, mp_string, an_string, filter_string = context defaults_dict = ini.make_defaults_dict() session_dict = ini.make_session_dict(defaults_dict, this_directory) log_filename = defaults_dict['session log filename'] log_file = open(log_filename, mode='a') # set up append to log file. log_file.write('\n===== ' + calling_function_name + '() ' + '{:%Y-%m-%d %H:%M:%S utc}'.format(datetime.now(timezone.utc)) + '\n') return context, defaults_dict, session_dict, log_file
95dd3cada63e8970b3f39ff2b3f3d14797f86cd8
24,776
def find_center(image, center_guess, cutout_size=30, max_iters=10): """ Find the centroid of a star from an initial guess of its position. Originally written to find star from a mouse click. Parameters ---------- image : numpy array or CCDData Image containing the star. center_guess : array or tuple The position, in pixels, of the initial guess for the position of the star. The coordinates should be horizontal first, then vertical, i.e. opposite the usual Python convention for a numpy array. cutout_size : int, optional The default width of the cutout to use for finding the star. max_iters : int, optional Maximum number of iterations to go through in finding the center. """ pad = cutout_size // 2 x, y = center_guess # Keep track of iterations cnt = 0 # Grab the cutout... sub_data = image[y - pad:y + pad, x - pad:x + pad] # - med # ...do stats on it... _, sub_med, _ = sigma_clipped_stats(sub_data) # sub_med = 0 # ...and centroid. x_cm, y_cm = centroid_com(sub_data - sub_med) # Translate centroid back to original image (maybe use Cutout2D instead) cen = np.array([x_cm + x - pad, y_cm + y - pad]) # ceno is the "original" center guess, set it to something nonsensical here ceno = np.array([-100, -100]) while (cnt <= max_iters and (np.abs(np.array([x_cm, y_cm]) - pad).max() > 3 or np.abs(cen - ceno).max() > 0.1)): # Update x, y positions for subsetting x = int(np.floor(x_cm)) + x - pad y = int(np.floor(y_cm)) + y - pad sub_data = image[y - pad:y + pad, x - pad:x + pad] # - med _, sub_med, _ = sigma_clipped_stats(sub_data) # sub_med = 0 mask = (sub_data - sub_med) < 0 x_cm, y_cm = centroid_com(sub_data - sub_med, mask=mask) ceno = cen cen = np.array([x_cm + x - pad, y_cm + y - pad]) if not np.all(~np.isnan(cen)): raise RuntimeError('Centroid finding failed, ' 'previous was {}, current is {}'.format(ceno, cen)) cnt += 1 return cen
9357b6655ec094b058af0710eb6f410b49019b9b
24,777
def zGetTraceArray(numRays, hx=None, hy=None, px=None, py=None, intensity=None, waveNum=None, mode=0, surf=-1, want_opd=0, timeout=5000): """Trace large number of rays defined by their normalized field and pupil coordinates on lens file in the LDE of main Zemax application (not in the DDE server) Parameters ---------- numRays : integer number of rays to trace. ``numRays`` should be equal to the length of the lists (if provided) ``hx``, ``hy``, ``px``, etc. hx : list, optional list of normalized field heights along x axis, of length ``numRays``; if ``None``, a list of 0.0s for ``hx`` is created. hy : list, optional list of normalized field heights along y axis, of length ``numRays``; if ``None``, a list of 0.0s for ``hy`` is created px : list, optional list of normalized heights in pupil coordinates, along x axis, of length ``numRays``; if ``None``, a list of 0.0s for ``px`` is created. py : list, optional list of normalized heights in pupil coordinates, along y axis, of length ``numRays``; if ``None``, a list of 0.0s for ``py`` is created intensity : float or list, optional initial intensities. If a list of length ``numRays`` is given it is used. If a single float value is passed, all rays use the same value for their initial intensities. If ``None``, all rays use a value of ``1.0`` as their initial intensities. waveNum : integer or list (of integers), optional wavelength number. If a list of integers of length ``numRays`` is given it is used. If a single integer value is passed, all rays use the same value for wavelength number. If ``None``, all rays use wavelength number equal to 1. mode : integer, optional 0 = real (Default), 1 = paraxial surf : integer, optional surface to trace the ray to. Usually, the ray data is only needed at the image surface (``surf = -1``, default) want_opd : integer, optional 0 if OPD data is not needed (Default), 1 if it is. See Zemax manual for details. timeout : integer, optional command timeout specified in milli-seconds Returns ------- error : list of integers 0 = ray traced successfully; +ve number = the ray missed the surface; -ve number = the ray total internal reflected (TIR) at surface given by the absolute value of the ``error`` vigcode : list of integers the first surface where the ray was vignetted. Unless an error occurs at that surface or subsequent to that surface, the ray will continue to trace to the requested surface. x, y, z : list of reals x, or , y, or z, coordinates of the ray on the requested surface l, m, n : list of reals the x, y, and z direction cosines after refraction into the media following the requested surface. l2, m2, n2 : list of reals list of x or y or z surface intercept direction normals at requested surface opd : list of reals computed optical path difference if ``want_opd > 0`` intensity : list of reals the relative transmitted intensity of the ray, including any pupil or surface apodization defined. 
If ray tracing fails, a single integer error code is returned, which has the following meaning: -1 = Couldn't retrieve data in PostArrayTraceMessage, -999 = Couldn't communicate with Zemax, -998 = timeout reached Examples -------- >>> n = 9**2 >>> nx = np.linspace(-1, 1, np.sqrt(n)) >>> hx, hy = np.meshgrid(nx, nx) >>> hx, hy = hx.flatten().tolist(), hy.flatten().tolist() >>> rayData = at.zGetTraceArray(numRays=n, hx=hx, hy=hy, mode=0) >>> err, vig = rayData[0], rayData[1] >>> x, y, z = rayData[2], rayData[3], rayData[4] Notes ----- The opd can only be computed if the last surface is the image surface, otherwise, the opd value will be zero. """ rd = getRayDataArray(numRays, tType=0, mode=mode, endSurf=surf) hx = hx if hx else [0.0] * numRays hy = hy if hy else [0.0] * numRays px = px if px else [0.0] * numRays py = py if py else [0.0] * numRays if intensity: intensity = intensity if isinstance(intensity, list) else [intensity]*numRays else: intensity = [1.0] * numRays if waveNum: waveNum = waveNum if isinstance(waveNum, list) else [waveNum]*numRays else: waveNum = [1] * numRays want_opd = [want_opd] * numRays # fill up the structure for i in xrange(1, numRays+1): rd[i].x = hx[i-1] rd[i].y = hy[i-1] rd[i].z = px[i-1] rd[i].l = py[i-1] rd[i].intensity = intensity[i-1] rd[i].wave = waveNum[i-1] rd[i].want_opd = want_opd[i-1] # call ray tracing ret = zArrayTrace(rd, timeout) # free up some memory #del hx, hy, px, py, intensity, waveNum, want_opd # seems to increase running time #_gc.collect() d = {} if ret == 0: reals = ['x', 'y', 'z', 'l', 'm', 'n', 'l2', 'm2', 'n2', 'opd', 'intensity'] ints = ['error', 'vigcode'] for r in reals: exec(r + " = [0.0] * numRays", locals(), d) for i in ints: exec(i + " = [0] * numRays", locals(), d) for i in xrange(1, numRays+1): d["x"][i-1] = rd[i].x d["y"][i-1] = rd[i].y d["z"][i-1] = rd[i].z d["l"][i-1] = rd[i].l d["m"][i-1] = rd[i].m d["n"][i-1] = rd[i].n d["opd"][i-1] = rd[i].opd d["intensity"][i-1] = rd[i].intensity d["l2"][i-1] = rd[i].Exr d["m2"][i-1] = rd[i].Eyr d["n2"][i-1] = rd[i].Ezr d["error"][i-1] = rd[i].error d["vigcode"][i-1] = rd[i].vigcode return (d["error"], d["vigcode"], d["x"], d["y"], d["z"], d["l"], d["m"], d["n"], d["l2"], d["m2"], d["n2"], d["opd"], d["intensity"]) else: return ret
986d69d3adae09841fb1b276e9b182a93f4c7fd7
24,778
from binascii import unhexlify def bin2bytes(binvalue): """Convert a binary string to bytes. The input should be byte-aligned.""" hexvalue = bin2hex(binvalue) bytevalue = unhexlify(hexvalue) return bytevalue
ecc90282e7b247f0e87f8349e382d2b2a194cf77
24,779
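The function above relies on a bin2hex helper defined elsewhere in its module. A self-contained sketch of the same idea using only the standard library might look like the following; bin2hex_local and bin2bytes_local are made-up names for illustration.

from binascii import unhexlify

def bin2hex_local(binvalue):
    # '00001010 01100001' -> '0a61'; the bit string must be a multiple of 8 bits.
    bits = binvalue.replace(" ", "")
    return "{:0{}x}".format(int(bits, 2), len(bits) // 4)

def bin2bytes_local(binvalue):
    return unhexlify(bin2hex_local(binvalue))

print(bin2bytes_local("0000101001100001"))  # b'\na'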
from typing import Optional def _report_maker( *, tback: str, func_name: Optional[str] = None, header: Optional[str] = None, as_attached: bool = False, ) -> Report: """ Make a report from the given traceback. Args: tback(str): traceback for report. func_name(str, optional): name of the function that raised the error. header(str, optional): first line in report message. Default - "Your program has crashed ☠️" as_attached(bool, optional): make report for sending as a file. Default - False. Returns: instance of Report obj. """ return Report(tback, func_name, header, as_attached)
a4ff80557944d774b162ac0671dd6997243ff49f
24,781
def moments(data,x0=None,y0=None): """Returns (height, x, y, width_x, width_y) the gaussian parameters of a 2D distribution by calculating its moments """ total = data.sum() X, Y = np.indices(data.shape) x = (X*data).sum()/total y = (Y*data).sum()/total col = data[:, int(y)] width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum()) row = data[int(x), :] width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum()) height = data.max() if x0 is None: return height, x, y, width_x, width_y, 0.0, 0.0 else: xstep = x0[1] - x0[0] ystep = y0[1] - y0[0] return height, x*xstep+x0[0], y*ystep+y0[0], width_x*xstep, width_y*ystep, 0.0, 0.0
d88f8aa9938d397c205135bd1174be64bdf32d5f
24,782
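A quick sanity check of moments on a synthetic 2-D Gaussian, assuming NumPy is installed and the function above is in scope; the center and widths are arbitrary.

import numpy as np

# Symmetric-center Gaussian at (20, 20) with widths (3, 5) on a 40x40 grid.
X, Y = np.indices((40, 40))
data = np.exp(-((X - 20.0) ** 2 / (2 * 3.0 ** 2) + (Y - 20.0) ** 2 / (2 * 5.0 ** 2)))

height, x, y, width_x, width_y, _, _ = moments(data)
print(height, x, y, width_x, width_y)  # roughly 1.0, 20, 20, 3, 5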
def VecStack(vector_list, axis=0): """ This is a helper function to stack vectors """ # Determine output size single_vector_shape = [max([shape(vector)[0] for vector in vector_list]), max([shape(vector)[1] for vector in vector_list])] vector_shape = dcopy(single_vector_shape) vector_shape[axis] *= len(vector_list) # Allocate memory vector_full = zeros(vector_shape, getDatatype(vector_list[0]), getBackend(vector_list[0])) # Assign vector elements to the output for index, vector in enumerate(vector_list): vector_full[index * single_vector_shape[0]:(index + 1) * single_vector_shape[0], :] = pad(vector, single_vector_shape) # Return return vector_full
5a2b00ec25fc34dc7a24a493b7e72edb13fbf1b9
24,784
def precook(s, n=4, out=False): """ Takes a string as input and returns an object that can be given to either cook_refs or cook_test. This is optional: cook_refs and cook_test can take string arguments as well. :param s: string : sentence to be converted into ngrams :param n: int : number of ngrams for which representation is calculated :return: term frequency vector for occurring ngrams """ words = s.split() counts = defaultdict(int) for k in xrange(1,n+1): #1,2,3,4 for i in xrange(len(words)-k+1): ngram = tuple(words[i:i+k]) counts[ngram] += 1 return counts
556af462015429173464574de0103a6f661355f7
24,785
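The function above (written for Python 2 — note xrange and the defaultdict import expected in its module) builds per-sentence n-gram counts of the kind used by BLEU/CIDEr-style scorers. A self-contained Python 3 sketch of the same idea:

from collections import defaultdict

def ngram_counts(sentence, n=4):
    words = sentence.split()
    counts = defaultdict(int)
    for k in range(1, n + 1):
        for i in range(len(words) - k + 1):
            counts[tuple(words[i:i + k])] += 1
    return counts

counts = ngram_counts("the cat sat on the mat", n=2)
print(counts[("the",)])        # 2 -- the unigram 'the' occurs twice
print(counts[("the", "cat")])  # 1 -- the bigram occurs once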
import numpy as np def get_full_path(path, nx_list_subgraph): """Creates a numpy array of the line result. Args: path (list): Result of ``nx.shortest_path`` nx_list_subgraph (list): See ``create_shortest path`` function Returns: ndarray: Coordinate pairs along a path. """ p_list = [] curp = None for i in range(len(path)-1): p = get_path(path[i], path[i+1], nx_list_subgraph) if curp is None: curp = p if np.sum((p[0]-curp)**2) > np.sum((p[-1]-curp)**2): p = p[::-1, :] p_list.append(p) curp = p[-1] return np.vstack(p_list)
1849341431bf24d0dbe0920ca1ae955a6280415f
24,786
def get_compliance_site_case_notifications(data, request): """ returns the count of notification for a compliance site case and all visit cases under it. """ ids = [item["id"] for item in data] notifications = ( ExporterNotification.objects.filter( user_id=request.user.pk, organisation_id=get_request_user_organisation_id(request), case_id__in=ids ) .values("case") .annotate(count=Count("case")) ) cases_with_notifications = {str(notification["case"]): notification["count"] for notification in notifications} visit_notifications = list( ExporterNotification.objects.filter( user_id=request.user.pk, organisation_id=get_request_user_organisation_id(request), case__compliancevisitcase__site_case__id__in=ids, ) .values("case__compliancevisitcase__site_case_id") .annotate(count=Count("case__compliancevisitcase__site_case_id")) ) visit_cases_with_notifications = { str(notification["case__compliancevisitcase__site_case_id"]): notification["count"] for notification in visit_notifications } for item in data: if item["id"] in cases_with_notifications: item["exporter_user_notification_count"] = cases_with_notifications[item["id"]] else: item["exporter_user_notification_count"] = 0 if item["id"] in visit_cases_with_notifications: item["exporter_user_notification_count"] += visit_cases_with_notifications[item["id"]] return data
639b4ef0425573e3f7806b18c7d03221d5db3932
24,787
def compare_two_data_lists(data1, data2): """ Gets two lists and returns the size of their set difference (the number of elements of the first list that are not in the second). But if one of them is None (file loading error) then the return value is None """ set_difference = None if data1 is None or data2 is None: set_difference = None else: set_difference = len(set(data1).difference(data2)) return set_difference
6e926d3958544d0d8ce1cbcb54c13535c74ab66b
24,789
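For example, assuming compare_two_data_lists is in scope:

print(compare_two_data_lists([1, 2, 3, 4], [2, 3]))  # 2 -- elements 1 and 4 are missing from the second list
print(compare_two_data_lists(None, [2, 3]))          # None -- propagates a file-loading error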
from typing import Any from datetime import datetime def get_on_request(field: Any, default_value: Any) -> Any: """ Value retrieval helper. Args: field: the field default_value: if the field is empty, this value is substituted Return: the field value or the default """ if isinstance(field, datetime): if field.timestamp() < 10: return default_value if field: return field return default_value
598f47d996618cfcf3790fe7497c6d51508efc48
24,790
def aptamer(ligand, piece='whole', liu=False): """ Construct aptamer sequences. Parameters ---------- ligand: 'theo' Specify the aptamer to generate. Right now only the theophylline aptamer is known. piece: 'whole', '5', '3', or 'splitter' Specify which part of the aptamer to generate. The whole aptamer is returned by default, but each aptamer can be broken into a 5' half, a 3' half, and a splitter between those halves. Returns ------- aptamer: Construct The returned construct is given constraints, which can be used to force RNAfold to approximate a ligand bound state. """ affinity_uM = float('inf') # Get the right sequence for the requested aptamer. if ligand in ('th', 'theo', 'theophylline'): sequence_pieces = 'AUACCAGCC', 'GAAA', 'GGCCCUUGGCAG' if liu: sequence_pieces = 'AUACCACGC', 'GAAA', 'GCGCCUUGGCAG' constraint_pieces = '.((((.(((', '....', ')))....)))).' affinity_uM = 0.32 elif ligand in ('gtheoc'): # The theophylline aptamer, bracketed by a GC base pair. This # construct is more convenient to use with ViennaRNA, because a # bracketing base pair is required to make a constraint. sequence_pieces = 'GAUACCAGCC', 'GAAA', 'GGCCCUUGGCAGC' constraint_pieces = '(.((((.(((', '....', ')))....)))).)' affinity_uM = 0.32 elif ligand in ('3', '3mx', '3-methylxanthine'): # Soukup, Emilsson, Breaker. Altering molecular recognition of RNA # aptamers by allosteric selection. J. Mol. Biol. (2000) 298, 623-632. sequence_pieces = 'AUACCAGCC', 'GAAA', 'GGCCAUUGGCAG' constraint_pieces = '.(.((((((', '....', ')))...))).).' elif ligand in ('r', 'tmr', 'tetramethylrosamine', 'mg', 'malachite green'): # Baugh, Grate, Wilson. 2.8Å structure of the malachite green aptamer. # JMB (2000) 301:1:117-128. # This aptamer was used to make riboswitches, but with luciferase and # not RFP, possibly because TMR is a fluorescent dye: Borujeni et al. # Automated physics-based design of synthetic riboswitches from diverse # RNA aptamers. Nucl. Acids Res. (2016) 44:1:1-13. # I can't find any commercial TMR. Sigma used to sell it as product # number T1823, but has since discontinued it. sequence_pieces = 'CCGACUGGCGAGAGCCAGGUAACGAAUG', constraint_pieces = '(...(((((....))))).........)', elif ligand in ('tpp', 'thiamine', 'thiamine pyrophosphate'): # Winkler, Hahvi, Breaker. Thiamine derivatives bind messenger RNAs # directly to regulate bacterial gene expression. Nature (2002) # 419:952-956. # The sequence I've copied here is the ThiM 91 fragment from Winkler et # al. Weiland et al. used almost the same sequence, but they mutated # the last nucleotide from A to U to break a base pair. # Winker et al used "M9 glucose minimal media (plus 50 μg/mL vitamin # assay Casamino acids; Difco)" with or without 100 μM thiamine for # their in vivo assays (figure 4b, bottom). The "vitamin assay" means # the casein digest was treated to remove certain vitamins; presumably # this is an important detail. # Weiland et al. used M63 media with or without 1 mM thiamine for their # in vivo assays. This is a little confusing to me because the M63 # recipe I found contains thiamine. Also, the titrations in figure 3B # and 3C only go to 50 μM (and saturate around 1 μM). # My plan is to use M9 media with glucose and "vitamin assay" Casamino # acids, with and without 100 μM thiamine. sequence_pieces = 'UCGGGGUGCCCUUCUGCGUGAAGGCUGAGAAAUACCCGUAUCACCUGAUCUGGAUAAUGCCAGCGUAGGGAA', constraint_pieces = '(..(((((.(((((.....)))))........)))))......((((..((((......))))..))))..)', affinity_uM = 0.0324 # (interpolated from figure 2b in Winkler et al.) 
elif ligand in ('a', 'add', 'adenine'): # Serganov et al. Structural Basis for discriminative regulation of # gene expression by adenine- and guanine-sensing mRNAs. Chemistry & # Biology (2004) 11:1729-1741. # I truncated 7 base pairs that weren't interacting with the ligand # from the end of the construct. I haven't been able to find an # example of the adenine aptamer being used in a riboswitch to see if # this is what other people have done, but Nomura et al. made # essentially the same truncation to the highly homologous guanine # aptamer when using it to make an allosteric ribozyme, so I'm pretty # confident that this could work. # Dixon et al. used M9 + 0.4% glucose + 2 mg/mL cas-amino acids + 0.1 # mg/mL thiamine. This is a higher concentration of cas-amino acids # than Winkler et al. use for the TPP aptamer, but this is much more in # line with the standard protocols. # # The ligand was also in some amount of DMSO, but I'm not sure how # much. The solubility of adenine in water is 7.6 mM, so maybe the # DMSO was only necessary for some of their other ligands. sequence_pieces = 'UAUAAUCCUAAUGAUAUGGUUUGGGAGUUUCUACCAAGAGCCUUAAACUCUUGAUUA', constraint_pieces = '((...(((((((.......)))))))........((((((.......))))))..))', elif ligand in ('b', 'amm', 'ammeline'): # Dixon et al. Reengineering orthogonally selective riboswitches. PNAS # (2010) 107:7:2830-2835. # This is the M6 construct, which is just the adenine aptamer from # above with U47C and U51C. The affinity measurement is actually for # M6'', because it was not measured for M6. sequence_pieces = 'UAUAAUCCUAAUGAUAUGGUUUGGGAGCUUCCACCAAGAGCCUUAAACUCUUGAUUA', constraint_pieces = '((...(((((((.......)))))))........((((((.......))))))..))', affinity_uM = 1.19 elif ligand in ('g', 'gua', 'guanine'): # Nomura, Zhou, Miu, Yokobayashi. Controlling mammalian gene expression # by allosteric Hepatitis Delta Virus ribozymes. ACS Synth. Biol. # (2013) 2:684-689. # Nomura et al. used guanine at 500 μM, but I still see toxicity at # this concentration. I think I'm going to use 250 μM instead. sequence_pieces = 'UAUAAUCGCGUGGAUAUGGCACGCAAGUUUCUACCGGGCACCGUAAAUGUCCGACUA', constraint_pieces = '((...(.(((((.......))))).)........((((((.......))))))..))', affinity_uM = 0.005 elif ligand in ('fmn', 'flavin', 'flavin mononucleotide'): # Soukup, Breaker. Engineering precision RNA molecular switches. PNAS # (1999) 96:3584-3589. # I can't find any examples of anyone using this aptamer in vivo. sequence_pieces = 'GAGGAUAUGCUUCGGCAGAAGGC', constraint_pieces = '(......(((....))).....)', elif ligand in ('m', 'ms2', 'ms2 coat protein'): # Qi, Lucks, Liu, Mutalik, Arkin. Engineering naturally occurring # trans-acting non-coding RNAs to sense molecular signals. Nucl. Acids # Res. (2012) 40:12:5775-5786. Sequence in supplement. # I can't really figure out which MS2 aptamer people use for synthetic # biology. All the papers I've read agree that the aptamer has one # stem and three unpaired adenosines. The sequences from Romaniuk, # Convery, and Qi actually have the same stem, they just differ in the # loop. The sequences from Batey and Culler are exactly the same, but # different from those in the other papers. # The loop from Romaniuk and Convery is AUUA (the wildtype sequence) # while the loop from Qi is ACCA. I'm inclined to use ACCA because Qi # was doing synthetic biology and because Convery mentions that the # natural consensus sequence for the loop is ANYA, a standard tetraloop # which doesn't preclude ACCA. 
# I should consider making the N55K mutation to the coat protein # itself. One of the plasmids on AddGene mentioned that this mutation # increases affinity for the aptamer. That plasmid was for mammalian # expression, and so far I haven't seen this assertion corroborated for # bacterial systems. sequence_pieces = 'AACAUGAGGACCACCCAUGUU', constraint_pieces = '((((((.((....))))))))', elif ligand in ('bca', 'beta-catenin'): # Culler, Hoff, Smolke. Reprogramming cellular behavior with rna # controllers responsive to endogenous proteins. Science (2010) # 330:6008:1251-1255. sequence_pieces = 'AGGCCGATCTATGGACGCTATAGGCACACCGGATACTTTAACGATTGGCT', raise NotImplementedError elif ligand in ('tc', 'tet', 'tetracycline'): # Wittmann and Suess. Selection of tetracycline inducible # self-cleaving ribozymes as synthetic devices for gene regulation in # yeast. Mol BioSyst (2011) 7:2419-2427. # The authors used 100 μM tetracycline in yeast. I've seen other # papers that used as much as 250 μM. sequence_pieces = 'AAAACAUACCAGAUUUCGAUCUGGAGAGGUGAAGAAUACGACCACCU', constraint_pieces = '(.......((((((....))))))...((((...........)))))', # Müller, Weigand, Weichenrieder, Suess. Thermodynamic characterization # of an engineered tetracycline-binding riboswitch. Nucleic Acids # Research (2006) 34:9:2607-2617. affinity_uM = 0.00077 # 770 pM elif ligand in ('neo', 'neomycin'): # Weigand, Sanchez, Gunnesch, Zeiher, Schroeder, Suess. Screening for # engineered neomycin riboswitches that control translation initiation. # RNA (2008) 14:89-97. # The authors show that the aptamer consists of two domains: one that # binds neomycin and one which is just a stem. Both are important for # regulating gene expression in their system, which is the 5'-UTR of an # mRNA. However, here I only include the ligand-binding domain. The # length and composition of the stem domain is probably application # dependent, and that's what I need to pull out of directed evolution. # # The authors used 100 μM neomycin. Yeast were grown at 28°C for 48h # in 5 mL minimal media. sequence_pieces = 'GCUUGUCCUUUAAUGGUCC', constraint_pieces = '(.....((......))..)', elif ligand in ('asp', 'aspartame'): # Ferguson et al. A novel strategy for selection of allosteric # ribozymes yields RiboReporter™ sensors for caffeine and aspartame. # Nucl. Acids Res. (2004) 32:5 sequence_pieces = 'CGGTGCTAGTTAGTTGCAGTTTCGGTTGTTACG', constraint_pieces = '((.............................))', elif ligand in ('caf', 'caffeine'): # Ferguson et al. A novel strategy for selection of allosteric # ribozymes yields RiboReporter™ sensors for caffeine and aspartame. # Nucl. Acids Res. (2004) 32:5 sequence_pieces = 'GATCATCGGACTTTGTCCTGTGGAGTAAGATCG', constraint_pieces = '.................................', else: raise ValueError("no aptamer for '{}'".format(ligand)) # Check for obvious entry errors in the aptamer sequences. if len(sequence_pieces) not in (1, 3): raise AssertionError("{} has {} sequence pieces, not 1 or 3.".format(ligand, len(sequence_pieces))) if len(sequence_pieces) != len(constraint_pieces): raise AssertionError("{} has {} sequence pieces and {} constraint pieces.".format(ligand, len(sequence_pieces), len(constraint_pieces))) if len(''.join(sequence_pieces)) != len(''.join(constraint_pieces)): raise AssertionError("the {} sequence has a different length than its constraints.".format(ligand)) # Define the domains that make up the aptamer. 
if len(sequence_pieces) == 1: aptamer = Domain("aptamer", sequence_pieces[0]) aptamer.constraints = constraint_pieces[0] aptamer.style = 'yellow', 'bold' aptamer.kd = affinity_uM if len(sequence_pieces) == 3: aptamer_5 = Domain("aptamer/5'", sequence_pieces[0]) aptamer_S = Domain("aptamer/splitter", sequence_pieces[1]) aptamer_3 = Domain("aptamer/3'", sequence_pieces[2]) aptamer_5.constraints = constraint_pieces[0] aptamer_S.constraints = constraint_pieces[1] aptamer_3.constraints = constraint_pieces[2] aptamer_5.style = 'yellow', 'bold' aptamer_S.style = 'yellow', 'bold' aptamer_3.style = 'yellow', 'bold' aptamer_S.mutable = True # Assemble the aptamer domains into a single construct and return it. construct = Construct('aptamer') if len(sequence_pieces) == 1: construct += aptamer if len(sequence_pieces) == 3: if piece == 'whole': construct += aptamer_5 construct += aptamer_S construct += aptamer_3 elif str(piece) == '5': construct += aptamer_5 elif piece == 'splitter': construct += aptamer_S elif str(piece) == '3': construct += aptamer_3 else: raise ValueError("must request 'whole', '5', '3', or 'splitter' piece of aptamer, not {}.".format(piece)) return construct
1d3b3803022d0529ff9fa393779aec5fd36cb94b
24,791
def bootstrap_ci(dataframe, kind='basic'): """Generate confidence intervals on the 1-sigma level for bootstrapped data given in a DataFrame. Parameters ---------- dataframe: DataFrame DataFrame with the results of each bootstrap fit on a row. If the t-method is to be used, a Panel is required, with the data in the panel labeled 'data' and the uncertainties labeled 'stderr' kind: str, optional Selects which method to use: percentile, basic, or t-method (student). Returns ------- DataFrame Dataframe containing the left and right limits for each column as rows. """ if isinstance(dataframe, pd.Panel): data = dataframe['data'] stderrs = dataframe['stderr'] args = (data, stderrs) else: data = dataframe args = (data) def percentile(data, stderrs=None): CI = pd.DataFrame(index=['left', 'right'], columns=data.columns) left = data.apply(lambda col: np.percentile(col, 15.865), axis=0) right = data.apply(lambda col: np.percentile(col, 84.135), axis=0) CI.loc['left'] = left CI.loc['right'] = right return CI def basic(data, stderrs=None): CI = pd.DataFrame(index=['left', 'right'], columns=data.columns) left = data.apply(lambda col: 2 * col[0] - np.percentile(col[1:], 84.135), axis=0) right = data.apply(lambda col: 2 * col[0] - np.percentile(col[1:], 15.865), axis=0) CI.loc['left'] = left CI.loc['right'] = right return CI def student(data, stderrs=None): CI = pd.DataFrame(index=['left', 'right'], columns=data.columns) R = (data - data.loc[0]) / stderrs left = R.apply(lambda col: np.percentile(col[1:], 84.135), axis=0) right = R.apply(lambda col: np.percentile(col[1:], 15.865), axis=0) left = data.loc[0] - stderrs.loc[0] * left right = data.loc[0] - stderrs.loc[0] * right CI.loc['left'] = left CI.loc['right'] = right return CI method = {'basic': basic, 'percentile': percentile, 't': student} method = method.pop(kind.lower(), basic) return method(*args)
073276c5cf28b384716352ec386091ed252de72c
24,792
import re def get_Trinity_gene_name(transcript_name): """ extracts the gene name from the Trinity identifier as the prefix """ (gene_name, count) = re.subn("_i\d+$", "", transcript_name) if count != 1: errmsg = "Error, couldn't extract gene_id from transcript_id: {}".format(transcript_name) logger.critical(errmsg) raise RuntimeError(errmsg) return gene_name
4c1ab87285dbb9cdd6a94c050e6dbed27f9f39bf
24,793
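A quick demonstration of the prefix extraction, assuming the function and its module-level logger are importable; the transcript identifier below is a typical Trinity-style name made up for illustration.

print(get_Trinity_gene_name("TRINITY_DN1000_c0_g1_i2"))  # 'TRINITY_DN1000_c0_g1'
# An identifier without a trailing '_i<N>' isoform suffix raises RuntimeError.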
import regex def exclude_block_notation(pattern: str, text: str) -> str: """ Strip block-element notation from the text that represents a line.\n Passing the stripped result to the Inline Parser keeps Block and Inline processing separate. :param pattern: notation pattern :param text: line text :return: the line text with the block-element notation removed """ return regex.extract_from_group(pattern, text, [INDEX_TEXT])
803d084a383b454da6d1c69a5d99ad0b4777eea7
24,794
def envfile_to_params(data): """ Converts environment file content into a dictionary with all the parameters. If your input looks like: # comment NUMBER=123 KEY="value" Then the generated dictionary will be the following: { "NUMBER": "123", "KEY": "value" } """ params = filter(lambda x: len(x) == 2, map(lambda x: x.strip().split("="), data.splitlines())) return { k: v[1:-1] if v.startswith('"') and v.endswith('"') else v for (k, v) in params }
03d3b4eb7ea5552938e6d42dcfd4554a1fe89422
24,795
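For example, assuming envfile_to_params is in scope, a small env-file string becomes a plain dict:

env_text = '''
# database settings
NUMBER=123
KEY="value"
EMPTY=
'''

print(envfile_to_params(env_text))
# {'NUMBER': '123', 'KEY': 'value', 'EMPTY': ''}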
import tokenize def greedy_decode(input_sentence, model, next_symbol=next_symbol, tokenize=tokenize, detokenize=detokenize): """Greedy decode function. Args: input_sentence (string): a sentence or article. model (trax.layers.combinators.Serial): Transformer model. Returns: string: summary of the input. """ ### START CODE HERE (REPLACE INSTANCES OF 'None' WITH YOUR CODE) ### # Use tokenize() cur_output_tokens = tokenize(input_sentence) + [0] generated_output = [] cur_output = 0 EOS = 1 while cur_output != EOS: # Get next symbol cur_output = next_symbol(cur_output_tokens, model) # Append next symbol to original sentence cur_output_tokens.append(cur_output) # Append next symbol to generated sentence generated_output.append(cur_output) print(detokenize(generated_output)) ### END CODE HERE ### return detokenize(generated_output)
416d61827ecb54c5ef85e2669c9905d5b20ecbf3
24,796
from selenium.common.exceptions import WebDriverException def get_browser_errors(driver): """ Checks the browser for errors, returns a list of errors :param driver: Selenium WebDriver instance :return: list of log entries with level 'SEVERE' """ try: browserlogs = driver.get_log('browser') except (ValueError, WebDriverException) as e: # Some browsers do not support getting logs print(f"Could not get browser logs for driver {driver} due to exception: {e}") return [] return [entry for entry in browserlogs if entry['level'] == 'SEVERE']
fda6953703053fa16280b8a99aa91165625f6aa9
24,797
def get_user(request, user_id): """ Endpoint for profile given a user id. :param request: session request. :param user_id: id of user. :return: 200 - user profile. 401 - login required. 404 - user not found. """ try: get_user = User.objects.get(id=user_id) except: return JsonResponse( "Not Found - User does not exist.", status=404, safe=False ) # Check for share code. valid_sc = False if get_user.share_code: if request.GET.get("sharecode") == get_user.share_code: valid_sc = True if not valid_sc: try: verify_user_login(request) except PermissionDenied: return JsonResponse( "Unauthorized - Login required.", status=401, safe=False ) response = get_user.serialize() response["graphs"] = get_graphs(get_user) return JsonResponse(response, status=200)
cd790d93ed9f5f974fe4b6868f10cf2f4470f93c
24,798
def get_file_ext(url): """ Returns extension of filename of the url or path """ return get_filename(url).split('.')[-1]
419f8b1e8caac13aed500563e94cc28e40669156
24,799
def check_records(msg: dict) -> int: """ Returns the number of records sent in the SQS message """ records = 0 if msg is not None: records = len(msg[0]) if records != 1: raise ValueError("Not expected single record") return records
7036f943b733ca34adaaa5ff917b3eb246075422
24,800
def get_processes_from_tags(test): """Extract process slugs from tags.""" tags = getattr(test, 'tags', set()) slugs = set() for tag_name in tags: if not tag_name.startswith('{}.'.format(TAG_PROCESS)): continue slugs.add(tag_name[len(TAG_PROCESS) + 1:]) return slugs
16d333d1371ab533aa5ed7a26c4bdd968233edf9
24,801
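A rough standalone sketch of the tag filtering above, re-implemented here because TAG_PROCESS is a module-level constant; the value 'process' is an assumption for illustration only.

TAG_PROCESS = "process"  # assumed value -- the real constant lives in the original module

def slugs_from_tags(tags):
    # Mirror of get_processes_from_tags, operating on a plain set of tag strings.
    prefix = "{}.".format(TAG_PROCESS)
    return {t[len(prefix):] for t in tags if t.startswith(prefix)}

print(slugs_from_tags({"process.workflow-a", "process.rna-seq", "slow"}))
# {'workflow-a', 'rna-seq'} (set ordering may vary)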
import random def eight_ball(): """ Magic eight ball. :return: A random answer. :rtype: str """ answers = [ 'It is certain', 'It is decidedly so', 'Not a fucking chance!', 'without a doubt', 'Yes definitely', 'I suppose so', 'Maybe', ' No fucking way!', 'Sure :D', 'hahahaha no you plank! :P ', 'Ohhh yes!', 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Yes', 'Signs point to yes', 'Try again', 'Ask again later', 'Better not tell you now as you may cry like a little girl', 'Cannot predict now', 'Fucking dead right!', 'Ohhhh most definitely', 'Concentrate and ask again', 'Don\'t count on it', 'My reply is no', 'My sources say no', 'Outlook not so good', 'Very doubtful', 'Possibly, but I think you need to chillout!' ] return random.choice(answers)
728aea44a111a25d878ec7686038d993fe49f71c
24,803
def head(line, n: int): """returns the first `n` lines""" global counter counter += 1 if counter > n: raise cbox.Stop() # can also raise StopIteration() return line
221f8c6ac5a64b5f844202622e284053738147aa
24,804
import numpy as np def onehot(x, numclasses=None): """ Convert integer encoding for class-labels (starting with 0 !) to one-hot encoding. If numclasses (the number of classes) is not provided, it is assumed to be equal to the largest class index occurring in the labels-array + 1. The output is an array whose shape is the shape of the input array plus an extra dimension, containing the 'one-hot'-encoded labels. """ if x.shape == (): x = x[np.newaxis] if numclasses is None: numclasses = x.max() + 1 result = np.zeros(list(x.shape) + [numclasses]) z = np.zeros(x.shape) for c in range(numclasses): z *= 0 z[np.where(x == c)] = 1 result[..., c] += z return result
6595ef4fc837296f6ba31c78a4b3047aaca7ee49
24,805
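For instance, with NumPy available and onehot in scope, a 1-D label array maps to a 2-D indicator array:

import numpy as np

labels = np.array([0, 2, 1, 2])
print(onehot(labels))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]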
import numpy def draw_graph(image, graph): """ Draw the graph on the image by traversing the graph structure. Args: | *image* : the image where the graph needs to be drawn | *graph* : the *.txt file containing the graph information Returns: """ tmp = draw_edges(image, graph) node_size = int(numpy.ceil((max(image.shape) / float(NODESIZESCALING)))) return draw_nodes(tmp, graph, max(node_size, 1))
2454e654969d766af60546686d9c305c67199c8a
24,806
def valid_octet (oct): """ Validates a single IP address octet. Args: oct (int): The octet to validate Returns: bool: True if the octet is valid, otherwise False """ return oct >= 0 and oct <= 255
9dd2346bb5df5bc00bb360013abe40b8039bdc45
24,807
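Building on valid_octet, a small helper (hypothetical, for illustration) can validate a full dotted-quad address:

def valid_ipv4(address):
    parts = address.split(".")
    if len(parts) != 4:
        return False
    try:
        return all(valid_octet(int(p)) for p in parts)
    except ValueError:
        return False

print(valid_ipv4("192.168.0.1"))  # True
print(valid_ipv4("256.1.1.1"))    # False
print(valid_ipv4("10.0.0"))       # False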
def load_clean_dictionaries(): """ Loads the Combilex data into two dictionaries: word -> pronunciations and pronunciation -> words. :return: grapheme_dict, phonetic_dict """ grapheme_dict = {} phonetic_dict = {} with open(COMBILEX_PATH, encoding='utf-8') as combilex_file: for line in combilex_file: # Skip commented lines if line[0:3] == ';;;': continue word, phone = line.strip().split('\t') if not should_skip_seq(word): if word not in grapheme_dict: grapheme_dict[word] = [] grapheme_dict[word].append(phone) if not should_skip_seq(phone): if phone not in phonetic_dict: phonetic_dict[phone] = [] phonetic_dict[phone].append(word) return grapheme_dict, phonetic_dict
ab7257c78ec8ba0786a0112362ba318468e69e02
24,808