Dataset schema: content (string, 35 to 762k characters), sha1 (string, 40 characters), id (int64, 0 to 3.66M).
def encode(numbers, GCE=GCE):
    """
    do extended encoding on a list of numbers for the google chart api

    >>> encode([1690, 90, 1000])
    'chd=e:aaBaPo'
    """
    encoded = []
    for number in numbers:
        if number > 4095:
            raise ValueError('too large')
        first, second = divmod(number, len(GCE))
        encoded.append("%s%s" % (GCE[first], GCE[second]))
    return "chd=e:%s" % ''.join(encoded)
9a15134e0266a0dfc60b654b9da8d2a6169bee7f
15,357
def compute_f_mat(mat_rat, user_count, movie_count):
    """
    compute the f matrix
    :param mat_rat: user's rating matrix ([user number, movie number]) where 1 means the user likes the movie at that index.
    :param user_count: number of movies each user has watched.
    :param movie_count: number of users that have rated each movie.
    :return: f matrix
    """
    temp = (mat_rat / user_count.reshape([-1, 1])) / movie_count.reshape([1, -1])
    D = np.dot(mat_rat.T, temp)
    f = np.dot(D, mat_rat.T).T
    return f
ff13544c28dde9025630878aed0844f56453e08e
15,358
import logging

from Bio import Entrez
from tqdm import tqdm


def query_assemblies(organism, output, quiet=False):
    """from a taxid or an organism name, download all refseq assemblies"""
    logger = logging.getLogger(__name__)
    assemblies = []
    genomes = Entrez.read(Entrez.esearch(
        "assembly", term=f"{organism}[Organism]", retmax=10000))["IdList"]
    logger.info(
        f"Found {len(genomes)} organisms in ncbi assemblies for {organism}")
    logger.info("Downloading the assemblies. Please be patient.")
    for id in tqdm(genomes, disable=quiet):
        try:
            entrez_assembly = Entrez.read(
                Entrez.esummary(
                    db="assembly",
                    id=id))["DocumentSummarySet"]["DocumentSummary"][0]
        except KeyError as e:
            entrez_assembly = Entrez.read(
                Entrez.esummary(db="assembly", id=id))["DocumentSummarySet"]
            print(entrez_assembly.keys())
            raise
        else:
            assembly = Assembly(entrez_assembly)
            output_file = f"{output}/{assembly.accession}.fasta"
            download(assembly.ftp_refseq, output_file)
            assemblies.append(assembly)
    return assemblies
48b4f4946f4368865b2934f1ac30ce650a00b5d0
15,360
def main(params):
    """Loads the file containing the collation results from Wdiff.
    Then, identifies various kinds of differences that can be observed.
    Assembles this information for each difference between the two texts."""
    print("\n== coleto: running text_analyze. ==")
    difftext = get_difftext(params["wdiffed_file"])
    analysisresults = analyse_diffs(difftext, params)
    analysissummary = save_summary(difftext, analysisresults,
                                   params["analysissummary_file"])
    save_analysis(analysisresults, params["analysis_file"])
    return analysissummary
425aa27fb7ba1ee0b1aa23c2489e8431b4a57726
15,361
def matrix_horizontal_stack(matrices: list, _deepcopy: bool = True):
    """
    stack matrices horizontally.
    :param matrices: (list of Matrix)
    :param _deepcopy: (bool)
    :return: (Matrix)
    """
    assert matrices
    for _i in range(1, len(matrices)):
        assert matrices[_i].basic_data_type() == matrices[0].basic_data_type()
        assert matrices[_i].size()[0] == matrices[0].size()[0]
    if _deepcopy:
        _matrices = deepcopy(matrices)
    else:
        _matrices = matrices
    _kernel = []
    for _i in range(_matrices[0].size()[0]):
        _kernel.append([])
        for _j in range(len(_matrices)):
            for _k in range(_matrices[_j].size()[1]):
                _kernel[_i].append(_matrices[_j].kernel[_i][_k])
    return Matrix(_kernel)
83419b0f77fc055145026b61ab8a0200172fcb62
15,362
def inds_to_invmap_as_array(inds: np.ndarray):
    """
    Returns a mapping that maps global indices to local ones as an array.

    Parameters
    ----------
    inds : numpy.ndarray
        An array of global indices.

    Returns
    -------
    numpy.ndarray
        Mapping from global to local.
    """
    res = np.zeros(inds.max() + 1, dtype=inds.dtype)
    for i in prange(len(inds)):
        res[inds[i]] = i
    return res
25dc5fa9f1225cb9da64a513ebea3dff935c3c44
15,363
def ident_keys(item, cfg):
    """Returns the list of keys in item which gives its identity

    :param item: dict with type information
    :param cfg: config options
    :returns: a list of fields for item that give it its identity
    :rtype: list
    """
    try:
        return content.ident_keys(item)
    except Exception as e:
        logger.error('Failed to extract ident keys for %s: %s', item, e)
        raise e
e911ea9bb0dbccbdf4d0ab5cdfeb5742297ae9e8
15,365
def playlists_by_date(formatter, albums):
    """Returns a single playlist of favorite tracks from albums sorted by
    decreasing review date.
    """
    sorted_tracks = []
    sorted_albums = sorted(albums, key=lambda x: x["date"], reverse=True)
    for album in sorted_albums:
        if album["picks"] is None:
            continue
        tracks = [
            {
                "artist_tag": album["artist_tag"],
                "album_tag": album["album_tag"],
                "artist": album["artist"],
                "album": album["album"],
                "track": album["tracks"][p],
            }
            for p in album["picks"]
        ]
        sorted_tracks.extend(tracks)
    return formatter.parse_list(sorted_tracks, formatter.format_track)
0448fd941c0219f6e854a15df62e4811c1cecf3e
15,366
def merge_two_sorted_array(l1, l2):
    """
    Time Complexity: O(n+m)
    Space Complexity: O(n+m)

    :param l1: List[int]
    :param l2: List[int]
    :return: List[int]
    """
    if not l1:
        return l2
    if not l2:
        return l1
    merge_list = []
    i1 = 0
    i2 = 0
    l1_len = len(l1) - 1
    l2_len = len(l2) - 1
    while i1 <= l1_len and i2 <= l2_len:
        if l1[i1] < l2[i2]:
            merge_list.append(l1[i1])
            i1 += 1
        else:
            merge_list.append(l2[i2])
            i2 += 1
    while i1 <= l1_len:
        merge_list.append(l1[i1])
        i1 += 1
    while i2 <= l2_len:
        merge_list.append(l2[i2])
        i2 += 1
    return merge_list
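A quick sanity check of the merge (inputs are made up for illustration):

merged = merge_two_sorted_array([1, 3, 5], [2, 4, 6])
assert merged == [1, 2, 3, 4, 5, 6]
assert merge_two_sorted_array([], [7, 8]) == [7, 8]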
2671d21707056741bbdc4e3590135e7e1be4c7e9
15,367
def regression_metrics(y_true, y_pred):
    """
    param1: pandas.Series/pandas.DataFrame/numpy.darray
    param2: pandas.Series/pandas.DataFrame/numpy.darray

    return: dictionary

    Accepts the actual labels from the dataset and the values predicted by the
    model, uses them to calculate the r2 score, mean absolute error, mean squared
    error and root mean squared error, adds them to the result dictionary and
    finally returns the result dictionary.
    """
    result = dict()
    result['R2'] = round(r2_score(y_true, y_pred), 3)
    result['MAE'] = round(mean_absolute_error(y_true, y_pred), 3)
    result['MSE'] = round(mean_squared_error(y_true, y_pred), 3)
    result['RMSE'] = round(mean_squared_error(y_true, y_pred, squared=False), 3)
    return result
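Assuming the metric functions come from scikit-learn (the imports are not shown in the snippet), a minimal usage sketch:

from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error

regression_metrics([3.0, 2.5, 4.0], [2.8, 2.7, 3.9])
# roughly {'R2': 0.923, 'MAE': 0.167, 'MSE': 0.03, 'RMSE': 0.173}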
085bf6d9006443c752f5b665480fce4f24e5f850
15,368
def convert_from_quint8(arr):
    """
    Dequantize a quint8 NumPy ndarray into a float one.

    :param arr: Input ndarray.
    """
    assert isinstance(arr, np.ndarray)
    assert (
        "mgb_dtype" in arr.dtype.metadata
        and arr.dtype.metadata["mgb_dtype"]["name"] == "Quantized8Asymm"
    ), "arr should be a ndarray with quint8 dtype"
    scale, zp = (
        arr.dtype.metadata["mgb_dtype"]["scale"],
        arr.dtype.metadata["mgb_dtype"]["zero_point"],
    )
    return (arr.astype(np.float32) - zp) * scale
50143a309108bf68cb65b266e2aec84090eb30e6
15,369
def classpartial(*args, **kwargs):
    """Bind arguments to a class's __init__."""
    cls, args = args[0], args[1:]

    class Partial(cls):
        __doc__ = cls.__doc__

        def __new__(self):
            return cls(*args, **kwargs)

    Partial.__name__ = cls.__name__
    return Partial
7cdc96e314a2ce3c658ecb886922df4d7bda5b99
15,370
def alphabetize_concat(input_list):
    """
    Takes a python list.
    List can contain arbitrary objects with .__str__() method
    (so string, int, float are all ok.)
    Sorts them alphanumerically.
    Returns a single string with result joined by underscores.
    """
    array = np.array(input_list, dtype=str)
    array.sort()
    return '_'.join(array)
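For illustration (made-up inputs), the values are stringified, sorted and joined:

alphabetize_concat(['banana', 10, 'Apple'])  # '10_Apple_banana'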
4bac3712696fd776b96ca8501f696c505c05e699
15,372
def kick(state, ai, ac, af, cosmology=cosmo, dtype=np.float32,
         name="Kick", **kwargs):
    """Kick the particles given the state

    Parameters
    ----------
    state: tensor
        Input state tensor of shape (3, batch_size, npart, 3)

    ai, ac, af: float
    """
    with tf.name_scope(name):
        state = tf.convert_to_tensor(state, name="state")

        fac = 1 / (ac ** 2 * E(cosmo, ac)) * (Gf(cosmo, af) - Gf(cosmo, ai)) / gf(cosmo, ac)
        indices = tf.constant([[1]])
        # indices = tf.constant([1])
        Xjl = tf.multiply(fac, state[2])
        update = tf.expand_dims(Xjl, axis=0)
        shape = state.shape
        update = tf.scatter_nd(indices, update, shape)
        state = tf.add(state, update)
        return state
e7deeca9001fccc078f2f8ab7e51ac38d72a1125
15,373
def check_similarity(var1, var2, error):
    """
    Check the similarity between two numbers, considering an error margin.

    Parameters:
    -----------
    var1: float
    var2: float
    error: float

    Returns:
    -----------
    similarity: boolean
    """
    if (var1 <= (var2 + error)) and (var1 >= (var2 - error)):
        return True
    else:
        return False
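Example calls (values made up):

check_similarity(10.02, 10.0, 0.05)  # True, within the margin
check_similarity(10.2, 10.0, 0.05)   # False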
305fd08cf4d8b1718d8560315ebf7bd03a4c7e2a
15,374
def model_type_by_code(algorithm_code):
    """
    Method which return algorithm type by algorithm code.
    algorithm_code MUST contain any 'intable' type
    :param algorithm_code: code of algorithm
    :return: algorithm type name by algorithm code or None
    """
    # invalid algorithm code case
    if algorithm_code not in ALGORITHM[ALGORITHM_CODE].keys():
        return None
    return ALGORITHM[TYPE][algorithm_code]
bcd811e200855cc026134ce05b67add807e176ca
15,375
def getCasing(word):
    """ Returns the casing of a word"""
    if len(word) == 0:
        return 'other'
    elif word.isdigit():  # Is a digit
        return 'numeric'
    elif word.islower():  # All lower case
        return 'allLower'
    elif word.isupper():  # All upper case
        return 'allUpper'
    elif word[0].isupper():  # is a title, initial char upper, then all lower
        return 'initialUpper'
    return 'other'
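Example outputs (illustrative inputs):

getCasing('2021')   # 'numeric'
getCasing('hello')  # 'allLower'
getCasing('NASA')   # 'allUpper'
getCasing('Paris')  # 'initialUpper'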
2af70926c0cbbde6310abb573ccc3ee8260b86bd
15,376
def normalize_angle(deg):
    """
    Take an angle in degrees and return it as a value between 0 and 360

    :param deg: float or int
    :return: float or int, value between 0 and 360
    """
    angle = deg
    while angle > 360:
        angle -= 360
    while angle < 0:
        angle += 360
    return angle
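With the corrected wrap-around condition (the second loop now adds 360 only while the angle is negative), for example:

normalize_angle(370)  # 10
normalize_angle(-45)  # 315
normalize_angle(180)  # 180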
cd4788819bbc8fce17ca7c7b1b320499a3893dee
15,377
from datetime import datetime

from dateutil import tz
import math


def current_global_irradiance(site_properties, solar_properties, timestamp):
    """Calculate the clear-sky POA (plane of array) irradiance for a specific time (seconds timestamp)."""
    dt = datetime.fromtimestamp(timestamp=timestamp, tz=tz.gettz(site_properties.tz))
    n = dt.timetuple().tm_yday

    sigma = math.radians(solar_properties.tilt)
    rho = solar_properties.get('rho', 0.0)

    C = 0.095 + 0.04 * math.sin(math.radians((n - 100) / 365))
    sin_sigma = math.sin(sigma)
    cos_sigma = math.cos(sigma)

    altitude = get_altitude(latitude_deg=site_properties.latitude, longitude_deg=site_properties.longitude, when=dt)
    beta = math.radians(altitude)
    sin_beta = math.sin(beta)
    cos_beta = math.cos(beta)

    azimuth = get_azimuth(latitude_deg=site_properties.latitude, longitude_deg=site_properties.longitude, when=dt)
    phi_s = math.radians(180 - azimuth)
    phi_c = math.radians(180 - solar_properties.azimuth)
    phi = phi_s - phi_c
    cos_phi = math.cos(phi)

    # Workaround for a quirk of pysolar since the airmass for a sun elevation (altitude) of zero
    # is infinite and very small numbers close to zero result in NaNs being returned rather
    # than zero
    if altitude < 0.0:
        altitude = -1.0

    cos_theta = cos_beta * cos_phi * sin_sigma + sin_beta * cos_sigma
    ib = get_radiation_direct(when=dt, altitude_deg=altitude)
    ibc = ib * cos_theta
    idc = C * ib * (1 + cos_sigma) / 2
    irc = rho * ib * (sin_beta + C) * ((1 - cos_sigma) / 2)
    igc = ibc + idc + irc

    # If we still get a bad result just return 0
    if math.isnan(igc):
        igc = 0.0
    return igc
d8e180b9768d5cf6c3064a7d30a2e7d918307366
15,378
from datetime import datetime def date_formatting(format_date, date_selected): """Date formatting management. Arguments: format_date {str} -- Date date_selected {str} -- Date user input Returns: str -- formatted date """ if len(date_selected) == 19: date_selected = datetime.strptime( date_selected, "%d/%m/%Y %H:%M:%S") elif len(date_selected) == 10: date_selected = datetime.strptime(date_selected, "%d/%m/%Y") try: if "yyyy" in format_date: format_date = format_date.replace( "yyyy", date_selected.strftime("%Y")) elif "yy" in format_date: format_date = format_date.replace( "yy", date_selected.strftime("%y")) if "mm" in format_date: format_date = format_date.replace( "mm", date_selected.strftime("%m")) if "dd" in format_date: format_date = format_date.replace( "dd", date_selected.strftime("%d")) if "hh" in format_date: format_date = format_date.replace( "hh", date_selected.strftime("%H")) if "nn" in format_date: format_date = format_date.replace( "nn", date_selected.strftime("%M")) if "ss" in format_date: format_date = format_date.replace( "ss", date_selected.strftime("%S")) return (format_date, None) except AttributeError: return ( None, _("Date entry error, format is dd/mm/yyyy or dd/mm/yyyy hh:mm:ss") )
48ff4cb59de3e8f75238420d8d211812148db34c
15,379
def parse_activity_from_metadata(metadata):
    """Parse activity name from metadata

    Args:
        metadata: List of metadata from log file

    Returns
        Activity name from metadata"""
    return _parse_type_metadata(metadata)[1]
c583d34a8cb0db8ddf26ff79d1a0885aab5c6af9
15,380
def mask_data_by_FeatureMask(eopatch, data_da, mask):
    """
    Creates a copy of the array and inserts 0 where data is masked.

    :param data_da: dataarray
    :type data_da: xarray.DataArray
    :return: dataarray
    :rtype: xarray.DataArray
    """
    mask = eopatch[FeatureType.MASK][mask]
    if len(data_da.values.shape) == 4:
        mask = np.repeat(mask, data_da.values.shape[-1], -1)
    else:
        mask = np.squeeze(mask, axis=-1)
    data_da = data_da.copy()
    data_da.values[~mask] = 0
    return data_da
6639cc2cbf4956edbd637f07308fb33f00fcb8af
15,381
def makeFields(prefix, n):
    """Generate a list of field names with this prefix up to n"""
    return [prefix + str(i) for i in range(1, n + 1)]
435571557ef556b99c4729500f372cc5c9180052
15,382
def process_input_dir(input_dir):
    """
    Find all image file paths in subdirs, convert to str and extract labels from subdir names
    :param input_dir Path object for parent directory e.g. train
    :returns: list of file paths as str, list of image labels as str
    """
    file_paths = list(input_dir.rglob('*.png'))
    file_path_strings = [str(path) for path in file_paths]
    label_strings = [path.parent.name for path in file_paths]
    return file_path_strings, label_strings
569d4539368888c91a12538156c611d311da03b6
15,383
def fak(n):
    """ Computes the factorial of the integer n. """
    erg = 1
    for i in range(2, n + 1):
        erg *= i
    return erg
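For example:

fak(5)  # 120
fak(0)  # 1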
9df6f4fa912a25535369f4deb0a06baef8e6bdcc
15,384
import re


def create_sequences_sonnets(sonnets):
    """
    This creates sequences as done in Homework 6, by mapping each word to an
    integer in order to create a series of sequences. This function specifically
    makes entire sonnets into individual sequences and returns the list of
    processed sonnets back to be used in the basic HMM notebook for generation.
    """
    sequences = []
    obs_counter = 0
    obs_map = {}
    for sonnet in sonnets:
        sequence = []
        for i, line in enumerate(sonnet):
            split = line.split()
            for word in split:
                word = re.sub(r'[^\w]', '', word).lower()
                if word not in obs_map:
                    # Add unique words to the observations map.
                    obs_map[word] = obs_counter
                    obs_counter += 1
                # Add the encoded word.
                sequence.append(obs_map[word])
        # Add the encoded sequence.
        sequences.append(sequence)
    return obs_map, sequences
56087140fe5ed8934b64a18567b4e9023ddc6f59
15,386
def l_to_rgb(img_l):
    """
    Convert a numpy array (l channel) into an rgb image
    :param img_l:
    :return:
    """
    lab = np.squeeze(255 * (img_l + 1) / 2)
    return color.gray2rgb(lab) / 255
362a1ef926e780b311902c3637a5299afbce4c6a
15,388
def blockchain_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp):
    """
    Checks if current time in millis exceeds the time specified in condition
    """
    try:
        expected_mili_time = int_from_bytes(condition.vars[0])
    except ValueError:
        return Err.INVALID_CONDITION

    current_time = timestamp
    if current_time <= expected_mili_time:
        return Err.ASSERT_SECONDS_ABSOLUTE_FAILED
    return None
7b3ce8801239a524b150c9191b49eb24575b3fbb
15,389
def show_aip(mets_file):
    """Show a METS file"""
    mets_instance = METS.query.filter_by(metsfile='%s' % (mets_file)).first()
    level = mets_instance.level
    original_files = mets_instance.metslist
    dcmetadata = mets_instance.dcmetadata
    divs = mets_instance.divs
    filecount = mets_instance.originalfilecount
    aip_uuid = mets_file
    for element in dcmetadata:
        tag = element.get('element')
        if tag and tag == 'ark identifier':
            aip_uuid = element['value']
            break
    return render_template(
        'aip.html', original_files=original_files,
        mets_file=mets_file, level=level, dcmetadata=dcmetadata, divs=divs,
        filecount=filecount, aip_uuid=aip_uuid
    )
606dfd4dba45fe8dae918f795f27bfeddb0fcd70
15,390
def testCartesianEpehemeris( ephemeris_actual, ephemeris_desired, position_tol=1*u.m, velocity_tol=(1*u.mm/u.s), magnitude=True, raise_error=True ): """ Tests that the two sets of cartesian ephemeris are within the desired absolute tolerances of each other. The absolute difference is calculated as |actual - desired|. Parameters ---------- ephemeris_actual : `~numpy.ndarray` (N, 3) or (N, 6) Array of ephemeris to compare to the desired ephemeris, may optionally include velocities. Assumed units for: positions : AU, velocities : AU per day ephemeris_desired : `~numpy.ndarray` (N, 3) or (N, 6) Array of desired ephemeris to which to compare the actual ephemeris to, may optionally include velocities. Assumed units for: positions : AU, velocities : AU per day position_tol : `~astropy.units.quantity.Quantity` (1) Absolute tolerance positions need to satisfy (x, y, z, r). velocity_tol : `~astropy.units.quantity.Quantity` (1) Absolute tolerance velocity need to satisfy. (vx, vy, vz, v). magnitude : bool Test the magnitude of the position difference and velocity difference vectors as opposed to testing per individual coordinate. Raises ------ AssertionError: If |ephemeris_actual - ephemeris_desired| > tolerance. ValueError: If ephemeris shapes are not equal. ValueError: If coordinate dimensions are not one of 3 or 6. Returns ------- None """ any_error = False error_message = "\n" differences = {} statistics = {} if ephemeris_actual.shape != ephemeris_desired.shape: err = ( "The shapes of the actual and desired ephemeris should be the same." ) raise ValueError(err) N, D = ephemeris_actual.shape if D not in (3, 6): err = ( "The number of coordinate dimensions should be one of 3 or 6.\n" "If 3 then the expected inputs are x, y, z positions in AU.\n" "If 6 then the expected inputs are x, y, z postions in AU\n" "and vx, vy, vz velocities in AU per day." ) raise ValueError(err) # Test positions if magnitude: names = ["r"] else: names = ["x", "y", "z"] diff, stats, error = _evaluateDifference( ephemeris_actual[:, :3], ephemeris_desired[:, :3], u.AU, position_tol, magnitude=magnitude ) for i, n in enumerate(names): differences[n] = diff[:, i] statistics[n] = {k : v[i] for k, v in stats.items()} # If any of the differences between desired and actual are # greater than the allowed tolerance set any_error to True # and build the error message if error: any_error = True error_message += "{} difference (|actual - desired|) is not within {}.\n".format(names, position_tol) error_message = __statsToErrorMessage( stats, error_message ) if D == 6: # Test velocities if magnitude: names = ["v"] else: names = ["vx", "vy", "vz"] diff, stats, error = _evaluateDifference( ephemeris_actual[:, 3:], ephemeris_desired[:, 3:], (u.AU / u.d), velocity_tol, magnitude=magnitude ) for i, n in enumerate(names): differences[n] = diff[:, i] statistics[n] = {k : v[i] for k, v in stats.items()} # If any of the differences between desired and actual are # greater than the allowed tolerance set any_error to True # and build the error message if error: any_error = True error_message += "{} difference (|actual - desired|) is not within {}.\n".format(names, velocity_tol) error_message = __statsToErrorMessage( stats, error_message ) if any_error and raise_error: raise AssertionError(error_message) return differences, statistics, error
5ab38140ed7ff446f0f961e147e7d8af3e6c97e0
15,391
import requests


def get_longitude_latitude(city_info, station):
    """
    Query the longitude/latitude of a metro station via the AMap (Gaode Maps)
    geocoding API; the key below has to be requested on the AMap website:
    https://lbs.amap.com/api/webservice/guide/api/georegeo
    :param city_info: the city's metro system, e.g. "广州市地铁" (Guangzhou metro)
    :param station: the metro station name, e.g. "珠江新城站" (Zhujiang New Town station)
    :return: longitude and latitude
    """
    addr = city_info + station
    print('* Location to look up: ' + addr)
    parameters = {'address': addr, 'key': '98a3444618af14c0f20c601f5a442000'}
    base = 'https://restapi.amap.com/v3/geocode/geo'
    # 10 s timeout; responses can be slower when a global proxy is enabled
    response = requests.get(base, parameters, timeout=10)
    if response.status_code == 200:
        answer = response.json()
        x, y = answer['geocodes'][0]['location'].split(',')
        coor = (float(x), float(y))
        print('* Coordinates of ' + station + ':', coor)
        return coor
    else:
        return (None, None)
9b0132702e14af9dec1ce65724139af0188b14a0
15,392
def make_resource_object(resource_type, credentials_path): """Creates and configures the service object for operating on resources. Args: resource_type: [string] The Google API resource type to operate on. credentials_path: [string] Path to credentials file, or none for default. """ try: api_name, resource = resource_type.split('.', 1) except ValueError: raise ValueError('resource_type "{0}" is not in form <api>.<resource>' .format(resource_type)) version = determine_version(api_name) service = make_service(api_name, version, credentials_path) path = resource.split('.') node = service for elem in path: try: node = getattr(node, elem)() except AttributeError: path_str = '.'.join(path[0:path.index(elem)]) raise AttributeError('"{0}{1}" has no attribute "{2}"'.format( api_name, '.' + path_str if path_str else '', elem)) return node
018cac83513b61c8bc99e06a07ded004685016b2
15,393
def AreBenchmarkResultsDifferent(result_dict_1, result_dict_2, test=MANN, significance_level=0.05): """Runs the given test on the results of each metric in the benchmarks. Checks if the dicts have been created from the same benchmark, i.e. if metric names match (e.g. first_non_empty_paint_time). Then runs the specified statistical test on each metric's samples to find if they vary significantly. Args: result_dict_1: Benchmark result dict of format {metric: list of values}. result_dict_2: Benchmark result dict of format {metric: list of values}. test: Statistical test that is used. significance_level: The significance level the p-value is compared against. Returns: test_outcome_dict: Format {metric: (bool is_different, p-value)}. """ AssertThatKeysMatch(result_dict_1, result_dict_2) test_outcome_dict = {} for metric in result_dict_1: is_different, p_value = AreSamplesDifferent(result_dict_1[metric], result_dict_2[metric], test, significance_level) test_outcome_dict[metric] = (is_different, p_value) return test_outcome_dict
d9e1eaa16c2329511dd3e1fc7e5cad63cab0c208
15,394
def _load_pascal_annotation(image_index): """ Load image and bounding boxes info from XML file in the PASCAL VOC format. """ #image_index = _load_image_set_index() classes = ('__background__', # always index 0 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') num_classes = len(classes) _class_to_ind = dict(zip(classes, xrange(num_classes))) _data_path = "/var/services/homes/kchakka/py-faster-rcnn/VOCdevkit/VOC2007" image_index = [image_index] for index in image_index: filename = os.path.join(_data_path, 'Annotations', index + '.xml') tree = ET.parse(filename) objs = tree.findall('object') if True: # Exclude the samples labeled as difficult non_diff_objs = [ obj for obj in objs if int(obj.find('difficult').text) == 0] # if len(non_diff_objs) != len(objs): # print 'Removed {} difficult objects'.format( # len(objs) - len(non_diff_objs)) objs = non_diff_objs num_objs = len(objs) boxes = np.zeros((num_objs, 4), dtype=np.uint16) gt_classes = np.zeros((num_objs), dtype=np.int32) ## # commented below by chaitu ## #overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32) ## # commented above by chaitu ## # "Seg" area for pascal is just the box area seg_areas = np.zeros((num_objs), dtype=np.float32) # Load object bounding boxes into a data frame. for ix, obj in enumerate(objs): bbox = obj.find('bndbox') # Make pixel indexes 0-based x1 = float(bbox.find('xmin').text) - 1 y1 = float(bbox.find('ymin').text) - 1 x2 = float(bbox.find('xmax').text) - 1 y2 = float(bbox.find('ymax').text) - 1 cls = _class_to_ind[obj.find('name').text.lower().strip()] boxes[ix, :] = [x1, y1, x2, y2] gt_classes[ix] = cls #overlaps[ix, cls] = 1.0 #seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1) #overlaps = scipy.sparse.csr_matrix(overlaps) return {'boxes' : boxes, 'gt_classes' : gt_classes}
e656d78003d17ca7b8dae204ed9ea2812e5871dc
15,395
def create_global_var(shape, value, dtype, persistable=False, force_cpu=False, name=None): """ This function creates a new tensor variable with value in the global block(block 0). Parameters: shape (list of int): Shape of the variable value (float): The value of the variable. The new created variable will be filled with it. dtype (str): Data type of the variable persistable (bool, optional): If this variable is persistable. Default: False force_cpu (bool, optional): Force this variable to be on CPU. Default: False name (str, optional): For detailed information, please refer to :ref:`api_guide_Name` . Usually name is no need to set and None by default. Returns: Variable: The created Variable Examples: .. code-block:: python import paddle.fluid as fluid import paddle.fluid.layers as layers var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32', persistable=True, force_cpu=True, name='new_var') """ helper = LayerHelper("global_var", **locals()) var = helper.create_global_variable( dtype=dtype, shape=shape, persistable=persistable, name=name, stop_gradient=True) helper.set_variable_initializer( var, initializer=Constant( value=float(value), force_cpu=force_cpu)) return var
0d64b4bd15c97b2f32058bfe298249c3e528ec16
15,396
def select(population, fitness_val):
    """
    Selection step of a genetic algorithm, using roulette-wheel selection.
    :param population: genotypes of the population
    :param fitness_val: fitness values of the population
    :return selected_pop: the selected population
    """
    f_sum = sum(fitness_val)
    cumulative = []
    for i in range(1, len(fitness_val) + 1):
        cumulative.append(sum(fitness_val[:i]) / f_sum)
    selected_pop = []
    for i in range(len(fitness_val)):
        rand = np.random.rand()
        prand = [(c - rand) for c in cumulative]
        j = 0
        while prand[j] < 0:
            j = j + 1
        selected_pop.append(population[j])
    return selected_pop
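A toy run of the roulette-wheel selection (population and fitness values are invented; the output is random):

import numpy as np

population = ['0101', '1100', '0011']
fitness_val = [1.0, 3.0, 2.0]
select(population, fitness_val)  # e.g. ['1100', '0011', '1100'] -- fitter genotypes are drawn more often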
dd3529eaf6ac35801c589152078e7fe1dd0ed9fe
15,397
def _nan_helper(y, nan=False, inf=False, undef=None): """ Helper to handle indices and logical indices of NaNs, Infs or undefs. Definition ---------- def _nan_helper(y, nan=False, inf=False, undef=None): Input ----- y 1d numpy array with possible missing values Optional Input -------------- At least one of the following has to be given nan if True, check only for NaN and not Inf. inf if True, check only for Inf and not NaN. undef if given then check for undef value rather than NaN and Inf. Output ------ ind logical indices of missing values find function, with signature indices = find(ind), to convert logical indices of NaNs to 'equivalent' indices Examples -------- >>> # linear interpolation of NaNs >>> y = np.array([1, np.nan, 3]) >>> nans, z = _nan_helper(y, nan=True) >>> y[nans] = np.interp(z(nans), z(~nans), y[~nans]) History ------- Written, Matthias Cuntz, Jul 2013 - modified from http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array Modified, Matthias Cuntz, Apr 2014 - assert Matthias Cuntz, Sep 2021 - code refactoring """ assert not ((not nan) & (not inf) & (undef is None)), ( 'at least one of nan, inf or undef has to be given.') out = np.zeros(y.shape, dtype=bool) if nan: out = out | np.isnan(y) if inf: out = out | np.isinf(y) if undef is not None: out = out | (y == undef) return out, lambda ind: ind.nonzero()[0]
742433c2140f4827f11e79c691e3a16be124ef99
15,398
def unpacking(block_dets, *, repeat=False, **_kwargs): """ Identify name unpacking e.g. x, y = coord """ unpacked_els = block_dets.element.xpath(ASSIGN_UNPACKING_XPATH) if not unpacked_els: return None title = layout("""\ ### Name unpacking """) summary_bits = [] for unpacked_el in unpacked_els: unpacked_names = [ name_el.get('id') for name_el in unpacked_el.xpath('elts/Name')] if not unpacked_names: continue nice_str_list = gen_utils.get_nice_str_list(unpacked_names, quoter='`') summary_bits.append(layout(f"""\ Your code uses unpacking to assign names {nice_str_list} """)) summary = ''.join(summary_bits) if not repeat: unpacking_msg = get_unpacking_msg() else: unpacking_msg = '' message = { conf.Level.BRIEF: title + summary, conf.Level.EXTRA: unpacking_msg, } return message
512238569efe4c17ef7afd3a26e2bc17a0f77cfb
15,399
import tempfile import gzip def load_training_data(): """Loads the Fashion-MNIST dataset. Returns: Tuple of Numpy arrays: `(x_train, y_train)`. License: The copyright for Fashion-MNIST is held by Zalando SE. Fashion-MNIST is licensed under the [MIT license]( https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE). """ download_directory = tempfile.mkdtemp() base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/" files = [ "train-labels-idx1-ubyte.gz", "train-images-idx3-ubyte.gz", ] paths = [] for fname in files: paths.append(get_file(fname, origin=base + fname, cache_subdir=download_directory)) with gzip.open(paths[0], "rb") as lbpath: y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8) with gzip.open(paths[1], "rb") as imgpath: x_train = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28) return x_train, y_train
be2a5d84c8ef0cd1aa62564a3fd3882af49344ca
15,400
def set_difference(tree, context, attribs):
    """A meta-feature that will produce the set difference of two boolean features
    (will have keys set to 1 only for those features that occur in the first set
    but not in the second).

    @rtype: dict
    @return: dictionary with keys for key occurring with the first feature but not the second, and keys equal to 1
    """
    ret = {}
    for key, val in context['feats'][attribs[0]].items():
        if key not in context['feats'][attribs[1]]:
            ret[key] = val
    return ret
7887f619e601624843c6507e7b93442020ecf1ea
15,401
def create_STATES(us_states_location):
    """
    Create shapely files of states.

    Args:
        us_states_location (str): Directory location of states shapefiles.

    Returns:
        States data as cartopy feature for plotting.
    """
    proj = ccrs.LambertConformal(central_latitude=25,
                                 central_longitude=265,
                                 standard_parallels=(25, 25))
    reader = shpreader.Reader(
        f'{us_states_location}/ne_50m_admin_1_states_provinces_lines.shp')
    states = list(reader.geometries())
    STATES = cfeature.ShapelyFeature(states, ccrs.PlateCarree())
    return STATES
fe2b48f465ee7e63bb4dfa91e2c9917eeeab082f
15,402
def get_name_by_url(url):
    """Returns the name of a stock from the instrument url. Should be located at
    ``https://api.robinhood.com/instruments/<id>`` where <id> is the id of the stock.

    :param url: The url of the stock as a string.
    :type url: str
    :returns: Returns the simple name of the stock. If the simple name does not exist then returns the full name.
    """
    data = helper.request_get(url)
    if not data:
        return(None)
    # If stock doesn't have a simple name attribute then get the full name.
    filter = helper.filter(data, info='simple_name')
    if not filter or filter == "":
        filter = helper.filter(data, info='name')
    return(filter)
c90e453bb1576d8c93a3388ab2cfe0d9f63d550c
15,403
def recursively_replace(original, replacements, include_original_keys=False): """Clones an iterable and recursively replaces specific values.""" # If this function would be called recursively, the parameters 'replacements' and 'include_original_keys' would have to be # passed each time. Therefore, a helper function with a reduced parameter list is used for the recursion, which nevertheless # can access the said parameters. def _recursion_helper(obj): #Determine if the object should be replaced. If it is not hashable, the search will throw a TypeError. try: if obj in replacements: return replacements[obj] except TypeError: pass # An iterable is recursively processed depending on its class. if hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)): if isinstance(obj, dict): contents = {} for key, val in obj.items(): new_key = _recursion_helper(key) if include_original_keys else key new_val = _recursion_helper(val) contents[new_key] = new_val else: contents = [] for element in obj: new_element = _recursion_helper(element) contents.append(new_element) # Use the same class as the original. return obj.__class__(contents) # If it is not replaced and it is not an iterable, return it. return obj return _recursion_helper(original)
aee393b09c74eb6cb1417d017d7004ac69bb3543
15,404
from typing import get_origin
from typing import get_args


def destructure(hint: t.Any) -> t.Tuple[t.Any, t.Tuple[t.Any, ...]]:
    """Return type hint origin and args."""
    return get_origin(hint), get_args(hint)
451d1fd5a3277f882b9645dcdc78b2accc4d56a2
15,405
def f_x_pbe(x, kappa=0.804, mu=0.2195149727645171):
    """Evaluates PBE exchange enhancement factor.

    10.1103/PhysRevLett.77.3865 Eq. 14.
    F_X(x) = 1 + kappa ( 1 - 1 / (1 + mu s^2)/kappa )
    kappa, mu = 0.804, 0.2195149727645171 (PBE values)
    s = c x, c = 1 / (2 (3pi^2)^(1/3) )

    Args:
        x: Float numpy array with shape (num_grids,), the reduced density gradient.
        kappa: Float, parameter.
        mu: Float, parameter.

    Returns:
        Float numpy array with shape (num_grids,), the PBE exchange enhancement factor.
    """
    c = 1 / (2 * (3 * jnp.pi ** 2) ** (1 / 3))
    s = c * x
    f_x = 1 + kappa - kappa / (1 + mu * s ** 2 / kappa)
    return f_x
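A quick check of the limiting behaviour, assuming jnp is jax.numpy: the enhancement factor is exactly 1 at x = 0 and approaches 1 + kappa for large reduced gradients.

import jax.numpy as jnp

f_x_pbe(jnp.array([0.0]))  # [1.]
f_x_pbe(jnp.array([1e6]))  # approaches 1.804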
9933a379b659b38082aa91d4498a399a43b2e20c
15,406
def index():
    """ if no browser and no platform: it's a CLI request. """
    if g.client['browser'] is None or g.client['platform'] is None:
        string = "hello from API {} -- in CLI Mode"
        msg = {'message': string.format(versions[0]), 'status': 'OK', 'mode': 200}
        r = Response(j.output(msg))
        r.headers['Content-type'] = 'application/json; charset=utf-8'
        return r, 200
    """ ELSE: it's obviously on a web browser """
    string = "<h1>hello from API v1 | {} | {} | {} | {}</h1>"
    return string.format(g.client['browser'],
                         g.client['platform'],
                         g.client['version'],
                         g.client['language'])
d497ce0cf12bbe914ab3147080c05a4441e9d39b
15,407
def attention_resnet20(**kwargs):
    """Constructs a ResNet-20 model.
    """
    model = CifarAttentionResNet(CifarAttentionBasicBlock, 3, **kwargs)
    return model
e44061a9ad42ceea26aa263a5169ffec62857f90
15,409
import re


def get_basename(name):
    """ [pm/cmds] Get the base name from an object name """
    fullpath = get_fullpath(name)
    return re.sub(r"^.*\|", "", fullpath)
a18cd5ac563dd37c53bdf6b0c1ea6a55efa7a221
15,410
from typing import Optional


def get_live_token(resource_uri: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLiveTokenResult:
    """
    The response to a live token query.

    :param str resource_uri: The identifier of the resource.
    """
    __args__ = dict()
    __args__['resourceUri'] = resource_uri
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:insights/v20200602preview:getLiveToken', __args__, opts=opts, typ=GetLiveTokenResult).value

    return AwaitableGetLiveTokenResult(
        live_token=__ret__.live_token)
b82d799ff261994643c807f4d1b947ba591d6a14
15,411
def load_subspace_vectors(embd, subspace_words):
    """Loads all word vectors for the particular subspace in the list of words as a matrix
    Arguments
        embd : Dictionary of word-to-embedding for all words
        subspace_words : List of words representing a particular subspace
    Returns
        subspace_embd_mat : Matrix of word vectors stored row-wise
    """
    subspace_embd_mat = []
    ind = 0
    for word in subspace_words:
        if word in embd:
            subspace_embd_mat.append(embd[word])
            ind = ind + 1
    return subspace_embd_mat
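Illustrative call with a toy embedding dictionary (values made up); words missing from embd are silently skipped:

embd = {'doctor': [0.1, 0.3], 'nurse': [0.2, 0.4], 'car': [0.9, 0.8]}
load_subspace_vectors(embd, ['doctor', 'nurse', 'unknown'])
# [[0.1, 0.3], [0.2, 0.4]]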
5eb1db8be8801cf6b1fe294a6f2c93570e9a9fe1
15,412
from datetime import datetime

from werkzeug.utils import secure_filename


def _safe_filename(filename):
    """
    Generates a safe filename that is unlikely to collide with existing objects
    in Google Cloud Storage.

    ``filename.ext`` is transformed into ``filename-YYYY-MM-DD-HHMMSS.ext``
    """
    filename = secure_filename(filename)
    date = datetime.utcnow().strftime("%Y-%m-%d-%H%M%S")
    basename, extension = filename.rsplit('.', 1)
    return "{0}-{1}.{2}".format(basename, date, extension)
63e55ccbf29505868efe702cd7c25cdfb0e6ad2f
15,413
from typing import List
from typing import Tuple


def get_raw(contents: List[str]) -> Tuple[sections.Raw, List[str]]:
    """Parse the \\*RAW section"""
    raw_dict, rest = get_section(contents, "raw")
    remarks = raw_dict[REMARKS] if REMARKS in raw_dict else ""
    raw_info = sections.Raw(
        remarks=remarks,
        raw=raw_dict,
    )
    return raw_info, rest
005af62533c129d39b7af1524b00a48a9113adde
15,414
import itertools def transfers_from_stops( stops, stop_times, transfer_type=2, trips=False, links_from_stop_times_kwargs={'max_shortcut': False, 'stop_id': 'stop_id'}, euclidean_kwargs={'latitude': 'stop_lat', 'longitude': 'stop_lon'}, seek_traffic_redundant_paths=True, seek_transfer_redundant_paths=True, max_distance=800, euclidean_speed=5 * 1000 / 3600 / 1.4, geometry=False, gtfs_only=False ): """ Builds a relevant footpath table from the stop_times and stops tables of a transitfeed. The trips table may be used to spot the "dominated" footpaths that offer no new connection (compared to the pool of stops), for example: * line A stops at station i and station k; * line B stops at station j and station k; * no other line stops at a or b; * the footpath F goes from i to j; * In our understanding : F is dominated by the station k :param stops: DataFrame consistent with the GTFS table "trips" :param stop_times: DataFrame consistent with the GTFS table "trips" :param transfer_type: how to fill the 'transfer_type' column of the feed :param trips: DataFrame consistent with the GTFS table "trips" :param links_from_stop_times_kwargs: kwargs to pass to transitlinks.links_from_stop_times, called on stop_times :param euclidean_kwargs: kwargs to pass to skims.euclidean (the name of the latitude and longitude column) :param seek_traffic_redundant_paths: if True, only the footpaths that do not belong to the transit links are kept. the transit links are built from the stop times using transitlinks.links_from_stop_times. The maximum number of transit links to concatenate in order to look for redundancies may be passed in the kwargs ('max_shortcut'). For example, if max_shortcut = 5: the footpath that can be avoided be taking a five stations ride will be tagged as "dominated". :param seek_transfer_redundant_paths: if True, the "trips" table is used to look for the dominated footpaths :param max_distance: maximum distance of the footpaths (meters as the crows fly) :param euclidean_speed: speed as the crows fly on the footpaths. 
:param geometry: If True, a geometry column (shapely.geometry.linestring.Linestring object) is added to the table :return: footpaths data with optional "dominated" tag """ stop_id = links_from_stop_times_kwargs['stop_id'] origin = stop_id + '_origin' destination = stop_id + '_destination' euclidean = skims.euclidean(stops.set_index(stop_id), **euclidean_kwargs) euclidean.reset_index(drop=True, inplace=True) euclidean['tuple'] = pd.Series(list(zip(list(euclidean['origin']), list(euclidean['destination'])))) short_enough = euclidean[euclidean['euclidean_distance'] < max_distance] short_enough = short_enough[short_enough['origin'] != short_enough['destination']] footpath_tuples = {tuple(path) for path in short_enough[['origin', 'destination']].values.tolist()} paths = euclidean[euclidean['tuple'].isin(footpath_tuples)] paths['dominated'] = False _stop_times = stop_times if stop_id in stops.columns and stop_id not in stop_times.columns: _stop_times = pd.merge( stop_times, stops[['id', stop_id]], left_on='stop_id', right_on='id', suffixes=['', '_merged']) if seek_traffic_redundant_paths: links = feed_links.link_from_stop_times(_stop_times, **links_from_stop_times_kwargs).reset_index() in_links_tuples = {tuple(path) for path in links[[origin, destination]].values.tolist()} paths['trafic_dominated'] = paths['tuple'].isin(in_links_tuples) paths['dominated'] = paths['dominated'] | paths['trafic_dominated'] stop_routes = {} stop_set = set(_stop_times[stop_id]) # if two routes are connected by several footpaths we only keep the shortest one # if routes a and b are connected to route c, d and e by several footpaths : # we keep only the shortest one that does the job. if trips is not False: grouped = pd.merge(_stop_times, trips, left_on='trip_id', right_on='id').groupby(stop_id)['route_id'] stop_routes = grouped.aggregate(lambda x: frozenset(x)).to_dict() def get_routes(row): return tuple((stop_routes[row['origin']], stop_routes[row['destination']])) paths = paths[(paths['origin'].isin(stop_set) & paths['destination'].isin(stop_set))] paths['trips'] = paths.apply(get_routes, axis=1) paths = paths.sort('euclidean_distance').groupby(['trips', 'dominated'], as_index=False).first() paths['min_transfer_time'] = paths['euclidean_distance'] / euclidean_speed paths = paths[paths['origin'] != paths['destination']] if seek_transfer_redundant_paths: paths['frozen'] = paths['trips'].apply(lambda a: frozenset(a[0]).union(frozenset(a[1]))) max_length = max([len(f) for f in list(paths['frozen'])]) to_beat = [] for length in range(max_length + 1): for stop in stop_routes.values(): for c in list(itertools.combinations(stop, length)): to_beat.append(frozenset(c)) to_beat = set(to_beat) paths['transfer_dominated'] = paths['frozen'].apply(lambda f: f in to_beat) paths['dominated'] = paths['dominated'] | paths['transfer_dominated'] if geometry and not gtfs_only: paths['geometry'] = paths.apply(linestring_geometry, axis=1) paths['from_stop_id'] = paths['origin'] paths['to_stop_id'] = paths['destination'] paths['transfer_type'] = transfer_type if gtfs_only: paths = paths[~paths['dominated']] paths = paths[['from_stop_id', 'to_stop_id', 'transfer_type', 'min_transfer_time']] return paths
9e9456440b3dc6cbdd367f9ea99f85559e3343cd
15,415
from pypy.module.cpyext.tupleobject import PyTuple_GetItem from pypy.module.cpyext.listobject import PyList_GetItem def PySequence_ITEM(space, w_obj, i): """Return the ith element of o or NULL on failure. Macro form of PySequence_GetItem() but without checking that PySequence_Check(o)() is true and without adjustment for negative indices. This function used an int type for i. This might require changes in your code for properly supporting 64-bit systems.""" # XXX we should call Py*_GET_ITEM() instead of Py*_GetItem() # from here, but we cannot because we are also called from # PySequence_GetItem() py_obj = as_pyobj(space, w_obj) if isinstance(w_obj, tupleobject.W_TupleObject): py_res = PyTuple_GetItem(space, py_obj, i) incref(space, py_res) keepalive_until_here(w_obj) return py_res if isinstance(w_obj, W_ListObject): py_res = PyList_GetItem(space, py_obj, i) incref(space, py_res) keepalive_until_here(w_obj) return py_res as_sequence = py_obj.c_ob_type.c_tp_as_sequence if as_sequence and as_sequence.c_sq_item: ret = generic_cpy_call(space, as_sequence.c_sq_item, w_obj, i) return make_ref(space, ret) w_ret = space.getitem(w_obj, space.newint(i)) return make_ref(space, w_ret)
8a3bb364d6d2e96681bb89b170b8517e09eb719c
15,416
import codecs
import binascii


def decode_hex(data):
    """Decodes a hex encoded string into raw bytes."""
    try:
        return codecs.decode(data, 'hex_codec')
    except binascii.Error:
        raise TypeError()
115e89d6f80a6fc535f44d92f610a6312edf6daf
15,417
def crop_bbox_by_coords(bbox, crop_coords, crop_height, crop_width, rows, cols):
    """Crop a bounding box using the provided coordinates of bottom-left and top-right corners
    in pixels and the required height and width of the crop.
    """
    bbox = denormalize_bbox(bbox, rows, cols)
    x_min, y_min, x_max, y_max = bbox
    x1, y1, x2, y2 = crop_coords
    cropped_bbox = [x_min - x1, y_min - y1, x_max - x1, y_max - y1]
    return normalize_bbox(cropped_bbox, crop_height, crop_width)
2cd53c51f6a80034630a53a43678d22f1073e7f4
15,418
def computeDateGranularity(ldf):
    """
    Given a ldf, inspects temporal column and finds out the granularity of dates.

    Example
    ----------
    ['2018-01-01', '2019-01-02', '2018-01-03'] -> "day"
    ['2018-01-01', '2019-02-01', '2018-03-01'] -> "month"
    ['2018-01-01', '2019-01-01', '2020-01-01'] -> "year"

    Parameters
    ----------
    ldf : lux.luxDataFrame.LuxDataFrame
        LuxDataFrame with a temporal field

    Returns
    -------
    field: str
        A str specifying the granularity of dates for the inspected temporal column
    """
    dateFields = ["day", "month", "year"]
    if ldf.dataType["temporal"]:
        # assumes only one temporal column; may need to change this function to
        # receive multiple temporal columns in the future
        dateColumn = ldf[ldf.dataType["temporal"][0]]
        dateIndex = pd.DatetimeIndex(dateColumn)
        for field in dateFields:
            # can be changed to sum(getattr(dateIndex, field)) != 0
            if hasattr(dateIndex, field) and len(getattr(dateIndex, field).unique()) != 1:
                return field
e998a144b22c6e65599f1f6b5cc2ec893310e3cc
15,419
from typing import Optional from typing import Tuple import sqlite3 def next_pending_location(user_id: int, current_coords: Optional[Tuple[int, int]] = None) -> Optional[Tuple[int, int]]: """ Retrieves the next pending stone's coordinates. If current_coords is not specified (or is not pending), retrieves the longest-pending stone's coordinates. The order for determining which stone is "next" is defined by how long stones have been pending -- successive applications of this function will retrieve successively younger pending stones. If there is no younger pending stone, the coordinates of the oldest pending stone are returned. If there are no pending stones at all, None is returned. """ with sqlite3.connect(db_file) as db: cur = db.cursor() current_stone_pending_since = 0 # Will always be older than any stone. if current_coords is not None: current_stone = get_stone(*current_coords) if current_stone is not None and current_stone["player"] == user_id and current_stone["status"] == "Pending": # The current stone belongs to the player and is pending. current_stone_pending_since = current_stone["last_status_change_time"] query = """SELECT x, y FROM stones WHERE player = ? AND status = 'Pending' AND last_status_change_time > ? ORDER BY last_status_change_time ASC;""" cur.execute(query, [user_id, current_stone_pending_since]) next_pending_coords = cur.fetchone() # A younger pending stone exists. if next_pending_coords is not None: return next_pending_coords # Otherwise, a younger pending stone does not exist. # Retrieve the oldest stone. cur.execute(query, [user_id, 0]) next_pending_coords = cur.fetchone() # Return either the coords of the oldest pending stone, or None if no such stone exists. return next_pending_coords
fb26531819c19532d7dbf8152963245f07af8e7c
15,420
import re
import keyword


def get_valid_identifier(prop, replacement_character='', allow_unicode=False):
    """Given a string property, generate a valid Python identifier

    Parameters
    ----------
    replacement_character: string, default ''
        The character to replace invalid characters with.
    allow_unicode: boolean, default False
        If True, then allow Python 3-style unicode identifiers.

    Examples
    --------
    >>> get_valid_identifier('my-var')
    'myvar'

    >>> get_valid_identifier('if')
    'if_'

    >>> get_valid_identifier('$schema', '_')
    '_schema'

    >>> get_valid_identifier('$*#$')
    '_'
    """
    # First substitute-out all non-valid characters.
    flags = re.UNICODE if allow_unicode else re.ASCII
    valid = re.sub(r'\W', replacement_character, prop, flags=flags)

    # If nothing is left, use just an underscore
    if not valid:
        valid = '_'

    # first character must be a non-digit. Prefix with an underscore
    # if needed
    if re.match(r'^[\d\W]', valid):
        valid = '_' + valid

    # if the result is a reserved keyword, then add an underscore at the end
    if keyword.iskeyword(valid):
        valid += '_'
    return valid
a3eeb389b73540aba2041e877c2ff151e272ffdd
15,421
import numpy as np
import torch as T  # assumption: T is PyTorch -- the snippet uses T.zeros, x.size() and .to(dev), which are torch-style APIs


def temporal_padding(x, paddings=(1, 0), pad_value=0):
    """Pad the middle dimension of a 3D tensor with `padding[0]` values left
    and `padding[1]` values right.

    Modified from keras.backend.temporal_padding
    https://github.com/fchollet/keras/blob/3bf913d/keras/backend/theano_backend.py#L590
    """
    if not isinstance(paddings, (tuple, list, np.ndarray)):
        paddings = (paddings, paddings)
    output = T.zeros(x.size(0), x.size(1) + sum(paddings), x.size(2)).to(dev)
    output[:, :paddings[0], :] = pad_value
    output[:, paddings[1]:, :] = pad_value
    output[:, paddings[0]: paddings[0] + x.size(1), :] = x
    return output
8ccc828ac68cd98da4e7ec5f8253ae5385317d48
15,422
def get_orientation(y, num_classes=8, encoding='one_hot'): """ Args: y: [B, T, H, W] """ # [H, 1] idx_y = np.arange(y.shape[2]).reshape([-1, 1]) # [1, W] idx_x = np.arange(y.shape[3]).reshape([1, -1]) # [H, W, 2] idx_map = np.zeros([y.shape[2], y.shape[3], 2]) idx_map[:, :, 0] += idx_y idx_map[:, :, 1] += idx_x # [1, 1, H, W, 2] idx_map = idx_map.reshape([1, 1, y.shape[2], y.shape[3], 2]) # [B, T, H, W, 1] y2 = np.expand_dims(y, 4) # [B, T, H, W, 2] y_map = idx_map * y2 # [B, T, 1] y_sum = np.expand_dims(y.sum(axis=2).sum(axis=2), 3) + 1e-7 # [B, T, 2] centroids = y_map.sum(axis=2).sum(axis=2) / y_sum # [B, T, 1, 1, 2] centroids = centroids.reshape([y.shape[0], y.shape[1], 1, 1, 2]) # Orientation vector # [B, T, H, W, 2] ovec = (y_map - centroids) * y2 # Normalize orientation [B, T, H, W, 2] ovec = (ovec + 1e-8) / \ (np.sqrt((ovec * ovec).sum(axis=-1, keepdims=True)) + 1e-7) # [B, T, H, W] angle = np.arcsin(ovec[:, :, :, :, 0]) xpos = (ovec[:, :, :, :, 1] > 0).astype('float') ypos = (ovec[:, :, :, :, 0] > 0).astype('float') # [B, T, H, W] angle = angle * xpos * ypos + (np.pi - angle) * (1 - xpos) * ypos + \ angle * xpos * (1 - ypos) + \ (-np.pi - angle) * (1 - xpos) * (1 - ypos) angle = angle + np.pi / 8 # [B, T, H, W] angle_class = np.mod( np.floor((angle + np.pi) * num_classes / 2 / np.pi), num_classes) if encoding == 'one_hot': angle_class = np.expand_dims(angle_class, 4) clazz = np.arange(num_classes).reshape( [1, 1, 1, 1, -1]) angle_one_hot = np.equal(angle_class, clazz).astype('float32') angle_one_hot = (angle_one_hot * y2).max(axis=1) return angle_one_hot.astype('uint8') elif encoding == 'class': # [B, H, W] return (angle_class * y).max(axis=1).astype('uint8') else: raise Exception('Unknown encoding type: {}'.format(encoding))
501c57cf447865ec03229a3ba15125da3837eb8e
15,424
def plot_tempo_curve(f_tempo, t_beat, ax=None, figsize=(8, 2), color='k', logscale=False, xlabel='Time (beats)', ylabel='Temp (BPM)', xlim=None, ylim=None, label='', measure_pos=[]): """Plot a tempo curve Notebook: C3/C3S3_MusicAppTempoCurve.ipynb Args: f_tempo: Tempo curve t_beat: Time axis of tempo curve (given as sampled beat axis) ax: Plot either as figure (ax==None) or into axis (ax==True) (Default value = None) figsize: Size of figure (Default value = (8, 2)) color: Color of tempo curve (Default value = 'k') logscale: Use linear (logscale==False) or logartihmic (logscale==True) tempo axis (Default value = False) xlabel: Label for x-axis (Default value = 'Time (beats)') ylabel: Label for y-axis (Default value = 'Temp (BPM)') xlim: Limits for x-axis (Default value = None) ylim: Limits for x-axis (Default value = None) label: Figure labels when plotting into axis (ax==True) (Default value = '') measure_pos: Plot measure positions as spefified (Default value = []) Returns: fig: figure handle ax: axes handle """ fig = None if ax is None: fig = plt.figure(figsize=figsize) ax = plt.subplot(1, 1, 1) ax.plot(t_beat, f_tempo, color=color, label=label) ax.set_title('Tempo curve') if xlim is None: xlim = [t_beat[0], t_beat[-1]] if ylim is None: ylim = [np.min(f_tempo) * 0.9, np.max(f_tempo) * 1.1] ax.set_xlim(xlim) ax.set_ylim(ylim) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.grid(True, which='both') if logscale: ax.set_yscale('log') ax.yaxis.set_major_formatter(ScalarFormatter()) ax.yaxis.set_minor_formatter(ScalarFormatter()) # ax.set_yticks([], minor=True) # yticks = np.arange(ylim[0], ylim[1]+1, 10) # ax.set_yticks(yticks) plot_measure(ax, measure_pos) return fig, ax
fd8f084d5912b7b64929e7bc8a61bd4dfc8ae107
15,425
def not_pathology(data):
    """Return false if the node is a pathology.

    :param dict data: A PyBEL data dictionary
    :rtype: bool
    """
    return data[FUNCTION] != PATHOLOGY
b420826265164445e8df470a4049d68839182d4b
15,426
def remove_index_fastqs(fastqs, fastq_attrs=IlluminaFastqAttrs):
    """
    Remove index (I1/I2) Fastqs from list

    Arguments:
        fastqs (list): list of paths to Fastq files
        fastq_attrs (BaseFastqAttrs): class to use for extracting attributes
            from Fastq names (defaults to IlluminaFastqAttrs)

    Returns:
        List: input Fastq list with any index read Fastqs removed.
    """
    return list(filter(lambda fq: not fastq_attrs(fq).is_index_read, fastqs))
fc62bd4ab28427ba51d8cd56d17576c89e2ed7ad
15,427
from typing import Union
from typing import List


def max(name: "Union[str, List[Expr]]") -> "Expr":
    """
    Get maximum value
    """
    if isinstance(name, list):
        def max_(acc: Series, val: Series) -> Series:
            mask = acc < val
            return acc.zip_with(mask, val)

        return fold(lit(0), max_, name).alias("max")
    return col(name).max()
3ca308e951801e4376483188249da2333aafc789
15,428
def initialize(Lx, Ly, solutes, restart_folder, field_to_subspace, concentration_init, rad, enable_NS, enable_EC, dx, surface_charge, permittivity, **namespace): """ Create the initial state. """ w_init_field = dict() if not restart_folder: if enable_EC: for solute in ["c_p", "c_m"]: w_init_field[solute] = df.interpolate( df.Constant(1e-4), field_to_subspace[solute].collapse()) c_init = df.interpolate( df.Expression("1./(2*DOLFIN_PI*rad*rad)*exp(" "- (pow(x[0], 2) + pow(x[1], 2))/(2*rad*rad))", Lx=Lx, Ly=Ly, rad=rad, degree=2), field_to_subspace["c_n"].collapse()) C_tot = df.assemble(c_init*dx) c_init.vector()[:] *= concentration_init*Lx*Ly/C_tot w_init_field["c_n"] = c_init V_0 = -surface_charge*Ly/permittivity[0] w_init_field["V"] = df.interpolate( df.Expression("V_0*(x[1]/Ly-0.5)", Ly=Ly, V_0=V_0, degree=1), field_to_subspace["V"].collapse()) return w_init_field
802affd7e56598cb4a22e37480313401254e263f
15,429
def _get_sentry_sdk(): """Creates raven.Client instance configured to work with cron jobs.""" # NOTE: this function uses settings and therefore it shouldn't be called # at module level. try: sentry_sdk = __import__("sentry_sdk") DjangoIntegration = __import__( "sentry_sdk.integrations.django" ).integrations.django.DjangoIntegration except ImportError: raise MissingDependency( "Unable to import sentry_sdk. " "Sentry monitor requires this dependency." ) for setting in ( "CRONMAN_SENTRY_CONFIG", "SENTRY_CONFIG", "RAVEN_CONFIG", ): client_config = getattr(settings, setting, None) if client_config is not None: break else: client_config = app_settings.CRONMAN_SENTRY_CONFIG sentry_sdk.init(integrations=[DjangoIntegration()], **client_config) return sentry_sdk
8682004e68606bf8f67487ad541455179c50493c
15,430
def get_memos():
    """
    Returns all memos in the database, in a form that can
    be inserted directly in the 'session' object.
    """
    records = []
    for record in collection.find({"type": "dated_memo"}):
        record['date'] = arrow.get(record['date']).isoformat()
        del record['_id']
        records.append(record)
    return sorted(records, key=lambda entry: entry['date'])
d9f0f66db05d368d086b77669418652565ea8587
15,431
def psplit(df, idx, label):
    """
    Split the participants with a positive label in df into two sets, similarly
    for participants with a negative label. Return two numpy arrays of
    participant ids; each array holds the chosen ids to be removed from one of
    two dataframes to ensure no overlap of participants between the two sets,
    keeping half of all participants in df with the same prevalence of
    event-positive participants.
    """
    pos = np.unique(df.loc[df[label] == 1].index.get_level_values(idx))
    all_id = np.unique(df.index.get_level_values(idx))
    neg = np.setdiff1d(all_id, pos)
    np.random.shuffle(pos)
    np.random.shuffle(neg)
    rmv_1 = np.concatenate((pos[:len(pos)//2], neg[:len(neg)//2]))
    rmv_2 = np.concatenate((pos[len(pos)//2:], neg[len(neg)//2:]))
    return rmv_1, rmv_2
fa8652d8812c8f4fd94c7d2601b964b7aaced963
15,433
import numpy


def invU(U):
    """
    Calculate inverse of U Cell.
    """
    nr, nc = U.getCellsShape()
    mshape = U.getMatrixShape()

    assert (nr == nc), "U Cell must be square!"
    nmat = nr

    u_tmp = admmMath.copyCell(U)
    u_inv = admmMath.Cells(nmat, nmat)
    for i in range(nmat):
        for j in range(nmat):
            if (i == j):
                u_inv[i, j] = numpy.identity(mshape[0])
            else:
                u_inv[i, j] = numpy.zeros_like(U[0, 0])

    for j in range(nmat - 1, 0, -1):
        for i in range(j - 1, -1, -1):
            tmp = u_tmp[i, j]
            for k in range(nmat):
                u_tmp[i, k] = u_tmp[i, k] - numpy.matmul(tmp, u_tmp[j, k])
                u_inv[i, k] = u_inv[i, k] - numpy.matmul(tmp, u_inv[j, k])

    return u_inv
58765834bde6c93e419d3e2f6d8de25d1740c587
15,437
def liq_g(drvt,drvp,temp,pres): """Calculate liquid water Gibbs energy using F03. Calculate the specific Gibbs free energy of liquid water or its derivatives with respect to temperature and pressure using the Feistel (2003) polynomial formulation. :arg int drvt: Number of temperature derivatives. :arg int drvp: Number of pressure derivatives. :arg float temp: Temperature in K. :arg float pres: Pressure in Pa. :returns: Gibbs free energy in units of (J/kg) / K^drvt / Pa^drvp. :raises ValueError: If drvt or drvp are negative. :Examples: >>> liq_g(0,0,300.,1e5) -5.26505056073e3 >>> liq_g(1,0,300.,1e5) -393.062597709 >>> liq_g(0,1,300.,1e5) 1.00345554745e-3 >>> liq_g(2,0,300.,1e5) -13.9354762020 >>> liq_g(1,1,300.,1e5) 2.75754520492e-7 >>> liq_g(0,2,300.,1e5) -4.52067557155e-13 """ if drvt < 0 or drvp < 0: errmsg = 'Derivatives {0} cannot be negative'.format((drvt,drvp)) raise ValueError(errmsg) TRED, PRED = _C_F03[0] y = (temp - _TCELS)/TRED z = (pres - _PATM)/PRED g = 0. for (j,k,c) in _C_F03[1:]: if y==0: if j==drvt: pwrt = 1. else: pwrt = 0. else: pwrt = y**(j-drvt) for l in range(drvt): pwrt *= j-l if z==0: if k==drvp: pwrp = 1. else: pwrp = 0. else: pwrp = z**(k-drvp) for l in range(drvp): pwrp *= k-l g += c * pwrt * pwrp g /= TRED**drvt * PRED**drvp return g
dfa3eef9d0b9228495b2d4d33a503c0e8c174c2a
15,438
def extract_dates(obj): """extract ISO8601 dates from unpacked JSON""" if isinstance(obj, dict): new_obj = {} # don't clobber for k,v in iteritems(obj): new_obj[k] = extract_dates(v) obj = new_obj elif isinstance(obj, (list, tuple)): obj = [ extract_dates(o) for o in obj ] elif isinstance(obj, string_types): obj = _parse_date(obj) return obj
1dd7dbda376755cd962c23d2149f41e8559cff12
15,441
def construct_covariates(states, model_spec): """Construct a matrix of all the covariates that depend only on the state space. Parameters --------- states : np.ndarray Array with shape (num_states, 8) containing period, years of schooling, the lagged choice, the years of experience in part-time, and the years of experience in full-time employment, type, age of the youngest child, indicator for the presence of a partner. Returns ------- covariates : np.ndarray Array with shape (num_states, number of covariates) containing all additional covariates, which depend only on the state space information. """ # Age youngest child # Bins of age of youngest child based on kids age # bin 0 corresponds to no kid, remaining bins as in Blundell # 0-2, 3-5, 6-10, 11+ age_kid = pd.Series(states[:, 6]) bins = pd.cut( age_kid, bins=[-2, -1, 2, 5, 10, 11], labels=[0, 1, 2, 3, 4], ).to_numpy() # Male wages based on age and education level of the woman # Wages are first calculated as hourly wages log_wages = ( model_spec.partner_cf_const + model_spec.partner_cf_age * states[:, 0] + model_spec.partner_cf_age_sq * states[:, 0] ** 2 + model_spec.partner_cf_educ * states[:, 1] ) # Male wages # Final input of male wages / partner income is calculated on a weekly # basis. Underlying assumption that all men work full time. male_wages = np.where(states[:, 7] == 1, np.exp(log_wages) * HOURS[2], 0) # Equivalence scale # Depending on the presence of a partner and a child each state is # assigned an equivalence scale value following the modernized OECD # scale: 1 for a single woman HH, 1.5 for a woman with a partner, # 1.8 for a woman with a partner and a child and 1.3 for a woman with # a child and no partner equivalence_scale = np.full(states.shape[0], np.nan) equivalence_scale = np.where( (states[:, 6] == -1) & (states[:, 7] == 0), 1.0, equivalence_scale ) equivalence_scale = np.where( (states[:, 6] == -1) & (states[:, 7] == 1), 1.5, equivalence_scale ) equivalence_scale = np.where( (states[:, 6] != -1) & (states[:, 7] == 1), 1.8, equivalence_scale ) equivalence_scale = np.where( (states[:, 6] != -1) & (states[:, 7] == 0), 1.3, equivalence_scale ) assert ( np.isnan(equivalence_scale).any() == 0 ), "Some HH were not assigned an equivalence scale" # Child benefits # If a woman has a child she receives child benefits child_benefits = np.where(states[:, 6] == -1, 0, model_spec.child_benefits) # Collect in covariates vector covariates = np.column_stack((bins, male_wages, equivalence_scale, child_benefits)) return covariates
883395fc3561ea2fb774eb0ea6dfd866a3d2eed6
15,442
def check_oblique_montante(grille, x, y): """Upward diagonal alignments (/): running from the bottom-left corner to the top-right corner""" symbole = grille.grid[y][x] # Upward diagonal alignment of the form XXX., node (x, y) being the lowest and leftmost if grille.is_far_from_top(y) and grille.is_far_from_right(x): if all(symbole == grille.grid[y - i - 1][x + i + 1] for i in range(2)): my_play = grille.play_if_possible(x + 3, y - 2) if my_play is not None: return my_play # Upward diagonal alignments, node (x, y) being the highest and rightmost if grille.is_far_from_bottom(y) and grille.is_far_from_left(x): # Diagonal alignment of the form .XXX if all(symbole == grille.grid[y + i + 1][x - i - 1] for i in range(2)): if grille.is_very_far_from_bottom(y): my_play = grille.play_if_possible(x - 3, y + 3) if my_play is not None: return my_play if symbole == grille.grid[y + 3][x - 3]: # Diagonal alignment of the form X.XX if symbole == grille.grid[y + 2][x - 2]: my_play = grille.play_if_possible(x - 1, y + 1) if my_play is not None: return my_play # Diagonal alignment of the form XX.X if symbole == grille.grid[y + 1][x - 1]: my_play = grille.play_if_possible(x - 2, y + 2) if my_play is not None: return my_play return None
f1ca8d7b55117e3e03c5150a07fd483a1da0a4d5
15,444
def _rotate_the_grid(lon, lat, rot_1, rot_2, rot_3): """Rotate the horizontal grid at lon, lat, via rotation matrices rot_1/2/3 Parameters ---------- lon, lat : xarray DataArray giving longitude, latitude in degrees of LLC horizontal grid rot_1, rot_2, rot_3 : np.ndarray rotation matrices Returns ------- xg, yg, zg : xarray DataArray cartesian coordinates of the horizontal grid """ # Get cartesian of 1D view of lat/lon xg, yg, zg = _convert_latlon_to_cartesian(lon.values.ravel(),lat.values.ravel()) # These rotations result in: # xg = 0 at pt1 # yg = 1 at pt1 # zg = 0 at pt1 and pt2 (and the great circle that crosses pt1 & pt2) xg, yg, zg = _apply_rotation_matrix(rot_1, (xg,yg,zg)) xg, yg, zg = _apply_rotation_matrix(rot_2, (xg,yg,zg)) xg, yg, zg = _apply_rotation_matrix(rot_3, (xg,yg,zg)) # Remake into LLC xarray DataArray xg = llc_tiles_to_xda(xg, grid_da=lon, less_output=True) yg = llc_tiles_to_xda(yg, grid_da=lat, less_output=True) zg = llc_tiles_to_xda(zg, grid_da=lon, less_output=True) return xg, yg, zg
b6c81dcc8191c2843534f369269e5c9cd466d581
15,445
def dict_mapper(data): """Mapper from `TypeValueMap` to :class:`dict`""" out = {} for k, v in data.items(): if v.type in (iceint.TypedValueType.TypeDoubleComplex, iceint.TypedValueType.TypeFloatComplex): out[k] = complex(v.value.real, v.value.imag) elif v.type in (iceint.TypedValueType.TypeDoubleComplexSeq, iceint.TypedValueType.TypeFloatComplexSeq): out[k] = [ complex(i.real, i.imag) for i in v.value ] elif v.type == iceint.TypedValueType.TypeDirection: out[k] = (v.value.coord1, v.value.coord2, str(v.value.sys)) elif v.type == iceint.TypedValueType.TypeNull: out[k] = None else: out[k] = v.value return out
b10ba4ed38d81cca3fc760d281a32d46d03d4223
15,446
def split_by_normal(cpy): """split curved faces into one face per triangle (aka split by normal, planarize). in place""" for name, faces in cpy.iteritems(): new_faces = [] for points, triangles in faces: x = points[triangles, :] normals = np.cross(x[:, 1]-x[:, 0], x[:, 2]-x[:, 0]) normals /= np.sqrt(np.sum(np.square(normals), axis=1))[:, None] if np.allclose(normals, normals[0][None, :]): new_faces.append((points, triangles)) else: for triangle in triangles: new_faces.append((points[triangle, :], np.arange(3, dtype=np.intc).reshape((1, 3)))) cpy[name] = new_faces return cpy
9a4c563465cc2deb5c2946f3e182fc9b71327081
15,449
def generate_index_distribution_from_blocks(numTrain, numTest, numValidation, params): """ Generates a vector of indices to partition the data for training. NO CHECKING IS DONE: it is assumed that the data could be partitioned in the specified block quantities and that the block quantities describe a coherent partition. Parameters ---------- numTrain : int Number of training data points numTest : int Number of testing data points numValidation : int Number of validation data points (may be zero) params : dictionary with parameters Contains the keywords that control the behavior of the function (uq_train_bks, uq_valid_bks, uq_test_bks) Return ---------- indexTrain : int numpy array Indices for data in training indexValidation : int numpy array Indices for data in validation (if any) indexTest : int numpy array Indices for data in testing (if merging) """ # Extract required parameters numBlocksTrain = params['uq_train_bks'] numBlocksValidation = params['uq_valid_bks'] numBlocksTest = params['uq_test_bks'] numBlocksTotal = numBlocksTrain + numBlocksValidation + numBlocksTest # Determine data size and block size if numBlocksTest > 0: # Use all data and re-distribute the partitions numData = numTrain + numValidation + numTest else: # Preserve test partition numData = numTrain + numValidation blockSize = (numData + numBlocksTotal // 2) // numBlocksTotal # integer division with rounding remainder = numData - blockSize * numBlocksTotal if remainder != 0: print("Warning ! Requested partition does not distribute data evenly between blocks. " "Testing (if specified) or Validation (if specified) will use different block size.") sizeTraining = numBlocksTrain * blockSize sizeValidation = numBlocksValidation * blockSize # Fill partition indices # Fill train partition Folds = np.arange(numData) np.random.shuffle(Folds) indexTrain = Folds[:sizeTraining] # Fill validation partition indexValidation = None if numBlocksValidation > 0: indexValidation = Folds[sizeTraining:sizeTraining + sizeValidation] # Fill test partition indexTest = None if numBlocksTest > 0: indexTest = Folds[sizeTraining + sizeValidation:] return indexTrain, indexValidation, indexTest
7bb30a6f69a45d231cbb4a140d7527a270f22e27
15,450
def sct2e(sc, sclkdp): """sct2e(SpiceInt sc, SpiceDouble sclkdp)""" return _cspyce0.sct2e(sc, sclkdp)
a32defabb20993b87c182121e209e62c190a46c8
15,451
import re import json def test_main(monkeypatch, test_dict: FullTestDict): """ - GIVEN a list of words - WHEN the accent dict is generated - THEN check all the jisho info is correct and complete """ word_list = convert_list_of_str_to_kaki(test_dict['input']) sections = test_dict['jisho']['expected_sections'] expected_output = test_dict['jisho']['expected_output'] def get_word_from_jisho_url(url: URL) -> Kaki: match = re.search(r"words\?keyword=(.+)", url) assert match is not None return Kaki(match.group(1)) def get_api_response(url: URL) -> str: word = get_word_from_jisho_url(url) return json.dumps(sections[word]["api_response"]) monkeypatch.setattr("requests.get", lambda url: FakeResponse(get_api_response(url))) assert jisho.main(word_list) == expected_output
f32d75bc5219cf48eccffcd777dc0881e0299ae7
15,452
def get_tn(tp, fp, fn, _all): """ Args: tp (Set[T]): fp (Set[T]): fn (Set[T]): _all (Iterable[T]): Returns: Set[T] """ return set(_all) - tp - fp - fn
a9afa3a2f07c8b63a6d6911b9a54cf9f9df08600
15,454
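A small illustrative call for get_tn above; the sets are made-up labels.

universe = {"a", "b", "c", "d", "e"}
true_negatives = get_tn(tp={"a"}, fp={"b"}, fn={"c"}, _all=universe)
# true_negatives == {"d", "e"}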
def download_cow_head(): """Download cow head dataset.""" return _download_and_read('cowHead.vtp')
70dc6617d3b9d6a8f9fa4df90caf749d00a6d778
15,455
def select_tests(blocks, match_string_list, do_test): """Remove or keep tests from list in WarpX-tests.ini according to do_test variable""" if do_test not in [True, False]: raise ValueError("do_test must be True or False") if not do_test: for match_string in match_string_list: print('Selecting tests without ' + match_string) blocks = [block for block in blocks if match_string not in block] else: for match_string in match_string_list: print('Selecting tests with ' + match_string) blocks = [block for block in blocks if match_string in block] return blocks
f77a0b9e91ec34b85479a442008241c7da386beb
15,456
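A sketch of how select_tests above might be called; the block strings are invented stand-ins for entries parsed out of WarpX-tests.ini.

blocks = [
    "[pml_x_yee]\ndim = 3\n",
    "[Langmuir_2d]\ndim = 2\n",
    "[LaserAcceleration]\ndim = 3\nuseMPI = 1\n",
]
# Keep only the blocks that mention "dim = 3"
selected = select_tests(blocks, ["dim = 3"], do_test=True)
# Then drop every remaining block that mentions "useMPI"
selected = select_tests(selected, ["useMPI"], do_test=False)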
def get_last_ds_for_site(session, idescr: ImportDescription, col: ImportColumn, siteid: int): """ Returns the newest dataset for a site with instrument, valuetype and level fitting to the ImportDescription's column To be used by lab imports where a site is encoded into the sample name. """ q = session.query(db.Dataset).filter( db.Dataset._site == siteid, db.Dataset._valuetype == col.valuetype, db.Dataset._source == idescr.instrument, ) if col.level is not None: q = q.filter(db.Dataset.level == col.level) return q.order_by(db.Dataset.end.desc()).limit(1).scalar()
41040efe43c0189a3cc8b7288e47eccd752674a7
15,457
def get_cart_from_request(request, cart_queryset=Cart.objects.all()): """Get cart from database or return unsaved Cart :type cart_queryset: saleor.cart.models.CartQueryset :type request: django.http.HttpRequest :rtype: Cart """ if request.user.is_authenticated(): cart = get_user_cart(request.user, cart_queryset) user = request.user else: token = request.get_signed_cookie(Cart.COOKIE_NAME, default=None) cart = get_anonymous_cart_from_token(token, cart_queryset) user = None if cart is not None: return cart else: return Cart(user=user)
5d9d7e3708db5db38f07aea9299ee0aacdecea22
15,458
def _is_ge(series, value): """ Returns the index of rows from series where series >= value. Parameters ---------- series : pandas.Series The data to be queried value : list-like The values to be tested Returns ------- index : pandas.index The index of series for rows where series >= value. """ series = series[series.ge(value)] return series.index
98b8825753953b1b9bf7348d04d260b7514a7749
15,459
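A quick illustration of _is_ge above with a toy Series.

import pandas as pd

s = pd.Series([1, 5, 3, 7], index=["a", "b", "c", "d"])
_is_ge(s, 3)  # Index(['b', 'c', 'd'], dtype='object')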
def preprocess_image(image, image_size, is_training=False, test_crop=True): """Preprocesses the given image. Args: image: `Tensor` representing an image of arbitrary size. image_size: Size of output image. is_training: `bool` for whether the preprocessing is for training. test_crop: whether or not to extract a central crop of the images (as for standard ImageNet evaluation) during the evaluation. Returns: A preprocessed image `Tensor` of range [0, 1]. """ image = tf.image.convert_image_dtype(image, dtype=tf.float32) if is_training: return preprocess_for_train(image, image_size, image_size) else: return preprocess_for_eval(image, image_size, image_size, crop=test_crop)
913f614798daaf7b752195c92e48890868666b57
15,460
async def wait_for_reaction(self, message): """ Assert that ``message`` is reacted to with any reaction. :param discord.Message message: The message to test with :returns: The reaction object. :rtype: discord.Reaction :raises NoResponseError: """ def check_reaction(reaction, user): return ( reaction.message.id == message.id and user == self.target and reaction.message.channel == self.channel ) try: result = await self.client.wait_for( "reaction_add", timeout=self.client.timeout, check=check_reaction ) except TimeoutError: raise NoResponseError else: return result
67890343d6b59923e8fd3e655252eddcde88323c
15,461
def _multivariate_normal_log_likelihood(X, means=None, covariance=None): """Calculate log-likelihood assuming normally distributed data.""" X = check_array(X) n_samples, n_features = X.shape if means is None: means = np.zeros_like(X) else: means = check_array(means) assert means.shape == X.shape if covariance is None: covariance = np.eye(n_features) else: covariance = check_array(covariance) assert covariance.shape == (n_features, n_features) log_likelihood = 0 for t in range(n_samples): log_likelihood += ss.multivariate_normal.logpdf( X[t], mean=means[t], cov=covariance) return log_likelihood
d5144074f0a88c51a0c46f1b36eb8bdd95f9140e
15,462
from nltk.tokenize import word_tokenize as tokenize  # assumption: a word tokenizer; the stdlib "tokenize" module originally imported here is not callable as tokenize(text) below def lemmatize(text): """ tokenize and lemmatize English messages Parameters ---------- text: str text messages to be lemmatized Returns ------- list list with lemmatized forms of words """ def get_wordnet_pos(treebank_tag): if treebank_tag.startswith('J'): return wordnet.ADJ if treebank_tag.startswith('V'): return wordnet.VERB if treebank_tag.startswith('N'): return wordnet.NOUN if treebank_tag.startswith('R'): return wordnet.ADV # fall back to Noun for anything else else: return wordnet.NOUN # lemmatize wordpos = nltk.pos_tag(tokenize(text)) lmtzer = WordNetLemmatizer() return [lmtzer.lemmatize(word, pos=get_wordnet_pos(pos)) for word, pos in wordpos]
0a744953ac014f2c0551cecb9c235fc405bf5aaa
15,463
def prune_non_overlapping_boxes(boxes1, boxes2, min_overlap): """Prunes the boxes in boxes1 that overlap less than thresh with boxes2. For each box in boxes1, we want its IOA to be more than min_overlap with at least one of the boxes in boxes2. If it does not, we remove it. Arguments: boxes1: a float tensor with shape [N, 4]. boxes2: a float tensor with shape [M, 4]. min_overlap: minimum required overlap between boxes, to count them as overlapping. Returns: boxes: a float tensor with shape [N', 4]. keep_indices: a long tensor with shape [N'] indexing kept bounding boxes in the first input tensor ('boxes1'). """ with tf.name_scope('prune_non_overlapping_boxes'): overlap = ioa(boxes2, boxes1) # shape [M, N] overlap = tf.reduce_max(overlap, axis=0) # shape [N] keep_bool = tf.greater_equal(overlap, min_overlap) keep_indices = tf.squeeze(tf.where(keep_bool), axis=1) boxes = tf.gather(boxes1, keep_indices) return boxes, keep_indices
5e1a04022707364f1d1a8b14afbd356e781137b9
15,464
def get_namespace_from_node(node): """Get the namespace from the given node Args: node (str): name of the node Returns: namespace (str) """ parts = node.rsplit("|", 1)[-1].rsplit(":", 1) return parts[0] if len(parts) > 1 else u":"
a2305719c0e72614f75309f1412ce71c9264b5df
15,465
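Two illustrative calls for get_namespace_from_node above, using made-up Maya DAG paths.

get_namespace_from_node("|group1|rig:hand_L")  # -> "rig"
get_namespace_from_node("|group1|hand_L")      # -> u":"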
def PricingStart(builder): """This method is deprecated. Please switch to Start.""" return Start(builder)
d87eae22f74b5251261bb39aea93e46887f03725
15,466
def create_whimsy_value_at_clients(number_of_clients: int = 3): """Returns a Python value and federated type at clients.""" value = [float(x) for x in range(10, number_of_clients + 10)] type_signature = computation_types.at_clients(tf.float32) return value, type_signature
87d1d110392bd83585fd19ba2e8a10a0c8507d30
15,468
def format_task_numbers_with_links(tasks): """Returns formatting for the tasks section of asana.""" project_id = data.get('asana-project', None) def _task_format(task_id): if project_id: asana_url = tool.ToolApp.make_asana_url(project_id, task_id) return "[#%d](%s)" % (task_id, asana_url) else: return "#%d" % task_id return "\n".join([_task_format(tid) for tid in tasks])
b6b7975cb45cdae0a146a67c0fab51ef0724aee2
15,469
def get_tick_indices(tickmode, numticks, coords): """ Ticks on the axis are a subset of the axis coordinates This function returns the indices of y coordinates on which a tick should be displayed :param tickmode: should be 'auto' (automatically generated) or 'all' :param numticks: minimum number of ticks to display, only applies to 'auto' mode :param coords: list of coordinates along the axis :return indices: ticks indices in the input list of y coordinates :return numchar: maximum number of characters required to display ticks, this is useful to preserve alignments """ if tickmode == 'all' or (tickmode == 'auto' and numticks >= len(coords)): # Put a tick in front of each row indices = list(range(len(coords))) else: # If tickmode is 'auto', put at least 'numticks' ticks tick_spacing = 5 # default spacing between ticks # Decrease the tick spacing progressively until we get the desired number of ticks indices = [] while len(indices) < numticks: indices = list(range(0, len(coords), tick_spacing)) tick_spacing -= 1 # Compute the number of characters required to display ticks numchar = max(len(str(NiceNumber(coords[i]))) for i in indices) return indices, numchar
72cf3fed39db3cabf672bff4b042c8685356f9ff
15,470
def fpIsNormal(a, ctx=None): """Create a Z3 floating-point isNormal expression. """ return _mk_fp_unary_pred(Z3_mk_fpa_is_normal, a, ctx)
ee6e2cccf1ad0534929aa0632d271d37f58a232e
15,471
import math def siqs_find_next_poly(n, factor_base, i, g, B): """Compute the (i+1)-th polynomials for the Self-Initialising Quadratic Sieve, given that g is the i-th polynomial. """ v = lowest_set_bit(i) + 1 z = -1 if math.ceil(i / (2 ** v)) % 2 == 1 else 1 b = (g.b + 2 * z * B[v - 1]) % g.a a = g.a b_orig = b if (2 * b > a): b = a - b assert ((b * b - n) % a == 0) g = Polynomial([b * b - n, 2 * a * b, a * a], a, b_orig) h = Polynomial([b, a]) for fb in factor_base: if a % fb.p != 0: fb.soln1 = (fb.ainv * (fb.tmem - b)) % fb.p fb.soln2 = (fb.ainv * (-fb.tmem - b)) % fb.p return g, h
d5529db62a194582aacd8769a56688cf6b42bbe1
15,473
def get_column(value): """Convert column number on command line to Python index.""" if value.startswith("c"): # Ignore c prefix, e.g. "c1" for "1" value = value[1:] try: col = int(value) except: stop_err("Expected an integer column number, not %r" % value) if col < 1: stop_err("Expect column numbers to be at least one, not %r" % value) return col - 1
858f4128955c0af579d99dcd64be157b41c6dae3
15,474
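A quick sketch of get_column above; it converts one-based command-line column numbers (optionally prefixed with "c") into zero-based indices.

get_column("c3")  # -> 2
get_column("1")   # -> 0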
def sdi(ts_split, mean=False, keys=None): """ Compute the Structural Decoupling Index (SDI). i.e. the ratio between the norm of the "high" and the norm of the "low" "graph-filtered" timeseries. If the given dictionary does not contain the keywords "high" and "low", the SDI is computed as the ratio between the norm of the second and the norm of the first dictionary entry. "keys" can be used to indicate the order of the two keys, or to select two elements of a bigger dictionary. Parameters ---------- ts_split : dict or numpy.ndarrays A dictionary containing two entries. If the two entries are "low" and "high", then SDI will be computed as the norm of the high vs the norm of the low, otherwise as the ratio between the second (second key in sorted keys) and the first. mean : bool, optional If True, compute mean over the last axis (e.g. between subjects) keys : None or list of strings, optional Can be used to select two entries from a bigger dictionary and/or to specify the order in which the keys should be read (e.g. forcing a different order from the sorted keys). Returns ------- numpy.ndarray Returns the structural decoupling index Raises ------ ValueError If keys are provided but not contained in the dictionary If keys are not provided and the dictionary has more than 2 entries """ # #!# Implement acceptance of two matrices and not only dictionary if keys is None: keys = list(ts_split.keys()) else: if all(item in list(ts_split.keys()) for item in keys) is False: raise ValueError(f'The provided keys {keys} do not match the ' 'keys of the provided dictionary ' f'({list(ts_split.keys())})') if len(keys) != 2: raise ValueError('`structural_decoupling_index` function requires ' 'a dictionary with exactly two timeseries as input.') check_keys = [item.lower() for item in keys] if all(item in ['low', 'high'] for item in check_keys): # Case insensitively reorder the items of dictionary as ['low', 'high']. keys = [keys[check_keys.index('low')], keys[check_keys.index('high')]] norm = dict.fromkeys(keys) for k in keys: norm[k] = np.linalg.norm(ts_split[k], axis=1) LGR.info('Computing Structural Decoupling Index.') sdi = norm[keys[1]] / norm[keys[0]] if sdi.ndim >= 2 and mean: sdi = sdi.mean(axis=1) return sdi
9ed09f72bc6902b5c007286e12f1ed72d904d4b8
15,475
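A usage sketch for sdi above with random data; it assumes sdi and the module-level logger LGR that it calls live in the same module as this snippet, and that numpy is available.

import logging
import numpy as np

LGR = logging.getLogger(__name__)  # sdi expects a module-level logger named LGR

rng = np.random.default_rng(0)
ts_split = {
    "low": rng.standard_normal((5, 200, 3)),   # e.g. regions x timepoints x subjects
    "high": rng.standard_normal((5, 200, 3)),
}
values = sdi(ts_split, mean=True)  # one structural decoupling index per region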