content: string (35 to 762k characters)
sha1: string (40 characters)
id: int64 (0 to 3.66M)
def showresults(options=''): """ Generate and plot results from a kima run. The argument `options` should be a string with the same options as for the kima-showresults script. """ # force correct CLI arguments args = _parse_args(options) plots = [] if args.rv: plots.append('6') if args.planets: plots.append('1') if args.orbital: plots.append('2'); plots.append('3') if args.gp: plots.append('4'); plots.append('5') if args.extra: plots.append('7') for number in args.plot_number: plots.append(number) try: evidence, H, logx_samples = postprocess(plot=args.diagnostic) except IOError as e: print(e) sys.exit(1) res = KimaResults(list(set(plots))) show() # render the plots # __main__.__file__ doesn't exist in the interactive interpreter if not hasattr(__main__, '__file__'): return res
0c5c944dc21e0abf808c258d8993f6133a254701
22,041
import re def convert_as_number(symbol: str) -> float: """ handle cases: ' ' or '' -> 0 '10.95%' -> 10.95 '$404,691,250' -> 404691250 '$8105.52' -> 8105.52 :param symbol: string :return: float """ result = symbol.strip() if len(result) == 0: return 0 result = re.sub('[%$, *]', '', result) return float(result)
cea1d6e894fa380ecf6968d5cb0ef1ce21b73fac
22,042
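A few illustrative calls for the convert_as_number helper above (a sketch only; it assumes the function is in scope, and the expected values follow the docstring examples):

assert convert_as_number('') == 0
assert convert_as_number('10.95%') == 10.95
assert convert_as_number('$404,691,250') == 404691250.0
assert convert_as_number('$8105.52') == 8105.52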
def smiles_dict(): """Store SMILES for compounds used in test cases here.""" smiles = { "ATP": "Nc1ncnc2c1ncn2[C@@H]1O[C@H](COP(=O)(O)OP(=O)(O)OP(=O)(O)O)[C" + "@@H](O)[C@H]1O", "ADP": "Nc1ncnc2c1ncn2[C@@H]1O[C@H](COP(=O)(O)OP(=O)(O)O)[C@@H](O)[C" + "@H]1O", "meh": "CCC(=O)C(=O)O", "l_ala": "C[C@H](N)C(=O)O", "d_ala": "C[C@@H](N)C(=O)O", "FADH": "Cc1cc2c(cc1C)N(CC(O)C(O)C(O)COP(=O)(O)OP(=O)(O)OCC1OC(n3cnc" + "4c(N)ncnc43)C(O)C1O)c1nc(O)nc(O)c1N2", "S-Adenosylmethionine": "C[S+](CC[C@H](N)C(=O)O)C[C@H]1O[C@@H](n2cnc" + "3c(N)ncnc32)[C@H](O)[C@@H]1O", } return smiles
080373bdfb250f57e20e0e2b89702ac07c430f69
22,044
def prepare_parser() -> ArgumentParser: """Create all CLI parsers/subparsers.""" # Handle core parser args parser = ArgumentParser( description="Learning (Hopefully) Safe Agents in Gridworlds" ) handle_parser_args({"core": parser}, "core", core_parser_configs) # Handle environment subparser args env_subparsers = parser.add_subparsers( help="Types of gridworld environments", dest="env_alias" ) env_subparsers.required = True env_parsers = {} for env_name in ENV_MAP: env_parsers[env_name] = env_subparsers.add_parser(env_name) handle_parser_args(env_parsers, env_name, env_parser_configs) # Handle agent subparser args agent_subparsers = {} for env_name, env_parser in env_subparsers.choices.items(): agent_parser_configs = deepcopy(stashed_agent_parser_configs) agent_subparsers[env_name] = env_parser.add_subparsers( help="Types of agents", dest="agent_alias" ) agent_subparsers[env_name].required = True agent_parsers = {} for agent_name in AGENT_MAP: agent_parsers[agent_name] = agent_subparsers[env_name].add_parser( agent_name ) handle_parser_args(agent_parsers, agent_name, agent_parser_configs) return parser
c0a42abf56f3c82ae09ef2459aae49fded71e9f0
22,045
def import_google(authsub_token, user): """ Uses the given AuthSub token to retrieve Google Contacts and import the entries with an email address into the contacts of the given user. Returns a tuple of (number imported, total number of entries). """ contacts_service = gdata.contacts.service.ContactsService() contacts_service.auth_token = authsub_token contacts_service.UpgradeToSessionToken() entries = [] feed = contacts_service.GetContactsFeed() entries.extend(feed.entry) next_link = feed.GetNextLink() while next_link: feed = contacts_service.GetContactsFeed(uri=next_link.href) entries.extend(feed.entry) next_link = feed.GetNextLink() total = 0 imported = 0 for entry in entries: name = entry.title.text for e in entry.email: email = e.address total += 1 try: Contact.objects.get(user=user, email=email) except Contact.DoesNotExist: Contact(user=user, name=name, email=email).save() imported += 1 return imported, total
768b900ceac60cc69d1906ef915fdace8b6d0982
22,046
def getGpsTime(dt): """_getGpsTime returns gps time (seconds since midnight Sat/Sun) for a datetime """ total = 0 days = (dt.weekday()+ 1) % 7 # this makes Sunday = 0, Monday = 1, etc. total += days*3600*24 total += dt.hour * 3600 total += dt.minute * 60 total += dt.second return(total)
30f0fa562cf88ca2986c3346b4111dcb18b1cb34
22,047
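A quick sanity check for getGpsTime above (illustrative only; assumes the function is importable). With Sunday mapped to day 0, Monday 2021-01-04 01:02:03 is one day plus 1 h 2 min 3 s into the GPS week:

from datetime import datetime

# 1*86400 + 1*3600 + 2*60 + 3 = 90123 seconds since the Sat/Sun midnight rollover
assert getGpsTime(datetime(2021, 1, 4, 1, 2, 3)) == 90123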
import flask from datetime import datetime def validate_date(date, flash_errors=True): """ Validates date string. Format should be YYYY-MM-DD. Flashes errors if flash_errors is True. """ try: datetime.strptime(date, '%Y-%m-%d') except ValueError: if flash_errors: flask.flash('Invalid date provided. Make sure dates are in YYYY-MM-DD format.') return False return True
ed3e45c3232da8d994f854001a3be852180681e0
22,049
import numpy as np import cv2 def generate_error_map(image, losses, box_lenght): """ Function to overlay an error map on an image Args: image: input image losses: list of losses, one for each masked part of the flow. Returns: error_map: overlaid error_heatmap and image. """ box_lenght = int(box_lenght) # Assert that everything is correct num_boxes = int(image.shape[0] / box_lenght) * int(image.shape[1] / box_lenght) assert num_boxes == len(losses) img_width = int(np.floor(image.shape[1] / box_lenght) * box_lenght) img_height = int(np.floor(image.shape[0] / box_lenght) * box_lenght) image = image[:img_height, :img_width] image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) heatmap = np.ones_like(image[:,:,0]) res_heatmap = np.reshape(heatmap, (box_lenght, box_lenght, num_boxes)) res_heatmap = res_heatmap * np.array(losses) heatmap = np.zeros((img_height, img_width)) # ugly for loop, unable to solve atm i = 0 for y in np.arange(0, img_height, step=box_lenght): for x in np.arange(0, img_width, step=box_lenght): # convert to x,y coordinates heatmap[y: y+box_lenght, x: x+box_lenght] = res_heatmap[:,:,i] i+=1 heatmap = np.asarray(heatmap / np.max(heatmap) * 255, dtype=np.uint8) heatmap_img = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET) final = cv2.addWeighted(heatmap_img, 0.5, postprocess_image(image), 0.5, 0) return final
b4b8a8207a90226caba0c5f5be4c322dc6181a42
22,050
def get_order(oid): # noqa: E501 """Gets an existing order by order id # noqa: E501 :param oid: :type oid: str :rtype: Order """ oid = int(oid) msg = "error retrieving order" ret_code = 400 if oid in orders: msg = {"status": f"order retrieved", "order": orders[oid], "oid": oid} ret_code = 200 else: msg = f"Order: {oid} could not be found" ret_code = 404 return msg, ret_code
d95051e027994b0bd7837859a3e7fd8106f4a07e
22,051
import operator def most_recent_assembly(assembly_list): """Based on assembly summaries find the one submitted the most recently""" if assembly_list: return sorted(assembly_list, key=operator.itemgetter('submissiondate'))[-1]
1d7ecf3a1fa862e421295dda0ba3d89863f33b0f
22,052
import re import xml.etree.ElementTree as Etree def dict_from_xml_text(xml_text, fix_ampersands=False): """ Convert an xml string to a dictionary of values :param xml_text: valid xml string :param fix_ampersands: additionally replace bare & with the &amp; encoded value before parsing to etree :return: dictionary of data """ if fix_ampersands: xml_text = re.sub(r'&', '&amp;', xml_text) root = Etree.fromstring(xml_text) return dict_from_etree(root)
fc008f9c9ef23640ed09adef3320eb549506988d
22,053
def find_encryption_key(loop_size, subject_number): """Find encryption key from the subject_number and loop_size.""" value = 1 for _ in range(loop_size): value = transform_value(value, subject_number) return value
cb9f58d065e4bc227ac034357981eac070834f73
22,054
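The snippet above relies on a transform_value helper that is not shown. In the Advent of Code 2020 day 25 puzzle this pattern usually comes from, a single transform step multiplies by the subject number modulo 20201227; the sketch below is an assumption, not part of the original code:

def transform_value(value, subject_number, modulus=20201227):
    # one step of the loop: multiply by the subject number, then reduce modulo 20201227
    return (value * subject_number) % modulus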
import math def carla_rotation_to_RPY(carla_rotation): """ Convert a carla rotation to a roll, pitch, yaw tuple Considers the conversion from left-handed system (unreal) to right-handed system (ROS). Considers the conversion from degrees (carla) to radians (ROS). :param carla_rotation: the carla rotation :type carla_rotation: carla.Rotation :return: a tuple with 3 elements (roll, pitch, yaw) :rtype: tuple """ roll = -math.radians(carla_rotation.roll) pitch = -math.radians(carla_rotation.pitch) yaw = -math.radians(carla_rotation.yaw) return (roll, pitch, yaw)
30f4cd3facd3696d3f0daf25f2723d82541c89f2
22,055
from bempp.api.integration.triangle_gauss import rule from scipy.sparse import coo_matrix from scipy.sparse.linalg import aslinearoperator def compute_p1_curl_transformation(space, quadrature_order): """ Compute the transformation of P1 space coefficients to surface curl values. Returns two lists, curl_transforms and curl_transforms_transpose. The jth matrix in curl_transforms is the map from P1 function space coefficients (or extended space built upon P1 type spaces) to the jth component of the surface curl evaluated at the quadrature points, multiplied with the quadrature weights and integration element. The list curl_transforms_transpose contains the transpose of these matrices. """ grid_data = space.grid.data("double") number_of_elements = space.grid.number_of_elements quad_points, weights = rule(quadrature_order) npoints = len(weights) dof_count = space.localised_space.grid_dof_count data, iind, jind = compute_p1_curl_transformation_impl( grid_data, space.support_elements, space.normal_multipliers, quad_points, weights, ) curl_transforms = [] curl_transforms_transpose = [] for index in range(3): curl_transforms.append( aslinearoperator( coo_matrix( (data[index, :], (iind, jind)), shape=(npoints * number_of_elements, dof_count), ).tocsr() ) @ aslinearoperator(space.map_to_localised_space) @ aslinearoperator(space.dof_transformation) ) curl_transforms_transpose.append( aslinearoperator(space.dof_transformation.T) @ aslinearoperator(space.map_to_localised_space.T) @ aslinearoperator( coo_matrix( (data[index, :], (jind, iind)), shape=(dof_count, npoints * number_of_elements), ).tocsr() ) ) return curl_transforms, curl_transforms_transpose
6ad147c23fb9c153d534b742443c6238c1dc6f33
22,056
def _get_individual_id(individual) -> str: """ Returns a unique identifier as string for the given individual. :param individual: The individual to get the ID for. :return: A string representing the ID. """ if hasattr(individual, "identifier"): identifier = individual.identifier if isinstance(identifier, list) and len(identifier) > 0 and type(identifier[0]) in [int, str]: return str(identifier[0]) if type(identifier) in [int, str]: return str(identifier) return str(individual)
e606d5eef7bfbcd0d76113c20f450be3c1e6b2ab
22,057
def get_self_url(d): """Returns the URL of a Stash resource""" return d.html_url if isinstance(d, PullRequest) else d["links"]["self"][0]["href"]
b7b88b49a1035ec7d15e4d0f7c864e489dccbf70
22,058
import numpy as np def shift(arr, *args): """ **WARNING** The ``Si`` arguments can be either a single array containing the shift parameters for each dimension, or a sequence of up to eight scalar shift values. For arrays of more than one dimension, the parameter ``Sn`` specifies the shift applied to the n-th dimension. While this implementation supports lists as ``arr`` argument (to match the style of IDL), the IDLpy bridge does *not* support lists, and returns them *unchanged*! If ``SHIFT`` is used in combination with ``FFT``, maybe you should look at ``np.fft.fftshift``. """ arr = np.asarray(arr) # accept list (see note above) if arr.ndim==1: if len(args)==1: return np.roll(arr, _int_list(args)) elif arr.ndim==2: if len(args)==1: return np.roll(arr, _int_list(args)) if len(args)==2: return np.roll(arr, _int_list(args)[::-1], axis=(0,1)) elif arr.ndim==3: if len(args)==1: return np.roll(arr, _int_list(args)) elif len(args)==2: raise IDLException("Incorrect number of arguments.") elif len(args)==3: return np.roll(arr, _int_list(args)[::-1], axis=(0,1,2)) raise NotImplementedError("shift only works for 1D, 2D and 3D arrays.")
b70a430039ba369d99a3c2edea920430eb27dfa1
22,059
def ConvertToMeaningfulConstant(pset): """ Gets the flux constant, and quotes it above some energy minimum Emin """ # Units: IF TOBS were in yr, it would be smaller, and raw const greater. # also converts per Mpcs into per Gpc3 units=1e9*365.25 const = (10**pset[7])*units # to cubic Gpc and days to year Eref=1e40 #erg per Hz Emin=10**pset[0] Emax=10**pset[1] gamma=pset[3] factor=(Eref/Emin)**gamma - (Emax/Emin)**gamma const *= factor return const
e393f66e72c3a43e91e9975f270ac7dcf577ad3e
22,060
def hw_uint(value): """return HW of 16-bit unsigned integer in two's complement""" bitcount = bin(value).count("1") return bitcount
9a9c6017d3d6da34c4e9132a0c89b267aa263ace
22,061
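Tiny illustration of hw_uint above (assumes the function is in scope): bin() counts the set bits, so the Hamming weight of 0x00F0 is 4 and of 0xFFFF is 16.

assert hw_uint(0x0001) == 1
assert hw_uint(0x00F0) == 4
assert hw_uint(0xFFFF) == 16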
import copy import numpy as np def clip(x, xmin, xmax): """ clip input array so that x<xmin becomes xmin, x>xmax becomes xmax, return clipped array """ new = copy.copy(x) bd = np.where(x < xmin)[0] new[bd] = xmin bd = np.where(x > xmax)[0] new[bd] = xmax return new
502d8c5ce0427283bf02ead2d5f5c90c69e14638
22,062
def profile_from_creds(creds, keychain, cache): """Create a profile from an AWS credentials file.""" access_key, secret_key = get_keys_from_file(creds) arn = security_store(access_key, secret_key, keychain, cache) return profile_from_arn(arn)
62923959ce115bef776b32c3ed93a19aef93f9c3
22,063
import torch def test(model, test_loader, dynamics, fast_init): """ Evaluate prediction accuracy of an energy-based model on a given test set. Args: model: EnergyBasedModel test_loader: Dataloader containing the test dataset dynamics: Dictionary containing the keyword arguments for the relaxation dynamics on u fast_init: Boolean to specify if fast feedforward initilization is used for the prediction Returns: Test accuracy Mean energy of the model per batch """ test_E, correct, total = 0.0, 0.0, 0.0 for x_batch, y_batch in test_loader: # Prepare the new batch x_batch, y_batch = x_batch.to(config.device), y_batch.to(config.device) # Extract prediction as the output unit with the strongest activity output = predict_batch(model, x_batch, dynamics, fast_init) prediction = torch.argmax(output, 1) with torch.no_grad(): # Compute test batch accuracy, energy and store number of seen batches correct += float(torch.sum(prediction == y_batch.argmax(dim=1))) test_E += float(torch.sum(model.E)) total += x_batch.size(0) return correct / total, test_E / total
41c6e20fbcf11e76437a175f7b4662ec24b85773
22,065
def get_cluster_activite(cluster_path_csv, test, train=None): """Get cluster activite csv from path cluster_path_csv. Merge cluster with station_id Parameters ---------- cluster_path_csv : String : Path to export df_labels DataFrame test : pandas.DataFrame train : pandas.DataFrame Returns ------- If train is not None: Return 2 pandas.DataFrame train, test Else: Return 1 pandas.DataFrame test """ cluster_activite = read_cluster_activite(cluster_path_csv=cluster_path_csv) test = test.merge(cluster_activite, left_on='station_id', right_on='id_station', how='left') test.drop('id_station', axis=1, inplace=True) if train is not None and len(train) > 0: train = train.merge(cluster_activite, left_on='station_id', right_on='id_station', how='left') train.drop('id_station', axis=1, inplace=True) return train, test else: return test
f8dbb1bb2149a58f8617f76cfdf1a4943f500314
22,066
from typing import Optional def get_ssl_policy(name: Optional[str] = None, project: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSSLPolicyResult: """ Gets an SSL Policy within GCE from its name, for use with Target HTTPS and Target SSL Proxies. For more information see [the official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies). ## Example Usage ```python import pulumi import pulumi_gcp as gcp my_ssl_policy = gcp.compute.get_ssl_policy(name="production-ssl-policy") ``` :param str name: The name of the SSL Policy. :param str project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ __args__ = dict() __args__['name'] = name __args__['project'] = project if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('gcp:compute/getSSLPolicy:getSSLPolicy', __args__, opts=opts, typ=GetSSLPolicyResult).value return AwaitableGetSSLPolicyResult( creation_timestamp=__ret__.creation_timestamp, custom_features=__ret__.custom_features, description=__ret__.description, enabled_features=__ret__.enabled_features, fingerprint=__ret__.fingerprint, id=__ret__.id, min_tls_version=__ret__.min_tls_version, name=__ret__.name, profile=__ret__.profile, project=__ret__.project, self_link=__ret__.self_link)
1b4248a6a8dbbd735a006cc99de5d5e855c195ca
22,067
def getitem(self, item): """Select elements at the specific index. Parameters ---------- item : Union[slice, int, dragon.Tensor] The index. Returns ------- dragon.Tensor The output tensor. """ gather_args = [] if isinstance(item, Tensor): if item.dtype == 'bool' or item.dtype == 'uint8': if context.executing_eagerly(): return OpLib.execute('BooleanMask', [self, item]) return OpLib.add('BooleanMask', [self, item]) elif item.dtype == 'int64': gather_args.append((0, item)) else: raise TypeError('Unsupported index type: ' + item.dtype) if isinstance(item, tuple): for i, elem in enumerate(item): if isinstance(elem, Tensor): if elem.dtype == 'int64': gather_args.append((i, elem)) else: raise TypeError('Unsupported index type: ' + elem.dtype) if len(gather_args) == 1: axis, index = gather_args[0] if context.executing_eagerly(): return OpLib.execute( 'Gather', [self, index], axis=axis, end_axis=None) return OpLib.add('Gather', [self, index], axis=axis) elif len(gather_args) > 1: raise NotImplementedError starts, sizes = _process_index(item) if context.executing_eagerly(): return OpLib.execute( 'Slice', [self], ndim=len(starts), starts=starts, sizes=sizes) return OpLib.add('Slice', [self], starts=starts, sizes=sizes)
0f7a4659a9fc3ac34fcee8f402fada7f622e11f0
22,068
import torch def zdot_batch(x1, x2): """Finds the complex-valued dot product of two complex-valued multidimensional Tensors, preserving the batch dimension. Args: x1 (Tensor): The first multidimensional Tensor. x2 (Tensor): The second multidimensional Tensor. Returns: The dot products along each dimension of x1 and x2. """ batch = x1.shape[0] return torch.reshape(torch.conj(x1)*x2, (batch, -1)).sum(1)
5e57dd7a693c420dd1b0c5c01523eb2f0fb85253
22,069
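A minimal check for zdot_batch above (a sketch; assumes torch and the function are available): the result has one entry per batch element and equals the sum of conj(x1) * x2 over the remaining dimensions.

import torch

x1 = torch.tensor([[1 + 1j, 2 + 0j], [0 + 1j, 1 - 1j]])
x2 = torch.tensor([[1 + 0j, 1 + 1j], [2 + 0j, 0 + 1j]])
out = zdot_batch(x1, x2)                      # shape (2,)
expected = (x1.conj() * x2).sum(dim=1)
assert torch.allclose(out, expected)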
def serve_values(name, func, args, kwargs, serving_values, fallback_func, backend_name=None, implemented_funcs=None, supported_kwargs=None,): #249 (line num in coconut source) """Determines the parameter value to serve for the given parameter name and kwargs. First checks for unsupported funcs or kwargs, then uses the following algorithm: 1. if name in serving_values, use serving_values[name], else 2. if guess in kwargs, use the guess, else 3. call fallback_func(name, func, *args, **kwargs).""" #265 (line num in coconut source) # validate arguments if implemented_funcs is not None: #267 (line num in coconut source) assert backend_name is not None, "serve_values expects a backend_name argument when doing func validation" #268 (line num in coconut source) if func not in implemented_funcs: #269 (line num in coconut source) raise ValueError("the {_coconut_format_0} backend does not implement the {_coconut_format_1} function".format(_coconut_format_0=(backend_name), _coconut_format_1=(func))) #270 (line num in coconut source) if supported_kwargs is not None: #271 (line num in coconut source) assert backend_name is not None, "serve_values expects a backend_name argument when doing kwargs validation" #272 (line num in coconut source) unsupported_kwargs = set(kwargs) - set(supported_kwargs) #273 (line num in coconut source) if unsupported_kwargs: #274 (line num in coconut source) raise ValueError("the {_coconut_format_0} backend does not support {_coconut_format_1} option(s)".format(_coconut_format_0=(backend_name), _coconut_format_1=(unsupported_kwargs))) #275 (line num in coconut source) # determine value _coconut_match_to_4 = serving_values #278 (line num in coconut source) _coconut_match_check_6 = False #278 (line num in coconut source) _coconut_match_set_name_value = _coconut_sentinel #278 (line num in coconut source) if _coconut.isinstance(_coconut_match_to_4, _coconut.abc.Mapping): #278 (line num in coconut source) _coconut_match_temp_19 = _coconut_match_to_4.get(name, _coconut_sentinel) #278 (line num in coconut source) if _coconut_match_temp_19 is not _coconut_sentinel: #278 (line num in coconut source) _coconut_match_set_name_value = _coconut_match_temp_19 #278 (line num in coconut source) _coconut_match_check_6 = True #278 (line num in coconut source) if _coconut_match_check_6: #278 (line num in coconut source) if _coconut_match_set_name_value is not _coconut_sentinel: #278 (line num in coconut source) value = _coconut_match_set_name_value #278 (line num in coconut source) if _coconut_match_check_6: #278 (line num in coconut source) return value #279 (line num in coconut source) else: #280 (line num in coconut source) _coconut_match_to_3 = kwargs #280 (line num in coconut source) _coconut_match_check_5 = False #280 (line num in coconut source) _coconut_match_set_name_guess = _coconut_sentinel #280 (line num in coconut source) if _coconut.isinstance(_coconut_match_to_3, _coconut.abc.Mapping): #280 (line num in coconut source) _coconut_match_temp_18 = _coconut_match_to_3.get("guess", _coconut_sentinel) #280 (line num in coconut source) if _coconut_match_temp_18 is not _coconut_sentinel: #280 (line num in coconut source) _coconut_match_set_name_guess = _coconut_match_temp_18 #280 (line num in coconut source) _coconut_match_check_5 = True #280 (line num in coconut source) if _coconut_match_check_5: #280 (line num in coconut source) if _coconut_match_set_name_guess is not _coconut_sentinel: #280 (line num in coconut source) guess = _coconut_match_set_name_guess #280 (line num in coconut 
source) if _coconut_match_check_5: #280 (line num in coconut source) return guess #281 (line num in coconut source) else: #282 (line num in coconut source) return fallback_func(name, func, *args, **kwargs)
7ad5847df2d3904da7786ab0c77e5a0d9e6380cd
22,072
def find_peaks(sig): """ Find hard peaks and soft peaks in a signal, defined as follows: - Hard peak: a peak that is either /\ or \/. - Soft peak: a peak that is either /-*\ or \-*/. In this case we define the middle as the peak. Parameters ---------- sig : np array The 1d signal array. Returns ------- hard_peaks : ndarray Array containing the indices of the hard peaks. soft_peaks : ndarray Array containing the indices of the soft peaks. """ if len(sig) == 0: return np.empty([0]), np.empty([0]) tmp = sig[1:] tmp = np.append(tmp, [sig[-1]]) tmp = sig - tmp tmp[np.where(tmp>0)] = 1 tmp[np.where(tmp==0)] = 0 tmp[np.where(tmp<0)] = -1 tmp2 = tmp[1:] tmp2 = np.append(tmp2, [0]) tmp = tmp-tmp2 hard_peaks = np.where(np.logical_or(tmp==-2, tmp==+2))[0] + 1 soft_peaks = [] for iv in np.where(np.logical_or(tmp==-1,tmp==+1))[0]: t = tmp[iv] i = iv+1 while True: if i==len(tmp) or tmp[i] == -t or tmp[i] == -2 or tmp[i] == 2: break if tmp[i] == t: soft_peaks.append(int(iv + (i - iv)/2)) break i += 1 soft_peaks = np.array(soft_peaks, dtype='int') + 1 return hard_peaks, soft_peaks
486b30d506e3d79dc7df8d2503d3bc626b6791f5
22,073
def evenly_divides(x, y): """Returns if [x] evenly divides [y].""" return int(y / x) == y / x
dbf8236454e88805e71aabf58d9b7ebd2b2a6393
22,074
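Illustrative use of evenly_divides above (assumes the function is in scope). Because it relies on float division, very large integers can misreport; a modulo check is an integer-only alternative, shown here only as a sketch:

assert evenly_divides(3, 12)
assert not evenly_divides(5, 12)

def evenly_divides_int(x, y):
    # avoids float rounding for very large y
    return y % x == 0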
def proxmap_sort(arr: list, key: Function = lambda x: x, reverse: bool = False) -> list: """Proxmap sort is a sorting algorithm that works by partitioning an array of data items, or keys, into a number of "subarrays" (termed buckets, in similar sorts). The name is short for computing a "proximity map," which indicates for each key K the beginning of a subarray where K will reside in the final sorted order. Keys are placed into each subarray using insertion sort.""" # Time complexity: # Worst: O(n^2) # Average: Theta(n) # Best: Omega(n) # Stable, Not in place _check_key_arr(arr, key, IntFloatList) if not arr: return [] _min = key(min(arr, key=key)) _max = key(max(arr, key=key)) hit_counts = [0 for _ in range(int(_min), int(_max + 1))] for item in arr: hit_counts[int(key(item)) - int(_min)] += 1 proxmaps = [] last_hit_count = 0 for hc in hit_counts: if hc == 0: proxmaps.append(None) else: proxmaps.append(last_hit_count) last_hit_count += hc locations = [] for item in arr: locations.append(proxmaps[int(key(item)) - int(_min)]) final = [None for _ in range(len(locations))] for idx, item in enumerate(arr): loc = locations[idx] if final[loc] is None: final[loc] = item else: none_ptr = loc while final[none_ptr] is not None: none_ptr += 1 for ptr in range(none_ptr - 1, loc - 1, -1): if final[ptr] > item: final[ptr], final[ptr + 1] = final[ptr + 1], final[ptr] else: final[ptr + 1] = item break else: final[loc] = item if reverse: final = final[::-1] return final
9673abbad9320df5d83ebef9658c84ab21b5f021
22,075
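To make the proxmap bookkeeping above concrete, here is a tiny traced example (illustrative only; it assumes the snippet's helper names such as _check_key_arr, IntFloatList and Function are defined in its module):

# arr = [6, 1, 3, 3], min = 1, max = 6
# hit_counts over keys 1..6        -> [1, 0, 2, 0, 0, 1]
# proxmaps (subarray start index)  -> [0, None, 1, None, None, 3]
# items are dropped at their proxmap slot and insertion-sorted locally
assert proxmap_sort([6, 1, 3, 3]) == [1, 3, 3, 6]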
import pathlib def load_footings_file(file: str): """Load footings generated file. :param str file: The path to the file. :return: A dict representing the respective file type. :rtype: dict .. seealso:: :obj:`footings.testing.load_footings_json_file` :obj:`footings.testing.load_footings_xlsx_file` """ file_ext = pathlib.Path(file).suffix return _load_footings_file(file_ext=file_ext, file=file)
929cf95e631e8be4dcc8f18c36a0b545593fed69
22,076
def coupler(*, coupling: float = 0.5) -> SDict: """a simple coupler model""" kappa = coupling ** 0.5 tau = (1 - coupling) ** 0.5 sdict = reciprocal( { ("in0", "out0"): tau, ("in0", "out1"): 1j * kappa, ("in1", "out0"): 1j * kappa, ("in1", "out1"): tau, } ) return sdict
dfd40ce9a8c61ffe8382b461c41d4034c7342570
22,077
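The coupler model above is power conserving by construction: with coupling c, |kappa|^2 + |tau|^2 = c + (1 - c) = 1. A quick arithmetic check (a sketch that does not depend on the SDict/reciprocal helpers used in the snippet):

c = 0.3
kappa, tau = c ** 0.5, (1 - c) ** 0.5
assert abs(kappa ** 2 + tau ** 2 - 1.0) < 1e-12   # lossless 2x2 coupler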
def new_client(request): """ Function that allows a new client to register itself. :param request: Who has made the request. :return: Response 200 with user_type, state, message and token, if everything goes smoothly. Response 400 if there is some kind of request error. Response 403 for forbidden. Or Response 404 for not found error. """ if "email" not in request.data or "first_name" not in request.data or "last_name" not in request.data or "password" not in request.data: return Response({"state": "Error", "message": "Missing parameters"}, status=HTTP_400_BAD_REQUEST) state, message, username = queries.add_client(request.data) state, status = ("Success", HTTP_200_OK) if state else ("Error", HTTP_400_BAD_REQUEST) return Response({"state": state, "message": message}, status=status)
d05eeb55527cfe355fb237751693749ed707a598
22,078
import parsedatetime from datetime import datetime def parse_time_interval_seconds(time_str): """ Convert a given time interval (e.g. '5m') into the number of seconds in that interval :param time_str: the string to parse :returns: the number of seconds in the interval :raises ValueError: if the string could not be parsed """ cal = parsedatetime.Calendar() parse_result = cal.parseDT(time_str, sourceTime=datetime.min) if parse_result[1] == 0: raise ValueError("Could not understand time {time}".format(time=time_str)) return (parse_result[0] - datetime.min).total_seconds()
6c07ee52b8dd727e96dae8a58f59c1bd043f3627
22,079
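Illustrative use of parse_time_interval_seconds above (a sketch; assumes parsedatetime is installed and that it resolves the phrase as a relative interval):

seconds = parse_time_interval_seconds('5 minutes')
print(seconds)   # expected: 300.0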
from typing import List from typing import Dict def _map_class_names_to_probabilities(probabilities: List[float]) -> Dict[str, float]: """Creates a dictionary mapping readable class names to their corresponding probabilities. Args: probabilities (List[float]): A List of the probabilities for the best predicted classes. Returns: Dict[str, float]: A dictionary mapping all readable class names to their corresponding probabilities. """ classes = load_classes() return { class_name: probability for class_name, probability in zip(classes, probabilities) }
51096015e4c291b7d3357822ea13de3354b9b12f
22,080
import collections def order_items(records): """Orders records by ASC SHA256""" return collections.OrderedDict(sorted(records.items(), key=lambda t: t[0]))
a9117282974fcea8d0d99821ea6293df82889b30
22,081
def G2DListMutatorRealGaussianGradient(genome, **args): """ A gaussian gradient mutator for G2DList of Real Accepts the *rangemin* and *rangemax* genome parameters, both optional. The difference is that this multiplies the gene by gauss(1.0, 0.0333), allowing for a smooth gradient drift about the value. """ if args["pmut"] <= 0.0: return 0 height, width = genome.getSize() elements = height * width mutations = args["pmut"] * elements mu = constants.CDefGaussianGradientMU sigma = constants.CDefGaussianGradientSIGMA if mutations < 1.0: mutations = 0 for i in xrange(genome.getHeight()): for j in xrange(genome.getWidth()): if utils.randomFlipCoin(args["pmut"]): final_value = genome[i][j] * abs(prng.normal(mu, sigma)) final_value = min(final_value, genome.getParam("rangemax", constants.CDefRangeMax)) final_value = max(final_value, genome.getParam("rangemin", constants.CDefRangeMin)) genome.setItem(i, j, final_value) mutations += 1 else: for it in xrange(int(round(mutations))): which_x = prng.randint(0, genome.getWidth()) which_y = prng.randint(0, genome.getHeight()) final_value = genome[which_y][which_x] * abs(prng.normal(mu, sigma)) final_value = min(final_value, genome.getParam("rangemax", constants.CDefRangeMax)) final_value = max(final_value, genome.getParam("rangemin", constants.CDefRangeMin)) genome.setItem(which_y, which_x, final_value) return int(mutations)
52e739fe4c490064dbec5fe0b6a7443570cada0e
22,082
def convert_group_by(response, field): """ Convert to key, doc_count dictionary """ if not response.hits.hits: return [] r = response.hits.hits[0]._source.to_dict() stats = r.get(field) result = [{"key": key, "doc_count": count} for key, count in stats.items()] result_sorted = sorted( result, key=lambda i: i["doc_count"], reverse=True ) # sort by count return result_sorted
888321f300d88bd6f150a4bfda9420e920bab510
22,083
def _parse_single(argv, args_array, opt_def_dict, opt_val): """Function: _parse_single Description: Processes a single-value argument in command line arguments. Modifies the args_array by adding a dictionary key and a value. NOTE: Used by arg_parse2() to reduce the complexity rating. Arguments: (input) argv -> Arguments from the command line. (input) args_array -> Array of command line options and values. (input) opt_def_dict -> Dict with options and default values. (input) opt_val -> List of options that allow None or 1 value for the option. (output) argv -> Arguments from the command line. (output) args_array -> Array of command line options and values. """ argv = list(argv) args_array = dict(args_array) opt_def_dict = dict(opt_def_dict) opt_val = list(opt_val) # If no value in argv for option and it is not an integer. if len(argv) < 2 or (argv[1][0] == "-" and not gen_libs.chk_int(argv[1])): if argv[0] in opt_val: args_array[argv[0]] = None else: # See if default value is available for argument. args_array = arg_default(argv[0], args_array, opt_def_dict) else: args_array[argv[0]] = argv[1] argv = argv[1:] return argv, args_array
5b44a891400b545940c9be3913af9710c37df898
22,085
def compOverValueTwoSets(setA={1, 2, 3, 4}, setB={3, 4, 5, 6}): """ task 0.5.9 comprehension whose value is the intersection of setA and setB without using the '&' operator """ return {x for x in (setA | setB) if x in setA and x in setB}
2b222d6c171e0170ace64995dd64c352f03aa99b
22,086
def d_beta(): """ Real Name: b'D BETA' Original Eqn: b'0.05' Units: b'' Limits: (None, None) Type: constant b'' """ return 0.05
32aa0e1fbf31761a36657b314a7c4bc4bf99889e
22,087
import csv import numpy as np def _get_data(filename): """ :param filename: name of a comma-separated data file with two columns: eccentricity and some other quantity x :return: eccentricities, x """ eccentricities = [] x = [] with open(filename) as file: r = csv.reader(file) for row in r: eccentricities.append(float(row[0])) x.append(float(row[1])) return np.array(eccentricities), np.array(x)
f8c86f0f1c9bf2ee91108382cd6da6b98445bf1f
22,088
def longestCommonPrefix(strs): """ :type strs: List[str] :rtype: str """ if len(strs) > 0: common = strs[0] for str in strs[1:]: while not str.startswith(common): common = common[:-1] return common else: return ''
a860d46df8dbaeaab90bb3bc69abb68484216b5b
22,089
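A few illustrative calls for longestCommonPrefix above (assumes the function is in scope):

assert longestCommonPrefix(["flower", "flow", "flight"]) == "fl"
assert longestCommonPrefix(["dog", "racecar", "car"]) == ""
assert longestCommonPrefix([]) == ''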
def analytics_dashboard(request): """Main page for analytics related things""" template = 'analytics/analyzer/dashboard.html' return render(request, template)
068913de2f2b1a381f73395e53e2e06db7d02e8e
22,090
def insert(shape, axis=-1): """Shape -> shape with one axis inserted""" return shape[:axis] + (1,) + shape[axis:]
8c786df81b76cfa5dae78b51d16b2ee302263c53
22,091
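Small illustration of the insert shape helper above (assumes the function is in scope): the new length-1 axis lands before the indexed position, so the default axis=-1 inserts it before the last entry.

assert insert((3, 4), axis=0) == (1, 3, 4)
assert insert((3, 4), axis=2) == (3, 4, 1)
assert insert((3, 4)) == (3, 1, 4)            # default axis=-1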
import re def simplify_text(text): """ :param text: :return: """ no_html = re.sub('<[^<]+?>', '', str(text)) stripped = re.sub(r"[^a-zA-Z]+", "", str(no_html)) clean = stripped.lower() return clean
a867bd08a642843df9d8a2a517f1b0c13ea145b1
22,092
def is_numpy_convertable(v): """ Return whether a value is meaningfully convertable to a numpy array via 'numpy.array' """ return hasattr(v, "__array__") or hasattr(v, "__array_interface__")
163da2cf50e2172e1fc39ae8afd7c4417b02a852
22,093
import numpy as np from scipy.signal import convolve2d def grower(array): """grows masked regions by one pixel """ kernel = np.array([[0,1,0],[1,1,1],[0,1,0]]) # cross-shaped dilation kernel ag = convolve2d(array, kernel, mode="same") ag = ag != 0 return ag
b6ae0c9eeb96ec13a5f9ef7a282ec428b5d536ad
22,094
def SMAPELossFlat(*args, axis=-1, floatify=True, **kwargs): """Same as `smape`, but flattens input and target. DOES not work yet """ return BaseLoss(smape, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs)
470e8f34cfd5fd6a867f9991e8babcad25fa03ff
22,095
from datetime import datetime def get_fake_datetime(now: datetime): """Generate monkey patch class for `datetime.datetime`, whose now() and utcnow() always returns given value.""" class FakeDatetime: """Fake datetime.datetime class.""" @classmethod def now(cls): """Return given value.""" return now @classmethod def utcnow(cls): """Return given value.""" return now return FakeDatetime
f268640c6459f4eb88fd9fbe72acf8c9d806d3bc
22,096
from typing import List def generate_order_by(fields: List[str], sort_orders: List[str], table_pre: str = '') -> str: """Generates an ORDER BY clause for SQL Args: fields: list of fields to sort by sort_orders: list of (asc/desc) values table_pre: table prefix used in the query Return: sql ORDER BY clause """ def _get_str_order(field: str, sort_order: str, table_pre: str = '') -> str: """Generates a single "FIELD ASC" entry""" if sort_order.upper() not in ['ASC', 'DESC']: raise PGsqlOrderByExcept(f'sort_order value should be ASC or DESC but got {sort_order}') if table_pre: return f"{table_pre}.{field} {sort_order.upper()}" return f"{field} {sort_order.upper()}" if not fields: return '' orders_clause = [] for i, f in enumerate(fields): orders_clause.append(_get_str_order(f, sort_orders[i], table_pre)) return "ORDER BY " + ", ".join(orders_clause)
863d348d2bd844718e056c6767e1f282405b3edf
22,097
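Illustrative call for generate_order_by above (a sketch; assumes PGsqlOrderByExcept is defined in the surrounding module):

sql = generate_order_by(['created_at', 'id'], ['desc', 'asc'], table_pre='t')
assert sql == 'ORDER BY t.created_at DESC, t.id ASC'
assert generate_order_by([], []) == ''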
def toUnicode(glyph, isZapfDingbats=False): """Convert glyph names to Unicode, such as 'longs_t.oldstyle' --> u'ſt' If isZapfDingbats is True, the implementation recognizes additional glyph names (as required by the AGL specification). """ # https://github.com/adobe-type-tools/agl-specification#2-the-mapping # # 1. Drop all the characters from the glyph name starting with # the first occurrence of a period (U+002E; FULL STOP), if any. glyph = glyph.split(".", 1)[0] # 2. Split the remaining string into a sequence of components, # using underscore (U+005F; LOW LINE) as the delimiter. components = glyph.split("_") # 3. Map each component to a character string according to the # procedure below, and concatenate those strings; the result # is the character string to which the glyph name is mapped. result = [_glyphComponentToUnicode(c, isZapfDingbats) for c in components] return "".join(result)
42bfee52db47f466308dfc4782a609776f6b90b9
22,098
def list_watchlist_items_command(client, args): """ Get specific watchlist item or list of watchlist items. :param client: (AzureSentinelClient) The Azure Sentinel client to work with. :param args: (dict) arguments for this command. """ # prepare the request alias = args.get('watchlist_alias', '') url_suffix = f'watchlists/{alias}/watchlistItems' item_id = args.get('watchlist_item_id') if item_id: url_suffix += f'/{item_id}' # request result = client.http_request('GET', url_suffix) # prepare result raw_items = [result] if item_id else result.get('value') items = [{'WatchlistAlias': alias, **watchlist_item_data_to_xsoar_format(item)} for item in raw_items] readable_output = tableToMarkdown('Watchlist items results', items, headers=['ID', 'ItemsKeyValue'], headerTransform=pascalToSpace, removeNull=True) return CommandResults( readable_output=readable_output, outputs_prefix='AzureSentinel.WatchlistItem', outputs=items, outputs_key_field='ID', raw_response=result )
1567bd56c90e9560fcae583d8360e4299620c266
22,099
def blit_array(surface, array): """ Generates image pixels from a JNumeric array. Arguments include destination Surface and array of integer colors. JNumeric required as specified in numeric module. """ if not _initialized: _init() if len(array.shape) == 2: data = numeric.transpose(array, (1,0)) data = numeric.ravel(data) else: data = array[:,:,0]*0x10000 | array[:,:,1]*0x100 | array[:,:,2] data = numeric.transpose(data, (1,0)) data = numeric.ravel(data) if not surface.getColorModel().hasAlpha(): surface.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width) else: surf = Surface((surface.width,surface.height), BufferedImage.TYPE_INT_RGB) surf.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width) g2d = surface.createGraphics() g2d.drawImage(surf, 0, 0, None) g2d.dispose() return None
850b6451ecc780fd163f574176a8c5683174046e
22,100
import numpy as np def generate_IO_examples(program, N, L, V): """ Given a program, randomly generates N IO examples, using the specified length L for the input arrays. """ input_types = program.ins input_nargs = len(input_types) # Generate N input-output pairs IO = [] for _ in range(N): input_value = [None]*input_nargs for a in range(input_nargs): minv, maxv = program.bounds[a] if input_types[a] == int: input_value[a] = np.random.randint(minv, maxv) elif input_types[a] == [int]: input_value[a] = list(np.random.randint(minv, maxv, size=L)) else: raise Exception("Unsupported input type " + str(input_types[a]) + " for random input generation") output_value = program.fun(input_value) IO.append((input_value, output_value)) assert (program.out == int and output_value <= V) or (program.out == [int] and len(output_value) == 0) or (program.out == [int] and max(output_value) <= V) return IO
4449974cf5a1bd04a89e5575fb48362da8fa1621
22,102
def secrecy_capacity(dist, rvs=None, crvs=None, rv_mode=None, niter=None, bound_u=None): """ The rate at which X and Y can agree upon a key with Z eavesdropping, and no public communication. Parameters ---------- dist : Distribution The distribution of interest. rvs : iterable of iterables, len(rvs) == 2 The indices of the random variables agreeing upon a secret key. crvs : iterable The indices of the eavesdropper. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as random variable indices. If equal to 'names', the the elements are interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is consulted, which defaults to 'indices'. niter : int, None The number of hops to perform during optimization. bound_u : int, None The bound to use on the size of the variable U. If none, use the theoretical bound of |X|. Returns ------- sc : float The secrecy capacity. """ a = secrecy_capacity_directed(dist, rvs[0], rvs[1], crvs, rv_mode=rv_mode, niter=niter, bound_u=bound_u) b = secrecy_capacity_directed(dist, rvs[1], rvs[0], crvs, rv_mode=rv_mode, niter=niter, bound_u=bound_u) return max([a, b])
c55655847f9d9cf586bd4e274f72359d0241c349
22,104
import numpy as np def encrypt_message(partner, message): """ Encrypt a message :param partner: Name of partner :param message: Message as string :return: Message as numbers """ matrix = get_encryption_matrix(get_key(get_private_filename(partner))) rank = np.linalg.matrix_rank(matrix) num_blocks = int(np.ceil(1.0 * len(message) / rank)) padded_message = message for i in range(len(message), rank * num_blocks): padded_message += ' ' encoded_message = string_to_numbers(padded_message) encrypted_numbers = np.empty(rank * num_blocks, dtype=int) rhs = np.empty(rank, dtype=int) for b in range(num_blocks): for i in range(rank): rhs[i] = encoded_message[i + rank * b] lhs = np.dot(matrix, rhs) for i in range(rank): encrypted_numbers[i + rank * b] = lhs[i] return encrypted_numbers
e8e96f99f511afb3b91a9c264f8452e45b14e165
22,105
def create_event(title, start, end, capacity, location, coach, private): """Create event and submit to database""" event = Class(title=title, start=start, end=end, capacity=capacity, location=location, coach=coach, free=capacity, private=private) db.session.add(event) db.session.commit() return event
4a1b6314eee362f6ae7f35685cb09fe969175c0b
22,106
def regret_obs(m_list, inputs, true_ymin=0): """Immediate regret using past observations. Parameters ---------- m_list : list A list of GPy models generated by `OptimalDesign`. inputs : instance of `Inputs` The input space. true_ymin : float, optional The minimum value of the objective function. Returns ------- res : list A list containing the values of the immediate regret for each model in `m_list` using past observations: $r(n) = min y_i - y_{true}$ where y_i are the observations recorded in the first `n` iterations, and y_{true} the minimum of the objective function. """ res = np.zeros(len(m_list)) for ii, model in enumerate(m_list): res[ii] = model.Y.min() - true_ymin return res
e9cc2567f9740deae7b681cc754d20a387fbc894
22,107
import numpy def pmat2cam_center(P): """ See Hartley & Zisserman (2003) p. 163 """ assert P.shape == (3, 4) determinant = numpy.linalg.det # camera center X = determinant([P[:, 1], P[:, 2], P[:, 3]]) Y = -determinant([P[:, 0], P[:, 2], P[:, 3]]) Z = determinant([P[:, 0], P[:, 1], P[:, 3]]) T = -determinant([P[:, 0], P[:, 1], P[:, 2]]) C_ = numpy.transpose(numpy.array([[X / T, Y / T, Z / T]])) return C_
f959eab9feeeafd90c3a2178b77d81e509ef1282
22,108
def _http_req(mocker): """Fixture providing HTTP Request mock.""" return mocker.Mock(spec=Request)
651866118fd25909e50469cb92f35a9aa3a6d873
22,109
def transform_data(df, steps_per_floor_): """Transform original dataset. :param df: Input DataFrame. :param steps_per_floor_: The number of steps per-floor at 43 Tanner Street. :return: Transformed DataFrame. """ df_transformed = ( df .select( col('id'), concat_ws( ' ', col('first_name'), col('second_name')).alias('name'), (col('floor') * lit(steps_per_floor_)).alias('steps_to_desk'))) return df_transformed
163bedd83315828001f4cca2abdc130a2a77a55a
22,110
import logging def get_client(bucket): """Get the Storage Client appropriate for the bucket. Args: bucket (str): Bucket including Returns: ~Storage: Client for interacting with the cloud. """ try: protocol, bucket_name = str(bucket).lower().split('://', 1) except ValueError: raise ValueError('Invalid storage bucket name: {}'.format(bucket)) logger = logging.getLogger('storage.get_client') if protocol == 's3': storage_client = S3Storage(bucket_name) elif protocol == 'gs': storage_client = GoogleStorage(bucket_name) else: errmsg = 'Unknown STORAGE_BUCKET protocol: %s' logger.error(errmsg, protocol) raise ValueError(errmsg % protocol) return storage_client
46a27b4a028daa927507f3e8b0d5aeb453fd302b
22,111
def extract_text(xml_string): """Get text from the body of the given NLM XML string. Parameters ---------- xml_string : str String containing valid NLM XML. Returns ------- str Extracted plaintext. """ paragraphs = extract_paragraphs(xml_string) if paragraphs: return '\n'.join(paragraphs) + '\n' else: return None
f3e80d960837d8663d9711bd9696644f00ba21e9
22,112
def search_organizations(search_term: str = None, limit: str = None): """ Looks up organizations by name & location. :param search_term: e.g. "College of Nursing" or "Chicago, IL". :param limit: The maximum number of matches you'd like returned - defaults to 10, maximum is 50. :returns: String containing xml or an lxml element. """ return get_anonymous( 'searchOrganizations', search_term=search_term, limit=limit)
5523f6278b4eff5618979ce942fb175b20042079
22,114
def call_math_operator(value1, value2, op, default): """Return the result of the math operation on the given values.""" if not value1: value1 = default if not value2: value2 = default if not pyd.is_number(value1): try: value1 = float(value1) except Exception: pass if not pyd.is_number(value2): try: value2 = float(value2) except Exception: pass return op(value1, value2)
da1a163e4079cfd885d8ca163939111c9291767b
22,115
def addGems(ID, nbGems): """ Adds a number of gems to someone's account. Requires their ID and the number of gems. To remove gems, pass a negative number. If the account does not hold enough, the function returns a number strictly below 0. """ old_value = valueAt(ID, "gems", GF.dbGems) new_value = int(old_value) + nbGems if new_value >= 0: updateField(ID, "gems", new_value, GF.dbGems) print("DB >> The balance of " + str(ID) + " is now: " + str(new_value)) else: print("DB >> There is not enough on this account!") return str(new_value)
6f3778cec488138101a78a072591babb832d7f95
22,116
def BertzCT(mol, cutoff=100, dMat=None, forceDMat=1): """ A topological index meant to quantify "complexity" of molecules. Consists of a sum of two terms, one representing the complexity of the bonding, the other representing the complexity of the distribution of heteroatoms. From S. H. Bertz, J. Am. Chem. Soc., vol 103, 3599-3601 (1981) "cutoff" is an integer value used to limit the computational expense. A cutoff value tells the program to consider vertices topologically identical if their distance vectors (sets of distances to all other vertices) are equal out to the "cutoff"th nearest-neighbor. **NOTE** The original implementation had the following comment: > this implementation treats aromatic rings as the > corresponding Kekule structure with alternating bonds, > for purposes of counting "connections". Upon further thought, this is the WRONG thing to do. It results in the possibility of a molecule giving two different CT values depending on the kekulization. For example, in the old implementation, these two SMILES: CC2=CN=C1C3=C(C(C)=C(C=N3)C)C=CC1=C2C CC3=CN=C2C1=NC=C(C)C(C)=C1C=CC2=C3C which correspond to differentk kekule forms, yield different values. The new implementation uses consistent (aromatic) bond orders for aromatic bonds. THIS MEANS THAT THIS IMPLEMENTATION IS NOT BACKWARDS COMPATIBLE. Any molecule containing aromatic rings will yield different values with this implementation. The new behavior is the correct one, so we're going to live with the breakage. **NOTE** this barfs if the molecule contains a second (or nth) fragment that is one atom. """ atomTypeDict = {} connectionDict = {} numAtoms = mol.GetNumAtoms() if forceDMat or dMat is None: if forceDMat: # nope, gotta calculate one dMat = Chem.GetDistanceMatrix(mol, useBO=0, useAtomWts=0, force=1) mol._adjMat = dMat else: try: dMat = mol._adjMat except AttributeError: dMat = Chem.GetDistanceMatrix(mol, useBO=0, useAtomWts=0, force=1) mol._adjMat = dMat if numAtoms < 2: return 0 bondDict, neighborList, vdList = _CreateBondDictEtc(mol, numAtoms) symmetryClasses = _AssignSymmetryClasses(mol, vdList, dMat, forceDMat, numAtoms, cutoff) # print('Symmm Classes:',symmetryClasses) for atomIdx in range(numAtoms): hingeAtomNumber = mol.GetAtomWithIdx(atomIdx).GetAtomicNum() atomTypeDict[hingeAtomNumber] = atomTypeDict.get(hingeAtomNumber, 0) + 1 hingeAtomClass = symmetryClasses[atomIdx] numNeighbors = vdList[atomIdx] for i in range(numNeighbors): neighbor_iIdx = neighborList[atomIdx][i] NiClass = symmetryClasses[neighbor_iIdx] bond_i_order = _LookUpBondOrder(atomIdx, neighbor_iIdx, bondDict) # print('\t',atomIdx,i,hingeAtomClass,NiClass,bond_i_order) if (bond_i_order > 1) and (neighbor_iIdx > atomIdx): numConnections = bond_i_order * (bond_i_order - 1) / 2 connectionKey = (min(hingeAtomClass, NiClass), max(hingeAtomClass, NiClass)) connectionDict[connectionKey] = connectionDict.get(connectionKey, 0) + numConnections for j in range(i + 1, numNeighbors): neighbor_jIdx = neighborList[atomIdx][j] NjClass = symmetryClasses[neighbor_jIdx] bond_j_order = _LookUpBondOrder(atomIdx, neighbor_jIdx, bondDict) numConnections = bond_i_order * bond_j_order connectionKey = (min(NiClass, NjClass), hingeAtomClass, max(NiClass, NjClass)) connectionDict[connectionKey] = connectionDict.get(connectionKey, 0) + numConnections if not connectionDict: connectionDict = {'a': 1} return _CalculateEntropies(connectionDict, atomTypeDict, numAtoms)
0ca8119db47121dc22e0554e114e51ad916af455
22,117
def BOPTools_AlgoTools_CorrectRange(*args): """ * Correct shrunk range <aSR> taking into account 3D-curve resolution and corresp. tolerances' values of <aE1>, <aE2> :param aE1: :type aE1: TopoDS_Edge & :param aE2: :type aE2: TopoDS_Edge & :param aSR: :type aSR: IntTools_Range & :param aNewSR: :type aNewSR: IntTools_Range & :rtype: void * Correct shrunk range <aSR> taking into account 3D-curve resolution and corresp. tolerances' values of <aE>, <aF> :param aE: :type aE: TopoDS_Edge & :param aF: :type aF: TopoDS_Face & :param aSR: :type aSR: IntTools_Range & :param aNewSR: :type aNewSR: IntTools_Range & :rtype: void """ return _BOPTools.BOPTools_AlgoTools_CorrectRange(*args)
491b1930a017940137aa2a59c630f995f0fc8366
22,118
from typing import Tuple def approx_min_k(operand: Array, k: int, reduction_dimension: int = -1, recall_target: float = 0.95, reduction_input_size_override: int = -1, aggregate_to_topk: bool = True) -> Tuple[Array, Array]: """Returns min ``k`` values and their indices of the ``operand``. Args: operand : Array to search for min-k. k : Specifies the number of min-k. reduction_dimension: Integer dimension along which to search. Default: -1. recall_target: Recall target for the approximation. reduction_input_size_override : When set to a positive value, it overrides the size determined by operands[reduction_dim] for evaluating the recall. This option is useful when the given operand is only a subset of the overall computation in SPMD or distributed pipelines, where the true input size cannot be deferred by the operand shape. aggregate_to_topk: When true, aggregates approximate results to top-k. When false, returns the approximate results. Returns: Tuple[Array, Array] : Least k values and their indices of the inputs. """ if xc._version < 45: aggregate_to_topk = True return approx_top_k_p.bind( operand, k=k, reduction_dimension=reduction_dimension, recall_target=recall_target, is_max_k=False, reduction_input_size_override=reduction_input_size_override, aggregate_to_topk=aggregate_to_topk)
e4716369b4371b27ccabaaa61d2157e513cf06ed
22,120
def sitemap_host_xml(): """Supplementary Sitemap XML for Host Pages""" database_connection.reconnect() hosts = ww_host.info.retrieve_all(database_connection) sitemap = render_template("sitemaps/hosts.xml", hosts=hosts) return Response(sitemap, mimetype="text/xml")
79b21d1465ef84c1cfaf192be0b2fa5bf07f1014
22,121
def WTC(df,N): """Within Topic Coherence Measure. [Note] It ignores a word which does not have trained word vector. Parameters ---------- df : Word-Topic distribution K by V where K is number of topics and V is number of words N : Number of top N words Returns ------- total : WTC value of each topic (1 * K) """ df = df.iloc[:N,:] total = [] for col in df.columns: cos_val = 0 words = df[col].tolist() for c in combinations(words,2): # print(c) try: cos_val += 1-cosine(word2vec_model.get_vector(c[0]), word2vec_model.get_vector(c[1])) except: pass # print(c) # print(cosine(word2glove[c[0]], word2glove[c[1]])) print(col, cos_val) total.append(cos_val) return total
b5b50eef85f6e12c54c9ec16a2f7264aa057d344
22,122
def extract_static_override_features( static_overrides): """Extract static feature override values. Args: static_overrides: A dataframe that contains the value for static overrides to be passed to the GAM Encoders. Returns: A mapping from feature name to location and then to the override value. This is a two-level dictionary of the format: {feature: {location: value}} """ static_overrides_features = dict() for feature in set(static_overrides[constants.FEATURE_NAME_COLUMN]): static_overrides_features[feature] = dict() override_slice = static_overrides.loc[static_overrides[ constants.FEATURE_NAME_COLUMN] == feature] for location in set(override_slice[constants.GEO_ID_COLUMN]): override_sub_slice = override_slice.loc[override_slice[ constants.GEO_ID_COLUMN] == location] static_overrides_features[feature][location] = override_sub_slice[ constants.FEATURE_MODIFIER_COLUMN].to_numpy()[0] return static_overrides_features
9425674eb2e0578ecbc926b249a6eb2f6afb37d0
22,123
def job_list_View(request): """ """ job_list = Job.objects.filter() paginator = Paginator(job_list, 10) page_number = request.GET.get('page') page_obj = paginator.get_page(page_number) context = { 'page_obj': page_obj, } return render(request, 'jobapp/job-list.html', context)
a2928ea255ff3cb044462fc4ebf7c530bd54b2fb
22,124
import json def edit_schedule(request): """Edit automatic updates schedule""" if request.method == "POST": schedule = models.UpdateSchedule.objects.get() def fun(query): return [int(x.strip()) for x in query.split(" ") if x.strip() != ""] schedule.text = json.dumps({ models.YoutubeChannel.PRIORITY_LOW: fun(request.POST["low"]), models.YoutubeChannel.PRIORITY_MEDIUM: fun(request.POST["medium"]), models.YoutubeChannel.PRIORITY_HIGH: fun(request.POST["high"]), }) schedule.save() return redirect("notifpy:settings")
c87ef4ff0088bf4d67a1078ffe11b4c723272a8a
22,125
import math def infection_formula(name_model, infectious_number, classroom_volume, classroom_ach): """ Calculate infection rate of with/without a mask by selected model. """ if name_model == "wells_riley": # Use wells riley model. effect_mask = 1.0 / ((1.0 - config.EXHALATION_FILTRATION_EFFICIENCY) * (1.0 - config.RESPIRATION_FILTRATION_EFFICIENCY)) infection_rate_w_mask = 1.0 - math.exp(-infectious_number * config.QUANTUM_GENERATION_RATE * config.PULMONARY_VENTILATIION_RATE * (config.LESSON_TIME / 60) / (classroom_volume * classroom_ach * effect_mask)) infection_rate_wo_mask = 1.0 - math.exp(-infectious_number * config.QUANTUM_GENERATION_RATE * config.PULMONARY_VENTILATIION_RATE * (config.LESSON_TIME / 60) / (classroom_volume * classroom_ach)) else: # Future Work: Add infection models for calculate infection rate. infection_rate_w_mask = 0.0 infection_rate_wo_mask = 0.0 return infection_rate_w_mask, infection_rate_wo_mask
99b14a88c4ed02716626d8bc037b3f54211caa2a
22,126
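For reference, the wells_riley branch above follows the Wells-Riley relation P = 1 - exp(-I * q * p * t / (V * ACH * mask_factor)). The standalone sketch below restates it with explicit parameters; the names and values are placeholders, not the config constants used in the snippet:

import math

def wells_riley_infection_rate(infectors, quanta_rate, breathing_rate,
                               hours, room_volume, ach, mask_factor=1.0):
    # P = 1 - exp(-I * q * p * t / (V * ACH * mask_factor))
    return 1.0 - math.exp(-infectors * quanta_rate * breathing_rate * hours
                          / (room_volume * ach * mask_factor))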
def RetryWithBackoff(opts, fn, args=None, kwargs=None): """`fn` function must follow the interface suggested: * it should return tuple <status, err> where status - backoff status err - error that happened in function, to propagate it to the caller.""" args = args or () kwargs = kwargs or {} update_opts(opts) count = 0 backoff = opts['backoff'] while True: count += 1 status, err_or_rv = fn(*args, **kwargs) print status, err_or_rv if status == RETRY_BREAK: return err_or_rv if status == RETRY_RESET: backoff = opts['backoff'] count = wait = 0 if status == RETRY_CONTINUE: if opts['max_attempts'] > 0 and count >= opts['max_attempts']: raise RetryMaxAttemptsError( opts['max_attempts'], reason=err_or_rv) wait = (backoff + backoff * retry_jitter) * opts['constant_factor'] print "RETRIED IN ... %s" % wait if backoff > opts['max_backoff']: backoff = opts['max_backoff'] gevent.sleep(wait)
bf040a93015f3a283ac858c8738dc6bd8c48b2af
22,127
def noaa_api_formatter(raw, metrics=None, country_aggr=False): """Format the output of the NOAA API to the task-geo Data Model. Arguments: raw(pandas.DataFrame):Data to be formatted. metrics(list[str]): Optional.List of metrics requested,valid metric values are: TMIN: Minimum temperature. TMAX: Maximum temperature. TAVG: Average of temperature. SNOW: Snowfall (mm). SNWD: Snow depth (mm). PRCP: Precipitation country_aggr(bool): When True, only an aggregate for each date/country will be returned. Returns: pandas.DataFrame """ if metrics is None: metrics = [metric.lower() for metric in DEFAULT_METRICS if metric in raw.columns] data = raw.copy() data.columns = [column.lower() for column in data.columns] column_order = [ 'latitude', 'longitude', 'elevation', 'country', 'name', 'date', 'station'] column_order.extend(metrics) data.date = pd.to_datetime(data.date) for column in ['tmax', 'tavg', 'tmin']: if column in data.columns: data[column] = data[column].astype(float) if 'snwd' in data.columns: data['snwd'] = data['snwd'].astype(float) / 1000 data.snwd.fillna(0, inplace=True) if 'prcp' in data.columns: data['prcp'] = data['prcp'].astype(float) / 1000 data.prcp.fillna(0, inplace=True) data['country'] = data.station.str.slice(0, 2).apply(fips_to_name) data = data[column_order] if country_aggr: aggregations = {} if 'tmin' in metrics: aggregations['tmin'] = np.min if 'tmax' in metrics: aggregations['tmax'] = np.max agg_columns = list(aggregations.keys()) return data.groupby(['country', 'date'])[agg_columns].aggregate(aggregations).reset_index() return data
155b9a0cee72f85d6f5329a5a68aca4aa1dfe1eb
22,128
import numpy as np


def crop_wav(wav, center, radius):
    """
    Crop wav on [center - radius, center + radius + 1], padding with zeros for
    out-of-range indices.

    :param wav: wav signal
    :param center: crop center
    :param radius: crop radius
    :return: a slice whose length is radius * 2 + 1.
    """
    left_border = center - radius
    right_border = center + radius + 1
    if left_border < 0:
        zeros = np.zeros(-left_border)
        cropped_wav = np.concatenate([zeros, wav[0: right_border]])
    elif right_border > len(wav):
        zeros = np.zeros(right_border - len(wav))
        cropped_wav = np.concatenate([wav[left_border: len(wav)], zeros])
    else:
        cropped_wav = wav[left_border: right_border]
    assert len(cropped_wav) == radius * 2 + 1
    return cropped_wav
69a5a078f06b083694d5d5eb5328b63c70a6f17c
22,129
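A quick check of the zero-padding behaviour of crop_wav above, assuming the numpy import shown there:

import numpy as np

wav = np.arange(10, dtype=float)
print(crop_wav(wav, center=1, radius=3))   # [0. 0. 0. 1. 2. 3. 4.] - left-padded
print(crop_wav(wav, center=8, radius=3))   # [5. 6. 7. 8. 9. 0. 0.] - right-padded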
def markdown(text: str) -> str: """Helper function to escape markdown symbols""" return MD_RE.sub(r'\\\1', text)
2cb5fb3f5cac2d5cc5b6d256c4a8357832f3e53e
22,130
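A minimal sketch of the escaping helper above in action; the MD_RE pattern here is an assumption (the source module defines its own), chosen to cover common MarkdownV2 special characters.

import re

MD_RE = re.compile(r'([_*\[\]()~`>#+\-=|{}.!])')   # assumed pattern, not the original

print(markdown("price is *10* (discounted!)"))
# price is \*10\* \(discounted\!\)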
def import_sample(sample_name, db):
    """Import a sample, returning its id (the row is created if it does not exist)."""
    cur = db.cursor()
    cur.execute('select sample_id from sample where sample_name=?', (sample_name, ))
    res = cur.fetchone()
    if res is None:
        cur.execute('insert into sample (sample_name) values (?)', (sample_name, ))
        sample_id = cur.lastrowid
    else:
        sample_id = res[0]
    return sample_id
c477a4f036951cac88789b59f361cf9397a0e9ee
22,131
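A small usage sketch for import_sample above, using an in-memory SQLite database with an assumed minimal schema:

import sqlite3

db = sqlite3.connect(':memory:')
db.execute('create table sample (sample_id integer primary key, sample_name text)')

first = import_sample('patient_01', db)
again = import_sample('patient_01', db)   # existing row is reused
assert first == again
db.commit()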
import torch


def change_background_color_balck_digit(images, old_background, new_background,
                                         new_background2=None, p=1):
    """
    :param images: BCHW
    :return:
    """
    if new_background2 is None:
        assert old_background == [0]
        if not torch.is_tensor(new_background):
            new_background = torch.tensor(new_background, dtype=images.dtype)
            if images.max() <= 1 and new_background.max() > 1:
                new_background /= 255
        if images.size(1) == 1 and len(new_background) == 3:
            images = images.expand(-1, 3, -1, -1)
        else:
            assert images.size(1) == len(new_background)
            # raise NotImplementedError(images.size(), new_background)
        images = images.clone()
        new_background = new_background.view(-1, 1, 1)
        n = images.size(0)
        ch = images.size(1)
        if (images.view(n, ch, -1).sum(2) == 0).sum(1).sum() > n:
            # when input is already colored (digit or background)
            non_zero_ch_idx = torch.nonzero(images[0].view(ch, -1).sum(1)).squeeze()  # torch.nonzero(images[0].view(n,ch,-1).sum(2))
            non_zero_chnls = images[:, non_zero_ch_idx]
            if len(non_zero_chnls.shape) == 3:
                non_zero_chnls = non_zero_chnls.unsqueeze(1)
            else:
                non_zero_chnls = non_zero_chnls[:, 0].unsqueeze(1)
            if torch.sum(non_zero_chnls.view(n, -1) == 0) > torch.sum(non_zero_chnls.view(n, -1) == 1):
                # digit was previously colored
                bg_ratio = images.max() - non_zero_chnls
                bg = bg_ratio * new_background
                return images + bg
            else:
                # background was previously colored
                bg = (non_zero_chnls.expand(-1, 3, -1, -1) * new_background)
                images *= images.max() - new_background
                return images + bg
        else:
            # when input is greyscale
            bg_ratio = images.max() - images
            bg = bg_ratio * new_background
            # imgs = images + bg
            # print(images[:, 0, :, :].std().item(), images[:, 1, :, :].std().item(), images[:, 2, :, :].std().item())
            # print(imgs[:, 0, :, :].std().item(), imgs[:, 1, :, :].std().item(), imgs[:, 2, :, :].std().item())
            return bg  # imgs
    else:
        assert old_background == [0]
        if not torch.is_tensor(new_background):
            new_background = torch.tensor(new_background, dtype=images.dtype)
            if images.max() <= 1 and new_background.max() > 1:
                new_background /= 255
        if not torch.is_tensor(new_background2):
            new_background2 = torch.tensor(new_background2, dtype=images.dtype)
            if images.max() <= 1 and new_background2.max() > 1:
                new_background2 /= 255
        if images.size(1) == 1 and len(new_background) == 3:
            images = images.expand(-1, 3, -1, -1)
        else:
            assert images.size(1) == len(new_background)
            # raise NotImplementedError(images.size(), new_background)
        images = images.clone()
        new_background = new_background.view(-1, 1, 1)
        new_background2 = new_background2.view(-1, 1, 1)
        n = images.size(0)
        ch = images.size(1)
        if (images.view(n, ch, -1).sum(2) == 0).sum(1).sum() > n:
            raise NotImplementedError
            # when input is already colored (digit or background)
            non_zero_ch_idx = torch.nonzero(images[0].view(ch, -1).sum(1)).squeeze()  # torch.nonzero(images[0].view(n,ch,-1).sum(2))
            non_zero_chnls = images[:, non_zero_ch_idx]
            if len(non_zero_chnls.shape) == 3:
                non_zero_chnls = non_zero_chnls.unsqueeze(1)
            else:
                non_zero_chnls = non_zero_chnls[:, 0].unsqueeze(1)
            if torch.sum(non_zero_chnls.view(n, -1) == 0) > torch.sum(non_zero_chnls.view(n, -1) == 1):
                # digit was previously colored
                bg_ratio = images.max() - non_zero_chnls
                bg = bg_ratio * new_background
                return images + bg
            else:
                # background was previously colored
                bg = (non_zero_chnls.expand(-1, 3, -1, -1) * new_background)
                images *= images.max() - new_background
                return images + bg
        else:
            # when input is greyscale
            bg_ratio = images.max() - images
            idxs = torch.randperm(len(bg_ratio))
            n_imgs = int(p * len(bg_ratio))
            bg_ratio[idxs[:n_imgs]] *= new_background2
            bg_ratio[idxs[n_imgs:]] *= new_background
            # imgs = images + bg
            # print(images[:, 0, :, :].std().item(), images[:, 1, :, :].std().item(), images[:, 2, :, :].std().item())
            # print(imgs[:, 0, :, :].std().item(), imgs[:, 1, :, :].std().item(), imgs[:, 2, :, :].std().item())
            return bg_ratio
f17f627616c75f3673d6ed043f0f36751ccde2a1
22,132
def render_sprites(sprites, scales, offsets, backgrounds, name="render_sprites"):
    """Render a scene composed of sprites on top of a background.

    A scene is composed by scaling the sprites by `scales` and offsetting them by
    `offsets` (using spatial transformers), and merging the sprites and background
    together using per-sprite alpha and importance channels.

    Sprites are organized into a series of `flights`. Each flight can use a
    different shape for the sprite maps, and there can be a different number of
    sprites in each flight.

    The coordinate system for scales and offsets has (0, 0) at the image top-left
    and (1, 1) at the image bottom-right. A sprite with scale (1, 1) and offset
    (0, 0) would occupy the whole output image. Uses bilinear interpolation for
    the spatial transformer sections.

    Args:
      sprites: List of tensors of length `n_flights`, each of shape
        (batch_size, sprite_height_i, sprite_width_i, n_channels+2).
        The sprite maps in flight i are assumed to have shape
        (sprite_height_i, sprite_width_i). The final two channels are the alpha
        and importance channels.
      scales: Tensor of shape `[batch_size, n_sprites, 2]`.
        Amount to scale sprites by. Order is y, x. A value of 1 will have the
        sprite occupy the whole output image.
      offsets: Tensor of shape `[batch_size, n_sprites, 2]`.
        Location of the top-left corner of each sprite. Order is y, x.
      backgrounds: Tensor of shape
        `[batch_size, output_height, output_width, n_channels]`.
        The background for each image.
      name: Optional name of the op.

    Returns:
      Tensor giving the stitched images. Shape is
      `(batch_size, output_height, output_width, n_channels)`, same as `backgrounds`.

    Raises:
      ImportError: if the wrapper generated during compilation is not present
      when the function is called.
    """
    with ops.name_scope(name, "render_sprites", [sprites, scales, offsets, backgrounds]):
        sprites_tensor_list = [
            ops.convert_to_tensor(s, name="sprites_flight_{}".format(i))
            for i, s in enumerate(sprites)]
        scales_tensor_list = [
            ops.convert_to_tensor(s, name="scales_flight_{}".format(i))
            for i, s in enumerate(scales)]
        offsets_tensor_list = [
            ops.convert_to_tensor(s, name="offsets_flight_{}".format(i))
            for i, s in enumerate(offsets)]
        backgrounds_tensor = ops.convert_to_tensor(backgrounds, name="backgrounds")

        lib = render_sprites_so()
        output = lib.render_sprites(
            sprites_tensor_list, scales_tensor_list, offsets_tensor_list, backgrounds_tensor)

        return output
c4210a3b1f123368c77d89fcf15634ead3d97c85
22,133
import ctypes
from os import fsencode
from os.path import exists


def load_shared_library(dll_path, lib_dir):
    """
    Return the loaded shared library object from `dll_path`, adding `lib_dir`
    to the path.
    """
    # add lib path to the front of the PATH env var
    update_path_environment(lib_dir)

    if not exists(dll_path):
        raise ImportError('Shared library does not exist: %(dll_path)r' % locals())

    if not isinstance(dll_path, bytes):
        # ensure that the path is not Unicode...
        dll_path = fsencode(dll_path)

    lib = ctypes.CDLL(dll_path)
    if lib and lib._name:
        return lib

    raise ImportError('Failed to load shared library with ctypes: %(dll_path)r and lib_dir: %(lib_dir)r' % locals())
983b6b42b25e5f7936117579b02babff30899d21
22,134
import numpy as np


def preprocess_img(image):
    """Preprocess the image to adapt it to network requirements.

    Args:
        image: image to feed to the network, a (W, H, 3) numpy array.
    Returns:
        Image ready to input to the network, shape (1, W, H, 3).
    """
    # BGR to RGB
    in_ = image[:, :, ::-1]

    # Image centering: these are the mean colour values of the BSDS500 dataset.
    in_ = np.subtract(in_, np.array((104.00699, 116.66877, 122.67892), dtype=np.float32))
    # in_ = tf.subtract(tf.cast(in_, tf.float32), np.array((104.00699, 116.66877, 122.67892), dtype=np.float32))

    # (W, H, 3) to (1, W, H, 3)
    in_ = np.expand_dims(in_, axis=0)

    return in_
c4a0136c03aa57a54db432e9abe8e35cbe43f0b6
22,135
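A quick shape check for preprocess_img above using a random BGR image; the image size is an arbitrary assumption, not a requirement of any particular network.

import numpy as np

bgr = np.random.randint(0, 256, size=(321, 481, 3)).astype(np.float32)
batch = preprocess_img(bgr)
print(batch.shape)   # (1, 321, 481, 3)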
def _parse_ec_record(e_rec):
    """
    This parses an ENSDF electron capture + b+ record.

    Parameters
    ----------
    e_rec : re.MatchObject
        regular expression MatchObject

    Returns
    -------
    en : float
        b+ endpoint energy in keV
    en_err : float
        error in b+ endpoint energy
    ib : float
        b+ branch intensity
    dib : float
        error in b+ branch intensity
    ie : float
        ec branch intensity
    die : float
        error in ec branch intensity
    logft : float
        logft of the decay
    dft : float
        error in logft
    """
    en, en_err = _get_val_err(e_rec.group(2), e_rec.group(3))
    ib, dib = _get_val_err(e_rec.group(4), e_rec.group(5))
    ie, die = _get_val_err(e_rec.group(6), e_rec.group(7))
    logft, dft = _get_val_err(e_rec.group(8), e_rec.group(9))
    tti, dtti = _get_val_err(e_rec.group(10), e_rec.group(11))
    return en, en_err, ib, dib, ie, die, logft, dft, tti, dtti
00480d031a3e6b118d880ed5e2abb890e7e8b410
22,136
import datetime

from pytz import timezone  # `timezone('UTC')` is assumed to come from pytz


def skpTime(time):
    """
    Return a time object with the hour at which the unit generated the frame.

    >>> time = '212753.00'
    >>> datetime.time(int(time[0:2]), int(time[2:4]), int(time[4:6]), int(time[-2]))
    datetime.time(21, 27, 53)
    >>>
    """
    return datetime.time(int(time[0:2]), int(time[2:4]), int(time[4:6]), int(time[-2]),
                         tzinfo=timezone('UTC'))
8bfa7e4d7faa52152c0a63944502cd6b1975ebdf
22,137
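A short check of the NMEA-style time parsing performed by skpTime above, assuming pytz is installed as noted there:

t = skpTime('212753.00')
print(t.hour, t.minute, t.second)   # 21 27 53
print(t.tzinfo)                     # UTC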
def calc_max_moisture_set_point(bpr, tsd, t):
    """
    (76) in ISO 52016-1:2017

    Gabriel Happle, Feb. 2018

    :param bpr: Building Properties
    :type bpr: BuildingPropertiesRow
    :param tsd: Time series data of building
    :type tsd: dict
    :param t: time step / hour of the year
    :type t: int
    :return: max moisture set point (kg/kg_dry_air)
    :rtype: double
    """
    # from bpr get the set point for dehumidification
    phi_int_set_dhu = bpr.comfort['RH_max_pc']
    t_int = tsd['T_int'][t]
    p_sat_int = calc_saturation_pressure(t_int)
    x_set_max = 0.622 * (phi_int_set_dhu / 100 * p_sat_int) / (
        P_ATM - phi_int_set_dhu / 100 * p_sat_int)
    return x_set_max
3fe2ab28b8f0ba3e6ba2139ed168f08e1b0e969d
22,138
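A self-contained illustration of the humidity-ratio formula used above; the Magnus-style saturation pressure expression and the numbers are illustrative assumptions, not the project's own helpers.

import math

P_ATM = 101325.0   # [Pa], assumed

def saturation_pressure(t_celsius):
    # Magnus approximation over water, in Pa (illustrative)
    return 610.94 * math.exp(17.625 * t_celsius / (t_celsius + 243.04))

rh_max = 60.0      # [%]
t_int = 24.0       # [degC]
p_sat = saturation_pressure(t_int)
x_set_max = 0.622 * (rh_max / 100 * p_sat) / (P_ATM - rh_max / 100 * p_sat)
print(round(x_set_max, 4))   # roughly 0.011 kg water / kg dry air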
def compress_pub_key(pub_key: bytes) -> bytes:
    """Convert an uncompressed public key to a compressed public key."""
    if pub_key[-1] & 1:
        return b"\x03" + pub_key[1:33]
    return b"\x02" + pub_key[1:33]
05824112c6e28c36171c956910810fc1d133c865
22,139
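A quick demonstration of compress_pub_key above on a dummy 65-byte uncompressed key (0x04 prefix, then 32-byte X and Y coordinates); the byte values are made up and not a real curve point.

x = bytes(range(1, 33))                # fake 32-byte X coordinate
y = bytes(range(33, 64)) + b"\x02"     # fake Y coordinate ending in an even byte
uncompressed = b"\x04" + x + y         # 65 bytes total

compressed = compress_pub_key(uncompressed)
print(len(compressed), compressed[:1])   # 33 b'\x02'  (even Y -> 0x02 prefix)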
def is_tensor(blob):
    """Whether the given blob is a tensor object."""
    return isinstance(blob, TensorBase)
514e9fea7b6fc60078ea46c61d25462096fa47cc
22,140
def transform_dlinput(
        tlist=None, make_tensor=True, flip_prob=0.5,
        augment_stain_sigma1=0.5, augment_stain_sigma2=0.5):
    """Transform input image data for a DL model.

    Parameters
    ----------
    tlist: None or list
        Names of transforms to apply. In testing mode, pass None.
    make_tensor: bool
        Whether to convert the image to a tensor at the end.
    flip_prob
        Probability of a random horizontal flip.
    augment_stain_sigma1
    augment_stain_sigma2
        Sigmas for the random H&E stain augmentation.
    """
    tmap = {
        'hflip': tvdt.RandomHorizontalFlip(prob=flip_prob),
        'augment_stain': tvdt.RandomHEStain(
            sigma1=augment_stain_sigma1, sigma2=augment_stain_sigma2),
    }
    tlist = [] if tlist is None else tlist
    transforms = []

    # go through the requested transforms
    for tname in tlist:
        transforms.append(tmap[tname])

    # maybe convert to tensor
    if make_tensor:
        # transforms.append(tvdt.PILToTensor(float16=ISCUDA))
        transforms.append(tvdt.PILToTensor(float16=False))

    return tvdt.Compose(transforms)
9f7bacb5a27667667432d3775ae624f4fd57e2c6
22,141
def _(text):
    """Normalize white space."""
    return ' '.join(text.strip().split())
f99f02a2fe84d3b214164e881d7891d4bfa0571d
22,142
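The whitespace normaliser above collapses any run of spaces, tabs or newlines into single spaces:

print(_("  several   spaces\tand\nnewlines  "))
# several spaces and newlines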
import ipdb
import numpy as np


def mean_IOU_primitive_segment(matching, predicted_labels, labels, pred_prim, gt_prim):
    """
    Primitive type IOU, calculated at the segment level. First the predicted
    segments are matched with ground truth segments, then IOU is calculated
    over these segments.

    :param matching: per-batch (rows, cols) matching of predicted to gt segments
    :param predicted_labels: N x 1, predicted label id for each point
    :param labels: N x 1, gt label id for each point
    :param pred_prim: K x 1, predicted primitive type for each predicted segment
    :param gt_prim: N x 1, gt primitive type for each point
    """
    batch_size = labels.shape[0]
    IOU = []
    IOU_prim = []

    for b in range(batch_size):
        iou_b = []
        iou_b_prim = []
        iou_b_prims = []
        len_labels = np.unique(predicted_labels[b]).shape[0]
        rows, cols = matching[b]
        count = 0
        for r, c in zip(rows, cols):
            pred_indices = predicted_labels[b] == r
            gt_indices = labels[b] == c

            # use only matched segments for evaluation
            if (np.sum(gt_indices) == 0) or (np.sum(pred_indices) == 0):
                continue

            # also remove the gt labels that are very small in number
            if np.sum(gt_indices) < 100:
                continue

            iou = np.sum(np.logical_and(pred_indices, gt_indices)) / (
                np.sum(np.logical_or(pred_indices, gt_indices)) + 1e-8)
            iou_b.append(iou)

            # evaluation of primitive type prediction performance
            gt_prim_type_k = gt_prim[b][gt_indices][0]
            try:
                predicted_prim_type_k = pred_prim[b][r]
            except:
                ipdb.set_trace()

            iou_b_prim.append(gt_prim_type_k == predicted_prim_type_k)
            iou_b_prims.append([gt_prim_type_k, predicted_prim_type_k])

        # find the mean of IOU over this shape
        IOU.append(np.mean(iou_b))
        IOU_prim.append(np.mean(iou_b_prim))
    return np.mean(IOU), np.mean(IOU_prim), iou_b_prims
cf405144206e824a868f4eb777635237e8cc59b8
22,143
def _infer_title(ntbk, strip_title_header=True):
    """Infer a title from notebook metadata.

    First looks in metadata['title'] and, if nothing is found, looks for whether
    the first line of the first cell is an H1 header. Optionally strips this
    header from the notebook content.
    """
    # First try the notebook metadata, if not found try the first line
    title = ntbk.metadata.get('title')

    # If the first line of the notebook is an H1 header, assume it's the title.
    if title is None:
        first_cell_lines = ntbk.cells[0].source.lstrip().split('\n')
        if first_cell_lines[0].startswith('# '):
            title = first_cell_lines.pop(0).strip('# ')
            if strip_title_header is True:
                ntbk.cells[0].source = '\n'.join(first_cell_lines)
    return title
e8152f0c160d2cb7af66b1a20f4d95d4ea16c703
22,145
import hashlib


def stable_hash(value):
    """Return a stable hash."""
    return int(hashlib.md5(str(value).encode('utf-8')).hexdigest(), 16)
a5be51a971eb6c9a91489155216ef194f9d0d7ba
22,146
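Unlike the built-in hash(), the MD5-based helper above yields the same value across interpreter runs, which the snippet below illustrates:

h = stable_hash("example")
print(h % 10)   # stable bucket assignment, identical on every run
assert stable_hash("example") == h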
def minimal_community(community_owner):
    """Minimal community data as a dict coming from the external world."""
    return {
        "id": "comm_id",
        "access": {
            "visibility": "public",
        },
        "metadata": {
            "title": "Title",
            "type": "topic"
        }
    }
18b99c30d4dff01b988e8ac311a6da92142e71ee
22,147
from collections import Counter

from Bio import Entrez  # Biopython's Entrez module (assumed)


def retrieve_descriptions(gene, descriptions, empties):
    """Given a single gene name, grab possible descriptions from NCBI and prompt
    the user to select one."""
    # Perform ESearch and grab list of IDs
    query = gene + '[Gene Name]'
    handle = Entrez.esearch(db='gene', term=query, retmax=100, retmode='xml')
    record = Entrez.read(handle)
    handle.close()
    idlist = ','.join(record["IdList"])

    # Ensure you have results, exit if not
    if idlist == '':
        print('No records for {}, skipping...\n'.format(gene))
        empties.append(gene)
        return

    # Generate summary from UID list
    handle = Entrez.esummary(db='gene', id=idlist)
    record = Entrez.read(handle)
    handle.close()

    # Grab description, counter for unique values
    desc_cnt = Counter()
    doc_sums = record[u'DocumentSummarySet'][u'DocumentSummary']
    for i in range(len(doc_sums)):
        if doc_sums[i][u'NomenclatureName'] != '':
            desc = doc_sums[i][u'NomenclatureName']
        else:
            desc = doc_sums[i][u'OtherDesignations'].split('|')[0]
        desc_cnt[desc] += 1

    # Create list from counter keys for indexing purposes
    desc_list = list(filter(None, desc_cnt))
    if len(desc_cnt) > 1:
        print('{} has {} unique descriptions from {} results. These are:'.format(
            gene, len(desc_list), len(doc_sums)))
        ans_range = range(len(desc_list))
        for i in ans_range:
            print('{}: {} [{}/{}]'.format(
                i + 1, desc_list[i], desc_cnt[desc_list[i]], len(doc_sums)))

        # Take user input to accept/reject a description
        while True:
            ans = input('Which do you accept? [{}-{}/N]: '.format(
                min(ans_range) + 1, max(ans_range) + 1))
            # Check if int or str entered
            try:
                ans = int(ans) - 1
                if ans in ans_range:
                    print('Accepting #{}.\n'.format(ans + 1))
                    descriptions[gene] = desc_list[ans]
                    break
                else:
                    print('{} is outside acceptable range. Try again.'.format(ans))
            except:
                if ans in ['N', 'n', 'no', 'No']:
                    print('Skipping this gene.\n')
                    break
                else:
                    print('Invalid input, try again.')

    # If there's only one unique description, accept/reject
    elif len(desc_cnt) == 1:
        desc_list2 = list(desc_cnt)
        desc = desc_list2[0]
        if desc == '':
            print('{} has empty description.'.format(gene))
            empties.append(gene)
            return
        print('{} only has one unique description from {} results.'.format(
            gene, len(doc_sums)))
        print('This is:\n{}'.format(desc))
        while True:
            ans = input('Accept? Y/N: ')
            if ans in ['Y', 'y', 'yes', 'Yes']:
                print('Description accepted.\n')
                descriptions[gene] = desc
                break
            elif ans in ['N', 'n', 'no', 'No']:
                print('Skipping this gene.\n')
                empties.append(gene)
                break
            else:
                print('Invalid input, try again.')
    return descriptions
524d4955a51eb0e3143c06d91eb7ae611579d9dd
22,148
import time


def readCmd():
    """Parse out a single character contained in '<>', i.e. '<1>' returns int(1).

    Returns the single character as an int, or -1 if parsing fails or times out."""
    recvInProgress = False
    timeout = time.time() + 10
    # `ser` is assumed to be a module-level serial port object (e.g. pyserial).
    while time.time() < timeout:
        try:
            rc = ser.read().decode("utf-8")
        except UnicodeDecodeError:
            continue

        if recvInProgress is True:
            if rc != '>':
                cmd = rc
            else:
                # while ser.in_waiting != 0:
                #     ser.read()
                try:
                    return int(cmd)
                except:
                    print("Bad command parse")
                    return -1
        elif rc == '<':
            recvInProgress = True

    print("Timeout on readCmd")
    return -1
3e7b1eef27ab41b7079c966bf696e95923ebe6eb
22,149
def map_ground_truth(bounding_boxes, anchor_boxes, threshold=0.5):
    """
    Assign a ground truth object to every anchor box, as described in the SSD paper.

    :param bounding_boxes:
    :param anchor_boxes:
    :param threshold:
    :return:
    """
    # overlaps shape: (bounding_boxes, anchor_boxes)
    overlaps = jaccard_overlap(bounding_boxes, anchor_boxes)

    # best_bbox_overlaps and best_bbox_ids shape: (bounding_boxes)
    # best_bbox_overlaps: IoU of overlap with the best anchor box for every ground truth box
    # best_bbox_ids: indexes of anchor boxes
    best_bbox_overlaps, best_bbox_ids = overlaps.max(1)

    # overlaps and bbox_ids shape: (anchor_boxes)
    # IoU and indexes of bounding boxes with the best overlap for every anchor box
    overlaps, bbox_ids = overlaps.max(0)

    # Combine the two: best_bbox_overlaps takes precedence
    overlaps[best_bbox_ids] = 2
    for bbox_id, anchor_id in enumerate(best_bbox_ids):
        bbox_ids[anchor_id] = bbox_id

    # Check the threshold and return a binary mask and bbox ids for each anchor
    is_positive = overlaps > threshold
    return is_positive, bbox_ids
1609bac66f4132249e893996b07b1a8752b8ab48
22,150
import idaapi


def MakeFrame(ea, lvsize, frregs, argsize):
    """
    Make function frame

    @param ea: any address belonging to the function
    @param lvsize: size of function local variables
    @param frregs: size of saved registers
    @param argsize: size of function arguments
    @return: ID of function frame or -1
        If the function did not have a frame, the frame will be created.
        Otherwise the frame will be modified.
    """
    func = idaapi.get_func(ea)
    if func is None:
        return -1

    frameid = idaapi.add_frame(func, lvsize, frregs, argsize)
    if not frameid:
        if not idaapi.set_frame_size(func, lvsize, frregs, argsize):
            return -1

    return func.frame
f0affe28d506a65d2a43fa64f2b9a99dbaf62b25
22,152